2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
12 #include "afs/param.h"
14 #include <afs/param.h>
22 # include "afs/sysincludes.h"
23 # include "afsincludes.h"
24 # include "rx/rx_kcommon.h"
25 # include "rx/rx_clock.h"
26 # include "rx/rx_queue.h"
27 # include "rx/rx_packet.h"
28 # else /* defined(UKERNEL) */
29 # ifdef RX_KERNEL_TRACE
30 # include "../rx/rx_kcommon.h"
33 # ifndef AFS_LINUX20_ENV
36 # if defined(AFS_SGI_ENV) || defined(AFS_HPUX110_ENV)
37 # include "afs/sysincludes.h"
39 # if defined(AFS_OBSD_ENV)
42 # include "h/socket.h"
43 # if !defined(AFS_SUN5_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_HPUX110_ENV)
44 # if !defined(AFS_OSF_ENV) && !defined(AFS_AIX41_ENV)
45 # include "sys/mount.h" /* it gets pulled in by something later anyway */
49 # include "netinet/in.h"
50 # include "afs/afs_osi.h"
51 # include "rx_kmutex.h"
52 # include "rx/rx_clock.h"
53 # include "rx/rx_queue.h"
55 # include <sys/sysmacros.h>
57 # include "rx/rx_packet.h"
58 # endif /* defined(UKERNEL) */
59 # include "rx/rx_internal.h"
60 # include "rx/rx_globals.h"
62 # include "sys/types.h"
63 # include <sys/stat.h>
65 # if defined(AFS_NT40_ENV)
66 # include <winsock2.h>
68 # define EWOULDBLOCK WSAEWOULDBLOCK
71 # include "rx_xmit_nt.h"
74 # include <sys/socket.h>
75 # include <netinet/in.h>
77 # include "rx_clock.h"
78 # include "rx_internal.h"
80 # include "rx_queue.h"
82 # include <sys/sysmacros.h>
84 # include "rx_packet.h"
85 # include "rx_globals.h"
95 /* rxdb_fileID is used to identify the lock location, along with line#. */
96 static int rxdb_fileID = RXDB_FILE_RX_PACKET;
97 #endif /* RX_LOCKS_DB */
98 static struct rx_packet *rx_mallocedP = 0;
100 static afs_uint32 rx_packet_id = 0;
103 extern char cml_version_number[];
105 static int AllocPacketBufs(int class, int num_pkts, struct rx_queue *q);
107 static void rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
108 afs_int32 ahost, short aport,
111 static int rxi_FreeDataBufsToQueue(struct rx_packet *p,
113 struct rx_queue * q);
114 #ifdef RX_ENABLE_TSFPQ
116 rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first, int flush_global);
119 /* some rules about packets:
120 * 1. When a packet is allocated, the final iov_buf contains room for
121 * a security trailer, but iov_len masks that fact. If the security
122 * package wants to add the trailer, it may do so, and then extend
123 * iov_len appropriately. For this reason, packet's niovecs and
124 * iov_len fields should be accurate before calling PreparePacket.
128 * all packet buffers (iov_base) are integral multiples of
130 * offset is an integral multiple of the word size.
133 rx_SlowGetInt32(struct rx_packet *packet, size_t offset)
137 for (l = 0, i = 1; i < packet->niovecs; i++) {
138 if (l + packet->wirevec[i].iov_len > offset) {
140 *((afs_int32 *) ((char *)(packet->wirevec[i].iov_base) +
143 l += packet->wirevec[i].iov_len;
150 * all packet buffers (iov_base) are integral multiples of the word size.
151 * offset is an integral multiple of the word size.
154 rx_SlowPutInt32(struct rx_packet * packet, size_t offset, afs_int32 data)
158 for (l = 0, i = 1; i < packet->niovecs; i++) {
159 if (l + packet->wirevec[i].iov_len > offset) {
160 *((afs_int32 *) ((char *)(packet->wirevec[i].iov_base) +
161 (offset - l))) = data;
164 l += packet->wirevec[i].iov_len;
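/*
 * A minimal usage sketch of the two slow-path word routines above
 * (illustrative only; rx_ExampleSlowWord is not part of rx): store a
 * 32-bit value at a word-aligned wire offset and read it back.  The
 * offset is assumed to lie within the packet's current data, per the
 * alignment rules stated in the comments above.
 */
static void
rx_ExampleSlowWord(struct rx_packet *packet, size_t offset, afs_int32 value)
{
    afs_int32 readback;

    /* the slow path walks wirevec[1..niovecs-1] to find the right buffer */
    rx_SlowPutInt32(packet, offset, value);
    readback = rx_SlowGetInt32(packet, offset);
    osi_Assert(readback == value);
}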
171 * all packet buffers (iov_base) are integral multiples of the
173 * offset is an integral multiple of the word size.
175 * all buffers are contiguously arrayed in the iovec from 0..niovecs-1
178 rx_SlowReadPacket(struct rx_packet * packet, unsigned int offset, int resid,
181 unsigned int i, j, l, r;
182 for (l = 0, i = 1; i < packet->niovecs; i++) {
183 if (l + packet->wirevec[i].iov_len > offset) {
186 l += packet->wirevec[i].iov_len;
189 /* i is the iovec which contains the first little bit of data in which we
190 * are interested. l is the total length of everything prior to this iovec.
191 * j is the number of bytes we can safely copy out of this iovec.
192 * offset only applies to the first iovec.
195 while ((resid > 0) && (i < packet->niovecs)) {
196 j = MIN(resid, packet->wirevec[i].iov_len - (offset - l));
197 memcpy(out, (char *)(packet->wirevec[i].iov_base) + (offset - l), j);
200 l += packet->wirevec[i].iov_len;
205 return (resid ? (r - resid) : r);
210 * all packet buffers (iov_base) are integral multiples of the
212 * offset is an integral multiple of the word size.
215 rx_SlowWritePacket(struct rx_packet * packet, int offset, int resid, char *in)
220 for (l = 0, i = 1; i < packet->niovecs; i++) {
221 if (l + packet->wirevec[i].iov_len > offset) {
224 l += packet->wirevec[i].iov_len;
227 /* i is the iovec which contains the first little bit of data in which we
228 * are interested. l is the total length of everything prior to this iovec.
229 * j is the number of bytes we can safely copy into this iovec.
230 * offset only applies to the first iovec.
233 while ((resid > 0) && (i < RX_MAXWVECS)) {
234 if (i >= packet->niovecs)
235 if (rxi_AllocDataBuf(packet, resid, RX_PACKET_CLASS_SEND_CBUF) > 0) /* ++niovecs as a side-effect */
238 b = (char *)(packet->wirevec[i].iov_base) + (offset - l);
239 j = MIN(resid, packet->wirevec[i].iov_len - (offset - l));
243 l += packet->wirevec[i].iov_len;
248 return (resid ? (r - resid) : r);
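/*
 * A minimal round-trip sketch for the bulk copy helpers above
 * (illustrative only; rx_ExampleCopyThrough is not an rx routine): write
 * a caller buffer into the packet at an arbitrary offset and read it
 * straight back out.  rx_SlowWritePacket extends the packet with
 * continuation buffers (via rxi_AllocDataBuf) when the data runs past
 * the existing iovecs.
 */
static int
rx_ExampleCopyThrough(struct rx_packet *packet, unsigned int offset,
                      char *data, int nbytes)
{
    char check[64];
    int n = nbytes;

    if (n > (int) sizeof(check))
        n = (int) sizeof(check);

    if (rx_SlowWritePacket(packet, offset, n, data) != n)
        return -1;              /* could not extend the packet far enough */
    if (rx_SlowReadPacket(packet, offset, n, check) != n)
        return -1;
    return n;                   /* number of bytes round-tripped */
}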
252 rxi_AllocPackets(int class, int num_pkts, struct rx_queue * q)
254 register struct rx_packet *p, *np;
256 num_pkts = AllocPacketBufs(class, num_pkts, q);
258 for (queue_Scan(q, p, np, rx_packet)) {
259 RX_PACKET_IOV_FULLINIT(p);
265 #ifdef RX_ENABLE_TSFPQ
267 AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
269 register struct rx_ts_info_t * rx_ts_info;
273 RX_TS_INFO_GET(rx_ts_info);
275 transfer = num_pkts - rx_ts_info->_FPQ.len;
278 MUTEX_ENTER(&rx_freePktQ_lock);
279 transfer = MAX(transfer, rx_TSFPQGlobSize);
280 if (transfer > rx_nFreePackets) {
281 /* alloc enough for us, plus a few globs for other threads */
282 rxi_MorePacketsNoLock(transfer + 4 * rx_initSendWindow);
285 RX_TS_FPQ_GTOL2(rx_ts_info, transfer);
287 MUTEX_EXIT(&rx_freePktQ_lock);
291 RX_TS_FPQ_QCHECKOUT(rx_ts_info, num_pkts, q);
295 #else /* RX_ENABLE_TSFPQ */
297 AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
308 MUTEX_ENTER(&rx_freePktQ_lock);
311 for (; (num_pkts > 0) && (rxi_OverQuota2(class,num_pkts));
312 num_pkts--, overq++);
315 rxi_NeedMorePackets = TRUE;
317 case RX_PACKET_CLASS_RECEIVE:
318 rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
320 case RX_PACKET_CLASS_SEND:
321 rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
323 case RX_PACKET_CLASS_SPECIAL:
324 rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
326 case RX_PACKET_CLASS_RECV_CBUF:
327 rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
329 case RX_PACKET_CLASS_SEND_CBUF:
330 rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
335 if (rx_nFreePackets < num_pkts)
336 num_pkts = rx_nFreePackets;
339 rxi_NeedMorePackets = TRUE;
343 if (rx_nFreePackets < num_pkts) {
344 rxi_MorePacketsNoLock(MAX((num_pkts-rx_nFreePackets), 4 * rx_initSendWindow));
348 for (i=0, c=queue_First(&rx_freePacketQueue, rx_packet);
350 i++, c=queue_Next(c, rx_packet)) {
354 queue_SplitBeforeAppend(&rx_freePacketQueue,q,c);
356 rx_nFreePackets -= num_pkts;
361 MUTEX_EXIT(&rx_freePktQ_lock);
366 #endif /* RX_ENABLE_TSFPQ */
369 * Free a packet currently used as a continuation buffer
371 #ifdef RX_ENABLE_TSFPQ
372 /* num_pkts=0 means queue length is unknown */
374 rxi_FreePackets(int num_pkts, struct rx_queue * q)
376 register struct rx_ts_info_t * rx_ts_info;
377 register struct rx_packet *c, *nc;
380 osi_Assert(num_pkts >= 0);
381 RX_TS_INFO_GET(rx_ts_info);
384 for (queue_Scan(q, c, nc, rx_packet), num_pkts++) {
385 rxi_FreeDataBufsTSFPQ(c, 2, 0);
388 for (queue_Scan(q, c, nc, rx_packet)) {
389 rxi_FreeDataBufsTSFPQ(c, 2, 0);
394 RX_TS_FPQ_QCHECKIN(rx_ts_info, num_pkts, q);
397 if (rx_ts_info->_FPQ.len > rx_TSFPQLocalMax) {
399 MUTEX_ENTER(&rx_freePktQ_lock);
401 RX_TS_FPQ_LTOG(rx_ts_info);
403 /* Wakeup anyone waiting for packets */
406 MUTEX_EXIT(&rx_freePktQ_lock);
412 #else /* RX_ENABLE_TSFPQ */
413 /* num_pkts=0 means queue length is unknown */
415 rxi_FreePackets(int num_pkts, struct rx_queue *q)
418 register struct rx_packet *p, *np;
422 osi_Assert(num_pkts >= 0);
426 for (queue_Scan(q, p, np, rx_packet), num_pkts++) {
427 if (p->niovecs > 2) {
428 qlen += rxi_FreeDataBufsToQueue(p, 2, &cbs);
435 for (queue_Scan(q, p, np, rx_packet)) {
436 if (p->niovecs > 2) {
437 qlen += rxi_FreeDataBufsToQueue(p, 2, &cbs);
444 queue_SpliceAppend(q, &cbs);
450 MUTEX_ENTER(&rx_freePktQ_lock);
452 queue_SpliceAppend(&rx_freePacketQueue, q);
453 rx_nFreePackets += qlen;
455 /* Wakeup anyone waiting for packets */
458 MUTEX_EXIT(&rx_freePktQ_lock);
463 #endif /* RX_ENABLE_TSFPQ */
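/*
 * A minimal batch sketch (illustrative only; rx_ExampleBatch is not an rx
 * entry point): pull a few receive-class packets off the free pool as a
 * queue with rxi_AllocPackets, then hand the whole queue back with
 * rxi_FreePackets.  Passing the known count avoids the length recount
 * that num_pkts == 0 implies.
 */
static void
rx_ExampleBatch(void)
{
    struct rx_queue q;
    int n;

    queue_Init(&q);
    n = rxi_AllocPackets(RX_PACKET_CLASS_RECEIVE, 4, &q);

    /* ... the packets on q could now be posted for incoming data ... */

    rxi_FreePackets(n, &q);
}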
465 /* this one is kind of awful.
466 * In rxkad, the packet has already been shortened and made ready for
467 * sending. All of a sudden, we discover we need some of that space back.
468 * This isn't terribly general, because it knows that the packets are only
469 * rounded up to the EBS (userdata + security header).
472 rxi_RoundUpPacket(struct rx_packet *p, unsigned int nb)
476 if (p->wirevec[i].iov_base == (caddr_t) p->localdata) {
477 if (p->wirevec[i].iov_len <= RX_FIRSTBUFFERSIZE - nb) {
478 p->wirevec[i].iov_len += nb;
482 if (p->wirevec[i].iov_len <= RX_CBUFFERSIZE - nb) {
483 p->wirevec[i].iov_len += nb;
491 /* get sufficient space to store nb bytes of data (or more), and hook
492 * it into the supplied packet. Return nbytes<=0 if successful, otherwise
493 * returns the number of bytes >0 which it failed to come up with.
494 * Don't need to worry about locking on packet, since only
495 * one thread can manipulate one at a time. Locking on continuation
496 * packets is handled by AllocPacketBufs */
497 /* MTUXXX don't need to go through the for loop if we can trust niovecs */
499 rxi_AllocDataBuf(struct rx_packet *p, int nb, int class)
503 register struct rx_packet *cb, *ncb;
505 /* compute the number of cbuf's we need */
506 nv = nb / RX_CBUFFERSIZE;
507 if ((nv * RX_CBUFFERSIZE) < nb)
509 if ((nv + p->niovecs) > RX_MAXWVECS)
510 nv = RX_MAXWVECS - p->niovecs;
514 /* allocate buffers */
516 nv = AllocPacketBufs(class, nv, &q);
518 /* setup packet iovs */
519 for (i = p->niovecs, queue_Scan(&q, cb, ncb, rx_packet), i++) {
521 p->wirevec[i].iov_base = (caddr_t) cb->localdata;
522 p->wirevec[i].iov_len = RX_CBUFFERSIZE;
525 nb -= (nv * RX_CBUFFERSIZE);
526 p->length += (nv * RX_CBUFFERSIZE);
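/*
 * A minimal sketch of growing a packet before a large write (illustrative
 * only; rx_ExampleGrow is not part of rx).  rxi_AllocDataBuf returns the
 * number of requested bytes it could NOT obtain, so a value <= 0 means
 * the packet now has room.
 */
static int
rx_ExampleGrow(struct rx_packet *p, int needed)
{
    int shortfall;

    if (needed <= (int) p->length)
        return 0;               /* already big enough */

    shortfall = rxi_AllocDataBuf(p, needed - (int) p->length,
                                 RX_PACKET_CLASS_SEND_CBUF);
    return (shortfall > 0) ? -1 : 0;    /* -1: out of cbufs or iovecs */
}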
532 /* Add more packet buffers */
533 #ifdef RX_ENABLE_TSFPQ
535 rxi_MorePackets(int apackets)
537 struct rx_packet *p, *e;
538 register struct rx_ts_info_t * rx_ts_info;
542 getme = apackets * sizeof(struct rx_packet);
543 p = (struct rx_packet *)osi_Alloc(getme);
546 PIN(p, getme); /* XXXXX */
547 memset((char *)p, 0, getme);
548 RX_TS_INFO_GET(rx_ts_info);
550 RX_TS_FPQ_LOCAL_ALLOC(rx_ts_info,apackets);
551 /* TSFPQ patch also needs to keep track of total packets */
552 MUTEX_ENTER(&rx_stats_mutex);
553 rx_nPackets += apackets;
554 RX_TS_FPQ_COMPUTE_LIMITS;
555 MUTEX_EXIT(&rx_stats_mutex);
557 for (e = p + apackets; p < e; p++) {
558 RX_PACKET_IOV_INIT(p);
561 RX_TS_FPQ_CHECKIN(rx_ts_info,p);
564 MUTEX_ENTER(&rx_freePktQ_lock);
566 p->packetId = rx_packet_id++;
567 p->allNextp = rx_mallocedP;
570 MUTEX_EXIT(&rx_freePktQ_lock);
573 rx_ts_info->_FPQ.delta += apackets;
575 if (rx_ts_info->_FPQ.len > rx_TSFPQLocalMax) {
577 MUTEX_ENTER(&rx_freePktQ_lock);
579 RX_TS_FPQ_LTOG(rx_ts_info);
580 rxi_NeedMorePackets = FALSE;
583 MUTEX_EXIT(&rx_freePktQ_lock);
587 #else /* RX_ENABLE_TSFPQ */
589 rxi_MorePackets(int apackets)
591 struct rx_packet *p, *e;
595 getme = apackets * sizeof(struct rx_packet);
596 p = (struct rx_packet *)osi_Alloc(getme);
599 PIN(p, getme); /* XXXXX */
600 memset((char *)p, 0, getme);
602 MUTEX_ENTER(&rx_freePktQ_lock);
604 for (e = p + apackets; p < e; p++) {
605 RX_PACKET_IOV_INIT(p);
606 p->flags |= RX_PKTFLAG_FREE;
609 queue_Append(&rx_freePacketQueue, p);
611 p->packetId = rx_packet_id++;
612 p->allNextp = rx_mallocedP;
617 rx_nFreePackets += apackets;
618 rxi_NeedMorePackets = FALSE;
621 MUTEX_EXIT(&rx_freePktQ_lock);
624 #endif /* RX_ENABLE_TSFPQ */
626 #ifdef RX_ENABLE_TSFPQ
628 rxi_MorePacketsTSFPQ(int apackets, int flush_global, int num_keep_local)
630 struct rx_packet *p, *e;
631 register struct rx_ts_info_t * rx_ts_info;
635 getme = apackets * sizeof(struct rx_packet);
636 p = (struct rx_packet *)osi_Alloc(getme);
638 PIN(p, getme); /* XXXXX */
639 memset((char *)p, 0, getme);
640 RX_TS_INFO_GET(rx_ts_info);
642 RX_TS_FPQ_LOCAL_ALLOC(rx_ts_info,apackets);
643 /* TSFPQ patch also needs to keep track of total packets */
644 MUTEX_ENTER(&rx_stats_mutex);
645 rx_nPackets += apackets;
646 RX_TS_FPQ_COMPUTE_LIMITS;
647 MUTEX_EXIT(&rx_stats_mutex);
649 for (e = p + apackets; p < e; p++) {
650 RX_PACKET_IOV_INIT(p);
652 RX_TS_FPQ_CHECKIN(rx_ts_info,p);
655 MUTEX_ENTER(&rx_freePktQ_lock);
657 p->packetId = rx_packet_id++;
658 p->allNextp = rx_mallocedP;
661 MUTEX_EXIT(&rx_freePktQ_lock);
664 rx_ts_info->_FPQ.delta += apackets;
667 (num_keep_local < apackets)) {
669 MUTEX_ENTER(&rx_freePktQ_lock);
671 RX_TS_FPQ_LTOG2(rx_ts_info, (apackets - num_keep_local));
672 rxi_NeedMorePackets = FALSE;
675 MUTEX_EXIT(&rx_freePktQ_lock);
679 #endif /* RX_ENABLE_TSFPQ */
682 /* Add more packet buffers */
684 rxi_MorePacketsNoLock(int apackets)
686 #ifdef RX_ENABLE_TSFPQ
687 register struct rx_ts_info_t * rx_ts_info;
688 #endif /* RX_ENABLE_TSFPQ */
689 struct rx_packet *p, *e;
692 /* allocate enough packets that 1/4 of the packets will be able
693 * to hold maximal amounts of data */
694 apackets += (apackets / 4)
695 * ((rx_maxJumboRecvSize - RX_FIRSTBUFFERSIZE) / RX_CBUFFERSIZE);
697 getme = apackets * sizeof(struct rx_packet);
698 p = (struct rx_packet *)osi_Alloc(getme);
700 apackets -= apackets / 4;
701 osi_Assert(apackets > 0);
704 memset((char *)p, 0, getme);
706 #ifdef RX_ENABLE_TSFPQ
707 RX_TS_INFO_GET(rx_ts_info);
708 RX_TS_FPQ_GLOBAL_ALLOC(rx_ts_info,apackets);
709 #endif /* RX_ENABLE_TSFPQ */
711 for (e = p + apackets; p < e; p++) {
712 RX_PACKET_IOV_INIT(p);
713 p->flags |= RX_PKTFLAG_FREE;
716 queue_Append(&rx_freePacketQueue, p);
718 p->packetId = rx_packet_id++;
719 p->allNextp = rx_mallocedP;
724 rx_nFreePackets += apackets;
725 #ifdef RX_ENABLE_TSFPQ
726 /* TSFPQ patch also needs to keep track of total packets */
727 MUTEX_ENTER(&rx_stats_mutex);
728 rx_nPackets += apackets;
729 RX_TS_FPQ_COMPUTE_LIMITS;
730 MUTEX_EXIT(&rx_stats_mutex);
731 #endif /* RX_ENABLE_TSFPQ */
732 rxi_NeedMorePackets = FALSE;
738 rxi_FreeAllPackets(void)
740 /* must be called at proper interrupt level, etcetera */
741 /* MTUXXX need to free all Packets */
742 osi_Free(rx_mallocedP,
743 (rx_maxReceiveWindow + 2) * sizeof(struct rx_packet));
744 UNPIN(rx_mallocedP, (rx_maxReceiveWindow + 2) * sizeof(struct rx_packet));
747 #ifdef RX_ENABLE_TSFPQ
749 rxi_AdjustLocalPacketsTSFPQ(int num_keep_local, int allow_overcommit)
751 register struct rx_ts_info_t * rx_ts_info;
755 RX_TS_INFO_GET(rx_ts_info);
757 if (num_keep_local != rx_ts_info->_FPQ.len) {
759 MUTEX_ENTER(&rx_freePktQ_lock);
760 if (num_keep_local < rx_ts_info->_FPQ.len) {
761 xfer = rx_ts_info->_FPQ.len - num_keep_local;
762 RX_TS_FPQ_LTOG2(rx_ts_info, xfer);
765 xfer = num_keep_local - rx_ts_info->_FPQ.len;
766 if ((num_keep_local > rx_TSFPQLocalMax) && !allow_overcommit)
767 xfer = rx_TSFPQLocalMax - rx_ts_info->_FPQ.len;
768 if (rx_nFreePackets < xfer) {
769 rxi_MorePacketsNoLock(MAX(xfer - rx_nFreePackets, 4 * rx_initSendWindow));
771 RX_TS_FPQ_GTOL2(rx_ts_info, xfer);
773 MUTEX_EXIT(&rx_freePktQ_lock);
779 rxi_FlushLocalPacketsTSFPQ(void)
781 rxi_AdjustLocalPacketsTSFPQ(0, 0);
783 #endif /* RX_ENABLE_TSFPQ */
785 /* Allocate more packets iff we need more continuation buffers */
786 /* In kernel, can't page in memory with interrupts disabled, so we
787 * don't use the event mechanism. */
789 rx_CheckPackets(void)
791 if (rxi_NeedMorePackets) {
792 rxi_MorePackets(rx_initSendWindow);
796 /* In the packet freeing routine below, the assumption is that
797 we want all of the packets to be used equally frequently, so that we
798 don't get packet buffers paging out. It would be just as valid to
799 assume that we DO want them to page out if not many are being used.
800 In any event, we assume the former, and append the packets to the end of the free list.
802 /* This explanation is bogus. The free list doesn't remain in any kind of
803 useful order for long: the packets in use get pretty much randomly scattered
804 across all the pages. In order to permit unused {packets,bufs} to page out, they
805 must be stored so that packets which are adjacent in memory are adjacent in the
806 free list. An array springs rapidly to mind.
809 /* Actually free the packet p. */
810 #ifdef RX_ENABLE_TSFPQ
812 rxi_FreePacketNoLock(struct rx_packet *p)
814 register struct rx_ts_info_t * rx_ts_info;
815 dpf(("Free %lx\n", (unsigned long)p));
817 RX_TS_INFO_GET(rx_ts_info);
818 RX_TS_FPQ_CHECKIN(rx_ts_info,p);
819 if (rx_ts_info->_FPQ.len > rx_TSFPQLocalMax) {
820 RX_TS_FPQ_LTOG(rx_ts_info);
823 #else /* RX_ENABLE_TSFPQ */
825 rxi_FreePacketNoLock(struct rx_packet *p)
827 dpf(("Free %lx\n", (unsigned long)p));
831 queue_Append(&rx_freePacketQueue, p);
833 #endif /* RX_ENABLE_TSFPQ */
835 #ifdef RX_ENABLE_TSFPQ
837 rxi_FreePacketTSFPQ(struct rx_packet *p, int flush_global)
839 register struct rx_ts_info_t * rx_ts_info;
840 dpf(("Free %lx\n", (unsigned long)p));
842 RX_TS_INFO_GET(rx_ts_info);
843 RX_TS_FPQ_CHECKIN(rx_ts_info,p);
845 if (flush_global && (rx_ts_info->_FPQ.len > rx_TSFPQLocalMax)) {
847 MUTEX_ENTER(&rx_freePktQ_lock);
849 RX_TS_FPQ_LTOG(rx_ts_info);
851 /* Wakeup anyone waiting for packets */
854 MUTEX_EXIT(&rx_freePktQ_lock);
858 #endif /* RX_ENABLE_TSFPQ */
861 * free continuation buffers off a packet into a queue
863 * [IN] p -- packet from which continuation buffers will be freed
864 * [IN] first -- iovec offset of first continuation buffer to free
865 * [IN] q -- queue into which continuation buffers will be chained
868 * number of continuation buffers freed
870 #ifndef RX_ENABLE_TSFPQ
872 rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first, struct rx_queue * q)
875 struct rx_packet * cb;
878 for (first = MAX(2, first); first < p->niovecs; first++, count++) {
879 iov = &p->wirevec[first];
881 osi_Panic("rxi_FreeDataBufsToQueue: unexpected NULL iov");
882 cb = RX_CBUF_TO_PACKET(iov->iov_base, p);
883 RX_FPQ_MARK_FREE(cb);
894 * free packet continuation buffers into the global free packet pool
896 * [IN] p -- packet from which to free continuation buffers
897 * [IN] first -- iovec offset of first continuation buffer to free
903 rxi_FreeDataBufsNoLock(struct rx_packet *p, afs_uint32 first)
907 for (first = MAX(2, first); first < p->niovecs; first++) {
908 iov = &p->wirevec[first];
910 osi_Panic("rxi_FreeDataBufsNoLock: unexpected NULL iov");
911 rxi_FreePacketNoLock(RX_CBUF_TO_PACKET(iov->iov_base, p));
919 #ifdef RX_ENABLE_TSFPQ
921 * free packet continuation buffers into the thread-local free pool
923 * [IN] p -- packet from which continuation buffers will be freed
924 * [IN] first -- iovec offset of first continuation buffer to free
925 * any value less than 2, the min number of iovecs,
926 * is treated as if it is 2.
927 * [IN] flush_global -- if nonzero, we will flush overquota packets to the
928 * global free pool before returning
934 rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first, int flush_global)
937 register struct rx_ts_info_t * rx_ts_info;
939 RX_TS_INFO_GET(rx_ts_info);
941 for (first = MAX(2, first); first < p->niovecs; first++) {
942 iov = &p->wirevec[first];
944 osi_Panic("rxi_FreeDataBufsTSFPQ: unexpected NULL iov");
945 RX_TS_FPQ_CHECKIN(rx_ts_info,RX_CBUF_TO_PACKET(iov->iov_base, p));
950 if (flush_global && (rx_ts_info->_FPQ.len > rx_TSFPQLocalMax)) {
952 MUTEX_ENTER(&rx_freePktQ_lock);
954 RX_TS_FPQ_LTOG(rx_ts_info);
956 /* Wakeup anyone waiting for packets */
959 MUTEX_EXIT(&rx_freePktQ_lock);
964 #endif /* RX_ENABLE_TSFPQ */
966 int rxi_nBadIovecs = 0;
968 /* rxi_RestoreDataBufs
970 * Restore the correct sizes to the iovecs. Called when reusing a packet
971 * for reading off the wire.
974 rxi_RestoreDataBufs(struct rx_packet *p)
977 struct iovec *iov = &p->wirevec[2];
979 RX_PACKET_IOV_INIT(p);
981 for (i = 2, iov = &p->wirevec[2]; i < p->niovecs; i++, iov++) {
982 if (!iov->iov_base) {
987 iov->iov_len = RX_CBUFFERSIZE;
991 #ifdef RX_ENABLE_TSFPQ
993 rxi_TrimDataBufs(struct rx_packet *p, int first)
996 struct iovec *iov, *end;
997 register struct rx_ts_info_t * rx_ts_info;
1001 osi_Panic("TrimDataBufs 1: first must be 1");
1003 /* Skip over continuation buffers containing message data */
1004 iov = &p->wirevec[2];
1005 end = iov + (p->niovecs - 2);
1006 length = p->length - p->wirevec[1].iov_len;
1007 for (; iov < end && length > 0; iov++) {
1009 osi_Panic("TrimDataBufs 3: vecs 1-niovecs must not be NULL");
1010 length -= iov->iov_len;
1013 /* iov now points to the first empty data buffer. */
1017 RX_TS_INFO_GET(rx_ts_info);
1018 for (; iov < end; iov++) {
1020 osi_Panic("TrimDataBufs 4: vecs 2-niovecs must not be NULL");
1021 RX_TS_FPQ_CHECKIN(rx_ts_info,RX_CBUF_TO_PACKET(iov->iov_base, p));
1024 if (rx_ts_info->_FPQ.len > rx_TSFPQLocalMax) {
1026 MUTEX_ENTER(&rx_freePktQ_lock);
1028 RX_TS_FPQ_LTOG(rx_ts_info);
1029 rxi_PacketsUnWait();
1031 MUTEX_EXIT(&rx_freePktQ_lock);
1037 #else /* RX_ENABLE_TSFPQ */
1039 rxi_TrimDataBufs(struct rx_packet *p, int first)
1042 struct iovec *iov, *end;
1046 osi_Panic("TrimDataBufs 1: first must be 1");
1048 /* Skip over continuation buffers containing message data */
1049 iov = &p->wirevec[2];
1050 end = iov + (p->niovecs - 2);
1051 length = p->length - p->wirevec[1].iov_len;
1052 for (; iov < end && length > 0; iov++) {
1054 osi_Panic("TrimDataBufs 3: vecs 1-niovecs must not be NULL");
1055 length -= iov->iov_len;
1058 /* iov now points to the first empty data buffer. */
1063 MUTEX_ENTER(&rx_freePktQ_lock);
1065 for (; iov < end; iov++) {
1067 osi_Panic("TrimDataBufs 4: vecs 2-niovecs must not be NULL");
1068 rxi_FreePacketNoLock(RX_CBUF_TO_PACKET(iov->iov_base, p));
1071 rxi_PacketsUnWait();
1073 MUTEX_EXIT(&rx_freePktQ_lock);
1078 #endif /* RX_ENABLE_TSFPQ */
1080 /* Free the packet p. P is assumed not to be on any queue, i.e.
1081 * remove it yourself first if you call this routine. */
1082 #ifdef RX_ENABLE_TSFPQ
1084 rxi_FreePacket(struct rx_packet *p)
1086 rxi_FreeDataBufsTSFPQ(p, 2, 0);
1087 rxi_FreePacketTSFPQ(p, RX_TS_FPQ_FLUSH_GLOBAL);
1089 #else /* RX_ENABLE_TSFPQ */
1091 rxi_FreePacket(struct rx_packet *p)
1096 MUTEX_ENTER(&rx_freePktQ_lock);
1098 rxi_FreeDataBufsNoLock(p, 2);
1099 rxi_FreePacketNoLock(p);
1100 /* Wakeup anyone waiting for packets */
1101 rxi_PacketsUnWait();
1103 MUTEX_EXIT(&rx_freePktQ_lock);
1106 #endif /* RX_ENABLE_TSFPQ */
1108 /* rxi_AllocPacket sets up p->length so it reflects the number of
1109 * bytes in the packet at this point, **not including** the header.
1110 * The header is absolutely necessary, besides, this is the way the
1111 * length field is usually used */
1112 #ifdef RX_ENABLE_TSFPQ
1114 rxi_AllocPacketNoLock(int class)
1116 register struct rx_packet *p;
1117 register struct rx_ts_info_t * rx_ts_info;
1119 RX_TS_INFO_GET(rx_ts_info);
1122 if (rxi_OverQuota(class)) {
1123 rxi_NeedMorePackets = TRUE;
1125 case RX_PACKET_CLASS_RECEIVE:
1126 rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
1128 case RX_PACKET_CLASS_SEND:
1129 rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
1131 case RX_PACKET_CLASS_SPECIAL:
1132 rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
1134 case RX_PACKET_CLASS_RECV_CBUF:
1135 rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
1137 case RX_PACKET_CLASS_SEND_CBUF:
1138 rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
1141 return (struct rx_packet *)0;
1145 rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
1146 if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
1149 if (queue_IsEmpty(&rx_freePacketQueue))
1150 osi_Panic("rxi_AllocPacket error");
1152 if (queue_IsEmpty(&rx_freePacketQueue))
1153 rxi_MorePacketsNoLock(4 * rx_initSendWindow);
1157 RX_TS_FPQ_GTOL(rx_ts_info);
1160 RX_TS_FPQ_CHECKOUT(rx_ts_info,p);
1162 dpf(("Alloc %lx, class %d\n", (unsigned long)p, class));
1165 /* have to do this here because rx_FlushWrite fiddles with the iovs in
1166 * order to truncate outbound packets. In the near future, may need
1167 * to allocate bufs from a static pool here, and/or in AllocSendPacket
1169 RX_PACKET_IOV_FULLINIT(p);
1172 #else /* RX_ENABLE_TSFPQ */
1174 rxi_AllocPacketNoLock(int class)
1176 register struct rx_packet *p;
1179 if (rxi_OverQuota(class)) {
1180 rxi_NeedMorePackets = TRUE;
1182 case RX_PACKET_CLASS_RECEIVE:
1183 rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
1185 case RX_PACKET_CLASS_SEND:
1186 rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
1188 case RX_PACKET_CLASS_SPECIAL:
1189 rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
1191 case RX_PACKET_CLASS_RECV_CBUF:
1192 rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
1194 case RX_PACKET_CLASS_SEND_CBUF:
1195 rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
1198 return (struct rx_packet *)0;
1202 rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
1205 if (queue_IsEmpty(&rx_freePacketQueue))
1206 osi_Panic("rxi_AllocPacket error");
1208 if (queue_IsEmpty(&rx_freePacketQueue))
1209 rxi_MorePacketsNoLock(4 * rx_initSendWindow);
1213 p = queue_First(&rx_freePacketQueue, rx_packet);
1215 RX_FPQ_MARK_USED(p);
1217 dpf(("Alloc %lx, class %d\n", (unsigned long)p, class));
1220 /* have to do this here because rx_FlushWrite fiddles with the iovs in
1221 * order to truncate outbound packets. In the near future, may need
1222 * to allocate bufs from a static pool here, and/or in AllocSendPacket
1224 RX_PACKET_IOV_FULLINIT(p);
1227 #endif /* RX_ENABLE_TSFPQ */
1229 #ifdef RX_ENABLE_TSFPQ
1231 rxi_AllocPacketTSFPQ(int class, int pull_global)
1233 register struct rx_packet *p;
1234 register struct rx_ts_info_t * rx_ts_info;
1236 RX_TS_INFO_GET(rx_ts_info);
1238 rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
1239 if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
1240 MUTEX_ENTER(&rx_freePktQ_lock);
1242 if (queue_IsEmpty(&rx_freePacketQueue))
1243 rxi_MorePacketsNoLock(4 * rx_initSendWindow);
1245 RX_TS_FPQ_GTOL(rx_ts_info);
1247 MUTEX_EXIT(&rx_freePktQ_lock);
1248 } else if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
1252 RX_TS_FPQ_CHECKOUT(rx_ts_info,p);
1254 dpf(("Alloc %lx, class %d\n", (unsigned long)p, class));
1256 /* have to do this here because rx_FlushWrite fiddles with the iovs in
1257 * order to truncate outbound packets. In the near future, may need
1258 * to allocate bufs from a static pool here, and/or in AllocSendPacket
1260 RX_PACKET_IOV_FULLINIT(p);
1263 #endif /* RX_ENABLE_TSFPQ */
1265 #ifdef RX_ENABLE_TSFPQ
1267 rxi_AllocPacket(int class)
1269 register struct rx_packet *p;
1271 p = rxi_AllocPacketTSFPQ(class, RX_TS_FPQ_PULL_GLOBAL);
1274 #else /* RX_ENABLE_TSFPQ */
1276 rxi_AllocPacket(int class)
1278 register struct rx_packet *p;
1280 MUTEX_ENTER(&rx_freePktQ_lock);
1281 p = rxi_AllocPacketNoLock(class);
1282 MUTEX_EXIT(&rx_freePktQ_lock);
1285 #endif /* RX_ENABLE_TSFPQ */
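/*
 * A minimal allocate/free round trip (illustrative only; the name
 * rx_ExampleAllocFree is hypothetical).  As noted above, p->length counts
 * only the user data area and never the RX_HEADER_SIZE bytes of wire
 * header.
 */
static void
rx_ExampleAllocFree(void)
{
    struct rx_packet *p;

    p = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL);
    if (!p)
        return;                 /* over quota for this packet class */

    /* p->length bytes of user data are usable starting at offset 0 */
    rx_SlowPutInt32(p, 0, htonl(42));

    rxi_FreePacket(p);
}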
1287 /* This guy comes up with as many buffers as it {takes,can get} given
1288 * the MTU for this call. It also sets the packet length before
1289 * returning. caution: this is often called at NETPRI
1290 * Called with call locked.
1293 rxi_AllocSendPacket(register struct rx_call *call, int want)
1295 register struct rx_packet *p = (struct rx_packet *)0;
1297 register unsigned delta;
1300 mud = call->MTU - RX_HEADER_SIZE;
1302 rx_GetSecurityHeaderSize(rx_ConnectionOf(call)) +
1303 rx_GetSecurityMaxTrailerSize(rx_ConnectionOf(call));
1305 #ifdef RX_ENABLE_TSFPQ
1306 if ((p = rxi_AllocPacketTSFPQ(RX_PACKET_CLASS_SEND, 0))) {
1308 want = MIN(want, mud);
1310 if ((unsigned)want > p->length)
1311 (void)rxi_AllocDataBuf(p, (want - p->length),
1312 RX_PACKET_CLASS_SEND_CBUF);
1314 if ((unsigned)p->length > mud)
1317 if (delta >= p->length) {
1325 #endif /* RX_ENABLE_TSFPQ */
1327 while (!(call->error)) {
1328 MUTEX_ENTER(&rx_freePktQ_lock);
1329 /* if an error occurred, or we get the packet we want, we're done */
1330 if ((p = rxi_AllocPacketNoLock(RX_PACKET_CLASS_SEND))) {
1331 MUTEX_EXIT(&rx_freePktQ_lock);
1334 want = MIN(want, mud);
1336 if ((unsigned)want > p->length)
1337 (void)rxi_AllocDataBuf(p, (want - p->length),
1338 RX_PACKET_CLASS_SEND_CBUF);
1340 if ((unsigned)p->length > mud)
1343 if (delta >= p->length) {
1352 /* no error occurred, and we didn't get a packet, so we sleep.
1353 * At this point, we assume that packets will be returned
1354 * sooner or later, as packets are acknowledged, and so we
1357 call->flags |= RX_CALL_WAIT_PACKETS;
1358 CALL_HOLD(call, RX_CALL_REFCOUNT_PACKET);
1359 MUTEX_EXIT(&call->lock);
1360 rx_waitingForPackets = 1;
1362 #ifdef RX_ENABLE_LOCKS
1363 CV_WAIT(&rx_waitingForPackets_cv, &rx_freePktQ_lock);
1365 osi_rxSleep(&rx_waitingForPackets);
1367 MUTEX_EXIT(&rx_freePktQ_lock);
1368 MUTEX_ENTER(&call->lock);
1369 CALL_RELE(call, RX_CALL_REFCOUNT_PACKET);
1370 call->flags &= ~RX_CALL_WAIT_PACKETS;
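/*
 * A minimal send-side sketch (illustrative only; rx_ExampleFillSendPacket
 * is hypothetical, and the call->lock / NETPRI handling a real sender
 * needs is omitted).  rxi_AllocSendPacket sizes the packet against the
 * call's MTU and the connection's security overhead, so the caller copies
 * in at most p->length bytes.
 */
static int
rx_ExampleFillSendPacket(struct rx_call *call, char *data, int nbytes)
{
    struct rx_packet *p;
    int tocopy;

    p = rxi_AllocSendPacket(call, nbytes);
    if (!p)
        return -1;              /* the call errored while we waited */

    tocopy = (nbytes < (int) p->length) ? nbytes : (int) p->length;
    rx_SlowWritePacket(p, 0, tocopy, data);

    /* a real sender would queue p on the call's transmit queue; the
     * sketch simply returns the packet to the free pool */
    rxi_FreePacket(p);
    return tocopy;
}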
1379 /* Windows does not use file descriptors. */
1380 #define CountFDs(amax) 0
1382 /* count the number of used FDs */
1384 CountFDs(register int amax)
1387 register int i, code;
1391 for (i = 0; i < amax; i++) {
1392 code = fstat(i, &tstat);
1398 #endif /* AFS_NT40_ENV */
1401 #define CountFDs(amax) amax
1405 #if !defined(KERNEL) || defined(UKERNEL)
1407 /* This function reads a single packet from the interface into the
1408 * supplied packet buffer (*p). Return 0 if the packet is bogus. The
1409 * (host,port) of the sender are stored in the supplied variables, and
1410 * the data length of the packet is stored in the packet structure.
1411 * The header is decoded. */
1413 rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * host,
1416 struct sockaddr_in from;
1419 register afs_int32 tlen, savelen;
1421 rx_computelen(p, tlen);
1422 rx_SetDataSize(p, tlen); /* this is the size of the user data area */
1424 tlen += RX_HEADER_SIZE; /* now this is the size of the entire packet */
1425 rlen = rx_maxJumboRecvSize; /* this is what I am advertising. Only check
1426 * it once in order to avoid races. */
1429 tlen = rxi_AllocDataBuf(p, tlen, RX_PACKET_CLASS_SEND_CBUF);
1437 /* Extend the last iovec for padding, it's just to make sure that the
1438 * read doesn't return more data than we expect, and is done to get around
1439 * our problems caused by the lack of a length field in the rx header.
1440 * Use the extra buffer that follows the localdata in each packet
1442 savelen = p->wirevec[p->niovecs - 1].iov_len;
1443 p->wirevec[p->niovecs - 1].iov_len += RX_EXTRABUFFERSIZE;
1445 memset((char *)&msg, 0, sizeof(msg));
1446 msg.msg_name = (char *)&from;
1447 msg.msg_namelen = sizeof(struct sockaddr_in);
1448 msg.msg_iov = p->wirevec;
1449 msg.msg_iovlen = p->niovecs;
1450 nbytes = rxi_Recvmsg(socket, &msg, 0);
1452 /* restore the vec to its correct state */
1453 p->wirevec[p->niovecs - 1].iov_len = savelen;
1455 p->length = (nbytes - RX_HEADER_SIZE);
1456 if ((nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */
1457 if (nbytes < 0 && errno == EWOULDBLOCK) {
1458 rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
1459 } else if (nbytes <= 0) {
1460 MUTEX_ENTER(&rx_stats_mutex);
1461 rx_stats.bogusPacketOnRead++;
1462 rx_stats.bogusHost = from.sin_addr.s_addr;
1463 MUTEX_EXIT(&rx_stats_mutex);
1464 dpf(("B: bogus packet from [%x,%d] nb=%d", ntohl(from.sin_addr.s_addr),
1465 ntohs(from.sin_port), nbytes));
1470 else if ((rx_intentionallyDroppedOnReadPer100 > 0)
1471 && (random() % 100 < rx_intentionallyDroppedOnReadPer100)) {
1472 rxi_DecodePacketHeader(p);
1474 *host = from.sin_addr.s_addr;
1475 *port = from.sin_port;
1477 dpf(("Dropped %d %s: %x.%u.%u.%u.%u.%u.%u flags %d len %d",
1478 p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(*host), ntohs(*port), p->header.serial,
1479 p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags,
1481 rxi_TrimDataBufs(p, 1);
1486 /* Extract packet header. */
1487 rxi_DecodePacketHeader(p);
1489 *host = from.sin_addr.s_addr;
1490 *port = from.sin_port;
1491 if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
1492 struct rx_peer *peer;
1493 rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
1495 * Try to look up this peer structure. If it doesn't exist,
1496 * don't create a new one -
1497 * we don't keep count of the bytes sent/received if a peer
1498 * structure doesn't already exist.
1500 * The peer/connection cleanup code assumes that there is 1 peer
1501 * per connection. If we actually created a peer structure here
1502 * and this packet was an rxdebug packet, the peer structure would
1503 * never be cleaned up.
1505 peer = rxi_FindPeer(*host, *port, 0, 0);
1506 /* Since this may not be associated with a connection,
1507 * it may have no refCount, meaning we could race with
1510 if (peer && (peer->refCount > 0)) {
1511 MUTEX_ENTER(&peer->peer_lock);
1512 hadd32(peer->bytesReceived, p->length);
1513 MUTEX_EXIT(&peer->peer_lock);
1517 /* Free any empty packet buffers at the end of this packet */
1518 rxi_TrimDataBufs(p, 1);
1524 #endif /* !KERNEL || UKERNEL */
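#if !defined(KERNEL) || defined(UKERNEL)
/*
 * A minimal userland receive sketch (illustrative only; rx_ExampleReadOne
 * is hypothetical): allocate a receive packet, pull one datagram off the
 * socket with rxi_ReadPacket, and report the sender.  Returns 1 when a
 * usable packet was read, 0 otherwise.
 */
static int
rx_ExampleReadOne(osi_socket sock)
{
    struct rx_packet *p;
    afs_uint32 host;
    u_short port;
    int ok = 0;

    p = rxi_AllocPacket(RX_PACKET_CLASS_RECEIVE);
    if (p) {
        ok = rxi_ReadPacket(sock, p, &host, &port);
        if (ok)
            dpf(("example: packet from %x.%u\n", ntohl(host), ntohs(port)));
        rxi_FreePacket(p);
    }
    return ok;
}
#endif /* !KERNEL || UKERNEL */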
1526 /* This function splits off the first packet in a jumbo packet.
1527 * As of AFS 3.5, jumbograms contain more than one fixed size
1528 * packet, and the RX_JUMBO_PACKET flag is set in all but the
1529 * last packet header. All packets (except the last) are padded to
1530 * fall on RX_CBUFFERSIZE boundaries.
1531 * HACK: We store the length of the first n-1 packets in the
1532 * last two pad bytes. */
1535 rxi_SplitJumboPacket(register struct rx_packet *p, afs_int32 host, short port,
1538 struct rx_packet *np;
1539 struct rx_jumboHeader *jp;
1545 /* All but the last packet in each jumbogram are RX_JUMBOBUFFERSIZE
1546 * bytes in length. All but the first packet are preceded by
1547 * an abbreviated four byte header. The length of the last packet
1548 * is calculated from the size of the jumbogram. */
1549 length = RX_JUMBOBUFFERSIZE + RX_JUMBOHEADERSIZE;
1551 if ((int)p->length < length) {
1552 dpf(("rxi_SplitJumboPacket: bogus length %d\n", p->length));
1555 niov = p->niovecs - 2;
1557 dpf(("rxi_SplitJumboPacket: bogus niovecs %d\n", p->niovecs));
1560 iov = &p->wirevec[2];
1561 np = RX_CBUF_TO_PACKET(iov->iov_base, p);
1563 /* Get a pointer to the abbreviated packet header */
1564 jp = (struct rx_jumboHeader *)
1565 ((char *)(p->wirevec[1].iov_base) + RX_JUMBOBUFFERSIZE);
1567 /* Set up the iovecs for the next packet */
1568 np->wirevec[0].iov_base = (char *)(&np->wirehead[0]);
1569 np->wirevec[0].iov_len = sizeof(struct rx_header);
1570 np->wirevec[1].iov_base = (char *)(&np->localdata[0]);
1571 np->wirevec[1].iov_len = length - RX_JUMBOHEADERSIZE;
1572 np->niovecs = niov + 1;
1573 for (i = 2, iov++; i <= niov; i++, iov++) {
1574 np->wirevec[i] = *iov;
1576 np->length = p->length - length;
1577 p->length = RX_JUMBOBUFFERSIZE;
1580 /* Convert the jumbo packet header to host byte order */
1581 temp = ntohl(*(afs_uint32 *) jp);
1582 jp->flags = (u_char) (temp >> 24);
1583 jp->cksum = (u_short) (temp);
1585 /* Fill in the packet header */
1586 np->header = p->header;
1587 np->header.serial = p->header.serial + 1;
1588 np->header.seq = p->header.seq + 1;
1589 np->header.flags = jp->flags;
1590 np->header.spare = jp->cksum;
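/*
 * Jumbogram layout sketch (illustrative only; rx_ExampleJumboOffset is
 * hypothetical).  Within the datagram payload, every sub-packet except
 * the last occupies RX_JUMBOBUFFERSIZE data bytes followed by the
 * RX_JUMBOHEADERSIZE abbreviated header of the next sub-packet, so the
 * data of sub-packet n starts at a fixed multiple of that stride.
 */
static int
rx_ExampleJumboOffset(int n)
{
    /* offset of sub-packet n (0-based) within the payload that follows
     * the main RX header */
    return n * (RX_JUMBOBUFFERSIZE + RX_JUMBOHEADERSIZE);
}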
1596 /* Send a udp datagram */
1598 osi_NetSend(osi_socket socket, void *addr, struct iovec *dvec, int nvecs,
1599 int length, int istack)
1604 memset(&msg, 0, sizeof(msg));
1606 msg.msg_iovlen = nvecs;
1607 msg.msg_name = addr;
1608 msg.msg_namelen = sizeof(struct sockaddr_in);
1610 ret = rxi_Sendmsg(socket, &msg, 0);
1614 #elif !defined(UKERNEL)
1616 * message receipt is done in rxk_input or rx_put.
1619 #if defined(AFS_SUN5_ENV) || defined(AFS_HPUX110_ENV)
1621 * Copy an mblock to the contiguous area pointed to by cp.
1622 * MTUXXX Supposed to skip <off> bytes and copy <len> bytes,
1623 * but it doesn't really.
1624 * Returns the number of bytes not transferred.
1625 * The message is NOT changed.
1628 cpytoc(mblk_t * mp, register int off, register int len, register char *cp)
1632 for (; mp && len > 0; mp = mp->b_cont) {
1633 if (mp->b_datap->db_type != M_DATA) {
1636 n = MIN(len, (mp->b_wptr - mp->b_rptr));
1637 memcpy(cp, (char *)mp->b_rptr, n);
1645 /* MTUXXX Supposed to skip <off> bytes and copy <len> bytes,
1646 * but it doesn't really.
1647 * This sucks, anyway, do it like m_cpy.... below
1650 cpytoiovec(mblk_t * mp, int off, int len, register struct iovec *iovs,
1653 register int m, n, o, t, i;
1655 for (i = -1, t = 0; i < niovs && mp && len > 0; mp = mp->b_cont) {
1656 if (mp->b_datap->db_type != M_DATA) {
1659 n = MIN(len, (mp->b_wptr - mp->b_rptr));
1665 t = iovs[i].iov_len;
1668 memcpy(iovs[i].iov_base + o, (char *)mp->b_rptr, m);
1678 #define m_cpytoc(a, b, c, d) cpytoc(a, b, c, d)
1679 #define m_cpytoiovec(a, b, c, d, e) cpytoiovec(a, b, c, d, e)
1681 #if !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN80_ENV)
1683 m_cpytoiovec(struct mbuf *m, int off, int len, struct iovec iovs[], int niovs)
1686 unsigned int l1, l2, i, t;
1688 if (m == NULL || off < 0 || len < 0 || iovs == NULL)
1689 osi_Panic("m_cpytoiovec"); /* MTUXXX probably don't need this check */
1692 if (m->m_len <= off) {
1702 p1 = mtod(m, caddr_t) + off;
1703 l1 = m->m_len - off;
1705 p2 = iovs[0].iov_base;
1706 l2 = iovs[0].iov_len;
1709 t = MIN(l1, MIN(l2, (unsigned int)len));
1720 p1 = mtod(m, caddr_t);
1726 p2 = iovs[i].iov_base;
1727 l2 = iovs[i].iov_len;
1735 #endif /* AFS_SUN5_ENV */
1737 #if !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN80_ENV)
1739 rx_mb_to_packet(amb, free, hdr_len, data_len, phandle)
1740 #if defined(AFS_SUN5_ENV) || defined(AFS_HPUX110_ENV)
1746 struct rx_packet *phandle;
1747 int hdr_len, data_len;
1752 m_cpytoiovec(amb, hdr_len, data_len, phandle->wirevec,
1759 #endif /*KERNEL && !UKERNEL */
1762 /* send a response to a debug packet */
1765 rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
1766 afs_int32 ahost, short aport, int istack)
1768 struct rx_debugIn tin;
1770 struct rx_serverQueueEntry *np, *nqe;
1773 * Only respond to client-initiated Rx debug packets,
1774 * and clear the client flag in the response.
1776 if (ap->header.flags & RX_CLIENT_INITIATED) {
1777 ap->header.flags = ap->header.flags & ~RX_CLIENT_INITIATED;
1778 rxi_EncodePacketHeader(ap);
1783 rx_packetread(ap, 0, sizeof(struct rx_debugIn), (char *)&tin);
1784 /* all done with packet, now set length to the truth, so we can
1785 * reuse this packet */
1786 rx_computelen(ap, ap->length);
1788 tin.type = ntohl(tin.type);
1789 tin.index = ntohl(tin.index);
1791 case RX_DEBUGI_GETSTATS:{
1792 struct rx_debugStats tstat;
1794 /* get basic stats */
1795 memset((char *)&tstat, 0, sizeof(tstat)); /* make sure spares are zero */
1796 tstat.version = RX_DEBUGI_VERSION;
1797 #ifndef RX_ENABLE_LOCKS
1798 tstat.waitingForPackets = rx_waitingForPackets;
1800 MUTEX_ENTER(&rx_serverPool_lock);
1801 tstat.nFreePackets = htonl(rx_nFreePackets);
1802 tstat.nPackets = htonl(rx_nPackets);
1803 tstat.callsExecuted = htonl(rxi_nCalls);
1804 tstat.packetReclaims = htonl(rx_packetReclaims);
1805 tstat.usedFDs = CountFDs(64);
1806 tstat.nWaiting = htonl(rx_nWaiting);
1807 tstat.nWaited = htonl(rx_nWaited);
1808 queue_Count(&rx_idleServerQueue, np, nqe, rx_serverQueueEntry,
1810 MUTEX_EXIT(&rx_serverPool_lock);
1811 tstat.idleThreads = htonl(tstat.idleThreads);
1812 tl = sizeof(struct rx_debugStats) - ap->length;
1814 tl = rxi_AllocDataBuf(ap, tl, RX_PACKET_CLASS_SEND_CBUF);
1817 rx_packetwrite(ap, 0, sizeof(struct rx_debugStats),
1819 ap->length = sizeof(struct rx_debugStats);
1820 rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
1821 rx_computelen(ap, ap->length);
1826 case RX_DEBUGI_GETALLCONN:
1827 case RX_DEBUGI_GETCONN:{
1829 register struct rx_connection *tc;
1830 struct rx_call *tcall;
1831 struct rx_debugConn tconn;
1832 int all = (tin.type == RX_DEBUGI_GETALLCONN);
1835 tl = sizeof(struct rx_debugConn) - ap->length;
1837 tl = rxi_AllocDataBuf(ap, tl, RX_PACKET_CLASS_SEND_CBUF);
1841 memset((char *)&tconn, 0, sizeof(tconn)); /* make sure spares are zero */
1842 /* get N'th (maybe) "interesting" connection info */
1843 for (i = 0; i < rx_hashTableSize; i++) {
1844 #if !defined(KERNEL)
1845 /* the time complexity of the algorithm used here
1846 * exponentially increases with the number of connections.
1848 #ifdef AFS_PTHREAD_ENV
1854 RWLOCK_RDLOCK(&rx_connHashTable_lock);
1855 /* We might be slightly out of step since we are not
1856 * locking each call, but this is only debugging output.
1858 for (tc = rx_connHashTable[i]; tc; tc = tc->next) {
1859 if ((all || rxi_IsConnInteresting(tc))
1860 && tin.index-- <= 0) {
1861 tconn.host = tc->peer->host;
1862 tconn.port = tc->peer->port;
1863 tconn.cid = htonl(tc->cid);
1864 tconn.epoch = htonl(tc->epoch);
1865 tconn.serial = htonl(tc->serial);
1866 for (j = 0; j < RX_MAXCALLS; j++) {
1867 tconn.callNumber[j] = htonl(tc->callNumber[j]);
1868 if ((tcall = tc->call[j])) {
1869 tconn.callState[j] = tcall->state;
1870 tconn.callMode[j] = tcall->mode;
1871 tconn.callFlags[j] = tcall->flags;
1872 if (queue_IsNotEmpty(&tcall->rq))
1873 tconn.callOther[j] |= RX_OTHER_IN;
1874 if (queue_IsNotEmpty(&tcall->tq))
1875 tconn.callOther[j] |= RX_OTHER_OUT;
1877 tconn.callState[j] = RX_STATE_NOTINIT;
1880 tconn.natMTU = htonl(tc->peer->natMTU);
1881 tconn.error = htonl(tc->error);
1882 tconn.flags = tc->flags;
1883 tconn.type = tc->type;
1884 tconn.securityIndex = tc->securityIndex;
1885 if (tc->securityObject) {
1886 RXS_GetStats(tc->securityObject, tc,
1888 #define DOHTONL(a) (tconn.secStats.a = htonl(tconn.secStats.a))
1889 #define DOHTONS(a) (tconn.secStats.a = htons(tconn.secStats.a))
1892 DOHTONL(packetsReceived);
1893 DOHTONL(packetsSent);
1894 DOHTONL(bytesReceived);
1898 sizeof(tconn.secStats.spares) /
1903 sizeof(tconn.secStats.sparel) /
1904 sizeof(afs_int32); i++)
1908 RWLOCK_UNLOCK(&rx_connHashTable_lock);
1909 rx_packetwrite(ap, 0, sizeof(struct rx_debugConn),
1912 ap->length = sizeof(struct rx_debugConn);
1913 rxi_SendDebugPacket(ap, asocket, ahost, aport,
1919 RWLOCK_UNLOCK(&rx_connHashTable_lock);
1921 /* if we make it here, there are no more interesting connections */
1922 tconn.cid = htonl(0xffffffff); /* means end */
1923 rx_packetwrite(ap, 0, sizeof(struct rx_debugConn),
1926 ap->length = sizeof(struct rx_debugConn);
1927 rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
1933 * Pass back all the peer structures we have available
1936 case RX_DEBUGI_GETPEER:{
1938 register struct rx_peer *tp;
1939 struct rx_debugPeer tpeer;
1942 tl = sizeof(struct rx_debugPeer) - ap->length;
1944 tl = rxi_AllocDataBuf(ap, tl, RX_PACKET_CLASS_SEND_CBUF);
1948 memset((char *)&tpeer, 0, sizeof(tpeer));
1949 for (i = 0; i < rx_hashTableSize; i++) {
1950 #if !defined(KERNEL)
1951 /* the time complexity of the algorithm used here
1952 * exponentially increases with the number of peers.
1954 * Yielding after processing each hash table entry
1955 * and dropping rx_peerHashTable_lock.
1956 * also increases the risk that we will miss a new
1957 * entry - but we are willing to live with this
1958 * limitation since this is meant for debugging only
1960 #ifdef AFS_PTHREAD_ENV
1966 RWLOCK_RDLOCK(&rx_peerHashTable_lock);
1967 /* XXX should copy out, then unlock and byteswap */
1968 for (tp = rx_peerHashTable[i]; tp; tp = tp->next) {
1969 if (tin.index-- <= 0) {
1970 tpeer.host = tp->host;
1971 tpeer.port = tp->port;
1972 tpeer.ifMTU = htons(tp->ifMTU);
1973 tpeer.idleWhen = htonl(tp->idleWhen);
1974 tpeer.refCount = htons(tp->refCount);
1975 tpeer.burstSize = tp->burstSize;
1976 tpeer.burst = tp->burst;
1977 tpeer.burstWait.sec = htonl(tp->burstWait.sec);
1978 tpeer.burstWait.usec = htonl(tp->burstWait.usec);
1979 tpeer.rtt = htonl(tp->rtt);
1980 tpeer.rtt_dev = htonl(tp->rtt_dev);
1981 tpeer.timeout.sec = htonl(tp->timeout.sec);
1982 tpeer.timeout.usec = htonl(tp->timeout.usec);
1983 tpeer.nSent = htonl(tp->nSent);
1984 tpeer.reSends = htonl(tp->reSends);
1985 tpeer.inPacketSkew = htonl(tp->inPacketSkew);
1986 tpeer.outPacketSkew = htonl(tp->outPacketSkew);
1987 tpeer.rateFlag = htonl(tp->rateFlag);
1988 tpeer.natMTU = htons(tp->natMTU);
1989 tpeer.maxMTU = htons(tp->maxMTU);
1990 tpeer.maxDgramPackets = htons(tp->maxDgramPackets);
1991 tpeer.ifDgramPackets = htons(tp->ifDgramPackets);
1992 tpeer.MTU = htons(tp->MTU);
1993 tpeer.cwind = htons(tp->cwind);
1994 tpeer.nDgramPackets = htons(tp->nDgramPackets);
1995 tpeer.congestSeq = htons(tp->congestSeq);
1996 tpeer.bytesSent.high = htonl(tp->bytesSent.high);
1997 tpeer.bytesSent.low = htonl(tp->bytesSent.low);
1998 tpeer.bytesReceived.high =
1999 htonl(tp->bytesReceived.high);
2000 tpeer.bytesReceived.low =
2001 htonl(tp->bytesReceived.low);
2003 RWLOCK_UNLOCK(&rx_peerHashTable_lock);
2004 rx_packetwrite(ap, 0, sizeof(struct rx_debugPeer),
2007 ap->length = sizeof(struct rx_debugPeer);
2008 rxi_SendDebugPacket(ap, asocket, ahost, aport,
2014 RWLOCK_UNLOCK(&rx_peerHashTable_lock);
2016 /* if we make it here, there are no more interesting peers */
2017 tpeer.host = htonl(0xffffffff); /* means end */
2018 rx_packetwrite(ap, 0, sizeof(struct rx_debugPeer),
2021 ap->length = sizeof(struct rx_debugPeer);
2022 rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
2027 case RX_DEBUGI_RXSTATS:{
2031 tl = sizeof(rx_stats) - ap->length;
2033 tl = rxi_AllocDataBuf(ap, tl, RX_PACKET_CLASS_SEND_CBUF);
2037 /* Since it's all int32s, convert to network order with a loop. */
2038 MUTEX_ENTER(&rx_stats_mutex);
2039 s = (afs_int32 *) & rx_stats;
2040 for (i = 0; i < sizeof(rx_stats) / sizeof(afs_int32); i++, s++)
2041 rx_PutInt32(ap, i * sizeof(afs_int32), htonl(*s));
2044 ap->length = sizeof(rx_stats);
2045 MUTEX_EXIT(&rx_stats_mutex);
2046 rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
2052 /* error response packet */
2053 tin.type = htonl(RX_DEBUGI_BADTYPE);
2054 tin.index = tin.type;
2055 rx_packetwrite(ap, 0, sizeof(struct rx_debugIn), (char *)&tin);
2057 ap->length = sizeof(struct rx_debugIn);
2058 rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
2066 rxi_ReceiveVersionPacket(register struct rx_packet *ap, osi_socket asocket,
2067 afs_int32 ahost, short aport, int istack)
2072 * Only respond to client-initiated version requests, and
2073 * clear that flag in the response.
2075 if (ap->header.flags & RX_CLIENT_INITIATED) {
2078 ap->header.flags = ap->header.flags & ~RX_CLIENT_INITIATED;
2079 rxi_EncodePacketHeader(ap);
2080 memset(buf, 0, sizeof(buf));
2081 strncpy(buf, cml_version_number + 4, sizeof(buf) - 1);
2082 rx_packetwrite(ap, 0, 65, buf);
2085 rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
2093 /* send a debug packet back to the sender */
2095 rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
2096 afs_int32 ahost, short aport, afs_int32 istack)
2098 struct sockaddr_in taddr;
2104 int waslocked = ISAFS_GLOCK();
2107 taddr.sin_family = AF_INET;
2108 taddr.sin_port = aport;
2109 taddr.sin_addr.s_addr = ahost;
2110 #ifdef STRUCT_SOCKADDR_HAS_SA_LEN
2111 taddr.sin_len = sizeof(struct sockaddr_in);
2114 /* We need to trim the niovecs. */
2115 nbytes = apacket->length;
2116 for (i = 1; i < apacket->niovecs; i++) {
2117 if (nbytes <= apacket->wirevec[i].iov_len) {
2118 savelen = apacket->wirevec[i].iov_len;
2119 saven = apacket->niovecs;
2120 apacket->wirevec[i].iov_len = nbytes;
2121 apacket->niovecs = i + 1; /* so condition fails because i == niovecs */
2123 nbytes -= apacket->wirevec[i].iov_len;
2126 #ifdef RX_KERNEL_TRACE
2127 if (ICL_SETACTIVE(afs_iclSetp)) {
2130 afs_Trace1(afs_iclSetp, CM_TRACE_TIMESTAMP, ICL_TYPE_STRING,
2131 "before osi_NetSend()");
2139 /* debug packets are not reliably delivered, hence the cast below. */
2140 (void)osi_NetSend(asocket, &taddr, apacket->wirevec, apacket->niovecs,
2141 apacket->length + RX_HEADER_SIZE, istack);
2143 #ifdef RX_KERNEL_TRACE
2144 if (ICL_SETACTIVE(afs_iclSetp)) {
2146 afs_Trace1(afs_iclSetp, CM_TRACE_TIMESTAMP, ICL_TYPE_STRING,
2147 "after osi_NetSend()");
2156 if (saven) { /* means we truncated the packet above. */
2157 apacket->wirevec[i - 1].iov_len = savelen;
2158 apacket->niovecs = saven;
2163 /* Send the packet to the appropriate destination for the specified
2164 * call. The header is first encoded and placed in the packet.
2167 rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
2168 struct rx_packet *p, int istack)
2174 struct sockaddr_in addr;
2175 register struct rx_peer *peer = conn->peer;
2178 char deliveryType = 'S';
2180 /* The address we're sending the packet to */
2181 memset(&addr, 0, sizeof(addr));
2182 addr.sin_family = AF_INET;
2183 addr.sin_port = peer->port;
2184 addr.sin_addr.s_addr = peer->host;
2186 /* This stuff should be revamped, I think, so that most, if not
2187 * all, of the header stuff is always added here. We could
2188 * probably do away with the encode/decode routines. XXXXX */
2190 /* Stamp each packet with a unique serial number. The serial
2191 * number is maintained on a connection basis because some types
2192 * of security may be based on the serial number of the packet,
2193 * and security is handled on a per authenticated-connection
2195 /* Pre-increment, to guarantee no zero serial number; a zero
2196 * serial number means the packet was never sent. */
2197 MUTEX_ENTER(&conn->conn_data_lock);
2198 p->header.serial = ++conn->serial;
2199 MUTEX_EXIT(&conn->conn_data_lock);
2200 /* This is so we can adjust retransmit time-outs better in the face of
2201 * rapidly changing round-trip times. RTO estimation is not a la Karn.
2203 if (p->firstSerial == 0) {
2204 p->firstSerial = p->header.serial;
2207 /* If an output tracer function is defined, call it with the packet and
2208 * network address. Note this function may modify its arguments. */
2209 if (rx_almostSent) {
2210 int drop = (*rx_almostSent) (p, &addr);
2211 /* drop packet if return value is non-zero? */
2213 deliveryType = 'D'; /* Drop the packet */
2217 /* Get network byte order header */
2218 rxi_EncodePacketHeader(p); /* XXX in the event of rexmit, etc, don't need to
2219 * touch ALL the fields */
2221 /* Send the packet out on the same socket that related packets are being
2225 RX_CLIENT_CONNECTION ? rx_socket : conn->service->socket);
2228 /* Possibly drop this packet, for testing purposes */
2229 if ((deliveryType == 'D')
2230 || ((rx_intentionallyDroppedPacketsPer100 > 0)
2231 && (random() % 100 < rx_intentionallyDroppedPacketsPer100))) {
2232 deliveryType = 'D'; /* Drop the packet */
2234 deliveryType = 'S'; /* Send the packet */
2235 #endif /* RXDEBUG */
2237 /* Loop until the packet is sent. We'd prefer just to use a
2238 * blocking socket, but unfortunately the interface doesn't
2239 * allow us to have the socket block in send mode, and not
2240 * block in receive mode */
2242 waslocked = ISAFS_GLOCK();
2243 #ifdef RX_KERNEL_TRACE
2244 if (ICL_SETACTIVE(afs_iclSetp)) {
2247 afs_Trace1(afs_iclSetp, CM_TRACE_TIMESTAMP, ICL_TYPE_STRING,
2248 "before osi_NetSend()");
2257 osi_NetSend(socket, &addr, p->wirevec, p->niovecs,
2258 p->length + RX_HEADER_SIZE, istack)) != 0) {
2259 /* send failed, so let's hurry up the resend, eh? */
2260 rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
2261 p->retryTime = p->timeSent; /* resend it very soon */
2262 clock_Addmsec(&(p->retryTime),
2263 10 + (((afs_uint32) p->backoff) << 8));
2264 /* Some systems are nice and tell us right away that we cannot
2265 * reach this recipient by returning an error code.
2266 * So, when this happens let's "down" the host NOW so
2267 * we don't sit around waiting for this host to timeout later.
2271 code == -1 && WSAGetLastError() == WSAEHOSTUNREACH
2272 #elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
2273 code == -ENETUNREACH
2274 #elif defined(AFS_DARWIN_ENV) && defined(KERNEL)
2275 code == EHOSTUNREACH
2280 call->lastReceiveTime = 0;
2283 #ifdef RX_KERNEL_TRACE
2284 if (ICL_SETACTIVE(afs_iclSetp)) {
2286 afs_Trace1(afs_iclSetp, CM_TRACE_TIMESTAMP, ICL_TYPE_STRING,
2287 "after osi_NetSend()");
2298 dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
2300 rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
2301 MUTEX_ENTER(&peer->peer_lock);
2302 hadd32(peer->bytesSent, p->length);
2303 MUTEX_EXIT(&peer->peer_lock);
2306 /* Send a list of packets to the appropriate destination for the specified
2307 * connection. The headers are first encoded and placed in the packets.
2310 rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
2311 struct rx_packet **list, int len, int istack)
2313 #if defined(AFS_SUN5_ENV) && defined(KERNEL)
2316 struct sockaddr_in addr;
2317 register struct rx_peer *peer = conn->peer;
2319 struct rx_packet *p = NULL;
2320 struct iovec wirevec[RX_MAXIOVECS];
2321 int i, length, code;
2324 struct rx_jumboHeader *jp;
2326 char deliveryType = 'S';
2328 /* The address we're sending the packet to */
2329 addr.sin_family = AF_INET;
2330 addr.sin_port = peer->port;
2331 addr.sin_addr.s_addr = peer->host;
2333 if (len + 1 > RX_MAXIOVECS) {
2334 osi_Panic("rxi_SendPacketList, len > RX_MAXIOVECS\n");
2338 * Stamp the packets in this jumbogram with consecutive serial numbers
2340 MUTEX_ENTER(&conn->conn_data_lock);
2341 serial = conn->serial;
2342 conn->serial += len;
2343 MUTEX_EXIT(&conn->conn_data_lock);
2346 /* This stuff should be revamped, I think, so that most, if not
2347 * all, of the header stuff is always added here. We could
2348 * probably do away with the encode/decode routines. XXXXX */
2351 length = RX_HEADER_SIZE;
2352 wirevec[0].iov_base = (char *)(&list[0]->wirehead[0]);
2353 wirevec[0].iov_len = RX_HEADER_SIZE;
2354 for (i = 0; i < len; i++) {
2357 /* The whole 3.5 jumbogram scheme relies on packets fitting
2358 * in a single packet buffer. */
2359 if (p->niovecs > 2) {
2360 osi_Panic("rxi_SendPacketList, niovecs > 2\n");
2363 /* Set the RX_JUMBO_PACKET flags in all but the last packets
2366 if (p->length != RX_JUMBOBUFFERSIZE) {
2367 osi_Panic("rxi_SendPacketList, length != jumbo size\n");
2369 p->header.flags |= RX_JUMBO_PACKET;
2370 length += RX_JUMBOBUFFERSIZE + RX_JUMBOHEADERSIZE;
2371 wirevec[i + 1].iov_len = RX_JUMBOBUFFERSIZE + RX_JUMBOHEADERSIZE;
2373 wirevec[i + 1].iov_len = p->length;
2374 length += p->length;
2376 wirevec[i + 1].iov_base = (char *)(&p->localdata[0]);
2378 /* Convert jumbo packet header to network byte order */
2379 temp = (afs_uint32) (p->header.flags) << 24;
2380 temp |= (afs_uint32) (p->header.spare);
2381 *(afs_uint32 *) jp = htonl(temp);
2383 jp = (struct rx_jumboHeader *)
2384 ((char *)(&p->localdata[0]) + RX_JUMBOBUFFERSIZE);
2386 /* Stamp each packet with a unique serial number. The serial
2387 * number is maintained on a connection basis because some types
2388 * of security may be based on the serial number of the packet,
2389 * and security is handled on a per authenticated-connection
2391 /* Pre-increment, to guarantee no zero serial number; a zero
2392 * serial number means the packet was never sent. */
2393 p->header.serial = ++serial;
2394 /* This is so we can adjust retransmit time-outs better in the face of
2395 * rapidly changing round-trip times. RTO estimation is not a la Karn.
2397 if (p->firstSerial == 0) {
2398 p->firstSerial = p->header.serial;
2401 /* If an output tracer function is defined, call it with the packet and
2402 * network address. Note this function may modify its arguments. */
2403 if (rx_almostSent) {
2404 int drop = (*rx_almostSent) (p, &addr);
2405 /* drop packet if return value is non-zero? */
2407 deliveryType = 'D'; /* Drop the packet */
2411 /* Get network byte order header */
2412 rxi_EncodePacketHeader(p); /* XXX in the event of rexmit, etc, don't need to
2413 * touch ALL the fields */
2416 /* Send the packet out on the same socket that related packets are being
2420 RX_CLIENT_CONNECTION ? rx_socket : conn->service->socket);
2423 /* Possibly drop this packet, for testing purposes */
2424 if ((deliveryType == 'D')
2425 || ((rx_intentionallyDroppedPacketsPer100 > 0)
2426 && (random() % 100 < rx_intentionallyDroppedPacketsPer100))) {
2427 deliveryType = 'D'; /* Drop the packet */
2429 deliveryType = 'S'; /* Send the packet */
2430 #endif /* RXDEBUG */
2432 /* Loop until the packet is sent. We'd prefer just to use a
2433 * blocking socket, but unfortunately the interface doesn't
2434 * allow us to have the socket block in send mode but not
2435 * in receive mode */
2436 #if defined(AFS_SUN5_ENV) && defined(KERNEL)
2437 waslocked = ISAFS_GLOCK();
2438 if (!istack && waslocked)
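/* On Solaris kernels the AFS global lock is dropped around the network send
 * (and retaken below) so other threads need not wait on it while osi_NetSend
 * blocks; when called from the IP stack (istack) the lock state is left
 * untouched. */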
2442 osi_NetSend(socket, &addr, &wirevec[0], len + 1, length,
2444 /* send failed, so let's hurry up the resend, eh? */
2445 rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
2446 for (i = 0; i < len; i++) {
2448 p->retryTime = p->timeSent; /* resend it very soon */
2449 clock_Addmsec(&(p->retryTime),
2450 10 + (((afs_uint32) p->backoff) << 8));
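/* That is 10ms plus roughly 256ms per backoff step (backoff << 8 is in
 * milliseconds), so repeated send failures push the retry time out quickly. */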
2452 /* Some systems are nice and tell us right away that we cannot
2453 * reach this recipient by returning an error code.
2454 * So, when this happens, let's "down" the host NOW so
2455 * we don't sit around waiting for this host to time out later.
2459 code == -1 && WSAGetLastError() == WSAEHOSTUNREACH
2460 #elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
2461 code == -ENETUNREACH
2462 #elif defined(AFS_DARWIN_ENV) && defined(KERNEL)
2463 code == EHOSTUNREACH
2468 call->lastReceiveTime = 0;
2470 #if defined(AFS_SUN5_ENV) && defined(KERNEL)
2471 if (!istack && waslocked)
2479 dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
2482 rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
2483 MUTEX_ENTER(&peer->peer_lock);
2484 hadd32(peer->bytesSent, p->length);
2485 MUTEX_EXIT(&peer->peer_lock);
2489 /* Send a "special" packet to the peer connection. If call is
2490 * specified, then the packet is directed to a specific call channel
2491 * associated with the connection, otherwise it is directed to the
2492 * connection only. Uses optionalPacket if it is supplied, rather than
2493 * allocating a new packet buffer. Nbytes is the length of the data
2494 * portion of the packet. If data is non-null, nbytes of data are
2495 * copied into the packet. Type is the type of the packet, as defined
2496 * in rx.h. Bug: there's a lot of duplication between this and other
2497 * routines. This needs to be cleaned up. */
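/* Illustrative usage only (not code from this file): a caller can send a
 * connection-level abort carrying a 32-bit error code and let this routine
 * allocate the packet by passing a NULL optionalPacket:
 *
 *     afs_int32 error = htonl(code);
 *     rxi_SendSpecial((struct rx_call *)0, conn, (struct rx_packet *)0,
 *                     RX_PACKET_TYPE_ABORT, (char *)&error,
 *                     sizeof(error), istack);
 */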
2499 rxi_SendSpecial(register struct rx_call *call,
2500 register struct rx_connection *conn,
2501 struct rx_packet *optionalPacket, int type, char *data,
2502 int nbytes, int istack)
2504 /* Some of the following stuff should be common code for all
2505 * packet sends (it's repeated elsewhere) */
2506 register struct rx_packet *p;
2508 int savelen = 0, saven = 0;
2509 int channel, callNumber;
2511 channel = call->channel;
2512 callNumber = *call->callNumber;
2513 /* BUSY packets refer to the next call on this connection */
2514 if (type == RX_PACKET_TYPE_BUSY) {
2523 p = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL);
2525 osi_Panic("rxi_SendSpecial failure");
2532 p->header.serviceId = conn->serviceId;
2533 p->header.securityIndex = conn->securityIndex;
2534 p->header.cid = (conn->cid | channel);
2535 p->header.callNumber = callNumber;
2537 p->header.epoch = conn->epoch;
2538 p->header.type = type;
2539 p->header.flags = 0;
2540 if (conn->type == RX_CLIENT_CONNECTION)
2541 p->header.flags |= RX_CLIENT_INITIATED;
2543 rx_packetwrite(p, 0, nbytes, data);
2545 for (i = 1; i < p->niovecs; i++) {
2546 if (nbytes <= p->wirevec[i].iov_len) {
2547 savelen = p->wirevec[i].iov_len;
2549 p->wirevec[i].iov_len = nbytes;
2550 p->niovecs = i + 1; /* so condition fails because i == niovecs */
2552 nbytes -= p->wirevec[i].iov_len;
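/* The loop above walks the data iovecs until nbytes is exhausted, shrinking
 * the last iovec it uses and remembering the original length (savelen/saven)
 * so the packet can be restored after the send; this matters when the caller
 * supplied an optionalPacket that it will keep using. */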
2556 rxi_Send(call, p, istack);
2558 rxi_SendPacket((struct rx_call *)0, conn, p, istack);
2559 if (saven) { /* means we truncated the packet above. We probably don't */
2560 /* really need to do this, but it seems safer this way, given that */
2561 /* sneaky optionalPacket... */
2562 p->wirevec[i - 1].iov_len = savelen;
2565 if (!optionalPacket)
2567 return optionalPacket;
2571 /* Encode the packet's header (from the struct header in the packet to
2572 * its network byte order representation in the wire image of the
2573 * packet, which is what is actually sent out on the wire) */
2575 rxi_EncodePacketHeader(register struct rx_packet *p)
2577 register afs_uint32 *buf = (afs_uint32 *) (p->wirevec[0].iov_base); /* MTUXXX */
2579 memset((char *)buf, 0, RX_HEADER_SIZE);
2580 *buf++ = htonl(p->header.epoch);
2581 *buf++ = htonl(p->header.cid);
2582 *buf++ = htonl(p->header.callNumber);
2583 *buf++ = htonl(p->header.seq);
2584 *buf++ = htonl(p->header.serial);
2585 *buf++ = htonl((((afs_uint32) p->header.type) << 24)
2586 | (((afs_uint32) p->header.flags) << 16)
2587 | (p->header.userStatus << 8) | p->header.securityIndex);
2588 /* Note: top 16 bits of this next word were reserved */
2589 *buf++ = htonl((p->header.spare << 16) | (p->header.serviceId & 0xffff));
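/* For reference, the encoded wire header is seven 32-bit words
 * (RX_HEADER_SIZE bytes) in this order: epoch, cid, callNumber, seq, serial,
 * a packed word of type/flags/userStatus/securityIndex (one byte each, type
 * in the high byte), and finally spare (high 16 bits) together with
 * serviceId (low 16 bits). */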
2592 /* Decode the packet's header (from net byte order to a struct header) */
2594 rxi_DecodePacketHeader(register struct rx_packet *p)
2596 register afs_uint32 *buf = (afs_uint32 *) (p->wirevec[0].iov_base); /* MTUXXX */
2599 p->header.epoch = ntohl(*buf);
2601 p->header.cid = ntohl(*buf);
2603 p->header.callNumber = ntohl(*buf);
2605 p->header.seq = ntohl(*buf);
2607 p->header.serial = ntohl(*buf);
2613 /* C will truncate byte fields to bytes for me */
2614 p->header.type = temp >> 24;
2615 p->header.flags = temp >> 16;
2616 p->header.userStatus = temp >> 8;
2617 p->header.securityIndex = temp >> 0;
2622 p->header.serviceId = (temp & 0xffff);
2623 p->header.spare = temp >> 16;
2624 /* Note: top 16 bits of this last word are the security checksum */
2628 rxi_PrepareSendPacket(register struct rx_call *call,
2629 register struct rx_packet *p, register int last)
2631 register struct rx_connection *conn = call->conn;
2633 ssize_t len; /* len must be a signed type; it can go negative */
2635 p->flags &= ~RX_PKTFLAG_ACKED;
2636 p->header.cid = (conn->cid | call->channel);
2637 p->header.serviceId = conn->serviceId;
2638 p->header.securityIndex = conn->securityIndex;
2640 /* No data packets on call 0. Where do these come from? */
2641 if (*call->callNumber == 0)
2642 *call->callNumber = 1;
2644 p->header.callNumber = *call->callNumber;
2645 p->header.seq = call->tnext++;
2646 p->header.epoch = conn->epoch;
2647 p->header.type = RX_PACKET_TYPE_DATA;
2648 p->header.flags = 0;
2649 p->header.spare = 0;
2650 if (conn->type == RX_CLIENT_CONNECTION)
2651 p->header.flags |= RX_CLIENT_INITIATED;
2654 p->header.flags |= RX_LAST_PACKET;
2656 clock_Zero(&p->retryTime); /* Never yet transmitted */
2657 clock_Zero(&p->firstSent); /* Never yet transmitted */
2658 p->header.serial = 0; /* Another way of saying never transmitted... */
2661 /* Now that we're sure this is the last data on the call, make sure
2662 * that the "length" and the sum of the iov_lens match. */
2663 len = p->length + call->conn->securityHeaderSize;
2665 for (i = 1; i < p->niovecs && len > 0; i++) {
2666 len -= p->wirevec[i].iov_len;
2669 osi_Panic("PrepareSendPacket 1\n"); /* MTUXXX */
2670 } else if (i < p->niovecs) {
2671 /* Free any extra elements in the wirevec */
2672 #if defined(RX_ENABLE_TSFPQ)
2673 rxi_FreeDataBufsTSFPQ(p, i, 1 /* allow global pool flush if overquota */);
2674 #else /* !RX_ENABLE_TSFPQ */
2675 MUTEX_ENTER(&rx_freePktQ_lock);
2676 rxi_FreeDataBufsNoLock(p, i);
2677 MUTEX_EXIT(&rx_freePktQ_lock);
2678 #endif /* !RX_ENABLE_TSFPQ */
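/* At this point len is zero or negative: it is the amount by which the last
 * iovec used overshoots length + securityHeaderSize, so adding it below trims
 * that iovec and makes the sum of the iov_lens match the packet length
 * exactly. */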
2683 p->wirevec[i - 1].iov_len += len;
2684 RXS_PreparePacket(conn->securityObject, call, p);
2687 /* Given an interface MTU size, calculate an adjusted MTU size that
2688 * will make efficient use of the RX buffers when the peer is sending
2689 * either AFS 3.4a jumbograms or AFS 3.5 jumbograms. */
2691 rxi_AdjustIfMTU(int mtu)
2696 if (rxi_nRecvFrags == 1 && rxi_nSendFrags == 1)
2698 adjMTU = RX_HEADER_SIZE + RX_JUMBOBUFFERSIZE + RX_JUMBOHEADERSIZE;
2699 if (mtu <= adjMTU) {
2706 frags = mtu / (RX_JUMBOBUFFERSIZE + RX_JUMBOHEADERSIZE);
2707 return (adjMTU + (frags * (RX_JUMBOBUFFERSIZE + RX_JUMBOHEADERSIZE)));
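/* Worked example, assuming the usual constants (RX_HEADER_SIZE 28,
 * RX_JUMBOBUFFERSIZE 1412, RX_JUMBOHEADERSIZE 4): for an Ethernet MTU of
 * 1500, adjMTU is 28 + 1412 + 4 = 1444; the 56 bytes left over cannot hold
 * another 1416-byte jumbo buffer, so the function returns 1444.  The MTU is
 * effectively rounded down to one header plus a whole number of jumbo
 * buffers. */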
2710 /* Given an interface MTU size and the peer's advertised max receive
2711 * size, calculate an adjusted maxMTU size that makes efficient use
2712 * of our packet buffers when we are sending AFS 3.4a jumbograms. */
2714 rxi_AdjustMaxMTU(int mtu, int peerMaxMTU)
2716 int maxMTU = mtu * rxi_nSendFrags;
2717 maxMTU = MIN(maxMTU, peerMaxMTU);
2718 return rxi_AdjustIfMTU(maxMTU);
2721 /* Given a packet size, figure out how many datagram packets will fit.
2722 * The first buffer always contains RX_HEADER_SIZE+RX_JUMBOBUFFERSIZE+
2723 * RX_JUMBOHEADERSIZE, the middle buffers contain RX_JUMBOBUFFERSIZE+
2724 * RX_JUMBOHEADERSIZE, and the last buffer contains RX_JUMBOBUFFERSIZE */
2726 rxi_AdjustDgramPackets(int frags, int mtu)
2729 if (mtu + IPv6_FRAG_HDR_SIZE < RX_JUMBOBUFFERSIZE + RX_HEADER_SIZE) {
2732 maxMTU = (frags * (mtu + UDP_HDR_SIZE)) - UDP_HDR_SIZE;
2733 maxMTU = MIN(maxMTU, RX_MAX_PACKET_SIZE);
2734 /* subtract the size of the first and last packets */
2735 maxMTU -= RX_HEADER_SIZE + (2 * RX_JUMBOBUFFERSIZE) + RX_JUMBOHEADERSIZE;
2739 return (2 + (maxMTU / (RX_JUMBOBUFFERSIZE + RX_JUMBOHEADERSIZE)));
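/* Worked example, under the same assumed constants plus a UDP_HDR_SIZE of 28
 * (IP + UDP headers) and an RX_MAX_PACKET_SIZE large enough not to cap the
 * result: for frags = 3 and mtu = 1444, maxMTU = 3 * (1444 + 28) - 28 = 4388;
 * subtracting the first and last packets (28 + 2 * 1412 + 4 = 2856) leaves
 * 1532, and 1532 / 1416 = 1, so the function returns 2 + 1 = 3 packets per
 * datagram. */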
2744 * This function can be used by the Windows Cache Manager
2745 * to dump the list of all rx packets so that we can determine
2746 * where the packet leakage is.
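 * It walks the global rx_mallocedP list (linked through each packet's
 * allNextp pointer) while holding rx_freePktQ_lock and writes one line per
 * packet to outputFile.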
2748 int rx_DumpPackets(FILE *outputFile, char *cookie)
2752 struct rx_packet *p;
2756 MUTEX_ENTER(&rx_freePktQ_lock);
2757 sprintf(output, "%s - Start dumping all Rx Packets - count=%u\r\n", cookie, rx_packet_id);
2758 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
2760 for (p = rx_mallocedP; p; p = p->allNextp) {
2761 sprintf(output, "%s - packet=0x%p, id=%u, firstSent=%u.%08u, timeSent=%u.%08u, retryTime=%u.%08u, firstSerial=%u, niovecs=%u, flags=0x%x, backoff=%u, length=%u header: epoch=%u, cid=%u, callNum=%u, seq=%u, serial=%u, type=%u, flags=0x%x, userStatus=%u, securityIndex=%u, serviceId=%u\r\n",
2762 cookie, p, p->packetId, p->firstSent.sec, p->firstSent.usec, p->timeSent.sec, p->timeSent.usec, p->retryTime.sec, p->retryTime.usec,
2763 p->firstSerial, p->niovecs, (afs_uint32)p->flags, (afs_uint32)p->backoff, (afs_uint32)p->length,
2764 p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.serial,
2765 (afs_uint32)p->header.type, (afs_uint32)p->header.flags, (afs_uint32)p->header.userStatus,
2766 (afs_uint32)p->header.securityIndex, (afs_uint32)p->header.serviceId);
2767 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
2770 sprintf(output, "%s - End dumping all Rx Packets\r\n", cookie);
2771 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
2773 MUTEX_EXIT(&rx_freePktQ_lock);
2778 #endif /* AFS_NT40_ENV */