#include <afs/param.h>
#endif
-RCSID
- ("$Header$");
#ifdef KERNEL
#if defined(UKERNEL)
#include "sys/types.h"
#include <sys/stat.h>
#include <errno.h>
-#if defined(AFS_NT40_ENV) || defined(AFS_DJGPP_ENV)
-#ifdef AFS_NT40_ENV
+#if defined(AFS_NT40_ENV)
#include <winsock2.h>
-#else
-#include <sys/socket.h>
-#include <netinet/in.h>
-#endif /* AFS_NT40_ENV */
+#ifndef EWOULDBLOCK
+#define EWOULDBLOCK WSAEWOULDBLOCK
+#endif
+#include "rx_user.h"
#include "rx_xmit_nt.h"
#include <stdlib.h>
#else
#include "rx_packet.h"
#include "rx_globals.h"
#include <lwp.h>
-#ifdef HAVE_STRING_H
+#include <assert.h>
#include <string.h>
-#else
-#ifdef HAVE_STRINGS_H
-#include <strings.h>
-#endif
-#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
/* rxdb_fileID is used to identify the lock location, along with line#. */
static int rxdb_fileID = RXDB_FILE_RX_PACKET;
#endif /* RX_LOCKS_DB */
-struct rx_packet *rx_mallocedP = 0;
+static struct rx_packet *rx_mallocedP = 0;
+#ifdef RXDEBUG_PACKET
+static afs_uint32 rx_packet_id = 0;
+#endif
extern char cml_version_number[];
-extern int (*rx_almostSent) ();
+
+static int AllocPacketBufs(int class, int num_pkts, struct rx_queue *q);
static void rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
afs_int32 ahost, short aport,
afs_int32 istack);
+#ifdef RX_ENABLE_TSFPQ
+static int
+rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first, int flush_global);
+#else
+static int rxi_FreeDataBufsToQueue(struct rx_packet *p,
+ afs_uint32 first,
+ struct rx_queue * q);
+#endif
+
/* some rules about packets:
* 1. When a packet is allocated, the final iov_buf contains room for
* a security trailer, but iov_len masks that fact. If the security
/* i is the iovec which contains the first little bit of data in which we
* are interested. l is the total length of everything prior to this iovec.
* j is the number of bytes we can safely copy out of this iovec.
+ * offset only applies to the first iovec.
*/
r = resid;
- while ((resid > 0) && (i < packet->niovecs)) {
- j = MIN(resid, packet->wirevec[i].iov_len - (offset - l));
+ while ((r > 0) && (i < packet->niovecs)) {
+ j = MIN(r, packet->wirevec[i].iov_len - (offset - l));
memcpy(out, (char *)(packet->wirevec[i].iov_base) + (offset - l), j);
- resid -= j;
- l += packet->wirevec[i].iov_len;
+ r -= j;
+ out += j;
+ l += packet->wirevec[i].iov_len;
+ offset = l;
i++;
}
- return (resid ? (r - resid) : r);
+ return (r ? (resid - r) : resid);
}
afs_int32
rx_SlowWritePacket(struct rx_packet * packet, int offset, int resid, char *in)
{
- int i, j, l, r;
+ unsigned int i, j, l, o, r;
char *b;
- for (l = 0, i = 1; i < packet->niovecs; i++) {
- if (l + packet->wirevec[i].iov_len > offset) {
+ for (l = 0, i = 1, o = offset; i < packet->niovecs; i++) {
+ if (l + packet->wirevec[i].iov_len > o) {
break;
}
l += packet->wirevec[i].iov_len;
/* i is the iovec which contains the first little bit of data in which we
* are interested. l is the total length of everything prior to this iovec.
* j is the number of bytes we can safely copy out of this iovec.
+ * offset only applies to the first iovec.
*/
r = resid;
- while ((resid > 0) && (i < RX_MAXWVECS)) {
+ while ((r > 0) && (i <= RX_MAXWVECS)) {
if (i >= packet->niovecs)
- if (rxi_AllocDataBuf(packet, resid, RX_PACKET_CLASS_SEND_CBUF) > 0) /* ++niovecs as a side-effect */
+ if (rxi_AllocDataBuf(packet, r, RX_PACKET_CLASS_SEND_CBUF) > 0) /* ++niovecs as a side-effect */
break;
b = (char *)(packet->wirevec[i].iov_base) + (offset - l);
- j = MIN(resid, packet->wirevec[i].iov_len - (offset - l));
+ j = MIN(r, packet->wirevec[i].iov_len - (offset - l));
memcpy(b, in, j);
- resid -= j;
+ r -= j;
+ in += j;
l += packet->wirevec[i].iov_len;
+ offset = l;
i++;
}
- return (resid ? (r - resid) : r);
+ return (r ? (resid - r) : resid);
}
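+/*
+ * Usage sketch (illustrative only): driving the slow read/write paths.
+ * 'pkt' stands for a caller-supplied rx_packet already holding at least
+ * sizeof(buf) bytes of payload; both calls return the byte count copied.
+ *
+ *   char buf[16];
+ *   if (rx_SlowReadPacket(pkt, 0, sizeof(buf), buf) == sizeof(buf)) {
+ *       ... inspect buf ...
+ *   }
+ *   // writing past the current payload allocates cbufs as a side effect
+ *   (void) rx_SlowWritePacket(pkt, 0, sizeof(buf), buf);
+ */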
-static struct rx_packet *
-allocCBuf(int class)
+int
+rxi_AllocPackets(int class, int num_pkts, struct rx_queue * q)
+{
+ struct rx_packet *p, *np;
+
+ num_pkts = AllocPacketBufs(class, num_pkts, q);
+
+ for (queue_Scan(q, p, np, rx_packet)) {
+ RX_PACKET_IOV_FULLINIT(p);
+ }
+
+ return num_pkts;
+}
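+
+/*
+ * Usage sketch (illustrative only): batch allocation and release.  The
+ * queue is caller-owned and starts empty; passing num_pkts=0 to
+ * rxi_FreePackets makes it count the queue itself.
+ *
+ *   struct rx_queue q;
+ *   queue_Init(&q);
+ *   if (rxi_AllocPackets(RX_PACKET_CLASS_SEND, 4, &q) > 0) {
+ *       ... fill the packets chained on q ...
+ *       rxi_FreePackets(0, &q);
+ *   }
+ */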
+
+#ifdef RX_ENABLE_TSFPQ
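+/*
+ * Grab num_pkts packet buffers for the calling thread.  The fast path is
+ * a checkout from the thread-specific free packet queue (TSFPQ); only
+ * when that queue is short do we take rx_freePktQ_lock and transfer a
+ * glob from the global pool, growing the pool first if necessary.
+ */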
+static int
+AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
+{
+ struct rx_ts_info_t * rx_ts_info;
+ int transfer;
+ SPLVAR;
+
+ RX_TS_INFO_GET(rx_ts_info);
+
+ transfer = num_pkts - rx_ts_info->_FPQ.len;
+ if (transfer > 0) {
+ NETPRI;
+ MUTEX_ENTER(&rx_freePktQ_lock);
+ transfer = MAX(transfer, rx_TSFPQGlobSize);
+ if (transfer > rx_nFreePackets) {
+ /* alloc enough for us, plus a few globs for other threads */
+ rxi_MorePacketsNoLock(transfer + 4 * rx_initSendWindow);
+ }
+
+ RX_TS_FPQ_GTOL2(rx_ts_info, transfer);
+
+ MUTEX_EXIT(&rx_freePktQ_lock);
+ USERPRI;
+ }
+
+ RX_TS_FPQ_QCHECKOUT(rx_ts_info, num_pkts, q);
+
+ return num_pkts;
+}
+#else /* RX_ENABLE_TSFPQ */
+static int
+AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
{
struct rx_packet *c;
+ int i;
+#ifdef KERNEL
+ int overq = 0;
+#endif
SPLVAR;
NETPRI;
+
MUTEX_ENTER(&rx_freePktQ_lock);
#ifdef KERNEL
- if (rxi_OverQuota(class)) {
- c = NULL;
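+ /* trim the request until it fits under this class's packet quota;
+ * overq counts how many buffers we had to give up */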
+ for (; (num_pkts > 0) && (rxi_OverQuota2(class,num_pkts));
+ num_pkts--, overq++);
+
+ if (overq) {
rxi_NeedMorePackets = TRUE;
- MUTEX_ENTER(&rx_stats_mutex);
- switch (class) {
- case RX_PACKET_CLASS_RECEIVE:
- rx_stats.receivePktAllocFailures++;
- break;
- case RX_PACKET_CLASS_SEND:
- rx_stats.sendPktAllocFailures++;
- break;
- case RX_PACKET_CLASS_SPECIAL:
- rx_stats.specialPktAllocFailures++;
- break;
- case RX_PACKET_CLASS_RECV_CBUF:
- rx_stats.receiveCbufPktAllocFailures++;
- break;
- case RX_PACKET_CLASS_SEND_CBUF:
- rx_stats.sendCbufPktAllocFailures++;
- break;
+ if (rx_stats_active) {
+ switch (class) {
+ case RX_PACKET_CLASS_RECEIVE:
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND:
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SPECIAL:
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_RECV_CBUF:
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND_CBUF:
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ }
}
- MUTEX_EXIT(&rx_stats_mutex);
- goto done;
}
- if (queue_IsEmpty(&rx_freePacketQueue)) {
- c = NULL;
+ if (rx_nFreePackets < num_pkts)
+ num_pkts = rx_nFreePackets;
+
+ if (!num_pkts) {
rxi_NeedMorePackets = TRUE;
goto done;
}
#else /* KERNEL */
- if (queue_IsEmpty(&rx_freePacketQueue)) {
- rxi_MorePacketsNoLock(rx_initSendWindow);
+ if (rx_nFreePackets < num_pkts) {
+ rxi_MorePacketsNoLock(MAX((num_pkts-rx_nFreePackets), 4 * rx_initSendWindow));
}
#endif /* KERNEL */
- rx_nFreePackets--;
- c = queue_First(&rx_freePacketQueue, rx_packet);
- queue_Remove(c);
- if (!(c->flags & RX_PKTFLAG_FREE))
- osi_Panic("rxi_AllocPacket: packet not free\n");
- c->flags = 0; /* clear RX_PKTFLAG_FREE, initialize the rest */
- c->header.flags = 0;
+ for (i=0, c=queue_First(&rx_freePacketQueue, rx_packet);
+ i < num_pkts;
+ i++, c=queue_Next(c, rx_packet)) {
+ RX_FPQ_MARK_USED(c);
+ }
+
+ queue_SplitBeforeAppend(&rx_freePacketQueue,q,c);
+
+ rx_nFreePackets -= num_pkts;
#ifdef KERNEL
done:
MUTEX_EXIT(&rx_freePktQ_lock);
USERPRI;
- return c;
+ return num_pkts;
}
+#endif /* RX_ENABLE_TSFPQ */
/*
* Free a packet currently used as a continuation buffer
*/
-void
-rxi_freeCBuf(struct rx_packet *c)
+#ifdef RX_ENABLE_TSFPQ
+/* num_pkts=0 means queue length is unknown */
+int
+rxi_FreePackets(int num_pkts, struct rx_queue * q)
{
+ struct rx_ts_info_t * rx_ts_info;
+ struct rx_packet *c, *nc;
SPLVAR;
+ osi_Assert(num_pkts >= 0);
+ RX_TS_INFO_GET(rx_ts_info);
+
+ if (!num_pkts) {
+ for (queue_Scan(q, c, nc, rx_packet), num_pkts++) {
+ rxi_FreeDataBufsTSFPQ(c, 2, 0);
+ }
+ } else {
+ for (queue_Scan(q, c, nc, rx_packet)) {
+ rxi_FreeDataBufsTSFPQ(c, 2, 0);
+ }
+ }
+
+ if (num_pkts) {
+ RX_TS_FPQ_QCHECKIN(rx_ts_info, num_pkts, q);
+ }
+
+ if (rx_ts_info->_FPQ.len > rx_TSFPQLocalMax) {
+ NETPRI;
+ MUTEX_ENTER(&rx_freePktQ_lock);
+
+ RX_TS_FPQ_LTOG(rx_ts_info);
+
+ /* Wakeup anyone waiting for packets */
+ rxi_PacketsUnWait();
+
+ MUTEX_EXIT(&rx_freePktQ_lock);
+ USERPRI;
+ }
+
+ return num_pkts;
+}
+#else /* RX_ENABLE_TSFPQ */
+/* num_pkts=0 means queue length is unknown */
+int
+rxi_FreePackets(int num_pkts, struct rx_queue *q)
+{
+ struct rx_queue cbs;
+ struct rx_packet *p, *np;
+ int qlen = 0;
+ SPLVAR;
+
+ osi_Assert(num_pkts >= 0);
+ queue_Init(&cbs);
+
+ if (!num_pkts) {
+ for (queue_Scan(q, p, np, rx_packet), num_pkts++) {
+ if (p->niovecs > 2) {
+ qlen += rxi_FreeDataBufsToQueue(p, 2, &cbs);
+ }
+ RX_FPQ_MARK_FREE(p);
+ }
+ if (!num_pkts)
+ return 0;
+ } else {
+ for (queue_Scan(q, p, np, rx_packet)) {
+ if (p->niovecs > 2) {
+ qlen += rxi_FreeDataBufsToQueue(p, 2, &cbs);
+ }
+ RX_FPQ_MARK_FREE(p);
+ }
+ }
+
+ if (qlen) {
+ queue_SpliceAppend(q, &cbs);
+ qlen += num_pkts;
+ } else
+ qlen = num_pkts;
+
NETPRI;
MUTEX_ENTER(&rx_freePktQ_lock);
- rxi_FreePacketNoLock(c);
+ queue_SpliceAppend(&rx_freePacketQueue, q);
+ rx_nFreePackets += qlen;
+
/* Wakeup anyone waiting for packets */
rxi_PacketsUnWait();
MUTEX_EXIT(&rx_freePktQ_lock);
USERPRI;
+
+ return num_pkts;
}
+#endif /* RX_ENABLE_TSFPQ */
/* this one is kind of awful.
* In rxkad, the packet has been all shortened, and everything, ready for
* returns the number of bytes >0 which it failed to come up with.
* Don't need to worry about locking on packet, since only
 * one thread can manipulate one at a time. Locking on continuation
- * packets is handled by allocCBuf */
+ * packets is handled by AllocPacketBufs */
/* MTUXXX don't need to go through the for loop if we can trust niovecs */
int
rxi_AllocDataBuf(struct rx_packet *p, int nb, int class)
{
- int i;
-
- for (i = p->niovecs; nb > 0 && i < RX_MAXWVECS; i++) {
- register struct rx_packet *cb;
- if ((cb = allocCBuf(class))) {
- p->wirevec[i].iov_base = (caddr_t) cb->localdata;
- p->wirevec[i].iov_len = RX_CBUFFERSIZE;
- nb -= RX_CBUFFERSIZE;
- p->length += RX_CBUFFERSIZE;
- p->niovecs++;
- } else
- break;
+ int i, nv;
+ struct rx_queue q;
+ struct rx_packet *cb, *ncb;
+
+ /* compute the number of cbuf's we need */
+ nv = nb / RX_CBUFFERSIZE;
+ if ((nv * RX_CBUFFERSIZE) < nb)
+ nv++;
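+ /* i.e. nv = ceil(nb / RX_CBUFFERSIZE) */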
+ if ((nv + p->niovecs) > RX_MAXWVECS)
+ nv = RX_MAXWVECS - p->niovecs;
+ if (nv < 1)
+ return nb;
+
+ /* allocate buffers */
+ queue_Init(&q);
+ nv = AllocPacketBufs(class, nv, &q);
+
+ /* setup packet iovs */
+ for (i = p->niovecs, queue_Scan(&q, cb, ncb, rx_packet), i++) {
+ queue_Remove(cb);
+ p->wirevec[i].iov_base = (caddr_t) cb->localdata;
+ p->wirevec[i].iov_len = RX_CBUFFERSIZE;
}
+ nb -= (nv * RX_CBUFFERSIZE);
+ p->length += (nv * RX_CBUFFERSIZE);
+ p->niovecs += nv;
+
return nb;
}
/* Add more packet buffers */
+#ifdef RX_ENABLE_TSFPQ
+void
+rxi_MorePackets(int apackets)
+{
+ struct rx_packet *p, *e;
+ struct rx_ts_info_t * rx_ts_info;
+ int getme;
+ SPLVAR;
+
+ getme = apackets * sizeof(struct rx_packet);
+ p = (struct rx_packet *)osi_Alloc(getme);
+ osi_Assert(p);
+
+ PIN(p, getme); /* XXXXX */
+ memset(p, 0, getme);
+ RX_TS_INFO_GET(rx_ts_info);
+
+ RX_TS_FPQ_LOCAL_ALLOC(rx_ts_info,apackets);
+ /* TSFPQ patch also needs to keep track of total packets */
+
+ MUTEX_ENTER(&rx_packets_mutex);
+ rx_nPackets += apackets;
+ RX_TS_FPQ_COMPUTE_LIMITS;
+ MUTEX_EXIT(&rx_packets_mutex);
+
+ for (e = p + apackets; p < e; p++) {
+ RX_PACKET_IOV_INIT(p);
+ p->niovecs = 2;
+
+ RX_TS_FPQ_CHECKIN(rx_ts_info,p);
+
+ NETPRI;
+ MUTEX_ENTER(&rx_freePktQ_lock);
+#ifdef RXDEBUG_PACKET
+ p->packetId = rx_packet_id++;
+ p->allNextp = rx_mallocedP;
+#endif /* RXDEBUG_PACKET */
+ rx_mallocedP = p;
+ MUTEX_EXIT(&rx_freePktQ_lock);
+ USERPRI;
+ }
+ rx_ts_info->_FPQ.delta += apackets;
+
+ if (rx_ts_info->_FPQ.len > rx_TSFPQLocalMax) {
+ NETPRI;
+ MUTEX_ENTER(&rx_freePktQ_lock);
+
+ RX_TS_FPQ_LTOG(rx_ts_info);
+ rxi_NeedMorePackets = FALSE;
+ rxi_PacketsUnWait();
+
+ MUTEX_EXIT(&rx_freePktQ_lock);
+ USERPRI;
+ }
+}
+#else /* RX_ENABLE_TSFPQ */
void
rxi_MorePackets(int apackets)
{
SPLVAR;
getme = apackets * sizeof(struct rx_packet);
- p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
+ p = (struct rx_packet *)osi_Alloc(getme);
+ osi_Assert(p);
PIN(p, getme); /* XXXXX */
- memset((char *)p, 0, getme);
+ memset(p, 0, getme);
NETPRI;
- AFS_RXGLOCK();
MUTEX_ENTER(&rx_freePktQ_lock);
for (e = p + apackets; p < e; p++) {
- p->wirevec[0].iov_base = (char *)(p->wirehead);
- p->wirevec[0].iov_len = RX_HEADER_SIZE;
- p->wirevec[1].iov_base = (char *)(p->localdata);
- p->wirevec[1].iov_len = RX_FIRSTBUFFERSIZE;
+ RX_PACKET_IOV_INIT(p);
p->flags |= RX_PKTFLAG_FREE;
p->niovecs = 2;
queue_Append(&rx_freePacketQueue, p);
+#ifdef RXDEBUG_PACKET
+ p->packetId = rx_packet_id++;
+ p->allNextp = rx_mallocedP;
+#endif /* RXDEBUG_PACKET */
+ rx_mallocedP = p;
}
+
+ rx_nPackets += apackets;
rx_nFreePackets += apackets;
rxi_NeedMorePackets = FALSE;
rxi_PacketsUnWait();
- AFS_RXGUNLOCK();
MUTEX_EXIT(&rx_freePktQ_lock);
USERPRI;
}
+#endif /* RX_ENABLE_TSFPQ */
+
+#ifdef RX_ENABLE_TSFPQ
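+/*
+ * Like rxi_MorePackets, but the caller controls how many of the new
+ * packets stay on this thread's local free queue (num_keep_local) and
+ * whether the surplus is flushed out to the global pool (flush_global).
+ */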
+void
+rxi_MorePacketsTSFPQ(int apackets, int flush_global, int num_keep_local)
+{
+ struct rx_packet *p, *e;
+ struct rx_ts_info_t * rx_ts_info;
+ int getme;
+ SPLVAR;
+
+ getme = apackets * sizeof(struct rx_packet);
+ p = (struct rx_packet *)osi_Alloc(getme);
+
+ PIN(p, getme); /* XXXXX */
+ memset(p, 0, getme);
+ RX_TS_INFO_GET(rx_ts_info);
+
+ RX_TS_FPQ_LOCAL_ALLOC(rx_ts_info,apackets);
+ /* TSFPQ patch also needs to keep track of total packets */
+ MUTEX_ENTER(&rx_packets_mutex);
+ rx_nPackets += apackets;
+ RX_TS_FPQ_COMPUTE_LIMITS;
+ MUTEX_EXIT(&rx_packets_mutex);
+
+ for (e = p + apackets; p < e; p++) {
+ RX_PACKET_IOV_INIT(p);
+ p->niovecs = 2;
+ RX_TS_FPQ_CHECKIN(rx_ts_info,p);
+
+ NETPRI;
+ MUTEX_ENTER(&rx_freePktQ_lock);
+#ifdef RXDEBUG_PACKET
+ p->packetId = rx_packet_id++;
+ p->allNextp = rx_mallocedP;
+#endif /* RXDEBUG_PACKET */
+ rx_mallocedP = p;
+ MUTEX_EXIT(&rx_freePktQ_lock);
+ USERPRI;
+ }
+ rx_ts_info->_FPQ.delta += apackets;
+
+ if (flush_global &&
+ (num_keep_local < apackets)) {
+ NETPRI;
+ MUTEX_ENTER(&rx_freePktQ_lock);
+
+ RX_TS_FPQ_LTOG2(rx_ts_info, (apackets - num_keep_local));
+ rxi_NeedMorePackets = FALSE;
+ rxi_PacketsUnWait();
+
+ MUTEX_EXIT(&rx_freePktQ_lock);
+ USERPRI;
+ }
+}
+#endif /* RX_ENABLE_TSFPQ */
#ifndef KERNEL
/* Add more packet buffers */
void
rxi_MorePacketsNoLock(int apackets)
{
+#ifdef RX_ENABLE_TSFPQ
+ struct rx_ts_info_t * rx_ts_info;
+#endif /* RX_ENABLE_TSFPQ */
struct rx_packet *p, *e;
int getme;
* to hold maximal amounts of data */
apackets += (apackets / 4)
* ((rx_maxJumboRecvSize - RX_FIRSTBUFFERSIZE) / RX_CBUFFERSIZE);
- getme = apackets * sizeof(struct rx_packet);
- p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
-
- memset((char *)p, 0, getme);
+ do {
+ getme = apackets * sizeof(struct rx_packet);
+ p = (struct rx_packet *)osi_Alloc(getme);
+ if (p == NULL) {
+ apackets -= apackets / 4;
+ osi_Assert(apackets > 0);
+ }
+ } while(p == NULL);
+ memset(p, 0, getme);
+
+#ifdef RX_ENABLE_TSFPQ
+ RX_TS_INFO_GET(rx_ts_info);
+ RX_TS_FPQ_GLOBAL_ALLOC(rx_ts_info,apackets);
+#endif /* RX_ENABLE_TSFPQ */
for (e = p + apackets; p < e; p++) {
- p->wirevec[0].iov_base = (char *)(p->wirehead);
- p->wirevec[0].iov_len = RX_HEADER_SIZE;
- p->wirevec[1].iov_base = (char *)(p->localdata);
- p->wirevec[1].iov_len = RX_FIRSTBUFFERSIZE;
+ RX_PACKET_IOV_INIT(p);
p->flags |= RX_PKTFLAG_FREE;
p->niovecs = 2;
queue_Append(&rx_freePacketQueue, p);
+#ifdef RXDEBUG_PACKET
+ p->packetId = rx_packet_id++;
+ p->allNextp = rx_mallocedP;
+#endif /* RXDEBUG_PACKET */
+ rx_mallocedP = p;
}
+
rx_nFreePackets += apackets;
+ MUTEX_ENTER(&rx_packets_mutex);
+ rx_nPackets += apackets;
+#ifdef RX_ENABLE_TSFPQ
+ RX_TS_FPQ_COMPUTE_LIMITS;
+#endif /* RX_ENABLE_TSFPQ */
+ MUTEX_EXIT(&rx_packets_mutex);
rxi_NeedMorePackets = FALSE;
rxi_PacketsUnWait();
}
UNPIN(rx_mallocedP, (rx_maxReceiveWindow + 2) * sizeof(struct rx_packet));
}
+#ifdef RX_ENABLE_TSFPQ
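+/*
+ * Rebalance this thread's local free queue to num_keep_local packets:
+ * surplus packets are returned to the global pool, and a shortfall is
+ * pulled from it (allocating more if needed).  Unless allow_overcommit
+ * is set, the target is capped at rx_TSFPQLocalMax.
+ */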
+void
+rxi_AdjustLocalPacketsTSFPQ(int num_keep_local, int allow_overcommit)
+{
+ struct rx_ts_info_t * rx_ts_info;
+ int xfer;
+ SPLVAR;
+
+ RX_TS_INFO_GET(rx_ts_info);
+
+ if (num_keep_local != rx_ts_info->_FPQ.len) {
+ NETPRI;
+ MUTEX_ENTER(&rx_freePktQ_lock);
+ if (num_keep_local < rx_ts_info->_FPQ.len) {
+ xfer = rx_ts_info->_FPQ.len - num_keep_local;
+ RX_TS_FPQ_LTOG2(rx_ts_info, xfer);
+ rxi_PacketsUnWait();
+ } else {
+ xfer = num_keep_local - rx_ts_info->_FPQ.len;
+ if ((num_keep_local > rx_TSFPQLocalMax) && !allow_overcommit)
+ xfer = rx_TSFPQLocalMax - rx_ts_info->_FPQ.len;
+ if (rx_nFreePackets < xfer) {
+ rxi_MorePacketsNoLock(MAX(xfer - rx_nFreePackets, 4 * rx_initSendWindow));
+ }
+ RX_TS_FPQ_GTOL2(rx_ts_info, xfer);
+ }
+ MUTEX_EXIT(&rx_freePktQ_lock);
+ USERPRI;
+ }
+}
+
+void
+rxi_FlushLocalPacketsTSFPQ(void)
+{
+ rxi_AdjustLocalPacketsTSFPQ(0, 0);
+}
+#endif /* RX_ENABLE_TSFPQ */
+
/* Allocate more packets iff we need more continuation buffers */
/* In kernel, can't page in memory with interrupts disabled, so we
* don't use the event mechanism. */
rx_CheckPackets(void)
{
if (rxi_NeedMorePackets) {
- rxi_MorePackets(rx_initSendWindow);
+ rxi_MorePackets(rx_maxSendWindow);
}
}
*/
/* Actually free the packet p. */
+#ifdef RX_ENABLE_TSFPQ
void
rxi_FreePacketNoLock(struct rx_packet *p)
{
- dpf(("Free %x\n", (int)p));
+ struct rx_ts_info_t * rx_ts_info;
+ dpf(("Free %"AFS_PTR_FMT"\n", p));
- if (p->flags & RX_PKTFLAG_FREE)
- osi_Panic("rxi_FreePacketNoLock: packet already free\n");
+ RX_TS_INFO_GET(rx_ts_info);
+ RX_TS_FPQ_CHECKIN(rx_ts_info,p);
+ if (rx_ts_info->_FPQ.len > rx_TSFPQLocalMax) {
+ RX_TS_FPQ_LTOG(rx_ts_info);
+ }
+}
+#else /* RX_ENABLE_TSFPQ */
+void
+rxi_FreePacketNoLock(struct rx_packet *p)
+{
+ dpf(("Free %"AFS_PTR_FMT"\n", p));
+
+ RX_FPQ_MARK_FREE(p);
rx_nFreePackets++;
- p->flags |= RX_PKTFLAG_FREE;
queue_Append(&rx_freePacketQueue, p);
}
+#endif /* RX_ENABLE_TSFPQ */
+#ifdef RX_ENABLE_TSFPQ
+void
+rxi_FreePacketTSFPQ(struct rx_packet *p, int flush_global)
+{
+ struct rx_ts_info_t * rx_ts_info;
+ dpf(("Free %"AFS_PTR_FMT"\n", p));
+
+ RX_TS_INFO_GET(rx_ts_info);
+ RX_TS_FPQ_CHECKIN(rx_ts_info,p);
+
+ if (flush_global && (rx_ts_info->_FPQ.len > rx_TSFPQLocalMax)) {
+ NETPRI;
+ MUTEX_ENTER(&rx_freePktQ_lock);
+
+ RX_TS_FPQ_LTOG(rx_ts_info);
+
+ /* Wakeup anyone waiting for packets */
+ rxi_PacketsUnWait();
+
+ MUTEX_EXIT(&rx_freePktQ_lock);
+ USERPRI;
+ }
+}
+#endif /* RX_ENABLE_TSFPQ */
+
+/*
+ * free continuation buffers off a packet into a queue
+ *
+ * [IN] p -- packet from which continuation buffers will be freed
+ * [IN] first -- iovec offset of first continuation buffer to free
+ * [IN] q -- queue into which continuation buffers will be chained
+ *
+ * returns:
+ * number of continuation buffers freed
+ */
+#ifndef RX_ENABLE_TSFPQ
+static int
+rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first, struct rx_queue * q)
+{
+ struct iovec *iov;
+ struct rx_packet * cb;
+ int count = 0;
+
+ for (first = MAX(2, first); first < p->niovecs; first++, count++) {
+ iov = &p->wirevec[first];
+ if (!iov->iov_base)
+ osi_Panic("rxi_FreeDataBufsToQueue: unexpected NULL iov");
+ cb = RX_CBUF_TO_PACKET(iov->iov_base, p);
+ RX_FPQ_MARK_FREE(cb);
+ queue_Append(q, cb);
+ }
+ p->length = 0;
+ p->niovecs = 0;
+
+ return count;
+}
+#endif
+
+/*
+ * free packet continuation buffers into the global free packet pool
+ *
+ * [IN] p -- packet from which to free continuation buffers
+ * [IN] first -- iovec offset of first continuation buffer to free
+ *
+ * returns:
+ * zero always
+ */
int
-rxi_FreeDataBufsNoLock(struct rx_packet *p, int first)
+rxi_FreeDataBufsNoLock(struct rx_packet *p, afs_uint32 first)
{
- struct iovec *iov, *end;
+ struct iovec *iov;
- if (first != 1) /* MTUXXX */
- osi_Panic("FreeDataBufs 1: first must be 1");
- iov = &p->wirevec[1];
- end = iov + (p->niovecs - 1);
- if (iov->iov_base != (caddr_t) p->localdata) /* MTUXXX */
- osi_Panic("FreeDataBufs 2: vec 1 must be localdata");
- for (iov++; iov < end; iov++) {
+ for (first = MAX(2, first); first < p->niovecs; first++) {
+ iov = &p->wirevec[first];
if (!iov->iov_base)
- osi_Panic("FreeDataBufs 3: vecs 2-niovecs must not be NULL");
+ osi_Panic("rxi_FreeDataBufsNoLock: unexpected NULL iov");
rxi_FreePacketNoLock(RX_CBUF_TO_PACKET(iov->iov_base, p));
}
p->length = 0;
return 0;
}
+#ifdef RX_ENABLE_TSFPQ
+/*
+ * free packet continuation buffers into the thread-local free pool
+ *
+ * [IN] p -- packet from which continuation buffers will be freed
+ * [IN] first -- iovec offset of first continuation buffer to free
+ * any value less than 2, the min number of iovecs,
+ * is treated as if it is 2.
+ * [IN] flush_global -- if nonzero, we will flush overquota packets to the
+ * global free pool before returning
+ *
+ * returns:
+ * zero always
+ */
+static int
+rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first, int flush_global)
+{
+ struct iovec *iov;
+ struct rx_ts_info_t * rx_ts_info;
+
+ RX_TS_INFO_GET(rx_ts_info);
+
+ for (first = MAX(2, first); first < p->niovecs; first++) {
+ iov = &p->wirevec[first];
+ if (!iov->iov_base)
+ osi_Panic("rxi_FreeDataBufsTSFPQ: unexpected NULL iov");
+ RX_TS_FPQ_CHECKIN(rx_ts_info,RX_CBUF_TO_PACKET(iov->iov_base, p));
+ }
+ p->length = 0;
+ p->niovecs = 0;
+
+ if (flush_global && (rx_ts_info->_FPQ.len > rx_TSFPQLocalMax)) {
+ NETPRI;
+ MUTEX_ENTER(&rx_freePktQ_lock);
+
+ RX_TS_FPQ_LTOG(rx_ts_info);
+
+ /* Wakeup anyone waiting for packets */
+ rxi_PacketsUnWait();
+
+ MUTEX_EXIT(&rx_freePktQ_lock);
+ USERPRI;
+ }
+ return 0;
+}
+#endif /* RX_ENABLE_TSFPQ */
+
int rxi_nBadIovecs = 0;
/* rxi_RestoreDataBufs
void
rxi_RestoreDataBufs(struct rx_packet *p)
{
- int i;
+ unsigned int i;
struct iovec *iov = &p->wirevec[2];
- p->wirevec[0].iov_base = (char *)(p->wirehead);
- p->wirevec[0].iov_len = RX_HEADER_SIZE;
- p->wirevec[1].iov_base = (char *)(p->localdata);
- p->wirevec[1].iov_len = RX_FIRSTBUFFERSIZE;
+ RX_PACKET_IOV_INIT(p);
for (i = 2, iov = &p->wirevec[2]; i < p->niovecs; i++, iov++) {
if (!iov->iov_base) {
}
}
+#ifdef RX_ENABLE_TSFPQ
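+/*
+ * Return any continuation buffers lying past the live payload to the
+ * thread-local free pool, shrinking p->niovecs accordingly; the local
+ * pool is flushed to the global one if it grows past rx_TSFPQLocalMax.
+ */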
+int
+rxi_TrimDataBufs(struct rx_packet *p, int first)
+{
+ int length;
+ struct iovec *iov, *end;
+ struct rx_ts_info_t * rx_ts_info;
+ SPLVAR;
+
+ if (first != 1)
+ osi_Panic("TrimDataBufs 1: first must be 1");
+
+ /* Skip over continuation buffers containing message data */
+ iov = &p->wirevec[2];
+ end = iov + (p->niovecs - 2);
+ length = p->length - p->wirevec[1].iov_len;
+ for (; iov < end && length > 0; iov++) {
+ if (!iov->iov_base)
+ osi_Panic("TrimDataBufs 3: vecs 1-niovecs must not be NULL");
+ length -= iov->iov_len;
+ }
+
+ /* iov now points to the first empty data buffer. */
+ if (iov >= end)
+ return 0;
+
+ RX_TS_INFO_GET(rx_ts_info);
+ for (; iov < end; iov++) {
+ if (!iov->iov_base)
+ osi_Panic("TrimDataBufs 4: vecs 2-niovecs must not be NULL");
+ RX_TS_FPQ_CHECKIN(rx_ts_info,RX_CBUF_TO_PACKET(iov->iov_base, p));
+ p->niovecs--;
+ }
+ if (rx_ts_info->_FPQ.len > rx_TSFPQLocalMax) {
+ NETPRI;
+ MUTEX_ENTER(&rx_freePktQ_lock);
+
+ RX_TS_FPQ_LTOG(rx_ts_info);
+ rxi_PacketsUnWait();
+
+ MUTEX_EXIT(&rx_freePktQ_lock);
+ USERPRI;
+ }
+
+ return 0;
+}
+#else /* RX_ENABLE_TSFPQ */
int
rxi_TrimDataBufs(struct rx_packet *p, int first)
{
return 0;
}
+#endif /* RX_ENABLE_TSFPQ */
/* Free the packet p. P is assumed not to be on any queue, i.e.
* remove it yourself first if you call this routine. */
+#ifdef RX_ENABLE_TSFPQ
+void
+rxi_FreePacket(struct rx_packet *p)
+{
+ rxi_FreeDataBufsTSFPQ(p, 2, 0);
+ rxi_FreePacketTSFPQ(p, RX_TS_FPQ_FLUSH_GLOBAL);
+}
+#else /* RX_ENABLE_TSFPQ */
void
rxi_FreePacket(struct rx_packet *p)
{
NETPRI;
MUTEX_ENTER(&rx_freePktQ_lock);
- rxi_FreeDataBufsNoLock(p, 1);
+ rxi_FreeDataBufsNoLock(p, 2);
rxi_FreePacketNoLock(p);
/* Wakeup anyone waiting for packets */
rxi_PacketsUnWait();
MUTEX_EXIT(&rx_freePktQ_lock);
USERPRI;
}
-
+#endif /* RX_ENABLE_TSFPQ */
/* rxi_AllocPacket sets up p->length so it reflects the number of
* bytes in the packet at this point, **not including** the header.
 * The header is absolutely necessary; besides, this is the way the
* length field is usually used */
+#ifdef RX_ENABLE_TSFPQ
struct rx_packet *
rxi_AllocPacketNoLock(int class)
{
- register struct rx_packet *p;
+ struct rx_packet *p;
+ struct rx_ts_info_t * rx_ts_info;
+
+ RX_TS_INFO_GET(rx_ts_info);
#ifdef KERNEL
if (rxi_OverQuota(class)) {
rxi_NeedMorePackets = TRUE;
- MUTEX_ENTER(&rx_stats_mutex);
- switch (class) {
- case RX_PACKET_CLASS_RECEIVE:
- rx_stats.receivePktAllocFailures++;
- break;
- case RX_PACKET_CLASS_SEND:
- rx_stats.sendPktAllocFailures++;
- break;
- case RX_PACKET_CLASS_SPECIAL:
- rx_stats.specialPktAllocFailures++;
- break;
- case RX_PACKET_CLASS_RECV_CBUF:
- rx_stats.receiveCbufPktAllocFailures++;
- break;
- case RX_PACKET_CLASS_SEND_CBUF:
- rx_stats.sendCbufPktAllocFailures++;
- break;
+ if (rx_stats_active) {
+ switch (class) {
+ case RX_PACKET_CLASS_RECEIVE:
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND:
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SPECIAL:
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_RECV_CBUF:
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND_CBUF:
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ }
}
- MUTEX_EXIT(&rx_stats_mutex);
+ return (struct rx_packet *)0;
+ }
+#endif /* KERNEL */
+
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
+
+#ifdef KERNEL
+ if (queue_IsEmpty(&rx_freePacketQueue))
+ osi_Panic("rxi_AllocPacket error");
+#else /* KERNEL */
+ if (queue_IsEmpty(&rx_freePacketQueue))
+ rxi_MorePacketsNoLock(rx_maxSendWindow);
+#endif /* KERNEL */
+
+
+ RX_TS_FPQ_GTOL(rx_ts_info);
+ }
+
+ RX_TS_FPQ_CHECKOUT(rx_ts_info,p);
+
+ dpf(("Alloc %"AFS_PTR_FMT", class %d\n", p, class));
+
+
+ /* have to do this here because rx_FlushWrite fiddles with the iovs in
+ * order to truncate outbound packets. In the near future, may need
+ * to allocate bufs from a static pool here, and/or in AllocSendPacket
+ */
+ RX_PACKET_IOV_FULLINIT(p);
+ return p;
+}
+#else /* RX_ENABLE_TSFPQ */
+struct rx_packet *
+rxi_AllocPacketNoLock(int class)
+{
+ struct rx_packet *p;
+
+#ifdef KERNEL
+ if (rxi_OverQuota(class)) {
+ rxi_NeedMorePackets = TRUE;
+ if (rx_stats_active) {
+ switch (class) {
+ case RX_PACKET_CLASS_RECEIVE:
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND:
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SPECIAL:
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_RECV_CBUF:
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND_CBUF:
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ }
+ }
return (struct rx_packet *)0;
}
#endif /* KERNEL */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetRequests++;
- MUTEX_EXIT(&rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
#ifdef KERNEL
if (queue_IsEmpty(&rx_freePacketQueue))
osi_Panic("rxi_AllocPacket error");
#else /* KERNEL */
if (queue_IsEmpty(&rx_freePacketQueue))
- rxi_MorePacketsNoLock(rx_initSendWindow);
+ rxi_MorePacketsNoLock(rx_maxSendWindow);
#endif /* KERNEL */
rx_nFreePackets--;
p = queue_First(&rx_freePacketQueue, rx_packet);
- if (!(p->flags & RX_PKTFLAG_FREE))
- osi_Panic("rxi_AllocPacket: packet not free\n");
+ queue_Remove(p);
+ RX_FPQ_MARK_USED(p);
- dpf(("Alloc %x, class %d\n", (int)p, class));
+ dpf(("Alloc %"AFS_PTR_FMT", class %d\n", p, class));
- queue_Remove(p);
- p->flags = 0; /* clear RX_PKTFLAG_FREE, initialize the rest */
- p->header.flags = 0;
/* have to do this here because rx_FlushWrite fiddles with the iovs in
* order to truncate outbound packets. In the near future, may need
* to allocate bufs from a static pool here, and/or in AllocSendPacket
*/
- p->wirevec[0].iov_base = (char *)(p->wirehead);
- p->wirevec[0].iov_len = RX_HEADER_SIZE;
- p->wirevec[1].iov_base = (char *)(p->localdata);
- p->wirevec[1].iov_len = RX_FIRSTBUFFERSIZE;
- p->niovecs = 2;
- p->length = RX_FIRSTBUFFERSIZE;
+ RX_PACKET_IOV_FULLINIT(p);
return p;
}
+#endif /* RX_ENABLE_TSFPQ */
+#ifdef RX_ENABLE_TSFPQ
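+/*
+ * Allocate a packet from this thread's free queue without touching any
+ * global locks on the fast path.  If the local queue is empty: with
+ * pull_global set we refill from the global pool (growing it if needed),
+ * otherwise we fail and return NULL.
+ */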
+struct rx_packet *
+rxi_AllocPacketTSFPQ(int class, int pull_global)
+{
+ struct rx_packet *p;
+ struct rx_ts_info_t * rx_ts_info;
+
+ RX_TS_INFO_GET(rx_ts_info);
+
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
+ MUTEX_ENTER(&rx_freePktQ_lock);
+
+ if (queue_IsEmpty(&rx_freePacketQueue))
+ rxi_MorePacketsNoLock(rx_maxSendWindow);
+
+ RX_TS_FPQ_GTOL(rx_ts_info);
+
+ MUTEX_EXIT(&rx_freePktQ_lock);
+ } else if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
+ return NULL;
+ }
+
+ RX_TS_FPQ_CHECKOUT(rx_ts_info,p);
+
+ dpf(("Alloc %"AFS_PTR_FMT", class %d\n", p, class));
+
+ /* have to do this here because rx_FlushWrite fiddles with the iovs in
+ * order to truncate outbound packets. In the near future, may need
+ * to allocate bufs from a static pool here, and/or in AllocSendPacket
+ */
+ RX_PACKET_IOV_FULLINIT(p);
+ return p;
+}
+#endif /* RX_ENABLE_TSFPQ */
+
+#ifdef RX_ENABLE_TSFPQ
struct rx_packet *
rxi_AllocPacket(int class)
{
- register struct rx_packet *p;
+ struct rx_packet *p;
+
+ p = rxi_AllocPacketTSFPQ(class, RX_TS_FPQ_PULL_GLOBAL);
+ return p;
+}
+#else /* RX_ENABLE_TSFPQ */
+struct rx_packet *
+rxi_AllocPacket(int class)
+{
+ struct rx_packet *p;
MUTEX_ENTER(&rx_freePktQ_lock);
p = rxi_AllocPacketNoLock(class);
MUTEX_EXIT(&rx_freePktQ_lock);
return p;
}
+#endif /* RX_ENABLE_TSFPQ */
/* This guy comes up with as many buffers as it {takes,can get} given
* the MTU for this call. It also sets the packet length before
* Called with call locked.
*/
struct rx_packet *
-rxi_AllocSendPacket(register struct rx_call *call, int want)
+rxi_AllocSendPacket(struct rx_call *call, int want)
{
- register struct rx_packet *p = (struct rx_packet *)0;
- register int mud;
- register unsigned delta;
+ struct rx_packet *p = (struct rx_packet *)0;
+ int mud;
+ unsigned delta;
SPLVAR;
mud = call->MTU - RX_HEADER_SIZE;
rx_GetSecurityHeaderSize(rx_ConnectionOf(call)) +
rx_GetSecurityMaxTrailerSize(rx_ConnectionOf(call));
+#ifdef RX_ENABLE_TSFPQ
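+ /* TSFPQ fast path: try a lock-free allocation from the thread-local
+ * queue; fall through to the sleeping loop below only if it is empty */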
+ if ((p = rxi_AllocPacketTSFPQ(RX_PACKET_CLASS_SEND, 0))) {
+ want += delta;
+ want = MIN(want, mud);
+
+ if ((unsigned)want > p->length)
+ (void)rxi_AllocDataBuf(p, (want - p->length),
+ RX_PACKET_CLASS_SEND_CBUF);
+
+ if (p->length > mud)
+ p->length = mud;
+
+ if (delta >= p->length) {
+ rxi_FreePacket(p);
+ p = NULL;
+ } else {
+ p->length -= delta;
+ }
+ return p;
+ }
+#endif /* RX_ENABLE_TSFPQ */
+
while (!(call->error)) {
MUTEX_ENTER(&rx_freePktQ_lock);
/* if an error occurred, or we get the packet we want, we're done */
(void)rxi_AllocDataBuf(p, (want - p->length),
RX_PACKET_CLASS_SEND_CBUF);
- if ((unsigned)p->length > mud)
+ if (p->length > mud)
p->length = mud;
if (delta >= p->length) {
}
#ifndef KERNEL
-
+#ifdef AFS_NT40_ENV
+/* Windows does not use file descriptors. */
+#define CountFDs(amax) 0
+#else
/* count the number of used FDs */
static int
-CountFDs(register int amax)
+CountFDs(int amax)
{
struct stat tstat;
- register int i, code;
- register int count;
+ int i, code;
+ int count;
count = 0;
for (i = 0; i < amax; i++) {
}
return count;
}
-
+#endif /* AFS_NT40_ENV */
#else /* KERNEL */
#define CountFDs(amax) amax
* the data length of the packet is stored in the packet structure.
* The header is decoded. */
int
-rxi_ReadPacket(int socket, register struct rx_packet *p, afs_uint32 * host,
+rxi_ReadPacket(osi_socket socket, struct rx_packet *p, afs_uint32 * host,
u_short * port)
{
struct sockaddr_in from;
- int nbytes;
+ int nbytes; /* must stay signed: checked against 0 below for recv errors */
afs_int32 rlen;
- register afs_int32 tlen, savelen;
+ afs_uint32 tlen, savelen;
struct msghdr msg;
rx_computelen(p, tlen);
rx_SetDataSize(p, tlen); /* this is the size of the user data area */
savelen = p->wirevec[p->niovecs - 1].iov_len;
p->wirevec[p->niovecs - 1].iov_len += RX_EXTRABUFFERSIZE;
- memset((char *)&msg, 0, sizeof(msg));
+ memset(&msg, 0, sizeof(msg));
msg.msg_name = (char *)&from;
msg.msg_namelen = sizeof(struct sockaddr_in);
msg.msg_iov = p->wirevec;
/* restore the vec to its correct state */
p->wirevec[p->niovecs - 1].iov_len = savelen;
- p->length = (nbytes - RX_HEADER_SIZE);
+ p->length = (u_short)(nbytes - RX_HEADER_SIZE);
if ((nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */
- if (nbytes > 0)
- rxi_MorePackets(rx_initSendWindow);
-#ifndef AFS_NT40_ENV
- else if (nbytes < 0 && errno == EWOULDBLOCK) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.noPacketOnRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ if (nbytes < 0 && errno == EWOULDBLOCK) {
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
+ } else if (nbytes <= 0) {
+ if (rx_stats_active) {
+ MUTEX_ENTER(&rx_stats_mutex);
+ rx_stats.bogusPacketOnRead++;
+ rx_stats.bogusHost = from.sin_addr.s_addr;
+ MUTEX_EXIT(&rx_stats_mutex);
+ }
+ dpf(("B: bogus packet from [%x,%d] nb=%d", ntohl(from.sin_addr.s_addr),
+ ntohs(from.sin_port), nbytes));
}
+ return 0;
+ }
+#ifdef RXDEBUG
+ else if ((rx_intentionallyDroppedOnReadPer100 > 0)
+ && (random() % 100 < rx_intentionallyDroppedOnReadPer100)) {
+ rxi_DecodePacketHeader(p);
+
+ *host = from.sin_addr.s_addr;
+ *port = from.sin_port;
+
+ dpf(("Dropped %d %s: %x.%u.%u.%u.%u.%u.%u flags %d len %d",
+ p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(*host), ntohs(*port), p->header.serial,
+ p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags,
+ p->length));
+#ifdef RX_TRIMDATABUFS
+ rxi_TrimDataBufs(p, 1);
#endif
- else {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.bogusPacketOnRead++;
- rx_stats.bogusHost = from.sin_addr.s_addr;
- MUTEX_EXIT(&rx_stats_mutex);
- dpf(("B: bogus packet from [%x,%d] nb=%d", from.sin_addr.s_addr,
- from.sin_port, nbytes));
- }
return 0;
- } else {
+ }
+#endif
+ else {
/* Extract packet header. */
rxi_DecodePacketHeader(p);
*port = from.sin_port;
if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
struct rx_peer *peer;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetsRead[p->header.type - 1]++;
- MUTEX_EXIT(&rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
/*
* Try to look up this peer structure. If it doesn't exist,
* don't create a new one -
* never be cleaned up.
*/
peer = rxi_FindPeer(*host, *port, 0, 0);
- if (peer) {
+ /* Since this may not be associated with a connection,
+ * it may have no refCount, meaning we could race with
+ * ReapConnections
+ */
+ if (peer && (peer->refCount > 0)) {
MUTEX_ENTER(&peer->peer_lock);
hadd32(peer->bytesReceived, p->length);
MUTEX_EXIT(&peer->peer_lock);
}
}
+#ifdef RX_TRIMDATABUFS
/* Free any empty packet buffers at the end of this packet */
rxi_TrimDataBufs(p, 1);
-
+#endif
return 1;
}
}
* last two pad bytes. */
struct rx_packet *
-rxi_SplitJumboPacket(register struct rx_packet *p, afs_int32 host, short port,
+rxi_SplitJumboPacket(struct rx_packet *p, afs_int32 host, short port,
int first)
{
struct rx_packet *np;
int length, int istack)
{
struct msghdr msg;
+ int ret;
memset(&msg, 0, sizeof(msg));
msg.msg_iov = dvec;
msg.msg_name = addr;
msg.msg_namelen = sizeof(struct sockaddr_in);
- rxi_Sendmsg(socket, &msg, 0);
+ ret = rxi_Sendmsg(socket, &msg, 0);
- return 0;
+ return ret;
}
#elif !defined(UKERNEL)
/*
* The message is NOT changed.
*/
static int
-cpytoc(mblk_t * mp, register int off, register int len, register char *cp)
+cpytoc(mblk_t * mp, int off, int len, char *cp)
{
- register int n;
+ int n;
for (; mp && len > 0; mp = mp->b_cont) {
if (mp->b_datap->db_type != M_DATA) {
* This sucks, anyway, do it like m_cpy.... below
*/
static int
-cpytoiovec(mblk_t * mp, int off, int len, register struct iovec *iovs,
+cpytoiovec(mblk_t * mp, int off, int len, struct iovec *iovs,
int niovs)
{
- register int m, n, o, t, i;
+ int m, n, o, t, i;
for (i = -1, t = 0; i < niovs && mp && len > 0; mp = mp->b_cont) {
if (mp->b_datap->db_type != M_DATA) {
#define m_cpytoc(a, b, c, d) cpytoc(a, b, c, d)
#define m_cpytoiovec(a, b, c, d, e) cpytoiovec(a, b, c, d, e)
#else
-#if !defined(AFS_LINUX20_ENV)
+#if !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN80_ENV)
static int
m_cpytoiovec(struct mbuf *m, int off, int len, struct iovec iovs[], int niovs)
{
#endif /* LINUX */
#endif /* AFS_SUN5_ENV */
-#if !defined(AFS_LINUX20_ENV)
+#if !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN80_ENV)
int
rx_mb_to_packet(amb, free, hdr_len, data_len, phandle)
#if defined(AFS_SUN5_ENV) || defined(AFS_HPUX110_ENV)
struct rx_packet *phandle;
int hdr_len, data_len;
{
- register int code;
+ int code;
code =
m_cpytoiovec(amb, hdr_len, data_len, phandle->wirevec,
/* send a response to a debug packet */
struct rx_packet *
-rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
+rxi_ReceiveDebugPacket(struct rx_packet *ap, osi_socket asocket,
afs_int32 ahost, short aport, int istack)
{
struct rx_debugIn tin;
struct rx_debugStats tstat;
/* get basic stats */
- memset((char *)&tstat, 0, sizeof(tstat)); /* make sure spares are zero */
+ memset(&tstat, 0, sizeof(tstat)); /* make sure spares are zero */
tstat.version = RX_DEBUGI_VERSION;
#ifndef RX_ENABLE_LOCKS
tstat.waitingForPackets = rx_waitingForPackets;
#endif
+ MUTEX_ENTER(&rx_serverPool_lock);
tstat.nFreePackets = htonl(rx_nFreePackets);
+ tstat.nPackets = htonl(rx_nPackets);
tstat.callsExecuted = htonl(rxi_nCalls);
tstat.packetReclaims = htonl(rx_packetReclaims);
tstat.usedFDs = CountFDs(64);
tstat.nWaiting = htonl(rx_nWaiting);
+ tstat.nWaited = htonl(rx_nWaited);
queue_Count(&rx_idleServerQueue, np, nqe, rx_serverQueueEntry,
tstat.idleThreads);
+ MUTEX_EXIT(&rx_serverPool_lock);
tstat.idleThreads = htonl(tstat.idleThreads);
tl = sizeof(struct rx_debugStats) - ap->length;
if (tl > 0)
case RX_DEBUGI_GETALLCONN:
case RX_DEBUGI_GETCONN:{
- int i, j;
- register struct rx_connection *tc;
+ unsigned int i, j;
+ struct rx_connection *tc;
struct rx_call *tcall;
struct rx_debugConn tconn;
int all = (tin.type == RX_DEBUGI_GETALLCONN);
if (tl > 0)
return ap;
- memset((char *)&tconn, 0, sizeof(tconn)); /* make sure spares are zero */
+ memset(&tconn, 0, sizeof(tconn)); /* make sure spares are zero */
/* get N'th (maybe) "interesting" connection info */
for (i = 0; i < rx_hashTableSize; i++) {
#if !defined(KERNEL)
*/
case RX_DEBUGI_GETPEER:{
- int i;
- register struct rx_peer *tp;
+ unsigned int i;
+ struct rx_peer *tp;
struct rx_debugPeer tpeer;
if (tl > 0)
return ap;
- memset((char *)&tpeer, 0, sizeof(tpeer));
+ memset(&tpeer, 0, sizeof(tpeer));
for (i = 0; i < rx_hashTableSize; i++) {
#if !defined(KERNEL)
/* the time complexity of the algorithm used here
MUTEX_ENTER(&rx_peerHashTable_lock);
for (tp = rx_peerHashTable[i]; tp; tp = tp->next) {
if (tin.index-- <= 0) {
+ tp->refCount++;
+ MUTEX_EXIT(&rx_peerHashTable_lock);
+
+ MUTEX_ENTER(&tp->peer_lock);
tpeer.host = tp->host;
tpeer.port = tp->port;
tpeer.ifMTU = htons(tp->ifMTU);
htonl(tp->bytesReceived.high);
tpeer.bytesReceived.low =
htonl(tp->bytesReceived.low);
+ MUTEX_EXIT(&tp->peer_lock);
+ MUTEX_ENTER(&rx_peerHashTable_lock);
+ tp->refCount--;
MUTEX_EXIT(&rx_peerHashTable_lock);
+
rx_packetwrite(ap, 0, sizeof(struct rx_debugPeer),
(char *)&tpeer);
tl = ap->length;
return ap;
/* Since its all int32s convert to network order with a loop. */
+ if (rx_stats_active)
MUTEX_ENTER(&rx_stats_mutex);
s = (afs_int32 *) & rx_stats;
for (i = 0; i < sizeof(rx_stats) / sizeof(afs_int32); i++, s++)
tl = ap->length;
ap->length = sizeof(rx_stats);
+ if (rx_stats_active)
MUTEX_EXIT(&rx_stats_mutex);
rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
ap->length = tl;
}
struct rx_packet *
-rxi_ReceiveVersionPacket(register struct rx_packet *ap, osi_socket asocket,
+rxi_ReceiveVersionPacket(struct rx_packet *ap, osi_socket asocket,
afs_int32 ahost, short aport, int istack)
{
afs_int32 tl;
afs_int32 ahost, short aport, afs_int32 istack)
{
struct sockaddr_in taddr;
- int i;
- int nbytes;
+ unsigned int i, nbytes, savelen = 0;
int saven = 0;
- size_t savelen = 0;
#ifdef KERNEL
int waslocked = ISAFS_GLOCK();
#endif
} else
nbytes -= apacket->wirevec[i].iov_len;
}
- AFS_RXGUNLOCK();
#ifdef KERNEL
#ifdef RX_KERNEL_TRACE
if (ICL_SETACTIVE(afs_iclSetp)) {
AFS_GLOCK();
#endif
#endif
- AFS_RXGLOCK();
if (saven) { /* means we truncated the packet above. */
apacket->wirevec[i - 1].iov_len = savelen;
apacket->niovecs = saven;
#endif
int code;
struct sockaddr_in addr;
- register struct rx_peer *peer = conn->peer;
+ struct rx_peer *peer = conn->peer;
osi_socket socket;
#ifdef RXDEBUG
char deliveryType = 'S';
* serial number means the packet was never sent. */
MUTEX_ENTER(&conn->conn_data_lock);
p->header.serial = ++conn->serial;
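+ /* record the size/seq of any packet larger than the peer's negotiated
+ * maxPacketSize, presumably so path-MTU handling elsewhere can react
+ * if such a packet fails to get through */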
+ if (p->length > conn->peer->maxPacketSize) {
+ if (p->header.seq != 0) {
+ conn->lastPacketSize = p->length;
+ conn->lastPacketSizeSeq = p->header.seq;
+ }
+ }
MUTEX_EXIT(&conn->conn_data_lock);
/* This is so we can adjust retransmit time-outs better in the face of
* rapidly changing round-trip times. RTO estimation is not a la Karn.
* blocking socket, but unfortunately the interface doesn't
* allow us to have the socket block in send mode, and not
* block in receive mode */
- AFS_RXGUNLOCK();
#ifdef KERNEL
waslocked = ISAFS_GLOCK();
#ifdef RX_KERNEL_TRACE
osi_NetSend(socket, &addr, p->wirevec, p->niovecs,
p->length + RX_HEADER_SIZE, istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.netSendFailures++;
- MUTEX_EXIT(&rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
p->retryTime = p->timeSent; /* resend it very soon */
clock_Addmsec(&(p->retryTime),
10 + (((afs_uint32) p->backoff) << 8));
-
-#if defined(KERNEL) && defined(AFS_LINUX20_ENV)
- /* Linux is nice -- it can tell us right away that we cannot
- * reach this recipient by returning an ENETUNREACH error
- * code. So, when this happens let's "down" the host NOW so
+ /* Some systems are nice and tell us right away that we cannot
+ * reach this recipient by returning an error code.
+ * So, when this happens let's "down" the host NOW so
* we don't sit around waiting for this host to timeout later.
*/
- if (call && code == -ENETUNREACH)
- call->lastReceiveTime = 0;
+ if (call &&
+#ifdef AFS_NT40_ENV
+ ((code == -1 && WSAGetLastError() == WSAEHOSTUNREACH) || (code == -WSAEHOSTUNREACH))
+#elif defined(AFS_LINUX20_ENV)
+ code == -ENETUNREACH
+#elif defined(AFS_DARWIN_ENV)
+ code == EHOSTUNREACH
+#else
+ 0
#endif
+ )
+ call->lastReceiveTime = 0;
}
#ifdef KERNEL
#ifdef RX_KERNEL_TRACE
AFS_GLOCK();
#endif
#endif
- AFS_RXGLOCK();
#ifdef RXDEBUG
}
- dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %x resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], peer->host, peer->port, p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (int)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+ dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%.3d len %d",
+ deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host),
+ ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber,
+ p->header.seq, p->header.flags, p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetsSent[p->header.type - 1]++;
- MUTEX_EXIT(&rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
MUTEX_ENTER(&peer->peer_lock);
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);
int waslocked;
#endif
struct sockaddr_in addr;
- register struct rx_peer *peer = conn->peer;
+ struct rx_peer *peer = conn->peer;
osi_socket socket;
struct rx_packet *p = NULL;
struct iovec wirevec[RX_MAXIOVECS];
MUTEX_ENTER(&conn->conn_data_lock);
serial = conn->serial;
conn->serial += len;
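+ /* as in the single-packet send path: remember the largest oversized
+ * packet in this batch for later path-MTU handling */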
+ for (i = 0; i < len; i++) {
+ p = list[i];
+ if (p->length > conn->peer->maxPacketSize) {
+ if ((p->header.seq != 0) &&
+ ((i == 0) || (p->length >= conn->lastPacketSize))) {
+ conn->lastPacketSize = p->length;
+ conn->lastPacketSizeSeq = p->header.seq;
+ }
+ }
+ }
MUTEX_EXIT(&conn->conn_data_lock);
* blocking socket, but unfortunately the interface doesn't
* allow us to have the socket block in send mode, and not
* block in receive mode */
- AFS_RXGUNLOCK();
#if defined(AFS_SUN5_ENV) && defined(KERNEL)
waslocked = ISAFS_GLOCK();
if (!istack && waslocked)
osi_NetSend(socket, &addr, &wirevec[0], len + 1, length,
istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.netSendFailures++;
- MUTEX_EXIT(&rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
for (i = 0; i < len; i++) {
p = list[i];
p->retryTime = p->timeSent; /* resend it very soon */
clock_Addmsec(&(p->retryTime),
10 + (((afs_uint32) p->backoff) << 8));
}
-#if defined(KERNEL) && defined(AFS_LINUX20_ENV)
- /* Linux is nice -- it can tell us right away that we cannot
- * reach this recipient by returning an ENETUNREACH error
- * code. So, when this happens let's "down" the host NOW so
+ /* Some systems are nice and tell us right away that we cannot
+ * reach this recipient by returning an error code.
+ * So, when this happens let's "down" the host NOW so
* we don't sit around waiting for this host to timeout later.
*/
- if (call && code == -ENETUNREACH)
- call->lastReceiveTime = 0;
+ if (call &&
+#ifdef AFS_NT40_ENV
+ ((code == -1 && WSAGetLastError() == WSAEHOSTUNREACH) || (code == -WSAEHOSTUNREACH))
+#elif defined(AFS_LINUX20_ENV)
+ code == -ENETUNREACH
+#elif defined(AFS_DARWIN_ENV)
+ code == EHOSTUNREACH
+#else
+ 0
#endif
+ )
+ call->lastReceiveTime = 0;
}
#if defined(AFS_SUN5_ENV) && defined(KERNEL)
if (!istack && waslocked)
AFS_GLOCK();
#endif
- AFS_RXGLOCK();
#ifdef RXDEBUG
}
assert(p != NULL);
- dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %x resend %d.%0.3d len %d",
- deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1],
- peer->host, peer->port, p->header.serial, p->header.epoch,
- p->header.cid, p->header.callNumber, p->header.seq, p->header.flags,
- (int)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+ dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%.3d len %d",
+ deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host),
+ ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber,
+ p->header.seq, p->header.flags, p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetsSent[p->header.type - 1]++;
- MUTEX_EXIT(&rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
MUTEX_ENTER(&peer->peer_lock);
-
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);
}
* in rx.h. Bug: there's a lot of duplication between this and other
* routines. This needs to be cleaned up. */
struct rx_packet *
-rxi_SendSpecial(register struct rx_call *call,
- register struct rx_connection *conn,
+rxi_SendSpecial(struct rx_call *call,
+ struct rx_connection *conn,
struct rx_packet *optionalPacket, int type, char *data,
int nbytes, int istack)
{
/* Some of the following stuff should be common code for all
* packet sends (it's repeated elsewhere) */
- register struct rx_packet *p;
+ struct rx_packet *p;
unsigned int i = 0;
int savelen = 0, saven = 0;
int channel, callNumber;
* the net byte order representation in the wire representation of the
* packet, which is what is actually sent out on the wire) */
void
-rxi_EncodePacketHeader(register struct rx_packet *p)
+rxi_EncodePacketHeader(struct rx_packet *p)
{
- register afs_uint32 *buf = (afs_uint32 *) (p->wirevec[0].iov_base); /* MTUXXX */
+ afs_uint32 *buf = (afs_uint32 *) (p->wirevec[0].iov_base); /* MTUXXX */
- memset((char *)buf, 0, RX_HEADER_SIZE);
+ memset(buf, 0, RX_HEADER_SIZE);
*buf++ = htonl(p->header.epoch);
*buf++ = htonl(p->header.cid);
*buf++ = htonl(p->header.callNumber);
/* Decode the packet's header (from net byte order to a struct header) */
void
-rxi_DecodePacketHeader(register struct rx_packet *p)
+rxi_DecodePacketHeader(struct rx_packet *p)
{
- register afs_uint32 *buf = (afs_uint32 *) (p->wirevec[0].iov_base); /* MTUXXX */
+ afs_uint32 *buf = (afs_uint32 *) (p->wirevec[0].iov_base); /* MTUXXX */
afs_uint32 temp;
p->header.epoch = ntohl(*buf);
}
void
-rxi_PrepareSendPacket(register struct rx_call *call,
- register struct rx_packet *p, register int last)
+rxi_PrepareSendPacket(struct rx_call *call,
+ struct rx_packet *p, int last)
{
- register struct rx_connection *conn = call->conn;
- int i, j;
- ssize_t len; /* len must be a signed type; it can go negative */
+ struct rx_connection *conn = call->conn;
+ unsigned int i;
+ afs_int32 len; /* len must be a signed type; it can go negative */
p->flags &= ~RX_PKTFLAG_ACKED;
p->header.cid = (conn->cid | call->channel);
p->header.serviceId = conn->serviceId;
p->header.securityIndex = conn->securityIndex;
+
+ /* No data packets on call 0. Where do these come from? */
+ if (*call->callNumber == 0)
+ *call->callNumber = 1;
+
p->header.callNumber = *call->callNumber;
p->header.seq = call->tnext++;
p->header.epoch = conn->epoch;
}
if (len > 0) {
osi_Panic("PrepareSendPacket 1\n"); /* MTUXXX */
- } else {
+ } else if (i < p->niovecs) {
/* Free any extra elements in the wirevec */
- for (j = MAX(2, i); j < p->niovecs; j++) {
- rxi_freeCBuf(RX_CBUF_TO_PACKET(p->wirevec[j].iov_base, p));
- }
- p->niovecs = i;
- p->wirevec[i - 1].iov_len += len;
+#if defined(RX_ENABLE_TSFPQ)
+ rxi_FreeDataBufsTSFPQ(p, i, 1 /* allow global pool flush if overquota */);
+#else /* !RX_ENABLE_TSFPQ */
+ MUTEX_ENTER(&rx_freePktQ_lock);
+ rxi_FreeDataBufsNoLock(p, i);
+ MUTEX_EXIT(&rx_freePktQ_lock);
+#endif /* !RX_ENABLE_TSFPQ */
+
+ p->niovecs = i;
}
+ if (len)
+ p->wirevec[i - 1].iov_len += len;
RXS_PreparePacket(conn->securityObject, call, p);
}
int adjMTU;
int frags;
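+ /* if neither side ever uses jumbograms, there is nothing to adjust */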
+ if (rxi_nRecvFrags == 1 && rxi_nSendFrags == 1)
+ return mtu;
adjMTU = RX_HEADER_SIZE + RX_JUMBOBUFFERSIZE + RX_JUMBOHEADERSIZE;
if (mtu <= adjMTU) {
return mtu;
}
return (2 + (maxMTU / (RX_JUMBOBUFFERSIZE + RX_JUMBOHEADERSIZE)));
}
+
+#ifndef KERNEL
+/*
+ * This function can be used by the Windows Cache Manager
+ * to dump the list of all rx packets so that we can determine
+ * where the packet leakage is.
+ */
+int rx_DumpPackets(FILE *outputFile, char *cookie)
+{
+#ifdef RXDEBUG_PACKET
+ struct rx_packet *p;
+#ifdef AFS_NT40_ENV
+ int zilch;
+ char output[2048];
+#define RXDPRINTF sprintf
+#define RXDPRINTOUT output
+#else
+#define RXDPRINTF fprintf
+#define RXDPRINTOUT outputFile
+#endif
+
+ NETPRI;
+ MUTEX_ENTER(&rx_freePktQ_lock);
+ RXDPRINTF(RXDPRINTOUT, "%s - Start dumping all Rx Packets - count=%u\r\n", cookie, rx_packet_id);
+#ifdef AFS_NT40_ENV
+ WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
+
+ for (p = rx_mallocedP; p; p = p->allNextp) {
+ RXDPRINTF(RXDPRINTOUT, "%s - packet=0x%p, id=%u, firstSent=%u.%08u, timeSent=%u.%08u, retryTime=%u.%08u, firstSerial=%u, niovecs=%u, flags=0x%x, backoff=%u, length=%u header: epoch=%u, cid=%u, callNum=%u, seq=%u, serial=%u, type=%u, flags=0x%x, userStatus=%u, securityIndex=%u, serviceId=%u\r\n",
+ cookie, p, p->packetId, p->firstSent.sec, p->firstSent.usec, p->timeSent.sec, p->timeSent.usec, p->retryTime.sec, p->retryTime.usec,
+ p->firstSerial, p->niovecs, (afs_uint32)p->flags, (afs_uint32)p->backoff, (afs_uint32)p->length,
+ p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.serial,
+ (afs_uint32)p->header.type, (afs_uint32)p->header.flags, (afs_uint32)p->header.userStatus,
+ (afs_uint32)p->header.securityIndex, (afs_uint32)p->header.serviceId);
+#ifdef AFS_NT40_ENV
+ WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
+ }
+
+ RXDPRINTF(RXDPRINTOUT, "%s - End dumping all Rx Packets\r\n", cookie);
+#ifdef AFS_NT40_ENV
+ WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
+
+ MUTEX_EXIT(&rx_freePktQ_lock);
+ USERPRI;
+#endif /* RXDEBUG_PACKET */
+ return 0;
+}
+#endif