rx-slow-write-packet-20090126
diff --git a/src/rx/rx_packet.c b/src/rx/rx_packet.c
index e93df2e..c11fbb9 100644
--- a/src/rx/rx_packet.c
+++ b/src/rx/rx_packet.c
@@ -18,96 +18,89 @@ RCSID
     ("$Header$");
 
 #ifdef KERNEL
-#if defined(UKERNEL)
-#include "afs/sysincludes.h"
-#include "afsincludes.h"
-#include "rx/rx_kcommon.h"
-#include "rx/rx_clock.h"
-#include "rx/rx_queue.h"
-#include "rx/rx_packet.h"
-#else /* defined(UKERNEL) */
-#ifdef RX_KERNEL_TRACE
-#include "../rx/rx_kcommon.h"
-#endif
-#include "h/types.h"
-#ifndef AFS_LINUX20_ENV
-#include "h/systm.h"
-#endif
-#if defined(AFS_SGI_ENV) || defined(AFS_HPUX110_ENV)
-#include "afs/sysincludes.h"
-#endif
-#if defined(AFS_OBSD_ENV)
-#include "h/proc.h"
-#endif
-#include "h/socket.h"
-#if !defined(AFS_SUN5_ENV) &&  !defined(AFS_LINUX20_ENV) && !defined(AFS_HPUX110_ENV)
-#if    !defined(AFS_OSF_ENV) && !defined(AFS_AIX41_ENV)
-#include "sys/mount.h"         /* it gets pulled in by something later anyway */
-#endif
-#include "h/mbuf.h"
-#endif
-#include "netinet/in.h"
-#include "afs/afs_osi.h"
-#include "rx_kmutex.h"
-#include "rx/rx_clock.h"
-#include "rx/rx_queue.h"
-#ifdef AFS_SUN5_ENV
-#include <sys/sysmacros.h>
-#endif
-#include "rx/rx_packet.h"
-#endif /* defined(UKERNEL) */
-#include "rx/rx_globals.h"
+# if defined(UKERNEL)
+#  include "afs/sysincludes.h"
+#  include "afsincludes.h"
+#  include "rx/rx_kcommon.h"
+#  include "rx/rx_clock.h"
+#  include "rx/rx_queue.h"
+#  include "rx/rx_packet.h"
+# else /* defined(UKERNEL) */
+#  ifdef RX_KERNEL_TRACE
+#   include "../rx/rx_kcommon.h"
+#  endif
+#  include "h/types.h"
+#  ifndef AFS_LINUX20_ENV
+#   include "h/systm.h"
+#  endif
+#  if defined(AFS_SGI_ENV) || defined(AFS_HPUX110_ENV)
+#   include "afs/sysincludes.h"
+#  endif
+#  if defined(AFS_OBSD_ENV)
+#   include "h/proc.h"
+#  endif
+#  include "h/socket.h"
+#  if !defined(AFS_SUN5_ENV) &&  !defined(AFS_LINUX20_ENV) && !defined(AFS_HPUX110_ENV)
+#   if !defined(AFS_OSF_ENV) && !defined(AFS_AIX41_ENV)
+#    include "sys/mount.h"             /* it gets pulled in by something later anyway */
+#   endif
+#   include "h/mbuf.h"
+#  endif
+#  include "netinet/in.h"
+#  include "afs/afs_osi.h"
+#  include "rx_kmutex.h"
+#  include "rx/rx_clock.h"
+#  include "rx/rx_queue.h"
+#  ifdef       AFS_SUN5_ENV
+#   include <sys/sysmacros.h>
+#  endif
+#  include "rx/rx_packet.h"
+# endif /* defined(UKERNEL) */
+# include "rx/rx_internal.h"
+# include "rx/rx_globals.h"
 #else /* KERNEL */
-#include "sys/types.h"
-#include <sys/stat.h>
-#include <errno.h>
-#if defined(AFS_NT40_ENV) || defined(AFS_DJGPP_ENV)
-#ifdef AFS_NT40_ENV
-#include <winsock2.h>
-#ifndef EWOULDBLOCK
-#define EWOULDBLOCK WSAEWOULDBLOCK
-#endif
-#else
-#include <sys/socket.h>
-#include <netinet/in.h>
-#endif /* AFS_NT40_ENV */
-#include "rx_user.h"
-#include "rx_xmit_nt.h"
-#include <stdlib.h>
-#else
-#include <sys/socket.h>
-#include <netinet/in.h>
-#endif
-#include "rx_clock.h"
-#include "rx.h"
-#include "rx_queue.h"
-#ifdef AFS_SUN5_ENV
-#include <sys/sysmacros.h>
-#endif
-#include "rx_packet.h"
-#include "rx_globals.h"
-#include <lwp.h>
-#include <assert.h>
-#ifdef HAVE_STRING_H
-#include <string.h>
-#else
-#ifdef HAVE_STRINGS_H
-#include <strings.h>
-#endif
-#endif
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
+# include "sys/types.h"
+# include <sys/stat.h>
+# include <errno.h>
+# if defined(AFS_NT40_ENV) 
+#  include <winsock2.h>
+#  ifndef EWOULDBLOCK
+#   define EWOULDBLOCK WSAEWOULDBLOCK
+#  endif
+#  include "rx_user.h"
+#  include "rx_xmit_nt.h"
+#  include <stdlib.h>
+# else
+#  include <sys/socket.h>
+#  include <netinet/in.h>
+# endif
+# include "rx_clock.h"
+# include "rx_internal.h"
+# include "rx.h"
+# include "rx_queue.h"
+# ifdef        AFS_SUN5_ENV
+#  include <sys/sysmacros.h>
+# endif
+# include "rx_packet.h"
+# include "rx_globals.h"
+# include <lwp.h>
+# include <assert.h>
+# include <string.h>
+# ifdef HAVE_UNISTD_H
+#  include <unistd.h>
+# endif
 #endif /* KERNEL */
 
 #ifdef RX_LOCKS_DB
 /* rxdb_fileID is used to identify the lock location, along with line#. */
 static int rxdb_fileID = RXDB_FILE_RX_PACKET;
 #endif /* RX_LOCKS_DB */
-struct rx_packet *rx_mallocedP = 0;
+static struct rx_packet *rx_mallocedP = 0;
+#ifdef RXDEBUG_PACKET
+static afs_uint32       rx_packet_id = 0;
+#endif
 
 extern char cml_version_number[];
-extern int (*rx_almostSent) ();
 
 static int AllocPacketBufs(int class, int num_pkts, struct rx_queue *q);
 
@@ -115,8 +108,13 @@ static void rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
                                afs_int32 ahost, short aport,
                                afs_int32 istack);
 
-static int rxi_FreeDataBufsToQueue(struct rx_packet *p, int first, 
+static int rxi_FreeDataBufsToQueue(struct rx_packet *p, 
+                                  afs_uint32 first, 
                                   struct rx_queue * q);
+#ifdef RX_ENABLE_TSFPQ
+static int
+rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first, int flush_global);
+#endif
 
 /* some rules about packets:
  * 1.  When a packet is allocated, the final iov_buf contains room for
@@ -232,7 +230,7 @@ rx_SlowWritePacket(struct rx_packet * packet, int offset, int resid, char *in)
      * offset only applies to the first iovec.
      */
     r = resid;
-    while ((resid > 0) && (i < RX_MAXWVECS)) {
+    while ((resid > 0) && (i <= RX_MAXWVECS)) {
        if (i >= packet->niovecs)
            if (rxi_AllocDataBuf(packet, resid, RX_PACKET_CLASS_SEND_CBUF) > 0) /* ++niovecs as a side-effect */
                break;
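
The one-character change above is the fix this delta is named for: wirevec[] has
RX_MAXWVECS + 1 slots, with slot 0 holding the Rx header, so the last usable data
iovec sits at index RX_MAXWVECS and the old strict-less-than test made
rx_SlowWritePacket give up one iovec early. A minimal sketch of the boundary
(the RX_MAXIOVECS value is assumed from rx_packet.h):

    #include <stdio.h>

    #define RX_MAXIOVECS 16                  /* assumed; see rx_packet.h */
    #define RX_MAXWVECS  (RX_MAXIOVECS - 1)  /* wirevec[0] is the header */

    int main(void)
    {
        int i, pre = 0, post = 0;
        for (i = 1; i < RX_MAXWVECS; i++)    /* old bound: stops at 14 */
            pre++;
        for (i = 1; i <= RX_MAXWVECS; i++)   /* new bound: reaches 15 */
            post++;
        printf("data iovecs reachable: before=%d after=%d\n", pre, post);
        return 0;
    }
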
@@ -268,9 +266,8 @@ rxi_AllocPackets(int class, int num_pkts, struct rx_queue * q)
 static int
 AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
 {
-    register struct rx_packet *c;
     register struct rx_ts_info_t * rx_ts_info;
-    int transfer, alloc;
+    int transfer;
     SPLVAR;
 
     RX_TS_INFO_GET(rx_ts_info);
@@ -279,16 +276,10 @@ AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
     if (transfer > 0) {
         NETPRI;
         MUTEX_ENTER(&rx_freePktQ_lock);
-       
-       if ((transfer + rx_TSFPQGlobSize) <= rx_nFreePackets) {
-           transfer += rx_TSFPQGlobSize;
-       } else if (transfer <= rx_nFreePackets) {
-           transfer = rx_nFreePackets;
-       } else {
+       transfer = MAX(transfer, rx_TSFPQGlobSize);
+       if (transfer > rx_nFreePackets) {
            /* alloc enough for us, plus a few globs for other threads */
-           alloc = transfer + (3 * rx_TSFPQGlobSize) - rx_nFreePackets;
-           rxi_MorePacketsNoLock(MAX(alloc, rx_initSendWindow));
-           transfer += rx_TSFPQGlobSize;
+           rxi_MorePacketsNoLock(transfer + 4 * rx_initSendWindow);
        }
 
        RX_TS_FPQ_GTOL2(rx_ts_info, transfer);
@@ -297,7 +288,7 @@ AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
        USERPRI;
     }
 
-    RX_TS_FPQ_CHECKOUT2(rx_ts_info, num_pkts, q);
+    RX_TS_FPQ_QCHECKOUT(rx_ts_info, num_pkts, q);
 
     return num_pkts;
 }
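
For readers comparing the removed branches with the new MAX() form: the
thread-local allocator now always pulls at least one glob from the global pool,
and if the global free list cannot cover that, it grows the list by
transfer + 4 * rx_initSendWindow in one shot. A minimal sketch of the sizing
rule, not the committed code:

    /* How many packets to move from the global to the local pool for a
     * given deficit; glob_size plays the role of rx_TSFPQGlobSize. */
    static int transfer_size(int deficit, int glob_size)
    {
        return deficit > glob_size ? deficit : glob_size;
    }
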
@@ -306,7 +297,10 @@ static int
 AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
 {
     struct rx_packet *c;
-    int i, overq = 0;
+    int i;
+#ifdef KERNEL
+    int overq = 0;
+#endif
     SPLVAR;
 
     NETPRI;
@@ -319,25 +313,25 @@ AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
 
     if (overq) {
        rxi_NeedMorePackets = TRUE;
-       MUTEX_ENTER(&rx_stats_mutex);
-       switch (class) {
-       case RX_PACKET_CLASS_RECEIVE:
-           rx_stats.receivePktAllocFailures++;
-           break;
-       case RX_PACKET_CLASS_SEND:
-           rx_stats.sendPktAllocFailures++;
-           break;
-       case RX_PACKET_CLASS_SPECIAL:
-           rx_stats.specialPktAllocFailures++;
-           break;
-       case RX_PACKET_CLASS_RECV_CBUF:
-           rx_stats.receiveCbufPktAllocFailures++;
-           break;
-       case RX_PACKET_CLASS_SEND_CBUF:
-           rx_stats.sendCbufPktAllocFailures++;
-           break;
+        if (rx_stats_active) {
+            switch (class) {
+            case RX_PACKET_CLASS_RECEIVE:
+                rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND:
+                rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SPECIAL:
+                rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_RECV_CBUF:
+                rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND_CBUF:
+                rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            }
        }
-       MUTEX_EXIT(&rx_stats_mutex);
     }
 
     if (rx_nFreePackets < num_pkts)
@@ -349,7 +343,7 @@ AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
     }
 #else /* KERNEL */
     if (rx_nFreePackets < num_pkts) {
-        rxi_MorePacketsNoLock(MAX((num_pkts-rx_nFreePackets), rx_initSendWindow));
+       rxi_MorePacketsNoLock(MAX((num_pkts-rx_nFreePackets), 4 * rx_initSendWindow));
     }
 #endif /* KERNEL */
 
@@ -390,16 +384,16 @@ rxi_FreePackets(int num_pkts, struct rx_queue * q)
 
     if (!num_pkts) {
        for (queue_Scan(q, c, nc, rx_packet), num_pkts++) {
-           rxi_FreeDataBufsTSFPQ(c, 1, 0);
+           rxi_FreeDataBufsTSFPQ(c, 2, 0);
        }
     } else {
        for (queue_Scan(q, c, nc, rx_packet)) {
-           rxi_FreeDataBufsTSFPQ(c, 1, 0);
+           rxi_FreeDataBufsTSFPQ(c, 2, 0);
        }
     }
 
     if (num_pkts) {
-       RX_TS_FPQ_CHECKIN2(rx_ts_info, num_pkts, q);
+       RX_TS_FPQ_QCHECKIN(rx_ts_info, num_pkts, q);
     }
 
     if (rx_ts_info->_FPQ.len > rx_TSFPQLocalMax) {
@@ -548,17 +542,36 @@ rxi_MorePackets(int apackets)
     SPLVAR;
 
     getme = apackets * sizeof(struct rx_packet);
-    p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
+    p = (struct rx_packet *)osi_Alloc(getme);
+    osi_Assert(p);
 
     PIN(p, getme);             /* XXXXX */
     memset((char *)p, 0, getme);
     RX_TS_INFO_GET(rx_ts_info);
 
+    RX_TS_FPQ_LOCAL_ALLOC(rx_ts_info,apackets);
+    /* TSFPQ patch also needs to keep track of total packets */
+
+    MUTEX_ENTER(&rx_packets_mutex);
+    rx_nPackets += apackets;
+    RX_TS_FPQ_COMPUTE_LIMITS;
+    MUTEX_EXIT(&rx_packets_mutex);
+
     for (e = p + apackets; p < e; p++) {
         RX_PACKET_IOV_INIT(p);
        p->niovecs = 2;
 
        RX_TS_FPQ_CHECKIN(rx_ts_info,p);
+
+        NETPRI;
+        MUTEX_ENTER(&rx_freePktQ_lock);
+#ifdef RXDEBUG_PACKET
+        p->packetId = rx_packet_id++;
+        p->allNextp = rx_mallocedP;
+#endif /* RXDEBUG_PACKET */
+        rx_mallocedP = p;
+        MUTEX_EXIT(&rx_freePktQ_lock);
+        USERPRI;
     }
     rx_ts_info->_FPQ.delta += apackets;
 
@@ -583,7 +596,8 @@ rxi_MorePackets(int apackets)
     SPLVAR;
 
     getme = apackets * sizeof(struct rx_packet);
-    p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
+    p = (struct rx_packet *)osi_Alloc(getme);
+    osi_Assert(p);
 
     PIN(p, getme);             /* XXXXX */
     memset((char *)p, 0, getme);
@@ -596,7 +610,13 @@ rxi_MorePackets(int apackets)
        p->niovecs = 2;
 
        queue_Append(&rx_freePacketQueue, p);
+#ifdef RXDEBUG_PACKET
+        p->packetId = rx_packet_id++;
+        p->allNextp = rx_mallocedP;
+#endif /* RXDEBUG_PACKET */
+       rx_mallocedP = p;
     }
+
     rx_nFreePackets += apackets;
     rxi_NeedMorePackets = FALSE;
     rxi_PacketsUnWait();
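
The RXDEBUG_PACKET lines added in the last two hunks are new bookkeeping: every
packet gets a serial id and is threaded onto a global singly linked list
(rx_mallocedP, via allNextp), under rx_freePktQ_lock in the TSFPQ path, so that
rx_DumpPackets() can later walk all allocations. The pattern in isolation, with
illustrative names:

    struct pkt {
        unsigned int id;
        struct pkt *allNextp;         /* next packet ever allocated */
    };

    static struct pkt *all_packets;   /* stands in for rx_mallocedP */
    static unsigned int next_id;      /* stands in for rx_packet_id */

    /* Caller holds the free-queue lock, as in the diff above. */
    static void track_packet(struct pkt *p)
    {
        p->id = next_id++;
        p->allNextp = all_packets;
        all_packets = p;
    }
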
@@ -616,17 +636,33 @@ rxi_MorePacketsTSFPQ(int apackets, int flush_global, int num_keep_local)
     SPLVAR;
 
     getme = apackets * sizeof(struct rx_packet);
-    p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
+    p = (struct rx_packet *)osi_Alloc(getme);
 
     PIN(p, getme);             /* XXXXX */
     memset((char *)p, 0, getme);
     RX_TS_INFO_GET(rx_ts_info);
 
+    RX_TS_FPQ_LOCAL_ALLOC(rx_ts_info,apackets);
+    /* TSFPQ patch also needs to keep track of total packets */
+    MUTEX_ENTER(&rx_packets_mutex);
+    rx_nPackets += apackets;
+    RX_TS_FPQ_COMPUTE_LIMITS;
+    MUTEX_EXIT(&rx_packets_mutex);
+
     for (e = p + apackets; p < e; p++) {
         RX_PACKET_IOV_INIT(p);
        p->niovecs = 2;
-
        RX_TS_FPQ_CHECKIN(rx_ts_info,p);
+       
+        NETPRI;
+        MUTEX_ENTER(&rx_freePktQ_lock);
+#ifdef RXDEBUG_PACKET
+        p->packetId = rx_packet_id++;
+        p->allNextp = rx_mallocedP;
+#endif /* RXDEBUG_PACKET */
+        rx_mallocedP = p;
+        MUTEX_EXIT(&rx_freePktQ_lock);
+        USERPRI;
     }
     rx_ts_info->_FPQ.delta += apackets;
 
@@ -650,6 +686,9 @@ rxi_MorePacketsTSFPQ(int apackets, int flush_global, int num_keep_local)
 void
 rxi_MorePacketsNoLock(int apackets)
 {
+#ifdef RX_ENABLE_TSFPQ
+    register struct rx_ts_info_t * rx_ts_info;
+#endif /* RX_ENABLE_TSFPQ */
     struct rx_packet *p, *e;
     int getme;
 
@@ -657,26 +696,41 @@ rxi_MorePacketsNoLock(int apackets)
      * to hold maximal amounts of data */
     apackets += (apackets / 4)
        * ((rx_maxJumboRecvSize - RX_FIRSTBUFFERSIZE) / RX_CBUFFERSIZE);
-    getme = apackets * sizeof(struct rx_packet);
-    p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
-
+    do {
+        getme = apackets * sizeof(struct rx_packet);
+        p = (struct rx_packet *)osi_Alloc(getme);
+       if (p == NULL) {
+            apackets -= apackets / 4;
+            osi_Assert(apackets > 0);
+        }
+    } while(p == NULL);
     memset((char *)p, 0, getme);
 
+#ifdef RX_ENABLE_TSFPQ
+    RX_TS_INFO_GET(rx_ts_info);
+    RX_TS_FPQ_GLOBAL_ALLOC(rx_ts_info,apackets);
+#endif /* RX_ENABLE_TSFPQ */ 
+
     for (e = p + apackets; p < e; p++) {
         RX_PACKET_IOV_INIT(p);
        p->flags |= RX_PKTFLAG_FREE;
        p->niovecs = 2;
 
        queue_Append(&rx_freePacketQueue, p);
+#ifdef RXDEBUG_PACKET
+        p->packetId = rx_packet_id++;
+        p->allNextp = rx_mallocedP;
+#endif /* RXDEBUG_PACKET */
+       rx_mallocedP = p;
     }
 
     rx_nFreePackets += apackets;
 #ifdef RX_ENABLE_TSFPQ
     /* TSFPQ patch also needs to keep track of total packets */
-    MUTEX_ENTER(&rx_stats_mutex);
+    MUTEX_ENTER(&rx_packets_mutex);
     rx_nPackets += apackets;
     RX_TS_FPQ_COMPUTE_LIMITS;
-    MUTEX_EXIT(&rx_stats_mutex);
+    MUTEX_EXIT(&rx_packets_mutex);
 #endif /* RX_ENABLE_TSFPQ */
     rxi_NeedMorePackets = FALSE;
     rxi_PacketsUnWait();
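
rxi_MorePacketsNoLock() now tolerates allocation failure instead of writing
through a NULL return: it retries with a request about a quarter smaller each
time, asserting only if the request shrinks to nothing. The same back-off shape
in isolation; malloc stands in for osi_Alloc, and the shrink step is floored at
one so the loop always makes progress (the diff relies on the assert instead):

    #include <assert.h>
    #include <stdlib.h>

    /* Allocate n objects of size sz, shrinking the request on failure. */
    static void *alloc_backoff(size_t sz, int n)
    {
        void *p;
        do {
            p = malloc((size_t)n * sz);
            if (p == NULL) {
                int step = n / 4 > 0 ? n / 4 : 1;
                n -= step;
                assert(n > 0);       /* mirrors osi_Assert in the diff */
            }
        } while (p == NULL);
        /* a real caller also needs the final n, as the diff uses apackets */
        return p;
    }
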
@@ -715,7 +769,7 @@ rxi_AdjustLocalPacketsTSFPQ(int num_keep_local, int allow_overcommit)
             if ((num_keep_local > rx_TSFPQLocalMax) && !allow_overcommit)
                 xfer = rx_TSFPQLocalMax - rx_ts_info->_FPQ.len;
             if (rx_nFreePackets < xfer) {
-                rxi_MorePacketsNoLock(xfer - rx_nFreePackets);
+               rxi_MorePacketsNoLock(MAX(xfer - rx_nFreePackets, 4 * rx_initSendWindow));
             }
             RX_TS_FPQ_GTOL2(rx_ts_info, xfer);
         }
@@ -806,41 +860,57 @@ rxi_FreePacketTSFPQ(struct rx_packet *p, int flush_global)
 }
 #endif /* RX_ENABLE_TSFPQ */
 
-/* free continuation buffers off a packet into a queue of buffers */
+/* 
+ * free continuation buffers off a packet into a queue
+ *
+ * [IN] p      -- packet from which continuation buffers will be freed
+ * [IN] first  -- iovec offset of first continuation buffer to free
+ * [IN] q      -- queue into which continuation buffers will be chained
+ *
+ * returns:
+ *   number of continuation buffers freed
+ */
+#ifndef RX_ENABLE_TSFPQ
 static int
-rxi_FreeDataBufsToQueue(struct rx_packet *p, int first, struct rx_queue * q)
+rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first, struct rx_queue * q)
 {
     struct iovec *iov;
+    struct rx_packet * cb;
     int count = 0;
 
-    if (first < 2)
-       first = 2;
-    for (; first < p->niovecs; first++, count++) {
+    for (first = MAX(2, first); first < p->niovecs; first++, count++) {
        iov = &p->wirevec[first];
        if (!iov->iov_base)
-           osi_Panic("rxi_PacketIOVToQueue: unexpected NULL iov");
-       queue_Append(q, RX_CBUF_TO_PACKET(iov->iov_base, p));
+           osi_Panic("rxi_FreeDataBufsToQueue: unexpected NULL iov");
+       cb = RX_CBUF_TO_PACKET(iov->iov_base, p);
+       RX_FPQ_MARK_FREE(cb);
+       queue_Append(q, cb);
     }
     p->length = 0;
     p->niovecs = 0;
 
     return count;
 }
+#endif
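
Note the clamp in the rewritten loop: iovecs 0 and 1 (wire header and local
data area) always stay with the packet, so first = MAX(2, first) makes any
smaller argument equivalent to 2. A hypothetical caller reflecting that
contract (it would have to live in this file, since the function is static):

    /* Strip p's continuation buffers onto cbq; p is assumed to hold some. */
    static int strip_cbufs(struct rx_packet *p, struct rx_queue *cbq)
    {
        queue_Init(cbq);
        return rxi_FreeDataBufsToQueue(p, 0, cbq);  /* 0 is clamped to 2 */
    }
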
 
+/*
+ * free packet continuation buffers into the global free packet pool
+ *
+ * [IN] p      -- packet from which to free continuation buffers
+ * [IN] first  -- iovec offset of first continuation buffer to free
+ *
+ * returns:
+ *   zero always
+ */
 int
-rxi_FreeDataBufsNoLock(struct rx_packet *p, int first)
+rxi_FreeDataBufsNoLock(struct rx_packet *p, afs_uint32 first)
 {
-    struct iovec *iov, *end;
+    struct iovec *iov;
 
-    if (first != 1)            /* MTUXXX */
-       osi_Panic("FreeDataBufs 1: first must be 1");
-    iov = &p->wirevec[1];
-    end = iov + (p->niovecs - 1);
-    if (iov->iov_base != (caddr_t) p->localdata)       /* MTUXXX */
-       osi_Panic("FreeDataBufs 2: vec 1 must be localdata");
-    for (iov++; iov < end; iov++) {
+    for (first = MAX(2, first); first < p->niovecs; first++) {
+       iov = &p->wirevec[first];
        if (!iov->iov_base)
-           osi_Panic("FreeDataBufs 3: vecs 2-niovecs must not be NULL");
+           osi_Panic("rxi_FreeDataBufsNoLock: unexpected NULL iov");
        rxi_FreePacketNoLock(RX_CBUF_TO_PACKET(iov->iov_base, p));
     }
     p->length = 0;
@@ -850,23 +920,31 @@ rxi_FreeDataBufsNoLock(struct rx_packet *p, int first)
 }
 
 #ifdef RX_ENABLE_TSFPQ
-int
-rxi_FreeDataBufsTSFPQ(struct rx_packet *p, int first, int flush_global)
+/*
+ * free packet continuation buffers into the thread-local free pool
+ *
+ * [IN] p             -- packet from which continuation buffers will be freed
+ * [IN] first         -- iovec offset of first continuation buffer to free
+ *                       any value less than 2, the min number of iovecs,
+ *                       is treated as if it is 2.
+ * [IN] flush_global  -- if nonzero, we will flush overquota packets to the
+ *                       global free pool before returning
+ *
+ * returns:
+ *   zero always
+ */
+static int
+rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first, int flush_global)
 {
-    struct iovec *iov, *end;
+    struct iovec *iov;
     register struct rx_ts_info_t * rx_ts_info;
 
     RX_TS_INFO_GET(rx_ts_info);
 
-    if (first != 1)            /* MTUXXX */
-       osi_Panic("FreeDataBufs 1: first must be 1");
-    iov = &p->wirevec[1];
-    end = iov + (p->niovecs - 1);
-    if (iov->iov_base != (caddr_t) p->localdata)       /* MTUXXX */
-       osi_Panic("FreeDataBufs 2: vec 1 must be localdata");
-    for (iov++; iov < end; iov++) {
+    for (first = MAX(2, first); first < p->niovecs; first++) {
+       iov = &p->wirevec[first];
        if (!iov->iov_base)
-           osi_Panic("FreeDataBufs 3: vecs 2-niovecs must not be NULL");
+           osi_Panic("rxi_FreeDataBufsTSFPQ: unexpected NULL iov");
        RX_TS_FPQ_CHECKIN(rx_ts_info,RX_CBUF_TO_PACKET(iov->iov_base, p));
     }
     p->length = 0;
@@ -1008,7 +1086,7 @@ rxi_TrimDataBufs(struct rx_packet *p, int first)
 void
 rxi_FreePacket(struct rx_packet *p)
 {
-    rxi_FreeDataBufsTSFPQ(p, 1, 0);
+    rxi_FreeDataBufsTSFPQ(p, 2, 0);
     rxi_FreePacketTSFPQ(p, RX_TS_FPQ_FLUSH_GLOBAL);
 }
 #else /* RX_ENABLE_TSFPQ */
@@ -1020,7 +1098,7 @@ rxi_FreePacket(struct rx_packet *p)
     NETPRI;
     MUTEX_ENTER(&rx_freePktQ_lock);
 
-    rxi_FreeDataBufsNoLock(p, 1);
+    rxi_FreeDataBufsNoLock(p, 2);
     rxi_FreePacketNoLock(p);
     /* Wakeup anyone waiting for packets */
     rxi_PacketsUnWait();
@@ -1046,33 +1124,31 @@ rxi_AllocPacketNoLock(int class)
 #ifdef KERNEL
     if (rxi_OverQuota(class)) {
        rxi_NeedMorePackets = TRUE;
-       MUTEX_ENTER(&rx_stats_mutex);
-       switch (class) {
-       case RX_PACKET_CLASS_RECEIVE:
-           rx_stats.receivePktAllocFailures++;
-           break;
-       case RX_PACKET_CLASS_SEND:
-           rx_stats.sendPktAllocFailures++;
-           break;
-       case RX_PACKET_CLASS_SPECIAL:
-           rx_stats.specialPktAllocFailures++;
-           break;
-       case RX_PACKET_CLASS_RECV_CBUF:
-           rx_stats.receiveCbufPktAllocFailures++;
-           break;
-       case RX_PACKET_CLASS_SEND_CBUF:
-           rx_stats.sendCbufPktAllocFailures++;
-           break;
+        if (rx_stats_active) {
+            switch (class) {
+            case RX_PACKET_CLASS_RECEIVE:
+                rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND:
+                rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SPECIAL:
+                rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_RECV_CBUF:
+                rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND_CBUF:
+                rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            }
        }
-       MUTEX_EXIT(&rx_stats_mutex);
-       return (struct rx_packet *)0;
+        return (struct rx_packet *)0;
     }
 #endif /* KERNEL */
 
-    MUTEX_ENTER(&rx_stats_mutex);
-    rx_stats.packetRequests++;
-    MUTEX_EXIT(&rx_stats_mutex);
-
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
     if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
 
 #ifdef KERNEL
@@ -1080,7 +1156,7 @@ rxi_AllocPacketNoLock(int class)
            osi_Panic("rxi_AllocPacket error");
 #else /* KERNEL */
         if (queue_IsEmpty(&rx_freePacketQueue))
-           rxi_MorePacketsNoLock(rx_initSendWindow);
+           rxi_MorePacketsNoLock(4 * rx_initSendWindow);
 #endif /* KERNEL */
 
 
@@ -1108,39 +1184,38 @@ rxi_AllocPacketNoLock(int class)
 #ifdef KERNEL
     if (rxi_OverQuota(class)) {
        rxi_NeedMorePackets = TRUE;
-       MUTEX_ENTER(&rx_stats_mutex);
-       switch (class) {
-       case RX_PACKET_CLASS_RECEIVE:
-           rx_stats.receivePktAllocFailures++;
-           break;
-       case RX_PACKET_CLASS_SEND:
-           rx_stats.sendPktAllocFailures++;
-           break;
-       case RX_PACKET_CLASS_SPECIAL:
-           rx_stats.specialPktAllocFailures++;
-           break;
-       case RX_PACKET_CLASS_RECV_CBUF:
-           rx_stats.receiveCbufPktAllocFailures++;
-           break;
-       case RX_PACKET_CLASS_SEND_CBUF:
-           rx_stats.sendCbufPktAllocFailures++;
-           break;
-       }
-       MUTEX_EXIT(&rx_stats_mutex);
+        if (rx_stats_active) {
+            switch (class) {
+            case RX_PACKET_CLASS_RECEIVE:
+                rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND:
+                rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SPECIAL:
+                rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_RECV_CBUF:
+                rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND_CBUF:
+                rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            }
+        }
        return (struct rx_packet *)0;
     }
 #endif /* KERNEL */
 
-    MUTEX_ENTER(&rx_stats_mutex);
-    rx_stats.packetRequests++;
-    MUTEX_EXIT(&rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
 
 #ifdef KERNEL
     if (queue_IsEmpty(&rx_freePacketQueue))
        osi_Panic("rxi_AllocPacket error");
 #else /* KERNEL */
     if (queue_IsEmpty(&rx_freePacketQueue))
-       rxi_MorePacketsNoLock(rx_initSendWindow);
+       rxi_MorePacketsNoLock(4 * rx_initSendWindow);
 #endif /* KERNEL */
 
     rx_nFreePackets--;
@@ -1169,15 +1244,13 @@ rxi_AllocPacketTSFPQ(int class, int pull_global)
 
     RX_TS_INFO_GET(rx_ts_info);
 
-    MUTEX_ENTER(&rx_stats_mutex);
-    rx_stats.packetRequests++;
-    MUTEX_EXIT(&rx_stats_mutex);
-
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
     if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
         MUTEX_ENTER(&rx_freePktQ_lock);
 
         if (queue_IsEmpty(&rx_freePacketQueue))
-            rxi_MorePacketsNoLock(rx_initSendWindow);
+           rxi_MorePacketsNoLock(4 * rx_initSendWindow);
 
        RX_TS_FPQ_GTOL(rx_ts_info);
 
@@ -1312,7 +1385,10 @@ rxi_AllocSendPacket(register struct rx_call *call, int want)
 }
 
 #ifndef KERNEL
-
+#ifdef AFS_NT40_ENV     
+/* Windows does not use file descriptors. */
+#define CountFDs(amax) 0
+#else
 /* count the number of used FDs */
 static int
 CountFDs(register int amax)
@@ -1329,7 +1405,7 @@ CountFDs(register int amax)
     }
     return count;
 }
-
+#endif /* AFS_NT40_ENV */
 #else /* KERNEL */
 
 #define CountFDs(amax) amax
@@ -1388,22 +1464,38 @@ rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * hos
 
     p->length = (nbytes - RX_HEADER_SIZE);
     if ((nbytes > tlen) || (p->length & 0x8000)) {     /* Bogus packet */
-       if (nbytes > 0)
-           rxi_MorePackets(rx_initSendWindow);
-       else if (nbytes < 0 && errno == EWOULDBLOCK) {
-           MUTEX_ENTER(&rx_stats_mutex);
-           rx_stats.noPacketOnRead++;
-           MUTEX_EXIT(&rx_stats_mutex);
-       } else {
-           MUTEX_ENTER(&rx_stats_mutex);
-           rx_stats.bogusPacketOnRead++;
-           rx_stats.bogusHost = from.sin_addr.s_addr;
-           MUTEX_EXIT(&rx_stats_mutex);
-           dpf(("B: bogus packet from [%x,%d] nb=%d", from.sin_addr.s_addr,
-                from.sin_port, nbytes));
+       if (nbytes < 0 && errno == EWOULDBLOCK) {
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
+       } else if (nbytes <= 0) {
+            if (rx_stats_active) {
+                MUTEX_ENTER(&rx_stats_mutex);
+                rx_stats.bogusPacketOnRead++;
+                rx_stats.bogusHost = from.sin_addr.s_addr;
+                MUTEX_EXIT(&rx_stats_mutex);
+            }
+           dpf(("B: bogus packet from [%x,%d] nb=%d", ntohl(from.sin_addr.s_addr),
+                ntohs(from.sin_port), nbytes));
        }
        return 0;
-    } else {
+    } 
+#ifdef RXDEBUG
+    else if ((rx_intentionallyDroppedOnReadPer100 > 0)
+               && (random() % 100 < rx_intentionallyDroppedOnReadPer100)) {
+       rxi_DecodePacketHeader(p);
+
+       *host = from.sin_addr.s_addr;
+       *port = from.sin_port;
+
+       dpf(("Dropped %d %s: %x.%u.%u.%u.%u.%u.%u flags %d len %d",
+             p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(*host), ntohs(*port), p->header.serial, 
+             p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, 
+             p->length));
+       rxi_TrimDataBufs(p, 1);
+       return 0;
+    } 
+#endif
+    else {
        /* Extract packet header. */
        rxi_DecodePacketHeader(p);
 
@@ -1411,9 +1503,8 @@ rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * hos
        *port = from.sin_port;
        if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
            struct rx_peer *peer;
-           MUTEX_ENTER(&rx_stats_mutex);
-           rx_stats.packetsRead[p->header.type - 1]++;
-           MUTEX_EXIT(&rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
            /*
             * Try to look up this peer structure.  If it doesn't exist,
             * don't create a new one - 
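
The RXDEBUG branch added above is fault injection on the receive path: when
rx_intentionallyDroppedOnReadPer100 is set, roughly that percentage of
otherwise-good packets is decoded, logged, and discarded. The test in
isolation, where per100 plays the role of the new global:

    #include <stdlib.h>

    /* Nonzero when an inbound packet should be dropped for testing. */
    static int should_drop(int per100)
    {
        return per100 > 0 && (random() % 100) < per100;
    }
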
@@ -1722,6 +1813,7 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
 #endif
            MUTEX_ENTER(&rx_serverPool_lock);
            tstat.nFreePackets = htonl(rx_nFreePackets);
+           tstat.nPackets = htonl(rx_nPackets);
            tstat.callsExecuted = htonl(rxi_nCalls);
            tstat.packetReclaims = htonl(rx_packetReclaims);
            tstat.usedFDs = CountFDs(64);
@@ -1956,6 +2048,7 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
                return ap;
 
            /* Since its all int32s convert to network order with a loop. */
+        if (rx_stats_active)
            MUTEX_ENTER(&rx_stats_mutex);
            s = (afs_int32 *) & rx_stats;
            for (i = 0; i < sizeof(rx_stats) / sizeof(afs_int32); i++, s++)
@@ -1963,6 +2056,7 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
 
            tl = ap->length;
            ap->length = sizeof(rx_stats);
+        if (rx_stats_active)
            MUTEX_EXIT(&rx_stats_mutex);
            rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
            ap->length = tl;
@@ -2178,31 +2272,28 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
             osi_NetSend(socket, &addr, p->wirevec, p->niovecs,
                         p->length + RX_HEADER_SIZE, istack)) != 0) {
            /* send failed, so let's hurry up the resend, eh? */
-           MUTEX_ENTER(&rx_stats_mutex);
-           rx_stats.netSendFailures++;
-           MUTEX_EXIT(&rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
            p->retryTime = p->timeSent; /* resend it very soon */
            clock_Addmsec(&(p->retryTime),
                          10 + (((afs_uint32) p->backoff) << 8));
-
-#ifdef AFS_NT40_ENV
-           /* Windows is nice -- it can tell us right away that we cannot
-            * reach this recipient by returning an WSAEHOSTUNREACH error
-            * code.  So, when this happens let's "down" the host NOW so
+           /* Some systems are nice and tell us right away that we cannot
+            * reach this recipient by returning an error code. 
+            * So, when this happens let's "down" the host NOW so
             * we don't sit around waiting for this host to timeout later.
             */
-               if (call && code == -1 && errno == WSAEHOSTUNREACH)
-                       call->lastReceiveTime = 0;
+           if (call && 
+#ifdef AFS_NT40_ENV
+               code == -1 && WSAGetLastError() == WSAEHOSTUNREACH
+#elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
+               code == -ENETUNREACH
+#elif defined(AFS_DARWIN_ENV) && defined(KERNEL)
+               code == EHOSTUNREACH
+#else
+               0
 #endif
-#if defined(KERNEL) && defined(AFS_LINUX20_ENV)
-           /* Linux is nice -- it can tell us right away that we cannot
-            * reach this recipient by returning an ENETUNREACH error
-            * code.  So, when this happens let's "down" the host NOW so
-            * we don't sit around waiting for this host to timeout later.
-            */
-           if (call && code == -ENETUNREACH)
+               )
                call->lastReceiveTime = 0;
-#endif
        }
 #ifdef KERNEL
 #ifdef RX_KERNEL_TRACE
@@ -2220,11 +2311,10 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
 #endif
 #ifdef RXDEBUG
     }
-    dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], peer->host, peer->port, p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+    dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
 #endif
-    MUTEX_ENTER(&rx_stats_mutex);
-    rx_stats.packetsSent[p->header.type - 1]++;
-    MUTEX_EXIT(&rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
     MUTEX_ENTER(&peer->peer_lock);
     hadd32(peer->bytesSent, p->length);
     MUTEX_EXIT(&peer->peer_lock);
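
The rewritten error handling folds the old per-platform blocks into one
conditional: any platform that reports an unreachable destination synchronously
(WSAEHOSTUNREACH on Windows, -ENETUNREACH in the Linux kernel, EHOSTUNREACH in
the Darwin kernel) downs the host immediately by zeroing call->lastReceiveTime;
everywhere else the test compiles to 0. A refactoring sketch of the idiom, not
code from the commit, assuming the same headers this file already pulls in:

    /* Nonzero when the osi_NetSend result means the peer is unreachable. */
    static int host_unreachable(int code)
    {
    #ifdef AFS_NT40_ENV
        return code == -1 && WSAGetLastError() == WSAEHOSTUNREACH;
    #elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
        return code == -ENETUNREACH;
    #elif defined(AFS_DARWIN_ENV) && defined(KERNEL)
        return code == EHOSTUNREACH;
    #else
        return 0;
    #endif
    }
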
@@ -2369,24 +2459,31 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
             osi_NetSend(socket, &addr, &wirevec[0], len + 1, length,
                         istack)) != 0) {
            /* send failed, so let's hurry up the resend, eh? */
-           MUTEX_ENTER(&rx_stats_mutex);
-           rx_stats.netSendFailures++;
-           MUTEX_EXIT(&rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
            for (i = 0; i < len; i++) {
                p = list[i];
                p->retryTime = p->timeSent;     /* resend it very soon */
                clock_Addmsec(&(p->retryTime),
                              10 + (((afs_uint32) p->backoff) << 8));
            }
-#if defined(KERNEL) && defined(AFS_LINUX20_ENV)
-           /* Linux is nice -- it can tell us right away that we cannot
-            * reach this recipient by returning an ENETUNREACH error
-            * code.  So, when this happens let's "down" the host NOW so
+           /* Some systems are nice and tell us right away that we cannot
+            * reach this recipient by returning an error code. 
+            * So, when this happens let's "down" the host NOW so
             * we don't sit around waiting for this host to timeout later.
             */
-           if (call && code == -ENETUNREACH)
-               call->lastReceiveTime = 0;
+           if (call && 
+#ifdef AFS_NT40_ENV
+               code == -1 && WSAGetLastError() == WSAEHOSTUNREACH
+#elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
+               code == -ENETUNREACH
+#elif defined(AFS_DARWIN_ENV) && defined(KERNEL)
+               code == EHOSTUNREACH
+#else
+               0
 #endif
+               )
+               call->lastReceiveTime = 0;
        }
 #if    defined(AFS_SUN5_ENV) && defined(KERNEL)
        if (!istack && waslocked)
@@ -2397,14 +2494,12 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
 
     assert(p != NULL);
 
-    dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], peer->host, peer->port, p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+    dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
 
 #endif
-    MUTEX_ENTER(&rx_stats_mutex);
-    rx_stats.packetsSent[p->header.type - 1]++;
-    MUTEX_EXIT(&rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
     MUTEX_ENTER(&peer->peer_lock);
-
     hadd32(peer->bytesSent, p->length);
     MUTEX_EXIT(&peer->peer_lock);
 }
@@ -2553,7 +2648,7 @@ rxi_PrepareSendPacket(register struct rx_call *call,
                      register struct rx_packet *p, register int last)
 {
     register struct rx_connection *conn = call->conn;
-    int i, j;
+    int i;
     ssize_t len;               /* len must be a signed type; it can go negative */
 
     p->flags &= ~RX_PKTFLAG_ACKED;
@@ -2591,22 +2686,20 @@ rxi_PrepareSendPacket(register struct rx_call *call,
     }
     if (len > 0) {
        osi_Panic("PrepareSendPacket 1\n");     /* MTUXXX */
-    } else {
-        struct rx_queue q;
-       int nb;
-
-       queue_Init(&q);
-
+    } else if (i < p->niovecs) {
        /* Free any extra elements in the wirevec */
-       for (j = MAX(2, i), nb = p->niovecs - j; j < p->niovecs; j++) {
-           queue_Append(&q,RX_CBUF_TO_PACKET(p->wirevec[j].iov_base, p));
-       }
-       if (nb)
-           rxi_FreePackets(nb, &q);
+#if defined(RX_ENABLE_TSFPQ)
+       rxi_FreeDataBufsTSFPQ(p, i, 1 /* allow global pool flush if overquota */);
+#else /* !RX_ENABLE_TSFPQ */
+        MUTEX_ENTER(&rx_freePktQ_lock);
+       rxi_FreeDataBufsNoLock(p, i);
+        MUTEX_EXIT(&rx_freePktQ_lock);
+#endif /* !RX_ENABLE_TSFPQ */
 
-       p->niovecs = i;
-       p->wirevec[i - 1].iov_len += len;
+        p->niovecs = i;
     }
+    if (len)
+        p->wirevec[i - 1].iov_len += len;
     RXS_PreparePacket(conn->securityObject, call, p);
 }
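
The replacement above drops the temporary queue: surplus continuation buffers
past the i iovecs actually used go straight back to the free pool (thread-local
with TSFPQ, otherwise under rx_freePktQ_lock), and the residual len, which is
zero or negative once the fill loop ends, shortens the last iovec in place. The
tail adjustment in isolation, with illustrative types:

    struct vec { void *base; long len; };   /* stand-in for struct iovec */

    /* used: iovecs actually filled; resid: leftover length, <= 0 here. */
    static void trim_tail(struct vec *v, int used, int *niovecs, long resid)
    {
        if (used < *niovecs)
            *niovecs = used;        /* surplus buffers were freed above */
        if (resid)
            v[used - 1].len += resid;
    }
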
 
@@ -2619,6 +2712,8 @@ rxi_AdjustIfMTU(int mtu)
     int adjMTU;
     int frags;
 
+    if (rxi_nRecvFrags == 1 && rxi_nSendFrags == 1)
+        return mtu;
     adjMTU = RX_HEADER_SIZE + RX_JUMBOBUFFERSIZE + RX_JUMBOHEADERSIZE;
     if (mtu <= adjMTU) {
        return mtu;
@@ -2662,3 +2757,42 @@ rxi_AdjustDgramPackets(int frags, int mtu)
     }
     return (2 + (maxMTU / (RX_JUMBOBUFFERSIZE + RX_JUMBOHEADERSIZE)));
 }
+
+#ifdef AFS_NT40_ENV
+/* 
+ * This function can be used by the Windows Cache Manager
+ * to dump the list of all rx packets so that we can determine
+ * where the packet leakage is.
+ */
+int rx_DumpPackets(FILE *outputFile, char *cookie)
+{
+#ifdef RXDEBUG_PACKET
+    int zilch;
+    struct rx_packet *p;
+    char output[2048];
+
+    NETPRI;
+    MUTEX_ENTER(&rx_freePktQ_lock);
+    sprintf(output, "%s - Start dumping all Rx Packets - count=%u\r\n", cookie, rx_packet_id);
+    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+
+    for (p = rx_mallocedP; p; p = p->allNextp) {
+        sprintf(output, "%s - packet=0x%p, id=%u, firstSent=%u.%08u, timeSent=%u.%08u, retryTime=%u.%08u, firstSerial=%u, niovecs=%u, flags=0x%x, backoff=%u, length=%u  header: epoch=%u, cid=%u, callNum=%u, seq=%u, serial=%u, type=%u, flags=0x%x, userStatus=%u, securityIndex=%u, serviceId=%u\r\n",
+                cookie, p, p->packetId, p->firstSent.sec, p->firstSent.usec, p->timeSent.sec, p->timeSent.usec, p->retryTime.sec, p->retryTime.usec, 
+                p->firstSerial, p->niovecs, (afs_uint32)p->flags, (afs_uint32)p->backoff, (afs_uint32)p->length,
+                p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.serial,
+                (afs_uint32)p->header.type, (afs_uint32)p->header.flags, (afs_uint32)p->header.userStatus, 
+                (afs_uint32)p->header.securityIndex, (afs_uint32)p->header.serviceId);
+        WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+    }
+
+    sprintf(output, "%s - End dumping all Rx Packets\r\n", cookie);
+    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+
+    MUTEX_EXIT(&rx_freePktQ_lock);
+    USERPRI;
+#endif /* RXDEBUG_PACKET */
+    return 0;
+}
+#endif /* AFS_NT40_ENV */
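
A hypothetical call site for the new dump hook. The prototype takes FILE *, but
the body above hands the pointer to WriteFile, so this revision appears to
expect a Win32 handle; the file name and cookie below are illustrative:

    #include <windows.h>
    #include <stdio.h>

    HANDLE h = CreateFileA("rx_packets.log", GENERIC_WRITE, 0, NULL,
                           CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
    if (h != INVALID_HANDLE_VALUE) {
        rx_DumpPackets((FILE *)h, "leak-check");  /* cookie tags each line */
        CloseHandle(h);
    }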
+