Rx: prevent rx_rpc_stats mutex from being a global bottleneck
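
This is the rx_packet.c portion of the change. Statistics counters in the packet allocation, read, and send paths are now updated only when rx_stats_active is set, and the packet-count bookkeeping (rx_nPackets plus RX_TS_FPQ_COMPUTE_LIMITS) moves off rx_stats_mutex onto a dedicated rx_packets_mutex. The debug-packet handlers switch the connection and peer hash table locks from RWLOCK_RDLOCK/RWLOCK_UNLOCK to MUTEX_ENTER/MUTEX_EXIT, and the GETPEER path now takes a peer reference plus peer_lock instead of holding the hash table lock across the copy-out. RXDEBUG_PACKET builds chain every allocated packet through packetId/allNextp and gain a userspace rx_DumpPackets() helper; the RCSID block and register keywords are dropped, and dpf() pointer formats use AFS_PTR_FMT.

A minimal standalone sketch of the locking pattern (plain pthreads, not OpenAFS code; every name below is illustrative): skip the stats lock entirely when statistics are disabled, and keep allocator bookkeeping under its own mutex so the stats mutex stops being a global chokepoint.

#include <pthread.h>

static int stats_active = 0;                                      /* ~ rx_stats_active */
static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER;    /* ~ rx_stats_mutex */
static pthread_mutex_t packets_lock = PTHREAD_MUTEX_INITIALIZER;  /* ~ rx_packets_mutex */
static unsigned long alloc_failures;
static unsigned long n_packets;

static void note_alloc_failure(void)
{
    if (!stats_active)                  /* fast path: no lock when stats are off */
        return;
    pthread_mutex_lock(&stats_lock);
    alloc_failures++;
    pthread_mutex_unlock(&stats_lock);
}

static void add_packets(int apackets)
{
    pthread_mutex_lock(&packets_lock);  /* bookkeeping no longer rides on the stats lock */
    n_packets += apackets;
    pthread_mutex_unlock(&packets_lock);
}
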
diff --git a/src/rx/rx_packet.c b/src/rx/rx_packet.c
index 60eb416..417de17 100644
 #include <afs/param.h>
 #endif
 
-RCSID
-    ("$Header$");
 
 #ifdef KERNEL
-# if defined(UKERNEL)
-#  include "afs/sysincludes.h"
-#  include "afsincludes.h"
-#  include "rx/rx_kcommon.h"
-#  include "rx/rx_clock.h"
-#  include "rx/rx_queue.h"
-#  include "rx/rx_packet.h"
-# else /* defined(UKERNEL) */
-#  ifdef RX_KERNEL_TRACE
-#   include "../rx/rx_kcommon.h"
-#  endif
-#  include "h/types.h"
-#  ifndef AFS_LINUX20_ENV
-#   include "h/systm.h"
-#  endif
-#  if defined(AFS_SGI_ENV) || defined(AFS_HPUX110_ENV)
-#   include "afs/sysincludes.h"
-#  endif
-#  if defined(AFS_OBSD_ENV)
-#   include "h/proc.h"
-#  endif
-#  include "h/socket.h"
-#  if !defined(AFS_SUN5_ENV) &&  !defined(AFS_LINUX20_ENV) && !defined(AFS_HPUX110_ENV)
-#   if !defined(AFS_OSF_ENV) && !defined(AFS_AIX41_ENV)
-#    include "sys/mount.h"             /* it gets pulled in by something later anyway */
-#   endif
-#   include "h/mbuf.h"
-#  endif
-#  include "netinet/in.h"
-#  include "afs/afs_osi.h"
-#  include "rx_kmutex.h"
-#  include "rx/rx_clock.h"
-#  include "rx/rx_queue.h"
-#  ifdef       AFS_SUN5_ENV
-#   include <sys/sysmacros.h>
-#  endif
-#  include "rx/rx_packet.h"
-# endif /* defined(UKERNEL) */
-# include "rx/rx_internal.h"
-# include "rx/rx_globals.h"
+#if defined(UKERNEL)
+#include "afs/sysincludes.h"
+#include "afsincludes.h"
+#include "rx/rx_kcommon.h"
+#include "rx/rx_clock.h"
+#include "rx/rx_queue.h"
+#include "rx/rx_packet.h"
+#else /* defined(UKERNEL) */
+#ifdef RX_KERNEL_TRACE
+#include "../rx/rx_kcommon.h"
+#endif
+#include "h/types.h"
+#ifndef AFS_LINUX20_ENV
+#include "h/systm.h"
+#endif
+#if defined(AFS_SGI_ENV) || defined(AFS_HPUX110_ENV)
+#include "afs/sysincludes.h"
+#endif
+#if defined(AFS_OBSD_ENV)
+#include "h/proc.h"
+#endif
+#include "h/socket.h"
+#if !defined(AFS_SUN5_ENV) &&  !defined(AFS_LINUX20_ENV) && !defined(AFS_HPUX110_ENV)
+#if    !defined(AFS_OSF_ENV) && !defined(AFS_AIX41_ENV)
+#include "sys/mount.h"         /* it gets pulled in by something later anyway */
+#endif
+#include "h/mbuf.h"
+#endif
+#include "netinet/in.h"
+#include "afs/afs_osi.h"
+#include "rx_kmutex.h"
+#include "rx/rx_clock.h"
+#include "rx/rx_queue.h"
+#ifdef AFS_SUN5_ENV
+#include <sys/sysmacros.h>
+#endif
+#include "rx/rx_packet.h"
+#endif /* defined(UKERNEL) */
+#include "rx/rx_globals.h"
 #else /* KERNEL */
-# include "sys/types.h"
-# include <sys/stat.h>
-# include <errno.h>
-# if defined(AFS_NT40_ENV) 
-#  include <winsock2.h>
-#  ifndef EWOULDBLOCK
-#   define EWOULDBLOCK WSAEWOULDBLOCK
-#  endif
-#  include "rx_user.h"
-#  include "rx_xmit_nt.h"
-#  include <stdlib.h>
-# else
-#  include <sys/socket.h>
-#  include <netinet/in.h>
-# endif
-# include "rx_clock.h"
-# include "rx_internal.h"
-# include "rx.h"
-# include "rx_queue.h"
-# ifdef        AFS_SUN5_ENV
-#  include <sys/sysmacros.h>
-# endif
-# include "rx_packet.h"
-# include "rx_globals.h"
-# include <lwp.h>
-# include <assert.h>
-# include <string.h>
-# ifdef HAVE_UNISTD_H
-#  include <unistd.h>
-# endif
+#include "sys/types.h"
+#include <sys/stat.h>
+#include <errno.h>
+#if defined(AFS_NT40_ENV)
+#include <winsock2.h>
+#ifndef EWOULDBLOCK
+#define EWOULDBLOCK WSAEWOULDBLOCK
+#endif
+#include "rx_user.h"
+#include "rx_xmit_nt.h"
+#include <stdlib.h>
+#else
+#include <sys/socket.h>
+#include <netinet/in.h>
+#endif
+#include "rx_clock.h"
+#include "rx.h"
+#include "rx_queue.h"
+#ifdef AFS_SUN5_ENV
+#include <sys/sysmacros.h>
+#endif
+#include "rx_packet.h"
+#include "rx_globals.h"
+#include <lwp.h>
+#include <assert.h>
+#include <string.h>
+#ifdef HAVE_UNISTD_H
+#include <unistd.h>
+#endif
 #endif /* KERNEL */
 
 #ifdef RX_LOCKS_DB
 /* rxdb_fileID is used to identify the lock location, along with line#. */
 static int rxdb_fileID = RXDB_FILE_RX_PACKET;
 #endif /* RX_LOCKS_DB */
-struct rx_packet *rx_mallocedP = 0;
+static struct rx_packet *rx_mallocedP = 0;
+#ifdef RXDEBUG_PACKET
+static afs_uint32       rx_packet_id = 0;
+#endif
 
 extern char cml_version_number[];
 
@@ -105,12 +104,13 @@ static void rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
                                afs_int32 ahost, short aport,
                                afs_int32 istack);
 
-static int rxi_FreeDataBufsToQueue(struct rx_packet *p, 
-                                  afs_uint32 first, 
-                                  struct rx_queue * q);
 #ifdef RX_ENABLE_TSFPQ
 static int
 rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first, int flush_global);
+#else
+static int rxi_FreeDataBufsToQueue(struct rx_packet *p,
+                                  afs_uint32 first,
+                                  struct rx_queue * q);
 #endif
 
 /* some rules about packets:
@@ -189,17 +189,17 @@ rx_SlowReadPacket(struct rx_packet * packet, unsigned int offset, int resid,
      * offset only applies to the first iovec.
      */
     r = resid;
-    while ((resid > 0) && (i < packet->niovecs)) {
-       j = MIN(resid, packet->wirevec[i].iov_len - (offset - l));
+    while ((r > 0) && (i < packet->niovecs)) {
+       j = MIN(r, packet->wirevec[i].iov_len - (offset - l));
        memcpy(out, (char *)(packet->wirevec[i].iov_base) + (offset - l), j);
-       resid -= j;
+       r -= j;
         out += j;
        l += packet->wirevec[i].iov_len;
        offset = l;
        i++;
     }
 
-    return (resid ? (r - resid) : r);
+    return (r ? (resid - r) : resid);
 }
 
 
@@ -211,11 +211,11 @@ rx_SlowReadPacket(struct rx_packet * packet, unsigned int offset, int resid,
 afs_int32
 rx_SlowWritePacket(struct rx_packet * packet, int offset, int resid, char *in)
 {
-    int i, j, l, r;
+    unsigned int i, j, l, o, r;
     char *b;
 
-    for (l = 0, i = 1; i < packet->niovecs; i++) {
-       if (l + packet->wirevec[i].iov_len > offset) {
+    for (l = 0, i = 1, o = offset; i < packet->niovecs; i++) {
+       if (l + packet->wirevec[i].iov_len > o) {
            break;
        }
        l += packet->wirevec[i].iov_len;
@@ -227,28 +227,28 @@ rx_SlowWritePacket(struct rx_packet * packet, int offset, int resid, char *in)
      * offset only applies to the first iovec.
      */
     r = resid;
-    while ((resid > 0) && (i < RX_MAXWVECS)) {
+    while ((r > 0) && (i <= RX_MAXWVECS)) {
        if (i >= packet->niovecs)
-           if (rxi_AllocDataBuf(packet, resid, RX_PACKET_CLASS_SEND_CBUF) > 0) /* ++niovecs as a side-effect */
+           if (rxi_AllocDataBuf(packet, r, RX_PACKET_CLASS_SEND_CBUF) > 0)     /* ++niovecs as a side-effect */
                break;
 
        b = (char *)(packet->wirevec[i].iov_base) + (offset - l);
-       j = MIN(resid, packet->wirevec[i].iov_len - (offset - l));
+       j = MIN(r, packet->wirevec[i].iov_len - (offset - l));
        memcpy(b, in, j);
-       resid -= j;
+       r -= j;
         in += j;
        l += packet->wirevec[i].iov_len;
        offset = l;
        i++;
     }
 
-    return (resid ? (r - resid) : r);
+    return (r ? (resid - r) : resid);
 }
 
 int
 rxi_AllocPackets(int class, int num_pkts, struct rx_queue * q)
 {
-    register struct rx_packet *p, *np;
+    struct rx_packet *p, *np;
 
     num_pkts = AllocPacketBufs(class, num_pkts, q);
 
@@ -263,7 +263,7 @@ rxi_AllocPackets(int class, int num_pkts, struct rx_queue * q)
 static int
 AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
 {
-    register struct rx_ts_info_t * rx_ts_info;
+    struct rx_ts_info_t * rx_ts_info;
     int transfer;
     SPLVAR;
 
@@ -310,22 +310,24 @@ AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
 
     if (overq) {
        rxi_NeedMorePackets = TRUE;
-       switch (class) {
-       case RX_PACKET_CLASS_RECEIVE:
-           rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SEND:
-           rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SPECIAL:
-            rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_RECV_CBUF:
-           rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SEND_CBUF:
-           rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
-           break;
+        if (rx_stats_active) {
+            switch (class) {
+            case RX_PACKET_CLASS_RECEIVE:
+                rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND:
+                rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SPECIAL:
+                rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_RECV_CBUF:
+                rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND_CBUF:
+                rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            }
        }
     }
 
@@ -370,8 +372,8 @@ AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
 int
 rxi_FreePackets(int num_pkts, struct rx_queue * q)
 {
-    register struct rx_ts_info_t * rx_ts_info;
-    register struct rx_packet *c, *nc;
+    struct rx_ts_info_t * rx_ts_info;
+    struct rx_packet *c, *nc;
     SPLVAR;
 
     osi_Assert(num_pkts >= 0);
@@ -412,7 +414,7 @@ int
 rxi_FreePackets(int num_pkts, struct rx_queue *q)
 {
     struct rx_queue cbs;
-    register struct rx_packet *p, *np;
+    struct rx_packet *p, *np;
     int qlen = 0;
     SPLVAR;
 
@@ -497,7 +499,7 @@ rxi_AllocDataBuf(struct rx_packet *p, int nb, int class)
 {
     int i, nv;
     struct rx_queue q;
-    register struct rx_packet *cb, *ncb;
+    struct rx_packet *cb, *ncb;
 
     /* compute the number of cbuf's we need */
     nv = nb / RX_CBUFFERSIZE;
@@ -532,7 +534,7 @@ void
 rxi_MorePackets(int apackets)
 {
     struct rx_packet *p, *e;
-    register struct rx_ts_info_t * rx_ts_info;
+    struct rx_ts_info_t * rx_ts_info;
     int getme;
     SPLVAR;
 
@@ -541,15 +543,16 @@ rxi_MorePackets(int apackets)
     osi_Assert(p);
 
     PIN(p, getme);             /* XXXXX */
-    memset((char *)p, 0, getme);
+    memset(p, 0, getme);
     RX_TS_INFO_GET(rx_ts_info);
 
     RX_TS_FPQ_LOCAL_ALLOC(rx_ts_info,apackets);
     /* TSFPQ patch also needs to keep track of total packets */
-    MUTEX_ENTER(&rx_stats_mutex);
+
+    MUTEX_ENTER(&rx_packets_mutex);
     rx_nPackets += apackets;
     RX_TS_FPQ_COMPUTE_LIMITS;
-    MUTEX_EXIT(&rx_stats_mutex);
+    MUTEX_EXIT(&rx_packets_mutex);
 
     for (e = p + apackets; p < e; p++) {
         RX_PACKET_IOV_INIT(p);
@@ -559,6 +562,10 @@ rxi_MorePackets(int apackets)
 
         NETPRI;
         MUTEX_ENTER(&rx_freePktQ_lock);
+#ifdef RXDEBUG_PACKET
+        p->packetId = rx_packet_id++;
+        p->allNextp = rx_mallocedP;
+#endif /* RXDEBUG_PACKET */
         rx_mallocedP = p;
         MUTEX_EXIT(&rx_freePktQ_lock);
         USERPRI;
@@ -590,7 +597,7 @@ rxi_MorePackets(int apackets)
     osi_Assert(p);
 
     PIN(p, getme);             /* XXXXX */
-    memset((char *)p, 0, getme);
+    memset(p, 0, getme);
     NETPRI;
     MUTEX_ENTER(&rx_freePktQ_lock);
 
@@ -600,6 +607,10 @@ rxi_MorePackets(int apackets)
        p->niovecs = 2;
 
        queue_Append(&rx_freePacketQueue, p);
+#ifdef RXDEBUG_PACKET
+        p->packetId = rx_packet_id++;
+        p->allNextp = rx_mallocedP;
+#endif /* RXDEBUG_PACKET */
        rx_mallocedP = p;
     }
 
@@ -617,7 +628,7 @@ void
 rxi_MorePacketsTSFPQ(int apackets, int flush_global, int num_keep_local)
 {
     struct rx_packet *p, *e;
-    register struct rx_ts_info_t * rx_ts_info;
+    struct rx_ts_info_t * rx_ts_info;
     int getme;
     SPLVAR;
 
@@ -625,15 +636,15 @@ rxi_MorePacketsTSFPQ(int apackets, int flush_global, int num_keep_local)
     p = (struct rx_packet *)osi_Alloc(getme);
 
     PIN(p, getme);             /* XXXXX */
-    memset((char *)p, 0, getme);
+    memset(p, 0, getme);
     RX_TS_INFO_GET(rx_ts_info);
 
     RX_TS_FPQ_LOCAL_ALLOC(rx_ts_info,apackets);
     /* TSFPQ patch also needs to keep track of total packets */
-    MUTEX_ENTER(&rx_stats_mutex);
+    MUTEX_ENTER(&rx_packets_mutex);
     rx_nPackets += apackets;
     RX_TS_FPQ_COMPUTE_LIMITS;
-    MUTEX_EXIT(&rx_stats_mutex);
+    MUTEX_EXIT(&rx_packets_mutex);
 
     for (e = p + apackets; p < e; p++) {
         RX_PACKET_IOV_INIT(p);
@@ -642,6 +653,10 @@ rxi_MorePacketsTSFPQ(int apackets, int flush_global, int num_keep_local)
        
         NETPRI;
         MUTEX_ENTER(&rx_freePktQ_lock);
+#ifdef RXDEBUG_PACKET
+        p->packetId = rx_packet_id++;
+        p->allNextp = rx_mallocedP;
+#endif /* RXDEBUG_PACKET */
         rx_mallocedP = p;
         MUTEX_EXIT(&rx_freePktQ_lock);
         USERPRI;
@@ -669,7 +684,7 @@ void
 rxi_MorePacketsNoLock(int apackets)
 {
 #ifdef RX_ENABLE_TSFPQ
-    register struct rx_ts_info_t * rx_ts_info;
+    struct rx_ts_info_t * rx_ts_info;
 #endif /* RX_ENABLE_TSFPQ */
     struct rx_packet *p, *e;
     int getme;
@@ -686,7 +701,7 @@ rxi_MorePacketsNoLock(int apackets)
             osi_Assert(apackets > 0);
         }
     } while(p == NULL);
-    memset((char *)p, 0, getme);
+    memset(p, 0, getme);
 
 #ifdef RX_ENABLE_TSFPQ
     RX_TS_INFO_GET(rx_ts_info);
@@ -699,16 +714,20 @@ rxi_MorePacketsNoLock(int apackets)
        p->niovecs = 2;
 
        queue_Append(&rx_freePacketQueue, p);
+#ifdef RXDEBUG_PACKET
+        p->packetId = rx_packet_id++;
+        p->allNextp = rx_mallocedP;
+#endif /* RXDEBUG_PACKET */
        rx_mallocedP = p;
     }
 
     rx_nFreePackets += apackets;
 #ifdef RX_ENABLE_TSFPQ
     /* TSFPQ patch also needs to keep track of total packets */
-    MUTEX_ENTER(&rx_stats_mutex);
+    MUTEX_ENTER(&rx_packets_mutex);
     rx_nPackets += apackets;
     RX_TS_FPQ_COMPUTE_LIMITS;
-    MUTEX_EXIT(&rx_stats_mutex);
+    MUTEX_EXIT(&rx_packets_mutex);
 #endif /* RX_ENABLE_TSFPQ */
     rxi_NeedMorePackets = FALSE;
     rxi_PacketsUnWait();
@@ -729,8 +748,8 @@ rxi_FreeAllPackets(void)
 void
 rxi_AdjustLocalPacketsTSFPQ(int num_keep_local, int allow_overcommit)
 {
-    register struct rx_ts_info_t * rx_ts_info;
-    register int xfer;
+    struct rx_ts_info_t * rx_ts_info;
+    int xfer;
     SPLVAR;
 
     RX_TS_INFO_GET(rx_ts_info);
@@ -792,8 +811,8 @@ rx_CheckPackets(void)
 void
 rxi_FreePacketNoLock(struct rx_packet *p)
 {
-    register struct rx_ts_info_t * rx_ts_info;
-    dpf(("Free %lx\n", (unsigned long)p));
+    struct rx_ts_info_t * rx_ts_info;
+    dpf(("Free %"AFS_PTR_FMT"\n", p));
 
     RX_TS_INFO_GET(rx_ts_info);
     RX_TS_FPQ_CHECKIN(rx_ts_info,p);
@@ -805,7 +824,7 @@ rxi_FreePacketNoLock(struct rx_packet *p)
 void
 rxi_FreePacketNoLock(struct rx_packet *p)
 {
-    dpf(("Free %lx\n", (unsigned long)p));
+    dpf(("Free %"AFS_PTR_FMT"\n", p));
 
     RX_FPQ_MARK_FREE(p);
     rx_nFreePackets++;
@@ -817,8 +836,8 @@ rxi_FreePacketNoLock(struct rx_packet *p)
 void
 rxi_FreePacketTSFPQ(struct rx_packet *p, int flush_global)
 {
-    register struct rx_ts_info_t * rx_ts_info;
-    dpf(("Free %lx\n", (unsigned long)p));
+    struct rx_ts_info_t * rx_ts_info;
+    dpf(("Free %"AFS_PTR_FMT"\n", p));
 
     RX_TS_INFO_GET(rx_ts_info);
     RX_TS_FPQ_CHECKIN(rx_ts_info,p);
@@ -903,6 +922,8 @@ rxi_FreeDataBufsNoLock(struct rx_packet *p, afs_uint32 first)
  *
  * [IN] p             -- packet from which continuation buffers will be freed
  * [IN] first         -- iovec offset of first continuation buffer to free
+ *                       any value less than 2, the min number of iovecs,
+ *                       is treated as if it is 2.
  * [IN] flush_global  -- if nonzero, we will flush overquota packets to the
  *                       global free pool before returning
  *
@@ -913,7 +934,7 @@ static int
 rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first, int flush_global)
 {
     struct iovec *iov;
-    register struct rx_ts_info_t * rx_ts_info;
+    struct rx_ts_info_t * rx_ts_info;
 
     RX_TS_INFO_GET(rx_ts_info);
 
@@ -952,7 +973,7 @@ int rxi_nBadIovecs = 0;
 void
 rxi_RestoreDataBufs(struct rx_packet *p)
 {
-    int i;
+    unsigned int i;
     struct iovec *iov = &p->wirevec[2];
 
     RX_PACKET_IOV_INIT(p);
@@ -973,7 +994,7 @@ rxi_TrimDataBufs(struct rx_packet *p, int first)
 {
     int length;
     struct iovec *iov, *end;
-    register struct rx_ts_info_t * rx_ts_info;
+    struct rx_ts_info_t * rx_ts_info;
     SPLVAR;
 
     if (first != 1)
@@ -1092,36 +1113,39 @@ rxi_FreePacket(struct rx_packet *p)
 struct rx_packet *
 rxi_AllocPacketNoLock(int class)
 {
-    register struct rx_packet *p;
-    register struct rx_ts_info_t * rx_ts_info;
+    struct rx_packet *p;
+    struct rx_ts_info_t * rx_ts_info;
 
     RX_TS_INFO_GET(rx_ts_info);
 
 #ifdef KERNEL
     if (rxi_OverQuota(class)) {
        rxi_NeedMorePackets = TRUE;
-       switch (class) {
-       case RX_PACKET_CLASS_RECEIVE:
-           rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SEND:
-           rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SPECIAL:
-            rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_RECV_CBUF:
-           rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SEND_CBUF:
-           rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
-           break;
+        if (rx_stats_active) {
+            switch (class) {
+            case RX_PACKET_CLASS_RECEIVE:
+                rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND:
+                rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SPECIAL:
+                rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_RECV_CBUF:
+                rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND_CBUF:
+                rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            }
        }
         return (struct rx_packet *)0;
     }
 #endif /* KERNEL */
 
-    rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
     if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
 
 #ifdef KERNEL
@@ -1138,7 +1162,7 @@ rxi_AllocPacketNoLock(int class)
 
     RX_TS_FPQ_CHECKOUT(rx_ts_info,p);
 
-    dpf(("Alloc %lx, class %d\n", (unsigned long)p, class));
+    dpf(("Alloc %"AFS_PTR_FMT", class %d\n", p, class));
 
 
     /* have to do this here because rx_FlushWrite fiddles with the iovs in
@@ -1152,33 +1176,36 @@ rxi_AllocPacketNoLock(int class)
 struct rx_packet *
 rxi_AllocPacketNoLock(int class)
 {
-    register struct rx_packet *p;
+    struct rx_packet *p;
 
 #ifdef KERNEL
     if (rxi_OverQuota(class)) {
        rxi_NeedMorePackets = TRUE;
-       switch (class) {
-       case RX_PACKET_CLASS_RECEIVE:
-           rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SEND:
-           rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SPECIAL:
-            rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_RECV_CBUF:
-           rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SEND_CBUF:
-           rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
-           break;
-       }
+        if (rx_stats_active) {
+            switch (class) {
+            case RX_PACKET_CLASS_RECEIVE:
+                rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND:
+                rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SPECIAL:
+                rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_RECV_CBUF:
+                rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND_CBUF:
+                rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            }
+        }
        return (struct rx_packet *)0;
     }
 #endif /* KERNEL */
 
-    rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
 
 #ifdef KERNEL
     if (queue_IsEmpty(&rx_freePacketQueue))
@@ -1193,7 +1220,7 @@ rxi_AllocPacketNoLock(int class)
     queue_Remove(p);
     RX_FPQ_MARK_USED(p);
 
-    dpf(("Alloc %lx, class %d\n", (unsigned long)p, class));
+    dpf(("Alloc %"AFS_PTR_FMT", class %d\n", p, class));
 
 
     /* have to do this here because rx_FlushWrite fiddles with the iovs in
@@ -1209,12 +1236,13 @@ rxi_AllocPacketNoLock(int class)
 struct rx_packet *
 rxi_AllocPacketTSFPQ(int class, int pull_global)
 {
-    register struct rx_packet *p;
-    register struct rx_ts_info_t * rx_ts_info;
+    struct rx_packet *p;
+    struct rx_ts_info_t * rx_ts_info;
 
     RX_TS_INFO_GET(rx_ts_info);
 
-    rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
     if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
         MUTEX_ENTER(&rx_freePktQ_lock);
 
@@ -1230,7 +1258,7 @@ rxi_AllocPacketTSFPQ(int class, int pull_global)
 
     RX_TS_FPQ_CHECKOUT(rx_ts_info,p);
 
-    dpf(("Alloc %lx, class %d\n", (unsigned long)p, class));
+    dpf(("Alloc %"AFS_PTR_FMT", class %d\n", p, class));
 
     /* have to do this here because rx_FlushWrite fiddles with the iovs in
      * order to truncate outbound packets.  In the near future, may need 
@@ -1245,7 +1273,7 @@ rxi_AllocPacketTSFPQ(int class, int pull_global)
 struct rx_packet *
 rxi_AllocPacket(int class)
 {
-    register struct rx_packet *p;
+    struct rx_packet *p;
 
     p = rxi_AllocPacketTSFPQ(class, RX_TS_FPQ_PULL_GLOBAL);
     return p;
@@ -1254,7 +1282,7 @@ rxi_AllocPacket(int class)
 struct rx_packet *
 rxi_AllocPacket(int class)
 {
-    register struct rx_packet *p;
+    struct rx_packet *p;
 
     MUTEX_ENTER(&rx_freePktQ_lock);
     p = rxi_AllocPacketNoLock(class);
@@ -1269,11 +1297,11 @@ rxi_AllocPacket(int class)
  * Called with call locked.
  */
 struct rx_packet *
-rxi_AllocSendPacket(register struct rx_call *call, int want)
+rxi_AllocSendPacket(struct rx_call *call, int want)
 {
-    register struct rx_packet *p = (struct rx_packet *)0;
-    register int mud;
-    register unsigned delta;
+    struct rx_packet *p = (struct rx_packet *)0;
+    int mud;
+    unsigned delta;
 
     SPLVAR;
     mud = call->MTU - RX_HEADER_SIZE;
@@ -1290,7 +1318,7 @@ rxi_AllocSendPacket(register struct rx_call *call, int want)
            (void)rxi_AllocDataBuf(p, (want - p->length),
                                   RX_PACKET_CLASS_SEND_CBUF);
 
-       if ((unsigned)p->length > mud)
+       if (p->length > mud)
             p->length = mud;
 
        if (delta >= p->length) {
@@ -1316,7 +1344,7 @@ rxi_AllocSendPacket(register struct rx_call *call, int want)
                (void)rxi_AllocDataBuf(p, (want - p->length),
                                       RX_PACKET_CLASS_SEND_CBUF);
 
-           if ((unsigned)p->length > mud)
+           if (p->length > mud)
                p->length = mud;
 
            if (delta >= p->length) {
@@ -1360,11 +1388,11 @@ rxi_AllocSendPacket(register struct rx_call *call, int want)
 #else
 /* count the number of used FDs */
 static int
-CountFDs(register int amax)
+CountFDs(int amax)
 {
     struct stat tstat;
-    register int i, code;
-    register int count;
+    int i, code;
+    int count;
 
     count = 0;
     for (i = 0; i < amax; i++) {
@@ -1389,13 +1417,13 @@ CountFDs(register int amax)
  * the data length of the packet is stored in the packet structure.
  * The header is decoded. */
 int
-rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * host,
+rxi_ReadPacket(osi_socket socket, struct rx_packet *p, afs_uint32 * host,
               u_short * port)
 {
     struct sockaddr_in from;
-    int nbytes;
+    unsigned int nbytes;
     afs_int32 rlen;
-    register afs_int32 tlen, savelen;
+    afs_uint32 tlen, savelen;
     struct msghdr msg;
     rx_computelen(p, tlen);
     rx_SetDataSize(p, tlen);   /* this is the size of the user data area */
@@ -1421,7 +1449,7 @@ rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * hos
     savelen = p->wirevec[p->niovecs - 1].iov_len;
     p->wirevec[p->niovecs - 1].iov_len += RX_EXTRABUFFERSIZE;
 
-    memset((char *)&msg, 0, sizeof(msg));
+    memset(&msg, 0, sizeof(msg));
     msg.msg_name = (char *)&from;
     msg.msg_namelen = sizeof(struct sockaddr_in);
     msg.msg_iov = p->wirevec;
@@ -1431,15 +1459,18 @@ rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * hos
     /* restore the vec to its correct state */
     p->wirevec[p->niovecs - 1].iov_len = savelen;
 
-    p->length = (nbytes - RX_HEADER_SIZE);
+    p->length = (u_short)(nbytes - RX_HEADER_SIZE);
     if ((nbytes > tlen) || (p->length & 0x8000)) {     /* Bogus packet */
        if (nbytes < 0 && errno == EWOULDBLOCK) {
-            rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
        } else if (nbytes <= 0) {
-           MUTEX_ENTER(&rx_stats_mutex);
-           rx_stats.bogusPacketOnRead++;
-           rx_stats.bogusHost = from.sin_addr.s_addr;
-           MUTEX_EXIT(&rx_stats_mutex);
+            if (rx_stats_active) {
+                MUTEX_ENTER(&rx_stats_mutex);
+                rx_stats.bogusPacketOnRead++;
+                rx_stats.bogusHost = from.sin_addr.s_addr;
+                MUTEX_EXIT(&rx_stats_mutex);
+            }
            dpf(("B: bogus packet from [%x,%d] nb=%d", ntohl(from.sin_addr.s_addr),
                 ntohs(from.sin_port), nbytes));
        }
@@ -1457,7 +1488,9 @@ rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * hos
              p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(*host), ntohs(*port), p->header.serial, 
              p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, 
              p->length));
+#ifdef RX_TRIMDATABUFS
        rxi_TrimDataBufs(p, 1);
+#endif
        return 0;
     } 
 #endif
@@ -1469,7 +1502,8 @@ rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * hos
        *port = from.sin_port;
        if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
            struct rx_peer *peer;
-            rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
            /*
             * Try to look up this peer structure.  If it doesn't exist,
             * don't create a new one - 
@@ -1493,9 +1527,10 @@ rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * hos
            }
        }
 
+#ifdef RX_TRIMDATABUFS
        /* Free any empty packet buffers at the end of this packet */
        rxi_TrimDataBufs(p, 1);
-
+#endif 
        return 1;
     }
 }
@@ -1511,7 +1546,7 @@ rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * hos
  * last two pad bytes. */
 
 struct rx_packet *
-rxi_SplitJumboPacket(register struct rx_packet *p, afs_int32 host, short port,
+rxi_SplitJumboPacket(struct rx_packet *p, afs_int32 host, short port,
                     int first)
 {
     struct rx_packet *np;
@@ -1604,9 +1639,9 @@ osi_NetSend(osi_socket socket, void *addr, struct iovec *dvec, int nvecs,
  * The message is NOT changed.
  */
 static int
-cpytoc(mblk_t * mp, register int off, register int len, register char *cp)
+cpytoc(mblk_t * mp, int off, int len, char *cp)
 {
-    register int n;
+    int n;
 
     for (; mp && len > 0; mp = mp->b_cont) {
        if (mp->b_datap->db_type != M_DATA) {
@@ -1626,10 +1661,10 @@ cpytoc(mblk_t * mp, register int off, register int len, register char *cp)
  * This sucks, anyway, do it like m_cpy.... below 
  */
 static int
-cpytoiovec(mblk_t * mp, int off, int len, register struct iovec *iovs,
+cpytoiovec(mblk_t * mp, int off, int len, struct iovec *iovs,
           int niovs)
 {
-    register int m, n, o, t, i;
+    int m, n, o, t, i;
 
     for (i = -1, t = 0; i < niovs && mp && len > 0; mp = mp->b_cont) {
        if (mp->b_datap->db_type != M_DATA) {
@@ -1725,7 +1760,7 @@ rx_mb_to_packet(amb, free, hdr_len, data_len, phandle)
      struct rx_packet *phandle;
      int hdr_len, data_len;
 {
-    register int code;
+    int code;
 
     code =
        m_cpytoiovec(amb, hdr_len, data_len, phandle->wirevec,
@@ -1741,7 +1776,7 @@ rx_mb_to_packet(amb, free, hdr_len, data_len, phandle)
 /* send a response to a debug packet */
 
 struct rx_packet *
-rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
+rxi_ReceiveDebugPacket(struct rx_packet *ap, osi_socket asocket,
                       afs_int32 ahost, short aport, int istack)
 {
     struct rx_debugIn tin;
@@ -1771,7 +1806,7 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
            struct rx_debugStats tstat;
 
            /* get basic stats */
-           memset((char *)&tstat, 0, sizeof(tstat));   /* make sure spares are zero */
+           memset(&tstat, 0, sizeof(tstat));   /* make sure spares are zero */
            tstat.version = RX_DEBUGI_VERSION;
 #ifndef        RX_ENABLE_LOCKS
            tstat.waitingForPackets = rx_waitingForPackets;
@@ -1804,8 +1839,8 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
 
     case RX_DEBUGI_GETALLCONN:
     case RX_DEBUGI_GETCONN:{
-           int i, j;
-           register struct rx_connection *tc;
+            unsigned int i, j;
+           struct rx_connection *tc;
            struct rx_call *tcall;
            struct rx_debugConn tconn;
            int all = (tin.type == RX_DEBUGI_GETALLCONN);
@@ -1817,7 +1852,7 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
            if (tl > 0)
                return ap;
 
-           memset((char *)&tconn, 0, sizeof(tconn));   /* make sure spares are zero */
+           memset(&tconn, 0, sizeof(tconn));   /* make sure spares are zero */
            /* get N'th (maybe) "interesting" connection info */
            for (i = 0; i < rx_hashTableSize; i++) {
 #if !defined(KERNEL)
@@ -1830,7 +1865,7 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
                (void)IOMGR_Poll();
 #endif
 #endif
-               RWLOCK_RDLOCK(&rx_connHashTable_lock);
+               MUTEX_ENTER(&rx_connHashTable_lock);
                /* We might be slightly out of step since we are not 
                 * locking each call, but this is only debugging output.
                 */
@@ -1883,8 +1918,8 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
                                 sizeof(afs_int32); i++)
                                DOHTONL(sparel[i]);
                        }
-                       
-                       RWLOCK_UNLOCK(&rx_connHashTable_lock);
+
+                       MUTEX_EXIT(&rx_connHashTable_lock);
                        rx_packetwrite(ap, 0, sizeof(struct rx_debugConn),
                                       (char *)&tconn);
                        tl = ap->length;
@@ -1895,7 +1930,7 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
                        return ap;
                    }
                }
-               RWLOCK_UNLOCK(&rx_connHashTable_lock);
+               MUTEX_EXIT(&rx_connHashTable_lock);
            }
            /* if we make it here, there are no interesting packets */
            tconn.cid = htonl(0xffffffff);      /* means end */
@@ -1913,8 +1948,8 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
         */
 
     case RX_DEBUGI_GETPEER:{
-           int i;
-           register struct rx_peer *tp;
+           unsigned int i;
+           struct rx_peer *tp;
            struct rx_debugPeer tpeer;
 
 
@@ -1924,7 +1959,7 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
            if (tl > 0)
                return ap;
 
-           memset((char *)&tpeer, 0, sizeof(tpeer));
+           memset(&tpeer, 0, sizeof(tpeer));
            for (i = 0; i < rx_hashTableSize; i++) {
 #if !defined(KERNEL)
                /* the time complexity of the algorithm used here
@@ -1942,10 +1977,13 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
                (void)IOMGR_Poll();
 #endif
 #endif
-               RWLOCK_RDLOCK(&rx_peerHashTable_lock);
-               /* XXX should copy out, then unlock and byteswap */
+               MUTEX_ENTER(&rx_peerHashTable_lock);
                for (tp = rx_peerHashTable[i]; tp; tp = tp->next) {
                    if (tin.index-- <= 0) {
+                        tp->refCount++;
+                        MUTEX_EXIT(&rx_peerHashTable_lock);
+
+                        MUTEX_ENTER(&tp->peer_lock);
                        tpeer.host = tp->host;
                        tpeer.port = tp->port;
                        tpeer.ifMTU = htons(tp->ifMTU);
@@ -1978,8 +2016,12 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
                            htonl(tp->bytesReceived.high);
                        tpeer.bytesReceived.low =
                            htonl(tp->bytesReceived.low);
+                        MUTEX_EXIT(&tp->peer_lock);
+
+                        MUTEX_ENTER(&rx_peerHashTable_lock);
+                        tp->refCount--;
+                       MUTEX_EXIT(&rx_peerHashTable_lock);
 
-                       RWLOCK_UNLOCK(&rx_peerHashTable_lock);
                        rx_packetwrite(ap, 0, sizeof(struct rx_debugPeer),
                                       (char *)&tpeer);
                        tl = ap->length;
@@ -1990,7 +2032,7 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
                        return ap;
                    }
                }
-               RWLOCK_UNLOCK(&rx_peerHashTable_lock);
+               MUTEX_EXIT(&rx_peerHashTable_lock);
            }
            /* if we make it here, there are no interesting packets */
            tpeer.host = htonl(0xffffffff);     /* means end */
@@ -2014,6 +2056,7 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
                return ap;
 
            /* Since its all int32s convert to network order with a loop. */
+        if (rx_stats_active)
            MUTEX_ENTER(&rx_stats_mutex);
            s = (afs_int32 *) & rx_stats;
            for (i = 0; i < sizeof(rx_stats) / sizeof(afs_int32); i++, s++)
@@ -2021,6 +2064,7 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
 
            tl = ap->length;
            ap->length = sizeof(rx_stats);
+        if (rx_stats_active)
            MUTEX_EXIT(&rx_stats_mutex);
            rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
            ap->length = tl;
@@ -2042,7 +2086,7 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
 }
 
 struct rx_packet *
-rxi_ReceiveVersionPacket(register struct rx_packet *ap, osi_socket asocket,
+rxi_ReceiveVersionPacket(struct rx_packet *ap, osi_socket asocket,
                         afs_int32 ahost, short aport, int istack)
 {
     afs_int32 tl;
@@ -2075,10 +2119,8 @@ rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
                    afs_int32 ahost, short aport, afs_int32 istack)
 {
     struct sockaddr_in taddr;
-    int i;
-    int nbytes;
+    unsigned int i, nbytes, savelen = 0;
     int saven = 0;
-    size_t savelen = 0;
 #ifdef KERNEL
     int waslocked = ISAFS_GLOCK();
 #endif
@@ -2151,7 +2193,7 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
 #endif
     int code;
     struct sockaddr_in addr;
-    register struct rx_peer *peer = conn->peer;
+    struct rx_peer *peer = conn->peer;
     osi_socket socket;
 #ifdef RXDEBUG
     char deliveryType = 'S';
@@ -2236,7 +2278,8 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
             osi_NetSend(socket, &addr, p->wirevec, p->niovecs,
                         p->length + RX_HEADER_SIZE, istack)) != 0) {
            /* send failed, so let's hurry up the resend, eh? */
-            rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
            p->retryTime = p->timeSent; /* resend it very soon */
            clock_Addmsec(&(p->retryTime),
                          10 + (((afs_uint32) p->backoff) << 8));
@@ -2247,10 +2290,10 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
             */
            if (call && 
 #ifdef AFS_NT40_ENV
-               code == -1 && WSAGetLastError() == WSAEHOSTUNREACH
-#elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
+               (code == -1 && WSAGetLastError() == WSAEHOSTUNREACH) || (code == -WSAEHOSTUNREACH)
+#elif defined(AFS_LINUX20_ENV)
                code == -ENETUNREACH
-#elif defined(AFS_DARWIN_ENV) && defined(KERNEL)
+#elif defined(AFS_DARWIN_ENV)
                code == EHOSTUNREACH
 #else
                0
@@ -2274,9 +2317,13 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
 #endif
 #ifdef RXDEBUG
     }
-    dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+    dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%.3d len %d",
+          deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host),
+          ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber,
+          p->header.seq, p->header.flags, p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
 #endif
-    rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
     MUTEX_ENTER(&peer->peer_lock);
     hadd32(peer->bytesSent, p->length);
     MUTEX_EXIT(&peer->peer_lock);
@@ -2293,7 +2340,7 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
     int waslocked;
 #endif
     struct sockaddr_in addr;
-    register struct rx_peer *peer = conn->peer;
+    struct rx_peer *peer = conn->peer;
     osi_socket socket;
     struct rx_packet *p = NULL;
     struct iovec wirevec[RX_MAXIOVECS];
@@ -2421,7 +2468,8 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
             osi_NetSend(socket, &addr, &wirevec[0], len + 1, length,
                         istack)) != 0) {
            /* send failed, so let's hurry up the resend, eh? */
-            rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
            for (i = 0; i < len; i++) {
                p = list[i];
                p->retryTime = p->timeSent;     /* resend it very soon */
@@ -2435,10 +2483,10 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
             */
            if (call && 
 #ifdef AFS_NT40_ENV
-               code == -1 && WSAGetLastError() == WSAEHOSTUNREACH
-#elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
+               (code == -1 && WSAGetLastError() == WSAEHOSTUNREACH) || (code == -WSAEHOSTUNREACH)
+#elif defined(AFS_LINUX20_ENV)
                code == -ENETUNREACH
-#elif defined(AFS_DARWIN_ENV) && defined(KERNEL)
+#elif defined(AFS_DARWIN_ENV)
                code == EHOSTUNREACH
 #else
                0
@@ -2455,10 +2503,14 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
 
     assert(p != NULL);
 
-    dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+    dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%.3d len %d",
+          deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host),
+          ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber,
+          p->header.seq, p->header.flags, p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
 
 #endif
-    rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
     MUTEX_ENTER(&peer->peer_lock);
     hadd32(peer->bytesSent, p->length);
     MUTEX_EXIT(&peer->peer_lock);
@@ -2475,14 +2527,14 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
  * in rx.h.  Bug: there's a lot of duplication between this and other
  * routines.  This needs to be cleaned up. */
 struct rx_packet *
-rxi_SendSpecial(register struct rx_call *call,
-               register struct rx_connection *conn,
+rxi_SendSpecial(struct rx_call *call,
+               struct rx_connection *conn,
                struct rx_packet *optionalPacket, int type, char *data,
                int nbytes, int istack)
 {
     /* Some of the following stuff should be common code for all
      * packet sends (it's repeated elsewhere) */
-    register struct rx_packet *p;
+    struct rx_packet *p;
     unsigned int i = 0;
     int savelen = 0, saven = 0;
     int channel, callNumber;
@@ -2551,11 +2603,11 @@ rxi_SendSpecial(register struct rx_call *call,
  * the net byte order representation in the wire representation of the
  * packet, which is what is actually sent out on the wire) */
 void
-rxi_EncodePacketHeader(register struct rx_packet *p)
+rxi_EncodePacketHeader(struct rx_packet *p)
 {
-    register afs_uint32 *buf = (afs_uint32 *) (p->wirevec[0].iov_base);        /* MTUXXX */
+    afs_uint32 *buf = (afs_uint32 *) (p->wirevec[0].iov_base); /* MTUXXX */
 
-    memset((char *)buf, 0, RX_HEADER_SIZE);
+    memset(buf, 0, RX_HEADER_SIZE);
     *buf++ = htonl(p->header.epoch);
     *buf++ = htonl(p->header.cid);
     *buf++ = htonl(p->header.callNumber);
@@ -2570,9 +2622,9 @@ rxi_EncodePacketHeader(register struct rx_packet *p)
 
 /* Decode the packet's header (from net byte order to a struct header) */
 void
-rxi_DecodePacketHeader(register struct rx_packet *p)
+rxi_DecodePacketHeader(struct rx_packet *p)
 {
-    register afs_uint32 *buf = (afs_uint32 *) (p->wirevec[0].iov_base);        /* MTUXXX */
+    afs_uint32 *buf = (afs_uint32 *) (p->wirevec[0].iov_base); /* MTUXXX */
     afs_uint32 temp;
 
     p->header.epoch = ntohl(*buf);
@@ -2604,12 +2656,12 @@ rxi_DecodePacketHeader(register struct rx_packet *p)
 }
 
 void
-rxi_PrepareSendPacket(register struct rx_call *call,
-                     register struct rx_packet *p, register int last)
+rxi_PrepareSendPacket(struct rx_call *call,
+                     struct rx_packet *p, int last)
 {
-    register struct rx_connection *conn = call->conn;
-    int i;
-    ssize_t len;               /* len must be a signed type; it can go negative */
+    struct rx_connection *conn = call->conn;
+    unsigned int i;
+    afs_int32 len;             /* len must be a signed type; it can go negative */
 
     p->flags &= ~RX_PKTFLAG_ACKED;
     p->header.cid = (conn->cid | call->channel);
@@ -2656,9 +2708,10 @@ rxi_PrepareSendPacket(register struct rx_call *call,
         MUTEX_EXIT(&rx_freePktQ_lock);
 #endif /* !RX_ENABLE_TSFPQ */
 
-       p->niovecs = i;
+        p->niovecs = i;
     }
-    p->wirevec[i - 1].iov_len += len;
+    if (len)
+        p->wirevec[i - 1].iov_len += len;
     RXS_PreparePacket(conn->securityObject, call, p);
 }
 
@@ -2716,3 +2769,54 @@ rxi_AdjustDgramPackets(int frags, int mtu)
     }
     return (2 + (maxMTU / (RX_JUMBOBUFFERSIZE + RX_JUMBOHEADERSIZE)));
 }
+
+#ifndef KERNEL
+/* 
+ * This function can be used by the Windows Cache Manager
+ * to dump the list of all rx packets so that we can determine
+ * where the packet leakage is.
+ */
+int rx_DumpPackets(FILE *outputFile, char *cookie)
+{
+#ifdef RXDEBUG_PACKET
+    struct rx_packet *p;
+#ifdef AFS_NT40_ENV
+    int zilch;
+    char output[2048];
+#define RXDPRINTF sprintf
+#define RXDPRINTOUT output
+#else
+#define RXDPRINTF fprintf
+#define RXDPRINTOUT outputFile
+#endif
+
+    NETPRI;
+    MUTEX_ENTER(&rx_freePktQ_lock);
+    RXDPRINTF(RXDPRINTOUT, "%s - Start dumping all Rx Packets - count=%u\r\n", cookie, rx_packet_id);
+#ifdef AFS_NT40_ENV
+    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
+
+    for (p = rx_mallocedP; p; p = p->allNextp) {
+        RXDPRINTF(RXDPRINTOUT, "%s - packet=0x%p, id=%u, firstSent=%u.%08u, timeSent=%u.%08u, retryTime=%u.%08u, firstSerial=%u, niovecs=%u, flags=0x%x, backoff=%u, length=%u  header: epoch=%u, cid=%u, callNum=%u, seq=%u, serial=%u, type=%u, flags=0x%x, userStatus=%u, securityIndex=%u, serviceId=%u\r\n",
+                cookie, p, p->packetId, p->firstSent.sec, p->firstSent.usec, p->timeSent.sec, p->timeSent.usec, p->retryTime.sec, p->retryTime.usec, 
+                p->firstSerial, p->niovecs, (afs_uint32)p->flags, (afs_uint32)p->backoff, (afs_uint32)p->length,
+                p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.serial,
+                (afs_uint32)p->header.type, (afs_uint32)p->header.flags, (afs_uint32)p->header.userStatus, 
+                (afs_uint32)p->header.securityIndex, (afs_uint32)p->header.serviceId);
+#ifdef AFS_NT40_ENV
+        WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
+    }
+
+    RXDPRINTF(RXDPRINTOUT, "%s - End dumping all Rx Packets\r\n", cookie);
+#ifdef AFS_NT40_ENV
+    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
+
+    MUTEX_EXIT(&rx_freePktQ_lock);
+    USERPRI;
+#endif /* RXDEBUG_PACKET */
+    return 0;
+}
+#endif
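
A hypothetical caller of the new rx_DumpPackets() entry point (userspace only; the function compiles to a no-op unless RXDEBUG_PACKET is defined, and on AFS_NT40_ENV the first argument is treated as a Win32 handle and written with WriteFile, so the call would differ there). The path and cookie string below are made up for illustration.

#include <stdio.h>

/* Prototype as added by this patch (userspace only). */
extern int rx_DumpPackets(FILE *outputFile, char *cookie);

/* Dump the global packet list so leaked packets can be spotted. */
static void dump_rx_packets_for_leak_check(void)
{
    FILE *fp = fopen("/tmp/rx_packets.log", "w");   /* illustrative path */

    if (fp != NULL) {
        rx_DumpPackets(fp, "leak-check");           /* cookie labels the dump */
        fclose(fp);
    }
}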