rx: reset packet header userStatus field on reuse
diff --git a/src/rx/rx_packet.c b/src/rx/rx_packet.c
index e337faa..9340da3 100644
--- a/src/rx/rx_packet.c
+++ b/src/rx/rx_packet.c
@@ -8,92 +8,78 @@
  */
 
 #include <afsconfig.h>
-#ifdef KERNEL
-#include "afs/param.h"
-#else
 #include <afs/param.h>
-#endif
 
 #ifdef KERNEL
-#if defined(UKERNEL)
-#include "afs/sysincludes.h"
-#include "afsincludes.h"
-#include "rx/rx_kcommon.h"
-#include "rx/rx_clock.h"
-#include "rx/rx_queue.h"
-#include "rx/rx_packet.h"
-#include "rx/rx_atomic.h"
-#include "rx/rx_internal.h"
-#include "rx/rx_stats.h"
-#else /* defined(UKERNEL) */
-#ifdef RX_KERNEL_TRACE
-#include "../rx/rx_kcommon.h"
-#endif
-#include "h/types.h"
-#ifndef AFS_LINUX20_ENV
-#include "h/systm.h"
-#endif
-#if defined(AFS_SGI_ENV) || defined(AFS_HPUX110_ENV) || defined(AFS_NBSD50_ENV)
-#include "afs/sysincludes.h"
-#endif
-#if defined(AFS_OBSD_ENV)
-#include "h/proc.h"
-#endif
-#include "h/socket.h"
-#if !defined(AFS_SUN5_ENV) &&  !defined(AFS_LINUX20_ENV) && !defined(AFS_HPUX110_ENV)
-#if    !defined(AFS_OSF_ENV) && !defined(AFS_AIX41_ENV)
-#include "sys/mount.h"         /* it gets pulled in by something later anyway */
-#endif
-#include "h/mbuf.h"
-#endif
-#include "netinet/in.h"
-#include "afs/afs_osi.h"
-#include "rx_kmutex.h"
-#include "rx/rx_clock.h"
-#include "rx/rx_queue.h"
-#include "rx_atomic.h"
-#ifdef AFS_SUN5_ENV
-#include <sys/sysmacros.h>
-#endif
-#include "rx/rx_packet.h"
-#include "rx_internal.h"
-#include "rx_stats.h"
-#endif /* defined(UKERNEL) */
-#include "rx/rx_globals.h"
+# if defined(UKERNEL)
+#  include "afs/sysincludes.h"
+#  include "afsincludes.h"
+#  include "rx_kcommon.h"
+# else /* defined(UKERNEL) */
+#  ifdef RX_KERNEL_TRACE
+#   include "rx_kcommon.h"
+#  endif
+#  include "h/types.h"
+#  ifndef AFS_LINUX20_ENV
+#   include "h/systm.h"
+#  endif
+#  if defined(AFS_SGI_ENV) || defined(AFS_HPUX110_ENV) || defined(AFS_NBSD50_ENV)
+#   include "afs/sysincludes.h"
+#  endif
+#  if defined(AFS_OBSD_ENV)
+#   include "h/proc.h"
+#  endif
+#  include "h/socket.h"
+#  if !defined(AFS_SUN5_ENV) &&  !defined(AFS_LINUX20_ENV) && !defined(AFS_HPUX110_ENV)
+#   if !defined(AFS_OSF_ENV) && !defined(AFS_AIX41_ENV)
+#    include "sys/mount.h"             /* it gets pulled in by something later anyway */
+#   endif
+#   include "h/mbuf.h"
+#  endif
+#  include "netinet/in.h"
+#  include "afs/afs_osi.h"
+#  include "rx_kmutex.h"
+# endif /* defined(UKERNEL) */
 #else /* KERNEL */
-#include "sys/types.h"
-#include <sys/stat.h>
-#include <errno.h>
-#if defined(AFS_NT40_ENV)
-#include <winsock2.h>
-#ifndef EWOULDBLOCK
-#define EWOULDBLOCK WSAEWOULDBLOCK
-#endif
-#include "rx_user.h"
-#include "rx_xmit_nt.h"
-#include <stdlib.h>
-#else
-#include <sys/socket.h>
-#include <netinet/in.h>
-#endif
-#include "rx_clock.h"
-#include "rx.h"
-#include "rx_queue.h"
+# include <roken.h>
+# include <assert.h>
+# include <afs/opr.h>
+# if defined(AFS_NT40_ENV)
+#  ifndef EWOULDBLOCK
+#   define EWOULDBLOCK WSAEWOULDBLOCK
+#  endif
+#  include "rx_user.h"
+#  include "rx_xmit_nt.h"
+# endif
+# include <lwp.h>
+#endif /* KERNEL */
+
 #ifdef AFS_SUN5_ENV
-#include <sys/sysmacros.h>
+# include <sys/sysmacros.h>
 #endif
+
+#include <opr/queue.h>
+
+#include "rx.h"
+#include "rx_clock.h"
 #include "rx_packet.h"
 #include "rx_atomic.h"
 #include "rx_globals.h"
 #include "rx_internal.h"
 #include "rx_stats.h"
-#include <lwp.h>
-#include <assert.h>
-#include <string.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#endif /* KERNEL */
+
+#include "rx_peer.h"
+#include "rx_conn.h"
+#include "rx_call.h"
+
+/*!
+ * \brief structure used to keep track of allocated packets
+ */
+struct rx_mallocedPacket {
+    struct opr_queue entry;    /*!< chained using opr_queue */
+    struct rx_packet *addr;    /*!< address of the first element */
+    afs_uint32 size;           /*!< array size in bytes */
+};
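
For readers new to opr_queue: it is an intrusive list, so the structure
above embeds the link node directly and the queue code recovers the
enclosing structure from a link pointer, container-of style.  A minimal
standalone sketch of the idiom (hypothetical names; only the pointer
arithmetic behind opr_queue_Entry() is modelled):

    #include <stddef.h>
    #include <stdio.h>

    struct link { struct link *prev, *next; }; /* stand-in for struct opr_queue */

    struct tracked {
        struct link entry;          /* embedded link, as in rx_mallocedPacket */
        int size;
    };

    /* What opr_queue_Entry(le, struct tracked, entry) boils down to. */
    #define ENTRY(le, type, field) \
        ((type *)((char *)(le) - offsetof(type, field)))

    int main(void)
    {
        struct tracked t = { { NULL, NULL }, 42 };
        struct link *le = &t.entry;     /* all the queue ever stores */
        printf("%d\n", ENTRY(le, struct tracked, entry)->size);  /* prints 42 */
        return 0;
    }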
 
 #ifdef RX_LOCKS_DB
 /* rxdb_fileID is used to identify the lock location, along with line#. */
@@ -106,21 +92,31 @@ static afs_uint32       rx_packet_id = 0;
 
 extern char cml_version_number[];
 
-static int AllocPacketBufs(int class, int num_pkts, struct rx_queue *q);
+static int AllocPacketBufs(int class, int num_pkts, struct opr_queue *q);
 
 static void rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
                                afs_uint32 ahost, short aport,
                                afs_int32 istack);
+static struct rx_packet *rxi_AllocPacketNoLock(int class);
+
+#ifndef KERNEL
+static void rxi_MorePacketsNoLock(int apackets);
+#endif
 
 #ifdef RX_ENABLE_TSFPQ
-static int
-rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first, int flush_global);
+static int rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first,
+                                int flush_global);
+static void rxi_AdjustLocalPacketsTSFPQ(int num_keep_local,
+                                       int allow_overcommit);
 #else
-static int rxi_FreeDataBufsToQueue(struct rx_packet *p,
-                                  afs_uint32 first,
-                                  struct rx_queue * q);
+static void rxi_FreePacketNoLock(struct rx_packet *p);
+static int rxi_FreeDataBufsNoLock(struct rx_packet *p, afs_uint32 first);
+static int rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first,
+                                  struct opr_queue * q);
 #endif
 
+extern struct opr_queue rx_idleServerQueue;
+
 /* some rules about packets:
  * 1.  When a packet is allocated, the final iov_buf contains room for
  * a security trailer, but iov_len masks that fact.  If the security
@@ -254,14 +250,14 @@ rx_SlowWritePacket(struct rx_packet * packet, int offset, int resid, char *in)
 }
 
 int
-rxi_AllocPackets(int class, int num_pkts, struct rx_queue * q)
+rxi_AllocPackets(int class, int num_pkts, struct opr_queue * q)
 {
-    struct rx_packet *p, *np;
+    struct opr_queue *c;
 
     num_pkts = AllocPacketBufs(class, num_pkts, q);
 
-    for (queue_Scan(q, p, np, rx_packet)) {
-        RX_PACKET_IOV_FULLINIT(p);
+    for (opr_queue_Scan(q, c)) {
+        RX_PACKET_IOV_FULLINIT(opr_queue_Entry(c, struct rx_packet, entry));
     }
 
     return num_pkts;
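
This loop shows the conversion pattern that recurs throughout the
commit: opr_queue_Scan() yields a bare struct opr_queue cursor rather
than a typed element, so each loop body maps it back with
opr_queue_Entry(); opr_queue_ScanSafe() additionally caches the
successor so the current entry may be unlinked mid-scan.  A sketch of
both forms, assuming q is an initialised queue of rx_packets:

    struct opr_queue *cursor, *store;

    for (opr_queue_Scan(q, cursor)) {
        struct rx_packet *p = opr_queue_Entry(cursor, struct rx_packet, entry);
        /* read-only use of p; unlinking here would break the scan */
    }

    for (opr_queue_ScanSafe(q, cursor, store)) {
        struct rx_packet *p = opr_queue_Entry(cursor, struct rx_packet, entry);
        opr_queue_Remove(&p->entry);    /* safe: successor already saved */
    }
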
@@ -269,7 +265,7 @@ rxi_AllocPackets(int class, int num_pkts, struct rx_queue * q)
 
 #ifdef RX_ENABLE_TSFPQ
 static int
-AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
+AllocPacketBufs(int class, int num_pkts, struct opr_queue * q)
 {
     struct rx_ts_info_t * rx_ts_info;
     int transfer;
@@ -299,7 +295,7 @@ AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
 }
 #else /* RX_ENABLE_TSFPQ */
 static int
-AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
+AllocPacketBufs(int class, int num_pkts, struct opr_queue * q)
 {
     struct rx_packet *c;
     int i;
@@ -352,13 +348,13 @@ AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
     }
 #endif /* KERNEL */
 
-    for (i=0, c=queue_First(&rx_freePacketQueue, rx_packet);
+    for (i=0, c=opr_queue_First(&rx_freePacketQueue, struct rx_packet, entry);
         i < num_pkts;
-        i++, c=queue_Next(c, rx_packet)) {
+        i++, c=opr_queue_Next(&c->entry, struct rx_packet, entry)) {
         RX_FPQ_MARK_USED(c);
     }
 
-    queue_SplitBeforeAppend(&rx_freePacketQueue,q,c);
+    opr_queue_SplitBeforeAppend(&rx_freePacketQueue, q, &c->entry);
 
     rx_nFreePackets -= num_pkts;
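
The splice above is the point of the marking loop: after num_pkts
iterations c points one past the last packet taken, and
opr_queue_SplitBeforeAppend() detaches everything in front of
&c->entry in constant time.  A worked example, assuming packets
A B C D sit on rx_freePacketQueue and num_pkts == 2:

    /* after the marking loop, c points at C */
    opr_queue_SplitBeforeAppend(&rx_freePacketQueue, q, &c->entry);
    /* rx_freePacketQueue: C D        q: A B (both marked used) */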
 
@@ -378,22 +374,25 @@ AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
 #ifdef RX_ENABLE_TSFPQ
 /* num_pkts=0 means queue length is unknown */
 int
-rxi_FreePackets(int num_pkts, struct rx_queue * q)
+rxi_FreePackets(int num_pkts, struct opr_queue * q)
 {
     struct rx_ts_info_t * rx_ts_info;
-    struct rx_packet *c, *nc;
+    struct opr_queue *cursor, *store;
     SPLVAR;
 
     osi_Assert(num_pkts >= 0);
     RX_TS_INFO_GET(rx_ts_info);
 
     if (!num_pkts) {
-       for (queue_Scan(q, c, nc, rx_packet), num_pkts++) {
-           rxi_FreeDataBufsTSFPQ(c, 2, 0);
+       for (opr_queue_ScanSafe(q, cursor, store)) {
+           num_pkts++;
+           rxi_FreeDataBufsTSFPQ(opr_queue_Entry(cursor, struct rx_packet, 
+                                                entry), 2, 0);
        }
     } else {
-       for (queue_Scan(q, c, nc, rx_packet)) {
-           rxi_FreeDataBufsTSFPQ(c, 2, 0);
+       for (opr_queue_ScanSafe(q, cursor, store)) {
+           rxi_FreeDataBufsTSFPQ(opr_queue_Entry(cursor, struct rx_packet, 
+                                                entry), 2, 0);
        }
     }
 
@@ -419,27 +418,33 @@ rxi_FreePackets(int num_pkts, struct rx_queue * q)
 #else /* RX_ENABLE_TSFPQ */
 /* num_pkts=0 means queue length is unknown */
 int
-rxi_FreePackets(int num_pkts, struct rx_queue *q)
+rxi_FreePackets(int num_pkts, struct opr_queue *q)
 {
-    struct rx_queue cbs;
-    struct rx_packet *p, *np;
+    struct opr_queue cbs;
+    struct opr_queue *cursor, *store;
     int qlen = 0;
     SPLVAR;
 
     osi_Assert(num_pkts >= 0);
-    queue_Init(&cbs);
+    opr_queue_Init(&cbs);
 
     if (!num_pkts) {
-        for (queue_Scan(q, p, np, rx_packet), num_pkts++) {
+        for (opr_queue_ScanSafe(q, cursor, store)) {
+           struct rx_packet *p
+               = opr_queue_Entry(cursor, struct rx_packet, entry);
            if (p->niovecs > 2) {
                qlen += rxi_FreeDataBufsToQueue(p, 2, &cbs);
            }
             RX_FPQ_MARK_FREE(p);
+           num_pkts++;
        }
        if (!num_pkts)
            return 0;
     } else {
-        for (queue_Scan(q, p, np, rx_packet)) {
+        for (opr_queue_ScanSafe(q, cursor, store)) {
+           struct rx_packet *p
+               = opr_queue_Entry(cursor, struct rx_packet, entry);
+
            if (p->niovecs > 2) {
                qlen += rxi_FreeDataBufsToQueue(p, 2, &cbs);
            }
@@ -448,7 +453,7 @@ rxi_FreePackets(int num_pkts, struct rx_queue *q)
     }
 
     if (qlen) {
-       queue_SpliceAppend(q, &cbs);
+       opr_queue_SpliceAppend(q, &cbs);
        qlen += num_pkts;
     } else
        qlen = num_pkts;
@@ -456,7 +461,7 @@ rxi_FreePackets(int num_pkts, struct rx_queue *q)
     NETPRI;
     MUTEX_ENTER(&rx_freePktQ_lock);
 
-    queue_SpliceAppend(&rx_freePacketQueue, q);
+    opr_queue_SpliceAppend(&rx_freePacketQueue, q);
     rx_nFreePackets += qlen;
 
     /* Wakeup anyone waiting for packets */
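
This is the locking pattern the non-TSFPQ free path relies on:
continuation buffers are collected lock-free on a queue local to the
caller, then the whole batch is published with a single O(1) splice
under rx_freePktQ_lock.  The shape, reduced to a sketch:

    struct opr_queue local;
    opr_queue_Init(&local);
    /* ... opr_queue_Append(&local, &cb->entry) for each freed buffer ... */

    MUTEX_ENTER(&rx_freePktQ_lock);
    opr_queue_SpliceAppend(&rx_freePacketQueue, &local);
    rx_nFreePackets += qlen;
    MUTEX_EXIT(&rx_freePktQ_lock);
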
@@ -506,8 +511,7 @@ int
 rxi_AllocDataBuf(struct rx_packet *p, int nb, int class)
 {
     int i, nv;
-    struct rx_queue q;
-    struct rx_packet *cb, *ncb;
+    struct opr_queue q, *cursor, *store;
 
     /* compute the number of cbuf's we need */
     nv = nb / RX_CBUFFERSIZE;
@@ -519,14 +523,19 @@ rxi_AllocDataBuf(struct rx_packet *p, int nb, int class)
         return nb;
 
     /* allocate buffers */
-    queue_Init(&q);
+    opr_queue_Init(&q);
     nv = AllocPacketBufs(class, nv, &q);
 
     /* setup packet iovs */
-    for (i = p->niovecs, queue_Scan(&q, cb, ncb, rx_packet), i++) {
-        queue_Remove(cb);
+    i = p->niovecs;
+    for (opr_queue_ScanSafe(&q, cursor, store)) {
+       struct rx_packet *cb
+           = opr_queue_Entry(cursor, struct rx_packet, entry);
+
+        opr_queue_Remove(&cb->entry);
         p->wirevec[i].iov_base = (caddr_t) cb->localdata;
         p->wirevec[i].iov_len = RX_CBUFFERSIZE;
+       i++;
     }
 
     nb -= (nv * RX_CBUFFERSIZE);
@@ -536,6 +545,33 @@ rxi_AllocDataBuf(struct rx_packet *p, int nb, int class)
     return nb;
 }
 
+/**
+ * Register allocated packets.
+ *
+ * @param[in] addr array of packets
+ * @param[in] npkt number of packets
+ *
+ * @return none
+ */
+static void
+registerPackets(struct rx_packet *addr, afs_uint32 npkt)
+{
+    struct rx_mallocedPacket *mp;
+
+    mp = osi_Alloc(sizeof(*mp));
+
+    osi_Assert(mp != NULL);
+    memset(mp, 0, sizeof(*mp));
+
+    mp->addr = addr;
+    osi_Assert(npkt <= MAX_AFS_UINT32 / sizeof(struct rx_packet));
+    mp->size = npkt * sizeof(struct rx_packet);
+
+    MUTEX_ENTER(&rx_mallocedPktQ_lock);
+    opr_queue_Append(&rx_mallocedPacketQueue, &mp->entry);
+    MUTEX_EXIT(&rx_mallocedPktQ_lock);
+}
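
registerPackets() exists so that rxi_FreeAllPackets(), rewritten below,
can walk rx_mallocedPacketQueue and release every array; the old code
freed only rx_mallocedP, with a hard-coded size.  Each growth path now
pairs the allocation with a registration:

    getme = apackets * sizeof(struct rx_packet);
    p = osi_Alloc(getme);
    osi_Assert(p);
    registerPackets(p, apackets);   /* remembered for rxi_FreeAllPackets() */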
+
 /* Add more packet buffers */
 #ifdef RX_ENABLE_TSFPQ
 void
@@ -547,8 +583,9 @@ rxi_MorePackets(int apackets)
     SPLVAR;
 
     getme = apackets * sizeof(struct rx_packet);
-    p = (struct rx_packet *)osi_Alloc(getme);
+    p = osi_Alloc(getme);
     osi_Assert(p);
+    registerPackets(p, apackets);
 
     PIN(p, getme);             /* XXXXX */
     memset(p, 0, getme);
@@ -601,8 +638,9 @@ rxi_MorePackets(int apackets)
     SPLVAR;
 
     getme = apackets * sizeof(struct rx_packet);
-    p = (struct rx_packet *)osi_Alloc(getme);
+    p = osi_Alloc(getme);
     osi_Assert(p);
+    registerPackets(p, apackets);
 
     PIN(p, getme);             /* XXXXX */
     memset(p, 0, getme);
@@ -616,7 +654,7 @@ rxi_MorePackets(int apackets)
 #endif
        p->niovecs = 2;
 
-       queue_Append(&rx_freePacketQueue, p);
+       opr_queue_Append(&rx_freePacketQueue, &p->entry);
 #ifdef RXDEBUG_PACKET
         p->packetId = rx_packet_id++;
         p->allNextp = rx_mallocedP;
@@ -644,7 +682,8 @@ rxi_MorePacketsTSFPQ(int apackets, int flush_global, int num_keep_local)
     SPLVAR;
 
     getme = apackets * sizeof(struct rx_packet);
-    p = (struct rx_packet *)osi_Alloc(getme);
+    p = osi_Alloc(getme);
+    registerPackets(p, apackets);
 
     PIN(p, getme);             /* XXXXX */
     memset(p, 0, getme);
@@ -691,7 +730,7 @@ rxi_MorePacketsTSFPQ(int apackets, int flush_global, int num_keep_local)
 
 #ifndef KERNEL
 /* Add more packet buffers */
-void
+static void
 rxi_MorePacketsNoLock(int apackets)
 {
 #ifdef RX_ENABLE_TSFPQ
@@ -706,13 +745,14 @@ rxi_MorePacketsNoLock(int apackets)
        * ((rx_maxJumboRecvSize - RX_FIRSTBUFFERSIZE) / RX_CBUFFERSIZE);
     do {
         getme = apackets * sizeof(struct rx_packet);
-        p = (struct rx_packet *)osi_Alloc(getme);
+        p = osi_Alloc(getme);
        if (p == NULL) {
             apackets -= apackets / 4;
             osi_Assert(apackets > 0);
         }
     } while(p == NULL);
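
The userspace grow path deliberately tolerates allocation failure: each
miss shrinks the request by a quarter and retries.  A standalone demo
of the resulting sequence (hypothetical starting request of 64; note
that the integer division stops shrinking requests of three packets or
fewer):

    #include <stdio.h>

    int main(void)
    {
        int apackets = 64, i;
        for (i = 0; i < 6; i++) {
            printf("try %d packets\n", apackets);
            apackets -= apackets / 4;   /* 64, 48, 36, 27, 21, 16 */
        }
        return 0;
    }
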
     memset(p, 0, getme);
+    registerPackets(p, apackets);
 
 #ifdef RX_ENABLE_TSFPQ
     RX_TS_INFO_GET(rx_ts_info);
@@ -726,7 +766,7 @@ rxi_MorePacketsNoLock(int apackets)
 #endif
        p->niovecs = 2;
 
-       queue_Append(&rx_freePacketQueue, p);
+       opr_queue_Append(&rx_freePacketQueue, &p->entry);
 #ifdef RXDEBUG_PACKET
         p->packetId = rx_packet_id++;
         p->allNextp = rx_mallocedP;
@@ -749,15 +789,23 @@ rxi_MorePacketsNoLock(int apackets)
 void
 rxi_FreeAllPackets(void)
 {
-    /* must be called at proper interrupt level, etcetera */
-    /* MTUXXX need to free all Packets */
-    osi_Free(rx_mallocedP,
-            (rx_maxReceiveWindow + 2) * sizeof(struct rx_packet));
-    UNPIN(rx_mallocedP, (rx_maxReceiveWindow + 2) * sizeof(struct rx_packet));
+    struct rx_mallocedPacket *mp;
+
+    MUTEX_ENTER(&rx_mallocedPktQ_lock);
+
+    while (!opr_queue_IsEmpty(&rx_mallocedPacketQueue)) {
+       mp = opr_queue_First(&rx_mallocedPacketQueue,
+                            struct rx_mallocedPacket, entry);
+       opr_queue_Remove(&mp->entry);
+       osi_Free(mp->addr, mp->size);
+       UNPIN(mp->addr, mp->size);
+       osi_Free(mp, sizeof(*mp));
+    }
+    MUTEX_EXIT(&rx_mallocedPktQ_lock);
 }
 
 #ifdef RX_ENABLE_TSFPQ
-void
+static void
 rxi_AdjustLocalPacketsTSFPQ(int num_keep_local, int allow_overcommit)
 {
     struct rx_ts_info_t * rx_ts_info;
@@ -819,33 +867,20 @@ rx_CheckPackets(void)
    */
 
 /* Actually free the packet p. */
-#ifdef RX_ENABLE_TSFPQ
-void
-rxi_FreePacketNoLock(struct rx_packet *p)
-{
-    struct rx_ts_info_t * rx_ts_info;
-    dpf(("Free %"AFS_PTR_FMT"\n", p));
-
-    RX_TS_INFO_GET(rx_ts_info);
-    RX_TS_FPQ_CHECKIN(rx_ts_info,p);
-    if (rx_ts_info->_FPQ.len > rx_TSFPQLocalMax) {
-        RX_TS_FPQ_LTOG(rx_ts_info);
-    }
-}
-#else /* RX_ENABLE_TSFPQ */
-void
+#ifndef RX_ENABLE_TSFPQ
+static void
 rxi_FreePacketNoLock(struct rx_packet *p)
 {
     dpf(("Free %"AFS_PTR_FMT"\n", p));
 
     RX_FPQ_MARK_FREE(p);
     rx_nFreePackets++;
-    queue_Append(&rx_freePacketQueue, p);
+    opr_queue_Append(&rx_freePacketQueue, &p->entry);
 }
 #endif /* RX_ENABLE_TSFPQ */
 
 #ifdef RX_ENABLE_TSFPQ
-void
+static void
 rxi_FreePacketTSFPQ(struct rx_packet *p, int flush_global)
 {
     struct rx_ts_info_t * rx_ts_info;
@@ -881,7 +916,7 @@ rxi_FreePacketTSFPQ(struct rx_packet *p, int flush_global)
  */
 #ifndef RX_ENABLE_TSFPQ
 static int
-rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first, struct rx_queue * q)
+rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first, struct opr_queue * q)
 {
     struct iovec *iov;
     struct rx_packet * cb;
@@ -893,14 +928,13 @@ rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first, struct rx_queue *
            osi_Panic("rxi_FreeDataBufsToQueue: unexpected NULL iov");
        cb = RX_CBUF_TO_PACKET(iov->iov_base, p);
        RX_FPQ_MARK_FREE(cb);
-       queue_Append(q, cb);
+       opr_queue_Append(q, &cb->entry);
     }
     p->length = 0;
     p->niovecs = 0;
 
     return count;
 }
-#endif
 
 /*
  * free packet continuation buffers into the global free packet pool
@@ -911,7 +945,7 @@ rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first, struct rx_queue *
  * returns:
  *   zero always
  */
-int
+static int
 rxi_FreeDataBufsNoLock(struct rx_packet *p, afs_uint32 first)
 {
     struct iovec *iov;
@@ -928,7 +962,8 @@ rxi_FreeDataBufsNoLock(struct rx_packet *p, afs_uint32 first)
     return 0;
 }
 
-#ifdef RX_ENABLE_TSFPQ
+#else
+
 /*
  * free packet continuation buffers into the thread-local free pool
  *
@@ -986,7 +1021,7 @@ void
 rxi_RestoreDataBufs(struct rx_packet *p)
 {
     unsigned int i;
-    struct iovec *iov = &p->wirevec[2];
+    struct iovec *iov;
 
     RX_PACKET_IOV_INIT(p);
 
@@ -1122,7 +1157,7 @@ rxi_FreePacket(struct rx_packet *p)
  * The header is absolutely necessary, besides, this is the way the
  * length field is usually used */
 #ifdef RX_ENABLE_TSFPQ
-struct rx_packet *
+static struct rx_packet *
 rxi_AllocPacketNoLock(int class)
 {
     struct rx_packet *p;
@@ -1130,41 +1165,15 @@ rxi_AllocPacketNoLock(int class)
 
     RX_TS_INFO_GET(rx_ts_info);
 
-#ifdef KERNEL
-    if (rxi_OverQuota(class)) {
-       rxi_NeedMorePackets = TRUE;
-        if (rx_stats_active) {
-            switch (class) {
-            case RX_PACKET_CLASS_RECEIVE:
-                rx_atomic_inc(&rx_stats.receivePktAllocFailures);
-                break;
-            case RX_PACKET_CLASS_SEND:
-                rx_atomic_inc(&rx_stats.sendPktAllocFailures);
-                break;
-            case RX_PACKET_CLASS_SPECIAL:
-                rx_atomic_inc(&rx_stats.specialPktAllocFailures);
-                break;
-            case RX_PACKET_CLASS_RECV_CBUF:
-                rx_atomic_inc(&rx_stats.receiveCbufPktAllocFailures);
-                break;
-            case RX_PACKET_CLASS_SEND_CBUF:
-                rx_atomic_inc(&rx_stats.sendCbufPktAllocFailures);
-                break;
-            }
-       }
-        return (struct rx_packet *)0;
-    }
-#endif /* KERNEL */
-
     if (rx_stats_active)
         rx_atomic_inc(&rx_stats.packetRequests);
-    if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
+    if (opr_queue_IsEmpty(&rx_ts_info->_FPQ.queue)) {
 
 #ifdef KERNEL
-        if (queue_IsEmpty(&rx_freePacketQueue))
+        if (opr_queue_IsEmpty(&rx_freePacketQueue))
            osi_Panic("rxi_AllocPacket error");
 #else /* KERNEL */
-        if (queue_IsEmpty(&rx_freePacketQueue))
+        if (opr_queue_IsEmpty(&rx_freePacketQueue))
            rxi_MorePacketsNoLock(rx_maxSendWindow);
 #endif /* KERNEL */
 
@@ -1185,7 +1194,7 @@ rxi_AllocPacketNoLock(int class)
     return p;
 }
 #else /* RX_ENABLE_TSFPQ */
-struct rx_packet *
+static struct rx_packet *
 rxi_AllocPacketNoLock(int class)
 {
     struct rx_packet *p;
@@ -1220,16 +1229,16 @@ rxi_AllocPacketNoLock(int class)
         rx_atomic_inc(&rx_stats.packetRequests);
 
 #ifdef KERNEL
-    if (queue_IsEmpty(&rx_freePacketQueue))
+    if (opr_queue_IsEmpty(&rx_freePacketQueue))
        osi_Panic("rxi_AllocPacket error");
 #else /* KERNEL */
-    if (queue_IsEmpty(&rx_freePacketQueue))
+    if (opr_queue_IsEmpty(&rx_freePacketQueue))
        rxi_MorePacketsNoLock(rx_maxSendWindow);
 #endif /* KERNEL */
 
     rx_nFreePackets--;
-    p = queue_First(&rx_freePacketQueue, rx_packet);
-    queue_Remove(p);
+    p = opr_queue_First(&rx_freePacketQueue, struct rx_packet, entry);
+    opr_queue_Remove(&p->entry);
     RX_FPQ_MARK_USED(p);
 
     dpf(("Alloc %"AFS_PTR_FMT", class %d\n", p, class));
@@ -1245,7 +1254,7 @@ rxi_AllocPacketNoLock(int class)
 #endif /* RX_ENABLE_TSFPQ */
 
 #ifdef RX_ENABLE_TSFPQ
-struct rx_packet *
+static struct rx_packet *
 rxi_AllocPacketTSFPQ(int class, int pull_global)
 {
     struct rx_packet *p;
@@ -1255,16 +1264,16 @@ rxi_AllocPacketTSFPQ(int class, int pull_global)
 
     if (rx_stats_active)
         rx_atomic_inc(&rx_stats.packetRequests);
-    if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
+    if (pull_global && opr_queue_IsEmpty(&rx_ts_info->_FPQ.queue)) {
         MUTEX_ENTER(&rx_freePktQ_lock);
 
-        if (queue_IsEmpty(&rx_freePacketQueue))
+        if (opr_queue_IsEmpty(&rx_freePacketQueue))
            rxi_MorePacketsNoLock(rx_maxSendWindow);
 
        RX_TS_FPQ_GTOL(rx_ts_info);
 
         MUTEX_EXIT(&rx_freePktQ_lock);
-    } else if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
+    } else if (opr_queue_IsEmpty(&rx_ts_info->_FPQ.queue)) {
         return NULL;
     }
 
@@ -1374,9 +1383,7 @@ rxi_AllocSendPacket(struct rx_call *call, int want)
         * just wait.  */
        NETPRI;
        call->flags |= RX_CALL_WAIT_PACKETS;
-        MUTEX_ENTER(&rx_refcnt_mutex);
        CALL_HOLD(call, RX_CALL_REFCOUNT_PACKET);
-        MUTEX_EXIT(&rx_refcnt_mutex);
        MUTEX_EXIT(&call->lock);
        rx_waitingForPackets = 1;
 
@@ -1387,9 +1394,7 @@ rxi_AllocSendPacket(struct rx_call *call, int want)
 #endif
        MUTEX_EXIT(&rx_freePktQ_lock);
        MUTEX_ENTER(&call->lock);
-        MUTEX_ENTER(&rx_refcnt_mutex);
        CALL_RELE(call, RX_CALL_REFCOUNT_PACKET);
-        MUTEX_EXIT(&rx_refcnt_mutex);
        call->flags &= ~RX_CALL_WAIT_PACKETS;
        USERPRI;
     }
@@ -1437,7 +1442,7 @@ rxi_ReadPacket(osi_socket socket, struct rx_packet *p, afs_uint32 * host,
               u_short * port)
 {
     struct sockaddr_in from;
-    unsigned int nbytes;
+    int nbytes;
     afs_int32 rlen;
     afs_uint32 tlen, savelen;
     struct msghdr msg;
@@ -1476,7 +1481,7 @@ rxi_ReadPacket(osi_socket socket, struct rx_packet *p, afs_uint32 * host,
     p->wirevec[p->niovecs - 1].iov_len = savelen;
 
     p->length = (u_short)(nbytes - RX_HEADER_SIZE);
-    if ((nbytes > tlen) || (p->length & 0x8000)) {     /* Bogus packet */
+    if (nbytes < 0 || (nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */
        if (nbytes < 0 && errno == EWOULDBLOCK) {
             if (rx_stats_active)
                 rx_atomic_inc(&rx_stats.noPacketOnRead);
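
The declaration change above (unsigned int nbytes becoming int) is what
gives the new nbytes < 0 test meaning: while nbytes was unsigned, a -1
error return from the read wrapped to a huge positive value and the
EWOULDBLOCK branch was unreachable.  A two-line standalone
illustration:

    #include <stdio.h>

    int main(void)
    {
        unsigned int u = (unsigned int)-1;
        int s = -1;
        printf("%d\n", u < 0);  /* 0: unsigned comparison, always false */
        printf("%d\n", s < 0);  /* 1: the error is now detectable */
        return 0;
    }
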
@@ -1485,7 +1490,7 @@ rxi_ReadPacket(osi_socket socket, struct rx_packet *p, afs_uint32 * host,
                 rx_atomic_inc(&rx_stats.bogusPacketOnRead);
                 rx_stats.bogusHost = from.sin_addr.s_addr;
             }
-           dpf(("B: bogus packet from [%x,%d] nb=%d", ntohl(from.sin_addr.s_addr),
+           dpf(("B: bogus packet from [%x,%d] nb=%d\n", ntohl(from.sin_addr.s_addr),
                 ntohs(from.sin_port), nbytes));
        }
        return 0;
@@ -1498,7 +1503,7 @@ rxi_ReadPacket(osi_socket socket, struct rx_packet *p, afs_uint32 * host,
        *host = from.sin_addr.s_addr;
        *port = from.sin_port;
 
-       dpf(("Dropped %d %s: %x.%u.%u.%u.%u.%u.%u flags %d len %d",
+       dpf(("Dropped %d %s: %x.%u.%u.%u.%u.%u.%u flags %d len %d\n",
              p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(*host), ntohs(*port), p->header.serial,
              p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags,
              p->length));
@@ -1514,32 +1519,10 @@ rxi_ReadPacket(osi_socket socket, struct rx_packet *p, afs_uint32 * host,
 
        *host = from.sin_addr.s_addr;
        *port = from.sin_port;
-       if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
-            if (rx_stats_active) {
-                struct rx_peer *peer;
-                rx_atomic_inc(&rx_stats.packetsRead[p->header.type - 1]);
-                /*
-                 * Try to look up this peer structure.  If it doesn't exist,
-                 * don't create a new one -
-                 * we don't keep count of the bytes sent/received if a peer
-                 * structure doesn't already exist.
-                 *
-                 * The peer/connection cleanup code assumes that there is 1 peer
-                 * per connection.  If we actually created a peer structure here
-                 * and this packet was an rxdebug packet, the peer structure would
-                 * never be cleaned up.
-                 */
-                peer = rxi_FindPeer(*host, *port, 0, 0);
-                /* Since this may not be associated with a connection,
-                 * it may have no refCount, meaning we could race with
-                 * ReapConnections
-                 */
-                if (peer && (peer->refCount > 0)) {
-                    MUTEX_ENTER(&peer->peer_lock);
-                    hadd32(peer->bytesReceived, p->length);
-                    MUTEX_EXIT(&peer->peer_lock);
-                }
-            }
+       if (rx_stats_active
+           && p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
+
+               rx_atomic_inc(&rx_stats.packetsRead[p->header.type - 1]);
        }
 
 #ifdef RX_TRIMDATABUFS
@@ -1615,6 +1598,7 @@ rxi_SplitJumboPacket(struct rx_packet *p, afs_uint32 host, short port,
     np->header = p->header;
     np->header.serial = p->header.serial + 1;
     np->header.seq = p->header.seq + 1;
+    np->header.userStatus = 0;
     np->header.flags = jp->flags;
     np->header.spare = jp->cksum;
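
This hunk is the change the commit is named for: rx packets are
recycled, and a split jumbo packet inherits its header wholesale from
the parent via the struct copy above, including whatever user-status
byte an earlier call left behind.  userStatus is application-owned
end-to-end data, so it must be cleared on every reuse; the same
one-line reset is applied to rxi_SendSpecial() and
rxi_PrepareSendPacket() below:

    np->header = p->header;      /* copies a possibly stale userStatus */
    np->header.userStatus = 0;   /* so reset it before the packet ships */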
 
@@ -1764,6 +1748,10 @@ m_cpytoiovec(struct mbuf *m, int off, int len, struct iovec iovs[], int niovs)
 #endif /* AFS_SUN5_ENV */
 
 #if !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN80_ENV)
+#if defined(AFS_NBSD_ENV)
+int
+rx_mb_to_packet(struct mbuf *amb, void (*free) (struct mbuf *), int hdr_len, int data_len, struct rx_packet *phandle)
+#else
 int
 rx_mb_to_packet(amb, free, hdr_len, data_len, phandle)
 #if defined(AFS_SUN5_ENV) || defined(AFS_HPUX110_ENV)
@@ -1774,6 +1762,7 @@ rx_mb_to_packet(amb, free, hdr_len, data_len, phandle)
      void (*free) ();
      struct rx_packet *phandle;
      int hdr_len, data_len;
+#endif /* AFS_NBSD_ENV */
 {
     int code;
 
@@ -1796,7 +1785,6 @@ rxi_ReceiveDebugPacket(struct rx_packet *ap, osi_socket asocket,
 {
     struct rx_debugIn tin;
     afs_int32 tl;
-    struct rx_serverQueueEntry *np, *nqe;
 
     /*
      * Only respond to client-initiated Rx debug packets,
@@ -1834,8 +1822,7 @@ rxi_ReceiveDebugPacket(struct rx_packet *ap, osi_socket asocket,
            tstat.usedFDs = CountFDs(64);
            tstat.nWaiting = htonl(rx_atomic_read(&rx_nWaiting));
            tstat.nWaited = htonl(rx_atomic_read(&rx_nWaited));
-           queue_Count(&rx_idleServerQueue, np, nqe, rx_serverQueueEntry,
-                       tstat.idleThreads);
+           tstat.idleThreads = opr_queue_Count(&rx_idleServerQueue);
            MUTEX_EXIT(&rx_serverPool_lock);
            tstat.idleThreads = htonl(tstat.idleThreads);
            tl = sizeof(struct rx_debugStats) - ap->length;
@@ -1896,11 +1883,11 @@ rxi_ReceiveDebugPacket(struct rx_packet *ap, osi_socket asocket,
                            tconn.callNumber[j] = htonl(tc->callNumber[j]);
                            if ((tcall = tc->call[j])) {
                                tconn.callState[j] = tcall->state;
-                               tconn.callMode[j] = tcall->mode;
+                               tconn.callMode[j] = tcall->app.mode;
                                tconn.callFlags[j] = tcall->flags;
-                               if (queue_IsNotEmpty(&tcall->rq))
+                               if (!opr_queue_IsEmpty(&tcall->rq))
                                    tconn.callOther[j] |= RX_OTHER_IN;
-                               if (queue_IsNotEmpty(&tcall->tq))
+                               if (!opr_queue_IsEmpty(&tcall->tq))
                                    tconn.callOther[j] |= RX_OTHER_OUT;
                            } else
                                tconn.callState[j] = RX_STATE_NOTINIT;
@@ -2004,19 +1991,14 @@ rxi_ReceiveDebugPacket(struct rx_packet *ap, osi_socket asocket,
                        tpeer.ifMTU = htons(tp->ifMTU);
                        tpeer.idleWhen = htonl(tp->idleWhen);
                        tpeer.refCount = htons(tp->refCount);
-                       tpeer.burstSize = tp->burstSize;
-                       tpeer.burst = tp->burst;
-                       tpeer.burstWait.sec = htonl(tp->burstWait.sec);
-                       tpeer.burstWait.usec = htonl(tp->burstWait.usec);
+                       tpeer.burstSize = 0;
+                       tpeer.burst = 0;
+                       tpeer.burstWait.sec = 0;
+                       tpeer.burstWait.usec = 0;
                        tpeer.rtt = htonl(tp->rtt);
                        tpeer.rtt_dev = htonl(tp->rtt_dev);
-                       tpeer.timeout.sec = htonl(tp->timeout.sec);
-                       tpeer.timeout.usec = htonl(tp->timeout.usec);
                        tpeer.nSent = htonl(tp->nSent);
                        tpeer.reSends = htonl(tp->reSends);
-                       tpeer.inPacketSkew = htonl(tp->inPacketSkew);
-                       tpeer.outPacketSkew = htonl(tp->outPacketSkew);
-                       tpeer.rateFlag = htonl(tp->rateFlag);
                        tpeer.natMTU = htons(tp->natMTU);
                        tpeer.maxMTU = htons(tp->maxMTU);
                        tpeer.maxDgramPackets = htons(tp->maxDgramPackets);
@@ -2025,12 +2007,14 @@ rxi_ReceiveDebugPacket(struct rx_packet *ap, osi_socket asocket,
                        tpeer.cwind = htons(tp->cwind);
                        tpeer.nDgramPackets = htons(tp->nDgramPackets);
                        tpeer.congestSeq = htons(tp->congestSeq);
-                       tpeer.bytesSent.high = htonl(tp->bytesSent.high);
-                       tpeer.bytesSent.low = htonl(tp->bytesSent.low);
+                       tpeer.bytesSent.high =
+                           htonl(tp->bytesSent >> 32);
+                       tpeer.bytesSent.low =
+                           htonl(tp->bytesSent & MAX_AFS_UINT32);
                        tpeer.bytesReceived.high =
-                           htonl(tp->bytesReceived.high);
+                           htonl(tp->bytesReceived >> 32);
                        tpeer.bytesReceived.low =
-                           htonl(tp->bytesReceived.low);
+                           htonl(tp->bytesReceived & MAX_AFS_UINT32);
                         MUTEX_EXIT(&tp->peer_lock);
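
The peer byte counters changed from the old two-word hyper type
(hadd32() and friends) to native 64-bit integers, so the debug wire
format's high/low split becomes a shift and a mask.  A standalone
round-trip demo in host byte order (the real code applies htonl() to
each half; MAX_AFS_UINT32 is 0xffffffff):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t bytesSent = 0x123456789abcdef0ULL;
        uint32_t high = (uint32_t)(bytesSent >> 32);
        uint32_t low  = (uint32_t)(bytesSent & 0xffffffffULL);
        uint64_t rejoined = ((uint64_t)high << 32) | low;

        printf("%08x %08x %d\n", (unsigned)high, (unsigned)low,
               (int)(rejoined == bytesSent));   /* 12345678 9abcdef0 1 */
        return 0;
    }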
 
                         MUTEX_ENTER(&rx_peerHashTable_lock);
@@ -2071,16 +2055,16 @@ rxi_ReceiveDebugPacket(struct rx_packet *ap, osi_socket asocket,
                return ap;
 
	    /* Since it's all int32s, convert to network order with a loop. */
-        if (rx_stats_active)
-           MUTEX_ENTER(&rx_stats_mutex);
+           if (rx_stats_active)
+               MUTEX_ENTER(&rx_stats_mutex);
            s = (afs_int32 *) & rx_stats;
            for (i = 0; i < sizeof(rx_stats) / sizeof(afs_int32); i++, s++)
                rx_PutInt32(ap, i * sizeof(afs_int32), htonl(*s));
 
            tl = ap->length;
            ap->length = sizeof(rx_stats);
-        if (rx_stats_active)
-           MUTEX_EXIT(&rx_stats_mutex);
+           if (rx_stats_active)
+               MUTEX_EXIT(&rx_stats_mutex);
            rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
            ap->length = tl;
            break;
@@ -2143,6 +2127,7 @@ rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
     taddr.sin_family = AF_INET;
     taddr.sin_port = aport;
     taddr.sin_addr.s_addr = ahost;
+    memset(&taddr.sin_zero, 0, sizeof(taddr.sin_zero));
 #ifdef STRUCT_SOCKADDR_HAS_SA_LEN
     taddr.sin_len = sizeof(struct sockaddr_in);
 #endif
@@ -2166,7 +2151,7 @@ rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
        afs_Trace1(afs_iclSetp, CM_TRACE_TIMESTAMP, ICL_TYPE_STRING,
                   "before osi_NetSend()");
        AFS_GUNLOCK();
-    } else
+    }
 #else
     if (waslocked)
        AFS_GUNLOCK();
@@ -2183,7 +2168,7 @@ rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
                   "after osi_NetSend()");
        if (!waslocked)
            AFS_GUNLOCK();
-    } else
+    }
 #else
     if (waslocked)
        AFS_GLOCK();
@@ -2196,6 +2181,31 @@ rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
 
 }
 
+static void
+rxi_NetSendError(struct rx_call *call, int code)
+{
+    int down = 0;
+#ifdef AFS_NT40_ENV
+    if (code == -1 && WSAGetLastError() == WSAEHOSTUNREACH) {
+       down = 1;
+    }
+    if (code == -WSAEHOSTUNREACH) {
+       down = 1;
+    }
+#elif defined(AFS_LINUX20_ENV)
+    if (code == -ENETUNREACH) {
+       down = 1;
+    }
+#elif defined(AFS_DARWIN_ENV)
+    if (code == EHOSTUNREACH) {
+       down = 1;
+    }
+#endif
+    if (down) {
+       call->lastReceiveTime = 0;
+    }
+}
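
rxi_NetSendError() consolidates the per-platform "host unreachable"
tests that were previously duplicated inline at both send sites (see
the deletions further down).  Zeroing lastReceiveTime marks the peer
down immediately instead of leaving it to time out.  The call-site
shape, as used by rxi_SendPacket() and rxi_SendPacketList():

    code = osi_NetSend(socket, &addr, p->wirevec, p->niovecs,
                       p->length + RX_HEADER_SIZE, istack);
    if (code) {
        if (rx_stats_active)
            rx_atomic_inc(&rx_stats.netSendFailures);
        p->flags &= ~RX_PKTFLAG_SENT;   /* resend it very soon */
        if (call)
            rxi_NetSendError(call, code);
    }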
+
 /* Send the packet to appropriate destination for the specified
  * call.  The header is first encoded and placed in the packet.
  */
@@ -2218,6 +2228,7 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
     addr.sin_family = AF_INET;
     addr.sin_port = peer->port;
     addr.sin_addr.s_addr = peer->host;
+    memset(&addr.sin_zero, 0, sizeof(addr.sin_zero));
 
     /* This stuff should be revamped, I think, so that most, if not
      * all, of the header stuff is always added here.  We could
@@ -2293,7 +2304,7 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
            afs_Trace1(afs_iclSetp, CM_TRACE_TIMESTAMP, ICL_TYPE_STRING,
                       "before osi_NetSend()");
            AFS_GUNLOCK();
-       } else
+       }
 #else
        if (waslocked)
            AFS_GUNLOCK();
@@ -2305,26 +2316,16 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
            /* send failed, so let's hurry up the resend, eh? */
             if (rx_stats_active)
                 rx_atomic_inc(&rx_stats.netSendFailures);
-           p->retryTime = p->timeSent; /* resend it very soon */
-           clock_Addmsec(&(p->retryTime),
-                         10 + (((afs_uint32) p->backoff) << 8));
+           p->flags &= ~RX_PKTFLAG_SENT; /* resend it very soon */
+
            /* Some systems are nice and tell us right away that we cannot
             * reach this recipient by returning an error code.
             * So, when this happens let's "down" the host NOW so
             * we don't sit around waiting for this host to timeout later.
             */
-           if (call &&
-#ifdef AFS_NT40_ENV
-               (code == -1 && WSAGetLastError() == WSAEHOSTUNREACH) || (code == -WSAEHOSTUNREACH)
-#elif defined(AFS_LINUX20_ENV)
-               code == -ENETUNREACH
-#elif defined(AFS_DARWIN_ENV)
-               code == EHOSTUNREACH
-#else
-               0
-#endif
-               )
-               call->lastReceiveTime = 0;
+           if (call) {
+               rxi_NetSendError(call, code);
+           }
        }
 #ifdef KERNEL
 #ifdef RX_KERNEL_TRACE
@@ -2334,7 +2335,7 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
                       "after osi_NetSend()");
            if (!waslocked)
                AFS_GUNLOCK();
-       } else
+       }
 #else
        if (waslocked)
            AFS_GLOCK();
@@ -2342,15 +2343,15 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
 #endif
 #ifdef RXDEBUG
     }
-    dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%.3d len %d",
+    dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" len %d\n",
           deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host),
           ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber,
-          p->header.seq, p->header.flags, p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+          p->header.seq, p->header.flags, p, p->length));
 #endif
     if (rx_stats_active) {
         rx_atomic_inc(&rx_stats.packetsSent[p->header.type - 1]);
         MUTEX_ENTER(&peer->peer_lock);
-        hadd32(peer->bytesSent, p->length);
+        peer->bytesSent += p->length;
         MUTEX_EXIT(&peer->peer_lock);
     }
 }
@@ -2381,6 +2382,7 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
     addr.sin_family = AF_INET;
     addr.sin_port = peer->port;
     addr.sin_addr.s_addr = peer->host;
+    memset(&addr.sin_zero, 0, sizeof(addr.sin_zero));
 
     if (len + 1 > RX_MAXIOVECS) {
        osi_Panic("rxi_SendPacketList, len > RX_MAXIOVECS\n");
@@ -2394,19 +2396,17 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
     conn->serial += len;
     for (i = 0; i < len; i++) {
        p = list[i];
+       /* a ping *or* a sequenced packet can count */
        if (p->length > conn->peer->maxPacketSize) {
-           /* a ping *or* a sequenced packet can count */
-           if ((p->length > conn->peer->maxPacketSize)) {
-               if (((p->header.type == RX_PACKET_TYPE_ACK) &&
-                    (p->header.flags & RX_REQUEST_ACK)) &&
-                   ((i == 0) || (p->length >= conn->lastPingSize))) {
-                   conn->lastPingSize = p->length;
-                   conn->lastPingSizeSer = serial + i;
-               } else if ((p->header.seq != 0) &&
-                          ((i == 0) || (p->length >= conn->lastPacketSize))) {
-                   conn->lastPacketSize = p->length;
-                   conn->lastPacketSizeSeq = p->header.seq;
-               }
+           if (((p->header.type == RX_PACKET_TYPE_ACK) &&
+                (p->header.flags & RX_REQUEST_ACK)) &&
+               ((i == 0) || (p->length >= conn->lastPingSize))) {
+               conn->lastPingSize = p->length;
+               conn->lastPingSizeSer = serial + i;
+           } else if ((p->header.seq != 0) &&
+                      ((i == 0) || (p->length >= conn->lastPacketSize))) {
+               conn->lastPacketSize = p->length;
+               conn->lastPacketSizeSeq = p->header.seq;
            }
        }
     }
@@ -2516,27 +2516,16 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
                 rx_atomic_inc(&rx_stats.netSendFailures);
            for (i = 0; i < len; i++) {
                p = list[i];
-               p->retryTime = p->timeSent;     /* resend it very soon */
-               clock_Addmsec(&(p->retryTime),
-                             10 + (((afs_uint32) p->backoff) << 8));
+               p->flags &= ~RX_PKTFLAG_SENT;  /* resend it very soon */
            }
            /* Some systems are nice and tell us right away that we cannot
             * reach this recipient by returning an error code.
             * So, when this happens let's "down" the host NOW so
             * we don't sit around waiting for this host to timeout later.
             */
-           if (call &&
-#ifdef AFS_NT40_ENV
-               (code == -1 && WSAGetLastError() == WSAEHOSTUNREACH) || (code == -WSAEHOSTUNREACH)
-#elif defined(AFS_LINUX20_ENV)
-               code == -ENETUNREACH
-#elif defined(AFS_DARWIN_ENV)
-               code == EHOSTUNREACH
-#else
-               0
-#endif
-               )
-               call->lastReceiveTime = 0;
+           if (call) {
+               rxi_NetSendError(call, code);
+           }
        }
 #if    defined(AFS_SUN5_ENV) && defined(KERNEL)
        if (!istack && waslocked)
@@ -2545,22 +2534,67 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
 #ifdef RXDEBUG
     }
 
-    assert(p != NULL);
+    osi_Assert(p != NULL);
 
-    dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%.3d len %d",
+    dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" len %d\n",
           deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host),
           ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber,
-          p->header.seq, p->header.flags, p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+          p->header.seq, p->header.flags, p, p->length));
 
 #endif
     if (rx_stats_active) {
         rx_atomic_inc(&rx_stats.packetsSent[p->header.type - 1]);
         MUTEX_ENTER(&peer->peer_lock);
-        hadd32(peer->bytesSent, p->length);
+        peer->bytesSent += p->length;
         MUTEX_EXIT(&peer->peer_lock);
     }
 }
 
+/* Send a raw abort packet, without any call or connection structures */
+void
+rxi_SendRawAbort(osi_socket socket, afs_uint32 host, u_short port,
+                afs_uint32 serial, afs_int32 error,
+                struct rx_packet *source, int istack)
+{
+    struct rx_header theader;
+    struct sockaddr_in addr;
+    struct iovec iov[2];
+
+    memset(&theader, 0, sizeof(theader));
+    theader.epoch = htonl(source->header.epoch);
+    theader.callNumber = htonl(source->header.callNumber);
+    theader.serial = htonl(serial);
+    theader.type = RX_PACKET_TYPE_ABORT;
+    theader.serviceId = htons(source->header.serviceId);
+    theader.securityIndex = source->header.securityIndex;
+    theader.cid = htonl(source->header.cid);
+
+    /*
+     * If the abort is being sent in response to a server initiated packet,
+     * set client_initiated in the abort to ensure it is not associated by
+     * the receiver with a connection in the opposite direction.
+     */
+    if ((source->header.flags & RX_CLIENT_INITIATED) != RX_CLIENT_INITIATED)
+        theader.flags |= RX_CLIENT_INITIATED;
+
+    error = htonl(error);
+
+    iov[0].iov_base = &theader;
+    iov[0].iov_len = sizeof(struct rx_header);
+    iov[1].iov_base = &error;
+    iov[1].iov_len = sizeof(error);
+
+    addr.sin_family = AF_INET;
+    addr.sin_addr.s_addr = host;
+    addr.sin_port = port;
+    memset(&addr.sin_zero, 0, sizeof(addr.sin_zero));
+#ifdef STRUCT_SOCKADDR_HAS_SA_LEN
+    addr.sin_len = sizeof(struct sockaddr_in);
+#endif
+
+    osi_NetSend(socket, &addr, iov, 2,
+               sizeof(struct rx_header) + sizeof(error), istack);
+}
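
rxi_SendRawAbort() lets rx refuse traffic without allocating any call
or connection state; it echoes enough of the offending header (epoch,
cid, callNumber, serviceId) for the peer to match the abort to its
call, and carries RX_CLIENT_INITIATED exactly when the provoking
packet did not, since a reply always travels in the opposite
direction.  A hypothetical call site, rejecting a freshly read
packet np:

    /* sketch only: host, port and istack as in the packet-read path */
    rxi_SendRawAbort(socket, host, port, 0 /* serial */,
                     RX_INVALID_OPERATION, np, istack);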
 
 /* Send a "special" packet to the peer connection.  If call is
  * specified, then the packet is directed to a specific call channel
@@ -2612,6 +2646,7 @@ rxi_SendSpecial(struct rx_call *call,
     p->header.seq = 0;
     p->header.epoch = conn->epoch;
     p->header.type = type;
+    p->header.userStatus = 0;
     p->header.flags = 0;
     if (conn->type == RX_CLIENT_CONNECTION)
        p->header.flags |= RX_CLIENT_INITIATED;
@@ -2717,13 +2752,15 @@ rxi_PrepareSendPacket(struct rx_call *call,
     afs_uint32 seq = call->tnext++;
     unsigned int i;
     afs_int32 len;             /* len must be a signed type; it can go negative */
+    int code;
 
     /* No data packets on call 0. Where do these come from? */
     if (*call->callNumber == 0)
        *call->callNumber = 1;
 
     MUTEX_EXIT(&call->lock);
-    p->flags &= ~RX_PKTFLAG_ACKED;
+    p->flags &= ~(RX_PKTFLAG_ACKED | RX_PKTFLAG_SENT);
+
     p->header.cid = (conn->cid | call->channel);
     p->header.serviceId = conn->serviceId;
     p->header.securityIndex = conn->securityIndex;
@@ -2732,6 +2769,7 @@ rxi_PrepareSendPacket(struct rx_call *call,
     p->header.seq = seq;
     p->header.epoch = conn->epoch;
     p->header.type = RX_PACKET_TYPE_DATA;
+    p->header.userStatus = 0;
     p->header.flags = 0;
     p->header.spare = 0;
     if (conn->type == RX_CLIENT_CONNECTION)
@@ -2740,10 +2778,8 @@ rxi_PrepareSendPacket(struct rx_call *call,
     if (last)
        p->header.flags |= RX_LAST_PACKET;
 
-    clock_Zero(&p->retryTime); /* Never yet transmitted */
     clock_Zero(&p->firstSent); /* Never yet transmitted */
     p->header.serial = 0;      /* Another way of saying never transmitted... */
-    p->backoff = 0;
 
     /* Now that we're sure this is the last data on the call, make sure
      * that the "length" and the sum of the iov_lens matches. */
@@ -2768,8 +2804,21 @@ rxi_PrepareSendPacket(struct rx_call *call,
     }
     if (len)
         p->wirevec[i - 1].iov_len += len;
-    RXS_PreparePacket(conn->securityObject, call, p);
     MUTEX_ENTER(&call->lock);
+    code = RXS_PreparePacket(conn->securityObject, call, p);
+    if (code) {
+       MUTEX_EXIT(&call->lock);
+       rxi_ConnectionError(conn, code);
+       MUTEX_ENTER(&conn->conn_data_lock);
+       p = rxi_SendConnectionAbort(conn, p, 0, 0);
+       MUTEX_EXIT(&conn->conn_data_lock);
+       MUTEX_ENTER(&call->lock);
+       /* setting a connection error means all calls for that conn are also
+        * error'd. if this call does not have an error by now, something is
+        * very wrong, and we risk sending data in the clear that is supposed
+        * to be encrypted. */
+       osi_Assert(call->error);
+    }
 }
 
 /* Given an interface MTU size, calculate an adjusted MTU size that
@@ -2855,9 +2904,9 @@ int rx_DumpPackets(FILE *outputFile, char *cookie)
 #endif
 
     for (p = rx_mallocedP; p; p = p->allNextp) {
-        RXDPRINTF(RXDPRINTOUT, "%s - packet=0x%p, id=%u, firstSent=%u.%08u, timeSent=%u.%08u, retryTime=%u.%08u, firstSerial=%u, niovecs=%u, flags=0x%x, backoff=%u, length=%u  header: epoch=%u, cid=%u, callNum=%u, seq=%u, serial=%u, type=%u, flags=0x%x, userStatus=%u, securityIndex=%u, serviceId=%u\r\n",
-                cookie, p, p->packetId, p->firstSent.sec, p->firstSent.usec, p->timeSent.sec, p->timeSent.usec, p->retryTime.sec, p->retryTime.usec,
-                p->firstSerial, p->niovecs, (afs_uint32)p->flags, (afs_uint32)p->backoff, (afs_uint32)p->length,
+        RXDPRINTF(RXDPRINTOUT, "%s - packet=0x%p, id=%u, firstSent=%u.%08u, timeSent=%u.%08u, firstSerial=%u, niovecs=%u, flags=0x%x, length=%u  header: epoch=%u, cid=%u, callNum=%u, seq=%u, serial=%u, type=%u, flags=0x%x, userStatus=%u, securityIndex=%u, serviceId=%u\r\n",
+                cookie, p, p->packetId, p->firstSent.sec, p->firstSent.usec, p->timeSent.sec, p->timeSent.usec,
+                p->firstSerial, p->niovecs, (afs_uint32)p->flags, (afs_uint32)p->length,
                 p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.serial,
                 (afs_uint32)p->header.type, (afs_uint32)p->header.flags, (afs_uint32)p->header.userStatus,
                 (afs_uint32)p->header.securityIndex, (afs_uint32)p->header.serviceId);