rx: Use opr queues
author Simon Wilkinson <sxw@your-file-system.com>
Fri, 12 Oct 2012 09:07:22 +0000 (10:07 +0100)
committer Derrick Brashear <shadow@your-file-system.com>
Thu, 18 Oct 2012 11:30:07 +0000 (04:30 -0700)
Modify RX so that it uses opr queues throughout, rather than the older,
non-type-safe rx_queue structure and macros. Attempt to clarify which
items in a structure are queue headers, and which are linkage pointers.

This has the knock-on effect that including an RX header doesn't
automatically give you rx_queue.h in your application's namespace.
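
As a hedged illustration of the new idiom (not part of the patch; it
assumes OpenAFS's <opr/queue.h> is on the include path, and the element
type and names here are hypothetical), queue headers and linkage members
are now used like this:

    #include <stdio.h>
    #include <opr/queue.h>

    /* Hypothetical element: 'entry' is the linkage member, which may be
     * embedded at any offset within the structure. */
    struct mypacket {
        struct opr_queue entry;
        int seq;
    };

    int
    main(void)
    {
        struct opr_queue head;          /* queue header: owns no data */
        struct mypacket p1, p2;
        struct opr_queue *cursor;

        opr_queue_Init(&head);
        p1.seq = 1;
        p2.seq = 2;
        opr_queue_Append(&head, &p1.entry); /* link via the linkage member */
        opr_queue_Append(&head, &p2.entry);

        /* The cursor walks struct opr_queue pointers; opr_queue_Entry()
         * (a container_of-style macro) recovers the enclosing structure
         * from the named member, giving type-safe traversal. */
        for (opr_queue_Scan(&head, cursor)) {
            struct mypacket *p =
                opr_queue_Entry(cursor, struct mypacket, entry);
            printf("seq %d\n", p->seq);
        }

        opr_queue_Remove(&p1.entry);    /* unlink by linkage, not element */
        return 0;
    }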

Change-Id: I1b3fbcd8c03f8153a557bd4532710bcebfe45818
Reviewed-on: http://gerrit.openafs.org/8232
Tested-by: BuildBot <buildbot@rampaginggeek.com>
Reviewed-by: Derrick Brashear <shadow@your-file-system.com>

52 files changed:
src/rx/rx.c
src/rx/rx.h
src/rx/rx_call.h
src/rx/rx_conncache.c
src/rx/rx_globals.h
src/rx/rx_packet.c
src/rx/rx_packet.h
src/rx/rx_peer.h
src/rx/rx_prototypes.h
src/rx/rx_pthread.c
src/rx/rx_rdwr.c
src/rx/rx_server.h
src/tools/dumpscan/dumpscan.h
src/tools/dumpscan/dumptool.c
src/tsalvaged/salvsync-debug.c
src/viced/afsfileprocs.c
src/viced/callback.c
src/viced/host.c
src/viced/physio.c
src/viced/serialize_state.c
src/viced/state_analyzer.c
src/viced/viced.c
src/vol/clone.c
src/vol/daemon_com.c
src/vol/devname.c
src/vol/fssync-client.c
src/vol/fssync-debug.c
src/vol/fssync-server.c
src/vol/namei_ops.c
src/vol/ntops.c
src/vol/nuke.c
src/vol/partition.c
src/vol/purge.c
src/vol/salvaged.c
src/vol/salvager.c
src/vol/salvsync-client.c
src/vol/salvsync-server.c
src/vol/vg_cache.c
src/vol/vg_scan.c
src/vol/vol-info.c
src/vol/vol-salvage.c
src/vol/volume.c
src/vol/vutil.c
src/volser/dumpstuff.c
src/volser/restorevol.c
src/volser/vol-dump.c
src/volser/vol_split.c
src/volser/volmain.c
src/volser/volprocs.c
src/volser/voltrans.c
src/volser/vos.c
src/volser/vsprocs.c

diff --git a/src/rx/rx.c b/src/rx/rx.c
index 162ae4a..12ad917 100644
@@ -72,9 +72,10 @@ extern afs_int32 afs_termState;
 # include "rx_user.h"
 #endif /* KERNEL */
 
+#include <opr/queue.h>
+
 #include "rx.h"
 #include "rx_clock.h"
-#include "rx_queue.h"
 #include "rx_atomic.h"
 #include "rx_globals.h"
 #include "rx_trace.h"
@@ -217,11 +218,11 @@ rx_atomic_t rx_nWaited = RX_ATOMIC_INIT(0);
 
 /* Incoming calls wait on this queue when there are no available
  * server processes */
-struct rx_queue rx_incomingCallQueue;
+struct opr_queue rx_incomingCallQueue;
 
 /* Server processes wait on this queue when there are no appropriate
  * calls to process */
-struct rx_queue rx_idleServerQueue;
+struct opr_queue rx_idleServerQueue;
 
 #if !defined(offsetof)
 #include <stddef.h>            /* for definition of offsetof() */
@@ -577,7 +578,7 @@ rx_InitHost(u_int host, u_int port)
 
     /* Malloc up a bunch of packets & buffers */
     rx_nFreePackets = 0;
-    queue_Init(&rx_freePacketQueue);
+    opr_queue_Init(&rx_freePacketQueue);
     rxi_NeedMorePackets = FALSE;
     rx_nPackets = 0;   /* rx_nPackets is managed by rxi_MorePackets* */
 
@@ -647,9 +648,9 @@ rx_InitHost(u_int host, u_int port)
     rxevent_Init(20, rxi_ReScheduleEvents);
 
     /* Initialize various global queues */
-    queue_Init(&rx_idleServerQueue);
-    queue_Init(&rx_incomingCallQueue);
-    queue_Init(&rx_freeCallQueue);
+    opr_queue_Init(&rx_idleServerQueue);
+    opr_queue_Init(&rx_incomingCallQueue);
+    opr_queue_Init(&rx_freeCallQueue);
 
 #if defined(AFS_NT40_ENV) && !defined(KERNEL)
     /* Initialize our list of usable IP addresses. */
@@ -773,14 +774,15 @@ rxi_rto_packet_sent(struct rx_call *call, int lastPacket, int istack)
 static_inline void
 rxi_rto_packet_acked(struct rx_call *call, int istack)
 {
-    struct rx_packet *p, *nxp;
+    struct opr_queue *cursor;
 
     rxi_rto_cancel(call);
 
-    if (queue_IsEmpty(&call->tq))
+    if (opr_queue_IsEmpty(&call->tq))
        return;
 
-    for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
+    for (opr_queue_Scan(&call->tq, cursor)) {
+       struct rx_packet *p = opr_queue_Entry(cursor, struct rx_packet, entry);
        if (p->header.seq > call->tfirst + call->twind)
            return;
 
@@ -1963,6 +1965,7 @@ void
 rx_WakeupServerProcs(void)
 {
     struct rx_serverQueueEntry *np, *tqp;
+    struct opr_queue *cursor;
     SPLVAR;
 
     NETPRI;
@@ -1985,7 +1988,8 @@ rx_WakeupServerProcs(void)
 #endif /* RX_ENABLE_LOCKS */
     }
     MUTEX_EXIT(&freeSQEList_lock);
-    for (queue_Scan(&rx_idleServerQueue, np, tqp, rx_serverQueueEntry)) {
+    for (opr_queue_Scan(&rx_idleServerQueue, cursor)) {
+        np = opr_queue_Entry(cursor, struct rx_serverQueueEntry, entry);
 #ifdef RX_ENABLE_LOCKS
        CV_BROADCAST(&np->cv);
 #else /* RX_ENABLE_LOCKS */
@@ -2051,8 +2055,9 @@ rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)
        ReturnToServerPool(cur_service);
     }
     while (1) {
-       if (queue_IsNotEmpty(&rx_incomingCallQueue)) {
-           struct rx_call *tcall, *ncall, *choice2 = NULL;
+       if (!opr_queue_IsEmpty(&rx_incomingCallQueue)) {
+           struct rx_call *tcall, *choice2 = NULL;
+           struct opr_queue *cursor;
 
            /* Scan for eligible incoming calls.  A call is not eligible
             * if the maximum number of calls for its service type are
@@ -2061,14 +2066,16 @@ rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)
             * while the other threads may run ahead looking for calls which
             * have all their input data available immediately.  This helps
             * keep threads from blocking, waiting for data from the client. */
-           for (queue_Scan(&rx_incomingCallQueue, tcall, ncall, rx_call)) {
+           for (opr_queue_Scan(&rx_incomingCallQueue, cursor)) {
+               tcall = opr_queue_Entry(cursor, struct rx_call, entry);
+
                service = tcall->conn->service;
                if (!QuotaOK(service)) {
                    continue;
                }
                MUTEX_ENTER(&rx_pthread_mutex);
                if (tno == rxi_fcfs_thread_num
-                       || queue_IsLast(&rx_incomingCallQueue, tcall)) {
+                       || opr_queue_IsEnd(&rx_incomingCallQueue, cursor)) {
                    MUTEX_EXIT(&rx_pthread_mutex);
                    /* If we're the fcfs thread, then we'll just use
                     * this call. If we haven't been able to find an optimal
@@ -2078,9 +2085,10 @@ rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)
                    service = call->conn->service;
                } else {
                    MUTEX_EXIT(&rx_pthread_mutex);
-                   if (!queue_IsEmpty(&tcall->rq)) {
+                   if (!opr_queue_IsEmpty(&tcall->rq)) {
                        struct rx_packet *rp;
-                       rp = queue_First(&tcall->rq, rx_packet);
+                       rp = opr_queue_First(&tcall->rq, struct rx_packet,
+                                           entry);
                        if (rp->header.seq == 1) {
                            if (!meltdown_1pkt
                                || (rp->header.flags & RX_LAST_PACKET)) {
@@ -2103,7 +2111,7 @@ rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)
        }
 
        if (call) {
-           queue_Remove(call);
+           opr_queue_Remove(&call->entry);
            MUTEX_EXIT(&rx_serverPool_lock);
            MUTEX_ENTER(&call->lock);
 
@@ -2120,8 +2128,8 @@ rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)
                continue;
            }
 
-           if (queue_IsEmpty(&call->rq)
-               || queue_First(&call->rq, rx_packet)->header.seq != 1)
+           if (opr_queue_IsEmpty(&call->rq)
+               || opr_queue_First(&call->rq, struct rx_packet, entry)->header.seq != 1)
                rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
 
            CLEAR_CALL_QUEUE_LOCK(call);
@@ -2135,7 +2143,7 @@ rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)
                *socketp = OSI_NULLSOCKET;
            }
            sq->socketp = socketp;
-           queue_Append(&rx_idleServerQueue, sq);
+           opr_queue_Append(&rx_idleServerQueue, &sq->entry);
 #ifndef AFS_AIX41_ENV
            rx_waitForPacket = sq;
 #else
@@ -2225,8 +2233,9 @@ rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)
        rxi_availProcs++;
         MUTEX_EXIT(&rx_quota_mutex);
     }
-    if (queue_IsNotEmpty(&rx_incomingCallQueue)) {
-       struct rx_call *tcall, *ncall;
+    if (!opr_queue_IsEmpty(&rx_incomingCallQueue)) {
+       struct rx_call *tcall;
+       struct opr_queue *cursor;
        /* Scan for eligible incoming calls.  A call is not eligible
         * if the maximum number of calls for its service type are
         * already executing */
@@ -2235,12 +2244,14 @@ rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)
         * have all their input data available immediately.  This helps
         * keep threads from blocking, waiting for data from the client. */
        choice2 = (struct rx_call *)0;
-       for (queue_Scan(&rx_incomingCallQueue, tcall, ncall, rx_call)) {
+       for (opr_queue_Scan(&rx_incomingCallQueue, cursor)) {
+           tcall = opr_queue_Entry(cursor, struct rx_call, entry);
            service = tcall->conn->service;
            if (QuotaOK(service)) {
                MUTEX_ENTER(&rx_pthread_mutex);
-               if (tno == rxi_fcfs_thread_num
-                   || !tcall->queue_item_header.next) {
+               /* XXX - If tcall->entry.next is NULL, then we're no longer
+                * on a queue at all. This shouldn't happen. */
+               if (tno == rxi_fcfs_thread_num || !tcall->entry.next) {
                    MUTEX_EXIT(&rx_pthread_mutex);
                    /* If we're the fcfs thread, then we'll just use
                     * this call. If we haven't been able to find an optimal
@@ -2250,9 +2261,10 @@ rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)
                    service = call->conn->service;
                } else {
                    MUTEX_EXIT(&rx_pthread_mutex);
-                   if (!queue_IsEmpty(&tcall->rq)) {
+                   if (!opr_queue_IsEmpty(&tcall->rq)) {
                        struct rx_packet *rp;
-                       rp = queue_First(&tcall->rq, rx_packet);
+                       rp = opr_queue_First(&tcall->rq, struct rx_packet,
+                                           entry);
                        if (rp->header.seq == 1
                            && (!meltdown_1pkt
                                || (rp->header.flags & RX_LAST_PACKET))) {
@@ -2272,14 +2284,14 @@ rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)
     }
 
     if (call) {
-       queue_Remove(call);
+       opr_queue_Remove(&call->entry);
        /* we can't schedule a call if there's no data!!! */
        /* send an ack if there's no data, if we're missing the
         * first packet, or we're missing something between first
         * and last -- there's a "hole" in the incoming data. */
-       if (queue_IsEmpty(&call->rq)
-           || queue_First(&call->rq, rx_packet)->header.seq != 1
-           || call->rprev != queue_Last(&call->rq, rx_packet)->header.seq)
+       if (opr_queue_IsEmpty(&call->rq)
+           || opr_queue_First(&call->rq, struct rx_packet, entry)->header.seq != 1
+           || call->rprev != opr_queue_Last(&call->rq, struct rx_packet, entry)->header.seq)
            rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
 
        call->flags &= (~RX_CALL_WAIT_PROC);
@@ -2301,7 +2313,7 @@ rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)
            *socketp = OSI_NULLSOCKET;
        }
        sq->socketp = socketp;
-       queue_Append(&rx_idleServerQueue, sq);
+       opr_queue_Append(&rx_idleServerQueue, &sq->entry);
        do {
            osi_rxSleep(sq);
 #ifdef KERNEL
@@ -2641,7 +2653,7 @@ rxi_NewCall(struct rx_connection *conn, int channel)
     struct rx_call *call;
 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
     struct rx_call *cp;        /* Call pointer temp */
-    struct rx_call *nxp;       /* Next call pointer, for queue_Scan */
+    struct opr_queue *cursor;
 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
 
     dpf(("rxi_NewCall(conn %"AFS_PTR_FMT", channel %d)\n", conn, channel));
@@ -2657,7 +2669,8 @@ rxi_NewCall(struct rx_connection *conn, int channel)
      * Skip over those with in-use TQs.
      */
     call = NULL;
-    for (queue_Scan(&rx_freeCallQueue, cp, nxp, rx_call)) {
+    for (opr_queue_Scan(&rx_freeCallQueue, cursor)) {
+       cp = opr_queue_Entry(cursor, struct rx_call, entry);
        if (!(cp->flags & RX_CALL_TQ_BUSY)) {
            call = cp;
            break;
@@ -2665,10 +2678,10 @@ rxi_NewCall(struct rx_connection *conn, int channel)
     }
     if (call) {
 #else /* AFS_GLOBAL_RXLOCK_KERNEL */
-    if (queue_IsNotEmpty(&rx_freeCallQueue)) {
-       call = queue_First(&rx_freeCallQueue, rx_call);
+    if (!opr_queue_IsEmpty(&rx_freeCallQueue)) {
+       call = opr_queue_First(&rx_freeCallQueue, struct rx_call, entry);
 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
-       queue_Remove(call);
+       opr_queue_Remove(&call->entry);
         if (rx_stats_active)
            rx_atomic_dec(&rx_stats.nFreeCallStructs);
        MUTEX_EXIT(&rx_freeCallQueue_lock);
@@ -2705,9 +2718,9 @@ rxi_NewCall(struct rx_connection *conn, int channel)
        CV_INIT(&call->cv_tq, "call tq", CV_DEFAULT, 0);
 
        /* Initialize once-only items */
-       queue_Init(&call->tq);
-       queue_Init(&call->rq);
-       queue_Init(&call->iovq);
+       opr_queue_Init(&call->tq);
+       opr_queue_Init(&call->rq);
+       opr_queue_Init(&call->iovq);
 #ifdef RXDEBUG_PACKET
         call->rqc = call->tqc = call->iovqc = 0;
 #endif /* RXDEBUG_PACKET */
@@ -2786,11 +2799,11 @@ rxi_FreeCall(struct rx_call *call, int haveCTLock)
      * the head of the list, and idle calls at the tail.
      */
     if (call->flags & RX_CALL_TQ_BUSY)
-       queue_Prepend(&rx_freeCallQueue, call);
+       opr_queue_Prepend(&rx_freeCallQueue, &call->entry);
     else
-       queue_Append(&rx_freeCallQueue, call);
+       opr_queue_Append(&rx_freeCallQueue, &call->entry);
 #else /* AFS_GLOBAL_RXLOCK_KERNEL */
-    queue_Append(&rx_freeCallQueue, call);
+    opr_queue_Append(&rx_freeCallQueue, &call->entry);
 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
     if (rx_stats_active)
        rx_atomic_inc(&rx_stats.nFreeCallStructs);
@@ -3000,7 +3013,7 @@ rxi_FindPeer(afs_uint32 host, u_short port,
            rx_atomic_set(&pp->neterrs, 0);
 #endif
            MUTEX_INIT(&pp->peer_lock, "peer_lock", MUTEX_DEFAULT, 0);
-           queue_Init(&pp->rpcStats);
+           opr_queue_Init(&pp->rpcStats);
            pp->next = rx_peerHashTable[hashIndex];
            rx_peerHashTable[hashIndex] = pp;
            rxi_InitPeerParams(pp);
@@ -3981,8 +3994,8 @@ rxi_ReceiveDataPacket(struct rx_call *call,
        if (seq == call->rnext) {
 
            /* Check to make sure it is not a duplicate of one already queued */
-           if (queue_IsNotEmpty(&call->rq)
-               && queue_First(&call->rq, rx_packet)->header.seq == seq) {
+           if (!opr_queue_IsEmpty(&call->rq)
+               && opr_queue_First(&call->rq, struct rx_packet, entry)->header.seq == seq) {
                 if (rx_stats_active)
                     rx_atomic_inc(&rx_stats.dupPacketsRead);
                dpf(("packet %"AFS_PTR_FMT" dropped on receipt - duplicate\n", np));
@@ -4000,7 +4013,7 @@ rxi_ReceiveDataPacket(struct rx_call *call,
 #ifdef RX_TRACK_PACKETS
            np->flags |= RX_PKTFLAG_RQ;
 #endif
-           queue_Prepend(&call->rq, np);
+           opr_queue_Prepend(&call->rq, &np->entry);
 #ifdef RXDEBUG_PACKET
             call->rqc++;
 #endif /* RXDEBUG_PACKET */
@@ -4023,10 +4036,12 @@ rxi_ReceiveDataPacket(struct rx_call *call,
            /* Check whether we have all of the packets for this call */
            if (call->flags & RX_CALL_HAVE_LAST) {
                afs_uint32 tseq;        /* temporary sequence number */
-               struct rx_packet *tp;   /* Temporary packet pointer */
-               struct rx_packet *nxp;  /* Next pointer, for queue_Scan */
+               struct opr_queue *cursor;
 
-               for (tseq = seq, queue_Scan(&call->rq, tp, nxp, rx_packet)) {
+               for (tseq = seq, opr_queue_Scan(&call->rq, cursor)) {
+                   struct rx_packet *tp;
+
+                   tp = opr_queue_Entry(cursor, struct rx_packet, entry);
                    if (tseq != tp->header.seq)
                        break;
                    if (tp->header.flags & RX_LAST_PACKET) {
@@ -4067,8 +4082,7 @@ rxi_ReceiveDataPacket(struct rx_call *call,
             * any of this packets predecessors are missing.  */
 
            afs_uint32 prev;    /* "Previous packet" sequence number */
-           struct rx_packet *tp;       /* Temporary packet pointer */
-           struct rx_packet *nxp;      /* Next pointer, for queue_Scan */
+           struct opr_queue *cursor;
            int missing;        /* Are any predecessors missing? */
 
            /* If the new packet's sequence number has been sent to the
@@ -4098,8 +4112,12 @@ rxi_ReceiveDataPacket(struct rx_call *call,
            }
 
            /* Look for the packet in the queue of old received packets */
-           for (prev = call->rnext - 1, missing =
-                0, queue_Scan(&call->rq, tp, nxp, rx_packet)) {
+           prev = call->rnext - 1;
+           missing = 0;
+           for (opr_queue_Scan(&call->rq, cursor)) {
+               struct rx_packet *tp
+                   = opr_queue_Entry(cursor, struct rx_packet, entry);
+
                /*Check for duplicate packet */
                if (seq == tp->header.seq) {
                     if (rx_stats_active)
@@ -4140,7 +4158,7 @@ rxi_ReceiveDataPacket(struct rx_call *call,
 #ifdef RXDEBUG_PACKET
             call->rqc++;
 #endif /* RXDEBUG_PACKET */
-           queue_InsertBefore(tp, np);
+           opr_queue_InsertBefore(cursor, &np->entry);
            call->nSoftAcks++;
            np = NULL;
 
@@ -4149,8 +4167,10 @@ rxi_ReceiveDataPacket(struct rx_call *call,
                && !(call->flags & RX_CALL_RECEIVE_DONE)) {
                afs_uint32 tseq;        /* temporary sequence number */
 
-               for (tseq =
-                    call->rnext, queue_Scan(&call->rq, tp, nxp, rx_packet)) {
+               tseq = call->rnext;
+               for (opr_queue_Scan(&call->rq, cursor)) {
+                   struct rx_packet *tp
+                        = opr_queue_Entry(cursor, struct rx_packet, entry);
                    if (tseq != tp->header.seq)
                        break;
                    if (tp->header.flags & RX_LAST_PACKET) {
@@ -4300,9 +4320,9 @@ rxi_ReceiveAckPacket(struct rx_call *call, struct rx_packet *np,
     struct rx_ackPacket *ap;
     int nAcks;
     struct rx_packet *tp;
-    struct rx_packet *nxp;     /* Next packet pointer for queue_Scan */
     struct rx_connection *conn = call->conn;
     struct rx_peer *peer = conn->peer;
+    struct opr_queue *cursor;
     struct clock now;          /* Current time, for RTT calculations */
     afs_uint32 first;
     afs_uint32 prev;
@@ -4458,11 +4478,11 @@ rxi_ReceiveAckPacket(struct rx_call *call, struct rx_packet *np,
      * disposed of
      */
 
-    tp = queue_First(&call->tq, rx_packet);
-    while(!queue_IsEnd(&call->tq, tp) && tp->header.seq < first) {
+    tp = opr_queue_First(&call->tq, struct rx_packet, entry);
+    while(!opr_queue_IsEnd(&call->tq, &tp->entry) && tp->header.seq < first) {
        struct rx_packet *next;
 
-       next = queue_Next(tp, rx_packet);
+       next = opr_queue_Next(&tp->entry, struct rx_packet, entry);
        call->tfirst = tp->header.seq + 1;
 
        if (!(tp->flags & RX_PKTFLAG_ACKED)) {
@@ -4490,7 +4510,7 @@ rxi_ReceiveAckPacket(struct rx_call *call, struct rx_packet *np,
        } else
 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
        {
-           queue_Remove(tp);
+           opr_queue_Remove(&tp->entry);
 #ifdef RX_TRACK_PACKETS
            tp->flags &= ~RX_PKTFLAG_TQ;
 #endif
@@ -4518,7 +4538,8 @@ rxi_ReceiveAckPacket(struct rx_call *call, struct rx_packet *np,
 
     call->nSoftAcked = 0;
     missing = 0;
-    while (!queue_IsEnd(&call->tq, tp) && tp->header.seq < first + nAcks) {
+    while (!opr_queue_IsEnd(&call->tq, &tp->entry) 
+          && tp->header.seq < first + nAcks) {
        /* Set the acknowledge flag per packet based on the
         * information in the ack packet. An acknowledged packet can
         * be downgraded when the server has discarded a packet it
@@ -4540,7 +4561,7 @@ rxi_ReceiveAckPacket(struct rx_call *call, struct rx_packet *np,
            missing = 1;
        }
 
-       tp = queue_Next(tp, rx_packet);
+       tp = opr_queue_Next(&tp->entry, struct rx_packet, entry);
     }
 
     /* We don't need to take any action with the 3rd or 4th section in the
@@ -4725,7 +4746,10 @@ rxi_ReceiveAckPacket(struct rx_call *call, struct rx_packet *np,
         * so we will retransmit as soon as the window permits
         */
 
-       for (acked = 0, queue_ScanBackwards(&call->tq, tp, nxp, rx_packet)) {
+       acked = 0;
+       for (opr_queue_ScanBackwards(&call->tq, cursor)) {
+           struct rx_packet *tp =
+               opr_queue_Entry(cursor, struct rx_packet, entry);
            if (acked) {
                if (!(tp->flags & RX_PKTFLAG_ACKED)) {
                    tp->flags &= ~RX_PKTFLAG_SENT;
@@ -4785,7 +4809,7 @@ rxi_ReceiveAckPacket(struct rx_call *call, struct rx_packet *np,
        call->state = RX_STATE_DALLY;
        rxi_ClearTransmitQueue(call, 0);
         rxevent_Cancel(&call->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE);
-    } else if (!queue_IsEmpty(&call->tq)) {
+    } else if (!opr_queue_IsEmpty(&call->tq)) {
        rxi_Start(call, istack);
     }
     return np;
@@ -4903,7 +4927,7 @@ rxi_AttachServerProc(struct rx_call *call,
     MUTEX_ENTER(&rx_serverPool_lock);
 
     haveQuota = QuotaOK(service);
-    if ((!haveQuota) || queue_IsEmpty(&rx_idleServerQueue)) {
+    if ((!haveQuota) || opr_queue_IsEmpty(&rx_idleServerQueue)) {
        /* If there are no processes available to service this call,
         * put the call on the incoming call queue (unless it's
         * already on the queue).
@@ -4919,16 +4943,18 @@ rxi_AttachServerProc(struct rx_call *call,
            rx_atomic_inc(&rx_nWaited);
            rxi_calltrace(RX_CALL_ARRIVAL, call);
            SET_CALL_QUEUE_LOCK(call, &rx_serverPool_lock);
-           queue_Append(&rx_incomingCallQueue, call);
+           opr_queue_Append(&rx_incomingCallQueue, &call->entry);
        }
     } else {
-       sq = queue_Last(&rx_idleServerQueue, rx_serverQueueEntry);
+       sq = opr_queue_Last(&rx_idleServerQueue,
+                           struct rx_serverQueueEntry, entry);
 
        /* If hot threads are enabled, and both newcallp and sq->socketp
         * are non-null, then this thread will process the call, and the
        * idle server thread will start listening on this thread's socket.
         */
-       queue_Remove(sq);
+       opr_queue_Remove(&sq->entry);
+
        if (rx_enable_hot_thread && newcallp && sq->socketp) {
            *newcallp = call;
            *tnop = sq->tno;
@@ -4942,8 +4968,8 @@ rxi_AttachServerProc(struct rx_call *call,
            /* Conservative:  I don't think this should happen */
            call->flags &= ~RX_CALL_WAIT_PROC;
            rx_atomic_dec(&rx_nWaiting);
-           if (queue_IsOnQueue(call)) {
-               queue_Remove(call);
+           if (opr_queue_IsOnQueue(&call->entry)) {
+               opr_queue_Remove(&call->entry);
            }
        }
        call->state = RX_STATE_ACTIVE;
@@ -5026,13 +5052,17 @@ rxi_SendDelayedAck(struct rxevent *event, void *arg1, void *unused1,
 static void
 rxi_SetAcksInTransmitQueue(struct rx_call *call)
 {
-    struct rx_packet *p, *tp;
+    struct opr_queue *cursor;
     int someAcked = 0;
 
-    for (queue_Scan(&call->tq, p, tp, rx_packet)) {
+    for (opr_queue_Scan(&call->tq, cursor)) {
+       struct rx_packet *p 
+               = opr_queue_Entry(cursor, struct rx_packet, entry);
+
        p->flags |= RX_PKTFLAG_ACKED;
        someAcked = 1;
     }
+
     if (someAcked) {
        call->flags |= RX_CALL_TQ_CLEARME;
        call->flags |= RX_CALL_TQ_SOME_ACKED;
@@ -5059,11 +5089,13 @@ static void
 rxi_ClearTransmitQueue(struct rx_call *call, int force)
 {
 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
-    struct rx_packet *p, *tp;
-
+    struct opr_queue *cursor;
     if (!force && (call->flags & RX_CALL_TQ_BUSY)) {
        int someAcked = 0;
-       for (queue_Scan(&call->tq, p, tp, rx_packet)) {
+       for (opr_queue_Scan(&call->tq, cursor)) {
+           struct rx_packet *p 
+               = opr_queue_Entry(cursor, struct rx_packet, entry);
+
            p->flags |= RX_PKTFLAG_ACKED;
            someAcked = 1;
        }
@@ -5101,7 +5133,7 @@ rxi_ClearTransmitQueue(struct rx_call *call, int force)
 static void
 rxi_ClearReceiveQueue(struct rx_call *call)
 {
-    if (queue_IsNotEmpty(&call->rq)) {
+    if (!opr_queue_IsEmpty(&call->rq)) {
         u_short count;
 
         count = rxi_FreePackets(0, &call->rq);
@@ -5434,15 +5466,15 @@ rxi_ResetCall(struct rx_call *call, int newcall)
      */
     if (call->call_queue_lock) {
        MUTEX_ENTER(call->call_queue_lock);
-       if (queue_IsOnQueue(call)) {
-           queue_Remove(call);
+       if (opr_queue_IsOnQueue(&call->entry)) {
+           opr_queue_Remove(&call->entry);
        }
        MUTEX_EXIT(call->call_queue_lock);
        CLEAR_CALL_QUEUE_LOCK(call);
     }
 #else /* RX_ENABLE_LOCKS */
-    if (queue_IsOnQueue(call)) {
-       queue_Remove(call);
+    if (opr_queue_IsOnQueue(&call->entry)) {
+       opr_queue_Remove(&call->entry);
     }
 #endif /* RX_ENABLE_LOCKS */
 
@@ -5480,9 +5512,8 @@ rxi_SendAck(struct rx_call *call,
            int istack)
 {
     struct rx_ackPacket *ap;
-    struct rx_packet *rqp;
-    struct rx_packet *nxp;     /* For queue_Scan */
     struct rx_packet *p;
+    struct opr_queue *cursor;
     u_char offset = 0;
     afs_int32 templ;
     afs_uint32 padbytes = 0;
@@ -5589,18 +5620,23 @@ rxi_SendAck(struct rx_call *call,
      * are packets in the receive queue awaiting processing.
      */
     if ((call->flags & RX_CALL_ACKALL_SENT) &&
-        !queue_IsEmpty(&call->rq)) {
-        ap->firstPacket = htonl(queue_Last(&call->rq, rx_packet)->header.seq + 1);
+        !opr_queue_IsEmpty(&call->rq)) {
+        ap->firstPacket = htonl(opr_queue_Last(&call->rq, struct rx_packet, entry)->header.seq + 1);
     } else {
         ap->firstPacket = htonl(call->rnext);
 
        ap->previousPacket = htonl(call->rprev);        /* Previous packet received */
 
-       /* No fear of running out of ack packet here because there can only be at most
-        * one window full of unacknowledged packets.  The window size must be constrained
-        * to be less than the maximum ack size, of course.  Also, an ack should always
-        * fit into a single packet -- it should not ever be fragmented.  */
-       for (offset = 0, queue_Scan(&call->rq, rqp, nxp, rx_packet)) {
+       /* No fear of running out of ack packet here because there can only 
+        * be at most one window full of unacknowledged packets.  The window
+        * size must be constrained to be less than the maximum ack size, 
+        * of course.  Also, an ack should always fit into a single packet 
+        * -- it should not ever be fragmented.  */
+       offset = 0;
+       for (opr_queue_Scan(&call->rq, cursor)) {
+           struct rx_packet *rqp
+               = opr_queue_Entry(cursor, struct rx_packet, entry);
+
            if (!rqp || !call->rq.next
                || (rqp->header.seq > (call->rnext + call->rwind))) {
 #ifndef RX_ENABLE_TSFPQ
@@ -5999,7 +6035,7 @@ rxi_Resend(struct rxevent *event, void *arg0, void *arg1, int istack)
 {
     struct rx_call *call = arg0;
     struct rx_peer *peer;
-    struct rx_packet *p, *nxp;
+    struct opr_queue *cursor;
     struct clock maxTimeout = { 60, 0 };
 
     MUTEX_ENTER(&call->lock);
@@ -6021,7 +6057,7 @@ rxi_Resend(struct rxevent *event, void *arg0, void *arg1, int istack)
        rxi_CheckBusy(call);
     }
 
-    if (queue_IsEmpty(&call->tq)) {
+    if (opr_queue_IsEmpty(&call->tq)) {
        /* Nothing to do. This means that we've been raced, and that an
         * ACK has come in between when we were triggered, and when we
         * actually got to run. */
@@ -6032,7 +6068,8 @@ rxi_Resend(struct rxevent *event, void *arg0, void *arg1, int istack)
     call->flags |= RX_CALL_FAST_RECOVER;
 
     /* Mark all of the pending packets in the queue as being lost */
-    for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
+    for (opr_queue_Scan(&call->tq, cursor)) {
+       struct rx_packet *p = opr_queue_Entry(cursor, struct rx_packet, entry);
        if (!(p->flags & RX_PKTFLAG_ACKED))
            p->flags &= ~RX_PKTFLAG_SENT;
     }
@@ -6080,9 +6117,10 @@ out:
 void
 rxi_Start(struct rx_call *call, int istack)
 {
-
-    struct rx_packet *p;
-    struct rx_packet *nxp;     /* Next pointer for queue_Scan */
+    struct opr_queue *cursor;
+#ifdef RX_ENABLE_LOCKS
+    struct opr_queue *store;
+#endif
     int nXmitPackets;
     int maxXmitPackets;
 
@@ -6094,8 +6132,7 @@ rxi_Start(struct rx_call *call, int istack)
        return;
     }
 
-    if (queue_IsNotEmpty(&call->tq)) { /* If we have anything to send */
-
+    if (!opr_queue_IsEmpty(&call->tq)) {       /* If we have anything to send */
        /* Send (or resend) any packets that need it, subject to
         * window restrictions and congestion burst control
         * restrictions.  Ask for an ack on the last packet sent in
@@ -6122,16 +6159,10 @@ rxi_Start(struct rx_call *call, int istack)
 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
                nXmitPackets = 0;
                maxXmitPackets = MIN(call->twind, call->cwind);
-               for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
-#ifdef RX_TRACK_PACKETS
-                   if ((p->flags & RX_PKTFLAG_FREE)
-                       || (!queue_IsEnd(&call->tq, nxp)
-                           && (nxp->flags & RX_PKTFLAG_FREE))
-                       || (p == (struct rx_packet *)&rx_freePacketQueue)
-                       || (nxp == (struct rx_packet *)&rx_freePacketQueue)) {
-                       osi_Panic("rxi_Start: xmit queue clobbered");
-                   }
-#endif
+               for (opr_queue_Scan(&call->tq, cursor)) {
+                   struct rx_packet *p
+                       = opr_queue_Entry(cursor, struct rx_packet, entry);
+
                    if (p->flags & RX_PKTFLAG_ACKED) {
                        /* Since we may block, don't trust this */
                         if (rx_stats_active)
@@ -6168,7 +6199,7 @@ rxi_Start(struct rx_call *call, int istack)
                               *(call->callNumber), p));
                        call->xmitList[nXmitPackets++] = p;
                    }
-               }
+               } /* end of the opr_queue_Scan */
 
                /* xmitList now hold pointers to all of the packets that are
                 * ready to send. Now we loop to send the packets */
@@ -6197,11 +6228,14 @@ rxi_Start(struct rx_call *call, int istack)
                    /* Some packets have received acks. If they all have, we can clear
                     * the transmit queue.
                     */
-                   for (missing =
-                        0, queue_Scan(&call->tq, p, nxp, rx_packet)) {
+                   missing = 0;
+                   for (opr_queue_ScanSafe(&call->tq, cursor, store)) {
+                       struct rx_packet *p
+                           = opr_queue_Entry(cursor, struct rx_packet, entry);
+
                        if (p->header.seq < call->tfirst
                            && (p->flags & RX_PKTFLAG_ACKED)) {
-                           queue_Remove(p);
+                           opr_queue_Remove(&p->entry);
 #ifdef RX_TRACK_PACKETS
                            p->flags &= ~RX_PKTFLAG_TQ;
 #endif
@@ -7137,7 +7171,7 @@ rxi_ReapConnections(struct rxevent *unused, void *unused1, void *unused2,
                code = MUTEX_TRYENTER(&peer->peer_lock);
                if ((code) && (peer->refCount == 0)
                    && ((peer->idleWhen + rx_idlePeerTime) < now.sec)) {
-                   rx_interface_stat_p rpc_stat, nrpc_stat;
+                   struct opr_queue *cursor, *store;
                    size_t space;
 
                     /*
@@ -7169,14 +7203,18 @@ rxi_ReapConnections(struct rxevent *unused, void *unused1, void *unused2,
 
                    MUTEX_EXIT(&peer->peer_lock);
                    MUTEX_DESTROY(&peer->peer_lock);
-                   for (queue_Scan
-                        (&peer->rpcStats, rpc_stat, nrpc_stat,
-                         rx_interface_stat)) {
+
+                   for (opr_queue_ScanSafe(&peer->rpcStats, cursor, store)) {
                        unsigned int num_funcs;
+                       struct rx_interface_stat *rpc_stat
+                           = opr_queue_Entry(cursor, struct rx_interface_stat,
+                                            entry);
                        if (!rpc_stat)
                            break;
-                       queue_Remove(&rpc_stat->queue_header);
-                       queue_Remove(&rpc_stat->all_peers);
+
+                       opr_queue_Remove(&rpc_stat->entry);
+                       opr_queue_Remove(&rpc_stat->entryPeers);
+
                        num_funcs = rpc_stat->stats[0].func_total;
                        space =
                            sizeof(rx_interface_stat_t) +
@@ -7924,15 +7962,16 @@ shutdown_rx(void)
 #endif /* AFS_USE_GETTIMEOFDAY */
 #endif /* AFS_PTHREAD_ENV */
 
-    while (!queue_IsEmpty(&rx_freeCallQueue)) {
-       call = queue_First(&rx_freeCallQueue, rx_call);
-       queue_Remove(call);
+    while (!opr_queue_IsEmpty(&rx_freeCallQueue)) {
+       call = opr_queue_First(&rx_freeCallQueue, struct rx_call, entry);
+       opr_queue_Remove(&call->entry);
        rxi_Free(call, sizeof(struct rx_call));
     }
 
-    while (!queue_IsEmpty(&rx_idleServerQueue)) {
-       sq = queue_First(&rx_idleServerQueue, rx_serverQueueEntry);
-       queue_Remove(sq);
+    while (!opr_queue_IsEmpty(&rx_idleServerQueue)) {
+       sq = opr_queue_First(&rx_idleServerQueue, struct rx_serverQueueEntry,
+                           entry);
+       opr_queue_Remove(&sq->entry);
     }
 #endif /* KERNEL */
 
@@ -7945,19 +7984,20 @@ shutdown_rx(void)
 
             MUTEX_ENTER(&rx_peerHashTable_lock);
             for (peer = *peer_ptr; peer; peer = next) {
-               rx_interface_stat_p rpc_stat, nrpc_stat;
+               struct opr_queue *cursor, *store;
                size_t space;
 
                 MUTEX_ENTER(&rx_rpc_stats);
                 MUTEX_ENTER(&peer->peer_lock);
-               for (queue_Scan
-                    (&peer->rpcStats, rpc_stat, nrpc_stat,
-                     rx_interface_stat)) {
+               for (opr_queue_ScanSafe(&peer->rpcStats, cursor, store)) {
                    unsigned int num_funcs;
+                   struct rx_interface_stat *rpc_stat
+                       = opr_queue_Entry(cursor, struct rx_interface_stat,
+                                        entry);
                    if (!rpc_stat)
                        break;
-                   queue_Remove(&rpc_stat->queue_header);
-                   queue_Remove(&rpc_stat->all_peers);
+                   opr_queue_Remove(&rpc_stat->entry);
+                   opr_queue_Remove(&rpc_stat->entryPeers);
                    num_funcs = rpc_stat->stats[0].func_total;
                    space =
                        sizeof(rx_interface_stat_t) +
@@ -8151,14 +8191,14 @@ rx_GetServiceSpecific(struct rx_service *svc, int key)
  * which can come and go based upon the peer lifetime.
  */
 
-static struct rx_queue processStats = { &processStats, &processStats };
+static struct opr_queue processStats = { &processStats, &processStats };
 
 /*
  * peerStats is a queue used to store the statistics for all peer structs.
  * Its contents are the union of all the peer rpcStats queues.
  */
 
-static struct rx_queue peerStats = { &peerStats, &peerStats };
+static struct opr_queue peerStats = { &peerStats, &peerStats };
 
 /*
  * rxi_monitor_processStats is used to turn process wide stat collection
@@ -8237,18 +8277,21 @@ rxi_ClearRPCOpStat(rx_function_entry_v1_p rpc_stat)
  */
 
 static rx_interface_stat_p
-rxi_FindRpcStat(struct rx_queue *stats, afs_uint32 rxInterface,
+rxi_FindRpcStat(struct opr_queue *stats, afs_uint32 rxInterface,
                afs_uint32 totalFunc, int isServer, afs_uint32 remoteHost,
                afs_uint32 remotePort, int addToPeerList,
                unsigned int *counter, int create)
 {
-    rx_interface_stat_p rpc_stat, nrpc_stat;
+    rx_interface_stat_p rpc_stat = NULL;
+    struct opr_queue *cursor;
 
     /*
      * See if there's already a structure for this interface
      */
 
-    for (queue_Scan(stats, rpc_stat, nrpc_stat, rx_interface_stat)) {
+    for (opr_queue_Scan(stats, cursor)) {
+       rpc_stat = opr_queue_Entry(cursor, struct rx_interface_stat, entry);
+
        if ((rpc_stat->stats[0].interfaceId == rxInterface)
            && (rpc_stat->stats[0].remote_is_server == isServer))
            break;
@@ -8256,7 +8299,7 @@ rxi_FindRpcStat(struct rx_queue *stats, afs_uint32 rxInterface,
 
     /* if they didn't ask us to create, we're done */
     if (!create) {
-        if (queue_IsEnd(stats, rpc_stat))
+        if (opr_queue_IsEnd(stats, cursor))
             return NULL;
         else
             return rpc_stat;
@@ -8271,7 +8314,7 @@ rxi_FindRpcStat(struct rx_queue *stats, afs_uint32 rxInterface,
      * queue.
      */
 
-    if (queue_IsEnd(stats, rpc_stat) || (rpc_stat == NULL)
+    if (opr_queue_IsEnd(stats, cursor) || (rpc_stat == NULL)
        || (rpc_stat->stats[0].interfaceId != rxInterface)
        || (rpc_stat->stats[0].remote_is_server != isServer)) {
        int i;
@@ -8295,9 +8338,9 @@ rxi_FindRpcStat(struct rx_queue *stats, afs_uint32 rxInterface,
            rpc_stat->stats[i].func_total = totalFunc;
            rpc_stat->stats[i].func_index = i;
        }
-       queue_Prepend(stats, rpc_stat);
+       opr_queue_Prepend(stats, &rpc_stat->entry);
        if (addToPeerList) {
-           queue_Prepend(&peerStats, &rpc_stat->all_peers);
+           opr_queue_Prepend(&peerStats, &rpc_stat->entryPeers);
        }
     }
     return rpc_stat;
@@ -8473,7 +8516,7 @@ rx_ReleaseRPCStats(void *stats)
  */
 
 static int
-rxi_AddRpcStat(struct rx_queue *stats, afs_uint32 rxInterface,
+rxi_AddRpcStat(struct opr_queue *stats, afs_uint32 rxInterface,
               afs_uint32 currentFunc, afs_uint32 totalFunc,
               struct clock *queueTime, struct clock *execTime,
               afs_uint64 bytesSent, afs_uint64 bytesRcvd, int isServer,
@@ -8744,11 +8787,11 @@ rx_RetrieveProcessRPCStats(afs_uint32 callerVersion, afs_uint32 * myVersion,
        ptr = *stats = rxi_Alloc(space);
 
        if (ptr != NULL) {
-           rx_interface_stat_p rpc_stat, nrpc_stat;
+           struct opr_queue *cursor;
 
-
-           for (queue_Scan
-                (&processStats, rpc_stat, nrpc_stat, rx_interface_stat)) {
+           for (opr_queue_Scan(&processStats, cursor)) {
+               struct rx_interface_stat *rpc_stat = 
+                   opr_queue_Entry(cursor, struct rx_interface_stat, entry);
                /*
                 * Copy the data based upon the caller version
                 */
@@ -8843,24 +8886,12 @@ rx_RetrievePeerRPCStats(afs_uint32 callerVersion, afs_uint32 * myVersion,
        ptr = *stats = rxi_Alloc(space);
 
        if (ptr != NULL) {
-           rx_interface_stat_p rpc_stat, nrpc_stat;
-           char *fix_offset;
+           struct opr_queue *cursor;
 
-           for (queue_Scan
-                (&peerStats, rpc_stat, nrpc_stat, rx_interface_stat)) {
-               /*
-                * We have to fix the offset of rpc_stat since we are
-                * keeping this structure on two rx_queues.  The rx_queue
-                * package assumes that the rx_queue member is the first
-                * member of the structure.  That is, rx_queue assumes that
-                * any one item is only on one queue at a time.  We are
-                * breaking that assumption and so we have to do a little
-                * math to fix our pointers.
-                */
-
-               fix_offset = (char *)rpc_stat;
-               fix_offset -= offsetof(rx_interface_stat_t, all_peers);
-               rpc_stat = (rx_interface_stat_p) fix_offset;
+           for (opr_queue_Scan(&peerStats, cursor)) {
+               struct rx_interface_stat *rpc_stat
+                   = opr_queue_Entry(cursor, struct rx_interface_stat,
+                                    entryPeers);
 
                /*
                 * Copy the data based upon the caller version
@@ -8991,7 +9022,7 @@ rx_enablePeerRPCStats(void)
 void
 rx_disableProcessRPCStats(void)
 {
-    rx_interface_stat_p rpc_stat, nrpc_stat;
+    struct opr_queue *cursor, *store;
     size_t space;
 
     MUTEX_ENTER(&rx_rpc_stats);
@@ -9006,11 +9037,13 @@ rx_disableProcessRPCStats(void)
        rx_enable_stats = 0;
     }
 
-    for (queue_Scan(&processStats, rpc_stat, nrpc_stat, rx_interface_stat)) {
-       unsigned int num_funcs = 0;
-       if (!rpc_stat)
-           break;
-       queue_Remove(rpc_stat);
+    for (opr_queue_ScanSafe(&processStats, cursor, store)) {
+       unsigned int num_funcs = 0;
+       struct rx_interface_stat *rpc_stat
+           = opr_queue_Entry(cursor, struct rx_interface_stat, entry);
+
+       opr_queue_Remove(&rpc_stat->entry);
+
        num_funcs = rpc_stat->stats[0].func_total;
        space =
            sizeof(rx_interface_stat_t) +
@@ -9059,8 +9092,8 @@ rx_disablePeerRPCStats(void)
            next = peer->next;
            code = MUTEX_TRYENTER(&peer->peer_lock);
            if (code) {
-               rx_interface_stat_p rpc_stat, nrpc_stat;
                size_t space;
+               struct opr_queue *cursor, *store;
 
                if (prev == *peer_ptr) {
                    *peer_ptr = next;
@@ -9075,14 +9108,14 @@ rx_disablePeerRPCStats(void)
                 peer->refCount++;
                 MUTEX_EXIT(&rx_peerHashTable_lock);
 
-                for (queue_Scan
-                    (&peer->rpcStats, rpc_stat, nrpc_stat,
-                     rx_interface_stat)) {
+                for (opr_queue_ScanSafe(&peer->rpcStats, cursor, store)) {
                    unsigned int num_funcs = 0;
-                   if (!rpc_stat)
-                       break;
-                   queue_Remove(&rpc_stat->queue_header);
-                   queue_Remove(&rpc_stat->all_peers);
+                   struct rx_interface_stat *rpc_stat
+                       = opr_queue_Entry(cursor, struct rx_interface_stat,
+                                        entry);
+
+                   opr_queue_Remove(&rpc_stat->entry);
+                   opr_queue_Remove(&rpc_stat->entryPeers);
                    num_funcs = rpc_stat->stats[0].func_total;
                    space =
                        sizeof(rx_interface_stat_t) +
@@ -9125,12 +9158,15 @@ rx_disablePeerRPCStats(void)
 void
 rx_clearProcessRPCStats(afs_uint32 clearFlag)
 {
-    rx_interface_stat_p rpc_stat, nrpc_stat;
+    struct opr_queue *cursor;
 
     MUTEX_ENTER(&rx_rpc_stats);
 
-    for (queue_Scan(&processStats, rpc_stat, nrpc_stat, rx_interface_stat)) {
+    for (opr_queue_Scan(&processStats, cursor)) {
        unsigned int num_funcs = 0, i;
+       struct rx_interface_stat *rpc_stat
+            = opr_queue_Entry(cursor, struct rx_interface_stat, entry);
+
        num_funcs = rpc_stat->stats[0].func_total;
        for (i = 0; i < num_funcs; i++) {
            if (clearFlag & AFS_RX_STATS_CLEAR_INVOCATIONS) {
@@ -9196,26 +9232,14 @@ rx_clearProcessRPCStats(afs_uint32 clearFlag)
 void
 rx_clearPeerRPCStats(afs_uint32 clearFlag)
 {
-    rx_interface_stat_p rpc_stat, nrpc_stat;
+    struct opr_queue *cursor;
 
     MUTEX_ENTER(&rx_rpc_stats);
 
-    for (queue_Scan(&peerStats, rpc_stat, nrpc_stat, rx_interface_stat)) {
-       unsigned int num_funcs = 0, i;
-       char *fix_offset;
-       /*
-        * We have to fix the offset of rpc_stat since we are
-        * keeping this structure on two rx_queues.  The rx_queue
-        * package assumes that the rx_queue member is the first
-        * member of the structure.  That is, rx_queue assumes that
-        * any one item is only on one queue at a time.  We are
-        * breaking that assumption and so we have to do a little
-        * math to fix our pointers.
-        */
-
-       fix_offset = (char *)rpc_stat;
-       fix_offset -= offsetof(rx_interface_stat_t, all_peers);
-       rpc_stat = (rx_interface_stat_p) fix_offset;
+    for (opr_queue_Scan(&peerStats, cursor)) {
+       unsigned int num_funcs, i;
+       struct rx_interface_stat *rpc_stat
+           = opr_queue_Entry(cursor, struct rx_interface_stat, entryPeers);
 
        num_funcs = rpc_stat->stats[0].func_total;
        for (i = 0; i < num_funcs; i++) {
@@ -9339,12 +9363,11 @@ int rx_DumpCalls(FILE *outputFile, char *cookie)
 
     for (c = rx_allCallsp; c; c = c->allNextp) {
         u_short rqc, tqc, iovqc;
-        struct rx_packet *p, *np;
 
         MUTEX_ENTER(&c->lock);
-        queue_Count(&c->rq, p, np, rx_packet, rqc);
-        queue_Count(&c->tq, p, np, rx_packet, tqc);
-        queue_Count(&c->iovq, p, np, rx_packet, iovqc);
+        rqc = opr_queue_Count(&c->rq);
+        tqc = opr_queue_Count(&c->tq);
+        iovqc = opr_queue_Count(&c->iovq);
 
        RXDPRINTF(RXDPRINTOUT, "%s - call=0x%p, id=%u, state=%u, mode=%u, conn=%p, epoch=%u, cid=%u, callNum=%u, connFlags=0x%x, flags=0x%x, "
                 "rqc=%u,%u, tqc=%u,%u, iovqc=%u,%u, "
diff --git a/src/rx/rx.h b/src/rx/rx.h
index 35606bb..e0e6152 100644
@@ -52,7 +52,8 @@
 #endif
 #endif /* KERNEL */
 
-#include "rx_queue.h"
+#include <opr/queue.h>
+
 #include "rx_clock.h"
 #include "rx_event.h"
 #include "rx_misc.h"
@@ -841,8 +842,8 @@ typedef struct rx_function_entry_v1 {
 #define RX_STATS_RETRIEVAL_FIRST_EDITION 1     /* first implementation */
 
 typedef struct rx_interface_stat {
-    struct rx_queue queue_header;
-    struct rx_queue all_peers;
+    struct opr_queue entry;
+    struct opr_queue entryPeers;
     rx_function_entry_v1_t stats[1];   /* make sure this is aligned correctly */
 } rx_interface_stat_t, *rx_interface_stat_p;
 
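Because the linkage is an explicit named member, one structure can sit on
two queues at once and be recovered from either side, which is what lets
the patch drop the offsetof() pointer arithmetic that rx_queue required.
A minimal sketch (the function and its queue arguments are hypothetical;
the struct and member names match the definition above):

    #include <opr/queue.h>

    static void
    scan_both_views(struct opr_queue *rpcStats, struct opr_queue *allPeers)
    {
        struct opr_queue *cursor;

        /* Per-peer view: reached through the 'entry' linkage. */
        for (opr_queue_Scan(rpcStats, cursor)) {
            struct rx_interface_stat *rpc_stat
                = opr_queue_Entry(cursor, struct rx_interface_stat, entry);
            /* ... process rpc_stat->stats ... */
        }

        /* Global view: the same objects, reached through 'entryPeers'. */
        for (opr_queue_Scan(allPeers, cursor)) {
            struct rx_interface_stat *rpc_stat
                = opr_queue_Entry(cursor, struct rx_interface_stat,
                                  entryPeers);
            /* ... process rpc_stat->stats ... */
        }
    }
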
diff --git a/src/rx/rx_call.h b/src/rx/rx_call.h
index a1450f1..f1fa532 100644
@@ -20,9 +20,9 @@ struct rx_call_rx_lock {
 #else
 struct rx_call {
 #endif
-    struct rx_queue queue_item_header; /* Call can be on various queues (one-at-a-time) */
-    struct rx_queue tq;                /* Transmit packet queue */
-    struct rx_queue rq;                /* Receive packet queue */
+    struct opr_queue entry;    /* Call can be on various queues (one-at-a-time) */
+    struct opr_queue tq;       /* Transmit packet queue */
+    struct opr_queue rq;       /* Receive packet queue */
     /*
      * The following fields are accessed while the call is unlocked.
      * These fields are used by the caller/server thread to marshall
@@ -33,7 +33,7 @@ struct rx_call {
      *       word boundary. Otherwise threads that are changing
      *       adjacent fields will cause problems.
      */
-    struct rx_queue iovq;      /* readv/writev packet queue */
+    struct opr_queue iovq;     /* readv/writev packet queue */
     u_short nLeft;             /* Number bytes left in first receive packet */
     u_short curvec;            /* current iovec in currentPacket */
     u_short curlen;            /* bytes remaining in curvec */
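
The single 'entry' member is also why a call can be on only one of those
queues at a time: moving a call is an explicit unlink and relink, as the
rx.c hunks above show. A hedged sketch (requeue_call is hypothetical):

    #include <opr/queue.h>

    static void
    requeue_call(struct rx_call *call, struct opr_queue *dest)
    {
        /* One linkage member, so take the call off whatever queue
         * currently holds it before appending it to the new one. */
        if (opr_queue_IsOnQueue(&call->entry))
            opr_queue_Remove(&call->entry);
        opr_queue_Append(dest, &call->entry);
    }
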
diff --git a/src/rx/rx_conncache.c b/src/rx/rx_conncache.c
index dc2c826..89f8d5e 100644
@@ -25,7 +25,7 @@
  * We initialize rxi_connectionCache at compile time, so there is no
  * need to call opr_queue_Init(&rxi_connectionCache).
  */
-static struct rx_queue rxi_connectionCache = { &rxi_connectionCache,
+static struct opr_queue rxi_connectionCache = { &rxi_connectionCache,
     &rxi_connectionCache
 };
 
@@ -59,13 +59,13 @@ typedef struct rx_connParts {
 
 /*
  * Each element in the cache is represented by the following
- * structure.  I use an rx_queue to manipulate the cache entries.
+ * structure.  I use an opr_queue to manipulate the cache entries.
  * inUse tracks the number of calls within this connection that
  * we know are in use.
  */
 
 typedef struct cache_entry {
-    struct rx_queue queue_header;
+    struct opr_queue queue;
     struct rx_connection *conn;
     rx_connParts_t parts;
     int inUse;
@@ -104,9 +104,11 @@ static int
 rxi_FindCachedConnection(rx_connParts_p parts, struct rx_connection **conn)
 {
     int error = 0;
-    cache_entry_p cacheConn, nCacheConn;
+    struct opr_queue *cursor;
 
-    for (queue_Scan(&rxi_connectionCache, cacheConn, nCacheConn, cache_entry)) {
+    for (opr_queue_Scan(&rxi_connectionCache, cursor)) {
+       struct cache_entry *cacheConn
+           = opr_queue_Entry(cursor, struct cache_entry, queue);
        if ((rxi_CachedConnectionsEqual(parts, &cacheConn->parts))
            && (cacheConn->inUse < RX_MAXCALLS)
            && (cacheConn->hasError == 0)) {
@@ -138,7 +140,7 @@ rxi_AddCachedConnection(rx_connParts_p parts, struct rx_connection **conn)
        new_entry->parts = *parts;
        new_entry->inUse = 1;
        new_entry->hasError = 0;
-       queue_Prepend(&rxi_connectionCache, new_entry);
+       opr_queue_Prepend(&rxi_connectionCache, &new_entry->queue);
     }
 
     /*
@@ -197,13 +199,15 @@ rxi_GetCachedConnection(rx_connParts_p parts, struct rx_connection **conn)
 void
 rxi_DeleteCachedConnections(void)
 {
-    cache_entry_p cacheConn, nCacheConn;
+    struct opr_queue *cursor, *store;
 
     LOCK_CONN_CACHE;
-    for (queue_Scan(&rxi_connectionCache, cacheConn, nCacheConn, cache_entry)) {
+    for (opr_queue_ScanSafe(&rxi_connectionCache, cursor, store)) {
+       struct cache_entry *cacheConn
+           = opr_queue_Entry(cursor, struct cache_entry, queue);
        if (!cacheConn)
            break;
-       queue_Remove(cacheConn);
+       opr_queue_Remove(&cacheConn->queue);
        rxi_DestroyConnection(cacheConn->conn);
        free(cacheConn);
     }
@@ -251,11 +255,14 @@ rx_GetCachedConnection(unsigned int remoteAddr, unsigned short port,
 void
 rx_ReleaseCachedConnection(struct rx_connection *conn)
 {
-    cache_entry_p cacheConn, nCacheConn;
+    struct opr_queue *cursor, *store;
 
     LOCK_CONN_CACHE;
-    for (queue_Scan(&rxi_connectionCache, cacheConn, nCacheConn, cache_entry)) {
-       if (conn == cacheConn->conn) {
+    for (opr_queue_ScanSafe(&rxi_connectionCache, cursor, store)) {
+       struct cache_entry *cacheConn
+           = opr_queue_Entry(cursor, struct cache_entry, queue);
+
+       if (conn == cacheConn->conn) {
            cacheConn->inUse--;
            /*
             * check to see if the connection is in error.
@@ -266,7 +273,7 @@ rx_ReleaseCachedConnection(struct rx_connection *conn)
            if (rx_ConnError(conn)) {
                cacheConn->hasError = 1;
                if (cacheConn->inUse == 0) {
-                   queue_Remove(cacheConn);
+                   opr_queue_Remove(&cacheConn->queue);
                    rxi_DestroyConnection(cacheConn->conn);
                    free(cacheConn);
                }
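
As the rxi_connectionCache definition above shows, a header can also be
initialised at compile time by pointing both linkage fields at itself --
the static equivalent of calling opr_queue_Init(). A sketch (emptyQueue
is hypothetical):

    #include <opr/queue.h>

    /* An empty circular list: both fields point back at the header, so
     * opr_queue_IsEmpty(&emptyQueue) is immediately true. */
    static struct opr_queue emptyQueue = { &emptyQueue, &emptyQueue };
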
diff --git a/src/rx/rx_globals.h b/src/rx/rx_globals.h
index dd86ba5..b25578b 100644
@@ -149,7 +149,7 @@ EXT int rx_nPackets GLOBALSINIT(0); /* preallocate packets with rx_extraPackets
 EXT pthread_key_t rx_ts_info_key;
 typedef struct rx_ts_info_t {
     struct {
-        struct rx_queue queue;
+        struct opr_queue queue;
         int len;                /* local queue length */
         int delta;              /* number of new packets alloc'd locally since last sync w/ global queue */
 
@@ -184,7 +184,7 @@ EXT struct rx_ts_info_t * rx_ts_info_init(void);   /* init function for thread-s
 /* in pthreads rx, free packet queue is now a two-tiered queueing system
  * in which the first tier is thread-specific, and the second tier is
  * a global free packet queue */
-EXT struct rx_queue rx_freePacketQueue;
+EXT struct opr_queue rx_freePacketQueue;
 #ifdef RX_TRACK_PACKETS
 #define RX_FPQ_MARK_FREE(p) \
     do { \
@@ -285,9 +285,12 @@ EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
         struct rx_packet * p; \
         int tsize = MIN((rx_ts_info_p)->_FPQ.len, (rx_ts_info_p)->_FPQ.len - rx_TSFPQLocalMax + 3 *  rx_TSFPQGlobSize); \
        if (tsize <= 0) break; \
-        for (i=0,p=queue_Last(&((rx_ts_info_p)->_FPQ), rx_packet); \
-             i < tsize; i++,p=queue_Prev(p, rx_packet)); \
-        queue_SplitAfterPrepend(&((rx_ts_info_p)->_FPQ),&rx_freePacketQueue,p); \
+        for (i=0,p=opr_queue_Last(&((rx_ts_info_p)->_FPQ.queue), \
+                                struct rx_packet, entry); \
+             i < tsize; i++,p=opr_queue_Prev(&p->entry, \
+                                           struct rx_packet, entry )); \
+        opr_queue_SplitAfterPrepend(&((rx_ts_info_p)->_FPQ.queue), \
+                                  &rx_freePacketQueue, &p->entry); \
         (rx_ts_info_p)->_FPQ.len -= tsize; \
         rx_nFreePackets += tsize; \
         (rx_ts_info_p)->_FPQ.ltog_ops++; \
@@ -305,9 +308,12 @@ EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
         int i; \
         struct rx_packet * p; \
         if (num_transfer <= 0) break; \
-        for (i=0,p=queue_Last(&((rx_ts_info_p)->_FPQ), rx_packet); \
-            i < (num_transfer); i++,p=queue_Prev(p, rx_packet)); \
-        queue_SplitAfterPrepend(&((rx_ts_info_p)->_FPQ),&rx_freePacketQueue,p); \
+        for (i=0,p=opr_queue_Last(&((rx_ts_info_p)->_FPQ.queue), \
+                                struct rx_packet, entry ); \
+            i < (num_transfer); \
+            i++,p=opr_queue_Prev(&p->entry, struct rx_packet, entry )); \
+        opr_queue_SplitAfterPrepend(&((rx_ts_info_p)->_FPQ.queue), \
+                                  &rx_freePacketQueue, &p->entry); \
         (rx_ts_info_p)->_FPQ.len -= (num_transfer); \
         rx_nFreePackets += (num_transfer); \
         (rx_ts_info_p)->_FPQ.ltog_ops++; \
@@ -327,9 +333,12 @@ EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
         struct rx_packet * p; \
         tsize = (rx_TSFPQGlobSize <= rx_nFreePackets) ? \
                  rx_TSFPQGlobSize : rx_nFreePackets; \
-        for (i=0,p=queue_First(&rx_freePacketQueue, rx_packet); \
-             i < tsize; i++,p=queue_Next(p, rx_packet)); \
-        queue_SplitBeforeAppend(&rx_freePacketQueue,&((rx_ts_info_p)->_FPQ),p); \
+        for (i=0, \
+              p=opr_queue_First(&rx_freePacketQueue, struct rx_packet, entry); \
+             i < tsize; \
+            i++,p=opr_queue_Next(&p->entry, struct rx_packet, entry)); \
+        opr_queue_SplitBeforeAppend(&rx_freePacketQueue, \
+                                  &((rx_ts_info_p)->_FPQ.queue), &p->entry); \
         (rx_ts_info_p)->_FPQ.len += i; \
         rx_nFreePackets -= i; \
         (rx_ts_info_p)->_FPQ.gtol_ops++; \
@@ -342,9 +351,12 @@ EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
         struct rx_packet * p; \
         tsize = (num_transfer); \
         if (tsize > rx_nFreePackets) tsize = rx_nFreePackets; \
-        for (i=0,p=queue_First(&rx_freePacketQueue, rx_packet); \
-             i < tsize; i++,p=queue_Next(p, rx_packet)); \
-        queue_SplitBeforeAppend(&rx_freePacketQueue,&((rx_ts_info_p)->_FPQ),p); \
+        for (i=0, \
+              p=opr_queue_First(&rx_freePacketQueue, struct rx_packet, entry); \
+             i < tsize; \
+            i++, p=opr_queue_Next(&p->entry, struct rx_packet, entry)); \
+        opr_queue_SplitBeforeAppend(&rx_freePacketQueue, \
+                                  &((rx_ts_info_p)->_FPQ.queue), &p->entry); \
         (rx_ts_info_p)->_FPQ.len += i; \
         rx_nFreePackets -= i; \
         (rx_ts_info_p)->_FPQ.gtol_ops++; \
@@ -353,8 +365,9 @@ EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
 /* checkout a packet from the thread-specific free packet queue */
 #define RX_TS_FPQ_CHECKOUT(rx_ts_info_p,p) \
     do { \
-        (p) = queue_First(&((rx_ts_info_p)->_FPQ), rx_packet); \
-        queue_Remove(p); \
+        (p) = opr_queue_First(&((rx_ts_info_p)->_FPQ.queue), \
+                            struct rx_packet, entry); \
+        opr_queue_Remove(&p->entry); \
         RX_FPQ_MARK_USED(p); \
         (rx_ts_info_p)->_FPQ.len--; \
         (rx_ts_info_p)->_FPQ.checkout_ops++; \
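
Because the unlink now takes the linkage pointer rather than the containing packet, the checkout above is the First-then-Remove pop idiom. A sketch of that pattern on its own, minus the FPQ bookkeeping (names illustrative):

    #include <opr/queue.h>
    #include <stddef.h>

    struct pkt {
        struct opr_queue entry;
        int id;
    };

    /* Pop the head packet off q, or return NULL if q is empty. */
    static struct pkt *
    pop_front(struct opr_queue *q)
    {
        struct pkt *p;

        if (opr_queue_IsEmpty(q))
            return NULL;
        p = opr_queue_First(q, struct pkt, entry);
        opr_queue_Remove(&p->entry);
        return p;
    }

RX_TS_FPQ_CHECKOUT itself omits the empty check because its callers test opr_queue_IsEmpty on the FPQ first, as rxi_AllocPacketTSFPQ does below.
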
@@ -368,12 +381,14 @@ EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
         int i; \
         struct rx_packet *p; \
         if (num_transfer > (rx_ts_info_p)->_FPQ.len) num_transfer = (rx_ts_info_p)->_FPQ.len; \
-        for (i=0, p=queue_First(&((rx_ts_info_p)->_FPQ), rx_packet); \
+        for (i=0, p=opr_queue_First(&((rx_ts_info_p)->_FPQ.queue), \
+                                  struct rx_packet, entry); \
              i < num_transfer; \
-             i++, p=queue_Next(p, rx_packet)) { \
+             i++, p=opr_queue_Next(&p->entry, struct rx_packet, entry)) { \
             RX_FPQ_MARK_USED(p); \
         } \
-        queue_SplitBeforeAppend(&((rx_ts_info_p)->_FPQ),(q),p); \
+        opr_queue_SplitBeforeAppend(&((rx_ts_info_p)->_FPQ.queue),(q), \
+                                  &((p)->entry)); \
         (rx_ts_info_p)->_FPQ.len -= num_transfer; \
         (rx_ts_info_p)->_FPQ.checkout_ops++; \
         (rx_ts_info_p)->_FPQ.checkout_xfer += num_transfer; \
@@ -381,7 +396,7 @@ EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
 /* check a packet into the thread-specific free packet queue */
 #define RX_TS_FPQ_CHECKIN(rx_ts_info_p,p) \
     do { \
-        queue_Prepend(&((rx_ts_info_p)->_FPQ), (p)); \
+        opr_queue_Prepend(&((rx_ts_info_p)->_FPQ.queue), &((p)->entry)); \
         RX_FPQ_MARK_FREE(p); \
         (rx_ts_info_p)->_FPQ.len++; \
         (rx_ts_info_p)->_FPQ.checkin_ops++; \
@@ -393,11 +408,11 @@ EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
  * since caller already knows length of (q) for other reasons */
 #define RX_TS_FPQ_QCHECKIN(rx_ts_info_p,num_transfer,q) \
     do { \
-        struct rx_packet *p, *np; \
-        for (queue_Scan((q), p, np, rx_packet)) { \
-            RX_FPQ_MARK_FREE(p); \
+       struct opr_queue *cur; \
+        for (opr_queue_Scan((q), cur)) { \
+            RX_FPQ_MARK_FREE(opr_queue_Entry(cur, struct rx_packet, entry)); \
         } \
-        queue_SplicePrepend(&((rx_ts_info_p)->_FPQ),(q)); \
+        opr_queue_SplicePrepend(&((rx_ts_info_p)->_FPQ.queue),(q)); \
         (rx_ts_info_p)->_FPQ.len += (num_transfer); \
         (rx_ts_info_p)->_FPQ.checkin_ops++; \
         (rx_ts_info_p)->_FPQ.checkin_xfer += (num_transfer); \
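
QCHECKIN shows the read-only traversal form: opr_queue_Scan yields bare struct opr_queue cursors, opr_queue_Entry recovers the packet, and the whole chain then moves across in one splice. A sketch of the same shape, assuming (as the macro does) that the splice leaves the source queue empty:

    #include <opr/queue.h>

    #define PKT_FREE 0x1          /* illustrative flag */

    struct pkt {
        struct opr_queue entry;
        int flags;
    };

    /* Mark every packet on src free, then prepend all of src onto dst. */
    static void
    checkin_all(struct opr_queue *dst, struct opr_queue *src)
    {
        struct opr_queue *cur;

        for (opr_queue_Scan(src, cur)) {
            opr_queue_Entry(cur, struct pkt, entry)->flags |= PKT_FREE;
        }
        opr_queue_SplicePrepend(dst, src);   /* O(1); src ends up empty */
    }
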
@@ -439,7 +454,7 @@ EXT afs_kmutex_t freeSQEList_lock;
 #endif
 
 /* List of free call structures */
-EXT struct rx_queue rx_freeCallQueue;
+EXT struct opr_queue rx_freeCallQueue;
 #ifdef RX_ENABLE_LOCKS
 EXT afs_kmutex_t rx_freeCallQueue_lock;
 #endif
index 8fc0847..28441ec 100644 (file)
 # include <sys/sysmacros.h>
 #endif
 
+#include <opr/queue.h>
+
 #include "rx.h"
 #include "rx_clock.h"
-#include "rx_queue.h"
 #include "rx_packet.h"
 #include "rx_atomic.h"
 #include "rx_globals.h"
@@ -82,7 +83,7 @@ static afs_uint32       rx_packet_id = 0;
 
 extern char cml_version_number[];
 
-static int AllocPacketBufs(int class, int num_pkts, struct rx_queue *q);
+static int AllocPacketBufs(int class, int num_pkts, struct opr_queue *q);
 
 static void rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
                                afs_uint32 ahost, short aport,
@@ -102,10 +103,10 @@ static void rxi_AdjustLocalPacketsTSFPQ(int num_keep_local,
 static void rxi_FreePacketNoLock(struct rx_packet *p);
 static int rxi_FreeDataBufsNoLock(struct rx_packet *p, afs_uint32 first);
 static int rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first,
-                                  struct rx_queue * q);
+                                  struct opr_queue * q);
 #endif
 
-extern struct rx_queue rx_idleServerQueue;
+extern struct opr_queue rx_idleServerQueue;
 
 /* some rules about packets:
  * 1.  When a packet is allocated, the final iov_buf contains room for
@@ -240,14 +241,14 @@ rx_SlowWritePacket(struct rx_packet * packet, int offset, int resid, char *in)
 }
 
 int
-rxi_AllocPackets(int class, int num_pkts, struct rx_queue * q)
+rxi_AllocPackets(int class, int num_pkts, struct opr_queue * q)
 {
-    struct rx_packet *p, *np;
+    struct opr_queue *c;
 
     num_pkts = AllocPacketBufs(class, num_pkts, q);
 
-    for (queue_Scan(q, p, np, rx_packet)) {
-        RX_PACKET_IOV_FULLINIT(p);
+    for (opr_queue_Scan(q, c)) {
+        RX_PACKET_IOV_FULLINIT(opr_queue_Entry(c, struct rx_packet, entry));
     }
 
     return num_pkts;
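
This conversion is the template for every queue_Scan in the patch: the old macro needed two typed element pointers, while opr_queue_Scan walks one untyped cursor and the element is recovered only where it is needed. A minimal before/after sketch, old form shown as a comment:

    #include <opr/queue.h>

    struct pkt {
        struct opr_queue entry;
        int inited;
    };

    static void
    init_all(struct opr_queue *q)
    {
        struct opr_queue *cursor;

        /* Old rx_queue form needed two pkt pointers:
         *     for (queue_Scan(q, p, np, pkt)) p->inited = 1;          */
        for (opr_queue_Scan(q, cursor)) {
            opr_queue_Entry(cursor, struct pkt, entry)->inited = 1;
        }
    }
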
@@ -255,7 +256,7 @@ rxi_AllocPackets(int class, int num_pkts, struct rx_queue * q)
 
 #ifdef RX_ENABLE_TSFPQ
 static int
-AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
+AllocPacketBufs(int class, int num_pkts, struct opr_queue * q)
 {
     struct rx_ts_info_t * rx_ts_info;
     int transfer;
@@ -285,7 +286,7 @@ AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
 }
 #else /* RX_ENABLE_TSFPQ */
 static int
-AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
+AllocPacketBufs(int class, int num_pkts, struct opr_queue * q)
 {
     struct rx_packet *c;
     int i;
@@ -338,13 +339,13 @@ AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
     }
 #endif /* KERNEL */
 
-    for (i=0, c=queue_First(&rx_freePacketQueue, rx_packet);
+    for (i=0, c=opr_queue_First(&rx_freePacketQueue, struct rx_packet, entry);
         i < num_pkts;
-        i++, c=queue_Next(c, rx_packet)) {
+        i++, c=opr_queue_Next(&c->entry, struct rx_packet, entry)) {
         RX_FPQ_MARK_USED(c);
     }
 
-    queue_SplitBeforeAppend(&rx_freePacketQueue,q,c);
+    opr_queue_SplitBeforeAppend(&rx_freePacketQueue, q, &c->entry);
 
     rx_nFreePackets -= num_pkts;
 
@@ -364,22 +365,25 @@ AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
 #ifdef RX_ENABLE_TSFPQ
 /* num_pkts=0 means queue length is unknown */
 int
-rxi_FreePackets(int num_pkts, struct rx_queue * q)
+rxi_FreePackets(int num_pkts, struct opr_queue * q)
 {
     struct rx_ts_info_t * rx_ts_info;
-    struct rx_packet *c, *nc;
+    struct opr_queue *cursor, *store;
     SPLVAR;
 
     osi_Assert(num_pkts >= 0);
     RX_TS_INFO_GET(rx_ts_info);
 
     if (!num_pkts) {
-       for (queue_Scan(q, c, nc, rx_packet), num_pkts++) {
-           rxi_FreeDataBufsTSFPQ(c, 2, 0);
+       for (opr_queue_ScanSafe(q, cursor, store)) {
+           num_pkts++;
+           rxi_FreeDataBufsTSFPQ(opr_queue_Entry(cursor, struct rx_packet,
+                                                entry), 2, 0);
        }
     } else {
-       for (queue_Scan(q, c, nc, rx_packet)) {
-           rxi_FreeDataBufsTSFPQ(c, 2, 0);
+       for (opr_queue_ScanSafe(q, cursor, store)) {
+           rxi_FreeDataBufsTSFPQ(opr_queue_Entry(cursor, struct rx_packet,
+                                                entry), 2, 0);
        }
     }
 
@@ -405,18 +409,20 @@ rxi_FreePackets(int num_pkts, struct rx_queue * q)
 #else /* RX_ENABLE_TSFPQ */
 /* num_pkts=0 means queue length is unknown */
 int
-rxi_FreePackets(int num_pkts, struct rx_queue *q)
+rxi_FreePackets(int num_pkts, struct opr_queue *q)
 {
-    struct rx_queue cbs;
-    struct rx_packet *p, *np;
+    struct opr_queue cbs;
+    struct opr_queue *cursor, *store;
     int qlen = 0;
     SPLVAR;
 
     osi_Assert(num_pkts >= 0);
-    queue_Init(&cbs);
+    opr_queue_Init(&cbs);
 
     if (!num_pkts) {
-        for (queue_Scan(q, p, np, rx_packet), num_pkts++) {
+        for (opr_queue_ScanSafe(q, cursor, store)) {
+           struct rx_packet *p
+               = opr_queue_Entry(cursor, struct rx_packet, entry);
            if (p->niovecs > 2) {
                qlen += rxi_FreeDataBufsToQueue(p, 2, &cbs);
            }
@@ -425,7 +431,10 @@ rxi_FreePackets(int num_pkts, struct rx_queue *q)
        if (!num_pkts)
            return 0;
     } else {
-        for (queue_Scan(q, p, np, rx_packet)) {
+        for (opr_queue_ScanSafe(q, cursor, store)) {
+           struct rx_packet *p
+               = opr_queue_Entry(cursor, struct rx_packet, entry);
+
            if (p->niovecs > 2) {
                qlen += rxi_FreeDataBufsToQueue(p, 2, &cbs);
            }
@@ -434,7 +443,7 @@ rxi_FreePackets(int num_pkts, struct rx_queue *q)
     }
 
     if (qlen) {
-       queue_SpliceAppend(q, &cbs);
+       opr_queue_SpliceAppend(q, &cbs);
        qlen += num_pkts;
     } else
        qlen = num_pkts;
@@ -442,7 +451,7 @@ rxi_FreePackets(int num_pkts, struct rx_queue *q)
     NETPRI;
     MUTEX_ENTER(&rx_freePktQ_lock);
 
-    queue_SpliceAppend(&rx_freePacketQueue, q);
+    opr_queue_SpliceAppend(&rx_freePacketQueue, q);
     rx_nFreePackets += qlen;
 
     /* Wakeup anyone waiting for packets */
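
Worth noting here is the locking shape the splice operations make cheap: the continuation buffers are collected on a local queue (cbs) with no lock held, and only a single O(1) splice onto the global free list happens under rx_freePktQ_lock. A hedged sketch of that pattern; the lock and list names are illustrative, not RX's:

    #include <pthread.h>
    #include <opr/queue.h>

    static struct opr_queue free_list;   /* opr_queue_Init()ed at startup */
    static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Hand a locally-built batch back to the shared free list, paying
     * for one lock acquisition however many entries the batch holds. */
    static void
    give_back(struct opr_queue *batch)
    {
        pthread_mutex_lock(&free_lock);
        opr_queue_SpliceAppend(&free_list, batch);   /* batch left empty */
        pthread_mutex_unlock(&free_lock);
    }
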
@@ -492,8 +501,7 @@ int
 rxi_AllocDataBuf(struct rx_packet *p, int nb, int class)
 {
     int i, nv;
-    struct rx_queue q;
-    struct rx_packet *cb, *ncb;
+    struct opr_queue q, *cursor, *store;
 
     /* compute the number of cbuf's we need */
     nv = nb / RX_CBUFFERSIZE;
@@ -505,14 +513,19 @@ rxi_AllocDataBuf(struct rx_packet *p, int nb, int class)
         return nb;
 
     /* allocate buffers */
-    queue_Init(&q);
+    opr_queue_Init(&q);
     nv = AllocPacketBufs(class, nv, &q);
 
     /* setup packet iovs */
-    for (i = p->niovecs, queue_Scan(&q, cb, ncb, rx_packet), i++) {
-        queue_Remove(cb);
+    i = p->niovecs;
+    for (opr_queue_ScanSafe(&q, cursor, store)) {
+       struct rx_packet *cb
+           = opr_queue_Entry(cursor, struct rx_packet, entry);
+
+        opr_queue_Remove(&cb->entry);
         p->wirevec[i].iov_base = (caddr_t) cb->localdata;
         p->wirevec[i].iov_len = RX_CBUFFERSIZE;
+       i++;
     }
 
     nb -= (nv * RX_CBUFFERSIZE);
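
The rewritten loop above is the removal-during-traversal case: since the body unlinks the current entry, it must use opr_queue_ScanSafe, whose extra store cursor holds the successor before the unlink. A sketch of that idiom in isolation (names illustrative):

    #include <opr/queue.h>

    struct pkt {
        struct opr_queue entry;
        int id;
    };

    /* Detach every entry from q and hand it to consume(); plain
     * opr_queue_Scan would lose its place once the entry is unlinked. */
    static void
    drain(struct opr_queue *q, void (*consume)(struct pkt *))
    {
        struct opr_queue *cursor, *store;

        for (opr_queue_ScanSafe(q, cursor, store)) {
            struct pkt *p = opr_queue_Entry(cursor, struct pkt, entry);

            opr_queue_Remove(&p->entry);
            consume(p);
        }
    }
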
@@ -602,7 +615,7 @@ rxi_MorePackets(int apackets)
 #endif
        p->niovecs = 2;
 
-       queue_Append(&rx_freePacketQueue, p);
+       opr_queue_Append(&rx_freePacketQueue, &p->entry);
 #ifdef RXDEBUG_PACKET
         p->packetId = rx_packet_id++;
         p->allNextp = rx_mallocedP;
@@ -712,7 +725,7 @@ rxi_MorePacketsNoLock(int apackets)
 #endif
        p->niovecs = 2;
 
-       queue_Append(&rx_freePacketQueue, p);
+       opr_queue_Append(&rx_freePacketQueue, &p->entry);
 #ifdef RXDEBUG_PACKET
         p->packetId = rx_packet_id++;
         p->allNextp = rx_mallocedP;
@@ -813,7 +826,7 @@ rxi_FreePacketNoLock(struct rx_packet *p)
 
     RX_FPQ_MARK_FREE(p);
     rx_nFreePackets++;
-    queue_Append(&rx_freePacketQueue, p);
+    opr_queue_Append(&rx_freePacketQueue, &p->entry);
 }
 #endif /* RX_ENABLE_TSFPQ */
 
@@ -854,7 +867,7 @@ rxi_FreePacketTSFPQ(struct rx_packet *p, int flush_global)
  */
 #ifndef RX_ENABLE_TSFPQ
 static int
-rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first, struct rx_queue * q)
+rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first, struct opr_queue * q)
 {
     struct iovec *iov;
     struct rx_packet * cb;
@@ -866,7 +879,7 @@ rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first, struct rx_queue *
            osi_Panic("rxi_FreeDataBufsToQueue: unexpected NULL iov");
        cb = RX_CBUF_TO_PACKET(iov->iov_base, p);
        RX_FPQ_MARK_FREE(cb);
-       queue_Append(q, cb);
+       opr_queue_Append(q, &cb->entry);
     }
     p->length = 0;
     p->niovecs = 0;
@@ -1131,13 +1144,13 @@ rxi_AllocPacketNoLock(int class)
 
     if (rx_stats_active)
         rx_atomic_inc(&rx_stats.packetRequests);
-    if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
+    if (opr_queue_IsEmpty(&rx_ts_info->_FPQ.queue)) {
 
 #ifdef KERNEL
-        if (queue_IsEmpty(&rx_freePacketQueue))
+        if (opr_queue_IsEmpty(&rx_freePacketQueue))
            osi_Panic("rxi_AllocPacket error");
 #else /* KERNEL */
-        if (queue_IsEmpty(&rx_freePacketQueue))
+        if (opr_queue_IsEmpty(&rx_freePacketQueue))
            rxi_MorePacketsNoLock(rx_maxSendWindow);
 #endif /* KERNEL */
 
@@ -1193,16 +1206,16 @@ rxi_AllocPacketNoLock(int class)
         rx_atomic_inc(&rx_stats.packetRequests);
 
 #ifdef KERNEL
-    if (queue_IsEmpty(&rx_freePacketQueue))
+    if (opr_queue_IsEmpty(&rx_freePacketQueue))
        osi_Panic("rxi_AllocPacket error");
 #else /* KERNEL */
-    if (queue_IsEmpty(&rx_freePacketQueue))
+    if (opr_queue_IsEmpty(&rx_freePacketQueue))
        rxi_MorePacketsNoLock(rx_maxSendWindow);
 #endif /* KERNEL */
 
     rx_nFreePackets--;
-    p = queue_First(&rx_freePacketQueue, rx_packet);
-    queue_Remove(p);
+    p = opr_queue_First(&rx_freePacketQueue, struct rx_packet, entry);
+    opr_queue_Remove(&p->entry);
     RX_FPQ_MARK_USED(p);
 
     dpf(("Alloc %"AFS_PTR_FMT", class %d\n", p, class));
@@ -1228,16 +1241,16 @@ rxi_AllocPacketTSFPQ(int class, int pull_global)
 
     if (rx_stats_active)
         rx_atomic_inc(&rx_stats.packetRequests);
-    if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
+    if (pull_global && opr_queue_IsEmpty(&rx_ts_info->_FPQ.queue)) {
         MUTEX_ENTER(&rx_freePktQ_lock);
 
-        if (queue_IsEmpty(&rx_freePacketQueue))
+        if (opr_queue_IsEmpty(&rx_freePacketQueue))
            rxi_MorePacketsNoLock(rx_maxSendWindow);
 
        RX_TS_FPQ_GTOL(rx_ts_info);
 
         MUTEX_EXIT(&rx_freePktQ_lock);
-    } else if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
+    } else if (opr_queue_IsEmpty(&rx_ts_info->_FPQ.queue)) {
         return NULL;
     }
 
@@ -1748,7 +1761,6 @@ rxi_ReceiveDebugPacket(struct rx_packet *ap, osi_socket asocket,
 {
     struct rx_debugIn tin;
     afs_int32 tl;
-    struct rx_serverQueueEntry *np, *nqe;
 
     /*
      * Only respond to client-initiated Rx debug packets,
@@ -1786,8 +1798,7 @@ rxi_ReceiveDebugPacket(struct rx_packet *ap, osi_socket asocket,
            tstat.usedFDs = CountFDs(64);
            tstat.nWaiting = htonl(rx_atomic_read(&rx_nWaiting));
            tstat.nWaited = htonl(rx_atomic_read(&rx_nWaited));
-           queue_Count(&rx_idleServerQueue, np, nqe, rx_serverQueueEntry,
-                       tstat.idleThreads);
+           tstat.idleThreads = opr_queue_Count(&rx_idleServerQueue);
            MUTEX_EXIT(&rx_serverPool_lock);
            tstat.idleThreads = htonl(tstat.idleThreads);
            tl = sizeof(struct rx_debugStats) - ap->length;
@@ -1850,9 +1861,9 @@ rxi_ReceiveDebugPacket(struct rx_packet *ap, osi_socket asocket,
                                tconn.callState[j] = tcall->state;
                                tconn.callMode[j] = tcall->mode;
                                tconn.callFlags[j] = tcall->flags;
-                               if (queue_IsNotEmpty(&tcall->rq))
+                               if (!opr_queue_IsEmpty(&tcall->rq))
                                    tconn.callOther[j] |= RX_OTHER_IN;
-                               if (queue_IsNotEmpty(&tcall->tq))
+                               if (!opr_queue_IsEmpty(&tcall->tq))
                                    tconn.callOther[j] |= RX_OTHER_OUT;
                            } else
                                tconn.callState[j] = RX_STATE_NOTINIT;
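
One small simplification above: the idle-thread count no longer open-codes a counting scan with two scratch pointers; opr_queue_Count walks the list and returns its length directly. Sketched:

    #include <opr/queue.h>

    /* Old: queue_Count(&rx_idleServerQueue, np, nqe, rx_serverQueueEntry, n);
     * New equivalent (still a linear walk, just without the scratch vars): */
    static int
    idle_threads(struct opr_queue *idle)
    {
        return opr_queue_Count(idle);
    }
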
index 0fbec34..0929de3 100644 (file)
@@ -10,8 +10,6 @@
 #ifndef _RX_PACKET_
 #define _RX_PACKET_
 
-#include "rx_queue.h"
-
 #if defined(AFS_NT40_ENV)
 #include "rx_xmit_nt.h"
 #endif
@@ -239,7 +237,7 @@ struct rx_jumboHeader {
 #endif
 
 struct rx_packet {
-    struct rx_queue queueItemHeader;   /* Packets are chained using the queue.h package */
+    struct opr_queue entry;    /* Packets are chained using opr_queue */
     struct clock timeSent;     /* When this packet was transmitted last */
     afs_uint32 firstSerial;    /* Original serial number of this packet */
     struct clock firstSent;    /* When this packet was transmitted first */
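
The renamed member makes the head/linkage distinction explicit: a list head is a standalone struct opr_queue, while an element embeds one as a named linkage member, and opr_queue_Entry maps a linkage pointer back to its element. Conceptually that mapping is a container-of; a sketch, assuming opr/queue.h behaves as its call sites here imply:

    #include <stddef.h>
    #include <opr/queue.h>

    struct pkt {
        int id;                    /* payload may precede the linkage... */
        struct opr_queue entry;    /* ...since the member name is explicit */
    };

    /* opr_queue_Entry(cursor, struct pkt, entry) behaves like
     *   (struct pkt *)((char *)cursor - offsetof(struct pkt, entry));
     * the old rx_queue macros instead cast the pointer directly, which
     * required the linkage to come first and defeated type checking. */
    static struct pkt *
    from_cursor(struct opr_queue *cursor)
    {
        return opr_queue_Entry(cursor, struct pkt, entry);
    }
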
index baf37b0..ca747f6 100644 (file)
@@ -60,7 +60,7 @@ struct rx_peer {
     u_short congestSeq;                /* Changed when a call retransmits */
     afs_uint64 bytesSent;      /* Number of bytes sent to this peer */
     afs_uint64 bytesReceived;  /* Number of bytes received from this peer */
-    struct rx_queue rpcStats;  /* rpc statistic list */
+    struct opr_queue rpcStats; /* rpc statistic list */
     int lastReachTime;         /* Last time we verified reachability */
     afs_int32 maxPacketSize;    /* peer packetsize hint */
 #ifdef AFS_RXERRQ_ENV
index 223280e..0b8bd27 100644 (file)
@@ -417,8 +417,8 @@ extern void rxi_RestoreDataBufs(struct rx_packet *p);
 extern int rxi_TrimDataBufs(struct rx_packet *p, int first);
 extern void rxi_FreePacket(struct rx_packet *p);
 extern struct rx_packet *rxi_AllocPacket(int cla_ss);
-extern int rxi_AllocPackets(int cla_ss, int num_pkts, struct rx_queue *q);
-extern int rxi_FreePackets(int num_pkts, struct rx_queue *q);
+extern int rxi_AllocPackets(int cla_ss, int num_pkts, struct opr_queue *q);
+extern int rxi_FreePackets(int num_pkts, struct opr_queue *q);
 extern struct rx_packet *rxi_AllocSendPacket(struct rx_call *call,
                                             int want);
 extern int rxi_ReadPacket(osi_socket socket, struct rx_packet *p,
index 03ece50..b4272bb 100644 (file)
@@ -449,7 +449,7 @@ struct rx_ts_info_t * rx_ts_info_init(void) {
     rx_ts_info = calloc(1, sizeof(rx_ts_info_t));
     osi_Assert(rx_ts_info != NULL && pthread_setspecific(rx_ts_info_key, rx_ts_info) == 0);
 #ifdef RX_ENABLE_TSFPQ
-    queue_Init(&rx_ts_info->_FPQ);
+    opr_queue_Init(&rx_ts_info->_FPQ.queue);
 
     MUTEX_ENTER(&rx_packets_mutex);
     rx_TSFPQMaxProcs++;
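
_FPQ illustrates the other half of that distinction: when a structure carries bookkeeping alongside its list, the head becomes a named queue member, which is why the macros earlier address (rx_ts_info_p)->_FPQ.queue rather than _FPQ itself. A hedged sketch of that shape; only the queue, len, and *_ops fields are taken from the code above, the rest is illustrative:

    #include <opr/queue.h>

    struct free_pkt_queue {
        struct opr_queue queue;      /* the packets themselves */
        int len;                     /* cached length */
        unsigned long checkin_ops;   /* stats counters */
        unsigned long checkout_ops;
    };

    static void
    fpq_init(struct free_pkt_queue *fpq)
    {
        opr_queue_Init(&fpq->queue);
        fpq->len = 0;
        fpq->checkin_ops = fpq->checkout_ops = 0;
    }
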
index b63b42f..1aee72d 100644 (file)
@@ -58,7 +58,6 @@
 
 #include "rx.h"
 #include "rx_clock.h"
-#include "rx_queue.h"
 #include "rx_globals.h"
 #include "rx_atomic.h"
 #include "rx_internal.h"
@@ -100,15 +99,15 @@ rxi_GetNextPacket(struct rx_call *call) {
        call->currentPacket = NULL;
     }
 
-    if (queue_IsEmpty(&call->rq))
+    if (opr_queue_IsEmpty(&call->rq))
        return 0;
 
     /* Check that next packet available is next in sequence */
-    rp = queue_First(&call->rq, rx_packet);
+    rp = opr_queue_First(&call->rq, struct rx_packet, entry);
     if (rp->header.seq != call->rnext)
        return 0;
 
-    queue_Remove(rp);
+    opr_queue_Remove(&rp->entry);
 #ifdef RX_TRACK_PACKETS
     rp->flags &= ~RX_PKTFLAG_RQ;
 #endif
@@ -171,7 +170,7 @@ rxi_ReadProc(struct rx_call *call, char *buf,
     requestCount = nbytes;
 
     /* Free any packets from the last call to ReadvProc/WritevProc */
-    if (queue_IsNotEmpty(&call->iovq)) {
+    if (!opr_queue_IsEmpty(&call->iovq)) {
 #ifdef RXDEBUG_PACKET
         call->iovqc -=
 #endif /* RXDEBUG_PACKET */
@@ -311,7 +310,7 @@ rx_ReadProc(struct rx_call *call, char *buf, int nbytes)
     SPLVAR;
 
     /* Free any packets from the last call to ReadvProc/WritevProc */
-    if (!queue_IsEmpty(&call->iovq)) {
+    if (!opr_queue_IsEmpty(&call->iovq)) {
 #ifdef RXDEBUG_PACKET
         call->iovqc -=
 #endif /* RXDEBUG_PACKET */
@@ -351,7 +350,7 @@ rx_ReadProc32(struct rx_call *call, afs_int32 * value)
     SPLVAR;
 
     /* Free any packets from the last call to ReadvProc/WritevProc */
-    if (!queue_IsEmpty(&call->iovq)) {
+    if (!opr_queue_IsEmpty(&call->iovq)) {
 #ifdef RXDEBUG_PACKET
         call->iovqc -=
 #endif /* RXDEBUG_PACKET */
@@ -452,7 +451,7 @@ rxi_FillReadVec(struct rx_call *call, afs_uint32 serial)
                 call->currentPacket->flags &= ~RX_PKTFLAG_CP;
                 call->currentPacket->flags |= RX_PKTFLAG_IOVQ;
 #endif
-               queue_Append(&call->iovq, call->currentPacket);
+               opr_queue_Append(&call->iovq, &call->currentPacket->entry);
 #ifdef RXDEBUG_PACKET
                 call->iovqc++;
 #endif /* RXDEBUG_PACKET */
@@ -466,7 +465,7 @@ rxi_FillReadVec(struct rx_call *call, afs_uint32 serial)
                    call->currentPacket->flags &= ~RX_PKTFLAG_CP;
                    call->currentPacket->flags |= RX_PKTFLAG_IOVQ;
 #endif
-                   queue_Append(&call->iovq, call->currentPacket);
+                   opr_queue_Append(&call->iovq, &call->currentPacket->entry);
 #ifdef RXDEBUG_PACKET
                     call->iovqc++;
 #endif /* RXDEBUG_PACKET */
@@ -513,7 +512,7 @@ rxi_ReadvProc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
     int bytes;
 
     /* Free any packets from the last call to ReadvProc/WritevProc */
-    if (queue_IsNotEmpty(&call->iovq)) {
+    if (!opr_queue_IsEmpty(&call->iovq)) {
 #ifdef RXDEBUG_PACKET
         call->iovqc -=
 #endif /* RXDEBUG_PACKET */
@@ -601,7 +600,7 @@ rxi_WriteProc(struct rx_call *call, char *buf,
     int requestCount = nbytes;
 
     /* Free any packets from the last call to ReadvProc/WritevProc */
-    if (queue_IsNotEmpty(&call->iovq)) {
+    if (!opr_queue_IsEmpty(&call->iovq)) {
 #ifdef RXDEBUG_PACKET
         call->iovqc -=
 #endif /* RXDEBUG_PACKET */
@@ -650,7 +649,7 @@ rxi_WriteProc(struct rx_call *call, char *buf,
 #ifdef RX_TRACK_PACKETS
                call->currentPacket->flags |= RX_PKTFLAG_TQ;
 #endif
-               queue_Append(&call->tq, call->currentPacket);
+               opr_queue_Append(&call->tq, &call->currentPacket->entry);
 #ifdef RXDEBUG_PACKET
                 call->tqc++;
 #endif /* RXDEBUG_PACKET */
@@ -792,7 +791,7 @@ rx_WriteProc(struct rx_call *call, char *buf, int nbytes)
     SPLVAR;
 
     /* Free any packets from the last call to ReadvProc/WritevProc */
-    if (queue_IsNotEmpty(&call->iovq)) {
+    if (!opr_queue_IsEmpty(&call->iovq)) {
 #ifdef RXDEBUG_PACKET
         call->iovqc -=
 #endif /* RXDEBUG_PACKET */
@@ -831,7 +830,7 @@ rx_WriteProc32(struct rx_call *call, afs_int32 * value)
     char *tcurpos;
     SPLVAR;
 
-    if (queue_IsNotEmpty(&call->iovq)) {
+    if (!opr_queue_IsEmpty(&call->iovq)) {
 #ifdef RXDEBUG_PACKET
         call->iovqc -=
 #endif /* RXDEBUG_PACKET */
@@ -891,7 +890,7 @@ rxi_WritevAlloc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
     nextio = 0;
 
     /* Free any packets from the last call to ReadvProc/WritevProc */
-    if (queue_IsNotEmpty(&call->iovq)) {
+    if (!opr_queue_IsEmpty(&call->iovq)) {
 #ifdef RXDEBUG_PACKET
         call->iovqc -=
 #endif /* RXDEBUG_PACKET */
@@ -938,7 +937,7 @@ rxi_WritevAlloc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
 #ifdef RX_TRACK_PACKETS
            cp->flags |= RX_PKTFLAG_IOVQ;
 #endif
-           queue_Append(&call->iovq, cp);
+           opr_queue_Append(&call->iovq, &cp->entry);
 #ifdef RXDEBUG_PACKET
             call->iovqc++;
 #endif /* RXDEBUG_PACKET */
@@ -1017,11 +1016,11 @@ int
 rxi_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
 {
 #ifdef RX_TRACK_PACKETS
-    struct rx_packet *p, *np;
+    struct opr_queue *cursor;
 #endif
     int nextio;
     int requestCount;
-    struct rx_queue tmpq;
+    struct opr_queue tmpq;
 #ifdef RXDEBUG_PACKET
     u_short tmpqc;
 #endif
@@ -1047,7 +1046,7 @@ rxi_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
             call->currentPacket->flags &= ~RX_PKTFLAG_CP;
             call->currentPacket->flags |= RX_PKTFLAG_IOVQ;
 #endif
-           queue_Prepend(&call->iovq, call->currentPacket);
+           opr_queue_Prepend(&call->iovq, &call->currentPacket->entry);
 #ifdef RXDEBUG_PACKET
             call->iovqc++;
 #endif /* RXDEBUG_PACKET */
@@ -1066,7 +1065,7 @@ rxi_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
      * the iovec. We put the loop condition at the end to ensure that
      * a zero length write will push a short packet. */
     nextio = 0;
-    queue_Init(&tmpq);
+    opr_queue_Init(&tmpq);
 #ifdef RXDEBUG_PACKET
     tmpqc = 0;
 #endif /* RXDEBUG_PACKET */
@@ -1083,7 +1082,7 @@ rxi_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
             /* PrepareSendPacket drops the call lock */
             rxi_WaitforTQBusy(call);
 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
-           queue_Append(&tmpq, call->currentPacket);
+           opr_queue_Append(&tmpq, &call->currentPacket->entry);
 #ifdef RXDEBUG_PACKET
             tmpqc++;
 #endif /* RXDEBUG_PACKET */
@@ -1091,7 +1090,7 @@ rxi_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
 
            /* The head of the iovq is now the current packet */
            if (nbytes) {
-               if (queue_IsEmpty(&call->iovq)) {
+               if (opr_queue_IsEmpty(&call->iovq)) {
                     MUTEX_EXIT(&call->lock);
                    call->error = RX_PROTOCOL_ERROR;
 #ifdef RXDEBUG_PACKET
@@ -1100,8 +1099,10 @@ rxi_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
                         rxi_FreePackets(0, &tmpq);
                    return 0;
                }
-               call->currentPacket = queue_First(&call->iovq, rx_packet);
-               queue_Remove(call->currentPacket);
+               call->currentPacket = opr_queue_First(&call->iovq,
+                                                     struct rx_packet,
+                                                     entry);
+               opr_queue_Remove(&call->currentPacket->entry);
 #ifdef RX_TRACK_PACKETS
                 call->currentPacket->flags &= ~RX_PKTFLAG_IOVQ;
                call->currentPacket->flags |= RX_PKTFLAG_CP;
@@ -1130,7 +1131,7 @@ rxi_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
 #ifdef RX_TRACK_PACKETS
                    call->currentPacket->flags &= ~RX_PKTFLAG_CP;
 #endif
-                    queue_Prepend(&tmpq, call->currentPacket);
+                    opr_queue_Prepend(&tmpq, &call->currentPacket->entry);
 #ifdef RXDEBUG_PACKET
                     tmpqc++;
 #endif /* RXDEBUG_PACKET */
@@ -1164,16 +1165,16 @@ rxi_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
      * We may end up with more than call->twind packets on the queue. */
 
 #ifdef RX_TRACK_PACKETS
-    for (queue_Scan(&tmpq, p, np, rx_packet))
+    for (opr_queue_Scan(&tmpq, cursor))
     {
+       struct rx_packet *p = opr_queue_Entry(cursor, struct rx_packet, entry);
         p->flags |= RX_PKTFLAG_TQ;
     }
 #endif
-
     if (call->error)
         call->mode = RX_MODE_ERROR;
 
-    queue_SpliceAppend(&call->tq, &tmpq);
+    opr_queue_SpliceAppend(&call->tq, &tmpq);
 
     /* If the call is in recovery, let it exhaust its current retransmit
      * queue before forcing it to send new packets
@@ -1235,7 +1236,7 @@ rxi_FlushWrite(struct rx_call *call)
     struct rx_packet *cp = NULL;
 
     /* Free any packets from the last call to ReadvProc/WritevProc */
-    if (queue_IsNotEmpty(&call->iovq)) {
+    if (!opr_queue_IsEmpty(&call->iovq)) {
 #ifdef RXDEBUG_PACKET
         call->iovqc -=
 #endif /* RXDEBUG_PACKET */
@@ -1298,7 +1299,7 @@ rxi_FlushWrite(struct rx_call *call)
 #ifdef RX_TRACK_PACKETS
        cp->flags |= RX_PKTFLAG_TQ;
 #endif
-       queue_Append(&call->tq, cp);
+       opr_queue_Append(&call->tq, &cp->entry);
 #ifdef RXDEBUG_PACKET
         call->tqc++;
 #endif /* RXDEBUG_PACKET */
index 37b3e95..501069f 100644 (file)
@@ -18,7 +18,7 @@ struct rx_serverQueueEntry_rx_lock {
 #else
 struct rx_serverQueueEntry {
 #endif
-    struct rx_queue queueItemHeader;
+    struct opr_queue entry;
 #ifdef KDUMP_RX_LOCK
     struct rx_call_rx_lock *newcall;
 #else
index ccf8afc..7fd53ee 100644 (file)
@@ -34,6 +34,7 @@
 #include "intNN.h"
 #include "xfiles.h"
 
+#include <rx/rx_queue.h>
 #include <lock.h>
 #include <afs/afsint.h>
 #include <afs/nfs.h>
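
The remaining hunks are mechanical fallout: rx_packet.h stopped including rx_queue.h above, so each consumer that still uses the old rx_queue package now names that header itself, typically alongside the other rx includes:

    #include <rx/rx.h>
    #include <rx/rx_queue.h>   /* no longer arrives transitively via the RX headers */
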
index d514cde..1b1c6ba 100644 (file)
@@ -77,6 +77,7 @@
 #include <fcntl.h>
 #include <sys/ioctl.h>
 
+#include <rx/rx_queue.h>
 #include <lock.h>
 #include <afs/afsint.h>
 #include <afs/nfs.h>
index 7e82a4b..c4f3f98 100644 (file)
@@ -25,6 +25,7 @@
 #endif
 
 #include <rx/xdr.h>
+#include <rx/rx_queue.h>
 #include <afs/afsint.h>
 #include <afs/opr_assert.h>
 #include <afs/dir.h>
index 8666c22..1b46f05 100644 (file)
@@ -74,6 +74,7 @@
 #endif /* AFS_HPUX_ENV */
 
 #include <afs/opr.h>
+#include <rx/rx_queue.h>
 #include <afs/nfs.h>
 #include <lwp.h>
 #include <lock.h>
index f58ce5f..56db76b 100644 (file)
@@ -95,6 +95,7 @@
 #include <afs/nfs.h>           /* yuck.  This is an abomination. */
 #include <lwp.h>
 #include <rx/rx.h>
+#include <rx/rx_queue.h>
 #include <afs/afscbint.h>
 #include <afs/afsutil.h>
 #include <lock.h>
index 68d8901..695abc9 100644 (file)
@@ -30,8 +30,6 @@
 #include <afs/nfs.h>
 #include <afs/errors.h>
 #include <afs/ihandle.h>
-#include <afs/vnode.h>
-#include <afs/volume.h>
 #include <afs/acl.h>
 #include <afs/ptclient.h>
 #include <afs/ptuser.h>
index d091865..45a635b 100644 (file)
@@ -22,6 +22,7 @@
 #include <sys/file.h>
 #endif
 
+#include <rx/rx_queue.h>
 #include <afs/nfs.h>
 #include <lwp.h>
 #include <lock.h>
index 460ebf5..c074304 100644 (file)
@@ -27,8 +27,6 @@
 #include <afs/nfs.h>
 #include <afs/errors.h>
 #include <afs/ihandle.h>
-#include <afs/vnode.h>
-#include <afs/volume.h>
 #include <afs/acl.h>
 #include <afs/ptclient.h>
 #include <afs/prs_fs.h>
index 99962d8..40f68c5 100644 (file)
@@ -31,8 +31,6 @@
 #include <afs/nfs.h>
 #include <afs/errors.h>
 #include <afs/ihandle.h>
-#include <afs/vnode.h>
-#include <afs/volume.h>
 #include <afs/acl.h>
 #include <afs/ptclient.h>
 #include <afs/prs_fs.h>
index 1d4f0e6..f670366 100644 (file)
@@ -42,6 +42,7 @@
 
 #include <afs/opr.h>
 #include <afs/nfs.h>
+#include <rx/rx_queue.h>
 #include <lwp.h>
 #include <lock.h>
 #include <afs/cmd.h>
index 87f42bf..8cc5ad4 100644 (file)
@@ -28,6 +28,7 @@
 #include <rx/xdr.h>
 #include <afs/afsint.h>
 #include <afs/afssyscalls.h>
+#include <rx/rx_queue.h>
 
 #include "nfs.h"
 #include "lwp.h"
index d2e1207..1ce56c9 100644 (file)
@@ -27,6 +27,7 @@
 #include <rx/xdr.h>
 #include <afs/afsint.h>
 #include <afs/errors.h>
+#include <rx/rx_queue.h>
 
 #include "nfs.h"
 #include "daemon_com.h"
index 0b77933..de58525 100644 (file)
@@ -15,6 +15,7 @@
 #include <ctype.h>
 
 #include <rx/xdr.h>
+#include <rx/rx_queue.h>
 #include <afs/afsint.h>
 
 #if !defined(AFS_SGI_ENV)
index 666162d..65ef327 100644 (file)
@@ -39,6 +39,7 @@
 
 #include <afs/opr.h>
 #include <afs/afsint.h>
+#include <rx/rx_queue.h>
 #include <afs/errors.h>
 #include <afs/afssyscalls.h>
 
index ce63df9..9981971 100644 (file)
@@ -35,6 +35,7 @@
 #include <afs/dir.h>
 #include <afs/afsutil.h>
 #include <afs/fileutil.h>
+#include <rx/rx_queue.h>
 
 #include "nfs.h"
 #include "lwp.h"
index 5d20c77..e8f1692 100644 (file)
@@ -47,6 +47,7 @@
 
 #include <afs/opr.h>
 #include <afs/afsint.h>
+#include <rx/rx_queue.h>
 #include "nfs.h"
 #include <afs/errors.h>
 #include "daemon_com.h"
index 69873fe..2823d04 100644 (file)
@@ -30,6 +30,7 @@
 #endif
 
 #include <afs/opr.h>
+#include <rx/rx_queue.h>
 #include <lock.h>
 #include <afs/afsutil.h>
 #include <lwp.h>
index 1260d2a..3cb0c5e 100644 (file)
@@ -21,6 +21,7 @@
 #include <winbase.h>
 #include <lock.h>
 #include <afs/afsutil.h>
+#include <rx/rx_queue.h>
 #include "nfs.h"
 #include <afs/afsint.h>
 #include "ihandle.h"
index 99df0d5..b1f69b1 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <afs/opr.h>
 #include <afs/afsint.h>
+#include <rx/rx_queue.h>
 
 #include <afs/afsutil.h>
 
index 7bc6f3d..0dc0d72 100644 (file)
@@ -98,6 +98,7 @@
 
 #include <afs/opr.h>
 #include <afs/afsint.h>
+#include <rx/rx_queue.h>
 #include "nfs.h"
 #include <afs/errors.h>
 #include "lock.h"
index b23401e..c46eb37 100644 (file)
@@ -23,6 +23,7 @@
 #endif
 
 #include <afs/afsutil.h>
+#include <rx/rx_queue.h>
 
 #include <rx/xdr.h>
 #include "afs/afsint.h"
index 25c18b5..8ca71df 100644 (file)
@@ -34,6 +34,7 @@
 
 #include <afs/opr.h>
 #include <afs/afsint.h>
+#include <rx/rx_queue.h>
 
 #if !defined(AFS_SGI_ENV) && !defined(AFS_NT40_ENV)
 #if defined(AFS_VFSINCL_ENV)
index 61fb3a0..a34029e 100644 (file)
@@ -89,6 +89,7 @@
 #include <afs/dir.h>
 #include <afs/afsutil.h>
 #include <afs/fileutil.h>
+#include <rx/rx_queue.h>
 
 #include "nfs.h"
 #include "lwp.h"
index 1a8c845..691e18d 100644 (file)
@@ -21,6 +21,7 @@
 
 #include <afs/opr.h>
 #include <afs/afsint.h>
+#include <rx/rx_queue.h>
 #include "nfs.h"
 #include <afs/errors.h>
 #include "salvsync.h"
index f4becd0..25a33ec 100644 (file)
@@ -33,6 +33,8 @@
 
 #include <afs/opr.h>
 #include <afs/afsint.h>
+#include <rx/rx_queue.h>
+
 #include "nfs.h"
 #include <afs/errors.h>
 #include "salvsync.h"
index 224971b..b0e4609 100644 (file)
@@ -24,6 +24,7 @@
 #ifdef AFS_DEMAND_ATTACH_FS
 
 #include <afs/opr.h>
+#include <rx/rx_queue.h>
 #include <lock.h>
 #include <afs/afsutil.h>
 #include <lwp.h>
index c94384c..4c9e6c6 100644 (file)
@@ -25,6 +25,7 @@
 #ifdef AFS_DEMAND_ATTACH_FS
 
 #include <afs/opr.h>
+#include <rx/rx_queue.h>
 #include <lock.h>
 #include <afs/afsutil.h>
 #include <lwp.h>
index df4a192..4d73660 100644 (file)
@@ -31,7 +31,7 @@
 #include <afs/errors.h>
 #include <afs/acl.h>
 #include <afs/prs_fs.h>
-#include <opr/queue.h>
+#include <rx/rx_queue.h>
 
 #include "nfs.h"
 #include "lock.h"
index 601da80..23cd098 100644 (file)
@@ -153,6 +153,7 @@ Vnodes with 0 inode pointers in RW volumes are now deleted.
 #include <afs/dir.h>
 #include <afs/afsutil.h>
 #include <afs/fileutil.h>
+#include <rx/rx_queue.h>
 
 #include "nfs.h"
 #include "lwp.h"
index c85c835..7df0705 100644 (file)
@@ -34,6 +34,8 @@
 #include <afs/opr.h>
 #include <afs/afsint.h>
 
+#include <rx/rx_queue.h>
+
 #ifndef AFS_NT40_ENV
 #if !defined(AFS_SGI_ENV)
 #ifdef AFS_OSF_ENV
index 6715d88..3c7d395 100644 (file)
@@ -28,6 +28,7 @@
 #endif
 
 #include <afs/opr.h>
+#include <rx/rx_queue.h>
 #include <rx/xdr.h>
 #include <afs/afsint.h>
 #include "nfs.h"
index 9c12487..bcb2c84 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <afs/opr.h>
 #include <rx/rx.h>
+#include <rx/rx_queue.h>
 #include <afs/afsint.h>
 #include <afs/nfs.h>
 #include <afs/errors.h>
index e723e42..1da076f 100644 (file)
@@ -46,6 +46,7 @@
 
 #include <afs/afsint.h>
 #include <afs/nfs.h>
+#include <rx/rx_queue.h>
 #include <lock.h>
 #include <afs/ihandle.h>
 #include <afs/vnode.h>
index 38c1060..f055952 100644 (file)
@@ -27,6 +27,7 @@
 
 #include <afs/cmd.h>
 #include <rx/xdr.h>
+#include <rx/rx_queue.h>
 #include <afs/afsint.h>
 #include <afs/nfs.h>
 #include <afs/errors.h>
index 1be53c2..0351b5d 100644 (file)
@@ -17,6 +17,7 @@
 #if defined(AFS_NAMEI_ENV) && !defined(AFS_NT40_ENV)
 #include <afs/dir.h>
 #include <rx/xdr.h>
+#include <rx/rx_queue.h>
 #include <afs/afsint.h>
 #include <afs/nfs.h>
 #include <lwp.h>
index 14bb2c0..7ac31c2 100644 (file)
@@ -19,6 +19,7 @@
 #endif
 
 #include <rx/xdr.h>
+#include <rx/rx_queue.h>
 #include <afs/afsint.h>
 #include <afs/prs_fs.h>
 #include <afs/nfs.h>
index d622f71..f859444 100644 (file)
@@ -17,6 +17,7 @@
 #include <afs/opr.h>
 #include <rx/rx.h>
 #include <rx/rxkad.h>
+#include <rx/rx_queue.h>
 #include <afs/afsint.h>
 #include <afs/prs_fs.h>
 #include <afs/nfs.h>
index d341a2b..cefe6d1 100644 (file)
@@ -25,6 +25,7 @@
 #include <afs/afsutil.h>
 #endif
 
+#include <rx/rx_queue.h>
 #include <afs/afsint.h>
 #include <afs/prs_fs.h>
 #include <afs/nfs.h>
index 273b6f6..cb807a0 100644 (file)
@@ -26,6 +26,7 @@
 
 #include <lock.h>
 #include <afs/stds.h>
+#include <rx/rx_queue.h>
 #include <rx/xdr.h>
 #include <rx/rx.h>
 #include <rx/rx_globals.h>
index 9f4bfdf..0b926f2 100644 (file)
@@ -21,6 +21,7 @@
 #include <afs/voldefs.h>
 #include <rx/xdr.h>
 #include <rx/rx.h>
+#include <rx/rx_queue.h>
 #include <afs/vlserver.h>
 #include <afs/nfs.h>
 #include <afs/cellconfig.h>