afs: release the packets used by rx on shutdown
[openafs.git] / src / rx / rx_globals.h
index 2051a26..66dff99 100644
@@ -21,6 +21,7 @@
 
 #ifndef GLOBALSINIT
 #define GLOBALSINIT(x)
+#define POSTAMBLE
 #if defined(AFS_NT40_ENV)
 #define RX_STATS_INTERLOCKED 1
 #if defined(AFS_PTHREAD_ENV)
@@ -43,12 +44,6 @@ EXT struct rx_service *rx_services[RX_MAX_SERVICES + 1];
 EXT afs_kmutex_t rx_serverPool_lock;
 #endif /* RX_ENABLE_LOCKS */
 
-/* Incoming calls wait on this queue when there are no available server processes */
-EXT struct rx_queue rx_incomingCallQueue;
-
-/* Server processes wait on this queue when there are no appropriate calls to process */
-EXT struct rx_queue rx_idleServerQueue;
-
 /* Constant delay time before sending a hard ack if the receiver consumes
  * a packet while no delayed ack event is scheduled. Ensures that the
  * sender is able to advance its window when the receiver consumes a packet
@@ -154,7 +149,7 @@ EXT int rx_nPackets GLOBALSINIT(0); /* preallocate packets with rx_extraPackets
 EXT pthread_key_t rx_ts_info_key;
 typedef struct rx_ts_info_t {
     struct {
-        struct rx_queue queue;
+        struct opr_queue queue;
         int len;                /* local queue length */
         int delta;              /* number of new packets alloc'd locally since last sync w/ global queue */
 
@@ -179,7 +174,7 @@ EXT struct rx_ts_info_t * rx_ts_info_init(void);   /* init function for thread-s
     do { \
         ts_info_p = (struct rx_ts_info_t*)pthread_getspecific(rx_ts_info_key); \
         if (ts_info_p == NULL) { \
-            osi_Assert((ts_info_p = rx_ts_info_init()) != NULL); \
+            opr_Verify((ts_info_p = rx_ts_info_init()) != NULL); \
         } \
     } while(0)
 #endif /* AFS_PTHREAD_ENV */
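
The switch from osi_Assert to opr_Verify above matters because the checked
expression carries a side effect (the assignment to ts_info_p). By convention
a Verify-style macro always evaluates its argument, whereas a plain assertion
may be compiled out of a build. A minimal, self-contained sketch of the
distinction; VERIFY() and init() are illustrative stand-ins, not OpenAFS code:

    #include <stdlib.h>

    /* Stand-in for a Verify-style macro: the expression is evaluated in
     * every build, and failure still aborts. */
    #define VERIFY(ex) do { if (!(ex)) abort(); } while (0)

    static void *init(void) { return malloc(16); }  /* ~ rx_ts_info_init */

    int main(void)
    {
        void *p;
        VERIFY((p = init()) != NULL); /* assignment survives all builds */
        /* assert((p = init()) != NULL) would lose the assignment when
         * compiled with NDEBUG, leaving p uninitialised */
        free(p);
        return 0;
    }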
@@ -189,7 +184,7 @@ EXT struct rx_ts_info_t * rx_ts_info_init(void);   /* init function for thread-s
 /* in pthreads rx, free packet queue is now a two-tiered queueing system
  * in which the first tier is thread-specific, and the second tier is
  * a global free packet queue */
-EXT struct rx_queue rx_freePacketQueue;
+EXT struct opr_queue rx_freePacketQueue;
 #ifdef RX_TRACK_PACKETS
 #define RX_FPQ_MARK_FREE(p) \
     do { \
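
Throughout this patch, rx's old rx_queue lists, which relied on the link
living at the very start of the structure, are replaced by opr_queue, where
the link is an explicitly named member (entry here) and accessor macros
recover the containing structure from the link. A simplified, self-contained
sketch of the idiom using plain container_of arithmetic; the real macros live
in opr/queue.h:

    #include <stddef.h>
    #include <stdio.h>

    struct node { struct node *prev, *next; };  /* shape of struct opr_queue */

    /* container_of-style mapping from a link back to its enclosing
     * structure, as opr_queue_Entry() does */
    #define ENTRY(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct pkt {
        int id;
        struct node entry;      /* embedded link, as in struct rx_packet */
    };

    int main(void)
    {
        struct node head = { &head, &head };    /* empty circular list */
        struct pkt p = { 42, { NULL, NULL } };

        /* prepend p, as opr_queue_Prepend would */
        p.entry.next = head.next;
        p.entry.prev = &head;
        head.next->prev = &p.entry;
        head.next = &p.entry;

        /* walk the list recovering each element: the opr_queue_Scan plus
         * opr_queue_Entry pattern used by RX_TS_FPQ_QCHECKIN below */
        for (struct node *cur = head.next; cur != &head; cur = cur->next)
            printf("pkt %d\n", ENTRY(cur, struct pkt, entry)->id);
        return 0;
    }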
@@ -240,7 +235,19 @@ EXT struct rx_queue rx_freePacketQueue;
 EXT afs_kmutex_t rx_freePktQ_lock;
 #endif /* RX_ENABLE_LOCKS */
 
-#if defined(AFS_PTHREAD_ENV)
+/*!
+ * \brief Queue of allocated packets.
+ *
+ * This queue keeps track of the blocks of allocated packets, so that
+ * the memory they occupy can be released when afs is being unmounted
+ * and rx shuts down.
+ */
+EXT struct opr_queue rx_mallocedPacketQueue;
+#ifdef RX_ENABLE_LOCKS
+EXT afs_kmutex_t rx_mallocedPktQ_lock;
+#endif /* RX_ENABLE_LOCKS */
+
+#if defined(AFS_PTHREAD_ENV) && !defined(KERNEL)
 #define RX_ENABLE_TSFPQ
 EXT int rx_TSFPQGlobSize GLOBALSINIT(3); /* number of packets to transfer between global and local queues in one op */
 EXT int rx_TSFPQLocalMax GLOBALSINIT(15); /* max number of packets on local FPQ before returning a glob to the global pool */
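
The new rx_mallocedPacketQueue is what lets the commit deliver on its subject
line: each block of packets allocated at runtime is also recorded here, so
shutdown can find and free them all. A hedged sketch of the drain a shutdown
path might perform, using only opr_queue and rx primitives that exist in the
tree; the block layout and the helper name are assumptions for illustration,
not the actual rx_packet.c code:

    /* Hypothetical record describing one allocation; the real layout is
     * defined in the packet code, not in this header. */
    struct malloced_block {
        struct opr_queue entry;     /* link on rx_mallocedPacketQueue */
        size_t size;                /* size of the whole allocation */
        /* packet storage follows */
    };

    static void
    free_all_packet_blocks(void)    /* hypothetical helper name */
    {
        MUTEX_ENTER(&rx_mallocedPktQ_lock);
        while (!opr_queue_IsEmpty(&rx_mallocedPacketQueue)) {
            struct malloced_block *b
                = opr_queue_First(&rx_mallocedPacketQueue,
                                  struct malloced_block, entry);
            opr_queue_Remove(&b->entry);
            rxi_Free(b, b->size);
        }
        MUTEX_EXIT(&rx_mallocedPktQ_lock);
    }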
@@ -290,9 +297,12 @@ EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
         struct rx_packet * p; \
         int tsize = MIN((rx_ts_info_p)->_FPQ.len, (rx_ts_info_p)->_FPQ.len - rx_TSFPQLocalMax + 3 *  rx_TSFPQGlobSize); \
        if (tsize <= 0) break; \
-        for (i=0,p=queue_Last(&((rx_ts_info_p)->_FPQ), rx_packet); \
-             i < tsize; i++,p=queue_Prev(p, rx_packet)); \
-        queue_SplitAfterPrepend(&((rx_ts_info_p)->_FPQ),&rx_freePacketQueue,p); \
+        for (i=0,p=opr_queue_Last(&((rx_ts_info_p)->_FPQ.queue), \
+                                struct rx_packet, entry); \
+             i < tsize; i++,p=opr_queue_Prev(&p->entry, \
+                                           struct rx_packet, entry)); \
+        opr_queue_SplitAfterPrepend(&((rx_ts_info_p)->_FPQ.queue), \
+                                  &rx_freePacketQueue, &p->entry); \
         (rx_ts_info_p)->_FPQ.len -= tsize; \
         rx_nFreePackets += tsize; \
         (rx_ts_info_p)->_FPQ.ltog_ops++; \
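
The rewritten loop above (and its three siblings in the following hunks)
keeps the old algorithm: position a cursor so that exactly tsize packets lie
beyond it, then move them between queues with a single splice rather than one
at a time. Sketched here with bare opr_queue nodes; 'local' and 'global' are
illustrative names, and the real code walks struct rx_packet entries:

    struct opr_queue local, global;     /* assume both initialised; local
                                         * holds more than tsize elements */
    struct opr_queue *cut;
    int i, tsize = 3;                   /* illustrative transfer count */

    /* step back tsize nodes from the tail, leaving tsize nodes after cut */
    for (i = 0, cut = local.prev; i < tsize; i++, cut = cut->prev)
        ;
    /* splice those tsize tail nodes onto the front of the global queue */
    opr_queue_SplitAfterPrepend(&local, &global, cut);
    /* the global-to-local direction two hunks below mirrors this with
     * opr_queue_First/Next and opr_queue_SplitBeforeAppend */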
@@ -310,9 +320,12 @@ EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
         int i; \
         struct rx_packet * p; \
         if (num_transfer <= 0) break; \
-        for (i=0,p=queue_Last(&((rx_ts_info_p)->_FPQ), rx_packet); \
-            i < (num_transfer); i++,p=queue_Prev(p, rx_packet)); \
-        queue_SplitAfterPrepend(&((rx_ts_info_p)->_FPQ),&rx_freePacketQueue,p); \
+        for (i=0,p=opr_queue_Last(&((rx_ts_info_p)->_FPQ.queue), \
+                                struct rx_packet, entry); \
+            i < (num_transfer); \
+            i++,p=opr_queue_Prev(&p->entry, struct rx_packet, entry)); \
+        opr_queue_SplitAfterPrepend(&((rx_ts_info_p)->_FPQ.queue), \
+                                  &rx_freePacketQueue, &p->entry); \
         (rx_ts_info_p)->_FPQ.len -= (num_transfer); \
         rx_nFreePackets += (num_transfer); \
         (rx_ts_info_p)->_FPQ.ltog_ops++; \
@@ -332,9 +345,12 @@ EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
         struct rx_packet * p; \
         tsize = (rx_TSFPQGlobSize <= rx_nFreePackets) ? \
                  rx_TSFPQGlobSize : rx_nFreePackets; \
-        for (i=0,p=queue_First(&rx_freePacketQueue, rx_packet); \
-             i < tsize; i++,p=queue_Next(p, rx_packet)); \
-        queue_SplitBeforeAppend(&rx_freePacketQueue,&((rx_ts_info_p)->_FPQ),p); \
+        for (i=0, \
+              p=opr_queue_First(&rx_freePacketQueue, struct rx_packet, entry); \
+             i < tsize; \
+            i++,p=opr_queue_Next(&p->entry, struct rx_packet, entry)); \
+        opr_queue_SplitBeforeAppend(&rx_freePacketQueue, \
+                                  &((rx_ts_info_p)->_FPQ.queue), &p->entry); \
         (rx_ts_info_p)->_FPQ.len += i; \
         rx_nFreePackets -= i; \
         (rx_ts_info_p)->_FPQ.gtol_ops++; \
@@ -347,9 +363,12 @@ EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
         struct rx_packet * p; \
         tsize = (num_transfer); \
         if (tsize > rx_nFreePackets) tsize = rx_nFreePackets; \
-        for (i=0,p=queue_First(&rx_freePacketQueue, rx_packet); \
-             i < tsize; i++,p=queue_Next(p, rx_packet)); \
-        queue_SplitBeforeAppend(&rx_freePacketQueue,&((rx_ts_info_p)->_FPQ),p); \
+        for (i=0, \
+              p=opr_queue_First(&rx_freePacketQueue, struct rx_packet, entry); \
+             i < tsize; \
+            i++, p=opr_queue_Next(&p->entry, struct rx_packet, entry)); \
+        opr_queue_SplitBeforeAppend(&rx_freePacketQueue, \
+                                  &((rx_ts_info_p)->_FPQ.queue), &p->entry); \
         (rx_ts_info_p)->_FPQ.len += i; \
         rx_nFreePackets -= i; \
         (rx_ts_info_p)->_FPQ.gtol_ops++; \
@@ -358,8 +377,9 @@ EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
 /* checkout a packet from the thread-specific free packet queue */
 #define RX_TS_FPQ_CHECKOUT(rx_ts_info_p,p) \
     do { \
-        (p) = queue_First(&((rx_ts_info_p)->_FPQ), rx_packet); \
-        queue_Remove(p); \
+        (p) = opr_queue_First(&((rx_ts_info_p)->_FPQ.queue), \
+                            struct rx_packet, entry); \
+        opr_queue_Remove(&p->entry); \
         RX_FPQ_MARK_USED(p); \
         (rx_ts_info_p)->_FPQ.len--; \
         (rx_ts_info_p)->_FPQ.checkout_ops++; \
@@ -373,12 +393,14 @@ EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
         int i; \
         struct rx_packet *p; \
         if (num_transfer > (rx_ts_info_p)->_FPQ.len) num_transfer = (rx_ts_info_p)->_FPQ.len; \
-        for (i=0, p=queue_First(&((rx_ts_info_p)->_FPQ), rx_packet); \
+        for (i=0, p=opr_queue_First(&((rx_ts_info_p)->_FPQ.queue), \
+                                  struct rx_packet, entry); \
              i < num_transfer; \
-             i++, p=queue_Next(p, rx_packet)) { \
+             i++, p=opr_queue_Next(&p->entry, struct rx_packet, entry)) { \
             RX_FPQ_MARK_USED(p); \
         } \
-        queue_SplitBeforeAppend(&((rx_ts_info_p)->_FPQ),(q),p); \
+        opr_queue_SplitBeforeAppend(&((rx_ts_info_p)->_FPQ.queue),(q), \
+                                  &((p)->entry)); \
         (rx_ts_info_p)->_FPQ.len -= num_transfer; \
         (rx_ts_info_p)->_FPQ.checkout_ops++; \
         (rx_ts_info_p)->_FPQ.checkout_xfer += num_transfer; \
@@ -386,7 +408,7 @@ EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
 /* check a packet into the thread-specific free packet queue */
 #define RX_TS_FPQ_CHECKIN(rx_ts_info_p,p) \
     do { \
-        queue_Prepend(&((rx_ts_info_p)->_FPQ), (p)); \
+        opr_queue_Prepend(&((rx_ts_info_p)->_FPQ.queue), &((p)->entry)); \
         RX_FPQ_MARK_FREE(p); \
         (rx_ts_info_p)->_FPQ.len++; \
         (rx_ts_info_p)->_FPQ.checkin_ops++; \
@@ -398,16 +420,16 @@ EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
  * since caller already knows length of (q) for other reasons */
 #define RX_TS_FPQ_QCHECKIN(rx_ts_info_p,num_transfer,q) \
     do { \
-        struct rx_packet *p, *np; \
-        for (queue_Scan((q), p, np, rx_packet)) { \
-            RX_FPQ_MARK_FREE(p); \
+        struct opr_queue *cur; \
+        for (opr_queue_Scan((q), cur)) { \
+            RX_FPQ_MARK_FREE(opr_queue_Entry(cur, struct rx_packet, entry)); \
         } \
-        queue_SplicePrepend(&((rx_ts_info_p)->_FPQ),(q)); \
+        opr_queue_SplicePrepend(&((rx_ts_info_p)->_FPQ.queue),(q)); \
         (rx_ts_info_p)->_FPQ.len += (num_transfer); \
         (rx_ts_info_p)->_FPQ.checkin_ops++; \
         (rx_ts_info_p)->_FPQ.checkin_xfer += (num_transfer); \
     } while(0)
-#endif /* AFS_PTHREAD_ENV */
+#endif /* AFS_PTHREAD_ENV && !KERNEL */
 
 /* Number of free packets */
 EXT int rx_nFreePackets GLOBALSINIT(0);
@@ -444,7 +466,7 @@ EXT afs_kmutex_t freeSQEList_lock;
 #endif
 
 /* List of free call structures */
-EXT struct rx_queue rx_freeCallQueue;
+EXT struct opr_queue rx_freeCallQueue;
 #ifdef RX_ENABLE_LOCKS
 EXT afs_kmutex_t rx_freeCallQueue_lock;
 #endif
@@ -496,8 +518,8 @@ EXT afs_int32 rxi_availProcs GLOBALSINIT(0);        /* number of threads in the pool */
 EXT afs_int32 rxi_totalMin GLOBALSINIT(0);     /* Sum(minProcs) forall services */
 EXT afs_int32 rxi_minDeficit GLOBALSINIT(0);   /* number of procs needed to handle all minProcs */
 
-EXT int rx_nextCid;            /* Next connection call id */
-EXT int rx_epoch;              /* Initialization time of rx */
+EXT afs_uint32 rx_nextCid;             /* Next connection call id */
+EXT afs_uint32 rx_epoch;               /* Initialization time of rx */
 #ifdef RX_ENABLE_LOCKS
 EXT afs_kcondvar_t rx_waitingForPackets_cv;
 #endif
@@ -518,11 +540,9 @@ EXT afs_kmutex_t rx_connHashTable_lock;
 
 /* Forward definitions of internal procedures */
 #define        rxi_ChallengeOff(conn)  \
-       rxevent_Cancel(&(conn)->challengeEvent, NULL, 0)
-#define rxi_KeepAliveOff(call) \
-       rxevent_Cancel(&(call)->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE)
+       rxevent_Cancel(&(conn)->challengeEvent)
 #define rxi_NatKeepAliveOff(conn) \
-       rxevent_Cancel(&(conn)->natKeepAliveEvent, NULL, 0)
+       rxevent_Cancel(&(conn)->natKeepAliveEvent)
 
 #define rxi_AllocSecurityObject() rxi_Alloc(sizeof(struct rx_securityClass))
 #define        rxi_FreeSecurityObject(obj) rxi_Free(obj, sizeof(struct rx_securityClass))
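
The rxevent_Cancel calls lose their extra arguments because, after the rx
event rework, cancelling an event no longer drops call references as a side
effect; rxi_KeepAliveOff leaves this header for the same reason. A sketch of
the pattern a caller now follows, along the lines of the keepalive helper in
rx.c; the function shown is illustrative, not quoted from this patch:

    /* rxevent_Cancel() just clears the pending event and reports whether
     * one was actually cancelled; the caller drops the reference it took
     * when scheduling. */
    static void
    cancel_keepalive(struct rx_call *call)   /* illustrative name */
    {
        if (rxevent_Cancel(&call->keepAliveEvent))
            CALL_RELE(call, RX_CALL_REFCOUNT_ALIVE);
    }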
@@ -602,11 +622,11 @@ EXT pthread_key_t rx_thread_id_key;
 #endif
 
 #if defined(RX_ENABLE_LOCKS)
-EXT afs_kmutex_t rx_waiting_mutex;     /* used to protect waiting counters */
-EXT afs_kmutex_t rx_quota_mutex;       /* used to protect quota counters */
-EXT afs_kmutex_t rx_pthread_mutex;     /* used to protect pthread counters */
-EXT afs_kmutex_t rx_packets_mutex;     /* used to protect packet counters */
-EXT afs_kmutex_t rx_refcnt_mutex;       /* used to protect conn/call ref counts */
+EXT afs_kmutex_t rx_waiting_mutex POSTAMBLE;   /* used to protect waiting counters */
+EXT afs_kmutex_t rx_quota_mutex POSTAMBLE;     /* used to protect quota counters */
+EXT afs_kmutex_t rx_pthread_mutex POSTAMBLE;   /* used to protect pthread counters */
+EXT afs_kmutex_t rx_packets_mutex POSTAMBLE;   /* used to protect packet counters */
+EXT afs_kmutex_t rx_refcnt_mutex POSTAMBLE;       /* used to protect conn/call ref counts */
 #endif
 
 EXT int rx_enable_stats GLOBALSINIT(0);
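
Finally, the new POSTAMBLE macro extends the existing GLOBALSINIT trick: this
header is included everywhere for extern declarations, but the one
translation unit that defines the macros before inclusion turns the same
lines into definitions, and POSTAMBLE gives it a hook to append text after a
declarator, as the mutex declarations above use. A sketch of the pattern with
a hypothetical guard name and expansions; the defining unit's actual
definitions are outside this diff:

    #ifdef RX_DEFINE_GLOBALS            /* hypothetical guard */
    # define EXT                        /* definitions live here */
    # define GLOBALSINIT(x) = x
    # define POSTAMBLE = { 0 }          /* hypothetical: zero-initialise */
    #else
    # define EXT extern                 /* everyone else just declares */
    # define GLOBALSINIT(x)
    # define POSTAMBLE
    #endif

    EXT int rx_nFreePackets GLOBALSINIT(0); /* -> int rx_nFreePackets = 0; */
    EXT afs_kmutex_t rx_refcnt_mutex POSTAMBLE;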