From fbdf126df02eacc0442d80cc5bca0e16ddafe55e Mon Sep 17 00:00:00 2001 From: Andrew Deason Date: Sun, 25 Aug 2019 19:30:30 -0500 Subject: [PATCH] rx: Convert rx_FreeSQEList to rx_freeServerQueue Currently, rx_serverQueueEntry structs are placed on the rx_FreeSQEList linked list instead of being freed directly, but managing this list is done a bit oddly. The first field in struct rx_serverQueueEntry is an opr_queue, but we don't use the opr_queue_* macros to manage the list. Instead, we just assume the first field in a struct rx_serverQueueEntry is a pointer that we can use to link entries together. This is currently true and works, but it's an odd way of maintaining such a list, and of course would break if we ever moved the fields around in struct rx_serverQueueEntry. Make this code more closely follow the normal way of managing opr_queue lists, by using opr_queue_* macros, and changing rx_FreeSQEList to be an opr_queue itself. Change the name to rx_freeServerQueue to ensure all callers are changed, and to match the naming convention for the other linked lists for rx_serverQueueEntry structs. Also move rx_freeServerQueue and its associated lock freeSQEList_lock to be declared static inside rx.c, since neither are referenced outside of rx.c. The general idea for this commit suggested by kaduk@mit.edu. 
Change-Id: I2ea15af1ad3228fa5fdf9f323e9394838fba4bac Reviewed-on: https://gerrit.openafs.org/13811 Reviewed-by: Benjamin Kaduk Tested-by: Benjamin Kaduk --- src/rx/rx.c | 38 +++++++++++++++++++++++++------------- src/rx/rx_globals.h | 6 ------ 2 files changed, 25 insertions(+), 19 deletions(-) diff --git a/src/rx/rx.c b/src/rx/rx.c index 0ae611e..37bdf2b 100644 --- a/src/rx/rx.c +++ b/src/rx/rx.c @@ -221,12 +221,16 @@ struct opr_queue rx_incomingCallQueue; * calls to process */ struct opr_queue rx_idleServerQueue; +/* List of free rx_serverQueueEntry structs */ +struct opr_queue rx_freeServerQueue; + #if !defined(offsetof) #include /* for definition of offsetof() */ #endif #ifdef RX_ENABLE_LOCKS afs_kmutex_t rx_atomic_mutex; +static afs_kmutex_t freeSQEList_lock; #endif /* Forward prototypes */ @@ -634,6 +638,7 @@ rx_InitHost(u_int host, u_int port) /* Initialize various global queues */ opr_queue_Init(&rx_idleServerQueue); + opr_queue_Init(&rx_freeServerQueue); opr_queue_Init(&rx_incomingCallQueue); opr_queue_Init(&rx_freeCallQueue); @@ -1944,7 +1949,7 @@ rxi_ServerProc(int threadID, struct rx_call *newcall, osi_socket * socketp) void rx_WakeupServerProcs(void) { - struct rx_serverQueueEntry *np, *tqp; + struct rx_serverQueueEntry *np; struct opr_queue *cursor; SPLVAR; @@ -1959,8 +1964,8 @@ rx_WakeupServerProcs(void) osi_rxWakeup(rx_waitForPacket); #endif /* RX_ENABLE_LOCKS */ MUTEX_ENTER(&freeSQEList_lock); - for (np = rx_FreeSQEList; np; np = tqp) { - tqp = *(struct rx_serverQueueEntry **)np; + for (opr_queue_Scan(&rx_freeServerQueue, cursor)) { + np = opr_queue_Entry(cursor, struct rx_serverQueueEntry, entry); #ifdef RX_ENABLE_LOCKS CV_BROADCAST(&np->cv); #else /* RX_ENABLE_LOCKS */ @@ -2020,8 +2025,10 @@ rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp) MUTEX_ENTER(&freeSQEList_lock); - if ((sq = rx_FreeSQEList)) { - rx_FreeSQEList = *(struct rx_serverQueueEntry **)sq; + if (!opr_queue_IsEmpty(&rx_freeServerQueue)) { + sq = 
opr_queue_First(&rx_freeServerQueue, struct rx_serverQueueEntry, + entry); + opr_queue_Remove(&sq->entry); MUTEX_EXIT(&freeSQEList_lock); } else { /* otherwise allocate a new one and return that */ MUTEX_EXIT(&freeSQEList_lock); @@ -2136,6 +2143,9 @@ rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp) #endif } while (!(call = sq->newcall) && !(socketp && *socketp != OSI_NULLSOCKET)); + if (opr_queue_IsOnQueue(&sq->entry)) { + opr_queue_Remove(&sq->entry); + } MUTEX_EXIT(&rx_serverPool_lock); if (call) { MUTEX_ENTER(&call->lock); @@ -2145,8 +2155,7 @@ rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp) } MUTEX_ENTER(&freeSQEList_lock); - *(struct rx_serverQueueEntry **)sq = rx_FreeSQEList; - rx_FreeSQEList = sq; + opr_queue_Prepend(&rx_freeServerQueue, &sq->entry); MUTEX_EXIT(&freeSQEList_lock); if (call) { @@ -2191,8 +2200,10 @@ rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp) NETPRI; MUTEX_ENTER(&freeSQEList_lock); - if ((sq = rx_FreeSQEList)) { - rx_FreeSQEList = *(struct rx_serverQueueEntry **)sq; + if (!opr_queue_IsEmpty(&rx_freeServerQueue)) { + sq = opr_queue_First(&rx_freeServerQueue, struct rx_serverQueueEntry, + entry); + opr_queue_Remove(&sq->entry); MUTEX_EXIT(&freeSQEList_lock); } else { /* otherwise allocate a new one and return that */ MUTEX_EXIT(&freeSQEList_lock); @@ -2306,8 +2317,7 @@ rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp) MUTEX_EXIT(&sq->lock); MUTEX_ENTER(&freeSQEList_lock); - *(struct rx_serverQueueEntry **)sq = rx_FreeSQEList; - rx_FreeSQEList = sq; + opr_queue_Prepend(&rx_freeServerQueue, &sq->entry); MUTEX_EXIT(&freeSQEList_lock); if (call) { @@ -8045,8 +8055,10 @@ shutdown_rx(void) MUTEX_ENTER(&freeSQEList_lock); - while ((np = rx_FreeSQEList)) { - rx_FreeSQEList = *(struct rx_serverQueueEntry **)np; + while (!opr_queue_IsEmpty(&rx_freeServerQueue)) { + np = opr_queue_First(&rx_freeServerQueue, struct rx_serverQueueEntry, + entry); + 
opr_queue_Remove(&np->entry); MUTEX_DESTROY(&np->lock); rxi_Free(np, sizeof(*np)); } diff --git a/src/rx/rx_globals.h b/src/rx/rx_globals.h index caf0d3b..a6695ea 100644 --- a/src/rx/rx_globals.h +++ b/src/rx/rx_globals.h @@ -459,12 +459,6 @@ EXT afs_uint32 rx_maxJumboRecvSize GLOBALSINIT(RX_MAX_PACKET_SIZE); /* need this to permit progs to run on AIX systems */ EXT int (*rxi_syscallp) (afs_uint32 a3, afs_uint32 a4, void *a5)GLOBALSINIT(0); -/* List of free queue entries */ -EXT struct rx_serverQueueEntry *rx_FreeSQEList GLOBALSINIT(0); -#ifdef RX_ENABLE_LOCKS -EXT afs_kmutex_t freeSQEList_lock; -#endif - /* List of free call structures */ EXT struct opr_queue rx_freeCallQueue; #ifdef RX_ENABLE_LOCKS -- 1.9.4