From c86ae86a53c8a8e5acc099e1ea437e3571b2e63d Mon Sep 17 00:00:00 2001
From: Jeffrey Altman
Date: Tue, 11 Mar 2008 18:23:23 +0000
Subject: [PATCH] rx-mutex-interlocked-macros-20080311

LICENSE IPL10

Introduce a new set of macros that can be used to protect increments,
decrements, and additions with either a mutex or Interlocked
operations:

  rx_MutexIncrement(object, mutex)
  rx_MutexAdd(object, addend, mutex)
  rx_MutexDecrement(object, mutex)
  rx_MutexAdd1Increment2(object1, addend, object2, mutex)
  rx_MutexAdd1Decrement2(object1, addend, object2, mutex)

On Windows these are implemented with Interlocked operations; on other
platforms the existing mutex is relied upon.

Only a subset of the rx_stats fields has been transitioned at this
time.
---
(An illustrative usage sketch follows the patch.)

 src/rx/rx.c         | 143 +++++++++++++---------------------------------
 src/rx/rx.h         |  49 +++++++++++++++++-
 src/rx/rx_globals.h |   1 +
 src/rx/rx_packet.c  |  79 ++++++++++------------------
 4 files changed, 111 insertions(+), 161 deletions(-)

diff --git a/src/rx/rx.c b/src/rx/rx.c
index f442f46..439a3d3 100644
--- a/src/rx/rx.c
+++ b/src/rx/rx.c
@@ -654,8 +654,8 @@ void
 rx_StartClientThread(void)
 {
 #ifdef AFS_PTHREAD_ENV
-    int pid;
-    pid = (int) pthread_self();
+    pthread_t pid;
+    pid = pthread_self();
 #endif /* AFS_PTHREAD_ENV */
 }
 #endif /* AFS_NT40_ENV */
@@ -781,10 +781,7 @@ rx_NewConnection(register afs_uint32 shost, u_short sport, u_short sservice,
     conn->refCount++;		/* no lock required since only this thread knows... */
     conn->next = rx_connHashTable[hashindex];
     rx_connHashTable[hashindex] = conn;
-    MUTEX_ENTER(&rx_stats_mutex);
-    rx_stats.nClientConns++;
-    MUTEX_EXIT(&rx_stats_mutex);
-
+    rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
     MUTEX_EXIT(&rx_connHashTable_lock);
     USERPRI;
     return conn;
 }
@@ -834,13 +831,10 @@ rxi_CleanupConnection(struct rx_connection *conn)
     conn->peer->refCount--;
     MUTEX_EXIT(&rx_peerHashTable_lock);

-    MUTEX_ENTER(&rx_stats_mutex);
     if (conn->type == RX_SERVER_CONNECTION)
-        rx_stats.nServerConns--;
+        rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
     else
-        rx_stats.nClientConns--;
-    MUTEX_EXIT(&rx_stats_mutex);
-
+        rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
 #ifndef KERNEL
     if (conn->specific) {
         int i;
@@ -2120,9 +2114,7 @@ rxi_NewCall(register struct rx_connection *conn, register int channel)
         call = queue_First(&rx_freeCallQueue, rx_call);
 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
         queue_Remove(call);
-        MUTEX_ENTER(&rx_stats_mutex);
-        rx_stats.nFreeCallStructs--;
-        MUTEX_EXIT(&rx_stats_mutex);
+        rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
         MUTEX_EXIT(&rx_freeCallQueue_lock);
         MUTEX_ENTER(&call->lock);
         CLEAR_CALL_QUEUE_LOCK(call);
@@ -2146,9 +2138,7 @@ rxi_NewCall(register struct rx_connection *conn, register int channel)
         CV_INIT(&call->cv_rq, "call rq", CV_DEFAULT, 0);
         CV_INIT(&call->cv_tq, "call tq", CV_DEFAULT, 0);
-        MUTEX_ENTER(&rx_stats_mutex);
-        rx_stats.nCallStructs++;
-        MUTEX_EXIT(&rx_stats_mutex);
+        rx_MutexIncrement(rx_stats.nCallStructs, rx_stats_mutex);
         /* Initialize once-only items */
         queue_Init(&call->tq);
         queue_Init(&call->rq);
@@ -2208,10 +2198,7 @@ rxi_FreeCall(register struct rx_call *call)
 #else /* AFS_GLOBAL_RXLOCK_KERNEL */
     queue_Append(&rx_freeCallQueue, call);
 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
-    MUTEX_ENTER(&rx_stats_mutex);
-    rx_stats.nFreeCallStructs++;
-    MUTEX_EXIT(&rx_stats_mutex);
-
+    rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
     MUTEX_EXIT(&rx_freeCallQueue_lock);

     /* Destroy the connection if it was previously slated for
@@ -2247,11 +2234,7 @@ rxi_Alloc(register size_t size)
 {
     register char *p;

-    MUTEX_ENTER(&rx_stats_mutex);
-    rxi_Alloccnt++;
-    rxi_Allocsize += (afs_int32)size;
-    MUTEX_EXIT(&rx_stats_mutex);
-
+    rx_MutexAdd1Increment2(rxi_Allocsize, (afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
     p = (char *)osi_Alloc(size);
     if (!p)
@@ -2263,11 +2246,7 @@ rxi_Alloc(register size_t size)
 void
 rxi_Free(void *addr, register size_t size)
 {
-    MUTEX_ENTER(&rx_stats_mutex);
-    rxi_Alloccnt--;
-    rxi_Allocsize -= (afs_int32)size;
-    MUTEX_EXIT(&rx_stats_mutex);
-
+    rx_MutexAdd1Decrement2(rxi_Allocsize, -(afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
     osi_Free(addr, size);
 }
@@ -2300,9 +2279,7 @@ rxi_FindPeer(register afs_uint32 host, register u_short port,
             pp->next = rx_peerHashTable[hashIndex];
             rx_peerHashTable[hashIndex] = pp;
             rxi_InitPeerParams(pp);
-            MUTEX_ENTER(&rx_stats_mutex);
-            rx_stats.nPeerStructs++;
-            MUTEX_EXIT(&rx_stats_mutex);
+            rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
         }
     }
     if (pp && create) {
@@ -2407,9 +2384,7 @@ rxi_FindConnection(osi_socket socket, register afs_int32 host,
         /* XXXX Connection timeout? */
         if (service->newConnProc)
             (*service->newConnProc) (conn);
-        MUTEX_ENTER(&rx_stats_mutex);
-        rx_stats.nServerConns++;
-        MUTEX_EXIT(&rx_stats_mutex);
+        rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
     }

     MUTEX_ENTER(&conn->conn_data_lock);
@@ -2607,9 +2582,7 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
              * then, since this is a client connection we're getting data for
              * it must be for the previous call.
              */
-            MUTEX_ENTER(&rx_stats_mutex);
-            rx_stats.spuriousPacketsRead++;
-            MUTEX_EXIT(&rx_stats_mutex);
+            rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
             MUTEX_ENTER(&conn->conn_data_lock);
             conn->refCount--;
             MUTEX_EXIT(&conn->conn_data_lock);
@@ -2621,9 +2594,7 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
     if (type == RX_SERVER_CONNECTION) {	/* We're the server */
         if (np->header.callNumber < currentCallNumber) {
-            MUTEX_ENTER(&rx_stats_mutex);
-            rx_stats.spuriousPacketsRead++;
-            MUTEX_EXIT(&rx_stats_mutex);
+            rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
 #ifdef RX_ENABLE_LOCKS
             if (call)
                 MUTEX_EXIT(&call->lock);
@@ -2658,9 +2629,7 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
                 MUTEX_ENTER(&conn->conn_data_lock);
                 conn->refCount--;
                 MUTEX_EXIT(&conn->conn_data_lock);
-                MUTEX_ENTER(&rx_stats_mutex);
-                rx_stats.nBusies++;
-                MUTEX_EXIT(&rx_stats_mutex);
+                rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
                 return tp;
             }
             rxi_KeepAliveOn(call);
@@ -2723,9 +2692,7 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
                 MUTEX_ENTER(&conn->conn_data_lock);
                 conn->refCount--;
                 MUTEX_EXIT(&conn->conn_data_lock);
-                MUTEX_ENTER(&rx_stats_mutex);
-                rx_stats.nBusies++;
-                MUTEX_EXIT(&rx_stats_mutex);
+                rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
                 return tp;
             }
             rxi_KeepAliveOn(call);
@@ -2736,9 +2703,7 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
     /* Ignore all incoming acknowledgements for calls in DALLY state */
     if (call && (call->state == RX_STATE_DALLY)
         && (np->header.type == RX_PACKET_TYPE_ACK)) {
-        MUTEX_ENTER(&rx_stats_mutex);
-        rx_stats.ignorePacketDally++;
-        MUTEX_EXIT(&rx_stats_mutex);
+        rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
 #ifdef RX_ENABLE_LOCKS
         if (call) {
             MUTEX_EXIT(&call->lock);
@@ -2753,9 +2718,7 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
     /* Ignore anything that's not relevant to the current call.  If there
      * isn't a current call, then no packet is relevant. */
     if (!call || (np->header.callNumber != currentCallNumber)) {
-        MUTEX_ENTER(&rx_stats_mutex);
-        rx_stats.spuriousPacketsRead++;
-        MUTEX_EXIT(&rx_stats_mutex);
+        rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
 #ifdef RX_ENABLE_LOCKS
         if (call) {
             MUTEX_EXIT(&call->lock);
@@ -2820,9 +2783,7 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
              * XXX interact badly with the server-restart detection
              * XXX code in receiveackpacket. */
             if (ntohl(rx_GetInt32(np, FIRSTACKOFFSET)) < call->tfirst) {
-                MUTEX_ENTER(&rx_stats_mutex);
-                rx_stats.spuriousPacketsRead++;
-                MUTEX_EXIT(&rx_stats_mutex);
+                rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
                 MUTEX_EXIT(&call->lock);
                 MUTEX_ENTER(&conn->conn_data_lock);
                 conn->refCount--;
@@ -3120,9 +3081,7 @@ rxi_ReceiveDataPacket(register struct rx_call *call,
     int isFirst;
     struct rx_packet *tnp;
     struct clock when;
-    MUTEX_ENTER(&rx_stats_mutex);
-    rx_stats.dataPacketsRead++;
-    MUTEX_EXIT(&rx_stats_mutex);
+    rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);

 #ifdef KERNEL
     /* If there are no packet buffers, drop this new packet, unless we can find
@@ -3132,9 +3091,7 @@ rxi_ReceiveDataPacket(register struct rx_call *call,
         MUTEX_ENTER(&rx_freePktQ_lock);
         rxi_NeedMorePackets = TRUE;
         MUTEX_EXIT(&rx_freePktQ_lock);
-        MUTEX_ENTER(&rx_stats_mutex);
-        rx_stats.noPacketBuffersOnRead++;
-        MUTEX_EXIT(&rx_stats_mutex);
+        rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
         call->rprev = np->header.serial;
         rxi_calltrace(RX_TRACE_DROP, call);
         dpf(("packet %x dropped on receipt - quota problems", np));
@@ -3198,9 +3155,7 @@ rxi_ReceiveDataPacket(register struct rx_call *call,
             /* Check to make sure it is not a duplicate of one already queued */
             if (queue_IsNotEmpty(&call->rq)
                 && queue_First(&call->rq, rx_packet)->header.seq == seq) {
-                MUTEX_ENTER(&rx_stats_mutex);
-                rx_stats.dupPacketsRead++;
-                MUTEX_EXIT(&rx_stats_mutex);
+                rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
                 dpf(("packet %x dropped on receipt - duplicate", np));
                 rxevent_Cancel(call->delayedAckEvent, call,
                                RX_CALL_REFCOUNT_DELAY);
@@ -3284,9 +3239,7 @@ rxi_ReceiveDataPacket(register struct rx_call *call,
             /* If the new packet's sequence number has been sent to the
              * application already, then this is a duplicate */
             if (seq < call->rnext) {
-                MUTEX_ENTER(&rx_stats_mutex);
-                rx_stats.dupPacketsRead++;
-                MUTEX_EXIT(&rx_stats_mutex);
+                rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
                 rxevent_Cancel(call->delayedAckEvent, call,
                                RX_CALL_REFCOUNT_DELAY);
                 np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
@@ -3313,9 +3266,7 @@ rxi_ReceiveDataPacket(register struct rx_call *call,
                  0, queue_Scan(&call->rq, tp, nxp, rx_packet)) {
                 /*Check for duplicate packet */
                 if (seq == tp->header.seq) {
-                    MUTEX_ENTER(&rx_stats_mutex);
-                    rx_stats.dupPacketsRead++;
-                    MUTEX_EXIT(&rx_stats_mutex);
+                    rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
                     rxevent_Cancel(call->delayedAckEvent, call,
                                    RX_CALL_REFCOUNT_DELAY);
                     np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE,
@@ -3554,9 +3505,7 @@ rxi_ReceiveAckPacket(register struct rx_call *call, struct rx_packet *np,
     u_short maxMTU = 0;		/* Set if peer supports AFS 3.4a jumbo datagrams */
     int maxDgramPackets = 0;	/* Set if peer supports AFS 3.5 jumbo datagrams */

-    MUTEX_ENTER(&rx_stats_mutex);
-    rx_stats.ackPacketsRead++;
-    MUTEX_EXIT(&rx_stats_mutex);
+    rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
     ap = (struct rx_ackPacket *)rx_DataOf(np);
     nbytes = rx_Contiguous(np) - (int)((ap->acks) - (u_char *) ap);
     if (nbytes < 0)
@@ -4429,9 +4378,7 @@ rxi_ConnectionError(register struct rx_connection *conn,
             }
         }
         conn->error = error;
-        MUTEX_ENTER(&rx_stats_mutex);
-        rx_stats.fatalErrors++;
-        MUTEX_EXIT(&rx_stats_mutex);
+        rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
     }
 }
@@ -4845,9 +4792,7 @@ rxi_SendAck(register struct rx_call *call,
             nbytes -= p->wirevec[i].iov_len;
         }
     }
-    MUTEX_ENTER(&rx_stats_mutex);
-    rx_stats.ackPacketsSent++;
-    MUTEX_EXIT(&rx_stats_mutex);
+    rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
 #ifndef RX_ENABLE_TSFPQ
     if (!optionalPacket)
         rxi_FreePacket(p);
@@ -4871,9 +4816,7 @@ rxi_SendList(struct rx_call *call, struct rx_packet **list, int len,
     peer->nSent += len;
     if (resending)
         peer->reSends += len;
-    MUTEX_ENTER(&rx_stats_mutex);
-    rx_stats.dataPacketsSent += len;
-    MUTEX_EXIT(&rx_stats_mutex);
+    rx_MutexAdd(rx_stats.dataPacketsSent, len, rx_stats_mutex);
     MUTEX_EXIT(&peer->peer_lock);

     if (list[len - 1]->header.flags & RX_LAST_PACKET) {
@@ -4908,9 +4851,7 @@ rxi_SendList(struct rx_call *call, struct rx_packet **list, int len,
          * packet until the congestion window reaches the ack rate. */
         if (list[i]->header.serial) {
             requestAck = 1;
-            MUTEX_ENTER(&rx_stats_mutex);
-            rx_stats.dataPacketsReSent++;
-            MUTEX_EXIT(&rx_stats_mutex);
+            rx_MutexIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
         } else {
             /* improved RTO calculation- not Karn */
             list[i]->firstSent = *now;
@@ -4925,9 +4866,7 @@ rxi_SendList(struct rx_call *call, struct rx_packet **list, int len,
     peer->nSent++;
     if (resending)
         peer->reSends++;
-    MUTEX_ENTER(&rx_stats_mutex);
-    rx_stats.dataPacketsSent++;
-    MUTEX_EXIT(&rx_stats_mutex);
+    rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
     MUTEX_EXIT(&peer->peer_lock);

     /* Tag this packet as not being the last in this group,
@@ -5147,9 +5086,7 @@ rxi_Start(struct rxevent *event, register struct rx_call *call,
         }
         if (call->error) {
 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
-            MUTEX_ENTER(&rx_stats_mutex);
-            rx_tq_debug.rxi_start_in_error++;
-            MUTEX_EXIT(&rx_stats_mutex);
+            rx_MutexIncrement(rx_tq_debug.rxi_start_in_error, rx_stats_mutex);
 #endif
             return;
         }
@@ -5215,9 +5152,7 @@ rxi_Start(struct rxevent *event, register struct rx_call *call,
                     osi_Panic("rxi_Start: xmit queue clobbered");
                 }
                 if (p->flags & RX_PKTFLAG_ACKED) {
-                    MUTEX_ENTER(&rx_stats_mutex);
-                    rx_stats.ignoreAckedPacket++;
-                    MUTEX_EXIT(&rx_stats_mutex);
+                    rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
                     continue;	/* Ignore this packet if it has been acknowledged */
                 }
@@ -5284,9 +5219,7 @@ rxi_Start(struct rxevent *event, register struct rx_call *call,
                  * the time to reset the call. This will also inform the using
                  * process that the call is in an error state.
                  */
-                MUTEX_ENTER(&rx_stats_mutex);
-                rx_tq_debug.rxi_start_aborted++;
-                MUTEX_EXIT(&rx_stats_mutex);
+                rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
                 call->flags &= ~RX_CALL_TQ_BUSY;
                 if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
                     dpf(("call %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
@@ -5916,9 +5849,7 @@ rxi_ReapConnections(void)
                         rxi_rpc_peer_stat_cnt -= num_funcs;
                     }
                     rxi_FreePeer(peer);
-                    MUTEX_ENTER(&rx_stats_mutex);
-                    rx_stats.nPeerStructs--;
-                    MUTEX_EXIT(&rx_stats_mutex);
+                    rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
                     if (peer == *peer_ptr) {
                         *peer_ptr = next;
                         prev = next;
@@ -6329,7 +6260,7 @@ MakeDebugCall(osi_socket socket, afs_uint32 remoteAddr, afs_uint16 remotePort,
               void *outputData, size_t outputLength)
 {
     static afs_int32 counter = 100;
-    time_t waitTime, waitCount, startTime, endTime;
+    time_t waitTime, waitCount, startTime;
     struct rx_header theader;
     char tbuffer[1500];
     register afs_int32 code;
@@ -6751,9 +6682,7 @@ shutdown_rx(void)
                 }
                 next = peer->next;
                 rxi_FreePeer(peer);
-                MUTEX_ENTER(&rx_stats_mutex);
-                rx_stats.nPeerStructs--;
-                MUTEX_EXIT(&rx_stats_mutex);
+                rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
             }
         }
     }
diff --git a/src/rx/rx.h b/src/rx/rx.h
index 3a95498..ec4724d 100644
--- a/src/rx/rx.h
+++ b/src/rx/rx.h
@@ -1049,7 +1049,54 @@ typedef struct rx_interface_stat {

 #define RX_STATS_SERVICE_ID 409

-
+#ifdef AFS_NT40_ENV
+#define rx_MutexIncrement(object, mutex) InterlockedIncrement(&object)
+#define rx_MutexAdd(object, addend, mutex) InterlockedAdd(&object, addend)
+#define rx_MutexDecrement(object, mutex) InterlockedDecrement(&object)
+#define rx_MutexAdd1Increment2(object1, addend, object2, mutex) \
+    do { \
+        InterlockedAdd(&object1, addend); \
+        InterlockedIncrement(&object2); \
+    } while (0)
+#define rx_MutexAdd1Decrement2(object1, addend, object2, mutex) \
+    do { \
+        InterlockedAdd(&object1, addend); \
+        InterlockedDecrement(&object2); \
+    } while (0)
+#else
+#define rx_MutexIncrement(object, mutex) \
+    do { \
+        MUTEX_ENTER(&mutex); \
+        object++; \
+        MUTEX_EXIT(&mutex); \
+    } while(0)
+#define rx_MutexAdd(object, addend, mutex) \
+    do { \
+        MUTEX_ENTER(&mutex); \
+        object += addend; \
+        MUTEX_EXIT(&mutex); \
+    } while(0)
+#define rx_MutexAdd1Increment2(object1, addend, object2, mutex) \
+    do { \
+        MUTEX_ENTER(&mutex); \
+        object1 += addend; \
+        object2++; \
+        MUTEX_EXIT(&mutex); \
+    } while(0)
+#define rx_MutexAdd1Decrement2(object1, addend, object2, mutex) \
+    do { \
+        MUTEX_ENTER(&mutex); \
+        object1 += addend; \
+        object2--; \
+        MUTEX_EXIT(&mutex); \
+    } while(0)
+#define rx_MutexDecrement(object, mutex) \
+    do { \
+        MUTEX_ENTER(&mutex); \
+        object--; \
+        MUTEX_EXIT(&mutex); \
+    } while(0)
+#endif
 #endif /* _RX_ End of rx.h */
diff --git a/src/rx/rx_globals.h b/src/rx/rx_globals.h
index 99719fd..5632012 100644
--- a/src/rx/rx_globals.h
+++ b/src/rx/rx_globals.h
@@ -22,6 +22,7 @@
 #ifndef GLOBALSINIT
 #define GLOBALSINIT(x)
 #if defined(AFS_NT40_ENV)
+#define RX_STATS_INTERLOCKED 1
 #if defined(AFS_PTHREAD_ENV)
 #define EXT __declspec(dllimport) extern
 #else
diff --git a/src/rx/rx_packet.c b/src/rx/rx_packet.c
index f5cc949..687967b 100644
--- a/src/rx/rx_packet.c
+++ b/src/rx/rx_packet.c
@@ -266,7 +266,6 @@ rxi_AllocPackets(int class, int num_pkts, struct rx_queue * q)
 static int
 AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
 {
-    register struct rx_packet *c;
     register struct rx_ts_info_t * rx_ts_info;
     int transfer, alloc;
     SPLVAR;
@@ -317,25 +316,23 @@ AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
     if (overq) {
         rxi_NeedMorePackets = TRUE;
-        MUTEX_ENTER(&rx_stats_mutex);
         switch (class) {
         case RX_PACKET_CLASS_RECEIVE:
-            rx_stats.receivePktAllocFailures++;
+            rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
             break;
         case RX_PACKET_CLASS_SEND:
-            rx_stats.sendPktAllocFailures++;
+            rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
             break;
         case RX_PACKET_CLASS_SPECIAL:
-            rx_stats.specialPktAllocFailures++;
+            rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
             break;
         case RX_PACKET_CLASS_RECV_CBUF:
-            rx_stats.receiveCbufPktAllocFailures++;
+            rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
             break;
         case RX_PACKET_CLASS_SEND_CBUF:
-            rx_stats.sendCbufPktAllocFailures++;
+            rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
             break;
         }
-        MUTEX_EXIT(&rx_stats_mutex);
     }

     if (rx_nFreePackets < num_pkts)
@@ -1064,33 +1061,28 @@ rxi_AllocPacketNoLock(int class)
 #ifdef KERNEL
     if (rxi_OverQuota(class)) {
         rxi_NeedMorePackets = TRUE;
-        MUTEX_ENTER(&rx_stats_mutex);
         switch (class) {
         case RX_PACKET_CLASS_RECEIVE:
-            rx_stats.receivePktAllocFailures++;
+            rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
             break;
         case RX_PACKET_CLASS_SEND:
-            rx_stats.sendPktAllocFailures++;
+            rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
             break;
         case RX_PACKET_CLASS_SPECIAL:
-            rx_stats.specialPktAllocFailures++;
+            rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
             break;
         case RX_PACKET_CLASS_RECV_CBUF:
-            rx_stats.receiveCbufPktAllocFailures++;
+            rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
             break;
         case RX_PACKET_CLASS_SEND_CBUF:
-            rx_stats.sendCbufPktAllocFailures++;
+            rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
             break;
         }
-        MUTEX_EXIT(&rx_stats_mutex);
-        return (struct rx_packet *)0;
+        return (struct rx_packet *)0;
     }
 #endif /* KERNEL */
-    MUTEX_ENTER(&rx_stats_mutex);
-    rx_stats.packetRequests++;
-    MUTEX_EXIT(&rx_stats_mutex);
-
+    rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
     if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
 #ifdef KERNEL
@@ -1126,32 +1118,28 @@ rxi_AllocPacketNoLock(int class)
 #ifdef KERNEL
     if (rxi_OverQuota(class)) {
         rxi_NeedMorePackets = TRUE;
-        MUTEX_ENTER(&rx_stats_mutex);
         switch (class) {
         case RX_PACKET_CLASS_RECEIVE:
-            rx_stats.receivePktAllocFailures++;
+            rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
             break;
         case RX_PACKET_CLASS_SEND:
-            rx_stats.sendPktAllocFailures++;
+            rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
             break;
         case RX_PACKET_CLASS_SPECIAL:
-            rx_stats.specialPktAllocFailures++;
+            rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
             break;
         case RX_PACKET_CLASS_RECV_CBUF:
-            rx_stats.receiveCbufPktAllocFailures++;
+            rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
             break;
         case RX_PACKET_CLASS_SEND_CBUF:
-            rx_stats.sendCbufPktAllocFailures++;
+            rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
             break;
         }
-        MUTEX_EXIT(&rx_stats_mutex);
         return (struct rx_packet *)0;
     }
 #endif /* KERNEL */
-    MUTEX_ENTER(&rx_stats_mutex);
-    rx_stats.packetRequests++;
-    MUTEX_EXIT(&rx_stats_mutex);
+    rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);

 #ifdef KERNEL
     if (queue_IsEmpty(&rx_freePacketQueue))
@@ -1187,10 +1175,7 @@ rxi_AllocPacketTSFPQ(int class, int pull_global)

     RX_TS_INFO_GET(rx_ts_info);

-    MUTEX_ENTER(&rx_stats_mutex);
-    rx_stats.packetRequests++;
-    MUTEX_EXIT(&rx_stats_mutex);
-
+    rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
     if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
         MUTEX_ENTER(&rx_freePktQ_lock);
@@ -1410,9 +1395,7 @@ rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * hos
         p->length = (nbytes - RX_HEADER_SIZE);
         if ((nbytes > tlen) || (p->length & 0x8000)) {	/* Bogus packet */
             if (nbytes < 0 && errno == EWOULDBLOCK) {
-                MUTEX_ENTER(&rx_stats_mutex);
-                rx_stats.noPacketOnRead++;
-                MUTEX_EXIT(&rx_stats_mutex);
+                rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
             } else if (nbytes <= 0) {
                 MUTEX_ENTER(&rx_stats_mutex);
                 rx_stats.bogusPacketOnRead++;
@@ -1447,9 +1430,7 @@ rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * hos
         *port = from.sin_port;
         if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
             struct rx_peer *peer;
-            MUTEX_ENTER(&rx_stats_mutex);
-            rx_stats.packetsRead[p->header.type - 1]++;
-            MUTEX_EXIT(&rx_stats_mutex);
+            rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
             /*
              * Try to look up this peer structure.  If it doesn't exist,
              * don't create a new one -
@@ -2214,9 +2195,7 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
                  osi_NetSend(socket, &addr, p->wirevec, p->niovecs,
                              p->length + RX_HEADER_SIZE, istack)) != 0) {
             /* send failed, so let's hurry up the resend, eh? */
-            MUTEX_ENTER(&rx_stats_mutex);
-            rx_stats.netSendFailures++;
-            MUTEX_EXIT(&rx_stats_mutex);
+            rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
             p->retryTime = p->timeSent;	/* resend it very soon */
             clock_Addmsec(&(p->retryTime),
                           10 + (((afs_uint32) p->backoff) << 8));
@@ -2256,9 +2235,7 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
     }
     dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
 #endif
-    MUTEX_ENTER(&rx_stats_mutex);
-    rx_stats.packetsSent[p->header.type - 1]++;
-    MUTEX_EXIT(&rx_stats_mutex);
+    rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
     MUTEX_ENTER(&peer->peer_lock);
     hadd32(peer->bytesSent, p->length);
     MUTEX_EXIT(&peer->peer_lock);
@@ -2403,9 +2380,7 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
                  osi_NetSend(socket, &addr, &wirevec[0], len + 1, length,
                              istack)) != 0) {
             /* send failed, so let's hurry up the resend, eh? */
-            MUTEX_ENTER(&rx_stats_mutex);
-            rx_stats.netSendFailures++;
-            MUTEX_EXIT(&rx_stats_mutex);
+            rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
             for (i = 0; i < len; i++) {
                 p = list[i];
                 p->retryTime = p->timeSent;	/* resend it very soon */
@@ -2442,11 +2417,9 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
     }
     dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
 #endif
-    MUTEX_ENTER(&rx_stats_mutex);
-    rx_stats.packetsSent[p->header.type - 1]++;
-    MUTEX_EXIT(&rx_stats_mutex);
+    rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
     MUTEX_ENTER(&peer->peer_lock);
     hadd32(peer->bytesSent, p->length);
     MUTEX_EXIT(&peer->peer_lock);
 }
@@ -2595,7 +2567,7 @@ rxi_PrepareSendPacket(register struct rx_call *call,
                       register struct rx_packet *p, register int last)
 {
     register struct rx_connection *conn = call->conn;
-    int i, j;
+    int i;
     ssize_t len;		/* len must be a signed type; it can go negative */

     p->flags &= ~RX_PKTFLAG_ACKED;
-- 
1.9.4
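
Usage sketch (illustrative only; example_stats and example_stats_mutex
below are hypothetical stand-ins, not identifiers from the patch):

    /* Before this patch, every statistics update took the stats
     * mutex explicitly: */
    MUTEX_ENTER(&example_stats_mutex);
    example_stats.nWidgets++;
    MUTEX_EXIT(&example_stats_mutex);

    /* With this patch the same update is a single macro call.  On
     * AFS_NT40_ENV it expands to
     * InterlockedIncrement(&example_stats.nWidgets) and the mutex
     * argument goes unused; on all other platforms it expands to the
     * MUTEX_ENTER/increment/MUTEX_EXIT sequence shown above. */
    rx_MutexIncrement(example_stats.nWidgets, example_stats_mutex);

    /* The combined forms update two objects together, and the addend
     * may be negative, as in the rxi_Free() hunk of this patch:
     *
     *     rx_MutexAdd1Decrement2(rxi_Allocsize, -(afs_int32)size,
     *                            rxi_Alloccnt, rx_stats_mutex);
     *
     * Note the behavioral difference: on pthread platforms both
     * updates happen inside one critical section, while on Windows
     * they become two independent atomic operations, so a reader may
     * observe one update without the other. */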