rx-statistics-active-flag-20090110
author Jeffrey Altman <jaltman@your-file-system.com>
Sun, 11 Jan 2009 05:53:36 +0000 (05:53 +0000)
committer Jeffrey Altman <jaltman@secure-endpoints.com>
Sun, 11 Jan 2009 05:53:36 +0000 (05:53 +0000)
LICENSE MIT

Permit rx statistics gathering to be disabled by setting the
new rx_stats_active variable to 0.  This avoids grabbing the
rx_stats_mutex throughout the processing of requests and
permits greater concurrency throughout the library.
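
For example, a caller that wants to avoid rx_stats_mutex contention
could turn the counters off before initializing rx.  A minimal sketch,
assuming rx has not yet been initialized (the wrapper function and its
name are illustrative, not part of this change):

    #include <rx/rx.h>

    static void
    init_without_stats(u_int port)
    {
    #ifdef AFS_NT40_ENV
        rx_StatsOnOff(0);       /* exported setter added by this change (@246) */
    #else
        rx_stats_active = 0;    /* new global; defaults to 1 (gathering on) */
    #endif
        rx_Init(port);          /* continue with normal rx initialization */
    }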

src/libafsrpc/afsrpc.def
src/rx/rx.c
src/rx/rx_globals.h
src/rx/rx_kcommon.c
src/rx/rx_packet.c
src/rx/rx_prototypes.h
src/rx/rx_user.c

index 01a3b17..c9a82bb 100644 (file)
@@ -237,6 +237,8 @@ EXPORTS
         rx_GetMinUdpBufSize                     @242
         rx_SetUdpBufSize                        @243        
         rx_getAllAddrMaskMtu                    @244        
+        rx_stats_active                         @245 DATA
+        rx_StatsOnOff                           @246
 
 ; for performance testing
         rx_TSFPQGlobSize                        @2001 DATA
index 49184f4..5d1a591 100644 (file)
@@ -890,7 +890,8 @@ rx_NewConnection(afs_uint32 shost, u_short sport, u_short sservice,
         tconn->refCount++;    /* no lock required since only this thread knows */
        tconn->next = rx_connHashTable[hashindex];
        rx_connHashTable[hashindex] = tconn;
-       rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
     }
        
     MUTEX_EXIT(&rx_connHashTable_lock);
@@ -937,16 +938,20 @@ rxi_CleanupConnection(struct rx_connection *conn)
        conn->peer->idleWhen = clock_Sec();
        if (conn->peer->refCount < 1) {
            conn->peer->refCount = 1;
-           rx_MutexIncrement(rxi_lowPeerRefCount, rx_stats_mutex);
+           if (rx_stats_active)
+                rx_MutexIncrement(rxi_lowPeerRefCount, rx_stats_mutex);
        }
     }
     conn->peer->refCount--;
     MUTEX_EXIT(&rx_peerHashTable_lock);
 
-    if (conn->type == RX_SERVER_CONNECTION)
-       rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
-    else
-       rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
+    if (rx_stats_active) 
+    {
+        if (conn->type == RX_SERVER_CONNECTION)
+            rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
+        else
+            rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
+    }
 #ifndef KERNEL
     if (conn->specific) {
        int i;
@@ -1032,9 +1037,11 @@ rxi_DestroyConnectionNoLock(register struct rx_connection *conn)
     if (conn->refCount > 0)
        conn->refCount--;
     else {
-       MUTEX_ENTER(&rx_stats_mutex);
-       rxi_lowConnRefCount++;
-       MUTEX_EXIT(&rx_stats_mutex);
+        if (rx_stats_active) {
+            MUTEX_ENTER(&rx_stats_mutex);
+            rxi_lowConnRefCount++;
+            MUTEX_EXIT(&rx_stats_mutex);
+        }
     }
 
     if ((conn->refCount > 0) || (conn->flags & RX_CONN_BUSY)) {
@@ -1575,7 +1582,8 @@ rxi_ServerProc(int threadID, struct rx_call *newcall, osi_socket * socketp)
            (*tservice->afterProc) (call, code);
 
        rx_EndCall(call, code);
-       rx_MutexIncrement(rxi_nCalls, rx_stats_mutex);
+       if (rx_stats_active)
+            rx_MutexIncrement(rxi_nCalls, rx_stats_mutex);
     }
 }
 
@@ -2272,7 +2280,8 @@ rxi_NewCall(register struct rx_connection *conn, register int channel)
        call = queue_First(&rx_freeCallQueue, rx_call);
 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
        queue_Remove(call);
-        rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
        MUTEX_EXIT(&rx_freeCallQueue_lock);
        MUTEX_ENTER(&call->lock);
        CLEAR_CALL_QUEUE_LOCK(call);
@@ -2367,7 +2376,8 @@ rxi_FreeCall(register struct rx_call *call)
 #else /* AFS_GLOBAL_RXLOCK_KERNEL */
     queue_Append(&rx_freeCallQueue, call);
 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
-    rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
     MUTEX_EXIT(&rx_freeCallQueue_lock);
 
     /* Destroy the connection if it was previously slated for
@@ -2401,7 +2411,8 @@ rxi_Alloc(register size_t size)
 {
     register char *p;
 
-    rx_MutexAdd1Increment2(rxi_Allocsize, (afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexAdd1Increment2(rxi_Allocsize, (afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
 
 p = (char *)
 #if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
@@ -2418,7 +2429,8 @@ p = (char *)
 void
 rxi_Free(void *addr, register size_t size)
 {
-    rx_MutexAdd1Decrement2(rxi_Allocsize, -(afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexAdd1Decrement2(rxi_Allocsize, -(afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
     osi_Free(addr, size);
 }
 
@@ -2488,7 +2500,8 @@ rxi_FindPeer(register afs_uint32 host, register u_short port,
            pp->next = rx_peerHashTable[hashIndex];
            rx_peerHashTable[hashIndex] = pp;
            rxi_InitPeerParams(pp);
-           rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
+           if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
        }
     }
     if (pp && create) {
@@ -2598,7 +2611,8 @@ rxi_FindConnection(osi_socket socket, register afs_int32 host,
        /* XXXX Connection timeout? */
        if (service->newConnProc)
            (*service->newConnProc) (conn);
-        rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
     }
 
     rx_MutexIncrement(conn->refCount, conn->conn_data_lock);
@@ -2786,7 +2800,8 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
             * then, since this is a client connection we're getting data for
             * it must be for the previous call.
             */
-           rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+           if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
            rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
            return np;
        }
@@ -2796,7 +2811,8 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
 
     if (type == RX_SERVER_CONNECTION) {        /* We're the server */
        if (np->header.callNumber < currentCallNumber) {
-           rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+           if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
 #ifdef RX_ENABLE_LOCKS
            if (call)
                MUTEX_EXIT(&call->lock);
@@ -2827,7 +2843,8 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
                tp = rxi_SendCallAbort(call, np, 1, 0);
                MUTEX_EXIT(&call->lock);
                rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
-                rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
+                if (rx_stats_active)
+                    rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
                return tp;
            }
            rxi_KeepAliveOn(call);
@@ -2886,7 +2903,8 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
                tp = rxi_SendCallAbort(call, np, 1, 0);
                MUTEX_EXIT(&call->lock);
                rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
-                rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
+                if (rx_stats_active)
+                    rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
                return tp;
            }
            rxi_KeepAliveOn(call);
@@ -2897,7 +2915,8 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
        /* Ignore all incoming acknowledgements for calls in DALLY state */
        if (call && (call->state == RX_STATE_DALLY)
            && (np->header.type == RX_PACKET_TYPE_ACK)) {
-           rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
+           if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
 #ifdef  RX_ENABLE_LOCKS
            if (call) {
                MUTEX_EXIT(&call->lock);
@@ -2910,7 +2929,8 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
        /* Ignore anything that's not relevant to the current call.  If there
         * isn't a current call, then no packet is relevant. */
        if (!call || (np->header.callNumber != currentCallNumber)) {
-           rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+           if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
 #ifdef RX_ENABLE_LOCKS
            if (call) {
                MUTEX_EXIT(&call->lock);
@@ -2971,7 +2991,8 @@ rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
                 * XXX interact badly with the server-restart detection 
                 * XXX code in receiveackpacket.  */
                if (ntohl(rx_GetInt32(np, FIRSTACKOFFSET)) < call->tfirst) {
-                    rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+                    if (rx_stats_active)
+                        rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
                    MUTEX_EXIT(&call->lock);
                    rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
                    return np;
@@ -3266,7 +3287,8 @@ rxi_ReceiveDataPacket(register struct rx_call *call,
     int isFirst;
     struct rx_packet *tnp;
     struct clock when, now;
-    rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
 
 #ifdef KERNEL
     /* If there are no packet buffers, drop this new packet, unless we can find
@@ -3276,7 +3298,8 @@ rxi_ReceiveDataPacket(register struct rx_call *call,
        MUTEX_ENTER(&rx_freePktQ_lock);
        rxi_NeedMorePackets = TRUE;
        MUTEX_EXIT(&rx_freePktQ_lock);
-        rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
        call->rprev = np->header.serial;
        rxi_calltrace(RX_TRACE_DROP, call);
        dpf(("packet %x dropped on receipt - quota problems", np));
@@ -3341,7 +3364,8 @@ rxi_ReceiveDataPacket(register struct rx_call *call,
            /* Check to make sure it is not a duplicate of one already queued */
            if (queue_IsNotEmpty(&call->rq)
                && queue_First(&call->rq, rx_packet)->header.seq == seq) {
-                rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+                if (rx_stats_active)
+                    rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
                dpf(("packet %x dropped on receipt - duplicate", np));
                rxevent_Cancel(call->delayedAckEvent, call,
                               RX_CALL_REFCOUNT_DELAY);
@@ -3429,7 +3453,8 @@ rxi_ReceiveDataPacket(register struct rx_call *call,
            /* If the new packet's sequence number has been sent to the
             * application already, then this is a duplicate */
            if (seq < call->rnext) {
-                rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+                if (rx_stats_active)
+                    rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
                rxevent_Cancel(call->delayedAckEvent, call,
                               RX_CALL_REFCOUNT_DELAY);
                np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
@@ -3456,7 +3481,8 @@ rxi_ReceiveDataPacket(register struct rx_call *call,
                 0, queue_Scan(&call->rq, tp, nxp, rx_packet)) {
                /*Check for duplicate packet */
                if (seq == tp->header.seq) {
-                    rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+                    if (rx_stats_active)
+                        rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
                    rxevent_Cancel(call->delayedAckEvent, call,
                                   RX_CALL_REFCOUNT_DELAY);
                    np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE,
@@ -3700,7 +3726,8 @@ rxi_ReceiveAckPacket(register struct rx_call *call, struct rx_packet *np,
     u_short maxMTU = 0;                /* Set if peer supports AFS 3.4a jumbo datagrams */
     int maxDgramPackets = 0;   /* Set if peer supports AFS 3.5 jumbo datagrams */
 
-    rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
     ap = (struct rx_ackPacket *)rx_DataOf(np);
     nbytes = rx_Contiguous(np) - (int)((ap->acks) - (u_char *) ap);
     if (nbytes < 0)
@@ -4599,7 +4626,8 @@ rxi_ConnectionError(register struct rx_connection *conn,
             }
        }
         rx_SetConnError(conn, error);
-        rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
     }
 }
 
@@ -5037,7 +5065,8 @@ rxi_SendAck(register struct rx_call *call,
                nbytes -= p->wirevec[i].iov_len;
        }
     }
-    rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
 #ifndef RX_ENABLE_TSFPQ
     if (!optionalPacket)
        rxi_FreePacket(p);
@@ -5061,7 +5090,8 @@ rxi_SendList(struct rx_call *call, struct rx_packet **list, int len,
     peer->nSent += len;
     if (resending)
        peer->reSends += len;
-    rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
     MUTEX_EXIT(&peer->peer_lock);
 
     if (list[len - 1]->header.flags & RX_LAST_PACKET) {
@@ -5096,7 +5126,8 @@ rxi_SendList(struct rx_call *call, struct rx_packet **list, int len,
         * packet until the congestion window reaches the ack rate. */
        if (list[i]->header.serial) {
            requestAck = 1;
-           rx_MutexIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
        } else {
            /* improved RTO calculation- not Karn */
            list[i]->firstSent = *now;
@@ -5111,7 +5142,8 @@ rxi_SendList(struct rx_call *call, struct rx_packet **list, int len,
        peer->nSent++;
        if (resending)
            peer->reSends++;
-        rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
        MUTEX_EXIT(&peer->peer_lock);
 
        /* Tag this packet as not being the last in this group,
@@ -5335,7 +5367,8 @@ rxi_Start(struct rxevent *event,
     }
     if (call->error) {
 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
-        rx_MutexIncrement(rx_tq_debug.rxi_start_in_error, rx_stats_mutex);
+        if (rx_stats_active)
+            rx_MutexIncrement(rx_tq_debug.rxi_start_in_error, rx_stats_mutex);
 #endif
        return;
     }
@@ -5410,7 +5443,8 @@ rxi_Start(struct rxevent *event,
                    if (p->flags & RX_PKTFLAG_ACKED) {
                        /* Since we may block, don't trust this */
                        usenow.sec = usenow.usec = 0;
-                        rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
+                        if (rx_stats_active)
+                            rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
                        continue;       /* Ignore this packet if it has been acknowledged */
                    }
 
@@ -5477,7 +5511,8 @@ rxi_Start(struct rxevent *event,
                     * the time to reset the call. This will also inform the using
                     * process that the call is in an error state.
                     */
-                    rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
+                    if (rx_stats_active)
+                        rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
                    call->flags &= ~RX_CALL_TQ_BUSY;
                    if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
                        dpf(("call %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
@@ -5962,19 +5997,21 @@ rxi_ComputeRoundTripTime(register struct rx_packet *p,
        return;                 /* somebody set the clock back, don't count this time. */
     }
     clock_Sub(rttp, sentp);
-    MUTEX_ENTER(&rx_stats_mutex);
-    if (clock_Lt(rttp, &rx_stats.minRtt))
-       rx_stats.minRtt = *rttp;
-    if (clock_Gt(rttp, &rx_stats.maxRtt)) {
-       if (rttp->sec > 60) {
-           MUTEX_EXIT(&rx_stats_mutex);
-           return;             /* somebody set the clock ahead */
-       }
-       rx_stats.maxRtt = *rttp;
+    if (rx_stats_active) {
+        MUTEX_ENTER(&rx_stats_mutex);
+        if (clock_Lt(rttp, &rx_stats.minRtt))
+            rx_stats.minRtt = *rttp;
+        if (clock_Gt(rttp, &rx_stats.maxRtt)) {
+            if (rttp->sec > 60) {
+                MUTEX_EXIT(&rx_stats_mutex);
+                return;                /* somebody set the clock ahead */
+            }
+            rx_stats.maxRtt = *rttp;
+        }
+        clock_Add(&rx_stats.totalRtt, rttp);
+        rx_stats.nRttSamples++;
+        MUTEX_EXIT(&rx_stats_mutex);
     }
-    clock_Add(&rx_stats.totalRtt, rttp);
-    rx_stats.nRttSamples++;
-    MUTEX_EXIT(&rx_stats_mutex);
 
     /* better rtt calculation courtesy of UMich crew (dave,larry,peter,?) */
 
@@ -6157,7 +6194,8 @@ rxi_ReapConnections(struct rxevent *unused, void *unused1, void *unused2)
                        rxi_rpc_peer_stat_cnt -= num_funcs;
                    }
                    rxi_FreePeer(peer);
-                    rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+                    if (rx_stats_active)
+                        rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
                    if (peer == *peer_ptr) {
                        *peer_ptr = next;
                        prev = next;
@@ -6396,6 +6434,12 @@ rx_DebugOnOff(int on)
 {
     rxdebug_active = on;
 }
+
+void
+rx_StatsOnOff(int on)
+{
+    rx_stats_active = on;
+}
 #endif /* AFS_NT40_ENV */
 
 
@@ -7000,7 +7044,8 @@ shutdown_rx(void)
                }
                next = peer->next;
                rxi_FreePeer(peer);
-                rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+                if (rx_stats_active)
+                    rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
            }
        }
     }
index a25f70d..91e58f5 100644 (file)
@@ -545,6 +545,8 @@ EXT afs_kmutex_t rx_connHashTable_lock;
 #define        rxi_AllocConnection()   (struct rx_connection *) rxi_Alloc(sizeof(struct rx_connection))
 #define rxi_FreeConnection(conn) (rxi_Free(conn, sizeof(struct rx_connection)))
 
+EXT afs_int32 rx_stats_active GLOBALSINIT(1);  /* boolean - rx statistics gathering */
+
 #ifdef RXDEBUG
 /* Some debugging stuff */
 EXT FILE *rx_debugFile;                /* Set by the user to a stdio file for debugging output */
index ea5d284..c2f1d98 100644 (file)
@@ -328,9 +328,11 @@ MyPacketProc(struct rx_packet **ahandle, int asize)
                                 RX_PACKET_CLASS_RECV_CBUF)) {
                rxi_FreePacket(tp);
                tp = NULL;
-               MUTEX_ENTER(&rx_stats_mutex);
-               rx_stats.noPacketBuffersOnRead++;
-               MUTEX_EXIT(&rx_stats_mutex);
+                if (rx_stats_active) {
+                    MUTEX_ENTER(&rx_stats_mutex);
+                    rx_stats.noPacketBuffersOnRead++;
+                    MUTEX_EXIT(&rx_stats_mutex);
+                }
            }
        }
     } else {
@@ -339,9 +341,11 @@ MyPacketProc(struct rx_packet **ahandle, int asize)
         * should do this at a higher layer and let other
         * end know we're losing.
         */
-       MUTEX_ENTER(&rx_stats_mutex);
-       rx_stats.bogusPacketOnRead++;
-       MUTEX_EXIT(&rx_stats_mutex);
+        if (rx_stats_active) {
+            MUTEX_ENTER(&rx_stats_mutex);
+            rx_stats.bogusPacketOnRead++;
+            MUTEX_EXIT(&rx_stats_mutex);
+        }
        /* I DON"T LIKE THIS PRINTF -- PRINTFS MAKE THINGS VERY VERY SLOOWWW */
        dpf(("rx: packet dropped: bad ulen=%d\n", asize));
        tp = NULL;
@@ -1176,10 +1180,12 @@ rxk_ReadPacket(osi_socket so, struct rx_packet *p, int *host, int *port)
        p->length = nbytes - RX_HEADER_SIZE;;
        if ((nbytes > tlen) || (p->length & 0x8000)) {  /* Bogus packet */
            if (nbytes <= 0) {
-               MUTEX_ENTER(&rx_stats_mutex);
-               rx_stats.bogusPacketOnRead++;
-               rx_stats.bogusHost = from.sin_addr.s_addr;
-               MUTEX_EXIT(&rx_stats_mutex);
+                if (rx_stats_active) {
+                    MUTEX_ENTER(&rx_stats_mutex);
+                    rx_stats.bogusPacketOnRead++;
+                    rx_stats.bogusHost = from.sin_addr.s_addr;
+                    MUTEX_EXIT(&rx_stats_mutex);
+                }
                dpf(("B: bogus packet from [%x,%d] nb=%d",
                     from.sin_addr.s_addr, from.sin_port, nbytes));
            }
@@ -1191,9 +1197,11 @@ rxk_ReadPacket(osi_socket so, struct rx_packet *p, int *host, int *port)
            *host = from.sin_addr.s_addr;
            *port = from.sin_port;
            if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
-               MUTEX_ENTER(&rx_stats_mutex);
-               rx_stats.packetsRead[p->header.type - 1]++;
-               MUTEX_EXIT(&rx_stats_mutex);
+                if (rx_stats_active) {
+                    MUTEX_ENTER(&rx_stats_mutex);
+                    rx_stats.packetsRead[p->header.type - 1]++;
+                    MUTEX_EXIT(&rx_stats_mutex);
+                }
            }
 
            /* Free any empty packet buffers at the end of this packet */
index 7f0be72..aa6058e 100644 (file)
@@ -313,22 +313,24 @@ AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
 
     if (overq) {
        rxi_NeedMorePackets = TRUE;
-       switch (class) {
-       case RX_PACKET_CLASS_RECEIVE:
-           rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SEND:
-           rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SPECIAL:
-            rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_RECV_CBUF:
-           rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SEND_CBUF:
-           rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
-           break;
+        if (rx_stats_active) {
+            switch (class) {
+            case RX_PACKET_CLASS_RECEIVE:
+                rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND:
+                rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SPECIAL:
+                rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_RECV_CBUF:
+                rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND_CBUF:
+                rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            }
        }
     }
 
@@ -1122,28 +1124,31 @@ rxi_AllocPacketNoLock(int class)
 #ifdef KERNEL
     if (rxi_OverQuota(class)) {
        rxi_NeedMorePackets = TRUE;
-       switch (class) {
-       case RX_PACKET_CLASS_RECEIVE:
-           rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SEND:
-           rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SPECIAL:
-            rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_RECV_CBUF:
-           rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SEND_CBUF:
-           rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
-           break;
+        if (rx_stats_active) {
+            switch (class) {
+            case RX_PACKET_CLASS_RECEIVE:
+                rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND:
+                rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SPECIAL:
+                rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_RECV_CBUF:
+                rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND_CBUF:
+                rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            }
        }
         return (struct rx_packet *)0;
     }
 #endif /* KERNEL */
 
-    rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
     if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
 
 #ifdef KERNEL
@@ -1179,28 +1184,31 @@ rxi_AllocPacketNoLock(int class)
 #ifdef KERNEL
     if (rxi_OverQuota(class)) {
        rxi_NeedMorePackets = TRUE;
-       switch (class) {
-       case RX_PACKET_CLASS_RECEIVE:
-           rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SEND:
-           rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SPECIAL:
-            rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_RECV_CBUF:
-           rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
-           break;
-       case RX_PACKET_CLASS_SEND_CBUF:
-           rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
-           break;
-       }
+        if (rx_stats_active) {
+            switch (class) {
+            case RX_PACKET_CLASS_RECEIVE:
+                rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND:
+                rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SPECIAL:
+                rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_RECV_CBUF:
+                rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            case RX_PACKET_CLASS_SEND_CBUF:
+                rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+                break;
+            }
+        }
        return (struct rx_packet *)0;
     }
 #endif /* KERNEL */
 
-    rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
 
 #ifdef KERNEL
     if (queue_IsEmpty(&rx_freePacketQueue))
@@ -1236,7 +1244,8 @@ rxi_AllocPacketTSFPQ(int class, int pull_global)
 
     RX_TS_INFO_GET(rx_ts_info);
 
-    rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
     if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
         MUTEX_ENTER(&rx_freePktQ_lock);
 
@@ -1456,12 +1465,15 @@ rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * hos
     p->length = (nbytes - RX_HEADER_SIZE);
     if ((nbytes > tlen) || (p->length & 0x8000)) {     /* Bogus packet */
        if (nbytes < 0 && errno == EWOULDBLOCK) {
-            rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
        } else if (nbytes <= 0) {
-           MUTEX_ENTER(&rx_stats_mutex);
-           rx_stats.bogusPacketOnRead++;
-           rx_stats.bogusHost = from.sin_addr.s_addr;
-           MUTEX_EXIT(&rx_stats_mutex);
+            if (rx_stats_active) {
+                MUTEX_ENTER(&rx_stats_mutex);
+                rx_stats.bogusPacketOnRead++;
+                rx_stats.bogusHost = from.sin_addr.s_addr;
+                MUTEX_EXIT(&rx_stats_mutex);
+            }
            dpf(("B: bogus packet from [%x,%d] nb=%d", ntohl(from.sin_addr.s_addr),
                 ntohs(from.sin_port), nbytes));
        }
@@ -1491,7 +1503,8 @@ rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * hos
        *port = from.sin_port;
        if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
            struct rx_peer *peer;
-            rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
            /*
             * Try to look up this peer structure.  If it doesn't exist,
             * don't create a new one - 
@@ -2035,6 +2048,7 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
                return ap;
 
            /* Since its all int32s convert to network order with a loop. */
+        if (rx_stats_active)
            MUTEX_ENTER(&rx_stats_mutex);
            s = (afs_int32 *) & rx_stats;
            for (i = 0; i < sizeof(rx_stats) / sizeof(afs_int32); i++, s++)
@@ -2042,6 +2056,7 @@ rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
 
            tl = ap->length;
            ap->length = sizeof(rx_stats);
+        if (rx_stats_active)
            MUTEX_EXIT(&rx_stats_mutex);
            rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
            ap->length = tl;
@@ -2257,7 +2272,8 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
             osi_NetSend(socket, &addr, p->wirevec, p->niovecs,
                         p->length + RX_HEADER_SIZE, istack)) != 0) {
            /* send failed, so let's hurry up the resend, eh? */
-            rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
            p->retryTime = p->timeSent; /* resend it very soon */
            clock_Addmsec(&(p->retryTime),
                          10 + (((afs_uint32) p->backoff) << 8));
@@ -2297,7 +2313,8 @@ rxi_SendPacket(struct rx_call *call, struct rx_connection *conn,
     }
     dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
 #endif
-    rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
     MUTEX_ENTER(&peer->peer_lock);
     hadd32(peer->bytesSent, p->length);
     MUTEX_EXIT(&peer->peer_lock);
@@ -2442,7 +2459,8 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
             osi_NetSend(socket, &addr, &wirevec[0], len + 1, length,
                         istack)) != 0) {
            /* send failed, so let's hurry up the resend, eh? */
-            rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+            if (rx_stats_active)
+                rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
            for (i = 0; i < len; i++) {
                p = list[i];
                p->retryTime = p->timeSent;     /* resend it very soon */
@@ -2479,7 +2497,8 @@ rxi_SendPacketList(struct rx_call *call, struct rx_connection *conn,
     dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
 
 #endif
-    rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
     MUTEX_ENTER(&peer->peer_lock);
     hadd32(peer->bytesSent, p->length);
     MUTEX_EXIT(&peer->peer_lock);
index 5fc88d8..048ce7f 100644 (file)
@@ -26,6 +26,7 @@ extern int rx_Init(u_int port);
 extern int rx_InitHost(u_int host, u_int port);
 #ifdef AFS_NT40_ENV
 extern void rx_DebugOnOff(int on);
+extern void rx_StatsOnOff(int on);
 #endif
 #ifndef KERNEL
 extern void rxi_StartServerProcs(int nExistingProcs);
index 78f86fd..c9a9793 100644 (file)
@@ -195,9 +195,11 @@ rxi_GetHostUDPSocket(u_int ahost, u_short port)
        if (!greedy)
            (osi_Msg "%s*WARNING* Unable to increase buffering on socket\n",
             name);
-       MUTEX_ENTER(&rx_stats_mutex);
-       rx_stats.socketGreedy = greedy;
-       MUTEX_EXIT(&rx_stats_mutex);
+       if (rx_stats_active) {
+            MUTEX_ENTER(&rx_stats_mutex);
+            rx_stats.socketGreedy = greedy;
+            MUTEX_EXIT(&rx_stats_mutex);
+        }
     }
 
 #ifdef AFS_LINUX22_ENV