rx_misc.o \
rx_packet.o \
rx_rdwr.o \
+ rx_stats.o \
rx_trace.o \
rx_multi.o
rx_packet.o: ${RX}/rx_packet.c
${CCRULE} ${RX}/rx_packet.c
+rx_stats.o: ${RX}/rx_stats.c
+ ${CCRULE} ${RX}/rx_stats.c
+
rx_rdwr.o: ${RX}/rx_rdwr.c
${CCRULE} ${RX}/rx_rdwr.c
$(OUT)\rx_globals.obj $(OUT)\rx_getaddr.obj $(OUT)\rx_misc.obj \
$(OUT)\rx_packet.obj $(OUT)\rx_rdwr.obj $(OUT)\rx_trace.obj \
$(OUT)\rx_xmit_nt.obj $(OUT)\rx_conncache.obj $(OUT)\rx_opaque.obj \
- $(OUT)\rx_identity.obj
+ $(OUT)\rx_identity.obj $(OUT)\rx_stats.obj
RXSTATOBJS = $(OUT)\rxstat.obj $(OUT)\rxstat.ss.obj $(OUT)\rxstat.xdr.obj $(OUT)\rxstat.cs.obj
rx_GetServiceSpecific @267
rx_SetServiceSpecific @268
rx_NewThreadId @269
+ rx_GetStatistics @270
+ rx_FreeStatistics @271
; for performance testing
rx_TSFPQGlobSize @2001 DATA
$(UOBJ)/rx_null.o \
$(UOBJ)/rx_opaque.o \
$(UOBJ)/rx_getaddr.o \
+ $(UOBJ)/rx_stats.o \
$(UOBJ)/rx_packet.o \
$(UOBJ)/rx_conncache.o \
$(UOBJ)/xdr_rx.o \
$(WEBOBJ)/rx_null.o \
$(WEBOBJ)/rx_opaque.o \
$(WEBOBJ)/rx_getaddr.o \
+ $(WEBOBJ)/rx_stats.o \
$(WEBOBJ)/rx_packet.o \
$(WEBOBJ)/rx_conncache.o \
$(WEBOBJ)/xdr_rx.o \
$(WEBOBJ)/rx_null.o \
$(WEBOBJ)/rx_opaque.o \
$(WEBOBJ)/rx_getaddr.o \
+ $(WEBOBJ)/rx_stats.o \
$(WEBOBJ)/rx_packet.o \
$(WEBOBJ)/rx_conncache.o \
$(WEBOBJ)/xdr_rx.o \
$(JUAFS)/rx_null.o \
$(JUAFS)/rx_opaque.o \
$(JUAFS)/rx_getaddr.o \
+ $(JUAFS)/rx_stats.o \
$(JUAFS)/rx_packet.o \
$(JUAFS)/rx_conncache.o \
$(JUAFS)/xdr_rx.o \
$(CRULE1)
$(UOBJ)/rx_getaddr.o: $(TOP_SRC_RX)/rx_getaddr.c
$(CRULE1)
+$(UOBJ)/rx_stats.o: $(TOP_SRC_RX)/rx_stats.c
+ $(CRULE1)
$(UOBJ)/rx_packet.o: $(TOP_SRC_RX)/rx_packet.c
$(CRULE1)
$(UOBJ)/rx_conncache.o: $(TOP_SRCDIR)/rx/rx_conncache.c
$(CRULE2)
$(WEBOBJ)/rx_getaddr.o: $(TOP_SRC_RX)/rx_getaddr.c
$(CRULE2)
+$(WEBOBJ)/rx_stats.o: $(TOP_SRC_RX)/rx_stats.c
+ $(CRULE2)
$(WEBOBJ)/rx_packet.o: $(TOP_SRC_RX)/rx_packet.c
$(CRULE2)
$(WEBOBJ)/rx_conncache.o: $(TOP_SRCDIR)/rx/rx_conncache.c
$(CRULE1)
$(JUAFS)/rx_getaddr.o: $(TOP_SRC_RX)/rx_getaddr.c
$(CRULE1)
+$(JUAFS)/rx_stats.o: $(TOP_SRC_RX)/rx_stats.c
+ $(CRULE1)
$(JUAFS)/rx_packet.o: $(TOP_SRC_RX)/rx_packet.c
$(CRULE1)
$(JUAFS)/rx_conncache.o: $(TOP_SRCDIR)/rx/rx_conncache.c
RXOBJS_common = rx_clock.o rx_event.o rx_user.o rx_lwp.o rx.o rx_null.o \
rx_globals.o rx_getaddr.o rx_misc.o rx_packet.o rx_rdwr.o rx_trace.o \
- rx_conncache.o rx_opaque.o rx_identity.o \
+ rx_conncache.o rx_opaque.o rx_identity.o rx_stats.o \
xdr_int32.o xdr_int64.o xdr_update.o xdr_refernce.o
RXOBJS = ${RXOBJS_common}
$(OUT)\rx_globals.obj $(OUT)\rx_getaddr.obj $(OUT)\rx_misc.obj \
$(OUT)\rx_packet.obj $(OUT)\rx_rdwr.obj $(OUT)\rx_trace.obj \
$(OUT)\rx_xmit_nt.obj $(OUT)\rx_conncache.obj \
- $(OUT)\rx_opaque.obj $(OUT)\rx_identity.obj
+ $(OUT)\rx_opaque.obj $(OUT)\rx_identity.obj $(OUT)\rx_stats.obj
MULTIOBJS = $(OUT)\rx_multi.obj
#include "rx_trace.h"
#include "rx_atomic.h"
#include "rx_internal.h"
+#include "rx_stats.h"
#define AFSOP_STOP_RXCALLBACK 210 /* Stop CALLBACK process */
#define AFSOP_STOP_AFS 211 /* Stop AFS process */
#define AFSOP_STOP_BKG 212 /* Stop BKG process */
# include "rx_globals.h"
# include "rx_trace.h"
# include "rx_internal.h"
+# include "rx_stats.h"
# include <afs/rxgen_consts.h>
#endif /* KERNEL */
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
struct rx_tq_debug {
- afs_int32 rxi_start_aborted; /* rxi_start awoke after rxi_Send in error. */
- afs_int32 rxi_start_in_error;
+ rx_atomic_t rxi_start_aborted; /* rxi_start awoke after rxi_Send in error.*/
+ rx_atomic_t rxi_start_in_error;
} rx_tq_debug;
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
* to ease NT porting
*/
-extern afs_kmutex_t rx_stats_mutex;
extern afs_kmutex_t rx_quota_mutex;
extern afs_kmutex_t rx_pthread_mutex;
extern afs_kmutex_t rx_packets_mutex;
rxi_nCalls = 0;
rx_connDeadTime = 12;
rx_tranquil = 0; /* reset flag */
- memset(&rx_stats, 0, sizeof(struct rx_statistics));
+ rxi_ResetStatistics();
htable = (char *)
osi_Alloc(rx_hashTableSize * sizeof(struct rx_connection *));
PIN(htable, rx_hashTableSize * sizeof(struct rx_connection *)); /* XXXXX */
conn->next = rx_connHashTable[hashindex];
rx_connHashTable[hashindex] = conn;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nClientConns);
MUTEX_EXIT(&rx_connHashTable_lock);
USERPRI;
return conn;
if (rx_stats_active)
{
if (conn->type == RX_SERVER_CONNECTION)
- rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nServerConns);
else
- rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nClientConns);
}
#ifndef KERNEL
if (conn->specific) {
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
queue_Remove(call);
if (rx_stats_active)
- rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nFreeCallStructs);
MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_ENTER(&call->lock);
CLEAR_CALL_QUEUE_LOCK(call);
rx_allCallsp = call;
call->call_id =
#endif /* RXDEBUG_PACKET */
- rx_MutexIncrement(rx_stats.nCallStructs, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_atomic_inc(&rx_stats.nCallStructs);
MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_INIT(&call->lock, "call lock", MUTEX_DEFAULT, NULL);
queue_Append(&rx_freeCallQueue, call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nFreeCallStructs);
MUTEX_EXIT(&rx_freeCallQueue_lock);
/* Destroy the connection if it was previously slated for
rx_peerHashTable[hashIndex] = pp;
rxi_InitPeerParams(pp);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nPeerStructs);
}
}
if (pp && create) {
if (service->newConnProc)
(*service->newConnProc) (conn);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nServerConns);
}
MUTEX_ENTER(&conn->conn_data_lock);
* it must be for the previous call.
*/
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.spuriousPacketsRead);
MUTEX_ENTER(&conn->conn_data_lock);
conn->refCount--;
MUTEX_EXIT(&conn->conn_data_lock);
if (type == RX_SERVER_CONNECTION) { /* We're the server */
if (np->header.callNumber < currentCallNumber) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.spuriousPacketsRead);
#ifdef RX_ENABLE_LOCKS
if (call)
MUTEX_EXIT(&call->lock);
conn->refCount--;
MUTEX_EXIT(&conn->conn_data_lock);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nBusies);
return tp;
}
rxi_KeepAliveOn(call);
conn->refCount--;
MUTEX_EXIT(&conn->conn_data_lock);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nBusies);
return tp;
}
rxi_KeepAliveOn(call);
if (call && (call->state == RX_STATE_DALLY)
&& (np->header.type == RX_PACKET_TYPE_ACK)) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.ignorePacketDally);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
* isn't a current call, then no packet is relevant. */
if (!call || (np->header.callNumber != currentCallNumber)) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.spuriousPacketsRead);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
* XXX code in receiveackpacket. */
if (ntohl(rx_GetInt32(np, FIRSTACKOFFSET)) < call->tfirst) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.spuriousPacketsRead);
MUTEX_EXIT(&call->lock);
MUTEX_ENTER(&conn->conn_data_lock);
conn->refCount--;
struct rx_packet *tnp;
struct clock when, now;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.dataPacketsRead);
#ifdef KERNEL
/* If there are no packet buffers, drop this new packet, unless we can find
rxi_NeedMorePackets = TRUE;
MUTEX_EXIT(&rx_freePktQ_lock);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.noPacketBuffersOnRead);
call->rprev = np->header.serial;
rxi_calltrace(RX_TRACE_DROP, call);
dpf(("packet %"AFS_PTR_FMT" dropped on receipt - quota problems", np));
if (queue_IsNotEmpty(&call->rq)
&& queue_First(&call->rq, rx_packet)->header.seq == seq) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.dupPacketsRead);
dpf(("packet %"AFS_PTR_FMT" dropped on receipt - duplicate", np));
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
* application already, then this is a duplicate */
if (seq < call->rnext) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.dupPacketsRead);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
/*Check for duplicate packet */
if (seq == tp->header.seq) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.dupPacketsRead);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE,
int conn_data_locked = 0;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.ackPacketsRead);
ap = (struct rx_ackPacket *)rx_DataOf(np);
nbytes = rx_Contiguous(np) - (int)((ap->acks) - (u_char *) ap);
if (nbytes < 0)
}
conn->error = error;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.fatalErrors);
}
}
}
}
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.ackPacketsSent);
#ifndef RX_ENABLE_TSFPQ
if (!optionalPacket)
rxi_FreePacket(p);
if (rx_stats_active) {
if (resending)
- rx_MutexAdd(rx_stats.dataPacketsReSent, len, rx_stats_mutex);
+ rx_atomic_add(&rx_stats.dataPacketsReSent, len);
else
- rx_MutexAdd(rx_stats.dataPacketsSent, len, rx_stats_mutex);
+ rx_atomic_add(&rx_stats.dataPacketsSent, len);
}
if (list[len - 1]->header.flags & RX_LAST_PACKET) {
if (call->error) {
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
if (rx_stats_active)
- rx_MutexIncrement(rx_tq_debug.rxi_start_in_error, rx_stats_mutex);
+ rx_atomic_inc(&rx_tq_debug.rxi_start_in_error);
#endif
return;
}
/* Since we may block, don't trust this */
usenow.sec = usenow.usec = 0;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.ignoreAckedPacket);
continue; /* Ignore this packet if it has been acknowledged */
}
* process that the call is in an error state.
*/
if (rx_stats_active)
- rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
+ rx_atomic_inc(&rx_tq_debug.rxi_start_aborted);
call->flags &= ~RX_CALL_TQ_BUSY;
if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
dpf(("call error %d while xmit %p has %d waiters and flags %d\n",
rx_stats.maxRtt = *rttp;
}
clock_Add(&rx_stats.totalRtt, rttp);
- rx_stats.nRttSamples++;
+ rx_atomic_inc(&rx_stats.nRttSamples);
MUTEX_EXIT(&rx_stats_mutex);
}
prev->next = next;
if (rx_stats_active)
- rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nPeerStructs);
/*
* Now if we hold references on 'prev' and 'next'
rx_PrintStats(FILE * file)
{
MUTEX_ENTER(&rx_stats_mutex);
- rx_PrintTheseStats(file, &rx_stats, sizeof(rx_stats), rx_nFreePackets,
+ rx_PrintTheseStats(file, (struct rx_statistics *) &rx_stats,
+ sizeof(rx_stats), rx_nFreePackets,
RX_DEBUGI_VERSION);
MUTEX_EXIT(&rx_stats_mutex);
}
next = peer->next;
rxi_FreePeer(peer);
if (rx_stats_active)
- rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nPeerStructs);
}
MUTEX_EXIT(&rx_peerHashTable_lock);
}
#ifdef AFS_NT40_ENV
extern int rx_DumpCalls(FILE *outputFile, char *cookie);
-
-#define rx_MutexIncrement(object, mutex) InterlockedIncrement(&object)
-#define rx_MutexAdd(object, addend, mutex) InterlockedExchangeAdd(&object, addend)
-#define rx_MutexDecrement(object, mutex) InterlockedDecrement(&object)
-#define rx_MutexAdd1Increment2(object1, addend, object2, mutex) \
- do { \
- MUTEX_ENTER(&mutex); \
- object1 += addend; \
- InterlockedIncrement(&object2); \
- MUTEX_EXIT(&mutex); \
- } while (0)
-#define rx_MutexAdd1Decrement2(object1, addend, object2, mutex) \
- do { \
- MUTEX_ENTER(&mutex); \
- object1 += addend; \
- InterlockedDecrement(&object2); \
- MUTEX_EXIT(&mutex); \
- } while (0)
-#else
-#define rx_MutexIncrement(object, mutex) \
- do { \
- MUTEX_ENTER(&mutex); \
- object++; \
- MUTEX_EXIT(&mutex); \
- } while(0)
-#define rx_MutexAdd(object, addend, mutex) \
- do { \
- MUTEX_ENTER(&mutex); \
- object += addend; \
- MUTEX_EXIT(&mutex); \
- } while(0)
-#define rx_MutexAdd1Increment2(object1, addend, object2, mutex) \
- do { \
- MUTEX_ENTER(&mutex); \
- object1 += addend; \
- object2++; \
- MUTEX_EXIT(&mutex); \
- } while(0)
-#define rx_MutexAdd1Decrement2(object1, addend, object2, mutex) \
- do { \
- MUTEX_ENTER(&mutex); \
- object1 += addend; \
- object2--; \
- MUTEX_EXIT(&mutex); \
- } while(0)
-#define rx_MutexDecrement(object, mutex) \
- do { \
- MUTEX_ENTER(&mutex); \
- object--; \
- MUTEX_EXIT(&mutex); \
- } while(0)
#endif
#endif /* _RX_ End of rx.h */
#endif
EXT char rx_waitingForPackets; /* Processes set and wait on this variable when waiting for packet buffers */
-EXT struct rx_statistics rx_stats;
-
EXT struct rx_peer **rx_peerHashTable;
EXT struct rx_connection **rx_connHashTable;
EXT struct rx_connection *rx_connCleanup_list GLOBALSINIT(0);
#endif
#if defined(RX_ENABLE_LOCKS)
-EXT afs_kmutex_t rx_stats_mutex; /* used to protect stats gathering */
EXT afs_kmutex_t rx_waiting_mutex; /* used to protect waiting counters */
EXT afs_kmutex_t rx_quota_mutex; /* used to protect quota counters */
EXT afs_kmutex_t rx_pthread_mutex; /* used to protect pthread counters */
#include "rx/rx_kcommon.h"
+#include "rx_atomic.h"
+#include "rx_stats.h"
#ifdef AFS_HPUX110_ENV
#include "h/tihdr.h"
if (nbytes <= 0) {
if (rx_stats_active) {
MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.bogusPacketOnRead++;
+ rx_atomic_inc(&rx_stats.bogusPacketOnRead);
rx_stats.bogusHost = from.sin_addr.s_addr;
MUTEX_EXIT(&rx_stats_mutex);
}
*port = from.sin_port;
if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
if (rx_stats_active) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetsRead[p->header.type - 1]++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.packetsRead[p->header.type - 1]);
}
}
#endif
# include <assert.h>
# include "rx.h"
+# include "rx_atomic.h"
# include "rx_globals.h"
+# include "rx_stats.h"
# include <lwp.h>
#define MAXTHREADNAMELENGTH 64
tv.tv_usec = cv.usec;
tvp = &tv;
}
- rx_stats.selects++;
+ if (rx_stats_active)
+ rx_atomic_inc(&rx_stats.selects);
*rfds = rx_selectMask;
fd_set *sfds = (fd_set *) 0;
while (sendmsg(socket, msg_p, flags) == -1) {
int err;
- rx_stats.sendSelects++;
+ if (rx_stats_active)
+ rx_atomic_inc(&rx_stats.sendSelects);
if (!sfds) {
if (!(sfds = IOMGR_AllocFDSet())) {
#include "rx/rx_packet.h"
#include "rx/rx_atomic.h"
#include "rx/rx_internal.h"
+#include "rx/rx_stats.h"
#else /* defined(UKERNEL) */
#ifdef RX_KERNEL_TRACE
#include "../rx/rx_kcommon.h"
#endif
#include "rx/rx_packet.h"
#include "rx_internal.h"
+#include "rx_stats.h"
#endif /* defined(UKERNEL) */
#include "rx/rx_globals.h"
#else /* KERNEL */
#include "rx_atomic.h"
#include "rx_globals.h"
#include "rx_internal.h"
+#include "rx_stats.h"
#include <lwp.h>
#include <assert.h>
#include <string.h>
if (rx_stats_active) {
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receivePktAllocFailures);
break;
case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendPktAllocFailures);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.specialPktAllocFailures);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receiveCbufPktAllocFailures);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendCbufPktAllocFailures);
break;
}
}
if (rx_stats_active) {
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receivePktAllocFailures);
break;
case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendPktAllocFailures);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.specialPktAllocFailures);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receiveCbufPktAllocFailures);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendCbufPktAllocFailures);
break;
}
}
#endif /* KERNEL */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.packetRequests);
if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
#ifdef KERNEL
if (rx_stats_active) {
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receivePktAllocFailures);
break;
case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendPktAllocFailures);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.specialPktAllocFailures);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receiveCbufPktAllocFailures);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendCbufPktAllocFailures);
break;
}
}
#endif /* KERNEL */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.packetRequests);
#ifdef KERNEL
if (queue_IsEmpty(&rx_freePacketQueue))
RX_TS_INFO_GET(rx_ts_info);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.packetRequests);
if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
MUTEX_ENTER(&rx_freePktQ_lock);
if ((nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */
if (nbytes < 0 && errno == EWOULDBLOCK) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.noPacketOnRead);
} else if (nbytes <= 0) {
if (rx_stats_active) {
MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.bogusPacketOnRead++;
+ rx_atomic_inc(&rx_stats.bogusPacketOnRead);
rx_stats.bogusHost = from.sin_addr.s_addr;
MUTEX_EXIT(&rx_stats_mutex);
}
dpf(("B: bogus packet from [%x,%d] nb=%d", ntohl(from.sin_addr.s_addr),
ntohs(from.sin_port), nbytes));
if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
if (rx_stats_active) {
struct rx_peer *peer;
- rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.packetsRead[p->header.type - 1]);
/*
* Try to look up this peer structure. If it doesn't exist,
* don't create a new one -
p->length + RX_HEADER_SIZE, istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.netSendFailures);
p->retryTime = p->timeSent; /* resend it very soon */
clock_Addmsec(&(p->retryTime),
10 + (((afs_uint32) p->backoff) << 8));
p->header.seq, p->header.flags, p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
if (rx_stats_active) {
- rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.packetsSent[p->header.type - 1]);
MUTEX_ENTER(&peer->peer_lock);
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);
istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.netSendFailures);
for (i = 0; i < len; i++) {
p = list[i];
p->retryTime = p->timeSent; /* resend it very soon */
#endif
if (rx_stats_active) {
- rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.packetsSent[p->header.type - 1]);
MUTEX_ENTER(&peer->peer_lock);
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);
extern void rxi_FlushWrite(struct rx_call *call);
extern void rx_FlushWrite(struct rx_call *call);
+/* rx_stats.c */
+extern struct rx_statistics *rx_GetStatistics(void);
+extern void rx_FreeStatistics(struct rx_statistics **);
+
/* rx_trace.c */
--- /dev/null
+/*
+ * Copyright (c) 2010 Your File System Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*!
+ * @file rx_stats.c
+ *
+ * Code for handling statistics gathering within RX, and for mapping the
+ * internal representation into an external one
+ */
+#include <afsconfig.h>
+#include <afs/param.h>
+
+#include <string.h>
+
+#include "rx.h"
+#include "rx_atomic.h"
+#include "rx_stats.h"
+
+/* Globals */
+
+/*!
+ * rx_stats_mutex protects the non-atomic members of the rx_stats structure
+ */
+#if defined(RX_ENABLE_LOCKS)
+afs_kmutex_t rx_stats_mutex;
+#endif
+
+struct rx_statisticsAtomic rx_stats;
+
+/*!
+ * Return the internal statistics collected by rx
+ *
+ * @return
+ * A statistics structure which must be freed using rx_FreeStatistics
+ * @note
+ *   Takes and releases rx_stats_mutex
+ */
+struct rx_statistics *
+rx_GetStatistics(void) {
+ struct rx_statistics *stats = rxi_Alloc(sizeof(struct rx_statistics));
+ MUTEX_ENTER(&rx_stats_mutex);
+ memcpy(stats, &rx_stats, sizeof(struct rx_statistics));
+ MUTEX_EXIT(&rx_stats_mutex);
+
+ return stats;
+}
+
+/*!
+ * Free a statistics block allocated by rx_GetStatistics
+ *
+ * @param stats
+ * The statistics block to free
+ */
+void
+rx_FreeStatistics(struct rx_statistics **stats) {
+ if (*stats) {
+ rxi_Free(*stats, sizeof(struct rx_statistics));
+ *stats = NULL;
+ }
+}
+
+/*!
+ * Zero the internal statistics structure
+ *
+ * @private
+ */
+
+void
+rxi_ResetStatistics(void) {
+ memset(&rx_stats, 0, sizeof(struct rx_statisticsAtomic));
+}
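
For context, a minimal caller sketch of the snapshot API introduced above. This is illustrative and not part of the patch; it assumes packetRequests remains a plain int in the external rx_statistics structure, and that rx_GetStatistics() returns a valid block (user-space rxi_Alloc asserts rather than returning NULL).

    #include <stdio.h>
    #include "rx.h"

    static void
    print_packet_requests(void)
    {
        /* Copy the live (atomic) counters into a plain rx_statistics block. */
        struct rx_statistics *snap = rx_GetStatistics();

        printf("packet requests so far: %d\n", snap->packetRequests);

        /* Release the snapshot; rx_FreeStatistics() also NULLs the pointer. */
        rx_FreeStatistics(&snap);
    }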
--- /dev/null
+/*
+ * Copyright (c) 2010 Your File System Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* rx_stats.h
+ *
+ * These are internal structures used by the rx statistics code. Nothing
+ * in this file should be visible outside the RX module.
+ */
+
+/* We memcpy between rx_statisticsAtomic and rx_statistics, so the two
+ * structures must declare the same members in the same order, and
+ * sizeof(rx_atomic_t) must equal sizeof(int)
+ */
+
+struct rx_statisticsAtomic { /* Atomic version of rx_statistics */
+ rx_atomic_t packetRequests;
+ rx_atomic_t receivePktAllocFailures;
+ rx_atomic_t sendPktAllocFailures;
+ rx_atomic_t specialPktAllocFailures;
+ rx_atomic_t socketGreedy;
+ rx_atomic_t bogusPacketOnRead;
+ int bogusHost;
+ rx_atomic_t noPacketOnRead;
+ rx_atomic_t noPacketBuffersOnRead;
+ rx_atomic_t selects;
+ rx_atomic_t sendSelects;
+ rx_atomic_t packetsRead[RX_N_PACKET_TYPES];
+ rx_atomic_t dataPacketsRead;
+ rx_atomic_t ackPacketsRead;
+ rx_atomic_t dupPacketsRead;
+ rx_atomic_t spuriousPacketsRead;
+ rx_atomic_t packetsSent[RX_N_PACKET_TYPES];
+ rx_atomic_t ackPacketsSent;
+ rx_atomic_t pingPacketsSent;
+ rx_atomic_t abortPacketsSent;
+ rx_atomic_t busyPacketsSent;
+ rx_atomic_t dataPacketsSent;
+ rx_atomic_t dataPacketsReSent;
+ rx_atomic_t dataPacketsPushed;
+ rx_atomic_t ignoreAckedPacket;
+ struct clock totalRtt;
+ struct clock minRtt;
+ struct clock maxRtt;
+ rx_atomic_t nRttSamples;
+ rx_atomic_t nServerConns;
+ rx_atomic_t nClientConns;
+ rx_atomic_t nPeerStructs;
+ rx_atomic_t nCallStructs;
+ rx_atomic_t nFreeCallStructs;
+ rx_atomic_t netSendFailures;
+ rx_atomic_t fatalErrors;
+ rx_atomic_t ignorePacketDally;
+ rx_atomic_t receiveCbufPktAllocFailures;
+ rx_atomic_t sendCbufPktAllocFailures;
+ rx_atomic_t nBusies;
+ rx_atomic_t spares[4];
+};
+
+#if defined(RX_ENABLE_LOCKS)
+extern afs_kmutex_t rx_stats_mutex;
+#endif
+
+extern struct rx_statisticsAtomic rx_stats;
+
+extern void rxi_ResetStatistics(void);
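
Since rx_GetStatistics() memcpys between the two structures, the layout requirement stated at the top of this header could be enforced at compile time. A sketch, assuming both struct definitions are in scope (e.g. in rx_stats.c after its includes); the rx_stats_size_assert typedef name is hypothetical:

    /* The array size is 1 when the sizes match, and -1 (a compile error)
     * if struct rx_statisticsAtomic and struct rx_statistics ever diverge. */
    typedef char rx_stats_size_assert[(sizeof(struct rx_statisticsAtomic) ==
                                       sizeof(struct rx_statistics)) ? 1 : -1];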
#ifndef AFS_NT40_ENV
# include <sys/time.h>
#endif
-# include "rx.h"
-# include "rx_globals.h"
-
+#include "rx.h"
+#include "rx_atomic.h"
+#include "rx_globals.h"
+#include "rx_stats.h"
#ifdef AFS_PTHREAD_ENV
#include <assert.h>
if (!greedy)
(osi_Msg "%s*WARNING* Unable to increase buffering on socket\n",
name);
- if (rx_stats_active) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.socketGreedy = greedy;
- MUTEX_EXIT(&rx_stats_mutex);
- }
+ if (rx_stats_active)
+ rx_atomic_set(&rx_stats.socketGreedy, greedy);
}
#ifdef AFS_LINUX22_ENV
rx_misc.o \
rx_packet.o \
rx_rdwr.o \
+ rx_stats.o \
rx_trace.o \
rx_multi.o
rx_rdwr.o: ${RX}/rx_rdwr.c
${CCRULE}
+rx_stats.o: ${RX}/rx_stats.c
+ ${CCRULE}
+
rx_trace.o: ${RX}/rx_trace.c
${CCRULE}
rx_tranquil;
rx_getAllAddr;
rx_nWaiting;
- rx_stats;
rx_SetNoJumbo;
rx_SetConnDeadTime;
rx_FlushWrite;
rx_GetServerStats;
rx_GetServerVersion;
rx_GetServerConnections;
- rx_stats_mutex;
rx_GetServerPeers;
rx_RetrieveProcessRPCStats;
rx_RetrievePeerRPCStats;
int dir_Buffers; /*# buffers in use by dir package */
int dir_Calls; /*# read calls in dir package */
int dir_IOs; /*# I/O ops in dir package */
+ struct rx_statistics *stats;
/*
* Vnode cache section.
/*
* Rx section.
*/
- a_perfP->rx_packetRequests = (afs_int32) rx_stats.packetRequests;
+ stats = rx_GetStatistics();
+
+ a_perfP->rx_packetRequests = (afs_int32) stats->packetRequests;
a_perfP->rx_noPackets_RcvClass =
- (afs_int32) rx_stats.receivePktAllocFailures;
+ (afs_int32) stats->receivePktAllocFailures;
a_perfP->rx_noPackets_SendClass =
- (afs_int32) rx_stats.sendPktAllocFailures;
+ (afs_int32) stats->sendPktAllocFailures;
a_perfP->rx_noPackets_SpecialClass =
- (afs_int32) rx_stats.specialPktAllocFailures;
- a_perfP->rx_socketGreedy = (afs_int32) rx_stats.socketGreedy;
- a_perfP->rx_bogusPacketOnRead = (afs_int32) rx_stats.bogusPacketOnRead;
- a_perfP->rx_bogusHost = (afs_int32) rx_stats.bogusHost;
- a_perfP->rx_noPacketOnRead = (afs_int32) rx_stats.noPacketOnRead;
+ (afs_int32) stats->specialPktAllocFailures;
+ a_perfP->rx_socketGreedy = (afs_int32) stats->socketGreedy;
+ a_perfP->rx_bogusPacketOnRead = (afs_int32) stats->bogusPacketOnRead;
+ a_perfP->rx_bogusHost = (afs_int32) stats->bogusHost;
+ a_perfP->rx_noPacketOnRead = (afs_int32) stats->noPacketOnRead;
a_perfP->rx_noPacketBuffersOnRead =
- (afs_int32) rx_stats.noPacketBuffersOnRead;
- a_perfP->rx_selects = (afs_int32) rx_stats.selects;
- a_perfP->rx_sendSelects = (afs_int32) rx_stats.sendSelects;
+ (afs_int32) stats->noPacketBuffersOnRead;
+ a_perfP->rx_selects = (afs_int32) stats->selects;
+ a_perfP->rx_sendSelects = (afs_int32) stats->sendSelects;
a_perfP->rx_packetsRead_RcvClass =
- (afs_int32) rx_stats.packetsRead[RX_PACKET_CLASS_RECEIVE];
+ (afs_int32) stats->packetsRead[RX_PACKET_CLASS_RECEIVE];
a_perfP->rx_packetsRead_SendClass =
- (afs_int32) rx_stats.packetsRead[RX_PACKET_CLASS_SEND];
+ (afs_int32) stats->packetsRead[RX_PACKET_CLASS_SEND];
a_perfP->rx_packetsRead_SpecialClass =
- (afs_int32) rx_stats.packetsRead[RX_PACKET_CLASS_SPECIAL];
- a_perfP->rx_dataPacketsRead = (afs_int32) rx_stats.dataPacketsRead;
- a_perfP->rx_ackPacketsRead = (afs_int32) rx_stats.ackPacketsRead;
- a_perfP->rx_dupPacketsRead = (afs_int32) rx_stats.dupPacketsRead;
+ (afs_int32) stats->packetsRead[RX_PACKET_CLASS_SPECIAL];
+ a_perfP->rx_dataPacketsRead = (afs_int32) stats->dataPacketsRead;
+ a_perfP->rx_ackPacketsRead = (afs_int32) stats->ackPacketsRead;
+ a_perfP->rx_dupPacketsRead = (afs_int32) stats->dupPacketsRead;
a_perfP->rx_spuriousPacketsRead =
- (afs_int32) rx_stats.spuriousPacketsRead;
+ (afs_int32) stats->spuriousPacketsRead;
a_perfP->rx_packetsSent_RcvClass =
- (afs_int32) rx_stats.packetsSent[RX_PACKET_CLASS_RECEIVE];
+ (afs_int32) stats->packetsSent[RX_PACKET_CLASS_RECEIVE];
a_perfP->rx_packetsSent_SendClass =
- (afs_int32) rx_stats.packetsSent[RX_PACKET_CLASS_SEND];
+ (afs_int32) stats->packetsSent[RX_PACKET_CLASS_SEND];
a_perfP->rx_packetsSent_SpecialClass =
- (afs_int32) rx_stats.packetsSent[RX_PACKET_CLASS_SPECIAL];
- a_perfP->rx_ackPacketsSent = (afs_int32) rx_stats.ackPacketsSent;
- a_perfP->rx_pingPacketsSent = (afs_int32) rx_stats.pingPacketsSent;
- a_perfP->rx_abortPacketsSent = (afs_int32) rx_stats.abortPacketsSent;
- a_perfP->rx_busyPacketsSent = (afs_int32) rx_stats.busyPacketsSent;
- a_perfP->rx_dataPacketsSent = (afs_int32) rx_stats.dataPacketsSent;
- a_perfP->rx_dataPacketsReSent = (afs_int32) rx_stats.dataPacketsReSent;
- a_perfP->rx_dataPacketsPushed = (afs_int32) rx_stats.dataPacketsPushed;
- a_perfP->rx_ignoreAckedPacket = (afs_int32) rx_stats.ignoreAckedPacket;
- a_perfP->rx_totalRtt_Sec = (afs_int32) rx_stats.totalRtt.sec;
- a_perfP->rx_totalRtt_Usec = (afs_int32) rx_stats.totalRtt.usec;
- a_perfP->rx_minRtt_Sec = (afs_int32) rx_stats.minRtt.sec;
- a_perfP->rx_minRtt_Usec = (afs_int32) rx_stats.minRtt.usec;
- a_perfP->rx_maxRtt_Sec = (afs_int32) rx_stats.maxRtt.sec;
- a_perfP->rx_maxRtt_Usec = (afs_int32) rx_stats.maxRtt.usec;
- a_perfP->rx_nRttSamples = (afs_int32) rx_stats.nRttSamples;
- a_perfP->rx_nServerConns = (afs_int32) rx_stats.nServerConns;
- a_perfP->rx_nClientConns = (afs_int32) rx_stats.nClientConns;
- a_perfP->rx_nPeerStructs = (afs_int32) rx_stats.nPeerStructs;
- a_perfP->rx_nCallStructs = (afs_int32) rx_stats.nCallStructs;
- a_perfP->rx_nFreeCallStructs = (afs_int32) rx_stats.nFreeCallStructs;
+ (afs_int32) stats->packetsSent[RX_PACKET_CLASS_SPECIAL];
+ a_perfP->rx_ackPacketsSent = (afs_int32) stats->ackPacketsSent;
+ a_perfP->rx_pingPacketsSent = (afs_int32) stats->pingPacketsSent;
+ a_perfP->rx_abortPacketsSent = (afs_int32) stats->abortPacketsSent;
+ a_perfP->rx_busyPacketsSent = (afs_int32) stats->busyPacketsSent;
+ a_perfP->rx_dataPacketsSent = (afs_int32) stats->dataPacketsSent;
+ a_perfP->rx_dataPacketsReSent = (afs_int32) stats->dataPacketsReSent;
+ a_perfP->rx_dataPacketsPushed = (afs_int32) stats->dataPacketsPushed;
+ a_perfP->rx_ignoreAckedPacket = (afs_int32) stats->ignoreAckedPacket;
+ a_perfP->rx_totalRtt_Sec = (afs_int32) stats->totalRtt.sec;
+ a_perfP->rx_totalRtt_Usec = (afs_int32) stats->totalRtt.usec;
+ a_perfP->rx_minRtt_Sec = (afs_int32) stats->minRtt.sec;
+ a_perfP->rx_minRtt_Usec = (afs_int32) stats->minRtt.usec;
+ a_perfP->rx_maxRtt_Sec = (afs_int32) stats->maxRtt.sec;
+ a_perfP->rx_maxRtt_Usec = (afs_int32) stats->maxRtt.usec;
+ a_perfP->rx_nRttSamples = (afs_int32) stats->nRttSamples;
+ a_perfP->rx_nServerConns = (afs_int32) stats->nServerConns;
+ a_perfP->rx_nClientConns = (afs_int32) stats->nClientConns;
+ a_perfP->rx_nPeerStructs = (afs_int32) stats->nPeerStructs;
+ a_perfP->rx_nCallStructs = (afs_int32) stats->nCallStructs;
+ a_perfP->rx_nFreeCallStructs = (afs_int32) stats->nFreeCallStructs;
a_perfP->host_NumHostEntries = HTs;
a_perfP->host_HostBlocks = HTBlocks;
a_perfP->host_ClientBlocks = CEBlocks;
a_perfP->sysname_ID = afs_perfstats.sysname_ID;
- a_perfP->rx_nBusies = (afs_int32) rx_stats.nBusies;
+ a_perfP->rx_nBusies = (afs_int32) stats->nBusies;
a_perfP->fs_nBusies = afs_perfstats.fs_nBusies;
+ rx_FreeStatistics(&stats);
} /*FillPerfValues */