tconn->refCount++; /* no lock required since only this thread knows */
tconn->next = rx_connHashTable[hashindex];
rx_connHashTable[hashindex] = tconn;
- rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
}
MUTEX_EXIT(&rx_connHashTable_lock);
conn->peer->idleWhen = clock_Sec();
if (conn->peer->refCount < 1) {
conn->peer->refCount = 1;
- rx_MutexIncrement(rxi_lowPeerRefCount, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rxi_lowPeerRefCount, rx_stats_mutex);
}
}
conn->peer->refCount--;
MUTEX_EXIT(&rx_peerHashTable_lock);
- if (conn->type == RX_SERVER_CONNECTION)
- rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
- else
- rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
+ if (rx_stats_active) {
+ if (conn->type == RX_SERVER_CONNECTION)
+ rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
+ else
+ rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
+ }
#ifndef KERNEL
if (conn->specific) {
int i;
if (conn->refCount > 0)
conn->refCount--;
else {
- MUTEX_ENTER(&rx_stats_mutex);
- rxi_lowConnRefCount++;
- MUTEX_EXIT(&rx_stats_mutex);
+ if (rx_stats_active) {
+ MUTEX_ENTER(&rx_stats_mutex);
+ rxi_lowConnRefCount++;
+ MUTEX_EXIT(&rx_stats_mutex);
+ }
}
if ((conn->refCount > 0) || (conn->flags & RX_CONN_BUSY)) {
(*tservice->afterProc) (call, code);
rx_EndCall(call, code);
- rx_MutexIncrement(rxi_nCalls, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rxi_nCalls, rx_stats_mutex);
}
}
call = queue_First(&rx_freeCallQueue, rx_call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
queue_Remove(call);
- rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_ENTER(&call->lock);
CLEAR_CALL_QUEUE_LOCK(call);
#else /* AFS_GLOBAL_RXLOCK_KERNEL */
queue_Append(&rx_freeCallQueue, call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
MUTEX_EXIT(&rx_freeCallQueue_lock);
/* Destroy the connection if it was previously slated for
{
register char *p;
- rx_MutexAdd1Increment2(rxi_Allocsize, (afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexAdd1Increment2(rxi_Allocsize, (afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
p = (char *)
#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
void
rxi_Free(void *addr, register size_t size)
{
- rx_MutexAdd1Decrement2(rxi_Allocsize, -(afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexAdd1Decrement2(rxi_Allocsize, -(afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
osi_Free(addr, size);
}
pp->next = rx_peerHashTable[hashIndex];
rx_peerHashTable[hashIndex] = pp;
rxi_InitPeerParams(pp);
- rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
}
}
if (pp && create) {
/* XXXX Connection timeout? */
if (service->newConnProc)
(*service->newConnProc) (conn);
- rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
}
rx_MutexIncrement(conn->refCount, conn->conn_data_lock);
* then, since this is a client connection we're getting data for
* it must be for the previous call.
*/
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
return np;
}
if (type == RX_SERVER_CONNECTION) { /* We're the server */
if (np->header.callNumber < currentCallNumber) {
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call)
MUTEX_EXIT(&call->lock);
tp = rxi_SendCallAbort(call, np, 1, 0);
MUTEX_EXIT(&call->lock);
rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
- rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
return tp;
}
rxi_KeepAliveOn(call);
tp = rxi_SendCallAbort(call, np, 1, 0);
MUTEX_EXIT(&call->lock);
rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
- rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
return tp;
}
rxi_KeepAliveOn(call);
/* Ignore all incoming acknowledgements for calls in DALLY state */
if (call && (call->state == RX_STATE_DALLY)
&& (np->header.type == RX_PACKET_TYPE_ACK)) {
- rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
/* Ignore anything that's not relevant to the current call. If there
* isn't a current call, then no packet is relevant. */
if (!call || (np->header.callNumber != currentCallNumber)) {
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
* XXX interact badly with the server-restart detection
* XXX code in receiveackpacket. */
if (ntohl(rx_GetInt32(np, FIRSTACKOFFSET)) < call->tfirst) {
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
MUTEX_EXIT(&call->lock);
rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
return np;
int isFirst;
struct rx_packet *tnp;
struct clock when, now;
- rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
#ifdef KERNEL
/* If there are no packet buffers, drop this new packet, unless we can find
MUTEX_ENTER(&rx_freePktQ_lock);
rxi_NeedMorePackets = TRUE;
MUTEX_EXIT(&rx_freePktQ_lock);
- rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
call->rprev = np->header.serial;
rxi_calltrace(RX_TRACE_DROP, call);
dpf(("packet %x dropped on receipt - quota problems", np));
/* Check to make sure it is not a duplicate of one already queued */
if (queue_IsNotEmpty(&call->rq)
&& queue_First(&call->rq, rx_packet)->header.seq == seq) {
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
dpf(("packet %x dropped on receipt - duplicate", np));
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
/* If the new packet's sequence number has been sent to the
* application already, then this is a duplicate */
if (seq < call->rnext) {
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
0, queue_Scan(&call->rq, tp, nxp, rx_packet)) {
/*Check for duplicate packet */
if (seq == tp->header.seq) {
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE,
u_short maxMTU = 0; /* Set if peer supports AFS 3.4a jumbo datagrams */
int maxDgramPackets = 0; /* Set if peer supports AFS 3.5 jumbo datagrams */
- rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
ap = (struct rx_ackPacket *)rx_DataOf(np);
nbytes = rx_Contiguous(np) - (int)((ap->acks) - (u_char *) ap);
if (nbytes < 0)
}
}
rx_SetConnError(conn, error);
- rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
}
}
nbytes -= p->wirevec[i].iov_len;
}
}
- rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
#ifndef RX_ENABLE_TSFPQ
if (!optionalPacket)
rxi_FreePacket(p);
peer->nSent += len;
if (resending)
peer->reSends += len;
- rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
MUTEX_EXIT(&peer->peer_lock);
if (list[len - 1]->header.flags & RX_LAST_PACKET) {
* packet until the congestion window reaches the ack rate. */
if (list[i]->header.serial) {
requestAck = 1;
- rx_MutexIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
} else {
/* improved RTO calculation- not Karn */
list[i]->firstSent = *now;
peer->nSent++;
if (resending)
peer->reSends++;
- rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
MUTEX_EXIT(&peer->peer_lock);
/* Tag this packet as not being the last in this group,
}
if (call->error) {
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- rx_MutexIncrement(rx_tq_debug.rxi_start_in_error, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_tq_debug.rxi_start_in_error, rx_stats_mutex);
#endif
return;
}
if (p->flags & RX_PKTFLAG_ACKED) {
/* Since we may block, don't trust this */
usenow.sec = usenow.usec = 0;
- rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
continue; /* Ignore this packet if it has been acknowledged */
}
* the time to reset the call. This will also inform the using
* process that the call is in an error state.
*/
- rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
call->flags &= ~RX_CALL_TQ_BUSY;
if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
dpf(("call %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
return; /* somebody set the clock back, don't count this time. */
}
clock_Sub(rttp, sentp);
- MUTEX_ENTER(&rx_stats_mutex);
- if (clock_Lt(rttp, &rx_stats.minRtt))
- rx_stats.minRtt = *rttp;
- if (clock_Gt(rttp, &rx_stats.maxRtt)) {
- if (rttp->sec > 60) {
- MUTEX_EXIT(&rx_stats_mutex);
- return; /* somebody set the clock ahead */
- }
- rx_stats.maxRtt = *rttp;
+ if (rx_stats_active) {
+ MUTEX_ENTER(&rx_stats_mutex);
+ if (clock_Lt(rttp, &rx_stats.minRtt))
+ rx_stats.minRtt = *rttp;
+ if (clock_Gt(rttp, &rx_stats.maxRtt)) {
+ if (rttp->sec > 60) {
+ MUTEX_EXIT(&rx_stats_mutex);
+ return; /* somebody set the clock ahead */
+ }
+ rx_stats.maxRtt = *rttp;
+ }
+ clock_Add(&rx_stats.totalRtt, rttp);
+ rx_stats.nRttSamples++;
+ MUTEX_EXIT(&rx_stats_mutex);
}
- clock_Add(&rx_stats.totalRtt, rttp);
- rx_stats.nRttSamples++;
- MUTEX_EXIT(&rx_stats_mutex);
/* better rtt calculation courtesy of UMich crew (dave,larry,peter,?) */
rxi_rpc_peer_stat_cnt -= num_funcs;
}
rxi_FreePeer(peer);
- rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
if (peer == *peer_ptr) {
*peer_ptr = next;
prev = next;
{
rxdebug_active = on;
}
+
+void
+rx_StatsOnOff(int on)
+{
+ rx_stats_active = on;
+}
#endif /* AFS_NT40_ENV */
}
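The only new entry point in this change is rx_StatsOnOff(), added alongside the existing rxdebug toggle (and, judging by the surrounding #endif, compiled only under AFS_NT40_ENV in this hunk). A minimal usage sketch, with a hypothetical admin wrapper around the real function:

/* Illustrative only: a hypothetical control hook that disables statistics
 * gathering during normal operation and re-enables it for troubleshooting.
 * Only rx_StatsOnOff() comes from this change; the wrapper is made up. */
void
admin_toggle_rx_stats(int enable)
{
    rx_StatsOnOff(enable);      /* sets rx_stats_active = enable */
}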
next = peer->next;
rxi_FreePeer(peer);
- rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
}
}
}
if (overq) {
rxi_NeedMorePackets = TRUE;
- switch (class) {
- case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
- break;
+ if (rx_stats_active) {
+ switch (class) {
+ case RX_PACKET_CLASS_RECEIVE:
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND:
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SPECIAL:
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_RECV_CBUF:
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND_CBUF:
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ }
}
}
#ifdef KERNEL
if (rxi_OverQuota(class)) {
rxi_NeedMorePackets = TRUE;
- switch (class) {
- case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
- break;
+ if (rx_stats_active) {
+ switch (class) {
+ case RX_PACKET_CLASS_RECEIVE:
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND:
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SPECIAL:
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_RECV_CBUF:
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND_CBUF:
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ }
}
return (struct rx_packet *)0;
}
#endif /* KERNEL */
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
#ifdef KERNEL
#ifdef KERNEL
if (rxi_OverQuota(class)) {
rxi_NeedMorePackets = TRUE;
- switch (class) {
- case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
- break;
- }
+ if (rx_stats_active) {
+ switch (class) {
+ case RX_PACKET_CLASS_RECEIVE:
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND:
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SPECIAL:
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_RECV_CBUF:
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND_CBUF:
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ }
+ }
return (struct rx_packet *)0;
}
#endif /* KERNEL */
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
#ifdef KERNEL
if (queue_IsEmpty(&rx_freePacketQueue))
RX_TS_INFO_GET(rx_ts_info);
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
MUTEX_ENTER(&rx_freePktQ_lock);
p->length = (nbytes - RX_HEADER_SIZE);
if ((nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */
if (nbytes < 0 && errno == EWOULDBLOCK) {
- rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
} else if (nbytes <= 0) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.bogusPacketOnRead++;
- rx_stats.bogusHost = from.sin_addr.s_addr;
- MUTEX_EXIT(&rx_stats_mutex);
+ if (rx_stats_active) {
+ MUTEX_ENTER(&rx_stats_mutex);
+ rx_stats.bogusPacketOnRead++;
+ rx_stats.bogusHost = from.sin_addr.s_addr;
+ MUTEX_EXIT(&rx_stats_mutex);
+ }
dpf(("B: bogus packet from [%x,%d] nb=%d", ntohl(from.sin_addr.s_addr),
ntohs(from.sin_port), nbytes));
}
*port = from.sin_port;
if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
struct rx_peer *peer;
- rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
/*
* Try to look up this peer structure. If it doesn't exist,
* don't create a new one -
return ap;
/* Since it's all int32s, convert to network order with a loop. */
+ if (rx_stats_active)
MUTEX_ENTER(&rx_stats_mutex);
s = (afs_int32 *) & rx_stats;
for (i = 0; i < sizeof(rx_stats) / sizeof(afs_int32); i++, s++)
tl = ap->length;
ap->length = sizeof(rx_stats);
+ if (rx_stats_active)
MUTEX_EXIT(&rx_stats_mutex);
rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
ap->length = tl;
osi_NetSend(socket, &addr, p->wirevec, p->niovecs,
p->length + RX_HEADER_SIZE, istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
- rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
p->retryTime = p->timeSent; /* resend it very soon */
clock_Addmsec(&(p->retryTime),
10 + (((afs_uint32) p->backoff) << 8));
}
dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
- rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
MUTEX_ENTER(&peer->peer_lock);
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);
osi_NetSend(socket, &addr, &wirevec[0], len + 1, length,
istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
- rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
for (i = 0; i < len; i++) {
p = list[i];
p->retryTime = p->timeSent; /* resend it very soon */
dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
- rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
MUTEX_ENTER(&peer->peer_lock);
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);
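Every hunk above applies the same transformation: the rx_stats counter updates (rx_MutexIncrement, rx_MutexDecrement, and the explicit MUTEX_ENTER/MUTEX_EXIT blocks) are skipped entirely when rx_stats_active is clear, so servers that do not collect statistics avoid taking rx_stats_mutex on hot packet and call paths. A standalone sketch of the pattern, using a pthread mutex as a stand-in for the Rx MUTEX_* macros (only rx_stats_active and the guard itself come from the patch; the other names here are illustrative):

/* Guarded-counter pattern: pay for the lock only when stats are wanted. */
#include <pthread.h>

static int rx_stats_active = 1;                  /* toggled by rx_StatsOnOff() */
static pthread_mutex_t stats_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned long packets_read;

static void
count_packet_read(void)
{
    if (!rx_stats_active)                        /* fast path: no lock taken */
        return;
    pthread_mutex_lock(&stats_mutex);
    packets_read++;
    pthread_mutex_unlock(&stats_mutex);
}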