rx_StartClientThread(void)
{
#ifdef AFS_PTHREAD_ENV
- int pid;
- pid = (int) pthread_self();
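+ /* pthread_t is an opaque type that need not fit in (or cast cleanly
+  * to) an int, so keep the native type returned by pthread_self(). */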
+ pthread_t pid;
+ pid = pthread_self();
#endif /* AFS_PTHREAD_ENV */
}
#endif /* AFS_NT40_ENV */
conn->refCount++; /* no lock required since only this thread knows... */
conn->next = rx_connHashTable[hashindex];
rx_connHashTable[hashindex] = conn;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nClientConns++;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
MUTEX_EXIT(&rx_connHashTable_lock);
USERPRI;
return conn;
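(For reference, a minimal sketch of what the new single-statement stat
macros are assumed to expand to in rx.h. The do { ... } while (0)
wrapper is an assumption, but something like it is needed for the
unbraced if/else callers below to stay well-formed.)

#define rx_MutexIncrement(object, mutex) \
    do { \
        MUTEX_ENTER(&(mutex)); \
        (object)++; \
        MUTEX_EXIT(&(mutex)); \
    } while (0)

#define rx_MutexDecrement(object, mutex) \
    do { \
        MUTEX_ENTER(&(mutex)); \
        (object)--; \
        MUTEX_EXIT(&(mutex)); \
    } while (0)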
conn->peer->refCount--;
MUTEX_EXIT(&rx_peerHashTable_lock);
- MUTEX_ENTER(&rx_stats_mutex);
if (conn->type == RX_SERVER_CONNECTION)
- rx_stats.nServerConns--;
+ rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
else
- rx_stats.nClientConns--;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
#ifndef KERNEL
if (conn->specific) {
int i;
call = queue_First(&rx_freeCallQueue, rx_call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
queue_Remove(call);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nFreeCallStructs--;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_ENTER(&call->lock);
CLEAR_CALL_QUEUE_LOCK(call);
CV_INIT(&call->cv_rq, "call rq", CV_DEFAULT, 0);
CV_INIT(&call->cv_tq, "call tq", CV_DEFAULT, 0);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nCallStructs++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.nCallStructs, rx_stats_mutex);
/* Initialize once-only items */
queue_Init(&call->tq);
queue_Init(&call->rq);
#else /* AFS_GLOBAL_RXLOCK_KERNEL */
queue_Append(&rx_freeCallQueue, call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nFreeCallStructs++;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
MUTEX_EXIT(&rx_freeCallQueue_lock);
/* Destroy the connection if it was previously slated for
{
register char *p;
- MUTEX_ENTER(&rx_stats_mutex);
- rxi_Alloccnt++;
- rxi_Allocsize += (afs_int32)size;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexAdd1Increment2(rxi_Allocsize, (afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
p = (char *)osi_Alloc(size);
if (!p)
void
rxi_Free(void *addr, register size_t size)
{
- MUTEX_ENTER(&rx_stats_mutex);
- rxi_Alloccnt--;
- rxi_Allocsize -= (afs_int32)size;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexAdd1Decrement2(rxi_Allocsize, -(afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
osi_Free(addr, size);
}
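(Similarly, a hedged sketch of the assumed two-variable forms: the
"Add1" half applies a signed delta to the first object and the
"Increment2"/"Decrement2" half bumps the second, all under a single
hold of the mutex. There being no subtracting variant is presumably
why rxi_Free above passes -(afs_int32)size as the delta.)

#define rx_MutexAdd1Increment2(object1, addend, object2, mutex) \
    do { \
        MUTEX_ENTER(&(mutex)); \
        (object1) += (addend); \
        (object2)++; \
        MUTEX_EXIT(&(mutex)); \
    } while (0)

#define rx_MutexAdd1Decrement2(object1, addend, object2, mutex) \
    do { \
        MUTEX_ENTER(&(mutex)); \
        (object1) += (addend); \
        (object2)--; \
        MUTEX_EXIT(&(mutex)); \
    } while (0)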
pp->next = rx_peerHashTable[hashIndex];
rx_peerHashTable[hashIndex] = pp;
rxi_InitPeerParams(pp);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nPeerStructs++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
}
}
if (pp && create) {
/* XXXX Connection timeout? */
if (service->newConnProc)
(*service->newConnProc) (conn);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nServerConns++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
}
MUTEX_ENTER(&conn->conn_data_lock);
* then, since this is a client connection we're getting data for
* it must be for the previous call.
*/
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.spuriousPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
MUTEX_ENTER(&conn->conn_data_lock);
conn->refCount--;
MUTEX_EXIT(&conn->conn_data_lock);
if (type == RX_SERVER_CONNECTION) { /* We're the server */
if (np->header.callNumber < currentCallNumber) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.spuriousPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call)
MUTEX_EXIT(&call->lock);
MUTEX_ENTER(&conn->conn_data_lock);
conn->refCount--;
MUTEX_EXIT(&conn->conn_data_lock);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nBusies++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
return tp;
}
rxi_KeepAliveOn(call);
MUTEX_ENTER(&conn->conn_data_lock);
conn->refCount--;
MUTEX_EXIT(&conn->conn_data_lock);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nBusies++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
return tp;
}
rxi_KeepAliveOn(call);
/* Ignore all incoming acknowledgements for calls in DALLY state */
if (call && (call->state == RX_STATE_DALLY)
&& (np->header.type == RX_PACKET_TYPE_ACK)) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.ignorePacketDally++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
/* Ignore anything that's not relevant to the current call. If there
* isn't a current call, then no packet is relevant. */
if (!call || (np->header.callNumber != currentCallNumber)) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.spuriousPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
* XXX interact badly with the server-restart detection
* XXX code in receiveackpacket. */
if (ntohl(rx_GetInt32(np, FIRSTACKOFFSET)) < call->tfirst) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.spuriousPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
MUTEX_EXIT(&call->lock);
MUTEX_ENTER(&conn->conn_data_lock);
conn->refCount--;
int isFirst;
struct rx_packet *tnp;
struct clock when;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.dataPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
#ifdef KERNEL
/* If there are no packet buffers, drop this new packet, unless we can find
MUTEX_ENTER(&rx_freePktQ_lock);
rxi_NeedMorePackets = TRUE;
MUTEX_EXIT(&rx_freePktQ_lock);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.noPacketBuffersOnRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
call->rprev = np->header.serial;
rxi_calltrace(RX_TRACE_DROP, call);
dpf(("packet %x dropped on receipt - quota problems", np));
/* Check to make sure it is not a duplicate of one already queued */
if (queue_IsNotEmpty(&call->rq)
&& queue_First(&call->rq, rx_packet)->header.seq == seq) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.dupPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
dpf(("packet %x dropped on receipt - duplicate", np));
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
/* If the new packet's sequence number has been sent to the
* application already, then this is a duplicate */
if (seq < call->rnext) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.dupPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
0, queue_Scan(&call->rq, tp, nxp, rx_packet)) {
/*Check for duplicate packet */
if (seq == tp->header.seq) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.dupPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE,
u_short maxMTU = 0; /* Set if peer supports AFS 3.4a jumbo datagrams */
int maxDgramPackets = 0; /* Set if peer supports AFS 3.5 jumbo datagrams */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.ackPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
ap = (struct rx_ackPacket *)rx_DataOf(np);
nbytes = rx_Contiguous(np) - (int)((ap->acks) - (u_char *) ap);
if (nbytes < 0)
}
}
conn->error = error;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.fatalErrors++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
}
}
nbytes -= p->wirevec[i].iov_len;
}
}
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.ackPacketsSent++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
#ifndef RX_ENABLE_TSFPQ
if (!optionalPacket)
rxi_FreePacket(p);
peer->nSent += len;
if (resending)
peer->reSends += len;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.dataPacketsSent += len;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
MUTEX_EXIT(&peer->peer_lock);
if (list[len - 1]->header.flags & RX_LAST_PACKET) {
* packet until the congestion window reaches the ack rate. */
if (list[i]->header.serial) {
requestAck = 1;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.dataPacketsReSent++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
} else {
/* improved RTO calculation- not Karn */
list[i]->firstSent = *now;
peer->nSent++;
if (resending)
peer->reSends++;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.dataPacketsSent++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
MUTEX_EXIT(&peer->peer_lock);
/* Tag this packet as not being the last in this group,
}
if (call->error) {
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- MUTEX_ENTER(&rx_stats_mutex);
- rx_tq_debug.rxi_start_in_error++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_tq_debug.rxi_start_in_error, rx_stats_mutex);
#endif
return;
}
osi_Panic("rxi_Start: xmit queue clobbered");
}
if (p->flags & RX_PKTFLAG_ACKED) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.ignoreAckedPacket++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
continue; /* Ignore this packet if it has been acknowledged */
}
* the time to reset the call. This will also inform the using
* process that the call is in an error state.
*/
- MUTEX_ENTER(&rx_stats_mutex);
- rx_tq_debug.rxi_start_aborted++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
call->flags &= ~RX_CALL_TQ_BUSY;
if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
dpf(("call %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
rxi_rpc_peer_stat_cnt -= num_funcs;
}
rxi_FreePeer(peer);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nPeerStructs--;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
if (peer == *peer_ptr) {
*peer_ptr = next;
prev = next;
void *outputData, size_t outputLength)
{
static afs_int32 counter = 100;
- time_t waitTime, waitCount, startTime, endTime;
+ time_t waitTime, waitCount, startTime;
struct rx_header theader;
char tbuffer[1500];
register afs_int32 code;
}
next = peer->next;
rxi_FreePeer(peer);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nPeerStructs--;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
}
}
}
static int
AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
{
- register struct rx_packet *c;
register struct rx_ts_info_t * rx_ts_info;
int transfer, alloc;
SPLVAR;
if (overq) {
rxi_NeedMorePackets = TRUE;
- MUTEX_ENTER(&rx_stats_mutex);
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_stats.receivePktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND:
- rx_stats.sendPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_stats.specialPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_stats.receiveCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_stats.sendCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
break;
}
- MUTEX_EXIT(&rx_stats_mutex);
}
if (rx_nFreePackets < num_pkts)
#ifdef KERNEL
if (rxi_OverQuota(class)) {
rxi_NeedMorePackets = TRUE;
- MUTEX_ENTER(&rx_stats_mutex);
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_stats.receivePktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND:
- rx_stats.sendPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_stats.specialPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_stats.receiveCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_stats.sendCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
break;
}
- MUTEX_EXIT(&rx_stats_mutex);
- return (struct rx_packet *)0;
+ return (struct rx_packet *)0;
}
#endif /* KERNEL */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetRequests++;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
#ifdef KERNEL
#ifdef KERNEL
if (rxi_OverQuota(class)) {
rxi_NeedMorePackets = TRUE;
- MUTEX_ENTER(&rx_stats_mutex);
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_stats.receivePktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND:
- rx_stats.sendPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_stats.specialPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_stats.receiveCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_stats.sendCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
break;
}
- MUTEX_EXIT(&rx_stats_mutex);
return (struct rx_packet *)0;
}
#endif /* KERNEL */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetRequests++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
#ifdef KERNEL
if (queue_IsEmpty(&rx_freePacketQueue))
RX_TS_INFO_GET(rx_ts_info);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetRequests++;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
MUTEX_ENTER(&rx_freePktQ_lock);
p->length = (nbytes - RX_HEADER_SIZE);
if ((nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */
if (nbytes < 0 && errno == EWOULDBLOCK) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.noPacketOnRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
} else if (nbytes <= 0) {
MUTEX_ENTER(&rx_stats_mutex);
rx_stats.bogusPacketOnRead++;
*port = from.sin_port;
if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
struct rx_peer *peer;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetsRead[p->header.type - 1]++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
/*
* Try to look up this peer structure. If it doesn't exist,
* don't create a new one -
osi_NetSend(socket, &addr, p->wirevec, p->niovecs,
p->length + RX_HEADER_SIZE, istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.netSendFailures++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
p->retryTime = p->timeSent; /* resend it very soon */
clock_Addmsec(&(p->retryTime),
10 + (((afs_uint32) p->backoff) << 8));
}
dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetsSent[p->header.type - 1]++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
MUTEX_ENTER(&peer->peer_lock);
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);
osi_NetSend(socket, &addr, &wirevec[0], len + 1, length,
istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.netSendFailures++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
for (i = 0; i < len; i++) {
p = list[i];
p->retryTime = p->timeSent; /* resend it very soon */
dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetsSent[p->header.type - 1]++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
MUTEX_ENTER(&peer->peer_lock);
-
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);
}
register struct rx_packet *p, register int last)
{
register struct rx_connection *conn = call->conn;
- int i, j;
+ int i;
ssize_t len; /* len must be a signed type; it can go negative */
p->flags &= ~RX_PKTFLAG_ACKED;