hashindex =
CONN_HASH(shost, sport, tconn->cid, tconn->epoch,
RX_CLIENT_CONNECTION);
- tconn->refCount++; /* no lock required since only this thread knows */
+ rx_AtomicIncrement_NL(tconn->refCount); /* no lock required since only this thread knows */
tconn->next = rx_connHashTable[hashindex];
rx_connHashTable[hashindex] = tconn;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.nClientConns, rx_stats_mutex);
}
MUTEX_EXIT(&rx_connHashTable_lock);
tconn->secondsUntilPing = rx_ConnSecondsUntilDead(tconn) / 6;
}
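
A minimal usage sketch of the two macro flavors this patch introduces (illustrative only; "tc" stands in for a freshly allocated, not-yet-published connection):

    struct rx_connection *tc = ...;        /* hypothetical fresh allocation */
    rx_AtomicIncrement_NL(tc->refCount);   /* _NL: no other thread can see tc yet */
    rx_AtomicIncrement(rx_stats.nClientConns, rx_stats_mutex); /* shared counter */

The _NL ("no lock") forms are for objects private to the calling thread or already covered by a held mutex; the plain forms name the mutex that guards the counter on platforms that fall back to locked updates.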
-int rxi_lowPeerRefCount = 0;
-int rxi_lowConnRefCount = 0;
+rx_atomic_t rxi_lowPeerRefCount = 0;
+rx_atomic_t rxi_lowConnRefCount = 0;
/*
 * Cleanup a connection that was destroyed in rxi_DestroyConnectionNoLock.
 * The peer it references is left in the hash table for the reaper, which
 * frees peers that have stayed idle (refCount == 0) after rx_idlePeerTime
 * (60 seconds) have passed.
*/
MUTEX_ENTER(&rx_peerHashTable_lock);
- if (conn->peer->refCount < 2) {
+ if (rx_AtomicDecrement_NL(conn->peer->refCount) < 1) {
conn->peer->idleWhen = clock_Sec();
- if (conn->peer->refCount < 1) {
- conn->peer->refCount = 1;
+ if (rx_AtomicPeek_NL(conn->peer->refCount) < 0) {
+ rx_AtomicSwap_NL(&conn->peer->refCount, 0);
+ dpf(("UNDERCOUNT(peer %x)\n", conn->peer));
if (rx_stats_active)
- rx_MutexIncrement(rxi_lowPeerRefCount, rx_stats_mutex);
+ rx_AtomicIncrement(rxi_lowPeerRefCount, rx_stats_mutex);
}
}
- conn->peer->refCount--;
MUTEX_EXIT(&rx_peerHashTable_lock);
if (rx_stats_active)
{
if (conn->type == RX_SERVER_CONNECTION)
- rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
+ rx_AtomicDecrement(rx_stats.nServerConns, rx_stats_mutex);
else
- rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
+ rx_AtomicDecrement(rx_stats.nClientConns, rx_stats_mutex);
}
#ifndef KERNEL
if (conn->specific) {
NETPRI;
MUTEX_ENTER(&conn->conn_data_lock);
- if (conn->refCount > 0)
- conn->refCount--;
- else {
+ /* This requires the atomic type to be signed */
+ if (rx_AtomicDecrement_NL(conn->refCount) < 0) {
+ dpf(("UNDERCOUNT(conn %x)\n", conn));
if (rx_stats_active) {
- MUTEX_ENTER(&rx_stats_mutex);
- rxi_lowConnRefCount++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_AtomicIncrement(rxi_lowConnRefCount, rx_stats_mutex);
}
}
- if ((conn->refCount > 0) || (conn->flags & RX_CONN_BUSY)) {
+ if ((rx_AtomicPeek_NL(conn->refCount) > 0) || (conn->flags & RX_CONN_BUSY)) {
/* Busy; wait till the last guy before proceeding */
MUTEX_EXIT(&conn->conn_data_lock);
USERPRI;
SPLVAR;
NETPRI;
- rx_MutexIncrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicIncrement(conn->refCount, conn->conn_data_lock);
USERPRI;
}
next = conn->next;
if (conn->type == RX_CLIENT_CONNECTION) {
/* MUTEX_ENTER(&conn->conn_data_lock); when used in kernel */
- conn->refCount++;
+ rx_AtomicIncrement(conn->refCount, conn->conn_data_lock);
/* MUTEX_EXIT(&conn->conn_data_lock); when used in kernel */
#ifdef RX_ENABLE_LOCKS
rxi_DestroyConnectionNoLock(conn);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
queue_Remove(call);
if (rx_stats_active)
- rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+ rx_AtomicDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_ENTER(&call->lock);
CLEAR_CALL_QUEUE_LOCK(call);
rx_allCallsp = call;
call->call_id =
#endif /* RXDEBUG_PACKET */
- rx_MutexIncrement(rx_stats.nCallStructs, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.nCallStructs, rx_stats_mutex);
MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_INIT(&call->lock, "call lock", MUTEX_DEFAULT, NULL);
queue_Append(&rx_freeCallQueue, call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
MUTEX_EXIT(&rx_freeCallQueue_lock);
/* Destroy the connection if it was previously slated for
* call lock held or are going through this section of code.
*/
if (conn->flags & RX_CONN_DESTROY_ME && !(conn->flags & RX_CONN_MAKECALL_WAITING)) {
- rx_MutexIncrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicIncrement(conn->refCount, conn->conn_data_lock);
#ifdef RX_ENABLE_LOCKS
if (haveCTLock)
rxi_DestroyConnectionNoLock(conn);
rx_peerHashTable[hashIndex] = pp;
rxi_InitPeerParams(pp);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
}
}
if (pp && create) {
- pp->refCount++;
+ rx_AtomicIncrement_NL(pp->refCount);
}
if (origPeer)
- origPeer->refCount--;
+ rx_AtomicDecrement_NL(origPeer->refCount);
MUTEX_EXIT(&rx_peerHashTable_lock);
return pp;
}
if (service->newConnProc)
(*service->newConnProc) (conn);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.nServerConns, rx_stats_mutex);
}
- rx_MutexIncrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicIncrement(conn->refCount, conn->conn_data_lock);
rxLastConn = conn; /* store this connection as the last conn used */
MUTEX_EXIT(&rx_connHashTable_lock);
MUTEX_ENTER(&conn->conn_data_lock);
if (np->header.type != RX_PACKET_TYPE_ABORT)
np = rxi_SendConnectionAbort(conn, np, 1, 0);
- conn->refCount--;
+ rx_AtomicDecrement_NL(conn->refCount);
MUTEX_EXIT(&conn->conn_data_lock);
return np;
}
afs_int32 errcode = ntohl(rx_GetInt32(np, 0));
dpf(("rxi_ReceivePacket ABORT rx_GetInt32 = %d", errcode));
rxi_ConnectionError(conn, errcode);
- rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
return np;
}
case RX_PACKET_TYPE_CHALLENGE:
tnp = rxi_ReceiveChallengePacket(conn, np, 1);
- rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
return tnp;
case RX_PACKET_TYPE_RESPONSE:
tnp = rxi_ReceiveResponsePacket(conn, np, 1);
- rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
return tnp;
case RX_PACKET_TYPE_PARAMS:
case RX_PACKET_TYPE_PARAMS + 1:
case RX_PACKET_TYPE_PARAMS + 2:
/* ignore these packet types for now */
- rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
return np;
rxi_ConnectionError(conn, RX_PROTOCOL_ERROR);
MUTEX_ENTER(&conn->conn_data_lock);
tnp = rxi_SendConnectionAbort(conn, np, 1, 0);
- conn->refCount--;
+ rx_AtomicDecrement_NL(conn->refCount);
MUTEX_EXIT(&conn->conn_data_lock);
return tnp;
}
* it must be for the previous call.
*/
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
- rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
return np;
}
}
if (type == RX_SERVER_CONNECTION) { /* We're the server */
if (np->header.callNumber < currentCallNumber) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call)
MUTEX_EXIT(&call->lock);
#endif
- rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
return np;
}
if (!call) {
rxi_CallError(call, rx_BusyError);
tp = rxi_SendCallAbort(call, np, 1, 0);
MUTEX_EXIT(&call->lock);
- rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.nBusies, rx_stats_mutex);
return tp;
}
rxi_KeepAliveOn(call);
tp = rxi_SendSpecial(call, conn, np, RX_PACKET_TYPE_BUSY,
NULL, 0, 1);
MUTEX_EXIT(&call->lock);
- rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
return tp;
}
rxi_ResetCall(call, 0);
rxi_CallError(call, rx_BusyError);
tp = rxi_SendCallAbort(call, np, 1, 0);
MUTEX_EXIT(&call->lock);
- rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.nBusies, rx_stats_mutex);
return tp;
}
rxi_KeepAliveOn(call);
if (call && (call->state == RX_STATE_DALLY)
&& (np->header.type == RX_PACKET_TYPE_ACK)) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
}
#endif
- rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
return np;
}
* isn't a current call, then no packet is relevant. */
if (!call || (np->header.callNumber != currentCallNumber)) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
}
#endif
- rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
return np;
}
/* If the service security object index stamped in the packet does not
#ifdef RX_ENABLE_LOCKS
MUTEX_EXIT(&call->lock);
#endif
- rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
return np;
}
* XXX code in receiveackpacket. */
if (ntohl(rx_GetInt32(np, FIRSTACKOFFSET)) < call->tfirst) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
MUTEX_EXIT(&call->lock);
- rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
return np;
}
}
dpf(("rxi_ReceivePacket ABORT rx_DataOf = %d", errdata));
rxi_CallError(call, errdata);
MUTEX_EXIT(&call->lock);
- rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
return np; /* xmitting; drop packet */
}
case RX_PACKET_TYPE_BUSY:
* (if not, then the time won't actually be re-evaluated here). */
call->lastReceiveTime = clock_Sec();
MUTEX_EXIT(&call->lock);
- rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
+ rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
return np;
}
conn->checkReachEvent = NULL;
waiting = conn->flags & RX_CONN_ATTACHWAIT;
if (event)
- conn->refCount--;
+ rx_AtomicDecrement_NL(conn->refCount);
MUTEX_EXIT(&conn->conn_data_lock);
if (waiting) {
when.sec += RX_CHECKREACH_TIMEOUT;
MUTEX_ENTER(&conn->conn_data_lock);
if (!conn->checkReachEvent) {
- conn->refCount++;
+ rx_AtomicIncrement_NL(conn->refCount);
conn->checkReachEvent =
rxevent_PostNow(&when, &now, rxi_CheckReachEvent, conn,
NULL);
struct rx_packet *tnp;
struct clock when, now;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
#ifdef KERNEL
/* If there are no packet buffers, drop this new packet, unless we can find
rxi_NeedMorePackets = TRUE;
MUTEX_EXIT(&rx_freePktQ_lock);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
call->rprev = np->header.serial;
rxi_calltrace(RX_TRACE_DROP, call);
dpf(("packet %x dropped on receipt - quota problems", np));
if (queue_IsNotEmpty(&call->rq)
&& queue_First(&call->rq, rx_packet)->header.seq == seq) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
dpf(("packet %x dropped on receipt - duplicate", np));
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
* application already, then this is a duplicate */
if (seq < call->rnext) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
/*Check for duplicate packet */
if (seq == tp->header.seq) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE,
int maxDgramPackets = 0; /* Set if peer supports AFS 3.5 jumbo datagrams */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
ap = (struct rx_ackPacket *)rx_DataOf(np);
nbytes = rx_Contiguous(np) - (int)((ap->acks) - (u_char *) ap);
if (nbytes < 0)
rxevent_Cancel(conn->checkReachEvent, (struct rx_call *)0, 0);
conn->checkReachEvent = 0;
conn->flags &= ~RX_CONN_ATTACHWAIT;
- conn->refCount--;
+ rx_AtomicDecrement_NL(conn->refCount);
}
MUTEX_EXIT(&conn->conn_data_lock);
}
rx_SetConnError(conn, error);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.fatalErrors, rx_stats_mutex);
}
}
}
}
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
#ifndef RX_ENABLE_TSFPQ
if (!optionalPacket)
rxi_FreePacket(p);
if (resending)
peer->reSends += len;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
MUTEX_EXIT(&peer->peer_lock);
if (list[len - 1]->header.flags & RX_LAST_PACKET) {
if (list[i]->header.serial) {
requestAck = 1;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
} else {
/* improved RTO calculation- not Karn */
list[i]->firstSent = *now;
if (resending)
peer->reSends++;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
MUTEX_EXIT(&peer->peer_lock);
/* Tag this packet as not being the last in this group,
/* Since we may block, don't trust this */
usenow.sec = usenow.usec = 0;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
continue; /* Ignore this packet if it has been acknowledged */
}
rx_stats.maxRtt = *rttp;
}
clock_Add(&rx_stats.totalRtt, rttp);
- rx_stats.nRttSamples++;
+ rx_AtomicIncrement_NL(rx_stats.nRttSamples);
MUTEX_EXIT(&rx_stats_mutex);
}
/* This only actually destroys the connection if
* there are no outstanding calls */
MUTEX_ENTER(&conn->conn_data_lock);
- if (!havecalls && !conn->refCount
+ if (!havecalls && (rx_AtomicPeek_NL(conn->refCount) == 0)
&& ((conn->lastSendTime + rx_idleConnectionTime) <
now.sec)) {
- conn->refCount++; /* it will be decr in rx_DestroyConn */
+ rx_AtomicIncrement_NL(conn->refCount); /* it will be decr in rx_DestroyConn */
MUTEX_EXIT(&conn->conn_data_lock);
#ifdef RX_ENABLE_LOCKS
rxi_DestroyConnectionNoLock(conn);
for (prev = peer = *peer_ptr; peer; peer = next) {
next = peer->next;
code = MUTEX_TRYENTER(&peer->peer_lock);
- if ((code) && (peer->refCount == 0)
+ if ((code) && (rx_AtomicPeek_NL(peer->refCount) == 0)
&& ((peer->idleWhen + rx_idlePeerTime) < now.sec)) {
rx_interface_stat_p rpc_stat, nrpc_stat;
size_t space;
}
rxi_FreePeer(peer);
if (rx_stats_active)
- rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ rx_AtomicDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
if (peer == *peer_ptr) {
*peer_ptr = next;
prev = next;
}
fprintf(file, "rx stats: free packets %d, allocs %d, ", (int)freePackets,
- s->packetRequests);
+ rx_AtomicPeek_NL(s->packetRequests));
if (version >= RX_DEBUGI_VERSION_W_NEWPACKETTYPES) {
fprintf(file, "alloc-failures(rcv %d/%d,send %d/%d,ack %d)\n",
- s->receivePktAllocFailures, s->receiveCbufPktAllocFailures,
- s->sendPktAllocFailures, s->sendCbufPktAllocFailures,
- s->specialPktAllocFailures);
+ rx_AtomicPeek_NL(s->receivePktAllocFailures),
+ rx_AtomicPeek_NL(s->receiveCbufPktAllocFailures),
+ rx_AtomicPeek_NL(s->sendPktAllocFailures),
+ rx_AtomicPeek_NL(s->sendCbufPktAllocFailures),
+ rx_AtomicPeek_NL(s->specialPktAllocFailures));
} else {
fprintf(file, "alloc-failures(rcv %d,send %d,ack %d)\n",
- s->receivePktAllocFailures, s->sendPktAllocFailures,
- s->specialPktAllocFailures);
+ rx_AtomicPeek_NL(s->receivePktAllocFailures),
+ rx_AtomicPeek_NL(s->sendPktAllocFailures),
+ rx_AtomicPeek_NL(s->specialPktAllocFailures));
}
fprintf(file,
" greedy %d, " "bogusReads %d (last from host %x), "
"noPackets %d, " "noBuffers %d, " "selects %d, "
- "sendSelects %d\n", s->socketGreedy, s->bogusPacketOnRead,
- s->bogusHost, s->noPacketOnRead, s->noPacketBuffersOnRead,
- s->selects, s->sendSelects);
+ "sendSelects %d\n",
+ rx_AtomicPeek_NL(s->socketGreedy),
+ rx_AtomicPeek_NL(s->bogusPacketOnRead),
+ rx_AtomicPeek_NL(s->bogusHost),
+ rx_AtomicPeek_NL(s->noPacketOnRead),
+ rx_AtomicPeek_NL(s->noPacketBuffersOnRead),
+ rx_AtomicPeek_NL(s->selects),
+ rx_AtomicPeek_NL(s->sendSelects));
fprintf(file, " packets read: ");
for (i = 0; i < RX_N_PACKET_TYPES; i++) {
- fprintf(file, "%s %d ", rx_packetTypes[i], s->packetsRead[i]);
+ fprintf(file, "%s %d ", rx_packetTypes[i], rx_AtomicPeek_NL(s->packetsRead[i]));
}
fprintf(file, "\n");
fprintf(file,
" other read counters: data %d, " "ack %d, " "dup %d "
- "spurious %d " "dally %d\n", s->dataPacketsRead,
- s->ackPacketsRead, s->dupPacketsRead, s->spuriousPacketsRead,
- s->ignorePacketDally);
+ "spurious %d " "dally %d\n", rx_AtomicPeek_NL(s->dataPacketsRead),
+ rx_AtomicPeek_NL(s->ackPacketsRead),
+ rx_AtomicPeek_NL(s->dupPacketsRead),
+ rx_AtomicPeek_NL(s->spuriousPacketsRead),
+ rx_AtomicPeek_NL(s->ignorePacketDally));
fprintf(file, " packets sent: ");
for (i = 0; i < RX_N_PACKET_TYPES; i++) {
- fprintf(file, "%s %d ", rx_packetTypes[i], s->packetsSent[i]);
+ fprintf(file, "%s %d ", rx_packetTypes[i], rx_AtomicPeek_NL(s->packetsSent[i]));
}
fprintf(file, "\n");
fprintf(file,
" other send counters: ack %d, " "data %d (not resends), "
"resends %d, " "pushed %d, " "acked&ignored %d\n",
- s->ackPacketsSent, s->dataPacketsSent, s->dataPacketsReSent,
- s->dataPacketsPushed, s->ignoreAckedPacket);
+ rx_AtomicPeek_NL(s->ackPacketsSent),
+ rx_AtomicPeek_NL(s->dataPacketsSent),
+ rx_AtomicPeek_NL(s->dataPacketsReSent),
+ rx_AtomicPeek_NL(s->dataPacketsPushed),
+ rx_AtomicPeek_NL(s->ignoreAckedPacket));
fprintf(file,
" \t(these should be small) sendFailed %d, " "fatalErrors %d\n",
- s->netSendFailures, (int)s->fatalErrors);
+ rx_AtomicPeek_NL(s->netSendFailures), rx_AtomicPeek_NL(s->fatalErrors));
- if (s->nRttSamples) {
+ if (rx_AtomicPeek_NL(s->nRttSamples)) {
fprintf(file, " Average rtt is %0.3f, with %d samples\n",
- clock_Float(&s->totalRtt) / s->nRttSamples, s->nRttSamples);
+ clock_Float(&s->totalRtt) / rx_AtomicPeek_NL(s->nRttSamples), rx_AtomicPeek_NL(s->nRttSamples));
fprintf(file, " Minimum rtt is %0.3f, maximum is %0.3f\n",
clock_Float(&s->minRtt), clock_Float(&s->maxRtt));
fprintf(file,
" %d server connections, " "%d client connections, "
"%d peer structs, " "%d call structs, " "%d free call structs\n",
- s->nServerConns, s->nClientConns, s->nPeerStructs,
- s->nCallStructs, s->nFreeCallStructs);
+ rx_AtomicPeek_NL(s->nServerConns),
+ rx_AtomicPeek_NL(s->nClientConns),
+ rx_AtomicPeek_NL(s->nPeerStructs),
+ rx_AtomicPeek_NL(s->nCallStructs),
+ rx_AtomicPeek_NL(s->nFreeCallStructs));
#if !defined(AFS_PTHREAD_ENV) && !defined(AFS_USE_GETTIMEOFDAY)
fprintf(file, " %d clock updates\n", clock_nUpdates);
next = peer->next;
rxi_FreePeer(peer);
if (rx_stats_active)
- rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ rx_AtomicDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
}
}
}
rx_max_clones_per_connection = v; \
} while(0);
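+/* Deliberately a signed type: the refcount underflow checks depend on
+ * observing a negative value after one decrement too many. */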
+typedef afs_int32 rx_atomic_t;
+
#define rx_PutConnection(conn) rx_DestroyConnection(conn)
/* A connection is an authenticated communication path, allowing
/* client-- to retransmit the challenge */
struct rx_service *service; /* used by servers only */
u_short serviceId; /* To stamp on requests (clients only) */
- afs_uint32 refCount; /* Reference count */
+ rx_atomic_t refCount; /* Reference count */
u_char flags; /* Defined below */
u_char type; /* Type of connection, defined below */
u_char secondsUntilPing; /* how often to ping for each active call */
/* For garbage collection */
    afs_uint32 idleWhen; /* When the refcount went to zero */
- afs_uint32 refCount; /* Reference count for this structure */
+ rx_atomic_t refCount; /* Reference count */
+
/* Congestion control parameters */
u_char burstSize; /* Reinitialization size for the burst parameter */
* must equal sizeof(afs_int32). */
struct rx_statistics { /* General rx statistics */
- int packetRequests; /* Number of packet allocation requests */
- int receivePktAllocFailures;
- int sendPktAllocFailures;
- int specialPktAllocFailures;
- int socketGreedy; /* Whether SO_GREEDY succeeded */
- int bogusPacketOnRead; /* Number of inappropriately short packets received */
- int bogusHost; /* Host address from bogus packets */
- int noPacketOnRead; /* Number of read packets attempted when there was actually no packet to read off the wire */
- int noPacketBuffersOnRead; /* Number of dropped data packets due to lack of packet buffers */
- int selects; /* Number of selects waiting for packet or timeout */
- int sendSelects; /* Number of selects forced when sending packet */
- int packetsRead[RX_N_PACKET_TYPES]; /* Total number of packets read, per type */
- int dataPacketsRead; /* Number of unique data packets read off the wire */
- int ackPacketsRead; /* Number of ack packets read */
- int dupPacketsRead; /* Number of duplicate data packets read */
- int spuriousPacketsRead; /* Number of inappropriate data packets */
- int packetsSent[RX_N_PACKET_TYPES]; /* Number of rxi_Sends: packets sent over the wire, per type */
- int ackPacketsSent; /* Number of acks sent */
- int pingPacketsSent; /* Total number of ping packets sent */
- int abortPacketsSent; /* Total number of aborts */
- int busyPacketsSent; /* Total number of busies sent received */
- int dataPacketsSent; /* Number of unique data packets sent */
- int dataPacketsReSent; /* Number of retransmissions */
- int dataPacketsPushed; /* Number of retransmissions pushed early by a NACK */
- int ignoreAckedPacket; /* Number of packets with acked flag, on rxi_Start */
+ rx_atomic_t packetRequests; /* Number of packet allocation requests */
+ rx_atomic_t receivePktAllocFailures;
+ rx_atomic_t sendPktAllocFailures;
+ rx_atomic_t specialPktAllocFailures;
+ rx_atomic_t socketGreedy; /* Whether SO_GREEDY succeeded */
+ rx_atomic_t bogusPacketOnRead; /* Number of inappropriately short packets received */
+ rx_atomic_t bogusHost; /* Host address from bogus packets */
+ rx_atomic_t noPacketOnRead; /* Number of read packets attempted when there was actually no packet to read off the wire */
+ rx_atomic_t noPacketBuffersOnRead; /* Number of dropped data packets due to lack of packet buffers */
+ rx_atomic_t selects; /* Number of selects waiting for packet or timeout */
+ rx_atomic_t sendSelects; /* Number of selects forced when sending packet */
+ rx_atomic_t packetsRead[RX_N_PACKET_TYPES]; /* Total number of packets read, per type */
+ rx_atomic_t dataPacketsRead; /* Number of unique data packets read off the wire */
+ rx_atomic_t ackPacketsRead; /* Number of ack packets read */
+ rx_atomic_t dupPacketsRead; /* Number of duplicate data packets read */
+ rx_atomic_t spuriousPacketsRead; /* Number of inappropriate data packets */
+ rx_atomic_t packetsSent[RX_N_PACKET_TYPES]; /* Number of rxi_Sends: packets sent over the wire, per type */
+ rx_atomic_t ackPacketsSent; /* Number of acks sent */
+ rx_atomic_t pingPacketsSent; /* Total number of ping packets sent */
+ rx_atomic_t abortPacketsSent; /* Total number of aborts */
+ rx_atomic_t busyPacketsSent; /* Total number of busy packets sent */
+ rx_atomic_t dataPacketsSent; /* Number of unique data packets sent */
+ rx_atomic_t dataPacketsReSent; /* Number of retransmissions */
+ rx_atomic_t dataPacketsPushed; /* Number of retransmissions pushed early by a NACK */
+ rx_atomic_t ignoreAckedPacket; /* Number of packets with acked flag, on rxi_Start */
struct clock totalRtt; /* Total round trip time measured (use to compute average) */
struct clock minRtt; /* Minimum round trip time measured */
struct clock maxRtt; /* Maximum round trip time measured */
- int nRttSamples; /* Total number of round trip samples */
- int nServerConns; /* Total number of server connections */
- int nClientConns; /* Total number of client connections */
- int nPeerStructs; /* Total number of peer structures */
- int nCallStructs; /* Total number of call structures allocated */
- int nFreeCallStructs; /* Total number of previously allocated free call structures */
- int netSendFailures;
- afs_int32 fatalErrors;
- int ignorePacketDally; /* packets dropped because call is in dally state */
- int receiveCbufPktAllocFailures;
- int sendCbufPktAllocFailures;
- int nBusies;
- int spares[4];
+ rx_atomic_t nRttSamples; /* Total number of round trip samples */
+ rx_atomic_t nServerConns; /* Total number of server connections */
+ rx_atomic_t nClientConns; /* Total number of client connections */
+ rx_atomic_t nPeerStructs; /* Total number of peer structures */
+ rx_atomic_t nCallStructs; /* Total number of call structures allocated */
+ rx_atomic_t nFreeCallStructs; /* Total number of previously allocated free call structures */
+ rx_atomic_t netSendFailures;
+ rx_atomic_t fatalErrors;
+ rx_atomic_t ignorePacketDally; /* packets dropped because call is in dally state */
+ rx_atomic_t receiveCbufPktAllocFailures;
+ rx_atomic_t sendCbufPktAllocFailures;
+ rx_atomic_t nBusies;
+ rx_atomic_t spares[4];
};
/* structures for debug input and output packets */
#include <intrin.h>
#pragma intrinsic(_InterlockedOr)
#pragma intrinsic(_InterlockedAnd)
-#define rx_MutexOr(object, operand, mutex) _InterlockedOr(&object, operand)
-#define rx_MutexAnd(object, operand, mutex) _InterlockedAnd(&object, operand)
-#endif
-#else
-#define rx_MutexOr(object, operand, mutex) InterlockedOr(&object, operand)
-#define rx_MutexAnd(object, operand, mutex) InterlockedAnd(&object, operand)
-#endif
-#define rx_MutexIncrement(object, mutex) InterlockedIncrement(&object)
-#define rx_MutexXor(object, operand, mutex) InterlockedXor(&object, operand)
-#define rx_MutexAdd(object, addend, mutex) InterlockedExchangeAdd(&object, addend)
-#define rx_MutexDecrement(object, mutex) InterlockedDecrement(&object)
-#define rx_MutexAdd1Increment2(object1, addend, object2, mutex) \
- do { \
- MUTEX_ENTER(&mutex); \
- object1 += addend; \
- InterlockedIncrement(&object2); \
- MUTEX_EXIT(&mutex); \
- } while (0)
-#define rx_MutexAdd1Decrement2(object1, addend, object2, mutex) \
- do { \
- MUTEX_ENTER(&mutex); \
- object1 += addend; \
- InterlockedDecrement(&object2); \
- MUTEX_EXIT(&mutex); \
- } while (0)
+#define rx_AtomicOr(object, operand, mutex) _InterlockedOr(&object, operand)
+#define rx_AtomicAnd(object, operand, mutex) _InterlockedAnd(&object, operand)
+#endif /* __cplusplus */
+#else /* !WIN64 */
+#define rx_AtomicOr(object, operand, mutex) InterlockedOr(&object, operand)
+#define rx_AtomicAnd(object, operand, mutex) InterlockedAnd(&object, operand)
+#endif /* WIN64 */
+#define rx_AtomicIncrement_NL(object) InterlockedIncrement(&object)
+#define rx_AtomicIncrement(object, mutex) InterlockedIncrement(&object)
+#define rx_AtomicXor(object, operand, mutex) InterlockedXor(&object, operand)
+#define rx_AtomicAdd_NL(object, addend) InterlockedExchangeAdd(&object, addend)
+#define rx_AtomicAdd(object, addend, mutex) InterlockedExchangeAdd(&object, addend)
+#define rx_AtomicDecrement_NL(object) InterlockedDecrement(&object)
+#define rx_AtomicDecrement(object, mutex) InterlockedDecrement(&object)
+#define rx_AtomicSwap_NL(object1, object2) InterlockedExchange((volatile LONG *) object1, object2)
+#define rx_AtomicSwap(object1, object2, mutex) InterlockedExchange((volatile LONG *) object1, object2)
#elif defined(AFS_DARWIN80_ENV)
-#define rx_MutexIncrement(object, mutex) OSAtomicIncrement32(&object)
-#define rx_MutexOr(object, operand, mutex) OSAtomicOr32(operand, &object)
-#define rx_MutexAnd(object, operand, mutex) OSAtomicAnd32(operand, &object)
-#define rx_MutexXor(object, operand, mutex) OSAtomicXor32(operand, &object)
-#define rx_MutexAdd(object, addend, mutex) OSAtomicAdd32(addend, &object)
-#define rx_MutexDecrement(object, mutex) OSAtomicDecrement32(&object)
-#define rx_MutexAdd1Increment2(object1, addend, object2, mutex) \
- do { \
- MUTEX_ENTER(&mutex); \
- object1 += addend; \
- OSAtomicIncrement32(&object2); \
- MUTEX_EXIT(&mutex); \
- } while (0)
-#define rx_MutexAdd1Decrement2(object1, addend, object2, mutex) \
- do { \
- MUTEX_ENTER(&mutex); \
- object1 += addend; \
- OSAtomicDecrement32(&object2); \
- MUTEX_EXIT(&mutex); \
- } while (0)
+#define rx_AtomicIncrement_NL(object) OSAtomicIncrement32(&object)
+#define rx_AtomicIncrement(object, mutex) OSAtomicIncrement32(&object)
+#define rx_AtomicOr(object, operand, mutex) OSAtomicOr32(operand, &object)
+#define rx_AtomicAnd(object, operand, mutex) OSAtomicAnd32(operand, &object)
+#define rx_AtomicXor(object, operand, mutex) OSAtomicXor32(operand, &object)
+#define rx_AtomicAdd_NL(object, addend) OSAtomicAdd32(addend, &object)
+#define rx_AtomicAdd(object, addend, mutex) OSAtomicAdd32(addend, &object)
+#define rx_AtomicDecrement_NL(object) OSAtomicDecrement32(&object)
+#define rx_AtomicDecrement(object, mutex) OSAtomicDecrement32(&object)
+#define rx_AtomicSwap_NL(oldval, newval) rx_AtomicSwap_int(oldval, newval)
+#define rx_AtomicSwap(oldval, newval, mutex) rx_AtomicSwap_int(oldval, newval)
+static inline afs_int32 rx_AtomicSwap_int(afs_int32 *oldval, afs_int32 newval) {
+    afs_int32 ret;
+    /* loop: the compare-and-swap fails if another thread updated *oldval
+     * between our read and the CAS, so retry until our value sticks */
+    do {
+        ret = *oldval;
+    } while (!OSAtomicCompareAndSwap32(ret, newval, (volatile int32_t *) oldval));
+    return ret;
+}
#elif defined(AFS_SUN58_ENV)
-#define rx_MutexIncrement(object, mutex) atomic_inc_32(&object)
-#define rx_MutexOr(object, operand, mutex) atomic_or_32(&object, operand)
-#define rx_MutexAnd(object, operand, mutex) atomic_and_32(&object, operand)
-#define rx_MutexXor(object, operand, mutex) \
- do { \
- MUTEX_ENTER(&mutex); \
- object ^= operand; \
- MUTEX_EXIT(&mutex); \
- } while(0)
-#define rx_MutexXor(object, operand, mutex) OSAtomicXor32Barrier(operand, &object)
-#define rx_MutexAdd(object, addend, mutex) atomic_add_32(&object, addend)
-#define rx_MutexDecrement(object, mutex) atomic_dec_32(&object)
-#define rx_MutexAdd1Increment2(object1, addend, object2, mutex) \
- do { \
- MUTEX_ENTER(&mutex); \
- object1 += addend; \
- atomic_inc_32(&object2); \
- MUTEX_EXIT(&mutex); \
- } while (0)
-#define rx_MutexAdd1Decrement2(object1, addend, object2, mutex) \
- do { \
- MUTEX_ENTER(&mutex); \
- object1 += addend; \
- atomic_dec_32(&object2); \
- MUTEX_EXIT(&mutex); \
- } while (0)
+/* the _nv ("new value") forms are used because several callers test the
+ * result of the operation; atomic_inc_32() and friends return void */
+#define rx_AtomicIncrement_NL(object) ((afs_int32) atomic_inc_32_nv((volatile uint32_t *) &(object)))
+#define rx_AtomicIncrement(object, mutex) rx_AtomicIncrement_NL(object)
+#define rx_AtomicOr(object, operand, mutex) atomic_or_32((volatile uint32_t *) &(object), operand)
+#define rx_AtomicAnd(object, operand, mutex) atomic_and_32((volatile uint32_t *) &(object), operand)
+#define rx_AtomicAdd_NL(object, addend) ((afs_int32) atomic_add_32_nv((volatile uint32_t *) &(object), addend))
+#define rx_AtomicAdd(object, addend, mutex) rx_AtomicAdd_NL(object, addend)
+#define rx_AtomicDecrement_NL(object) ((afs_int32) atomic_dec_32_nv((volatile uint32_t *) &(object)))
+#define rx_AtomicDecrement(object, mutex) rx_AtomicDecrement_NL(object)
+#define rx_AtomicSwap_NL(oldval, newval) rx_AtomicSwap_int(oldval, newval)
+#define rx_AtomicSwap(oldval, newval, mutex) rx_AtomicSwap_int(oldval, newval)
+static inline afs_int32 rx_AtomicSwap_int(afs_int32 *oldval, afs_int32 newval) {
+    afs_int32 ret;
+    /* atomic_cas_32 returns the value it found; retry until newval lands */
+    do {
+        ret = *oldval;
+    } while (atomic_cas_32((volatile uint32_t *) oldval, (uint32_t) ret,
+                           (uint32_t) newval) != (uint32_t) ret);
+    return ret;
+}
#else
+/* pre-increment/pre-decrement so the expression yields the new value,
+ * matching InterlockedIncrement/OSAtomicIncrement32 semantics above */
+#define rx_AtomicIncrement_NL(object) (++(object))
+#define rx_AtomicIncrement(object, mutex) rx_MutexIncrement(object, mutex)
+#define rx_AtomicOr(object, operand, mutex) rx_MutexOr(object, operand, mutex)
+#define rx_AtomicAnd(object, operand, mutex) rx_MutexAnd(object, operand, mutex)
+#define rx_AtomicAdd_NL(object, addend) ((object) += (addend))
+#define rx_AtomicAdd(object, addend, mutex) rx_MutexAdd(object, addend, mutex)
+#define rx_AtomicDecrement_NL(object) (--(object))
+#define rx_AtomicDecrement(object, mutex) rx_MutexDecrement(object, mutex)
+#define rx_AtomicSwap_NL(oldval, newval) rx_AtomicSwap_int(oldval, newval)
+#define rx_AtomicSwap(oldval, newval, mutex) rx_AtomicSwap_int(oldval, newval)
+static inline afs_int32 rx_AtomicSwap_int(afs_int32 *oldval, afs_int32 newval) {
+    afs_int32 ret = *oldval;
+    /* no native atomics on this path; callers must provide serialization */
+    *oldval = newval;
+    return ret;
+}
+#endif
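+/* Peek is spelled "add zero": with a zero addend it makes no difference
+ * whether the platform's add returns the value before the addition
+ * (InterlockedExchangeAdd) or after it (OSAtomicAdd32); both yield the
+ * current contents. */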
+#define rx_AtomicPeek_NL(object) rx_AtomicAdd_NL(object, 0)
+#define rx_AtomicPeek(object, mutex) rx_AtomicAdd(object, 0, mutex)
#define rx_MutexIncrement(object, mutex) \
    do { \
        MUTEX_ENTER(&mutex); \
        object++; \
        MUTEX_EXIT(&mutex); \
    } while(0)
#define rx_MutexAdd(object, addend, mutex) \
    do { \
        MUTEX_ENTER(&mutex); \
        object += addend; \
        MUTEX_EXIT(&mutex); \
    } while(0)
+#define rx_MutexDecrement(object, mutex) \
+ do { \
+ MUTEX_ENTER(&mutex); \
+ object--; \
+ MUTEX_EXIT(&mutex); \
+ } while(0)
#define rx_MutexAdd1Increment2(object1, addend, object2, mutex) \
    do { \
        MUTEX_ENTER(&mutex); \
        object1 += addend; \
        object2++; \
        MUTEX_EXIT(&mutex); \
    } while(0)
-#define rx_MutexDecrement(object, mutex) \
+
+#define rx_MutexAdd1AtomicIncrement2(object1, addend, object2, mutex) \
do { \
MUTEX_ENTER(&mutex); \
- object--; \
+ object1 += addend; \
+        rx_AtomicIncrement_NL(object2); \
MUTEX_EXIT(&mutex); \
- } while(0)
-#endif
-
+ } while (0)
+#define rx_MutexAdd1AtomicDecrement2(object1, addend, object2, mutex) \
+ do { \
+ MUTEX_ENTER(&mutex); \
+ object1 += addend; \
+        rx_AtomicDecrement_NL(object2); \
+ MUTEX_EXIT(&mutex); \
+ } while (0)
#endif /* _RX_INTERNAL_H */
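
Taken together, the conversion leans on one idiom throughout: decrement first, then repair if the count went negative. A minimal sketch, assuming the signed rx_atomic_t above and decrement macros that return the new value (true of each port in this header):

    if (rx_AtomicDecrement_NL(conn->refCount) < 0) {
        /* over-release detected: clamp back to zero and count the bug */
        rx_AtomicSwap_NL(&conn->refCount, 0);
        if (rx_stats_active)
            rx_AtomicIncrement(rxi_lowConnRefCount, rx_stats_mutex);
    }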
if (rx_stats_active) {
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
break;
}
}
if (rx_stats_active) {
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
break;
}
}
#endif /* KERNEL */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.packetRequests, rx_stats_mutex);
if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
#ifdef KERNEL
if (rx_stats_active) {
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
break;
}
}
#endif /* KERNEL */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.packetRequests, rx_stats_mutex);
#ifdef KERNEL
if (queue_IsEmpty(&rx_freePacketQueue))
RX_TS_INFO_GET(rx_ts_info);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.packetRequests, rx_stats_mutex);
if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
MUTEX_ENTER(&rx_freePktQ_lock);
if ((nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */
if (nbytes < 0 && errno == EWOULDBLOCK) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
} else if (nbytes <= 0) {
if (rx_stats_active) {
MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.bogusPacketOnRead++;
- rx_stats.bogusHost = from.sin_addr.s_addr;
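+                /* rx_stats_mutex stays held so the counter and the offending
+                 * host address are updated as a pair */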
+ rx_AtomicIncrement_NL(rx_stats.bogusPacketOnRead);
+ rx_AtomicSwap(&rx_stats.bogusHost, from.sin_addr.s_addr, rx_stats_mutex);
MUTEX_EXIT(&rx_stats_mutex);
}
dpf(("B: bogus packet from [%x,%d] nb=%d", ntohl(from.sin_addr.s_addr),
if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
struct rx_peer *peer;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
/*
* Try to look up this peer structure. If it doesn't exist,
* don't create a new one -
* it may have no refCount, meaning we could race with
* ReapConnections
*/
- if (peer && (peer->refCount > 0)) {
+ if (peer && (rx_AtomicPeek_NL(peer->refCount) > 0)) {
MUTEX_ENTER(&peer->peer_lock);
hadd32(peer->bytesReceived, p->length);
MUTEX_EXIT(&peer->peer_lock);
tpeer.port = tp->port;
tpeer.ifMTU = htons(tp->ifMTU);
tpeer.idleWhen = htonl(tp->idleWhen);
- tpeer.refCount = htons(tp->refCount);
+ tpeer.refCount = htons(rx_AtomicPeek_NL(tp->refCount));
tpeer.burstSize = tp->burstSize;
tpeer.burst = tp->burst;
tpeer.burstWait.sec = htonl(tp->burstWait.sec);
p->length + RX_HEADER_SIZE, istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.netSendFailures, rx_stats_mutex);
p->retryTime = p->timeSent; /* resend it very soon */
clock_Addmsec(&(p->retryTime),
10 + (((afs_uint32) p->backoff) << 8));
dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
MUTEX_ENTER(&peer->peer_lock);
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);
istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.netSendFailures, rx_stats_mutex);
for (i = 0; i < len; i++) {
p = list[i];
p->retryTime = p->timeSent; /* resend it very soon */
#endif
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
+ rx_AtomicIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
MUTEX_ENTER(&peer->peer_lock);
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);