#include <afs/param.h>
#endif
-RCSID
- ("$Header$");
#ifdef KERNEL
#include "afs/sysincludes.h"
#include "rx_kernel.h"
#include "rx_clock.h"
#include "rx_queue.h"
-#include "rx_internal.h"
#include "rx.h"
#include "rx_globals.h"
#include "rx_trace.h"
# include <netinet/in.h>
# include <sys/time.h>
#endif
-# include "rx_internal.h"
# include "rx.h"
# include "rx_user.h"
# include "rx_clock.h"
if (afs_winsockInit() < 0)
return -1;
#endif
-
+
#ifndef KERNEL
/*
* Initialize anything necessary to provide a non-premptive threading
*/
rxi_InitializeThreadSupport();
#endif
-
+
/* Allocate and initialize a socket for client and perhaps server
* connections. */
-
+
rx_socket = rxi_GetHostUDPSocket(host, (u_short) port);
if (rx_socket == OSI_NULLSOCKET) {
UNLOCK_RX_INIT;
rx_SetEpoch(tv.tv_sec); /* Start time of this package, rxkad
* will provide a randomer value. */
#endif
- rx_MutexAdd(rxi_dataQuota, rx_extraQuota, rx_quota_mutex); /* + extra pkts caller asked to rsrv */
+ MUTEX_ENTER(&rx_quota_mutex);
+ rxi_dataQuota += rx_extraQuota; /* + extra pkts caller asked to rsrv */
+ MUTEX_EXIT(&rx_quota_mutex);
/* *Slightly* random start time for the cid. This is just to help
* out with the hashing function at the peer */
rx_nextCid = ((tv.tv_sec ^ tv.tv_usec) << RX_CIDSHIFT);
int serviceSecurityIndex)
{
int hashindex, i;
- afs_int32 cix, nclones;
- struct rx_connection *conn, *tconn, *ptconn;
+ afs_int32 cid;
+ struct rx_connection *conn;
SPLVAR;
clock_NewTime();
dpf(("rx_NewConnection(host %x, port %u, service %u, securityObject %x, serviceSecurityIndex %d)\n", ntohl(shost), ntohs(sport), sservice, securityObject, serviceSecurityIndex));
+ /* Vasilsi said: "NETPRI protects Cid and Alloc", but can this be true in
+ * the case of kmem_alloc? */
+ conn = rxi_AllocConnection();
+#ifdef RX_ENABLE_LOCKS
+ MUTEX_INIT(&conn->conn_call_lock, "conn call lock", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&conn->conn_data_lock, "conn data lock", MUTEX_DEFAULT, 0);
+ CV_INIT(&conn->conn_call_cv, "conn call cv", CV_DEFAULT, 0);
+#endif
NETPRI;
MUTEX_ENTER(&rx_connHashTable_lock);
-
- /*
- * allocate the connection and all of its clones.
- * clones are flagged as such and have their
- * parent set to the 0th connection object.
- */
- for (nclones = rx_max_clones_per_connection,
- conn = tconn = 0,
- cix = 0;
- cix <= nclones;
- ++cix, ptconn = tconn) {
-
- tconn = rxi_AllocConnection();
- tconn->cid = (rx_nextCid += RX_MAXCALLS);
- tconn->type = RX_CLIENT_CONNECTION;
- tconn->epoch = rx_epoch;
- tconn->peer = rxi_FindPeer(shost, sport, 0, 1);
- tconn->serviceId = sservice;
- tconn->securityObject = securityObject;
- tconn->securityData = (void *) 0;
- tconn->securityIndex = serviceSecurityIndex;
- tconn->ackRate = RX_FAST_ACK_RATE;
- tconn->nSpecific = 0;
- tconn->specific = NULL;
- tconn->challengeEvent = NULL;
- tconn->delayedAbortEvent = NULL;
- tconn->abortCount = 0;
- tconn->error = 0;
-
- for (i = 0; i < RX_MAXCALLS; i++) {
- tconn->twind[i] = rx_initSendWindow;
- tconn->rwind[i] = rx_initReceiveWindow;
- }
-
- if (cix == 0) {
- conn = tconn;
- conn->nclones = nclones;
- conn->parent = 0;
- conn->next_clone = 0;
- rx_SetConnDeadTime(conn, rx_connDeadTime);
- } else {
- tconn->nclones = 0;
- tconn->flags |= RX_CLONED_CONNECTION;
- tconn->parent = conn;
- ptconn->next_clone = tconn;
- tconn->secondsUntilDead = 0;
- tconn->secondsUntilPing = 0;
- }
-
- /* generic connection setup */
-#ifdef RX_ENABLE_LOCKS
- MUTEX_INIT(&tconn->conn_call_lock, "conn call lock", MUTEX_DEFAULT,
- 0);
- MUTEX_INIT(&tconn->conn_data_lock, "conn data lock", MUTEX_DEFAULT,
- 0);
- CV_INIT(&tconn->conn_call_cv, "conn call cv", CV_DEFAULT, 0);
-#endif
- RXS_NewConnection(securityObject, tconn);
- hashindex =
- CONN_HASH(shost, sport, tconn->cid, tconn->epoch,
- RX_CLIENT_CONNECTION);
- rx_AtomicIncrement_NL(tconn->refCount); /* no lock required since only this thread knows */
- tconn->next = rx_connHashTable[hashindex];
- rx_connHashTable[hashindex] = tconn;
- if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.nClientConns, rx_stats_mutex);
+ cid = (rx_nextCid += RX_MAXCALLS);
+ conn->type = RX_CLIENT_CONNECTION;
+ conn->cid = cid;
+ conn->epoch = rx_epoch;
+ conn->peer = rxi_FindPeer(shost, sport, 0, 1);
+ conn->serviceId = sservice;
+ conn->securityObject = securityObject;
+ conn->securityData = (void *) 0;
+ conn->securityIndex = serviceSecurityIndex;
+ rx_SetConnDeadTime(conn, rx_connDeadTime);
+ conn->ackRate = RX_FAST_ACK_RATE;
+ conn->nSpecific = 0;
+ conn->specific = NULL;
+ conn->challengeEvent = NULL;
+ conn->delayedAbortEvent = NULL;
+ conn->abortCount = 0;
+ conn->error = 0;
+ for (i = 0; i < RX_MAXCALLS; i++) {
+ conn->twind[i] = rx_initSendWindow;
+ conn->rwind[i] = rx_initReceiveWindow;
}
-
+
+ RXS_NewConnection(securityObject, conn);
+ hashindex =
+ CONN_HASH(shost, sport, conn->cid, conn->epoch, RX_CLIENT_CONNECTION);
+
+ conn->refCount++; /* no lock required since only this thread knows... */
+ conn->next = rx_connHashTable[hashindex];
+ rx_connHashTable[hashindex] = conn;
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
MUTEX_EXIT(&rx_connHashTable_lock);
USERPRI;
return conn;
{
/* The idea is to set the dead time to a value that allows several
* keepalives to be dropped without timing out the connection. */
- struct rx_connection *tconn =
- (rx_IsClonedConn(conn)) ? conn->parent : conn;
-
- tconn->secondsUntilDead = MAX(seconds, 6);
- tconn->secondsUntilPing = rx_ConnSecondsUntilDead(tconn) / 6;
+ conn->secondsUntilDead = MAX(seconds, 6);
+ conn->secondsUntilPing = conn->secondsUntilDead / 6;
}
-rx_atomic_t rxi_lowPeerRefCount = 0;
-rx_atomic_t rxi_lowConnRefCount = 0;
+int rxi_lowPeerRefCount = 0;
+int rxi_lowConnRefCount = 0;
/*
* Cleanup a connection that was destroyed in rxi_DestroyConnectioNoLock.
* idle (refCount == 0) after rx_idlePeerTime (60 seconds) have passed.
*/
MUTEX_ENTER(&rx_peerHashTable_lock);
- if (rx_AtomicDecrement_NL(conn->peer->refCount) < 1) {
+ if (conn->peer->refCount < 2) {
conn->peer->idleWhen = clock_Sec();
- if (rx_AtomicPeek_NL(conn->peer->refCount) < 0) {
- rx_AtomicSwap_NL(&conn->peer->refCount, 0);
- dpf(("UNDERCOUNT(peer %x)\n", conn->peer));
- if (rx_stats_active)
- rx_AtomicIncrement(rxi_lowPeerRefCount, rx_stats_mutex);
+ if (conn->peer->refCount < 1) {
+ conn->peer->refCount = 1;
+ if (rx_stats_active) {
+ MUTEX_ENTER(&rx_stats_mutex);
+ rxi_lowPeerRefCount++;
+ MUTEX_EXIT(&rx_stats_mutex);
+ }
}
}
+ conn->peer->refCount--;
MUTEX_EXIT(&rx_peerHashTable_lock);
- if (rx_stats_active)
+ if (rx_stats_active)
{
if (conn->type == RX_SERVER_CONNECTION)
- rx_AtomicDecrement(rx_stats.nServerConns, rx_stats_mutex);
+ rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
else
- rx_AtomicDecrement(rx_stats.nClientConns, rx_stats_mutex);
+ rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
}
#ifndef KERNEL
if (conn->specific) {
void
rxi_DestroyConnection(struct rx_connection *conn)
{
- struct rx_connection *tconn, *dtconn;
-
MUTEX_ENTER(&rx_connHashTable_lock);
-
- /* destroy any clones that might exist */
- if (!rx_IsClonedConn(conn)) {
- tconn = conn->next_clone;
- conn->next_clone = 0; /* once */
-
- while (tconn) {
- dtconn = tconn;
- tconn = tconn->next_clone;
- rxi_DestroyConnectionNoLock(dtconn);
- /*
- * if destroyed dtconn will be the head of
- * rx_connCleanup_list. Remove it and clean
- * it up now as no one else is holding a
- * reference to it.
- */
- if (dtconn == rx_connCleanup_list) {
- rx_connCleanup_list = rx_connCleanup_list->next;
- MUTEX_EXIT(&rx_connHashTable_lock);
- /* rxi_CleanupConnection will free dtconn */
- rxi_CleanupConnection(dtconn);
- MUTEX_ENTER(&rx_connHashTable_lock);
- (conn->nclones)--;
- }
- } /* while(tconn) */
- }
- /* !rx_IsCloned */
rxi_DestroyConnectionNoLock(conn);
/* conn should be at the head of the cleanup list */
if (conn == rx_connCleanup_list) {
NETPRI;
MUTEX_ENTER(&conn->conn_data_lock);
- /* This requires the atomic type to be signed */
- if (rx_AtomicDecrement_NL(conn->refCount) < 0) {
- dpf(("UNDERCOUNT(conn %x)\n", conn));
+ if (conn->refCount > 0)
+ conn->refCount--;
+ else {
if (rx_stats_active) {
- rx_AtomicIncrement(rxi_lowConnRefCount, rx_stats_mutex);
+ MUTEX_ENTER(&rx_stats_mutex);
+ rxi_lowConnRefCount++;
+ MUTEX_EXIT(&rx_stats_mutex);
}
}
- if ((rx_AtomicPeek_NL(conn->refCount) > 0) || (conn->flags & RX_CONN_BUSY)) {
+ if ((conn->refCount > 0) || (conn->flags & RX_CONN_BUSY)) {
/* Busy; wait till the last guy before proceeding */
MUTEX_EXIT(&conn->conn_data_lock);
USERPRI;
if (havecalls) {
/* Don't destroy the connection if there are any call
* structures still in use */
- rx_MutexOr(conn->flags, RX_CONN_DESTROY_ME, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->flags |= RX_CONN_DESTROY_ME;
+ MUTEX_EXIT(&conn->conn_data_lock);
USERPRI;
return;
}
SPLVAR;
NETPRI;
- rx_AtomicIncrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount++;
+ MUTEX_EXIT(&conn->conn_data_lock);
USERPRI;
}
#else
osi_rxSleep(conn);
#endif
- rx_MutexDecrement(conn->makeCallWaiters, conn->conn_data_lock);
- } else {
- MUTEX_EXIT(&conn->conn_data_lock);
- }
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->makeCallWaiters--;
+ }
+ MUTEX_EXIT(&conn->conn_data_lock);
- /* search for next free call on this connection or
- * its clones, if any */
for (;;) {
- struct rx_connection *tconn;
-
- for (tconn = conn; tconn; tconn = tconn->next_clone) {
- for (i = 0; i < RX_MAXCALLS; i++) {
- call = tconn->call[i];
- if (call) {
- MUTEX_ENTER(&call->lock);
- if (call->state == RX_STATE_DALLY) {
- rxi_ResetCall(call, 0);
- (*call->callNumber)++;
- goto have_call;
- }
- MUTEX_EXIT(&call->lock);
- } else {
- call = rxi_NewCall(tconn, i);
- goto have_call;
- }
- } /* for i < RX_MAXCALLS */
+ for (i = 0; i < RX_MAXCALLS; i++) {
+ call = conn->call[i];
+ if (call) {
+ MUTEX_ENTER(&call->lock);
+ if (call->state == RX_STATE_DALLY) {
+ rxi_ResetCall(call, 0);
+ (*call->callNumber)++;
+ break;
+ }
+ MUTEX_EXIT(&call->lock);
+ } else {
+ call = rxi_NewCall(conn, i);
+ break;
+ }
+ }
+ if (i < RX_MAXCALLS) {
+ break;
}
-
- /*
- * to be here, all available calls for this connection (and all
- * of its clones) must be in use
- */
-
MUTEX_ENTER(&conn->conn_data_lock);
conn->flags |= RX_CONN_MAKECALL_WAITING;
conn->makeCallWaiters++;
MUTEX_EXIT(&conn->conn_data_lock);
-
+
#ifdef RX_ENABLE_LOCKS
CV_WAIT(&conn->conn_call_cv, &conn->conn_call_lock);
#else
osi_rxSleep(conn);
#endif
- rx_MutexDecrement(conn->makeCallWaiters, conn->conn_data_lock);
- } /* for ;; */
-
- have_call:
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->makeCallWaiters--;
+ MUTEX_EXIT(&conn->conn_data_lock);
+ }
/*
* Wake up anyone else who might be giving us a chance to
* run (see code above that avoids resource starvation).
/* Client is initially in send mode */
call->state = RX_STATE_ACTIVE;
- call->error = rx_ConnError(conn);
+ call->error = conn->error;
if (call->error)
call->mode = RX_MODE_ERROR;
else
dpf(("rx_NewCall(call %x)\n", call));
return call;
-} /* rx_NewCall */
+}
int
rxi_HasActiveCalls(struct rx_connection *aconn)
if (socket == OSI_NULLSOCKET) {
/* If we don't already have a socket (from another
* service on same port) get a new one */
- socket = rxi_GetHostUDPSocket(htonl(INADDR_ANY), port);
+ socket = rxi_GetHostUDPSocket(host, port);
if (socket == OSI_NULLSOCKET) {
USERPRI;
rxi_FreeService(tservice);
(*tservice->afterProc) (call, code);
rx_EndCall(call, code);
- if (rx_stats_active)
- rx_MutexIncrement(rxi_nCalls, rx_stats_mutex);
+ if (rx_stats_active) {
+ MUTEX_ENTER(&rx_stats_mutex);
+ rxi_nCalls++;
+ MUTEX_EXIT(&rx_stats_mutex);
+ }
}
}
if (call->flags & RX_CALL_WAIT_PROC) {
call->flags &= ~RX_CALL_WAIT_PROC;
- rx_MutexDecrement(rx_nWaiting, rx_waiting_mutex);
+ MUTEX_ENTER(&rx_waiting_mutex);
+ rx_nWaiting--;
+ MUTEX_EXIT(&rx_waiting_mutex);
}
if (call->state != RX_STATE_PRECALL || call->error) {
next = conn->next;
if (conn->type == RX_CLIENT_CONNECTION) {
/* MUTEX_ENTER(&conn->conn_data_lock); when used in kernel */
- rx_AtomicIncrement(conn->refCount, conn->conn_data_lock);
+ conn->refCount++;
/* MUTEX_EXIT(&conn->conn_data_lock); when used in kernel */
#ifdef RX_ENABLE_LOCKS
rxi_DestroyConnectionNoLock(conn);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
queue_Remove(call);
if (rx_stats_active)
- rx_AtomicDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+ rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_ENTER(&call->lock);
CLEAR_CALL_QUEUE_LOCK(call);
rx_allCallsp = call;
call->call_id =
#endif /* RXDEBUG_PACKET */
- rx_AtomicIncrement(rx_stats.nCallStructs, rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.nCallStructs, rx_stats_mutex);
MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_INIT(&call->lock, "call lock", MUTEX_DEFAULT, NULL);
queue_Append(&rx_freeCallQueue, call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
MUTEX_EXIT(&rx_freeCallQueue_lock);
/* Destroy the connection if it was previously slated for
* call lock held or are going through this section of code.
*/
if (conn->flags & RX_CONN_DESTROY_ME && !(conn->flags & RX_CONN_MAKECALL_WAITING)) {
- rx_AtomicIncrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount++;
+ MUTEX_EXIT(&conn->conn_data_lock);
#ifdef RX_ENABLE_LOCKS
if (haveCTLock)
rxi_DestroyConnectionNoLock(conn);
* structure hanging off a connection structure */
struct rx_peer *
rxi_FindPeer(afs_uint32 host, u_short port,
- struct rx_peer *origPeer, int create)
+ struct rx_peer *origPeer, int create)
{
struct rx_peer *pp;
int hashIndex;
MUTEX_ENTER(&rx_peerHashTable_lock);
for (pp = rx_peerHashTable[hashIndex]; pp; pp = pp->next) {
if ((pp->host == host) && (pp->port == port))
- break;
+ break;
}
if (!pp) {
- if (create) {
- pp = rxi_AllocPeer(); /* This bzero's *pp */
- pp->host = host; /* set here or in InitPeerParams is zero */
+ if (create) {
+ pp = rxi_AllocPeer(); /* This bzero's *pp */
+ pp->host = host; /* set here or in InitPeerParams is zero */
pp->port = port;
MUTEX_INIT(&pp->peer_lock, "peer_lock", MUTEX_DEFAULT, 0);
queue_Init(&pp->congestionQueue);
pp->next = rx_peerHashTable[hashIndex];
rx_peerHashTable[hashIndex] = pp;
rxi_InitPeerParams(pp);
- if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
}
}
if (pp && create) {
- rx_AtomicIncrement_NL(pp->refCount);
+ pp->refCount++;
}
if (origPeer)
- rx_AtomicDecrement_NL(origPeer->refCount);
+ origPeer->refCount--;
MUTEX_EXIT(&rx_peerHashTable_lock);
return pp;
}
if (service->newConnProc)
(*service->newConnProc) (conn);
if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.nServerConns, rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
}
- rx_AtomicIncrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount++;
+ MUTEX_EXIT(&conn->conn_data_lock);
rxLastConn = conn; /* store this connection as the last conn used */
MUTEX_EXIT(&rx_connHashTable_lock);
/* If the connection is in an error state, send an abort packet and ignore
* the incoming packet */
- if (rx_ConnError(conn)) {
+ if (conn->error) {
/* Don't respond to an abort packet--we don't want loops! */
MUTEX_ENTER(&conn->conn_data_lock);
if (np->header.type != RX_PACKET_TYPE_ABORT)
np = rxi_SendConnectionAbort(conn, np, 1, 0);
- rx_AtomicDecrement_NL(conn->refCount);
+ conn->refCount--;
MUTEX_EXIT(&conn->conn_data_lock);
return np;
}
afs_int32 errcode = ntohl(rx_GetInt32(np, 0));
dpf(("rxi_ReceivePacket ABORT rx_GetInt32 = %d", errcode));
rxi_ConnectionError(conn, errcode);
- rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount--;
+ MUTEX_EXIT(&conn->conn_data_lock);
return np;
}
case RX_PACKET_TYPE_CHALLENGE:
tnp = rxi_ReceiveChallengePacket(conn, np, 1);
- rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount--;
+ MUTEX_EXIT(&conn->conn_data_lock);
return tnp;
case RX_PACKET_TYPE_RESPONSE:
tnp = rxi_ReceiveResponsePacket(conn, np, 1);
- rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount--;
+ MUTEX_EXIT(&conn->conn_data_lock);
return tnp;
case RX_PACKET_TYPE_PARAMS:
case RX_PACKET_TYPE_PARAMS + 1:
case RX_PACKET_TYPE_PARAMS + 2:
/* ignore these packet types for now */
- rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount--;
+ MUTEX_EXIT(&conn->conn_data_lock);
return np;
rxi_ConnectionError(conn, RX_PROTOCOL_ERROR);
MUTEX_ENTER(&conn->conn_data_lock);
tnp = rxi_SendConnectionAbort(conn, np, 1, 0);
- rx_AtomicDecrement_NL(conn->refCount);
+ conn->refCount--;
MUTEX_EXIT(&conn->conn_data_lock);
return tnp;
}
* then, since this is a client connection we're getting data for
* it must be for the previous call.
*/
- if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
- rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount--;
+ MUTEX_EXIT(&conn->conn_data_lock);
return np;
}
}
if (type == RX_SERVER_CONNECTION) { /* We're the server */
if (np->header.callNumber < currentCallNumber) {
- if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call)
MUTEX_EXIT(&call->lock);
#endif
- rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount--;
+ MUTEX_EXIT(&conn->conn_data_lock);
return np;
}
if (!call) {
rxi_CallError(call, rx_BusyError);
tp = rxi_SendCallAbort(call, np, 1, 0);
MUTEX_EXIT(&call->lock);
- rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount--;
+ MUTEX_EXIT(&conn->conn_data_lock);
if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.nBusies, rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
return tp;
}
rxi_KeepAliveOn(call);
tp = rxi_SendSpecial(call, conn, np, RX_PACKET_TYPE_BUSY,
NULL, 0, 1);
MUTEX_EXIT(&call->lock);
- rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount--;
+ MUTEX_EXIT(&conn->conn_data_lock);
return tp;
}
rxi_ResetCall(call, 0);
*call->callNumber = np->header.callNumber;
#ifdef RXDEBUG
if (np->header.callNumber == 0)
- dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port), np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq, np->header.flags, (unsigned long)np, np->retryTime.sec, np->retryTime.usec / 1000, np->length));
+ dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%06d len %d", np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port), np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq, np->header.flags, (unsigned long)np, np->retryTime.sec, np->retryTime.usec, np->length));
#endif
call->state = RX_STATE_PRECALL;
clock_GetTime(&call->queueTime);
rxi_CallError(call, rx_BusyError);
tp = rxi_SendCallAbort(call, np, 1, 0);
MUTEX_EXIT(&call->lock);
- rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount--;
+ MUTEX_EXIT(&conn->conn_data_lock);
if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.nBusies, rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
return tp;
}
rxi_KeepAliveOn(call);
/* Ignore all incoming acknowledgements for calls in DALLY state */
if (call && (call->state == RX_STATE_DALLY)
&& (np->header.type == RX_PACKET_TYPE_ACK)) {
- if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
}
#endif
- rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount--;
+ MUTEX_EXIT(&conn->conn_data_lock);
return np;
}
/* Ignore anything that's not relevant to the current call. If there
* isn't a current call, then no packet is relevant. */
if (!call || (np->header.callNumber != currentCallNumber)) {
- if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
}
#endif
- rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount--;
+ MUTEX_EXIT(&conn->conn_data_lock);
return np;
}
/* If the service security object index stamped in the packet does not
#ifdef RX_ENABLE_LOCKS
MUTEX_EXIT(&call->lock);
#endif
- rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount--;
+ MUTEX_EXIT(&conn->conn_data_lock);
return np;
}
* XXX code in receiveackpacket. */
if (ntohl(rx_GetInt32(np, FIRSTACKOFFSET)) < call->tfirst) {
if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
MUTEX_EXIT(&call->lock);
- rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount--;
+ MUTEX_EXIT(&conn->conn_data_lock);
return np;
}
}
dpf(("rxi_ReceivePacket ABORT rx_DataOf = %d", errdata));
rxi_CallError(call, errdata);
MUTEX_EXIT(&call->lock);
- rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount--;
+ MUTEX_EXIT(&conn->conn_data_lock);
return np; /* xmitting; drop packet */
}
case RX_PACKET_TYPE_BUSY:
break;
#else /* RX_ENABLE_LOCKS */
MUTEX_EXIT(&call->lock);
- rx_MutexDecrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount--;
+ MUTEX_EXIT(&conn->conn_data_lock);
return np; /* xmitting; drop packet */
#endif /* RX_ENABLE_LOCKS */
}
* (if not, then the time won't actually be re-evaluated here). */
call->lastReceiveTime = clock_Sec();
MUTEX_EXIT(&call->lock);
- rx_AtomicDecrement(conn->refCount, conn->conn_data_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->refCount--;
+ MUTEX_EXIT(&conn->conn_data_lock);
return np;
}
conn->checkReachEvent = NULL;
waiting = conn->flags & RX_CONN_ATTACHWAIT;
if (event)
- rx_AtomicDecrement_NL(conn->refCount);
+ conn->refCount--;
MUTEX_EXIT(&conn->conn_data_lock);
if (waiting) {
when.sec += RX_CHECKREACH_TIMEOUT;
MUTEX_ENTER(&conn->conn_data_lock);
if (!conn->checkReachEvent) {
- rx_AtomicIncrement_NL(conn->refCount);
+ conn->refCount++;
conn->checkReachEvent =
rxevent_PostNow(&when, &now, rxi_CheckReachEvent, conn,
NULL);
int newPackets = 0;
int didHardAck = 0;
int haveLast = 0;
- afs_uint32 seq, serial, flags;
+ afs_uint32 seq;
+ afs_uint32 serial=0, flags=0;
int isFirst;
struct rx_packet *tnp;
struct clock when, now;
if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
#ifdef KERNEL
/* If there are no packet buffers, drop this new packet, unless we can find
rxi_NeedMorePackets = TRUE;
MUTEX_EXIT(&rx_freePktQ_lock);
if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
call->rprev = np->header.serial;
rxi_calltrace(RX_TRACE_DROP, call);
dpf(("packet %x dropped on receipt - quota problems", np));
if (queue_IsNotEmpty(&call->rq)
&& queue_First(&call->rq, rx_packet)->header.seq == seq) {
if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
dpf(("packet %x dropped on receipt - duplicate", np));
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
* application already, then this is a duplicate */
if (seq < call->rnext) {
if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
/*Check for duplicate packet */
if (seq == tp->header.seq) {
if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE,
MUTEX_EXIT(&conn->conn_data_lock);
}
+#if defined(RXDEBUG) && defined(AFS_NT40_ENV)
static const char *
rx_ack_reason(int reason)
{
return "unknown!!";
}
}
+#endif
/* rxi_ComputePeerNetStats
int maxDgramPackets = 0; /* Set if peer supports AFS 3.5 jumbo datagrams */
if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
ap = (struct rx_ackPacket *)rx_DataOf(np);
nbytes = rx_Contiguous(np) - (int)((ap->acks) - (u_char *) ap);
if (nbytes < 0)
sizeof(afs_int32), &tSize);
maxDgramPackets = (afs_uint32) ntohl(tSize);
maxDgramPackets = MIN(maxDgramPackets, rxi_nDgramPackets);
- maxDgramPackets = MIN(maxDgramPackets, peer->ifDgramPackets);
- if (peer->natMTU < peer->ifMTU)
- maxDgramPackets = MIN(maxDgramPackets, rxi_AdjustDgramPackets(1, peer->natMTU));
+ maxDgramPackets =
+ MIN(maxDgramPackets, (int)(peer->ifDgramPackets));
+ maxDgramPackets = MIN(maxDgramPackets, tSize);
if (maxDgramPackets > 1) {
peer->maxDgramPackets = maxDgramPackets;
call->MTU = RX_JUMBOBUFFERSIZE + RX_HEADER_SIZE;
afs_int32 error;
struct clock when, now;
- if (!rx_ConnError(conn))
+ if (!conn->error)
return packet;
/* Clients should never delay abort messages */
if (conn->delayedAbortEvent) {
rxevent_Cancel(conn->delayedAbortEvent, (struct rx_call *)0, 0);
}
- error = htonl(rx_ConnError(conn));
+ error = htonl(conn->error);
conn->abortCount++;
MUTEX_EXIT(&conn->conn_data_lock);
packet =
return packet;
}
-/*
- * Associate an error all of the calls owned by a connection. Called
+/* Associate an error all of the calls owned by a connection. Called
* with error non-zero. This is only for really fatal things, like
* bad authentication responses. The connection itself is set in
* error at this point, so that future packets received will be
- * rejected.
- */
+ * rejected. */
void
rxi_ConnectionError(struct rx_connection *conn,
afs_int32 error)
{
if (error) {
int i;
- struct rx_connection *tconn;
dpf(("rxi_ConnectionError conn %x error %d", conn, error));
rxevent_Cancel(conn->checkReachEvent, (struct rx_call *)0, 0);
conn->checkReachEvent = 0;
conn->flags &= ~RX_CONN_ATTACHWAIT;
- rx_AtomicDecrement_NL(conn->refCount);
+ conn->refCount--;
}
MUTEX_EXIT(&conn->conn_data_lock);
-
- for ( tconn = rx_IsClonedConn(conn) ? conn->parent : conn;
- tconn;
- tconn = tconn->next_clone) {
- for (i = 0; i < RX_MAXCALLS; i++) {
- struct rx_call *call = tconn->call[i];
- if (call) {
- MUTEX_ENTER(&call->lock);
- rxi_CallError(call, error);
- MUTEX_EXIT(&call->lock);
- }
- }
+ for (i = 0; i < RX_MAXCALLS; i++) {
+ struct rx_call *call = conn->call[i];
+ if (call) {
+ MUTEX_ENTER(&call->lock);
+ rxi_CallError(call, error);
+ MUTEX_EXIT(&call->lock);
+ }
}
- rx_SetConnError(conn, error);
+ conn->error = error;
if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.fatalErrors, rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
}
}
}
}
if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
#ifndef RX_ENABLE_TSFPQ
if (!optionalPacket)
rxi_FreePacket(p);
if (resending)
peer->reSends += len;
if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
+ rx_MutexAdd(rx_stats.dataPacketsSent, len, rx_stats_mutex);
MUTEX_EXIT(&peer->peer_lock);
if (list[len - 1]->header.flags & RX_LAST_PACKET) {
if (list[i]->header.serial) {
requestAck = 1;
if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
} else {
/* improved RTO calculation- not Karn */
list[i]->firstSent = *now;
}
}
- MUTEX_ENTER(&peer->peer_lock);
- peer->nSent++;
- if (resending)
- peer->reSends++;
- if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
- MUTEX_EXIT(&peer->peer_lock);
-
/* Tag this packet as not being the last in this group,
* for the receiver's benefit */
if (i < len - 1 || moreFlag) {
/* Since we may block, don't trust this */
usenow.sec = usenow.usec = 0;
if (rx_stats_active)
- rx_AtomicIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
continue; /* Ignore this packet if it has been acknowledged */
}
#endif
/* dead time + RTT + 8*MDEV, rounded up to next second. */
deadTime =
- (((afs_uint32) rx_ConnSecondsUntilDead(conn) << 10) +
+ (((afs_uint32) conn->secondsUntilDead << 10) +
((afs_uint32) conn->peer->rtt >> 3) +
((afs_uint32) conn->peer->rtt_dev << 1) + 1023) >> 10;
now = clock_Sec();
* attached process can die reasonably gracefully. */
}
/* see if we have a non-activity timeout */
- if (call->startWait && rx_ConnIdleDeadTime(conn)
- && ((call->startWait + rx_ConnIdleDeadTime(conn)) < now)) {
+ if (call->startWait && conn->idleDeadTime
+ && ((call->startWait + conn->idleDeadTime) < now)) {
if (call->state == RX_STATE_ACTIVE) {
rxi_CallError(call, RX_CALL_TIMEOUT);
return -1;
}
}
- if (call->lastSendData && rx_ConnIdleDeadTime(conn)
- && (rx_ConnIdleDeadErr(conn) != 0)
- && ((call->lastSendData + rx_ConnIdleDeadTime(conn)) < now)) {
+ if (call->lastSendData && conn->idleDeadTime && (conn->idleDeadErr != 0)
+ && ((call->lastSendData + conn->idleDeadTime) < now)) {
if (call->state == RX_STATE_ACTIVE) {
rxi_CallError(call, conn->idleDeadErr);
return -1;
}
}
/* see if we have a hard timeout */
- if (rx_ConnHardDeadTime(conn)
- && (now > (rx_ConnHardDeadTime(conn) + call->startTime.sec))) {
+ if (conn->hardDeadTime
+ && (now > (conn->hardDeadTime + call->startTime.sec))) {
if (call->state == RX_STATE_ACTIVE)
rxi_CallError(call, RX_CALL_TIMEOUT);
return -1;
}
conn = call->conn;
- if ((now - call->lastSendTime) > rx_ConnSecondsUntilPing(conn)) {
+ if ((now - call->lastSendTime) > conn->secondsUntilPing) {
/* Don't try to send keepalives if there is unacknowledged data */
/* the rexmit code should be good enough, this little hack
* doesn't quite work XXX */
struct clock when, now;
clock_GetTime(&now);
when = now;
- when.sec += rx_ConnSecondsUntilPing(call->conn);
+ when.sec += call->conn->secondsUntilPing;
CALL_HOLD(call, RX_CALL_REFCOUNT_ALIVE);
call->keepAliveEvent =
rxevent_PostNow(&when, &now, rxi_KeepAliveEvent, call, 0);
MUTEX_ENTER(&conn->conn_data_lock);
conn->delayedAbortEvent = NULL;
- error = htonl(rx_ConnError(conn));
+ error = htonl(conn->error);
conn->abortCount++;
MUTEX_EXIT(&conn->conn_data_lock);
packet = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL);
rx_stats.maxRtt = *rttp;
}
clock_Add(&rx_stats.totalRtt, rttp);
- rx_AtomicIncrement_NL(rx_stats.nRttSamples);
+ rx_stats.nRttSamples++;
MUTEX_EXIT(&rx_stats_mutex);
}
clock_Zero(&(peer->timeout));
clock_Addmsec(&(peer->timeout), rtt_timeout);
- dpf(("rxi_ComputeRoundTripTime(rtt=%d ms, srtt=%d ms, rtt_dev=%d ms, timeout=%d.%0.3d sec)\n", MSEC(rttp), peer->rtt >> 3, peer->rtt_dev >> 2, (peer->timeout.sec), (peer->timeout.usec)));
+ dpf(("rxi_ComputeRoundTripTime(rtt=%d ms, srtt=%d ms, rtt_dev=%d ms, timeout=%d.%06d sec)\n", MSEC(rttp), peer->rtt >> 3, peer->rtt_dev >> 2, (peer->timeout.sec), (peer->timeout.usec)));
}
/* This only actually destroys the connection if
* there are no outstanding calls */
MUTEX_ENTER(&conn->conn_data_lock);
- if (!havecalls && (rx_AtomicPeek_NL(conn->refCount) == 0)
+ if (!havecalls && !conn->refCount
&& ((conn->lastSendTime + rx_idleConnectionTime) <
now.sec)) {
- rx_AtomicIncrement_NL(conn->refCount); /* it will be decr in rx_DestroyConn */
+ conn->refCount++; /* it will be decr in rx_DestroyConn */
MUTEX_EXIT(&conn->conn_data_lock);
#ifdef RX_ENABLE_LOCKS
rxi_DestroyConnectionNoLock(conn);
for (prev = peer = *peer_ptr; peer; peer = next) {
next = peer->next;
code = MUTEX_TRYENTER(&peer->peer_lock);
- if ((code) && (rx_AtomicPeek_NL(peer->refCount) == 0)
+ if ((code) && (peer->refCount == 0)
&& ((peer->idleWhen + rx_idlePeerTime) < now.sec)) {
rx_interface_stat_p rpc_stat, nrpc_stat;
size_t space;
}
rxi_FreePeer(peer);
if (rx_stats_active)
- rx_AtomicDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
if (peer == *peer_ptr) {
*peer_ptr = next;
prev = next;
return;
}
- dpf(("CONG peer %lx/%u: sample (%s) size %ld, %ld ms (to %lu.%06lu, rtt %u, ps %u)", ntohl(peer->host), ntohs(peer->port), (ackReason == RX_ACK_REQUESTED ? "dataack" : "pingack"), xferSize, xferMs, peer->timeout.sec, peer->timeout.usec, peer->smRtt, peer->ifMTU));
+ dpf(("CONG peer %lx/%u: sample (%s) size %ld, %ld ms (to %d.%06d, rtt %u, ps %u)", ntohl(peer->host), ntohs(peer->port), (ackReason == RX_ACK_REQUESTED ? "dataack" : "pingack"), xferSize, xferMs, peer->timeout.sec, peer->timeout.usec, peer->smRtt, peer->ifMTU));
/* Track only packets that are big enough. */
if ((p->length + RX_HEADER_SIZE + call->conn->securityMaxTrailerSize) <
* one packet exchange */
if (clock_Gt(&newTO, &peer->timeout)) {
- dpf(("CONG peer %lx/%u: timeout %lu.%06lu ==> %lu.%06lu (rtt %u, ps %u)", ntohl(peer->host), ntohs(peer->port), peer->timeout.sec, peer->timeout.usec, newTO.sec, newTO.usec, peer->smRtt, peer->packetSize));
+ dpf(("CONG peer %lx/%u: timeout %d.%06d ==> %d.%06d (rtt %u, ps %u)", ntohl(peer->host), ntohs(peer->port), peer->timeout.sec, peer->timeout.usec, newTO.sec, newTO.usec, peer->smRtt, peer->packetSize));
peer->timeout = newTO;
}
/* calculate estimate for transmission interval in milliseconds */
minTime = rx_Window * peer->smRtt;
if (minTime < 1000) {
- dpf(("CONG peer %lx/%u: cut TO %lu.%06lu by 0.5 (rtt %u, ps %u)",
+ dpf(("CONG peer %lx/%u: cut TO %d.%06d by 0.5 (rtt %u, ps %u)",
ntohl(peer->host), ntohs(peer->port), peer->timeout.sec,
peer->timeout.usec, peer->smRtt, peer->packetSize));
va_start(ap, format);
clock_GetTime(&now);
- fprintf(rx_Log, " %u.%.3u:", (unsigned int)now.sec,
- (unsigned int)now.usec / 1000);
+ fprintf(rx_Log, " %u.%06u:", (unsigned int)now.sec,
+ (unsigned int)now.usec);
vfprintf(rx_Log, format, ap);
putc('\n', rx_Log);
va_end(ap);
}
fprintf(file, "rx stats: free packets %d, allocs %d, ", (int)freePackets,
- rx_AtomicPeek_NL(s->packetRequests));
+ s->packetRequests);
if (version >= RX_DEBUGI_VERSION_W_NEWPACKETTYPES) {
- fprintf(file, "alloc-failures(rcv %d/%d,send %d/%d,ack %d)\n",
- rx_AtomicPeek_NL(s->receivePktAllocFailures),
- rx_AtomicPeek_NL(s->receiveCbufPktAllocFailures),
- rx_AtomicPeek_NL(s->sendPktAllocFailures),
- rx_AtomicPeek_NL(s->sendCbufPktAllocFailures),
- rx_AtomicPeek_NL(s->specialPktAllocFailures));
+ fprintf(file, "alloc-failures(rcv %u/%u,send %u/%u,ack %u)\n",
+ s->receivePktAllocFailures, s->receiveCbufPktAllocFailures,
+ s->sendPktAllocFailures, s->sendCbufPktAllocFailures,
+ s->specialPktAllocFailures);
} else {
- fprintf(file, "alloc-failures(rcv %d,send %d,ack %d)\n",
- rx_AtomicPeek_NL(s->receivePktAllocFailures),
- rx_AtomicPeek_NL(s->sendPktAllocFailures),
- rx_AtomicPeek_NL(s->specialPktAllocFailures));
+ fprintf(file, "alloc-failures(rcv %u,send %u,ack %u)\n",
+ s->receivePktAllocFailures, s->sendPktAllocFailures,
+ s->specialPktAllocFailures);
}
fprintf(file,
- " greedy %d, " "bogusReads %d (last from host %x), "
- "noPackets %d, " "noBuffers %d, " "selects %d, "
- "sendSelects %d\n",
- rx_AtomicPeek_NL(s->socketGreedy),
- rx_AtomicPeek_NL(s->bogusPacketOnRead),
- rx_AtomicPeek_NL(s->bogusHost),
- rx_AtomicPeek_NL(s->noPacketOnRead),
- rx_AtomicPeek_NL(s->noPacketBuffersOnRead),
- rx_AtomicPeek_NL(s->selects),
- rx_AtomicPeek_NL(s->sendSelects));
+ " greedy %u, " "bogusReads %u (last from host %x), "
+ "noPackets %u, " "noBuffers %u, " "selects %u, "
+ "sendSelects %u\n", s->socketGreedy, s->bogusPacketOnRead,
+ s->bogusHost, s->noPacketOnRead, s->noPacketBuffersOnRead,
+ s->selects, s->sendSelects);
fprintf(file, " packets read: ");
for (i = 0; i < RX_N_PACKET_TYPES; i++) {
- fprintf(file, "%s %d ", rx_packetTypes[i], rx_AtomicPeek_NL(s->packetsRead[i]));
+ fprintf(file, "%s %u ", rx_packetTypes[i], s->packetsRead[i]);
}
fprintf(file, "\n");
fprintf(file,
- " other read counters: data %d, " "ack %d, " "dup %d "
- "spurious %d " "dally %d\n", rx_AtomicPeek_NL(s->dataPacketsRead),
- rx_AtomicPeek_NL(s->ackPacketsRead),
- rx_AtomicPeek_NL(s->dupPacketsRead),
- rx_AtomicPeek_NL(s->spuriousPacketsRead),
- rx_AtomicPeek_NL(s->ignorePacketDally));
+ " other read counters: data %u, " "ack %u, " "dup %u "
+ "spurious %u " "dally %u\n", s->dataPacketsRead,
+ s->ackPacketsRead, s->dupPacketsRead, s->spuriousPacketsRead,
+ s->ignorePacketDally);
fprintf(file, " packets sent: ");
for (i = 0; i < RX_N_PACKET_TYPES; i++) {
- fprintf(file, "%s %d ", rx_packetTypes[i], rx_AtomicPeek_NL(s->packetsSent[i]));
+ fprintf(file, "%s %u ", rx_packetTypes[i], s->packetsSent[i]);
}
fprintf(file, "\n");
fprintf(file,
- " other send counters: ack %d, " "data %d (not resends), "
- "resends %d, " "pushed %d, " "acked&ignored %d\n",
- rx_AtomicPeek_NL(s->ackPacketsSent),
- rx_AtomicPeek_NL(s->dataPacketsSent),
- rx_AtomicPeek_NL(s->dataPacketsReSent),
- rx_AtomicPeek_NL(s->dataPacketsPushed),
- rx_AtomicPeek_NL(s->ignoreAckedPacket));
+ " other send counters: ack %u, " "data %u (not resends), "
+ "resends %u, " "pushed %u, " "acked&ignored %u\n",
+ s->ackPacketsSent, s->dataPacketsSent, s->dataPacketsReSent,
+ s->dataPacketsPushed, s->ignoreAckedPacket);
fprintf(file,
- " \t(these should be small) sendFailed %d, " "fatalErrors %d\n",
- rx_AtomicPeek_NL(s->netSendFailures), rx_AtomicPeek_NL(s->fatalErrors));
+ " \t(these should be small) sendFailed %u, " "fatalErrors %u\n",
+ s->netSendFailures, s->fatalErrors);
- if (rx_AtomicPeek_NL(s->nRttSamples)) {
+ if (s->nRttSamples) {
fprintf(file, " Average rtt is %0.3f, with %d samples\n",
- clock_Float(&s->totalRtt) / rx_AtomicPeek_NL(s->nRttSamples), rx_AtomicPeek_NL(s->nRttSamples));
+ clock_Float(&s->totalRtt) / s->nRttSamples, s->nRttSamples);
fprintf(file, " Minimum rtt is %0.3f, maximum is %0.3f\n",
clock_Float(&s->minRtt), clock_Float(&s->maxRtt));
fprintf(file,
" %d server connections, " "%d client connections, "
"%d peer structs, " "%d call structs, " "%d free call structs\n",
- rx_AtomicPeek_NL(s->nServerConns),
- rx_AtomicPeek_NL(s->nClientConns),
- rx_AtomicPeek_NL(s->nPeerStructs),
- rx_AtomicPeek_NL(s->nCallStructs),
- rx_AtomicPeek_NL(s->nFreeCallStructs));
+ s->nServerConns, s->nClientConns, s->nPeerStructs,
+ s->nCallStructs, s->nFreeCallStructs);
#if !defined(AFS_PTHREAD_ENV) && !defined(AFS_USE_GETTIMEOFDAY)
fprintf(file, " %d clock updates\n", clock_nUpdates);
void
rx_PrintPeerStats(FILE * file, struct rx_peer *peer)
{
- fprintf(file, "Peer %x.%d. " "Burst size %d, " "burst wait %u.%d.\n",
+ fprintf(file, "Peer %x.%d. " "Burst size %d, " "burst wait %d.%06d.\n",
ntohl(peer->host), (int)peer->port, (int)peer->burstSize,
(int)peer->burstWait.sec, (int)peer->burstWait.usec);
afs_uint32 * supportedValues)
{
#ifndef RXDEBUG
- afs_int32 rc = -1;
+ afs_int32 rc = -1;
#else
afs_int32 rc = 0;
struct rx_debugIn in;
+ afs_int32 *lp = (afs_int32 *) stat;
*supportedValues = 0;
in.type = htonl(RX_DEBUGI_GETSTATS);
afs_uint32 * supportedValues)
{
#ifndef RXDEBUG
- afs_int32 rc = -1;
+ afs_int32 rc = -1;
#else
afs_int32 rc = 0;
struct rx_debugIn in;
- int i;
afs_int32 *lp = (afs_int32 *) stat;
+ int i;
/*
* supportedValues is currently unused, but added to allow future
for (i = 0; i < RX_MAXCALLS; i++) {
conn->callNumber[i] = ntohl(conn->callNumber[i]);
}
- rx_SetConnError(conn, ntohl(rx_ConnError(conn)));
+ conn->error = ntohl(conn->error);
conn->secStats.flags = ntohl(conn->secStats.flags);
conn->secStats.expires = ntohl(conn->secStats.expires);
conn->secStats.packetsReceived =
sizeof(rx_function_entry_v1_t);
rxi_Free(rpc_stat, space);
- rx_MutexAdd(rxi_rpc_peer_stat_cnt, -num_funcs, rx_rpc_stats);
+ MUTEX_ENTER(&rx_rpc_stats);
+ rxi_rpc_peer_stat_cnt -= num_funcs;
+ MUTEX_EXIT(&rx_rpc_stats);
}
next = peer->next;
rxi_FreePeer(peer);
if (rx_stats_active)
- rx_AtomicDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
}
}
}
rx_SetSpecific(struct rx_connection *conn, int key, void *ptr)
{
int i;
- struct rx_connection *tconn =
- (rx_IsClonedConn(conn)) ? conn->parent : conn;
-
- MUTEX_ENTER(&tconn->conn_data_lock);
- if (!tconn->specific) {
- tconn->specific = (void **)malloc((key + 1) * sizeof(void *));
+ MUTEX_ENTER(&conn->conn_data_lock);
+ if (!conn->specific) {
+ conn->specific = (void **)malloc((key + 1) * sizeof(void *));
for (i = 0; i < key; i++)
- tconn->specific[i] = NULL;
- tconn->nSpecific = key + 1;
- tconn->specific[key] = ptr;
- } else if (key >= tconn->nSpecific) {
- tconn->specific = (void **)
- realloc(tconn->specific, (key + 1) * sizeof(void *));
- for (i = tconn->nSpecific; i < key; i++)
- tconn->specific[i] = NULL;
- tconn->nSpecific = key + 1;
- tconn->specific[key] = ptr;
+ conn->specific[i] = NULL;
+ conn->nSpecific = key + 1;
+ conn->specific[key] = ptr;
+ } else if (key >= conn->nSpecific) {
+ conn->specific = (void **)
+ realloc(conn->specific, (key + 1) * sizeof(void *));
+ for (i = conn->nSpecific; i < key; i++)
+ conn->specific[i] = NULL;
+ conn->nSpecific = key + 1;
+ conn->specific[key] = ptr;
} else {
- if (tconn->specific[key] && rxi_keyCreate_destructor[key])
+ if (conn->specific[key] && rxi_keyCreate_destructor[key])
(*rxi_keyCreate_destructor[key]) (conn->specific[key]);
- tconn->specific[key] = ptr;
+ conn->specific[key] = ptr;
}
- MUTEX_EXIT(&tconn->conn_data_lock);
+ MUTEX_EXIT(&conn->conn_data_lock);
}
void *
rx_GetSpecific(struct rx_connection *conn, int key)
{
void *ptr;
- struct rx_connection *tconn =
- (rx_IsClonedConn(conn)) ? conn->parent : conn;
-
- MUTEX_ENTER(&tconn->conn_data_lock);
- if (key >= tconn->nSpecific)
+ MUTEX_ENTER(&conn->conn_data_lock);
+ if (key >= conn->nSpecific)
ptr = NULL;
else
- ptr = tconn->specific[key];
- MUTEX_EXIT(&tconn->conn_data_lock);
+ ptr = conn->specific[key];
+ MUTEX_EXIT(&conn->conn_data_lock);
return ptr;
}