#include "rx.h"
#include "rx_globals.h"
#include "rx_trace.h"
+#include "rx_atomic.h"
+#include "rx_internal.h"
+#include "rx_stats.h"
#define AFSOP_STOP_RXCALLBACK 210 /* Stop CALLBACK process */
#define AFSOP_STOP_AFS 211 /* Stop AFS process */
#define AFSOP_STOP_BKG 212 /* Stop BKG process */
# include "rx_user.h"
# include "rx_clock.h"
# include "rx_queue.h"
+# include "rx_atomic.h"
# include "rx_globals.h"
# include "rx_trace.h"
+# include "rx_internal.h"
+# include "rx_stats.h"
# include <afs/rxgen_consts.h>
#endif /* KERNEL */
/* Local static routines */
static void rxi_DestroyConnectionNoLock(struct rx_connection *conn);
+static void rxi_ComputeRoundTripTime(struct rx_packet *, struct clock *,
+ struct rx_peer *, struct clock *);
+
#ifdef RX_ENABLE_LOCKS
static void rxi_SetAcksInTransmitQueue(struct rx_call *call);
#endif
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
struct rx_tq_debug {
- afs_int32 rxi_start_aborted; /* rxi_start awoke after rxi_Send in error. */
- afs_int32 rxi_start_in_error;
+ rx_atomic_t rxi_start_aborted; /* rxi_start awoke after rxi_Send in error.*/
+ rx_atomic_t rxi_start_in_error;
} rx_tq_debug;
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
static unsigned int rxi_rpc_process_stat_cnt;
+rx_atomic_t rx_nWaiting = RX_ATOMIC_INIT(0);
+rx_atomic_t rx_nWaited = RX_ATOMIC_INIT(0);
+
#if !defined(offsetof)
#include <stddef.h> /* for definition of offsetof() */
#endif
+#ifdef RX_ENABLE_LOCKS
+afs_kmutex_t rx_atomic_mutex;
+#endif
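+/*
+ * Editorial sketch (assumption, not upstream text): why rx_atomic_mutex
+ * exists. On platforms without native atomic intrinsics, the rx_atomic_*
+ * operations in rx_atomic.h fall back to serializing through this one
+ * global mutex. The member and macro spellings below are assumed:
+ *
+ *     typedef struct {
+ *         int var;                    // plain int, guarded by rx_atomic_mutex
+ *     } rx_atomic_t;
+ *
+ *     #define RX_ATOMIC_INIT(i) { (i) }
+ *
+ *     static_inline void
+ *     rx_atomic_inc(rx_atomic_t *atomic)
+ *     {
+ *         MUTEX_ENTER(&rx_atomic_mutex);
+ *         atomic->var++;
+ *         MUTEX_EXIT(&rx_atomic_mutex);
+ *     }
+ *
+ *     static_inline int
+ *     rx_atomic_read(rx_atomic_t *atomic)
+ *     {
+ *         return atomic->var;         // a plain aligned int load
+ *     }
+ */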
+
#ifdef AFS_PTHREAD_ENV
#include <assert.h>
* to ease NT porting
*/
-extern afs_kmutex_t rx_stats_mutex;
-extern afs_kmutex_t rx_waiting_mutex;
extern afs_kmutex_t rx_quota_mutex;
extern afs_kmutex_t rx_pthread_mutex;
extern afs_kmutex_t rx_packets_mutex;
+extern afs_kmutex_t rx_refcnt_mutex;
extern afs_kmutex_t des_init_mutex;
extern afs_kmutex_t des_random_mutex;
extern afs_kmutex_t rx_clock_mutex;
{
MUTEX_INIT(&rx_clock_mutex, "clock", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_stats_mutex, "stats", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&rx_waiting_mutex, "waiting", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_atomic_mutex, "atomic", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_quota_mutex, "quota", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_pthread_mutex, "pthread", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_packets_mutex, "packets", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_refcnt_mutex, "refcnts", MUTEX_DEFAULT, 0);
MUTEX_INIT(&epoch_mutex, "epoch", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_init_mutex, "init", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_event_mutex, "event", MUTEX_DEFAULT, 0);
/*
* The rx_pthread_mutex mutex protects the following global variables:
- * rxi_pthread_hinum
+ * rxi_fcfs_thread_num
*/
#else
#define INIT_PTHREAD_LOCKS
* lowest level:
* multi_handle->lock
* rxevent_lock
+ * rx_packets_mutex
* rx_stats_mutex
+ * rx_refcnt_mutex
+ * rx_atomic_mutex
*
* Do we need a lock to protect the peer field in the conn structure?
* conn->peer was previously a constant for all intents and so has no
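/*
 * Illustrative sketch (not upstream text): the acquisition pattern this
 * change applies throughout, honoring the hierarchy above. rx_refcnt_mutex
 * sits at the lowest level, so it is taken after (inside) conn_data_lock
 * and released first:
 *
 *     MUTEX_ENTER(&conn->conn_data_lock);
 *     MUTEX_ENTER(&rx_refcnt_mutex);   // leaf lock: nothing taken under it
 *     conn->refCount++;
 *     MUTEX_EXIT(&rx_refcnt_mutex);    // released in reverse order
 *     MUTEX_EXIT(&conn->conn_data_lock);
 */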
rxdb_init();
#endif /* RX_LOCKS_DB */
MUTEX_INIT(&rx_stats_mutex, "rx_stats_mutex", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&rx_waiting_mutex, "rx_waiting_mutex", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_quota_mutex, "rx_quota_mutex", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_pthread_mutex, "rx_pthread_mutex", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_packets_mutex, "rx_packets_mutex", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_refcnt_mutex, "rx_refcnt_mutex", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_rpc_stats, "rx_rpc_stats", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_freePktQ_lock, "rx_freePktQ_lock", MUTEX_DEFAULT, 0);
MUTEX_INIT(&freeSQEList_lock, "freeSQEList lock", MUTEX_DEFAULT, 0);
rxi_nCalls = 0;
rx_connDeadTime = 12;
rx_tranquil = 0; /* reset flag */
- memset(&rx_stats, 0, sizeof(struct rx_statistics));
+ rxi_ResetStatistics();
htable = (char *)
osi_Alloc(rx_hashTableSize * sizeof(struct rx_connection *));
PIN(htable, rx_hashTableSize * sizeof(struct rx_connection *)); /* XXXXX */
conn->next = rx_connHashTable[hashindex];
rx_connHashTable[hashindex] = conn;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nClientConns);
MUTEX_EXIT(&rx_connHashTable_lock);
USERPRI;
return conn;
if (rx_stats_active)
{
if (conn->type == RX_SERVER_CONNECTION)
- rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nServerConns);
else
- rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nClientConns);
}
#ifndef KERNEL
if (conn->specific) {
NETPRI;
MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
if (conn->refCount > 0)
conn->refCount--;
else {
if ((conn->refCount > 0) || (conn->flags & RX_CONN_BUSY)) {
/* Busy; wait till the last guy before proceeding */
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
USERPRI;
return;
USERPRI;
return;
}
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
/* Check for extant references to this connection */
SPLVAR;
NETPRI;
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
USERPRI;
}
* effect on overall system performance.
*/
call->state = RX_STATE_RESET;
- CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
MUTEX_EXIT(&conn->conn_call_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
+ MUTEX_EXIT(&rx_refcnt_mutex);
rxi_ResetCall(call, 0);
(*call->callNumber)++;
if (MUTEX_TRYENTER(&conn->conn_call_lock))
* Instead, cycle through one more time to see if
* we can find a call that can call our own.
*/
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_BEGIN);
+ MUTEX_EXIT(&rx_refcnt_mutex);
wait = 0;
}
MUTEX_EXIT(&call->lock);
} else {
/* rxi_NewCall returns with mutex locked */
call = rxi_NewCall(conn, i);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
+ MUTEX_EXIT(&rx_refcnt_mutex);
break;
}
}
if (call->flags & RX_CALL_WAIT_PROC) {
call->flags &= ~RX_CALL_WAIT_PROC;
- MUTEX_ENTER(&rx_waiting_mutex);
- rx_nWaiting--;
- MUTEX_EXIT(&rx_waiting_mutex);
+ rx_atomic_dec(&rx_nWaiting);
}
if (call->state != RX_STATE_PRECALL || call->error) {
call->conn->service->servicePort, call->conn->service->serviceId,
call));
- CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
+ MUTEX_EXIT(&rx_refcnt_mutex);
} else {
dpf(("rx_GetCall(socketp=%p, *socketp=0x%x)\n", socketp, *socketp));
}
rxi_minDeficit--;
rxi_availProcs--;
MUTEX_EXIT(&rx_quota_mutex);
- rx_nWaiting--;
+ rx_atomic_dec(&rx_nWaiting);
/* MUTEX_EXIT(&call->lock); */
} else {
/* If there are no eligible incoming calls, add this process
call->arrivalProc = (void (*)())0;
if (rc && call->error == 0) {
rxi_CallError(call, rc);
+ call->mode = RX_MODE_ERROR;
/* Send an abort message to the peer if this error code has
* only just been set. If it was set previously, assume the
* peer has already been sent the error code or will request it
if (conn->type == RX_SERVER_CONNECTION) {
/* Make sure reply or at least dummy reply is sent */
if (call->mode == RX_MODE_RECEIVING) {
+ MUTEX_EXIT(&call->lock);
rxi_WriteProc(call, 0, 0);
+ MUTEX_ENTER(&call->lock);
}
if (call->mode == RX_MODE_SENDING) {
+ MUTEX_EXIT(&call->lock);
rxi_FlushWrite(call);
+ MUTEX_ENTER(&call->lock);
}
rxi_calltrace(RX_CALL_END, call);
/* Call goes to hold state until reply packets are acknowledged */
* no reply arguments are expected */
if ((call->mode == RX_MODE_SENDING)
|| (call->mode == RX_MODE_RECEIVING && call->rnext == 1)) {
+ MUTEX_EXIT(&call->lock);
(void)rxi_ReadProc(call, &dummy, 1);
+ MUTEX_ENTER(&call->lock);
}
/* If we had an outstanding delayed ack, be nice to the server
call->iovqc -=
#endif /* RXDEBUG_PACKET */
rxi_FreePackets(0, &call->iovq);
+ MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_BEGIN);
- MUTEX_EXIT(&call->lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
if (conn->type == RX_CLIENT_CONNECTION) {
MUTEX_ENTER(&conn->conn_data_lock);
conn->flags &= ~RX_CONN_BUSY;
for (conn = *conn_ptr; conn; conn = next) {
next = conn->next;
if (conn->type == RX_CLIENT_CONNECTION) {
- /* MUTEX_ENTER(&conn->conn_data_lock); when used in kernel */
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++;
- /* MUTEX_EXIT(&conn->conn_data_lock); when used in kernel */
+ MUTEX_EXIT(&rx_refcnt_mutex);
#ifdef RX_ENABLE_LOCKS
rxi_DestroyConnectionNoLock(conn);
#else /* RX_ENABLE_LOCKS */
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
queue_Remove(call);
if (rx_stats_active)
- rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nFreeCallStructs);
MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_ENTER(&call->lock);
CLEAR_CALL_QUEUE_LOCK(call);
call->allNextp = rx_allCallsp;
rx_allCallsp = call;
call->call_id =
+ rx_atomic_inc_and_read(&rx_stats.nCallStructs);
+#else /* RXDEBUG_PACKET */
+ rx_atomic_inc(&rx_stats.nCallStructs);
#endif /* RXDEBUG_PACKET */
- rx_MutexIncrement(rx_stats.nCallStructs, rx_stats_mutex);
MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_INIT(&call->lock, "call lock", MUTEX_DEFAULT, NULL);
/* A call has been inactive long enough that we can throw away
* state, including the call structure, which is placed on the call
* free list.
- * Call is locked upon entry.
- * haveCTLock set if called from rxi_ReapConnections
+ *
+ * call->lock and rx_refcnt_mutex are held upon entry.
+ * haveCTLock is set when called from rxi_ReapConnections.
*/
-#ifdef RX_ENABLE_LOCKS
void
rxi_FreeCall(struct rx_call *call, int haveCTLock)
-#else /* RX_ENABLE_LOCKS */
-void
-rxi_FreeCall(struct rx_call *call)
-#endif /* RX_ENABLE_LOCKS */
{
int channel = call->channel;
struct rx_connection *conn = call->conn;
(*call->callNumber)++;
rxi_ResetCall(call, 0);
call->conn->call[channel] = (struct rx_call *)0;
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_ENTER(&rx_freeCallQueue_lock);
SET_CALL_QUEUE_LOCK(call, &rx_freeCallQueue_lock);
queue_Append(&rx_freeCallQueue, call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nFreeCallStructs);
MUTEX_EXIT(&rx_freeCallQueue_lock);
/* Destroy the connection if it was previously slated for
*/
MUTEX_ENTER(&conn->conn_data_lock);
if (conn->flags & RX_CONN_DESTROY_ME && !(conn->flags & RX_CONN_MAKECALL_WAITING)) {
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++;
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
#ifdef RX_ENABLE_LOCKS
if (haveCTLock)
} else {
MUTEX_EXIT(&conn->conn_data_lock);
}
+ MUTEX_ENTER(&rx_refcnt_mutex);
}
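+/*
+ * A sketch of the caller contract documented above (compare rxi_CheckCall
+ * later in this change): rxi_FreeCall both expects and returns with
+ * rx_refcnt_mutex held, dropping and reacquiring it internally, and the
+ * caller must also hold call->lock:
+ *
+ *     MUTEX_ENTER(&rx_refcnt_mutex);
+ *     if (call->refCount == 0) {
+ *         rxi_FreeCall(call, haveCTLock);  // returns with mutex still held
+ *     }
+ *     MUTEX_EXIT(&rx_refcnt_mutex);
+ */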
-afs_int32 rxi_Alloccnt = 0, rxi_Allocsize = 0;
+rx_atomic_t rxi_Allocsize = RX_ATOMIC_INIT(0);
+rx_atomic_t rxi_Alloccnt = RX_ATOMIC_INIT(0);
+
void *
rxi_Alloc(size_t size)
{
char *p;
- if (rx_stats_active)
- rx_MutexAdd1Increment2(rxi_Allocsize, (afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
+ if (rx_stats_active) {
+ rx_atomic_add(&rxi_Allocsize, (int) size);
+ rx_atomic_inc(&rxi_Alloccnt);
+ }
p = (char *)
#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
void
rxi_Free(void *addr, size_t size)
{
- if (rx_stats_active)
- rx_MutexAdd1Decrement2(rxi_Allocsize, -(afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
+ if (rx_stats_active) {
+ rx_atomic_sub(&rxi_Allocsize, (int) size);
+ rx_atomic_dec(&rxi_Alloccnt);
+ }
osi_Free(addr, size);
}
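+/*
+ * With the allocator counters atomic, a reader no longer needs
+ * rx_stats_mutex. A hypothetical debug helper (rxi_PrintAllocStats is not
+ * part of this change) could report them directly:
+ *
+ *     static void
+ *     rxi_PrintAllocStats(FILE *file)
+ *     {
+ *         fprintf(file, "rxi_Alloc: %d allocations, %d bytes outstanding\n",
+ *                 rx_atomic_read(&rxi_Alloccnt),
+ *                 rx_atomic_read(&rxi_Allocsize));
+ *     }
+ */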
rx_peerHashTable[hashIndex] = pp;
rxi_InitPeerParams(pp);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nPeerStructs);
}
}
if (pp && create) {
if (service->newConnProc)
(*service->newConnProc) (conn);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nServerConns);
}
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
rxLastConn = conn; /* store this connection as the last conn used */
MUTEX_EXIT(&rx_connHashTable_lock);
MUTEX_ENTER(&conn->conn_data_lock);
if (np->header.type != RX_PACKET_TYPE_ABORT)
np = rxi_SendConnectionAbort(conn, np, 1, 0);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
return np;
}
afs_int32 errcode = ntohl(rx_GetInt32(np, 0));
dpf(("rxi_ReceivePacket ABORT rx_GetInt32 = %d", errcode));
rxi_ConnectionError(conn, errcode);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
case RX_PACKET_TYPE_CHALLENGE:
tnp = rxi_ReceiveChallengePacket(conn, np, 1);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return tnp;
case RX_PACKET_TYPE_RESPONSE:
tnp = rxi_ReceiveResponsePacket(conn, np, 1);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return tnp;
case RX_PACKET_TYPE_PARAMS:
case RX_PACKET_TYPE_PARAMS + 1:
case RX_PACKET_TYPE_PARAMS + 2:
/* ignore these packet types for now */
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
rxi_ConnectionError(conn, RX_PROTOCOL_ERROR);
MUTEX_ENTER(&conn->conn_data_lock);
tnp = rxi_SendConnectionAbort(conn, np, 1, 0);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
return tnp;
}
* it must be for the previous call.
*/
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
- MUTEX_ENTER(&conn->conn_data_lock);
+ rx_atomic_inc(&rx_stats.spuriousPacketsRead);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
}
if (type == RX_SERVER_CONNECTION) { /* We're the server */
if (np->header.callNumber < currentCallNumber) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.spuriousPacketsRead);
#ifdef RX_ENABLE_LOCKS
if (call)
MUTEX_EXIT(&call->lock);
#endif
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
if (!call) {
* If the number of queued calls exceeds the overload
* threshold then abort this call.
*/
- if ((rx_BusyThreshold > 0) && (rx_nWaiting > rx_BusyThreshold)) {
+ if ((rx_BusyThreshold > 0) &&
+ (rx_atomic_read(&rx_nWaiting) > rx_BusyThreshold)) {
struct rx_packet *tp;
rxi_CallError(call, rx_BusyError);
tp = rxi_SendCallAbort(call, np, 1, 0);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nBusies);
return tp;
}
rxi_KeepAliveOn(call);
tp = rxi_SendSpecial(call, conn, np, RX_PACKET_TYPE_BUSY,
NULL, 0, 1);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return tp;
}
rxi_ResetCall(call, 0);
* If the number of queued calls exceeds the overload
* threshold then abort this call.
*/
- if ((rx_BusyThreshold > 0) && (rx_nWaiting > rx_BusyThreshold)) {
+ if ((rx_BusyThreshold > 0) &&
+ (rx_atomic_read(&rx_nWaiting) > rx_BusyThreshold)) {
struct rx_packet *tp;
rxi_CallError(call, rx_BusyError);
tp = rxi_SendCallAbort(call, np, 1, 0);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nBusies);
return tp;
}
rxi_KeepAliveOn(call);
if (call && (call->state == RX_STATE_DALLY)
&& (np->header.type == RX_PACKET_TYPE_ACK)) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.ignorePacketDally);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
}
#endif
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
* isn't a current call, then no packet is relevant. */
if (!call || (np->header.callNumber != currentCallNumber)) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.spuriousPacketsRead);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
}
#endif
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
/* If the service security object index stamped in the packet does not
#ifdef RX_ENABLE_LOCKS
MUTEX_EXIT(&call->lock);
#endif
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
#ifdef RX_ENABLE_LOCKS
rxi_SetAcksInTransmitQueue(call);
#else
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np; /* xmitting; drop packet */
#endif
} else {
* XXX code in receiveackpacket. */
if (ntohl(rx_GetInt32(np, FIRSTACKOFFSET)) < call->tfirst) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.spuriousPacketsRead);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
}
dpf(("rxi_ReceivePacket ABORT rx_DataOf = %d", errdata));
rxi_CallError(call, errdata);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np; /* xmitting; drop packet */
}
case RX_PACKET_TYPE_BUSY:
break;
#else /* RX_ENABLE_LOCKS */
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np; /* xmitting; drop packet */
#endif /* RX_ENABLE_LOCKS */
}
* (if not, then the time won't actually be re-evaluated here). */
call->lastReceiveTime = clock_Sec();
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
MUTEX_ENTER(&conn->conn_data_lock);
conn->checkReachEvent = NULL;
waiting = conn->flags & RX_CONN_ATTACHWAIT;
- if (event)
+ if (event) {
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ }
MUTEX_EXIT(&conn->conn_data_lock);
if (waiting) {
when.sec += RX_CHECKREACH_TIMEOUT;
MUTEX_ENTER(&conn->conn_data_lock);
if (!conn->checkReachEvent) {
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++;
+ MUTEX_EXIT(&rx_refcnt_mutex);
conn->checkReachEvent =
rxevent_PostNow(&when, &now, rxi_CheckReachEvent, conn,
NULL);
struct rx_packet *tnp;
struct clock when, now;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.dataPacketsRead);
#ifdef KERNEL
/* If there are no packet buffers, drop this new packet, unless we can find
rxi_NeedMorePackets = TRUE;
MUTEX_EXIT(&rx_freePktQ_lock);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.noPacketBuffersOnRead);
call->rprev = np->header.serial;
rxi_calltrace(RX_TRACE_DROP, call);
dpf(("packet %"AFS_PTR_FMT" dropped on receipt - quota problems", np));
|| clock_Gt(&call->delayedAckEvent->eventTime, &when)) {
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
+ MUTEX_EXIT(&rx_refcnt_mutex);
+
call->delayedAckEvent =
rxevent_PostNow(&when, &now, rxi_SendDelayedAck, call, 0);
}
if (queue_IsNotEmpty(&call->rq)
&& queue_First(&call->rq, rx_packet)->header.seq == seq) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.dupPacketsRead);
dpf(("packet %"AFS_PTR_FMT" dropped on receipt - duplicate", np));
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
* application already, then this is a duplicate */
if (seq < call->rnext) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.dupPacketsRead);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
/*Check for duplicate packet */
if (seq == tp->header.seq) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.dupPacketsRead);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE,
* Send an ack when requested by the peer, or once every
* rxi_SoftAckRate packets until the last packet has been
* received. Always send a soft ack for the last packet in
- * the server's reply. */
- if (ackNeeded) {
+ * the server's reply.
+ *
+ * If we have received all of the packets for the call,
+ * immediately send an RX_PACKET_TYPE_ACKALL packet so that
+ * the peer can empty its packet queue and cancel all resend
+ * events.
+ */
+ if (call->flags & RX_CALL_RECEIVE_DONE) {
+ rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
+ rxi_AckAll(NULL, call, 0);
+ } else if (ackNeeded) {
rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, ackNeeded, istack);
} else if (call->nSoftAcks > (u_short) rxi_SoftAckRate) {
|| clock_Gt(&call->delayedAckEvent->eventTime, &when)) {
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
+ MUTEX_EXIT(&rx_refcnt_mutex);
call->delayedAckEvent =
rxevent_PostNow(&when, &now, rxi_SendDelayedAck, call, 0);
}
- } else if (call->flags & RX_CALL_RECEIVE_DONE) {
- rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
}
return np;
*/
static void
rxi_ComputePeerNetStats(struct rx_call *call, struct rx_packet *p,
- struct rx_ackPacket *ap, struct rx_packet *np)
+ struct rx_ackPacket *ap, struct rx_packet *np,
+ struct clock *now)
{
struct rx_peer *peer = call->conn->peer;
if (!(p->flags & RX_PKTFLAG_ACKED) &&
ap->reason != RX_ACK_DELAY &&
clock_Eq(&p->timeSent, &p->firstSent))
- rxi_ComputeRoundTripTime(p, &p->timeSent, peer);
+ rxi_ComputeRoundTripTime(p, &p->timeSent, peer, now);
#ifdef ADAPT_WINDOW
rxi_ComputeRate(peer, call, p, np, ap->reason);
#endif
struct rx_packet *nxp; /* Next packet pointer for queue_Scan */
struct rx_connection *conn = call->conn;
struct rx_peer *peer = conn->peer;
+ struct clock now; /* Current time, for RTT calculations */
afs_uint32 first;
afs_uint32 serial;
/* because there are CM's that are bogus, sending weird values for this. */
int conn_data_locked = 0;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.ackPacketsRead);
ap = (struct rx_ackPacket *)rx_DataOf(np);
nbytes = rx_Contiguous(np) - (int)((ap->acks) - (u_char *) ap);
if (nbytes < 0)
* acknowledged as having been sent to the peer's upper level.
* All other packets must be retained. So only packets with
* sequence numbers < ap->firstPacket are candidates. */
+
+ clock_GetTime(&now);
+
for (queue_Scan(&call->tq, tp, nxp, rx_packet)) {
if (tp->header.seq >= first)
break;
call->tfirst = tp->header.seq + 1;
- rxi_ComputePeerNetStats(call, tp, ap, np);
+ rxi_ComputePeerNetStats(call, tp, ap, np, &now);
if (!(tp->flags & RX_PKTFLAG_ACKED)) {
newAckCount++;
}
if (tp->header.seq >= first)
#endif /* RX_ENABLE_LOCKS */
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- rxi_ComputePeerNetStats(call, tp, ap, np);
+ rxi_ComputePeerNetStats(call, tp, ap, np, &now);
/* Set the acknowledge flag per packet based on the
* information in the ack packet. An acknowledged packet can
if (!(call->flags & RX_CALL_WAIT_PROC)) {
call->flags |= RX_CALL_WAIT_PROC;
- MUTEX_ENTER(&rx_waiting_mutex);
- rx_nWaiting++;
- rx_nWaited++;
- MUTEX_EXIT(&rx_waiting_mutex);
+ rx_atomic_inc(&rx_nWaiting);
+ rx_atomic_inc(&rx_nWaited);
rxi_calltrace(RX_CALL_ARRIVAL, call);
SET_CALL_QUEUE_LOCK(call, &rx_serverPool_lock);
queue_Append(&rx_incomingCallQueue, call);
*tnop = sq->tno;
*sq->socketp = socket;
clock_GetTime(&call->startTime);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
+ MUTEX_EXIT(&rx_refcnt_mutex);
} else {
sq->newcall = call;
}
if (queue_IsOnQueue(call)) {
queue_Remove(call);
- MUTEX_ENTER(&rx_waiting_mutex);
- rx_nWaiting--;
- MUTEX_EXIT(&rx_waiting_mutex);
+ rx_atomic_dec(&rx_nWaiting);
}
}
call->state = RX_STATE_ACTIVE;
if (event) {
MUTEX_ENTER(&call->lock);
call->delayedAckEvent = NULL;
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_ACKALL);
+ MUTEX_EXIT(&rx_refcnt_mutex);
}
rxi_SendSpecial(call, call->conn, (struct rx_packet *)0,
RX_PACKET_TYPE_ACKALL, NULL, 0, 0);
MUTEX_ENTER(&call->lock);
if (event == call->delayedAckEvent)
call->delayedAckEvent = NULL;
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_DELAY);
+ MUTEX_EXIT(&rx_refcnt_mutex);
}
(void)rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
if (event)
clock_GetTime(&now);
when = now;
clock_Addmsec(&when, rxi_callAbortDelay);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_ABORT);
+ MUTEX_EXIT(&rx_refcnt_mutex);
call->delayedAbortEvent =
rxevent_PostNow(&when, &now, rxi_SendDelayedCallAbort, call, 0);
}
rxevent_Cancel(conn->checkReachEvent, (struct rx_call *)0, 0);
conn->checkReachEvent = 0;
conn->flags &= ~RX_CONN_ATTACHWAIT;
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
}
MUTEX_EXIT(&conn->conn_data_lock);
for (i = 0; i < RX_MAXCALLS; i++) {
}
conn->error = error;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.fatalErrors);
}
}
rxi_ResetCall(call, 0);
#endif
call->error = error;
- call->mode = RX_MODE_ERROR;
}
/* Reset various fields in a call structure, and wakeup waiting
rxi_ClearReceiveQueue(call);
/* why init the queue if you just emptied it? queue_Init(&call->rq); */
- if (call->currentPacket) {
-#ifdef RX_TRACK_PACKETS
- call->currentPacket->flags &= ~RX_PKTFLAG_CP;
- call->currentPacket->flags |= RX_PKTFLAG_IOVQ;
-#endif
- queue_Prepend(&call->iovq, call->currentPacket);
-#ifdef RXDEBUG_PACKET
- call->iovqc++;
-#endif /* RXDEBUG_PACKET */
- call->currentPacket = (struct rx_packet *)0;
- }
- call->curlen = call->nLeft = call->nFree = 0;
-
-#ifdef RXDEBUG_PACKET
- call->iovqc -=
-#endif
- rxi_FreePackets(0, &call->iovq);
call->error = 0;
call->twind = call->conn->twind[call->channel];
if (queue_IsOnQueue(call)) {
queue_Remove(call);
if (flags & RX_CALL_WAIT_PROC) {
-
- MUTEX_ENTER(&rx_waiting_mutex);
- rx_nWaiting--;
- MUTEX_EXIT(&rx_waiting_mutex);
+ rx_atomic_dec(&rx_nWaiting);
}
}
MUTEX_EXIT(call->call_queue_lock);
if (queue_IsOnQueue(call)) {
queue_Remove(call);
if (flags & RX_CALL_WAIT_PROC)
- rx_nWaiting--;
+ rx_atomic_dec(&rx_nWaiting);
}
#endif /* RX_ENABLE_LOCKS */
}
}
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.ackPacketsSent);
#ifndef RX_ENABLE_TSFPQ
if (!optionalPacket)
rxi_FreePacket(p);
if (rx_stats_active) {
if (resending)
- rx_MutexAdd(rx_stats.dataPacketsReSent, len, rx_stats_mutex);
+ rx_atomic_add(&rx_stats.dataPacketsReSent, len);
else
- rx_MutexAdd(rx_stats.dataPacketsSent, len, rx_stats_mutex);
+ rx_atomic_add(&rx_stats.dataPacketsSent, len);
}
if (list[len - 1]->header.flags & RX_LAST_PACKET) {
* safe to nuke any scheduled end-of-packets ack */
rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
- CALL_HOLD(call, RX_CALL_REFCOUNT_SEND);
MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ CALL_HOLD(call, RX_CALL_REFCOUNT_SEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
if (len > 1) {
rxi_SendPacketList(call, conn, list, len, istack);
} else {
rxi_SendPacket(call, conn, list[0], istack);
}
MUTEX_ENTER(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_SEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
/* Update last send time for this call (for keep-alive
* processing), and for the connection (so that we can discover
* structure, since there is no longer a per-call retransmission
* event pending. */
if (event && event == call->resendEvent) {
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_RESEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
call->resendEvent = NULL;
resending = 1;
if (queue_IsEmpty(&call->tq)) {
if (call->error) {
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
if (rx_stats_active)
- rx_MutexIncrement(rx_tq_debug.rxi_start_in_error, rx_stats_mutex);
+ rx_atomic_inc(&rx_tq_debug.rxi_start_in_error);
#endif
return;
}
* recent additions.
* Do a dance to avoid blocking after setting now. */
MUTEX_ENTER(&peer->peer_lock);
- retryTime = peer->timeout;
+ retryTime = peer->timeout;
MUTEX_EXIT(&peer->peer_lock);
+
clock_GetTime(&now);
clock_Add(&retryTime, &now);
usenow = now;
/* Since we may block, don't trust this */
usenow.sec = usenow.usec = 0;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.ignoreAckedPacket);
continue; /* Ignore this packet if it has been acknowledged */
}
* process that the call is in an error state.
*/
if (rx_stats_active)
- rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
+ rx_atomic_inc(&rx_tq_debug.rxi_start_aborted);
call->flags &= ~RX_CALL_TQ_BUSY;
if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
dpf(("call error %d while xmit %p has %d waiters and flags %d\n",
/* Post a new event to re-run rxi_Start when retries may be needed */
if (haveEvent && !(call->flags & RX_CALL_NEED_START)) {
#ifdef RX_ENABLE_LOCKS
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_RESEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
call->resendEvent =
rxevent_PostNow2(&retryTime, &usenow,
rxi_StartUnlocked,
rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
/* Actually send the packet, filling in more connection-specific fields */
- CALL_HOLD(call, RX_CALL_REFCOUNT_SEND);
MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ CALL_HOLD(call, RX_CALL_REFCOUNT_SEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
rxi_SendPacket(call, conn, p, istack);
- MUTEX_ENTER(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_SEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ MUTEX_ENTER(&call->lock);
/* Update last send time for this call (for keep-alive
* processing), and for the connection (so that we can discover
rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
rxevent_Cancel(call->keepAliveEvent, call,
RX_CALL_REFCOUNT_ALIVE);
+ MUTEX_ENTER(&rx_refcnt_mutex);
if (call->refCount == 0) {
rxi_FreeCall(call, haveCTLock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return -2;
}
+ MUTEX_EXIT(&rx_refcnt_mutex);
return -1;
#else /* RX_ENABLE_LOCKS */
- rxi_FreeCall(call);
+ rxi_FreeCall(call, 0);
return -2;
#endif /* RX_ENABLE_LOCKS */
}
}
return 0;
mtuout:
- if (conn->msgsizeRetryErr && cerror != RX_CALL_TIMEOUT) {
+ if (conn->msgsizeRetryErr && cerror != RX_CALL_TIMEOUT
+ && call->lastReceiveTime) {
int oldMTU = conn->peer->ifMTU;
/* if we thought we could send more, perhaps things got worse */
- if (call->conn->peer->maxPacketSize > conn->lastPacketSize)
+ if (conn->peer->maxPacketSize > conn->lastPacketSize)
/* maxpacketsize will be cleared in rxi_SetPeerMtu */
newmtu = MAX(conn->peer->maxPacketSize-RX_IPUDP_SIZE,
conn->lastPacketSize-(128+RX_IPUDP_SIZE));
osi_NetSend(socket, &taddr, tmpiov, 1, 1 + sizeof(struct rx_header), 1);
MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
/* Only reschedule ourselves if the connection would not be destroyed */
if (conn->refCount <= 1) {
conn->natKeepAliveEvent = NULL;
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
rx_DestroyConnection(conn); /* drop the reference for this */
} else {
- conn->natKeepAliveEvent = NULL;
conn->refCount--; /* drop the reference for this */
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ conn->natKeepAliveEvent = NULL;
rxi_ScheduleNatKeepAliveEvent(conn);
MUTEX_EXIT(&conn->conn_data_lock);
}
clock_GetTime(&now);
when = now;
when.sec += conn->secondsUntilNatPing;
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++; /* hold a reference for this */
+ MUTEX_EXIT(&rx_refcnt_mutex);
conn->natKeepAliveEvent =
rxevent_PostNow(&when, &now, rxi_NatKeepAliveEvent, conn, 0);
}
struct rx_connection *conn;
afs_uint32 now;
- MUTEX_ENTER(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_ALIVE);
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ MUTEX_ENTER(&call->lock);
if (event == call->keepAliveEvent)
call->keepAliveEvent = NULL;
now = clock_Sec();
struct rx_call *call = arg1;
struct rx_connection *conn;
- MUTEX_ENTER(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_ALIVE);
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ MUTEX_ENTER(&call->lock);
+
if (event == call->growMTUEvent)
call->growMTUEvent = NULL;
clock_GetTime(&now);
when = now;
when.sec += call->conn->secondsUntilPing;
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_ALIVE);
+ MUTEX_EXIT(&rx_refcnt_mutex);
call->keepAliveEvent =
rxevent_PostNow(&when, &now, rxi_KeepAliveEvent, call, 0);
}
}
when.sec += secs;
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_ALIVE);
+ MUTEX_EXIT(&rx_refcnt_mutex);
call->growMTUEvent =
rxevent_PostNow(&when, &now, rxi_GrowMTUEvent, call, 0);
}
(char *)&error, sizeof(error), 0);
rxi_FreePacket(packet);
}
- CALL_RELE(call, RX_CALL_REFCOUNT_ABORT);
MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ CALL_RELE(call, RX_CALL_REFCOUNT_ABORT);
+ MUTEX_EXIT(&rx_refcnt_mutex);
}
/* This routine is called periodically (every RX_AUTH_REQUEST_TIMEOUT
/* rxi_ComputeRoundTripTime is called with peer locked. */
/* sentp and/or peer may be null */
-void
+static void
rxi_ComputeRoundTripTime(struct rx_packet *p,
struct clock *sentp,
- struct rx_peer *peer)
+ struct rx_peer *peer,
+ struct clock *now)
{
struct clock thisRtt, *rttp = &thisRtt;
-
int rtt_timeout;
- clock_GetTime(rttp);
+ thisRtt = *now;
- if (clock_Lt(rttp, sentp)) {
- clock_Zero(rttp);
+ if (clock_Lt(rttp, sentp))
return; /* somebody set the clock back, don't count this time. */
- }
+
clock_Sub(rttp, sentp);
dpf(("rxi_ComputeRoundTripTime(call=%d packet=%"AFS_PTR_FMT" rttp=%d.%06d sec)\n",
p->header.callNumber, p, rttp->sec, rttp->usec));
rx_stats.maxRtt = *rttp;
}
clock_Add(&rx_stats.totalRtt, rttp);
- rx_stats.nRttSamples++;
+ rx_atomic_inc(&rx_stats.nRttSamples);
MUTEX_EXIT(&rx_stats_mutex);
}
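/*
 * Design note: threading "now" from the ack handler through
 * rxi_ComputePeerNetStats into rxi_ComputeRoundTripTime samples the clock
 * once per ack packet instead of once per acknowledged packet:
 *
 *     clock_GetTime(&now);                         // once per ack
 *     for (queue_Scan(&call->tq, tp, nxp, rx_packet))
 *         rxi_ComputePeerNetStats(call, tp, ap, np, &now);
 */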
/* This only actually destroys the connection if
* there are no outstanding calls */
MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
if (!havecalls && !conn->refCount
&& ((conn->lastSendTime + rx_idleConnectionTime) <
now.sec)) {
conn->refCount++; /* it will be decr in rx_DestroyConn */
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
#ifdef RX_ENABLE_LOCKS
rxi_DestroyConnectionNoLock(conn);
}
#ifdef RX_ENABLE_LOCKS
else {
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
}
#endif /* RX_ENABLE_LOCKS */
prev->next = next;
if (rx_stats_active)
- rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nPeerStructs);
/*
* Now if we hold references on 'prev' and 'next'
} else {
return;
}
- xferSize = rx_AckDataSize(rx_Window) + RX_HEADER_SIZE;
+ xferSize = rx_AckDataSize(rx_maxSendWindow) + RX_HEADER_SIZE;
break;
default:
* one packet exchange */
if (clock_Gt(&newTO, &peer->timeout)) {
- dpf(("CONG peer %lx/%u: timeout %d.%06d ==> %ld.%06d (rtt %u, ps %u)",
+ dpf(("CONG peer %lx/%u: timeout %d.%06d ==> %ld.%06d (rtt %u)",
ntohl(peer->host), ntohs(peer->port), peer->timeout.sec, peer->timeout.usec,
- newTO.sec, newTO.usec, peer->smRtt, peer->packetSize));
+ newTO.sec, newTO.usec, peer->smRtt));
peer->timeout = newTO;
}
/* Now, convert to the number of full packets that could fit in a
* reasonable fraction of that interval */
minTime /= (peer->smRtt << 1);
+ minTime = MAX(minTime, rx_minPeerTimeout);
xferSize = minTime; /* (make a copy) */
/* Now clamp the size to reasonable bounds. */
if (minTime <= 1)
minTime = 1;
- else if (minTime > rx_Window)
- minTime = rx_Window;
+ else if (minTime > rx_maxSendWindow)
+ minTime = rx_maxSendWindow;
/* if (minTime != peer->maxWindow) {
- dpf(("CONG peer %lx/%u: windowsize %lu ==> %lu (to %lu.%06lu, rtt %u, ps %u)",
+ dpf(("CONG peer %lx/%u: windowsize %lu ==> %lu (to %lu.%06lu, rtt %u)",
ntohl(peer->host), ntohs(peer->port), peer->maxWindow, minTime,
- peer->timeout.sec, peer->timeout.usec, peer->smRtt,
- peer->packetSize));
+ peer->timeout.sec, peer->timeout.usec, peer->smRtt));
peer->maxWindow = minTime;
elide... call->twind = minTime;
}
/* Cut back on the peer timeout if it had earlier grown unreasonably.
* Discern this by calculating the timeout necessary for rx_Window
* packets. */
- if ((xferSize > rx_Window) && (peer->timeout.sec >= 3)) {
+ if ((xferSize > rx_maxSendWindow) && (peer->timeout.sec >= 3)) {
/* calculate estimate for transmission interval in milliseconds */
- minTime = rx_Window * peer->smRtt;
+ minTime = rx_maxSendWindow * peer->smRtt;
if (minTime < 1000) {
- dpf(("CONG peer %lx/%u: cut TO %d.%06d by 0.5 (rtt %u, ps %u)",
+ dpf(("CONG peer %lx/%u: cut TO %d.%06d by 0.5 (rtt %u)",
ntohl(peer->host), ntohs(peer->port), peer->timeout.sec,
- peer->timeout.usec, peer->smRtt, peer->packetSize));
+ peer->timeout.usec, peer->smRtt));
newTO.sec = 0; /* cut back on timeout by half a second */
newTO.usec = 500000;
rx_PrintTheseStats(FILE * file, struct rx_statistics *s, int size,
afs_int32 freePackets, char version)
{
-#ifdef RXDEBUG
int i;
if (size != sizeof(struct rx_statistics)) {
#if !defined(AFS_PTHREAD_ENV) && !defined(AFS_USE_GETTIMEOFDAY)
fprintf(file, " %d clock updates\n", clock_nUpdates);
#endif
-#else
- fprintf(file, "ERROR: compiled without RXDEBUG\n");
-#endif
}
/* for backward compatibility */
rx_PrintStats(FILE * file)
{
MUTEX_ENTER(&rx_stats_mutex);
- rx_PrintTheseStats(file, &rx_stats, sizeof(rx_stats), rx_nFreePackets,
+ rx_PrintTheseStats(file, (struct rx_statistics *) &rx_stats,
+ sizeof(rx_stats), rx_nFreePackets,
RX_DEBUGI_VERSION);
MUTEX_EXIT(&rx_stats_mutex);
}
rx_PrintPeerStats(FILE * file, struct rx_peer *peer)
{
fprintf(file, "Peer %x.%d. " "Burst size %d, " "burst wait %d.%06d.\n",
- ntohl(peer->host), (int)peer->port, (int)peer->burstSize,
+ ntohl(peer->host), (int)ntohs(peer->port), (int)peer->burstSize,
(int)peer->burstWait.sec, (int)peer->burstWait.usec);
fprintf(file,
#define UNLOCK_RX_DEBUG
#endif /* AFS_PTHREAD_ENV */
-#ifdef RXDEBUG
+#if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
static int
MakeDebugCall(osi_socket socket, afs_uint32 remoteAddr, afs_uint16 remotePort,
u_char type, void *inputData, size_t inputLength,
afs_uint16 remotePort, struct rx_debugStats * stat,
afs_uint32 * supportedValues)
{
-#ifndef RXDEBUG
- afs_int32 rc = -1;
-#else
+#if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
afs_int32 rc = 0;
struct rx_debugIn in;
stat->nWaited = ntohl(stat->nWaited);
stat->nPackets = ntohl(stat->nPackets);
}
+#else
+ afs_int32 rc = -1;
#endif
return rc;
}
afs_uint16 remotePort, struct rx_statistics * stat,
afs_uint32 * supportedValues)
{
-#ifndef RXDEBUG
- afs_int32 rc = -1;
-#else
+#if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
afs_int32 rc = 0;
struct rx_debugIn in;
afs_int32 *lp = (afs_int32 *) stat;
*lp = ntohl(*lp);
}
}
+#else
+ afs_int32 rc = -1;
#endif
return rc;
}
afs_uint16 remotePort, size_t version_length,
char *version)
{
-#ifdef RXDEBUG
+#if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
char a[1] = { 0 };
return MakeDebugCall(socket, remoteAddr, remotePort,
RX_PACKET_TYPE_VERSION, a, 1, version,
struct rx_debugConn * conn,
afs_uint32 * supportedValues)
{
-#ifndef RXDEBUG
- afs_int32 rc = -1;
-#else
+#if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
afs_int32 rc = 0;
struct rx_debugIn in;
int i;
conn->epoch = ntohl(conn->epoch);
conn->natMTU = ntohl(conn->natMTU);
}
+#else
+ afs_int32 rc = -1;
#endif
return rc;
}
afs_uint32 debugSupportedValues, struct rx_debugPeer * peer,
afs_uint32 * supportedValues)
{
-#ifndef RXDEBUG
- afs_int32 rc = -1;
-#else
+#if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
afs_int32 rc = 0;
struct rx_debugIn in;
peer->bytesReceived.high = ntohl(peer->bytesReceived.high);
peer->bytesReceived.low = ntohl(peer->bytesReceived.low);
}
+#else
+ afs_int32 rc = -1;
#endif
return rc;
}
next = peer->next;
rxi_FreePeer(peer);
if (rx_stats_active)
- rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nPeerStructs);
}
MUTEX_EXIT(&rx_peerHashTable_lock);
}