/*
* Copyright 2000, International Business Machines Corporation and others.
* All Rights Reserved.
- *
+ *
* This software has been released under the terms of the IBM Public
* License. For details, see the LICENSE file in the top-level source
* directory or online at http://www.openafs.org/dl/license10.html
#include "rx.h"
#include "rx_globals.h"
#include "rx_trace.h"
+#include "rx_atomic.h"
+#include "rx_internal.h"
+#include "rx_stats.h"
#define AFSOP_STOP_RXCALLBACK 210 /* Stop CALLBACK process */
#define AFSOP_STOP_AFS 211 /* Stop AFS process */
#define AFSOP_STOP_BKG 212 /* Stop BKG process */
#include "sys/lockl.h"
#include "sys/lock_def.h"
#endif /* AFS_AIX41_ENV */
-# include "rxgen_consts.h"
+# include "afs/rxgen_consts.h"
#else /* KERNEL */
# include <sys/types.h>
# include <string.h>
# include "rx_user.h"
# include "rx_clock.h"
# include "rx_queue.h"
+# include "rx_atomic.h"
# include "rx_globals.h"
# include "rx_trace.h"
+# include "rx_internal.h"
+# include "rx_stats.h"
# include <afs/rxgen_consts.h>
#endif /* KERNEL */
/* Local static routines */
static void rxi_DestroyConnectionNoLock(struct rx_connection *conn);
+static void rxi_ComputeRoundTripTime(struct rx_packet *, struct clock *,
+ struct rx_peer *, struct clock *);
+
#ifdef RX_ENABLE_LOCKS
static void rxi_SetAcksInTransmitQueue(struct rx_call *call);
#endif
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
struct rx_tq_debug {
- afs_int32 rxi_start_aborted; /* rxi_start awoke after rxi_Send in error. */
- afs_int32 rxi_start_in_error;
+ rx_atomic_t rxi_start_aborted; /* rxi_start awoke after rxi_Send in error.*/
+ rx_atomic_t rxi_start_in_error;
} rx_tq_debug;
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
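/*
 * Illustrative sketch, not part of this change: with the fields above kept as
 * rx_atomic_t, the transmit-queue debug counters are bumped lock-free, e.g.
 *
 *     rx_atomic_inc(&rx_tq_debug.rxi_start_in_error);
 *
 * and can be sampled with rx_atomic_read() without taking rx_stats_mutex.
 */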
static unsigned int rxi_rpc_process_stat_cnt;
+rx_atomic_t rx_nWaiting = RX_ATOMIC_INIT(0);
+rx_atomic_t rx_nWaited = RX_ATOMIC_INIT(0);
+
#if !defined(offsetof)
#include <stddef.h> /* for definition of offsetof() */
#endif
+#ifdef RX_ENABLE_LOCKS
+afs_kmutex_t rx_atomic_mutex;
+#endif
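/*
 * Illustrative sketch, not part of this change: the pattern applied throughout
 * this patch is to replace mutex-guarded counters such as
 *
 *     MUTEX_ENTER(&rx_waiting_mutex);
 *     rx_nWaiting++;
 *     MUTEX_EXIT(&rx_waiting_mutex);
 *
 * with the rx_atomic primitives, e.g.
 *
 *     rx_atomic_inc(&rx_nWaiting);
 *     if (rx_atomic_read(&rx_nWaiting) > rx_BusyThreshold)
 *         ...;
 *
 * On platforms without native atomic operations, rx_atomic_mutex above is
 * assumed to serialize these accesses.
 */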
+
#ifdef AFS_PTHREAD_ENV
#include <assert.h>
* to ease NT porting
*/
-extern afs_kmutex_t rx_stats_mutex;
-extern afs_kmutex_t rx_waiting_mutex;
extern afs_kmutex_t rx_quota_mutex;
extern afs_kmutex_t rx_pthread_mutex;
extern afs_kmutex_t rx_packets_mutex;
+extern afs_kmutex_t rx_refcnt_mutex;
extern afs_kmutex_t des_init_mutex;
extern afs_kmutex_t des_random_mutex;
extern afs_kmutex_t rx_clock_mutex;
{
MUTEX_INIT(&rx_clock_mutex, "clock", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_stats_mutex, "stats", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&rx_waiting_mutex, "waiting", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_atomic_mutex, "atomic", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_quota_mutex, "quota", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_pthread_mutex, "pthread", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_packets_mutex, "packets", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_refcnt_mutex, "refcnts", MUTEX_DEFAULT, 0);
MUTEX_INIT(&epoch_mutex, "epoch", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_init_mutex, "init", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_event_mutex, "event", MUTEX_DEFAULT, 0);
== 0);
assert(pthread_key_create(&rx_thread_id_key, NULL) == 0);
assert(pthread_key_create(&rx_ts_info_key, NULL) == 0);
-
+
rxkad_global_stats_init();
MUTEX_INIT(&rx_rpc_stats, "rx_rpc_stats", MUTEX_DEFAULT, 0);
* rxi_totalMin
*/
-/*
+/*
* The rx_freePktQ_lock protects the following global variables:
- * rx_nFreePackets
+ * rx_nFreePackets
*/
/*
/*
* The rx_pthread_mutex mutex protects the following global variables:
- * rxi_pthread_hinum
+ * rxi_fcfs_thread_num
*/
#else
#define INIT_PTHREAD_LOCKS
* are locked. To this end, the code has been modified under #ifdef
* RX_ENABLE_LOCKS so that quota checks and reservation occur at the
* same time. A new function, ReturnToServerPool() returns the allocation.
- *
+ *
 * A call can be on several queues (but only one at a time). When
* rxi_ResetCall wants to remove the call from a queue, it has to ensure
* that no one else is touching the queue. To this end, we store the address
void *arg1, int istack);
#endif
-/* We keep a "last conn pointer" in rxi_FindConnection. The odds are
-** pretty good that the next packet coming in is from the same connection
+/* We keep a "last conn pointer" in rxi_FindConnection. The odds are
+** pretty good that the next packet coming in is from the same connection
** as the last packet, since we're sending multiple packets in a transmit window.
*/
struct rx_connection *rxLastConn = 0;
* lowest level:
* multi_handle->lock
* rxevent_lock
+ * rx_packets_mutex
* rx_stats_mutex
+ * rx_refcnt_mutex
+ * rx_atomic_mutex
*
* Do we need a lock to protect the peer field in the conn structure?
* conn->peer was previously a constant for all intents and so has no
#endif /* KERNEL */
char *htable, *ptable;
int tmp_status;
-
+
SPLVAR;
-
+
INIT_PTHREAD_LOCKS;
LOCK_RX_INIT;
if (rxinit_status == 0) {
rxdb_init();
#endif /* RX_LOCKS_DB */
MUTEX_INIT(&rx_stats_mutex, "rx_stats_mutex", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&rx_waiting_mutex, "rx_waiting_mutex", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_quota_mutex, "rx_quota_mutex", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_pthread_mutex, "rx_pthread_mutex", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_packets_mutex, "rx_packets_mutex", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_refcnt_mutex, "rx_refcnt_mutex", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_rpc_stats, "rx_rpc_stats", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_freePktQ_lock, "rx_freePktQ_lock", MUTEX_DEFAULT, 0);
MUTEX_INIT(&freeSQEList_lock, "freeSQEList lock", MUTEX_DEFAULT, 0);
rxi_nCalls = 0;
rx_connDeadTime = 12;
rx_tranquil = 0; /* reset flag */
- memset(&rx_stats, 0, sizeof(struct rx_statistics));
+ rxi_ResetStatistics();
htable = (char *)
osi_Alloc(rx_hashTableSize * sizeof(struct rx_connection *));
PIN(htable, rx_hashTableSize * sizeof(struct rx_connection *)); /* XXXXX */
}
#ifdef RX_ENABLE_TSFPQ
/* no use leaving packets around in this thread's local queue if
- * it isn't getting donated to the server thread pool.
+ * it isn't getting donated to the server thread pool.
*/
rxi_FlushLocalPacketsTSFPQ();
#endif /* RX_ENABLE_TSFPQ */
conn->next = rx_connHashTable[hashindex];
rx_connHashTable[hashindex] = conn;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nClientConns);
MUTEX_EXIT(&rx_connHashTable_lock);
USERPRI;
return conn;
}
+/**
+ * Ensure a connection's timeout values are valid.
+ *
+ * @param[in] conn The connection to check
+ *
+ * @post conn->secondsUntilDead <= conn->idleDeadTime <= conn->hardDeadTime,
+ * unless idleDeadTime and/or hardDeadTime are not set
+ * @internal
+ */
+static void
+rxi_CheckConnTimeouts(struct rx_connection *conn)
+{
+ /* a connection's timeouts must have the relationship
+ * deadTime <= idleDeadTime <= hardDeadTime. Otherwise, for example, a
+ * total loss of network to a peer may cause an idle timeout instead of a
+ * dead timeout, simply because the idle timeout gets hit first. Also set
+ * a minimum deadTime of 6, just to ensure it doesn't get set too low. */
+ /* this logic is slightly complicated by the fact that
+ * idleDeadTime/hardDeadTime may not be set at all, but it's not too bad.
+ */
+ conn->secondsUntilDead = MAX(conn->secondsUntilDead, 6);
+ if (conn->idleDeadTime) {
+ conn->idleDeadTime = MAX(conn->idleDeadTime, conn->secondsUntilDead);
+ }
+ if (conn->hardDeadTime) {
+ if (conn->idleDeadTime) {
+ conn->hardDeadTime = MAX(conn->idleDeadTime, conn->hardDeadTime);
+ } else {
+ conn->hardDeadTime = MAX(conn->secondsUntilDead, conn->hardDeadTime);
+ }
+ }
+}
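/*
 * Worked example (illustrative only): with secondsUntilDead = 50,
 * idleDeadTime = 30 and hardDeadTime = 40, the clamping above leaves
 * secondsUntilDead at 50, raises idleDeadTime to MAX(30, 50) = 50 and raises
 * hardDeadTime to MAX(50, 40) = 50, restoring
 * secondsUntilDead <= idleDeadTime <= hardDeadTime.
 */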
+
void
rx_SetConnDeadTime(struct rx_connection *conn, int seconds)
{
/* The idea is to set the dead time to a value that allows several
* keepalives to be dropped without timing out the connection. */
- conn->secondsUntilDead = MAX(seconds, 6);
+ conn->secondsUntilDead = seconds;
+ rxi_CheckConnTimeouts(conn);
conn->secondsUntilPing = conn->secondsUntilDead / 6;
}
+void
+rx_SetConnHardDeadTime(struct rx_connection *conn, int seconds)
+{
+ conn->hardDeadTime = seconds;
+ rxi_CheckConnTimeouts(conn);
+}
+
+void
+rx_SetConnIdleDeadTime(struct rx_connection *conn, int seconds)
+{
+ conn->idleDeadTime = seconds;
+ rxi_CheckConnTimeouts(conn);
+}
+
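/*
 * Minimal usage sketch, assuming ordinary client-side setup code (the
 * connection-creation call and its arguments are only an example, not part of
 * this change). Each setter re-runs rxi_CheckConnTimeouts(), so the timeout
 * invariant is re-established after every call:
 *
 *     struct rx_connection *conn = rx_NewConnection(host, port, service,
 *                                                   secobj, secindex);
 *     rx_SetConnDeadTime(conn, 12);
 *     rx_SetConnIdleDeadTime(conn, 60);
 *     rx_SetConnHardDeadTime(conn, 120);
 */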
int rxi_lowPeerRefCount = 0;
int rxi_lowConnRefCount = 0;
if (rx_stats_active)
{
if (conn->type == RX_SERVER_CONNECTION)
- rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nServerConns);
else
- rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nClientConns);
}
#ifndef KERNEL
if (conn->specific) {
NETPRI;
MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
if (conn->refCount > 0)
conn->refCount--;
else {
if ((conn->refCount > 0) || (conn->flags & RX_CONN_BUSY)) {
/* Busy; wait till the last guy before proceeding */
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
USERPRI;
return;
USERPRI;
return;
}
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
/* Check for extant references to this connection */
SPLVAR;
NETPRI;
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
USERPRI;
}
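/*
 * Illustrative note, not part of this change: connection and call reference
 * counts are now adjusted under the global rx_refcnt_mutex rather than the
 * per-connection conn_data_lock, e.g.
 *
 *     MUTEX_ENTER(&rx_refcnt_mutex);
 *     conn->refCount++;
 *     MUTEX_EXIT(&rx_refcnt_mutex);
 *
 * The same bracketing is applied around CALL_HOLD()/CALL_RELE() below.
 */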
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
-/* Wait for the transmit queue to no longer be busy.
+/* Wait for the transmit queue to no longer be busy.
* requires the call->lock to be held */
static void rxi_WaitforTQBusy(struct rx_call *call) {
while (call->flags & RX_CALL_TQ_BUSY) {
* 0. Maxtime gives the maximum number of seconds this call may take,
* after rx_NewCall returns. After this time interval, a call to any
* of rx_SendData, rx_ReadData, etc. will fail with RX_CALL_TIMEOUT.
- * For fine grain locking, we hold the conn_call_lock in order to
+ * For fine grain locking, we hold the conn_call_lock in order to
 * ensure that we don't get signalled after we have found a call in an active
* state and before we go to sleep.
*/
* If so, let them go first to avoid starving them.
* This is a fairly simple scheme, and might not be
* a complete solution for large numbers of waiters.
- *
- * makeCallWaiters keeps track of the number of
- * threads waiting to make calls and the
- * RX_CONN_MAKECALL_WAITING flag bit is used to
+ *
+ * makeCallWaiters keeps track of the number of
+ * threads waiting to make calls and the
+ * RX_CONN_MAKECALL_WAITING flag bit is used to
* indicate that there are indeed calls waiting.
* The flag is set when the waiter is incremented.
* It is only cleared when makeCallWaiters is 0.
conn->makeCallWaiters--;
if (conn->makeCallWaiters == 0)
conn->flags &= ~RX_CONN_MAKECALL_WAITING;
- }
+ }
/* We are now the active thread in rx_NewCall */
conn->flags |= RX_CONN_MAKECALL_ACTIVE;
* effect on overall system performance.
*/
call->state = RX_STATE_RESET;
- CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
MUTEX_EXIT(&conn->conn_call_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
+ MUTEX_EXIT(&rx_refcnt_mutex);
rxi_ResetCall(call, 0);
(*call->callNumber)++;
if (MUTEX_TRYENTER(&conn->conn_call_lock))
* Instead, cycle through one more time to see if
* we can find a call that can call our own.
*/
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_BEGIN);
+ MUTEX_EXIT(&rx_refcnt_mutex);
wait = 0;
}
MUTEX_EXIT(&call->lock);
} else {
/* rxi_NewCall returns with mutex locked */
call = rxi_NewCall(conn, i);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
+ MUTEX_EXIT(&rx_refcnt_mutex);
break;
}
}
call->mode = RX_MODE_ERROR;
else
call->mode = RX_MODE_SENDING;
-
+
/* remember start time for call in case we have hard dead time limit */
call->queueTime = queueTime;
clock_GetTime(&call->startTime);
/* Advertise a new service. A service is named locally by a UDP port
* number plus a 16-bit service id. Returns (struct rx_service *) 0
- * on a failure.
+ * on a failure.
*
char *serviceName; Name for identification purposes (e.g. the
service name might be used for probing for
statistics) */
struct rx_service *
-rx_NewServiceHost(afs_uint32 host, u_short port, u_short serviceId,
+rx_NewServiceHost(afs_uint32 host, u_short port, u_short serviceId,
char *serviceName, struct rx_securityClass **securityObjects,
- int nSecurityObjects,
+ int nSecurityObjects,
afs_int32(*serviceProc) (struct rx_call * acall))
{
osi_socket socket = OSI_NULLSOCKET;
/* Set configuration options for all of a service's security objects */
-afs_int32
-rx_SetSecurityConfiguration(struct rx_service *service,
+afs_int32
+rx_SetSecurityConfiguration(struct rx_service *service,
rx_securityConfigVariables type,
void *value)
{
int i;
for (i = 0; i<service->nSecurityObjects; i++) {
if (service->securityObjects[i]) {
- RXS_SetConfiguration(service->securityObjects[i], NULL, type,
+ RXS_SetConfiguration(service->securityObjects[i], NULL, type,
value, NULL);
}
}
if (tservice->beforeProc)
(*tservice->beforeProc) (call);
- code = call->conn->service->executeRequestProc(call);
+ code = tservice->executeRequestProc(call);
if (tservice->afterProc)
(*tservice->afterProc) (call, code);
/* meltdown:
* One thing that seems to happen is that all the server threads get
* tied up on some empty or slow call, and then a whole bunch of calls
- * arrive at once, using up the packet pool, so now there are more
+ * arrive at once, using up the packet pool, so now there are more
* empty calls. The most critical resources here are server threads
* and the free packet pool. The "doreclaim" code seems to help in
* general. I think that eventually we arrive in this state: there
* are lots of pending calls which do have all their packets present,
* so they won't be reclaimed, are multi-packet calls, so they won't
- * be scheduled until later, and thus are tying up most of the free
+ * be scheduled until later, and thus are tying up most of the free
* packet pool for a very long time.
* future options:
- * 1. schedule multi-packet calls if all the packets are present.
- * Probably CPU-bound operation, useful to return packets to pool.
+ * 1. schedule multi-packet calls if all the packets are present.
+ * Probably CPU-bound operation, useful to return packets to pool.
* Do what if there is a full window, but the last packet isn't here?
* 3. preserve one thread which *only* runs "best" calls, otherwise
* it sleeps and waits for that type of call.
- * 4. Don't necessarily reserve a whole window for each thread. In fact,
+ * 4. Don't necessarily reserve a whole window for each thread. In fact,
* the current dataquota business is badly broken. The quota isn't adjusted
* to reflect how many packets are presently queued for a running call.
* So, when we schedule a queued call with a full window of packets queued
MUTEX_EXIT(&freeSQEList_lock);
} else { /* otherwise allocate a new one and return that */
MUTEX_EXIT(&freeSQEList_lock);
- sq = (struct rx_serverQueueEntry *)
- rxi_Alloc(sizeof(struct rx_serverQueueEntry));
+ sq = rxi_Alloc(sizeof(struct rx_serverQueueEntry));
MUTEX_INIT(&sq->lock, "server Queue lock", MUTEX_DEFAULT, 0);
CV_INIT(&sq->cv, "server Queue lock", CV_DEFAULT, 0);
}
* already executing */
/* One thread will process calls FCFS (to prevent starvation),
* while the other threads may run ahead looking for calls which
- * have all their input data available immediately. This helps
+ * have all their input data available immediately. This helps
* keep threads from blocking, waiting for data from the client. */
for (queue_Scan(&rx_incomingCallQueue, tcall, ncall, rx_call)) {
service = tcall->conn->service;
if (tno == rxi_fcfs_thread_num
|| !tcall->queue_item_header.next) {
MUTEX_EXIT(&rx_pthread_mutex);
- /* If we're the fcfs thread , then we'll just use
- * this call. If we haven't been able to find an optimal
- * choice, and we're at the end of the list, then use a
+ /* If we're the fcfs thread, then we'll just use
+ * this call. If we haven't been able to find an optimal
+ * choice, and we're at the end of the list, then use a
* 2d choice if one has been identified. Otherwise... */
call = (choice2 ? choice2 : tcall);
service = call->conn->service;
if (call->flags & RX_CALL_WAIT_PROC) {
call->flags &= ~RX_CALL_WAIT_PROC;
- MUTEX_ENTER(&rx_waiting_mutex);
- rx_nWaiting--;
- MUTEX_EXIT(&rx_waiting_mutex);
+ rx_atomic_dec(&rx_nWaiting);
}
if (call->state != RX_STATE_PRECALL || call->error) {
call->conn->service->servicePort, call->conn->service->serviceId,
call));
- CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
+ MUTEX_EXIT(&rx_refcnt_mutex);
} else {
dpf(("rx_GetCall(socketp=%p, *socketp=0x%x)\n", socketp, *socketp));
}
MUTEX_EXIT(&freeSQEList_lock);
} else { /* otherwise allocate a new one and return that */
MUTEX_EXIT(&freeSQEList_lock);
- sq = (struct rx_serverQueueEntry *)
- rxi_Alloc(sizeof(struct rx_serverQueueEntry));
+ sq = rxi_Alloc(sizeof(struct rx_serverQueueEntry));
MUTEX_INIT(&sq->lock, "server Queue lock", MUTEX_DEFAULT, 0);
CV_INIT(&sq->cv, "server Queue lock", CV_DEFAULT, 0);
}
* already executing */
/* One thread will process calls FCFS (to prevent starvation),
* while the other threads may run ahead looking for calls which
- * have all their input data available immediately. This helps
+ * have all their input data available immediately. This helps
* keep threads from blocking, waiting for data from the client. */
choice2 = (struct rx_call *)0;
for (queue_Scan(&rx_incomingCallQueue, tcall, ncall, rx_call)) {
if (tno == rxi_fcfs_thread_num
|| !tcall->queue_item_header.next) {
MUTEX_EXIT(&rx_pthread_mutex);
- /* If we're the fcfs thread, then we'll just use
- * this call. If we haven't been able to find an optimal
- * choice, and we're at the end of the list, then use a
+ /* If we're the fcfs thread, then we'll just use
+ * this call. If we haven't been able to find an optimal
+ * choice, and we're at the end of the list, then use a
* 2d choice if one has been identified. Otherwise... */
call = (choice2 ? choice2 : tcall);
service = call->conn->service;
queue_Remove(call);
/* we can't schedule a call if there's no data!!! */
/* send an ack if there's no data, if we're missing the
- * first packet, or we're missing something between first
+ * first packet, or we're missing something between first
* and last -- there's a "hole" in the incoming data. */
if (queue_IsEmpty(&call->rq)
|| queue_First(&call->rq, rx_packet)->header.seq != 1
rxi_minDeficit--;
rxi_availProcs--;
MUTEX_EXIT(&rx_quota_mutex);
- rx_nWaiting--;
+ rx_atomic_dec(&rx_nWaiting);
/* MUTEX_EXIT(&call->lock); */
} else {
/* If there are no eligible incoming calls, add this process
 * and will also be called if there is an error condition on the call or
* the call is complete. Used by multi rx to build a selection
* function which determines which of several calls is likely to be a
- * good one to read from.
+ * good one to read from.
* NOTE: the way this is currently implemented it is probably only a
* good idea to (1) use it immediately after a newcall (clients only)
* and (2) only use it once. Other uses currently void your warranty
rx_EndCall(struct rx_call *call, afs_int32 rc)
{
struct rx_connection *conn = call->conn;
- struct rx_service *service;
afs_int32 error;
SPLVAR;
call->arrivalProc = (void (*)())0;
if (rc && call->error == 0) {
rxi_CallError(call, rc);
+ call->mode = RX_MODE_ERROR;
/* Send an abort message to the peer if this error code has
* only just been set. If it was set previously, assume the
- * peer has already been sent the error code or will request it
+ * peer has already been sent the error code or will request it
*/
rxi_SendCallAbort(call, (struct rx_packet *)0, 0, 0);
}
if (conn->type == RX_SERVER_CONNECTION) {
/* Make sure reply or at least dummy reply is sent */
if (call->mode == RX_MODE_RECEIVING) {
+ MUTEX_EXIT(&call->lock);
rxi_WriteProc(call, 0, 0);
+ MUTEX_ENTER(&call->lock);
}
if (call->mode == RX_MODE_SENDING) {
+ MUTEX_EXIT(&call->lock);
rxi_FlushWrite(call);
+ MUTEX_ENTER(&call->lock);
}
- service = conn->service;
rxi_calltrace(RX_CALL_END, call);
/* Call goes to hold state until reply packets are acknowledged */
if (call->tfirst + call->nSoftAcked < call->tnext) {
* no reply arguments are expected */
if ((call->mode == RX_MODE_SENDING)
|| (call->mode == RX_MODE_RECEIVING && call->rnext == 1)) {
+ MUTEX_EXIT(&call->lock);
(void)rxi_ReadProc(call, &dummy, 1);
+ MUTEX_ENTER(&call->lock);
}
/* If we had an outstanding delayed ack, be nice to the server
* kernel version, and may interrupt the macros rx_Read or
* rx_Write, which run at normal priority for efficiency. */
if (call->currentPacket) {
+#ifdef RX_TRACK_PACKETS
call->currentPacket->flags &= ~RX_PKTFLAG_CP;
+#endif
rxi_FreePacket(call->currentPacket);
call->currentPacket = (struct rx_packet *)0;
}
-
+
call->nLeft = call->nFree = call->curlen = 0;
/* Free any packets from the last call to ReadvProc/WritevProc */
call->iovqc -=
#endif /* RXDEBUG_PACKET */
rxi_FreePackets(0, &call->iovq);
+ MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_BEGIN);
- MUTEX_EXIT(&call->lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
if (conn->type == RX_CLIENT_CONNECTION) {
MUTEX_ENTER(&conn->conn_data_lock);
conn->flags &= ~RX_CONN_BUSY;
for (conn = *conn_ptr; conn; conn = next) {
next = conn->next;
if (conn->type == RX_CLIENT_CONNECTION) {
- /* MUTEX_ENTER(&conn->conn_data_lock); when used in kernel */
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++;
- /* MUTEX_EXIT(&conn->conn_data_lock); when used in kernel */
+ MUTEX_EXIT(&rx_refcnt_mutex);
#ifdef RX_ENABLE_LOCKS
rxi_DestroyConnectionNoLock(conn);
#else /* RX_ENABLE_LOCKS */
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
queue_Remove(call);
if (rx_stats_active)
- rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nFreeCallStructs);
MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_ENTER(&call->lock);
CLEAR_CALL_QUEUE_LOCK(call);
rxi_ResetCall(call, 1);
} else {
- call = (struct rx_call *)rxi_Alloc(sizeof(struct rx_call));
+ call = rxi_Alloc(sizeof(struct rx_call));
#ifdef RXDEBUG_PACKET
call->allNextp = rx_allCallsp;
rx_allCallsp = call;
- call->call_id =
+ call->call_id =
+ rx_atomic_inc_and_read(&rx_stats.nCallStructs);
+#else /* RXDEBUG_PACKET */
+ rx_atomic_inc(&rx_stats.nCallStructs);
#endif /* RXDEBUG_PACKET */
- rx_MutexIncrement(rx_stats.nCallStructs, rx_stats_mutex);
-
+
MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_INIT(&call->lock, "call lock", MUTEX_DEFAULT, NULL);
MUTEX_ENTER(&call->lock);
/* A call has been inactive long enough that we can throw away
* state, including the call structure, which is placed on the call
* free list.
- * Call is locked upon entry.
- * haveCTLock set if called from rxi_ReapConnections
+ *
+ * call->lock and rx_refcnt_mutex are held upon entry.
+ * haveCTLock is set when called from rxi_ReapConnections.
*/
-#ifdef RX_ENABLE_LOCKS
void
rxi_FreeCall(struct rx_call *call, int haveCTLock)
-#else /* RX_ENABLE_LOCKS */
-void
-rxi_FreeCall(struct rx_call *call)
-#endif /* RX_ENABLE_LOCKS */
{
int channel = call->channel;
struct rx_connection *conn = call->conn;
(*call->callNumber)++;
rxi_ResetCall(call, 0);
call->conn->call[channel] = (struct rx_call *)0;
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_ENTER(&rx_freeCallQueue_lock);
SET_CALL_QUEUE_LOCK(call, &rx_freeCallQueue_lock);
queue_Append(&rx_freeCallQueue, call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nFreeCallStructs);
MUTEX_EXIT(&rx_freeCallQueue_lock);
/* Destroy the connection if it was previously slated for
*/
MUTEX_ENTER(&conn->conn_data_lock);
if (conn->flags & RX_CONN_DESTROY_ME && !(conn->flags & RX_CONN_MAKECALL_WAITING)) {
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++;
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
#ifdef RX_ENABLE_LOCKS
if (haveCTLock)
} else {
MUTEX_EXIT(&conn->conn_data_lock);
}
+ MUTEX_ENTER(&rx_refcnt_mutex);
}
-afs_int32 rxi_Alloccnt = 0, rxi_Allocsize = 0;
-char *
+rx_atomic_t rxi_Allocsize = RX_ATOMIC_INIT(0);
+rx_atomic_t rxi_Alloccnt = RX_ATOMIC_INIT(0);
+
+void *
rxi_Alloc(size_t size)
{
char *p;
- if (rx_stats_active)
- rx_MutexAdd1Increment2(rxi_Allocsize, (afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
+ if (rx_stats_active) {
+ rx_atomic_add(&rxi_Allocsize, (int) size);
+ rx_atomic_inc(&rxi_Alloccnt);
+ }
p = (char *)
#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
void
rxi_Free(void *addr, size_t size)
{
- if (rx_stats_active)
- rx_MutexAdd1Decrement2(rxi_Allocsize, -(afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
+ if (rx_stats_active) {
+ rx_atomic_sub(&rxi_Allocsize, (int) size);
+ rx_atomic_dec(&rxi_Alloccnt);
+ }
osi_Free(addr, size);
}
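/*
 * Illustrative sketch, assuming a caller-side debug dump (not part of this
 * change): with the allocator counters kept as rx_atomic_t they can be read
 * without holding rx_stats_mutex, e.g.
 *
 *     dpf(("rxi_Alloc: %d outstanding allocations, %d bytes\n",
 *          rx_atomic_read(&rxi_Alloccnt), rx_atomic_read(&rxi_Allocsize)));
 */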
-void
+void
rxi_SetPeerMtu(struct rx_peer *peer, afs_uint32 host, afs_uint32 port, int mtu)
{
struct rx_peer **peer_ptr = NULL, **peer_end = NULL;
/* Find the peer process represented by the supplied (host,port)
* combination. If there is no appropriate active peer structure, a
- * new one will be allocated and initialized
+ * new one will be allocated and initialized
* The origPeer, if set, is a pointer to a peer structure on which the
 * refcount will be decremented. This is used to replace the peer
* structure hanging off a connection structure */
rx_peerHashTable[hashIndex] = pp;
rxi_InitPeerParams(pp);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nPeerStructs);
}
}
if (pp && create) {
if (service->newConnProc)
(*service->newConnProc) (conn);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nServerConns);
}
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
rxLastConn = conn; /* store this connection as the last conn used */
MUTEX_EXIT(&rx_connHashTable_lock);
MUTEX_ENTER(&conn->conn_data_lock);
if (np->header.type != RX_PACKET_TYPE_ABORT)
np = rxi_SendConnectionAbort(conn, np, 1, 0);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
return np;
}
afs_int32 errcode = ntohl(rx_GetInt32(np, 0));
dpf(("rxi_ReceivePacket ABORT rx_GetInt32 = %d", errcode));
rxi_ConnectionError(conn, errcode);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
case RX_PACKET_TYPE_CHALLENGE:
tnp = rxi_ReceiveChallengePacket(conn, np, 1);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return tnp;
case RX_PACKET_TYPE_RESPONSE:
tnp = rxi_ReceiveResponsePacket(conn, np, 1);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return tnp;
case RX_PACKET_TYPE_PARAMS:
case RX_PACKET_TYPE_PARAMS + 1:
case RX_PACKET_TYPE_PARAMS + 2:
/* ignore these packet types for now */
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
rxi_ConnectionError(conn, RX_PROTOCOL_ERROR);
MUTEX_ENTER(&conn->conn_data_lock);
tnp = rxi_SendConnectionAbort(conn, np, 1, 0);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
return tnp;
}
* it must be for the previous call.
*/
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
- MUTEX_ENTER(&conn->conn_data_lock);
+ rx_atomic_inc(&rx_stats.spuriousPacketsRead);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
}
if (type == RX_SERVER_CONNECTION) { /* We're the server */
if (np->header.callNumber < currentCallNumber) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.spuriousPacketsRead);
#ifdef RX_ENABLE_LOCKS
if (call)
MUTEX_EXIT(&call->lock);
#endif
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
if (!call) {
MUTEX_EXIT(&conn->conn_call_lock);
*call->callNumber = np->header.callNumber;
#ifdef RXDEBUG
- if (np->header.callNumber == 0)
+ if (np->header.callNumber == 0)
dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%.06d len %d",
np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port),
np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq,
* If the number of queued calls exceeds the overload
* threshold then abort this call.
*/
- if ((rx_BusyThreshold > 0) && (rx_nWaiting > rx_BusyThreshold)) {
+ if ((rx_BusyThreshold > 0) &&
+ (rx_atomic_read(&rx_nWaiting) > rx_BusyThreshold)) {
struct rx_packet *tp;
-
+
rxi_CallError(call, rx_BusyError);
tp = rxi_SendCallAbort(call, np, 1, 0);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nBusies);
return tp;
}
rxi_KeepAliveOn(call);
tp = rxi_SendSpecial(call, conn, np, RX_PACKET_TYPE_BUSY,
NULL, 0, 1);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return tp;
}
rxi_ResetCall(call, 0);
*call->callNumber = np->header.callNumber;
#ifdef RXDEBUG
- if (np->header.callNumber == 0)
+ if (np->header.callNumber == 0)
dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%06d len %d",
np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port),
np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq,
* If the number of queued calls exceeds the overload
* threshold then abort this call.
*/
- if ((rx_BusyThreshold > 0) && (rx_nWaiting > rx_BusyThreshold)) {
+ if ((rx_BusyThreshold > 0) &&
+ (rx_atomic_read(&rx_nWaiting) > rx_BusyThreshold)) {
struct rx_packet *tp;
rxi_CallError(call, rx_BusyError);
tp = rxi_SendCallAbort(call, np, 1, 0);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nBusies);
return tp;
}
rxi_KeepAliveOn(call);
if (call && (call->state == RX_STATE_DALLY)
&& (np->header.type == RX_PACKET_TYPE_ACK)) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.ignorePacketDally);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
}
#endif
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
* isn't a current call, then no packet is relevant. */
if (!call || (np->header.callNumber != currentCallNumber)) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.spuriousPacketsRead);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
}
#endif
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
/* If the service security object index stamped in the packet does not
#ifdef RX_ENABLE_LOCKS
MUTEX_EXIT(&call->lock);
#endif
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
* traversing the tq in rxi_Start sending packets out because
* packets may move to the freePacketQueue as result of being here!
* So we drop these packets until we're safely out of the
- * traversing. Really ugly!
+ * traversing. Really ugly!
* For fine grain RX locking, we set the acked field in the
* packets and let rxi_Start remove them from the transmit queue.
*/
#ifdef RX_ENABLE_LOCKS
rxi_SetAcksInTransmitQueue(call);
#else
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np; /* xmitting; drop packet */
#endif
} else {
/* XXX I'm not sure this is exactly right, since tfirst **IS**
* XXX unacknowledged. I think that this is off-by-one, but
* XXX I don't dare change it just yet, since it will
- * XXX interact badly with the server-restart detection
+ * XXX interact badly with the server-restart detection
* XXX code in receiveackpacket. */
if (ntohl(rx_GetInt32(np, FIRSTACKOFFSET)) < call->tfirst) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.spuriousPacketsRead);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
}
* so this will be quite important with very large window sizes.
* Skew is checked against 0 here to avoid any dependence on the type of
* inPacketSkew (which may be unsigned). In C, -1 > (unsigned) 0 is always
- * true!
+ * true!
* The inPacketSkew should be a smoothed running value, not just a maximum. MTUXXX
* see CalculateRoundTripTime for an example of how to keep smoothed values.
* I think using a beta of 1/8 is probably appropriate. 93.04.21
dpf(("rxi_ReceivePacket ABORT rx_DataOf = %d", errdata));
rxi_CallError(call, errdata);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np; /* xmitting; drop packet */
}
case RX_PACKET_TYPE_BUSY:
* traversing the tq in rxi_Start sending packets out because
* packets may move to the freePacketQueue as result of being
* here! So we drop these packets until we're safely out of the
- * traversing. Really ugly!
+ * traversing. Really ugly!
* For fine grain RX locking, we set the acked field in the packets
* and let rxi_Start remove the packets from the transmit queue.
*/
break;
#else /* RX_ENABLE_LOCKS */
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np; /* xmitting; drop packet */
#endif /* RX_ENABLE_LOCKS */
}
* (if not, then the time won't actually be re-evaluated here). */
call->lastReceiveTime = clock_Sec();
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
MUTEX_ENTER(&conn->conn_data_lock);
conn->checkReachEvent = NULL;
waiting = conn->flags & RX_CONN_ATTACHWAIT;
- if (event)
+ if (event) {
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ }
MUTEX_EXIT(&conn->conn_data_lock);
if (waiting) {
when.sec += RX_CHECKREACH_TIMEOUT;
MUTEX_ENTER(&conn->conn_data_lock);
if (!conn->checkReachEvent) {
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++;
+ MUTEX_EXIT(&rx_refcnt_mutex);
conn->checkReachEvent =
- rxevent_PostNow(&when, &now, rxi_CheckReachEvent, conn,
+ rxevent_PostNow(&when, &now, rxi_CheckReachEvent, conn,
NULL);
}
MUTEX_EXIT(&conn->conn_data_lock);
int newPackets = 0;
int didHardAck = 0;
int haveLast = 0;
- afs_uint32 seq;
+ afs_uint32 seq;
afs_uint32 serial=0, flags=0;
int isFirst;
struct rx_packet *tnp;
struct clock when, now;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.dataPacketsRead);
#ifdef KERNEL
/* If there are no packet buffers, drop this new packet, unless we can find
rxi_NeedMorePackets = TRUE;
MUTEX_EXIT(&rx_freePktQ_lock);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.noPacketBuffersOnRead);
call->rprev = np->header.serial;
rxi_calltrace(RX_TRACE_DROP, call);
dpf(("packet %"AFS_PTR_FMT" dropped on receipt - quota problems", np));
|| clock_Gt(&call->delayedAckEvent->eventTime, &when)) {
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
+ MUTEX_EXIT(&rx_refcnt_mutex);
+
call->delayedAckEvent =
rxevent_PostNow(&when, &now, rxi_SendDelayedAck, call, 0);
}
if (queue_IsNotEmpty(&call->rq)
&& queue_First(&call->rq, rx_packet)->header.seq == seq) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.dupPacketsRead);
dpf(("packet %"AFS_PTR_FMT" dropped on receipt - duplicate", np));
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
/* It's the next packet. Stick it on the receive queue
* for this call. Set newPackets to make sure we wake
* the reader once all packets have been processed */
+#ifdef RX_TRACK_PACKETS
np->flags |= RX_PKTFLAG_RQ;
+#endif
queue_Prepend(&call->rq, np);
#ifdef RXDEBUG_PACKET
call->rqc++;
* application already, then this is a duplicate */
if (seq < call->rnext) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.dupPacketsRead);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
/*Check for duplicate packet */
if (seq == tp->header.seq) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.dupPacketsRead);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE,
* packet before which to insert the new packet, or at the
* queue head if the queue is empty or the packet should be
* appended. */
+#ifdef RX_TRACK_PACKETS
np->flags |= RX_PKTFLAG_RQ;
+#endif
#ifdef RXDEBUG_PACKET
call->rqc++;
#endif /* RXDEBUG_PACKET */
}
}
- /* We need to send an ack of the packet is out of sequence,
+ /* We need to send an ack if the packet is out of sequence,
* or if an ack was requested by the peer. */
if (seq != prev + 1 || missing) {
ackNeeded = RX_ACK_OUT_OF_SEQUENCE;
* Send an ack when requested by the peer, or once every
* rxi_SoftAckRate packets until the last packet has been
* received. Always send a soft ack for the last packet in
- * the server's reply. */
- if (ackNeeded) {
+ * the server's reply.
+ *
+ * If we have received all of the packets for the call,
+ * immediately send an RX_PACKET_TYPE_ACKALL packet so that
+ * the peer can empty its packet queue and cancel all resend
+ * events.
+ */
+ if (call->flags & RX_CALL_RECEIVE_DONE) {
+ rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
+ rxi_AckAll(NULL, call, 0);
+ } else if (ackNeeded) {
rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, ackNeeded, istack);
} else if (call->nSoftAcks > (u_short) rxi_SoftAckRate) {
|| clock_Gt(&call->delayedAckEvent->eventTime, &when)) {
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
+ MUTEX_EXIT(&rx_refcnt_mutex);
call->delayedAckEvent =
rxevent_PostNow(&when, &now, rxi_SendDelayedAck, call, 0);
}
- } else if (call->flags & RX_CALL_RECEIVE_DONE) {
- rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
}
return np;
*/
static void
rxi_ComputePeerNetStats(struct rx_call *call, struct rx_packet *p,
- struct rx_ackPacket *ap, struct rx_packet *np)
+ struct rx_ackPacket *ap, struct rx_packet *np,
+ struct clock *now)
{
struct rx_peer *peer = call->conn->peer;
if (!(p->flags & RX_PKTFLAG_ACKED) &&
ap->reason != RX_ACK_DELAY &&
clock_Eq(&p->timeSent, &p->firstSent))
- rxi_ComputeRoundTripTime(p, &p->timeSent, peer);
+ rxi_ComputeRoundTripTime(p, &p->timeSent, peer, now);
#ifdef ADAPT_WINDOW
rxi_ComputeRate(peer, call, p, np, ap->reason);
#endif
struct rx_packet *nxp; /* Next packet pointer for queue_Scan */
struct rx_connection *conn = call->conn;
struct rx_peer *peer = conn->peer;
+ struct clock now; /* Current time, for RTT calculations */
afs_uint32 first;
afs_uint32 serial;
/* because there are CM's that are bogus, sending weird values for this. */
afs_uint32 skew = 0;
int nbytes;
int missing;
- int backedOff = 0;
int acked;
int nNacked = 0;
int newAckCount = 0;
- u_short maxMTU = 0; /* Set if peer supports AFS 3.4a jumbo datagrams */
int maxDgramPackets = 0; /* Set if peer supports AFS 3.5 jumbo datagrams */
int pktsize = 0; /* Set if we need to update the peer mtu */
+ int conn_data_locked = 0;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.ackPacketsRead);
ap = (struct rx_ackPacket *)rx_DataOf(np);
nbytes = rx_Contiguous(np) - (int)((ap->acks) - (u_char *) ap);
if (nbytes < 0)
nAcks = MIN((unsigned)nbytes, (unsigned)ap->nAcks);
first = ntohl(ap->firstPacket);
serial = ntohl(ap->serial);
- /* temporarily disabled -- needs to degrade over time
+ /* temporarily disabled -- needs to degrade over time
* skew = ntohs(ap->maxSkew); */
/* Ignore ack packets received out of order */
if (conn->lastPacketSizeSeq) {
MUTEX_ENTER(&conn->conn_data_lock);
+ conn_data_locked = 1;
if ((first > conn->lastPacketSizeSeq) && (conn->lastPacketSize)) {
pktsize = conn->lastPacketSize;
conn->lastPacketSize = conn->lastPacketSizeSeq = 0;
}
- MUTEX_EXIT(&conn->conn_data_lock);
}
if ((ap->reason == RX_ACK_PING_RESPONSE) && (conn->lastPingSizeSer)) {
- MUTEX_ENTER(&conn->conn_data_lock);
+ if (!conn_data_locked) {
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn_data_locked = 1;
+ }
if ((conn->lastPingSizeSer == serial) && (conn->lastPingSize)) {
/* process mtu ping ack */
pktsize = conn->lastPingSize;
conn->lastPingSizeSer = conn->lastPingSize = 0;
}
- MUTEX_EXIT(&conn->conn_data_lock);
}
- if (pktsize) {
- MUTEX_ENTER(&peer->peer_lock);
- /*
- * Start somewhere. Can't assume we can send what we can receive,
- * but we are clearly receiving.
- */
- if (!peer->maxPacketSize)
- peer->maxPacketSize = RX_MIN_PACKET_SIZE+RX_IPUDP_SIZE;
-
- if (pktsize > peer->maxPacketSize) {
- peer->maxPacketSize = pktsize;
- if ((pktsize-RX_IPUDP_SIZE > peer->ifMTU)) {
- peer->ifMTU=pktsize-RX_IPUDP_SIZE;
- peer->natMTU = rxi_AdjustIfMTU(peer->ifMTU);
- rxi_ScheduleGrowMTUEvent(call, 1);
- }
- }
- MUTEX_EXIT(&peer->peer_lock);
+ if (conn_data_locked) {
+ MUTEX_EXIT(&conn->conn_data_lock);
+ conn_data_locked = 0;
}
-
#ifdef RXDEBUG
#ifdef AFS_NT40_ENV
if (rxdebug_active) {
len = _snprintf(msg, sizeof(msg),
"tid[%d] RACK: reason %s serial %u previous %u seq %u skew %d first %u acks %u space %u ",
- GetCurrentThreadId(), rx_ack_reason(ap->reason),
+ GetCurrentThreadId(), rx_ack_reason(ap->reason),
ntohl(ap->serial), ntohl(ap->previousPacket),
- (unsigned int)np->header.seq, (unsigned int)skew,
+ (unsigned int)np->header.seq, (unsigned int)skew,
ntohl(ap->firstPacket), ap->nAcks, ntohs(ap->bufferSpace) );
if (nAcks) {
int offset;
- for (offset = 0; offset < nAcks && len < sizeof(msg); offset++)
+ for (offset = 0; offset < nAcks && len < sizeof(msg); offset++)
msg[len++] = (ap->acks[offset] == RX_ACK_TYPE_NACK ? '-' : '*');
}
msg[len++]='\n';
#endif /* AFS_NT40_ENV */
#endif
+ MUTEX_ENTER(&peer->peer_lock);
+ if (pktsize) {
+ /*
+ * Start somewhere. Can't assume we can send what we can receive,
+ * but we are clearly receiving.
+ */
+ if (!peer->maxPacketSize)
+ peer->maxPacketSize = RX_MIN_PACKET_SIZE+RX_IPUDP_SIZE;
+
+ if (pktsize > peer->maxPacketSize) {
+ peer->maxPacketSize = pktsize;
+ if ((pktsize-RX_IPUDP_SIZE > peer->ifMTU)) {
+ peer->ifMTU=pktsize-RX_IPUDP_SIZE;
+ peer->natMTU = rxi_AdjustIfMTU(peer->ifMTU);
+ rxi_ScheduleGrowMTUEvent(call, 1);
+ }
+ }
+ }
+
/* Update the outgoing packet skew value to the latest value of
* the peer's incoming packet skew value. The ack packet, of
* course, could arrive out of order, but that won't affect things
* much */
- MUTEX_ENTER(&peer->peer_lock);
peer->outPacketSkew = skew;
/* Check for packets that no longer need to be transmitted, and
* acknowledged as having been sent to the peer's upper level.
* All other packets must be retained. So only packets with
* sequence numbers < ap->firstPacket are candidates. */
+
+ clock_GetTime(&now);
+
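/*
 * The ack's arrival time is sampled once here and the same value is passed
 * down to rxi_ComputePeerNetStats()/rxi_ComputeRoundTripTime() for every
 * packet acknowledged below, instead of re-reading the clock per packet.
 */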
for (queue_Scan(&call->tq, tp, nxp, rx_packet)) {
if (tp->header.seq >= first)
break;
call->tfirst = tp->header.seq + 1;
- rxi_ComputePeerNetStats(call, tp, ap, np);
+ rxi_ComputePeerNetStats(call, tp, ap, np, &now);
if (!(tp->flags & RX_PKTFLAG_ACKED)) {
newAckCount++;
}
* packets (osi_NetSend) we drop all acks while we're traversing the tq
* in rxi_Start sending packets out because packets may move to the
* freePacketQueue as result of being here! So we drop these packets until
- * we're safely out of the traversing. Really ugly!
+ * we're safely out of the traversing. Really ugly!
* To make it even uglier, if we're using fine grain locking, we can
* set the ack bits in the packets and have rxi_Start remove the packets
* when it's done transmitting.
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
{
queue_Remove(tp);
+#ifdef RX_TRACK_PACKETS
tp->flags &= ~RX_PKTFLAG_TQ;
+#endif
#ifdef RXDEBUG_PACKET
call->tqc--;
#endif /* RXDEBUG_PACKET */
if (tp->header.seq >= first)
#endif /* RX_ENABLE_LOCKS */
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- rxi_ComputePeerNetStats(call, tp, ap, np);
+ rxi_ComputePeerNetStats(call, tp, ap, np, &now);
/* Set the acknowledge flag per packet based on the
 * information in the ack packet. An acknowledged packet can
* timeout value for future packets until a successful response
* is received for an initial transmission.
*/
- if (missing && !backedOff) {
+ if (missing && !peer->backedOff) {
struct clock c = peer->timeout;
struct clock max_to = {3, 0};
clock_Add(&peer->timeout, &c);
if (clock_Gt(&peer->timeout, &max_to))
peer->timeout = max_to;
- backedOff = 1;
+ peer->backedOff = 1;
}
- /* If packet isn't yet acked, and it has been transmitted at least
- * once, reset retransmit time using latest timeout
- * ie, this should readjust the retransmit timer for all outstanding
+ /* If packet isn't yet acked, and it has been transmitted at least
+ * once, reset retransmit time using the latest timeout;
+ * i.e., this should readjust the retransmit timer for all outstanding
* packets... So we don't just retransmit when we should know better*/
if (!(tp->flags & RX_PKTFLAG_ACKED) && !clock_IsZero(&tp->retryTime)) {
if (np->length >= rx_AckDataSize(ap->nAcks) + 2 * sizeof(afs_int32)) {
afs_uint32 tSize;
- /* If the ack packet has a "recommended" size that is less than
+ /* If the ack packet has a "recommended" size that is less than
* what I am using now, reduce my size to match */
rx_packetread(np, rx_AckDataSize(ap->nAcks) + (int)sizeof(afs_int32),
(int)sizeof(afs_int32), &tSize);
tSize = rxi_AdjustMaxMTU(peer->natMTU, tSize);
/* sanity check - peer might have restarted with different params.
- * If peer says "send less", dammit, send less... Peer should never
+ * If peer says "send less", dammit, send less... Peer should never
* be unable to accept packets of the size that prior AFS versions would
* send without asking. */
if (peer->maxMTU != tSize) {
* network MTU confused with the loopback MTU. Calculate the
* maximum MTU here for use in the slow start code below.
*/
- maxMTU = peer->maxMTU;
/* Did peer restart with older RX version? */
if (peer->maxDgramPackets > 1) {
peer->maxDgramPackets = 1;
sizeof(afs_int32), &tSize);
tSize = (afs_uint32) ntohl(tSize);
/*
- * As of AFS 3.5 we set the send window to match the receive window.
+ * As of AFS 3.5 we set the send window to match the receive window.
*/
if (tSize < call->twind) {
call->twind = tSize;
if (!(call->flags & RX_CALL_WAIT_PROC)) {
call->flags |= RX_CALL_WAIT_PROC;
- MUTEX_ENTER(&rx_waiting_mutex);
- rx_nWaiting++;
- rx_nWaited++;
- MUTEX_EXIT(&rx_waiting_mutex);
+ rx_atomic_inc(&rx_nWaiting);
+ rx_atomic_inc(&rx_nWaited);
rxi_calltrace(RX_CALL_ARRIVAL, call);
SET_CALL_QUEUE_LOCK(call, &rx_serverPool_lock);
queue_Append(&rx_incomingCallQueue, call);
*tnop = sq->tno;
*sq->socketp = socket;
clock_GetTime(&call->startTime);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
+ MUTEX_EXIT(&rx_refcnt_mutex);
} else {
sq->newcall = call;
}
call->flags &= ~RX_CALL_WAIT_PROC;
if (queue_IsOnQueue(call)) {
queue_Remove(call);
-
- MUTEX_ENTER(&rx_waiting_mutex);
- rx_nWaiting--;
- MUTEX_EXIT(&rx_waiting_mutex);
+
+ rx_atomic_dec(&rx_nWaiting);
}
}
call->state = RX_STATE_ACTIVE;
if (event) {
MUTEX_ENTER(&call->lock);
call->delayedAckEvent = NULL;
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_ACKALL);
+ MUTEX_EXIT(&rx_refcnt_mutex);
}
rxi_SendSpecial(call, call->conn, (struct rx_packet *)0,
RX_PACKET_TYPE_ACKALL, NULL, 0, 0);
MUTEX_ENTER(&call->lock);
if (event == call->delayedAckEvent)
call->delayedAckEvent = NULL;
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_DELAY);
+ MUTEX_EXIT(&rx_refcnt_mutex);
}
(void)rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
if (event)
{
if (queue_IsNotEmpty(&call->rq)) {
u_short count;
-
+
count = rxi_FreePackets(0, &call->rq);
rx_packetReclaims += count;
#ifdef RXDEBUG_PACKET
call->rqc -= count;
- if ( call->rqc != 0 )
+ if ( call->rqc != 0 )
dpf(("rxi_ClearReceiveQueue call %"AFS_PTR_FMT" rqc %u != 0", call, call->rqc));
#endif
call->flags &= ~(RX_CALL_RECEIVE_DONE | RX_CALL_HAVE_LAST);
clock_GetTime(&now);
when = now;
clock_Addmsec(&when, rxi_callAbortDelay);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_ABORT);
+ MUTEX_EXIT(&rx_refcnt_mutex);
call->delayedAbortEvent =
rxevent_PostNow(&when, &now, rxi_SendDelayedCallAbort, call, 0);
}
rxevent_Cancel(conn->checkReachEvent, (struct rx_call *)0, 0);
conn->checkReachEvent = 0;
conn->flags &= ~RX_CONN_ATTACHWAIT;
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
}
MUTEX_EXIT(&conn->conn_data_lock);
for (i = 0; i < RX_MAXCALLS; i++) {
}
conn->error = error;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.fatalErrors);
}
}
rxi_ResetCall(call, 0);
#endif
call->error = error;
- call->mode = RX_MODE_ERROR;
}
/* Reset various fields in a call structure, and wakeup waiting
rxi_ClearReceiveQueue(call);
/* why init the queue if you just emptied it? queue_Init(&call->rq); */
-
- if (call->currentPacket) {
- call->currentPacket->flags &= ~RX_PKTFLAG_CP;
- call->currentPacket->flags |= RX_PKTFLAG_IOVQ;
- queue_Prepend(&call->iovq, call->currentPacket);
-#ifdef RXDEBUG_PACKET
- call->iovqc++;
-#endif /* RXDEBUG_PACKET */
- call->currentPacket = (struct rx_packet *)0;
- }
- call->curlen = call->nLeft = call->nFree = 0;
-#ifdef RXDEBUG_PACKET
- call->iovqc -=
-#endif
- rxi_FreePackets(0, &call->iovq);
call->error = 0;
call->twind = call->conn->twind[call->channel];
if (queue_IsOnQueue(call)) {
queue_Remove(call);
if (flags & RX_CALL_WAIT_PROC) {
-
- MUTEX_ENTER(&rx_waiting_mutex);
- rx_nWaiting--;
- MUTEX_EXIT(&rx_waiting_mutex);
+ rx_atomic_dec(&rx_nWaiting);
}
}
MUTEX_EXIT(call->call_queue_lock);
if (queue_IsOnQueue(call)) {
queue_Remove(call);
if (flags & RX_CALL_WAIT_PROC)
- rx_nWaiting--;
+ rx_atomic_dec(&rx_nWaiting);
}
#endif /* RX_ENABLE_LOCKS */
* higher level yet (unless, of course, the sender decides to abort
* the call altogether). Any of p, seq, serial, pflags, or reason may
* be set to zero without ill effect. That is, if they are zero, they
- * will not convey any information.
+ * will not convey any information.
* NOW there is a trailer field, after the ack where it will safely be
- * ignored by mundanes, which indicates the maximum size packet this
+ * ignored by mundanes, which indicates the maximum size packet this
* host can swallow. */
/*
- struct rx_packet *optionalPacket; use to send ack (or null)
- int seq; Sequence number of the packet we are acking
- int serial; Serial number of the packet
- int pflags; Flags field from packet header
- int reason; Reason an acknowledge was prompted
+ struct rx_packet *optionalPacket; use to send ack (or null)
+ int seq; Sequence number of the packet we are acking
+ int serial; Serial number of the packet
+ int pflags; Flags field from packet header
+ int reason; Reason an acknowledge was prompted
*/
struct rx_packet *
ap->previousPacket = htonl(call->rprev); /* Previous packet received */
/* No fear of running out of ack packet here because there can only be at most
- * one window full of unacknowledged packets. The window size must be constrained
+ * one window full of unacknowledged packets. The window size must be constrained
* to be less than the maximum ack size, of course. Also, an ack should always
* fit into a single packet -- it should not ever be fragmented. */
for (offset = 0, queue_Scan(&call->rq, rqp, nxp, rx_packet)) {
len = _snprintf(msg, sizeof(msg),
"tid[%d] SACK: reason %s serial %u previous %u seq %u first %u acks %u space %u ",
- GetCurrentThreadId(), rx_ack_reason(ap->reason),
+ GetCurrentThreadId(), rx_ack_reason(ap->reason),
ntohl(ap->serial), ntohl(ap->previousPacket),
(unsigned int)p->header.seq, ntohl(ap->firstPacket),
ap->nAcks, ntohs(ap->bufferSpace) );
if (ap->nAcks) {
int offset;
- for (offset = 0; offset < ap->nAcks && len < sizeof(msg); offset++)
+ for (offset = 0; offset < ap->nAcks && len < sizeof(msg); offset++)
msg[len++] = (ap->acks[offset] == RX_ACK_TYPE_NACK ? '-' : '*');
}
msg[len++]='\n';
}
}
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.ackPacketsSent);
#ifndef RX_ENABLE_TSFPQ
if (!optionalPacket)
rxi_FreePacket(p);
peer->nSent += len;
if (resending)
peer->reSends += len;
- if (rx_stats_active)
- rx_MutexAdd(rx_stats.dataPacketsSent, len, rx_stats_mutex);
MUTEX_EXIT(&peer->peer_lock);
+ if (rx_stats_active) {
+ if (resending)
+ rx_atomic_add(&rx_stats.dataPacketsReSent, len);
+ else
+ rx_atomic_add(&rx_stats.dataPacketsSent, len);
+ }
+
if (list[len - 1]->header.flags & RX_LAST_PACKET) {
lastPacket = 1;
}
* packet until the congestion window reaches the ack rate. */
if (list[i]->header.serial) {
requestAck = 1;
- if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
} else {
/* improved RTO calculation- not Karn */
list[i]->firstSent = *now;
* safe to nuke any scheduled end-of-packets ack */
rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
- CALL_HOLD(call, RX_CALL_REFCOUNT_SEND);
MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ CALL_HOLD(call, RX_CALL_REFCOUNT_SEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
if (len > 1) {
rxi_SendPacketList(call, conn, list, len, istack);
} else {
rxi_SendPacket(call, conn, list[0], istack);
}
MUTEX_ENTER(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_SEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
/* Update last send time for this call (for keep-alive
* processing), and for the connection (so that we can discover
#ifdef RX_ENABLE_LOCKS
/* Call rxi_Start, below, but with the call lock held. */
void
-rxi_StartUnlocked(struct rxevent *event,
+rxi_StartUnlocked(struct rxevent *event,
void *arg0, void *arg1, int istack)
{
struct rx_call *call = arg0;
-
+
MUTEX_ENTER(&call->lock);
rxi_Start(event, call, arg1, istack);
MUTEX_EXIT(&call->lock);
* better optimized for new packets, the usual case, now that we've
* got rid of queues of send packets. XXXXXXXXXXX */
void
-rxi_Start(struct rxevent *event,
+rxi_Start(struct rxevent *event,
void *arg0, void *arg1, int istack)
{
struct rx_call *call = arg0;
-
+
struct rx_packet *p;
struct rx_packet *nxp; /* Next pointer for queue_Scan */
struct rx_peer *peer = call->conn->peer;
* structure, since there is no longer a per-call retransmission
* event pending. */
if (event && event == call->resendEvent) {
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_RESEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
call->resendEvent = NULL;
resending = 1;
if (queue_IsEmpty(&call->tq)) {
if (call->error) {
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
if (rx_stats_active)
- rx_MutexIncrement(rx_tq_debug.rxi_start_in_error, rx_stats_mutex);
+ rx_atomic_inc(&rx_tq_debug.rxi_start_in_error);
#endif
return;
}
* recent additions.
* Do a dance to avoid blocking after setting now. */
MUTEX_ENTER(&peer->peer_lock);
- retryTime = peer->timeout;
+ retryTime = peer->timeout;
MUTEX_EXIT(&peer->peer_lock);
+
clock_GetTime(&now);
clock_Add(&retryTime, &now);
usenow = now;
*(call->callNumber)));
break;
}
+#ifdef RX_TRACK_PACKETS
if ((p->flags & RX_PKTFLAG_FREE)
|| (!queue_IsEnd(&call->tq, nxp)
&& (nxp->flags & RX_PKTFLAG_FREE))
|| (nxp == (struct rx_packet *)&rx_freePacketQueue)) {
osi_Panic("rxi_Start: xmit queue clobbered");
}
+#endif
if (p->flags & RX_PKTFLAG_ACKED) {
/* Since we may block, don't trust this */
usenow.sec = usenow.usec = 0;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.ignoreAckedPacket);
continue; /* Ignore this packet if it has been acknowledged */
}
/* Transmit the packet if it needs to be sent. */
if (!clock_Lt(&now, &p->retryTime)) {
if (nXmitPackets == maxXmitPackets) {
- rxi_SendXmitList(call, xmitList, nXmitPackets,
- istack, &now, &retryTime,
+ rxi_SendXmitList(call, xmitList, nXmitPackets,
+ istack, &now, &retryTime,
resending);
- osi_Free(xmitList, maxXmitPackets *
+ osi_Free(xmitList, maxXmitPackets *
sizeof(struct rx_packet *));
goto restart;
}
* process that the call is in an error state.
*/
if (rx_stats_active)
- rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
+ rx_atomic_inc(&rx_tq_debug.rxi_start_aborted);
call->flags &= ~RX_CALL_TQ_BUSY;
if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
dpf(("call error %d while xmit %p has %d waiters and flags %d\n",
if (p->header.seq < call->tfirst
&& (p->flags & RX_PKTFLAG_ACKED)) {
queue_Remove(p);
+#ifdef RX_TRACK_PACKETS
p->flags &= ~RX_PKTFLAG_TQ;
+#endif
#ifdef RXDEBUG_PACKET
call->tqc--;
#endif
/* Post a new event to re-run rxi_Start when retries may be needed */
if (haveEvent && !(call->flags & RX_CALL_NEED_START)) {
#ifdef RX_ENABLE_LOCKS
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_RESEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
call->resendEvent =
- rxevent_PostNow2(&retryTime, &usenow,
+ rxevent_PostNow2(&retryTime, &usenow,
rxi_StartUnlocked,
(void *)call, 0, istack);
#else /* RX_ENABLE_LOCKS */
call->resendEvent =
- rxevent_PostNow2(&retryTime, &usenow, rxi_Start,
+ rxevent_PostNow2(&retryTime, &usenow, rxi_Start,
(void *)call, 0, istack);
#endif /* RX_ENABLE_LOCKS */
}
rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
/* Actually send the packet, filling in more connection-specific fields */
- CALL_HOLD(call, RX_CALL_REFCOUNT_SEND);
MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ CALL_HOLD(call, RX_CALL_REFCOUNT_SEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
rxi_SendPacket(call, conn, p, istack);
- MUTEX_ENTER(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_SEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ MUTEX_ENTER(&call->lock);
/* Update last send time for this call (for keep-alive
* processing), and for the connection (so that we can discover
{
struct rx_connection *conn = call->conn;
afs_uint32 now;
- afs_uint32 deadTime;
+ afs_uint32 deadTime, idleDeadTime = 0, hardDeadTime = 0;
+ afs_uint32 fudgeFactor;
int cerror = 0;
int newmtu = 0;
return 0;
}
#endif
- /* dead time + RTT + 8*MDEV, rounded up to next second. */
- deadTime =
- (((afs_uint32) conn->secondsUntilDead << 10) +
- ((afs_uint32) conn->peer->rtt >> 3) +
- ((afs_uint32) conn->peer->rtt_dev << 1) + 1023) >> 10;
+ /* RTT + 8*MDEV, rounded up to the next second. */
+ fudgeFactor = (((afs_uint32) conn->peer->rtt >> 3) +
+ ((afs_uint32) conn->peer->rtt_dev << 1) + 1023) >> 10;
+
+ deadTime = conn->secondsUntilDead + fudgeFactor;
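/*
 * Worked example (illustrative only): the dpf() in rxi_ComputeRoundTripTime
 * below prints peer->rtt >> 3 and peer->rtt_dev >> 2 in milliseconds, i.e.
 * rtt is the smoothed RTT scaled by 8 and rtt_dev the deviation scaled by 4.
 * With srtt = 120 ms (rtt = 960) and mdev = 30 ms (rtt_dev = 120):
 *
 *     fudgeFactor = ((960 >> 3) + (120 << 1) + 1023) >> 10
 *                 = (120 + 240 + 1023) >> 10
 *                 = 1 second   (the +1023 and >>10 round up to whole seconds)
 *
 * so a connection with secondsUntilDead = 12 gets deadTime = 13.
 */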
now = clock_Sec();
/* These are computed to the second (+- 1 second). But that's
* good enough for these values, which should be a significant
#endif
#endif
);
-
+
if (ire && ire->ire_max_frag > 0)
rxi_SetPeerMtu(NULL, conn->peer->host, 0,
ire->ire_max_frag);
rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
rxevent_Cancel(call->keepAliveEvent, call,
RX_CALL_REFCOUNT_ALIVE);
+ MUTEX_ENTER(&rx_refcnt_mutex);
if (call->refCount == 0) {
rxi_FreeCall(call, haveCTLock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return -2;
}
+ MUTEX_EXIT(&rx_refcnt_mutex);
return -1;
#else /* RX_ENABLE_LOCKS */
- rxi_FreeCall(call);
+ rxi_FreeCall(call, 0);
return -2;
#endif /* RX_ENABLE_LOCKS */
}
* to pings; active calls are simply flagged in error, so the
* attached process can die reasonably gracefully. */
}
+
+ if (conn->idleDeadTime) {
+ idleDeadTime = conn->idleDeadTime + fudgeFactor;
+ }
+
/* see if we have a non-activity timeout */
- if (call->startWait && conn->idleDeadTime
- && ((call->startWait + conn->idleDeadTime) < now) &&
+ if (call->startWait && idleDeadTime
+ && ((call->startWait + idleDeadTime) < now) &&
(call->flags & RX_CALL_READER_WAIT)) {
if (call->state == RX_STATE_ACTIVE) {
cerror = RX_CALL_TIMEOUT;
goto mtuout;
}
}
- if (call->lastSendData && conn->idleDeadTime && (conn->idleDeadErr != 0)
- && ((call->lastSendData + conn->idleDeadTime) < now)) {
+ if (call->lastSendData && idleDeadTime && (conn->idleDeadErr != 0)
+ && ((call->lastSendData + idleDeadTime) < now)) {
if (call->state == RX_STATE_ACTIVE) {
cerror = conn->idleDeadErr;
goto mtuout;
}
}
+
+    if (conn->hardDeadTime) {
+ hardDeadTime = conn->hardDeadTime + fudgeFactor;
+ }
+
/* see if we have a hard timeout */
- if (conn->hardDeadTime
- && (now > (conn->hardDeadTime + call->startTime.sec))) {
+ if (hardDeadTime
+ && (now > (hardDeadTime + call->startTime.sec))) {
if (call->state == RX_STATE_ACTIVE)
rxi_CallError(call, RX_CALL_TIMEOUT);
return -1;
}
return 0;
mtuout:
- if (conn->msgsizeRetryErr && cerror != RX_CALL_TIMEOUT) {
- /* if we never succeeded, let the error pass out as-is */
- if (conn->peer->maxPacketSize)
- cerror = conn->msgsizeRetryErr;
+ if (conn->msgsizeRetryErr && cerror != RX_CALL_TIMEOUT
+ && call->lastReceiveTime) {
+ int oldMTU = conn->peer->ifMTU;
/* if we thought we could send more, perhaps things got worse */
- if (call->conn->peer->maxPacketSize > conn->lastPacketSize)
+ if (conn->peer->maxPacketSize > conn->lastPacketSize)
/* maxpacketsize will be cleared in rxi_SetPeerMtu */
newmtu = MAX(conn->peer->maxPacketSize-RX_IPUDP_SIZE,
conn->lastPacketSize-(128+RX_IPUDP_SIZE));
/* needed so ResetCall doesn't clobber us. */
call->MTU = conn->peer->ifMTU;
+
+ /* if we never succeeded, let the error pass out as-is */
+ if (conn->peer->maxPacketSize && oldMTU != conn->peer->ifMTU)
+ cerror = conn->msgsizeRetryErr;
+
}
rxi_CallError(call, cerror);
return -1;
osi_NetSend(socket, &taddr, tmpiov, 1, 1 + sizeof(struct rx_header), 1);
MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
/* Only reschedule ourselves if the connection would not be destroyed */
if (conn->refCount <= 1) {
conn->natKeepAliveEvent = NULL;
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
rx_DestroyConnection(conn); /* drop the reference for this */
} else {
- conn->natKeepAliveEvent = NULL;
conn->refCount--; /* drop the reference for this */
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ conn->natKeepAliveEvent = NULL;
rxi_ScheduleNatKeepAliveEvent(conn);
MUTEX_EXIT(&conn->conn_data_lock);
}
clock_GetTime(&now);
when = now;
when.sec += conn->secondsUntilNatPing;
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++; /* hold a reference for this */
+ MUTEX_EXIT(&rx_refcnt_mutex);
conn->natKeepAliveEvent =
rxevent_PostNow(&when, &now, rxi_NatKeepAliveEvent, conn, 0);
}
struct rx_connection *conn;
afs_uint32 now;
- MUTEX_ENTER(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_ALIVE);
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ MUTEX_ENTER(&call->lock);
if (event == call->keepAliveEvent)
call->keepAliveEvent = NULL;
now = clock_Sec();
conn = call->conn;
if ((now - call->lastSendTime) > conn->secondsUntilPing) {
/* Don't try to send keepalives if there is unacknowledged data */
- /* the rexmit code should be good enough, this little hack
+ /* the rexmit code should be good enough, this little hack
* doesn't quite work XXX */
(void)rxi_SendAck(call, NULL, 0, RX_ACK_PING, 0);
}
struct rx_call *call = arg1;
struct rx_connection *conn;
- MUTEX_ENTER(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_ALIVE);
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ MUTEX_ENTER(&call->lock);
+
if (event == call->growMTUEvent)
call->growMTUEvent = NULL;
clock_GetTime(&now);
when = now;
when.sec += call->conn->secondsUntilPing;
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_ALIVE);
+ MUTEX_EXIT(&rx_refcnt_mutex);
call->keepAliveEvent =
rxevent_PostNow(&when, &now, rxi_KeepAliveEvent, call, 0);
}
}
when.sec += secs;
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_ALIVE);
+ MUTEX_EXIT(&rx_refcnt_mutex);
call->growMTUEvent =
rxevent_PostNow(&when, &now, rxi_GrowMTUEvent, call, 0);
}
void *arg1, void *unused)
{
struct rx_connection *conn = arg1;
-
+
afs_int32 error;
struct rx_packet *packet;
/* This routine is called to send call abort messages
* that have been delayed to throttle looping clients. */
void
-rxi_SendDelayedCallAbort(struct rxevent *event,
+rxi_SendDelayedCallAbort(struct rxevent *event,
void *arg1, void *dummy)
{
struct rx_call *call = arg1;
-
+
afs_int32 error;
struct rx_packet *packet;
(char *)&error, sizeof(error), 0);
rxi_FreePacket(packet);
}
- CALL_RELE(call, RX_CALL_REFCOUNT_ABORT);
MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ CALL_RELE(call, RX_CALL_REFCOUNT_ABORT);
+ MUTEX_EXIT(&rx_refcnt_mutex);
}
/* This routine is called periodically (every RX_AUTH_REQUEST_TIMEOUT
* issues a challenge to the client, which is obtained from the
* security object associated with the connection */
void
-rxi_ChallengeEvent(struct rxevent *event,
+rxi_ChallengeEvent(struct rxevent *event,
void *arg0, void *arg1, int tries)
{
struct rx_connection *conn = arg0;
-
+
conn->challengeEvent = NULL;
if (RXS_CheckAuthentication(conn->securityObject, conn) != 0) {
struct rx_packet *packet;
/* rxi_ComputeRoundTripTime is called with peer locked. */
/* sentp and/or peer may be null */
-void
+static void
rxi_ComputeRoundTripTime(struct rx_packet *p,
struct clock *sentp,
- struct rx_peer *peer)
+ struct rx_peer *peer,
+ struct clock *now)
{
struct clock thisRtt, *rttp = &thisRtt;
-
int rtt_timeout;
- clock_GetTime(rttp);
+ thisRtt = *now;
- if (clock_Lt(rttp, sentp)) {
- clock_Zero(rttp);
+ if (clock_Lt(rttp, sentp))
return; /* somebody set the clock back, don't count this time. */
- }
+
clock_Sub(rttp, sentp);
dpf(("rxi_ComputeRoundTripTime(call=%d packet=%"AFS_PTR_FMT" rttp=%d.%06d sec)\n",
p->header.callNumber, p, rttp->sec, rttp->usec));
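/*
 * Illustrative caller sketch (assumption, not shown in this fragment): with
 * the new signature the caller samples the clock once and passes it in,
 * instead of rxi_ComputeRoundTripTime() reading the clock itself, e.g.:
 *
 *     struct clock now;
 *     clock_GetTime(&now);
 *     rxi_ComputeRoundTripTime(p, sentp, call->conn->peer, &now);
 *
 * which lets one clock sample be reused when several packets are acked at
 * once.
 */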
rx_stats.maxRtt = *rttp;
}
clock_Add(&rx_stats.totalRtt, rttp);
- rx_stats.nRttSamples++;
+ rx_atomic_inc(&rx_stats.nRttSamples);
MUTEX_EXIT(&rx_stats_mutex);
}
} else {
/* I don't have a stored RTT so I start with this value. Since I'm
* probably just starting a call, and will be pushing more data down
- * this, I expect congestion to increase rapidly. So I fudge a
+ * this, I expect congestion to increase rapidly. So I fudge a
* little, and I set deviance to half the rtt. In practice,
* deviance tends to approach something a little less than
* half the smoothed rtt. */
clock_Zero(&(peer->timeout));
clock_Addmsec(&(peer->timeout), rtt_timeout);
+ /* Reset the backedOff flag since we just computed a new timeout value */
+ peer->backedOff = 0;
+
dpf(("rxi_ComputeRoundTripTime(call=%d packet=%"AFS_PTR_FMT" rtt=%d ms, srtt=%d ms, rtt_dev=%d ms, timeout=%d.%06d sec)\n",
p->header.callNumber, p, MSEC(rttp), peer->rtt >> 3, peer->rtt_dev >> 2, (peer->timeout.sec), (peer->timeout.usec)));
}
/* This only actually destroys the connection if
* there are no outstanding calls */
MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
if (!havecalls && !conn->refCount
&& ((conn->lastSendTime + rx_idleConnectionTime) <
now.sec)) {
conn->refCount++; /* it will be decr in rx_DestroyConn */
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
#ifdef RX_ENABLE_LOCKS
rxi_DestroyConnectionNoLock(conn);
}
#ifdef RX_ENABLE_LOCKS
else {
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
}
#endif /* RX_ENABLE_LOCKS */
prev->next = next;
if (rx_stats_active)
- rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nPeerStructs);
/*
* Now if we hold references on 'prev' and 'next'
} else {
return;
}
- xferSize = rx_AckDataSize(rx_Window) + RX_HEADER_SIZE;
+ xferSize = rx_AckDataSize(rx_maxSendWindow) + RX_HEADER_SIZE;
break;
default:
* one packet exchange */
if (clock_Gt(&newTO, &peer->timeout)) {
- dpf(("CONG peer %lx/%u: timeout %d.%06d ==> %ld.%06d (rtt %u, ps %u)",
+ dpf(("CONG peer %lx/%u: timeout %d.%06d ==> %ld.%06d (rtt %u)",
ntohl(peer->host), ntohs(peer->port), peer->timeout.sec, peer->timeout.usec,
- newTO.sec, newTO.usec, peer->smRtt, peer->packetSize));
+ newTO.sec, newTO.usec, peer->smRtt));
peer->timeout = newTO;
}
/* Now, convert to the number of full packets that could fit in a
* reasonable fraction of that interval */
minTime /= (peer->smRtt << 1);
+ minTime = MAX(minTime, rx_minPeerTimeout);
xferSize = minTime; /* (make a copy) */
/* Now clamp the size to reasonable bounds. */
if (minTime <= 1)
minTime = 1;
- else if (minTime > rx_Window)
- minTime = rx_Window;
+ else if (minTime > rx_maxSendWindow)
+ minTime = rx_maxSendWindow;
/* if (minTime != peer->maxWindow) {
- dpf(("CONG peer %lx/%u: windowsize %lu ==> %lu (to %lu.%06lu, rtt %u, ps %u)",
+ dpf(("CONG peer %lx/%u: windowsize %lu ==> %lu (to %lu.%06lu, rtt %u)",
ntohl(peer->host), ntohs(peer->port), peer->maxWindow, minTime,
- peer->timeout.sec, peer->timeout.usec, peer->smRtt,
- peer->packetSize));
+ peer->timeout.sec, peer->timeout.usec, peer->smRtt));
peer->maxWindow = minTime;
- elide... call->twind = minTime;
+ elide... call->twind = minTime;
}
*/
/* Cut back on the peer timeout if it had earlier grown unreasonably.
* Discern this by calculating the timeout necessary for rx_Window
* packets. */
- if ((xferSize > rx_Window) && (peer->timeout.sec >= 3)) {
+ if ((xferSize > rx_maxSendWindow) && (peer->timeout.sec >= 3)) {
/* calculate estimate for transmission interval in milliseconds */
- minTime = rx_Window * peer->smRtt;
+ minTime = rx_maxSendWindow * peer->smRtt;
if (minTime < 1000) {
- dpf(("CONG peer %lx/%u: cut TO %d.%06d by 0.5 (rtt %u, ps %u)",
+ dpf(("CONG peer %lx/%u: cut TO %d.%06d by 0.5 (rtt %u)",
ntohl(peer->host), ntohs(peer->port), peer->timeout.sec,
- peer->timeout.usec, peer->smRtt, peer->packetSize));
+ peer->timeout.usec, peer->smRtt));
newTO.sec = 0; /* cut back on timeout by half a second */
newTO.usec = 500000;
va_end(ap);
#else
struct clock now;
-
+
va_start(ap, format);
clock_GetTime(&now);
rx_PrintTheseStats(FILE * file, struct rx_statistics *s, int size,
afs_int32 freePackets, char version)
{
-#ifdef RXDEBUG
int i;
if (size != sizeof(struct rx_statistics)) {
#if !defined(AFS_PTHREAD_ENV) && !defined(AFS_USE_GETTIMEOFDAY)
fprintf(file, " %d clock updates\n", clock_nUpdates);
#endif
-#else
- fprintf(file, "ERROR: compiled without RXDEBUG\n");
-#endif
}
/* for backward compatibility */
rx_PrintStats(FILE * file)
{
MUTEX_ENTER(&rx_stats_mutex);
- rx_PrintTheseStats(file, &rx_stats, sizeof(rx_stats), rx_nFreePackets,
+ rx_PrintTheseStats(file, (struct rx_statistics *) &rx_stats,
+ sizeof(rx_stats), rx_nFreePackets,
RX_DEBUGI_VERSION);
MUTEX_EXIT(&rx_stats_mutex);
}
rx_PrintPeerStats(FILE * file, struct rx_peer *peer)
{
fprintf(file, "Peer %x.%d. " "Burst size %d, " "burst wait %d.%06d.\n",
- ntohl(peer->host), (int)peer->port, (int)peer->burstSize,
+ ntohl(peer->host), (int)ntohs(peer->port), (int)peer->burstSize,
(int)peer->burstWait.sec, (int)peer->burstWait.usec);
fprintf(file,
#define UNLOCK_RX_DEBUG
#endif /* AFS_PTHREAD_ENV */
-#ifdef RXDEBUG
+#if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
static int
MakeDebugCall(osi_socket socket, afs_uint32 remoteAddr, afs_uint16 remotePort,
u_char type, void *inputData, size_t inputLength,
void *outputData, size_t outputLength)
{
static afs_int32 counter = 100;
- time_t waitTime, waitCount, startTime;
+ time_t waitTime, waitCount;
struct rx_header theader;
char tbuffer[1500];
afs_int32 code;
fd_set imask;
char *tp;
- startTime = time(0);
waitTime = 1;
waitCount = 5;
LOCK_RX_DEBUG;
tv_delta.tv_sec = tv_wake.tv_sec;
tv_delta.tv_usec = tv_wake.tv_usec;
gettimeofday(&tv_now, 0);
-
+
if (tv_delta.tv_usec < tv_now.tv_usec) {
/* borrow */
tv_delta.tv_usec += 1000000;
tv_delta.tv_sec--;
}
tv_delta.tv_usec -= tv_now.tv_usec;
-
+
if (tv_delta.tv_sec < tv_now.tv_sec) {
/* time expired */
break;
}
tv_delta.tv_sec -= tv_now.tv_sec;
-
+
#ifdef AFS_NT40_ENV
code = select(0, &imask, 0, 0, &tv_delta);
#else /* AFS_NT40_ENV */
code =
recvfrom(socket, tbuffer, sizeof(tbuffer), 0,
(struct sockaddr *)&faddr, &faddrLen);
-
+
if (code > 0) {
memcpy(&theader, tbuffer, sizeof(struct rx_header));
if (counter == ntohl(theader.callNumber))
}
waitTime <<= 1;
}
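/*
 * Illustrative note (not part of this change): waitTime starts at 1 and
 * doubles after each unanswered probe, so assuming waitCount (initialised
 * to 5 above) bounds the number of attempts, the call waits roughly
 * 1, 2, 4, 8 and 16 seconds before giving up.
 */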
-
+
success:
code -= sizeof(struct rx_header);
if (code > outputLength)
afs_uint16 remotePort, struct rx_debugStats * stat,
afs_uint32 * supportedValues)
{
-#ifndef RXDEBUG
- afs_int32 rc = -1;
-#else
+#if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
afs_int32 rc = 0;
struct rx_debugIn in;
stat->nWaited = ntohl(stat->nWaited);
stat->nPackets = ntohl(stat->nPackets);
}
+#else
+ afs_int32 rc = -1;
#endif
return rc;
}
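/*
 * Illustrative usage sketch (not part of this change): a debugging client
 * built with RXDEBUG or MAKEDEBUGCALL defined might call the routine above
 * roughly as follows; the helper name, the argument byte order, and the
 * assumption that a non-negative return means success are all illustrative.
 */
#if 0 /* example only, not compiled */
static int
example_probe_server(osi_socket sock, afs_uint32 host, afs_uint16 port)
{
    struct rx_debugStats tstats;
    afs_uint32 supported;

    if (rx_GetServerDebug(sock, host, port, &tstats, &supported) >= 0) {
        /* nWaited has already been converted to host order by the routine */
        fprintf(stderr, "server waited %u times\n",
                (unsigned int)tstats.nWaited);
        return 0;
    }
    return -1;
}
#endif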
afs_uint16 remotePort, struct rx_statistics * stat,
afs_uint32 * supportedValues)
{
-#ifndef RXDEBUG
- afs_int32 rc = -1;
-#else
+#if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
afs_int32 rc = 0;
struct rx_debugIn in;
afs_int32 *lp = (afs_int32 *) stat;
*lp = ntohl(*lp);
}
}
+#else
+ afs_int32 rc = -1;
#endif
return rc;
}
afs_uint16 remotePort, size_t version_length,
char *version)
{
-#ifdef RXDEBUG
+#if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
char a[1] = { 0 };
return MakeDebugCall(socket, remoteAddr, remotePort,
RX_PACKET_TYPE_VERSION, a, 1, version,
struct rx_debugConn * conn,
afs_uint32 * supportedValues)
{
-#ifndef RXDEBUG
- afs_int32 rc = -1;
-#else
+#if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
afs_int32 rc = 0;
struct rx_debugIn in;
int i;
conn->epoch = ntohl(conn->epoch);
conn->natMTU = ntohl(conn->natMTU);
}
+#else
+ afs_int32 rc = -1;
#endif
return rc;
}
afs_uint32 debugSupportedValues, struct rx_debugPeer * peer,
afs_uint32 * supportedValues)
{
-#ifndef RXDEBUG
- afs_int32 rc = -1;
-#else
+#if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
afs_int32 rc = 0;
struct rx_debugIn in;
peer->bytesReceived.high = ntohl(peer->bytesReceived.high);
peer->bytesReceived.low = ntohl(peer->bytesReceived.low);
}
+#else
+ afs_int32 rc = -1;
#endif
return rc;
}
-afs_int32
+afs_int32
rx_GetLocalPeers(afs_uint32 peerHost, afs_uint16 peerPort,
struct rx_debugPeer * peerStats)
{
afs_uint32 hashValue = PEER_HASH(peerHost, peerPort);
MUTEX_ENTER(&rx_peerHashTable_lock);
- for(tp = rx_peerHashTable[hashValue];
+ for(tp = rx_peerHashTable[hashValue];
tp != NULL; tp = tp->next) {
if (tp->host == peerHost)
break;
next = peer->next;
rxi_FreePeer(peer);
if (rx_stats_active)
- rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nPeerStructs);
}
MUTEX_EXIT(&rx_peerHashTable_lock);
}
sizeof(rx_interface_stat_t) +
totalFunc * sizeof(rx_function_entry_v1_t);
- rpc_stat = (rx_interface_stat_p) rxi_Alloc(space);
+ rpc_stat = rxi_Alloc(space);
if (rpc_stat == NULL) {
rc = 1;
goto fail;
if (space > (size_t) 0) {
*allocSize = space;
- ptr = *stats = (afs_uint32 *) rxi_Alloc(space);
+ ptr = *stats = rxi_Alloc(space);
if (ptr != NULL) {
rx_interface_stat_p rpc_stat, nrpc_stat;
if (space > (size_t) 0) {
*allocSize = space;
- ptr = *stats = (afs_uint32 *) rxi_Alloc(space);
+ ptr = *stats = rxi_Alloc(space);
if (ptr != NULL) {
rx_interface_stat_p rpc_stat, nrpc_stat;
"\r\n",
cookie, c, c->call_id, (afs_uint32)c->state, (afs_uint32)c->mode, c->conn, c->conn?c->conn->epoch:0, c->conn?c->conn->cid:0,
c->callNumber?*c->callNumber:0, c->conn?c->conn->flags:0, c->flags,
- (afs_uint32)c->rqc, (afs_uint32)rqc, (afs_uint32)c->tqc, (afs_uint32)tqc, (afs_uint32)c->iovqc, (afs_uint32)iovqc,
- (afs_uint32)c->localStatus, (afs_uint32)c->remoteStatus, c->error, c->timeout,
+ (afs_uint32)c->rqc, (afs_uint32)rqc, (afs_uint32)c->tqc, (afs_uint32)tqc, (afs_uint32)c->iovqc, (afs_uint32)iovqc,
+ (afs_uint32)c->localStatus, (afs_uint32)c->remoteStatus, c->error, c->timeout,
c->resendEvent?1:0, c->timeoutEvent?1:0, c->keepAliveEvent?1:0, c->delayedAckEvent?1:0, c->delayedAbortEvent?1:0,
c->abortCode, c->abortCount, c->lastSendTime, c->lastReceiveTime, c->lastSendData
#ifdef RX_ENABLE_LOCKS