#include <afs/param.h>
#endif
-RCSID
- ("$Header$");
#ifdef KERNEL
#include "afs/sysincludes.h"
#include "h/socket.h"
#endif
#include "netinet/in.h"
+#ifdef AFS_SUN57_ENV
+#include "inet/common.h"
+#include "inet/ip.h"
+#include "inet/ip_ire.h"
+#endif
#include "afs/afs_args.h"
#include "afs/afs_osi.h"
#ifdef RX_KERNEL_TRACE
#else /* KERNEL */
# include <sys/types.h>
# include <string.h>
+# include <stdarg.h>
# include <errno.h>
+# ifdef HAVE_STDINT_H
+# include <stdint.h>
+# endif
#ifdef AFS_NT40_ENV
# include <stdlib.h>
# include <fcntl.h>
# include <afs/rxgen_consts.h>
#endif /* KERNEL */
-int (*registerProgram) () = 0;
-int (*swapNameProgram) () = 0;
+#ifndef KERNEL
+#ifdef AFS_PTHREAD_ENV
+#ifndef AFS_NT40_ENV
+int (*registerProgram) (pid_t, char *) = 0;
+int (*swapNameProgram) (pid_t, const char *, char *) = 0;
+#endif
+#else
+int (*registerProgram) (PROCESS, char *) = 0;
+int (*swapNameProgram) (PROCESS, const char *, char *) = 0;
+#endif
+#endif
/* Local static routines */
-static void rxi_DestroyConnectionNoLock(register struct rx_connection *conn);
+static void rxi_DestroyConnectionNoLock(struct rx_connection *conn);
#ifdef RX_ENABLE_LOCKS
-static void rxi_SetAcksInTransmitQueue(register struct rx_call *call);
+static void rxi_SetAcksInTransmitQueue(struct rx_call *call);
#endif
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
* to ease NT porting
*/
-extern pthread_mutex_t rx_stats_mutex;
-extern pthread_mutex_t des_init_mutex;
-extern pthread_mutex_t des_random_mutex;
-extern pthread_mutex_t rx_clock_mutex;
-extern pthread_mutex_t rxi_connCacheMutex;
-extern pthread_mutex_t rx_event_mutex;
-extern pthread_mutex_t osi_malloc_mutex;
-extern pthread_mutex_t event_handler_mutex;
-extern pthread_mutex_t listener_mutex;
-extern pthread_mutex_t rx_if_init_mutex;
-extern pthread_mutex_t rx_if_mutex;
-extern pthread_mutex_t rxkad_client_uid_mutex;
-extern pthread_mutex_t rxkad_random_mutex;
-
-extern pthread_cond_t rx_event_handler_cond;
-extern pthread_cond_t rx_listener_cond;
-
-static pthread_mutex_t epoch_mutex;
-static pthread_mutex_t rx_init_mutex;
-static pthread_mutex_t rx_debug_mutex;
+extern afs_kmutex_t rx_stats_mutex;
+extern afs_kmutex_t rx_waiting_mutex;
+extern afs_kmutex_t rx_quota_mutex;
+extern afs_kmutex_t rx_pthread_mutex;
+extern afs_kmutex_t rx_packets_mutex;
+extern afs_kmutex_t des_init_mutex;
+extern afs_kmutex_t des_random_mutex;
+extern afs_kmutex_t rx_clock_mutex;
+extern afs_kmutex_t rxi_connCacheMutex;
+extern afs_kmutex_t rx_event_mutex;
+extern afs_kmutex_t osi_malloc_mutex;
+extern afs_kmutex_t event_handler_mutex;
+extern afs_kmutex_t listener_mutex;
+extern afs_kmutex_t rx_if_init_mutex;
+extern afs_kmutex_t rx_if_mutex;
+extern afs_kmutex_t rxkad_client_uid_mutex;
+extern afs_kmutex_t rxkad_random_mutex;
+
+extern afs_kcondvar_t rx_event_handler_cond;
+extern afs_kcondvar_t rx_listener_cond;
+
+static afs_kmutex_t epoch_mutex;
+static afs_kmutex_t rx_init_mutex;
+static afs_kmutex_t rx_debug_mutex;
+static afs_kmutex_t rx_rpc_stats;
static void
rxi_InitPthread(void)
{
- assert(pthread_mutex_init(&rx_clock_mutex, (const pthread_mutexattr_t *)0)
- == 0);
- assert(pthread_mutex_init(&rx_stats_mutex, (const pthread_mutexattr_t *)0)
- == 0);
- assert(pthread_mutex_init
- (&rxi_connCacheMutex, (const pthread_mutexattr_t *)0) == 0);
- assert(pthread_mutex_init(&rx_init_mutex, (const pthread_mutexattr_t *)0)
- == 0);
- assert(pthread_mutex_init(&epoch_mutex, (const pthread_mutexattr_t *)0) ==
- 0);
- assert(pthread_mutex_init(&rx_event_mutex, (const pthread_mutexattr_t *)0)
- == 0);
- assert(pthread_mutex_init(&des_init_mutex, (const pthread_mutexattr_t *)0)
- == 0);
- assert(pthread_mutex_init
- (&des_random_mutex, (const pthread_mutexattr_t *)0) == 0);
- assert(pthread_mutex_init
- (&osi_malloc_mutex, (const pthread_mutexattr_t *)0) == 0);
- assert(pthread_mutex_init
- (&event_handler_mutex, (const pthread_mutexattr_t *)0) == 0);
- assert(pthread_mutex_init(&listener_mutex, (const pthread_mutexattr_t *)0)
- == 0);
- assert(pthread_mutex_init
- (&rx_if_init_mutex, (const pthread_mutexattr_t *)0) == 0);
- assert(pthread_mutex_init(&rx_if_mutex, (const pthread_mutexattr_t *)0) ==
- 0);
- assert(pthread_mutex_init
- (&rxkad_client_uid_mutex, (const pthread_mutexattr_t *)0) == 0);
- assert(pthread_mutex_init
- (&rxkad_random_mutex, (const pthread_mutexattr_t *)0) == 0);
- assert(pthread_mutex_init(&rx_debug_mutex, (const pthread_mutexattr_t *)0)
- == 0);
+ MUTEX_INIT(&rx_clock_mutex, "clock", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_stats_mutex, "stats", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_waiting_mutex, "waiting", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_quota_mutex, "quota", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_pthread_mutex, "pthread", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_packets_mutex, "packets", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&epoch_mutex, "epoch", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_init_mutex, "init", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_event_mutex, "event", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&des_init_mutex, "des", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&des_random_mutex, "random", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&osi_malloc_mutex, "malloc", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&event_handler_mutex, "event handler", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rxi_connCacheMutex, "conn cache", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&listener_mutex, "listener", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_if_init_mutex, "if init", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_if_mutex, "if", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rxkad_client_uid_mutex, "uid", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rxkad_random_mutex, "rxkad random", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_debug_mutex, "debug", MUTEX_DEFAULT, 0);
assert(pthread_cond_init
(&rx_event_handler_cond, (const pthread_condattr_t *)0) == 0);
assert(pthread_key_create(&rx_ts_info_key, NULL) == 0);
rxkad_global_stats_init();
+
+ MUTEX_INIT(&rx_rpc_stats, "rx_rpc_stats", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_freePktQ_lock, "rx_freePktQ_lock", MUTEX_DEFAULT, 0);
+#ifdef RX_ENABLE_LOCKS
+#ifdef RX_LOCKS_DB
+ rxdb_init();
+#endif /* RX_LOCKS_DB */
+ MUTEX_INIT(&freeSQEList_lock, "freeSQEList lock", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_freeCallQueue_lock, "rx_freeCallQueue_lock", MUTEX_DEFAULT,
+ 0);
+ CV_INIT(&rx_waitingForPackets_cv, "rx_waitingForPackets_cv", CV_DEFAULT,
+ 0);
+ MUTEX_INIT(&rx_peerHashTable_lock, "rx_peerHashTable_lock", MUTEX_DEFAULT,
+ 0);
+ MUTEX_INIT(&rx_connHashTable_lock, "rx_connHashTable_lock", MUTEX_DEFAULT,
+ 0);
+ MUTEX_INIT(&rx_serverPool_lock, "rx_serverPool_lock", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rxi_keyCreate_lock, "rxi_keyCreate_lock", MUTEX_DEFAULT, 0);
+#endif /* RX_ENABLE_LOCKS */
}
pthread_once_t rx_once_init = PTHREAD_ONCE_INIT;
assert(pthread_once(&rx_once_init, rxi_InitPthread)==0)
/*
* The rx_stats_mutex mutex protects the following global variables:
- * rxi_dataQuota
- * rxi_minDeficit
- * rxi_availProcs
- * rxi_totalMin
* rxi_lowConnRefCount
* rxi_lowPeerRefCount
* rxi_nCalls
* rxi_Alloccnt
* rxi_Allocsize
- * rx_nFreePackets
* rx_tq_debug
* rx_stats
*/
+
+/*
+ * The rx_quota_mutex mutex protects the following global variables:
+ * rxi_dataQuota
+ * rxi_minDeficit
+ * rxi_availProcs
+ * rxi_totalMin
+ */
+
+/*
+ * The rx_freePktQ_lock protects the following global variables:
+ * rx_nFreePackets
+ */
+
+/*
+ * The rx_packets_mutex mutex protects the following global variables:
+ * rx_nPackets
+ * rx_TSFPQLocalMax
+ * rx_TSFPQGlobSize
+ * rx_TSFPQMaxProcs
+ */
+
+/*
+ * The rx_pthread_mutex mutex protects the following global variables:
+ * rxi_pthread_hinum
+ */
#else
#define INIT_PTHREAD_LOCKS
#endif
* to manipulate the queue.
*/
-#ifdef RX_ENABLE_LOCKS
+#if defined(RX_ENABLE_LOCKS) && defined(KERNEL)
static afs_kmutex_t rx_rpc_stats;
-void rxi_StartUnlocked();
+void rxi_StartUnlocked(struct rxevent *event, void *call,
+ void *arg1, int istack);
#endif
/* We keep a "last conn pointer" in rxi_FindConnection. The odds are
* conn->peer was previously a constant for all intents and so has no
* lock protecting this field. The multihomed client delta introduced
* a RX code change : change the peer field in the connection structure
- * to that remote inetrface from which the last packet for this
+ * to that remote interface from which the last packet for this
* connection was sent out. This may become an issue if further changes
* are made.
*/
* rx_epoch
*/
-#define LOCK_EPOCH assert(pthread_mutex_lock(&epoch_mutex)==0)
-#define UNLOCK_EPOCH assert(pthread_mutex_unlock(&epoch_mutex)==0)
+#define LOCK_EPOCH MUTEX_ENTER(&epoch_mutex)
+#define UNLOCK_EPOCH MUTEX_EXIT(&epoch_mutex)
#else
#define LOCK_EPOCH
#define UNLOCK_EPOCH
* by the kernel. Whether this will ever overlap anything in
* /etc/services is anybody's guess... Returns 0 on success, -1 on
* error. */
-static int rxinit_status = 1;
+#ifndef AFS_NT40_ENV
+static
+#endif
+int rxinit_status = 1;
#ifdef AFS_PTHREAD_ENV
/*
* This mutex protects the following global variables:
* rxinit_status
*/
-#define LOCK_RX_INIT assert(pthread_mutex_lock(&rx_init_mutex)==0)
-#define UNLOCK_RX_INIT assert(pthread_mutex_unlock(&rx_init_mutex)==0)
+#define LOCK_RX_INIT MUTEX_ENTER(&rx_init_mutex)
+#define UNLOCK_RX_INIT MUTEX_EXIT(&rx_init_mutex)
#else
#define LOCK_RX_INIT
#define UNLOCK_RX_INIT
if (afs_winsockInit() < 0)
return -1;
#endif
-
+
#ifndef KERNEL
/*
* Initialize anything necessary to provide a non-premptive threading
*/
rxi_InitializeThreadSupport();
#endif
-
+
/* Allocate and initialize a socket for client and perhaps server
* connections. */
-
+
rx_socket = rxi_GetHostUDPSocket(host, (u_short) port);
if (rx_socket == OSI_NULLSOCKET) {
UNLOCK_RX_INIT;
return RX_ADDRINUSE;
}
-#ifdef RX_ENABLE_LOCKS
+#if defined(RX_ENABLE_LOCKS) && defined(KERNEL)
#ifdef RX_LOCKS_DB
rxdb_init();
#endif /* RX_LOCKS_DB */
MUTEX_INIT(&rx_stats_mutex, "rx_stats_mutex", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_waiting_mutex, "rx_waiting_mutex", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_quota_mutex, "rx_quota_mutex", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_pthread_mutex, "rx_pthread_mutex", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_packets_mutex, "rx_packets_mutex", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_rpc_stats, "rx_rpc_stats", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_freePktQ_lock, "rx_freePktQ_lock", MUTEX_DEFAULT, 0);
MUTEX_INIT(&freeSQEList_lock, "freeSQEList lock", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_connHashTable_lock, "rx_connHashTable_lock", MUTEX_DEFAULT,
0);
MUTEX_INIT(&rx_serverPool_lock, "rx_serverPool_lock", MUTEX_DEFAULT, 0);
-#ifndef KERNEL
- MUTEX_INIT(&rxi_keyCreate_lock, "rxi_keyCreate_lock", MUTEX_DEFAULT, 0);
-#endif /* !KERNEL */
-#if defined(KERNEL) && defined(AFS_HPUX110_ENV)
+#if defined(AFS_HPUX110_ENV)
if (!uniprocessor)
rx_sleepLock = alloc_spinlock(LAST_HELD_ORDER - 10, "rx_sleepLock");
-#endif /* KERNEL && AFS_HPUX110_ENV */
-#endif /* RX_ENABLE_LOCKS */
+#endif /* AFS_HPUX110_ENV */
+#endif /* RX_ENABLE_LOCKS && KERNEL */
rxi_nCalls = 0;
rx_connDeadTime = 12;
rx_tranquil = 0; /* reset flag */
- memset((char *)&rx_stats, 0, sizeof(struct rx_stats));
+ memset(&rx_stats, 0, sizeof(struct rx_statistics));
htable = (char *)
osi_Alloc(rx_hashTableSize * sizeof(struct rx_connection *));
PIN(htable, rx_hashTableSize * sizeof(struct rx_connection *)); /* XXXXX */
rx_port = 0;
#else
struct sockaddr_in addr;
- int addrlen = sizeof(addr);
- if (getsockname((int)rx_socket, (struct sockaddr *)&addr, &addrlen)) {
+#ifdef AFS_NT40_ENV
+ int addrlen = sizeof(addr);
+#else
+ socklen_t addrlen = sizeof(addr);
+#endif
+ if (getsockname((intptr_t)rx_socket, (struct sockaddr *)&addr, &addrlen)) {
rx_Finalize();
return -1;
}
rx_SetEpoch(tv.tv_sec); /* Start time of this package, rxkad
* will provide a randomer value. */
#endif
- MUTEX_ENTER(&rx_stats_mutex);
- rxi_dataQuota += rx_extraQuota; /* + extra pkts caller asked to rsrv */
- MUTEX_EXIT(&rx_stats_mutex);
+ MUTEX_ENTER(&rx_quota_mutex);
+ rxi_dataQuota += rx_extraQuota; /* + extra pkts caller asked to rsrv */
+ MUTEX_EXIT(&rx_quota_mutex);
/* *Slightly* random start time for the cid. This is just to help
* out with the hashing function at the peer */
rx_nextCid = ((tv.tv_sec ^ tv.tv_usec) << RX_CIDSHIFT);
* rx_serverPool_lock is held. Return quota using ReturnToServerPool().
*/
static int
-QuotaOK(register struct rx_service *aservice)
+QuotaOK(struct rx_service *aservice)
{
/* check if over max quota */
if (aservice->nRequestsRunning >= aservice->maxProcs) {
/* otherwise, can use only if there are enough to allow everyone
* to go to their min quota after this guy starts.
*/
- MUTEX_ENTER(&rx_stats_mutex);
+
+ MUTEX_ENTER(&rx_quota_mutex);
if ((aservice->nRequestsRunning < aservice->minProcs)
|| (rxi_availProcs > rxi_minDeficit)) {
aservice->nRequestsRunning++;
if (aservice->nRequestsRunning <= aservice->minProcs)
rxi_minDeficit--;
rxi_availProcs--;
- MUTEX_EXIT(&rx_stats_mutex);
+ MUTEX_EXIT(&rx_quota_mutex);
return 1;
}
- MUTEX_EXIT(&rx_stats_mutex);
+ MUTEX_EXIT(&rx_quota_mutex);
return 0;
}
static void
-ReturnToServerPool(register struct rx_service *aservice)
+ReturnToServerPool(struct rx_service *aservice)
{
aservice->nRequestsRunning--;
- MUTEX_ENTER(&rx_stats_mutex);
+ MUTEX_ENTER(&rx_quota_mutex);
if (aservice->nRequestsRunning < aservice->minProcs)
rxi_minDeficit++;
rxi_availProcs++;
- MUTEX_EXIT(&rx_stats_mutex);
+ MUTEX_EXIT(&rx_quota_mutex);
}
#else /* RX_ENABLE_LOCKS */
static int
-QuotaOK(register struct rx_service *aservice)
+QuotaOK(struct rx_service *aservice)
{
int rc = 0;
/* under min quota, we're OK */
void
rxi_StartServerProcs(int nExistingProcs)
{
- register struct rx_service *service;
- register int i;
+ struct rx_service *service;
+ int i;
int maxdiff = 0;
int nProcs = 0;
void
rx_StartServer(int donateMe)
{
- register struct rx_service *service;
- register int i;
+ struct rx_service *service;
+ int i;
SPLVAR;
clock_NewTime();
service = rx_services[i];
if (service == (struct rx_service *)0)
break;
- MUTEX_ENTER(&rx_stats_mutex);
+ MUTEX_ENTER(&rx_quota_mutex);
rxi_totalMin += service->minProcs;
/* below works even if a thread is running, since minDeficit would
* still have been decremented and later re-incremented.
*/
rxi_minDeficit += service->minProcs;
- MUTEX_EXIT(&rx_stats_mutex);
+ MUTEX_EXIT(&rx_quota_mutex);
}
/* Turn on reaping of idle server connections */
- rxi_ReapConnections();
+ rxi_ReapConnections(NULL, NULL, NULL);
USERPRI;
static int nProcs;
#ifdef AFS_PTHREAD_ENV
pid_t pid;
- pid = (pid_t) pthread_self();
+ pid = afs_pointer_to_int(pthread_self());
#else /* AFS_PTHREAD_ENV */
PROCESS pid;
LWP_CurrentProcess(&pid);
* specified security object to implement the security model for this
* connection. */
struct rx_connection *
-rx_NewConnection(register afs_uint32 shost, u_short sport, u_short sservice,
- register struct rx_securityClass *securityObject,
+rx_NewConnection(afs_uint32 shost, u_short sport, u_short sservice,
+ struct rx_securityClass *securityObject,
int serviceSecurityIndex)
{
int hashindex, i;
afs_int32 cid;
- register struct rx_connection *conn;
+ struct rx_connection *conn;
SPLVAR;
clock_NewTime();
- dpf(("rx_NewConnection(host %x, port %u, service %u, securityObject %x, serviceSecurityIndex %d)\n", ntohl(shost), ntohs(sport), sservice, securityObject, serviceSecurityIndex));
+ dpf(("rx_NewConnection(host %x, port %u, service %u, securityObject %p, "
+ "serviceSecurityIndex %d)\n",
+ ntohl(shost), ntohs(sport), sservice, securityObject,
+ serviceSecurityIndex));
/* Vasilsi said: "NETPRI protects Cid and Alloc", but can this be true in
* the case of kmem_alloc? */
conn = rxi_AllocConnection();
#ifdef RX_ENABLE_LOCKS
MUTEX_INIT(&conn->conn_call_lock, "conn call lock", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&conn->conn_data_lock, "conn call lock", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&conn->conn_data_lock, "conn data lock", MUTEX_DEFAULT, 0);
CV_INIT(&conn->conn_call_cv, "conn call cv", CV_DEFAULT, 0);
#endif
NETPRI;
conn->securityData = (void *) 0;
conn->securityIndex = serviceSecurityIndex;
rx_SetConnDeadTime(conn, rx_connDeadTime);
+ rx_SetConnSecondsUntilNatPing(conn, 0);
conn->ackRate = RX_FAST_ACK_RATE;
conn->nSpecific = 0;
conn->specific = NULL;
conn->refCount++; /* no lock required since only this thread knows... */
conn->next = rx_connHashTable[hashindex];
rx_connHashTable[hashindex] = conn;
- rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
MUTEX_EXIT(&rx_connHashTable_lock);
USERPRI;
return conn;
}
void
-rx_SetConnDeadTime(register struct rx_connection *conn, register int seconds)
+rx_SetConnDeadTime(struct rx_connection *conn, int seconds)
{
/* The idea is to set the dead time to a value that allows several
* keepalives to be dropped without timing out the connection. */
conn->peer->idleWhen = clock_Sec();
if (conn->peer->refCount < 1) {
conn->peer->refCount = 1;
- MUTEX_ENTER(&rx_stats_mutex);
- rxi_lowPeerRefCount++;
- MUTEX_EXIT(&rx_stats_mutex);
+ if (rx_stats_active) {
+ MUTEX_ENTER(&rx_stats_mutex);
+ rxi_lowPeerRefCount++;
+ MUTEX_EXIT(&rx_stats_mutex);
+ }
}
}
conn->peer->refCount--;
MUTEX_EXIT(&rx_peerHashTable_lock);
- if (conn->type == RX_SERVER_CONNECTION)
- rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
- else
- rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
+ if (rx_stats_active)
+ {
+ if (conn->type == RX_SERVER_CONNECTION)
+ rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
+ else
+ rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
+ }
#ifndef KERNEL
if (conn->specific) {
int i;
/* Destroy the specified connection */
void
-rxi_DestroyConnection(register struct rx_connection *conn)
+rxi_DestroyConnection(struct rx_connection *conn)
{
MUTEX_ENTER(&rx_connHashTable_lock);
rxi_DestroyConnectionNoLock(conn);
}
static void
-rxi_DestroyConnectionNoLock(register struct rx_connection *conn)
+rxi_DestroyConnectionNoLock(struct rx_connection *conn)
{
- register struct rx_connection **conn_ptr;
- register int havecalls = 0;
+ struct rx_connection **conn_ptr;
+ int havecalls = 0;
struct rx_packet *packet;
int i;
SPLVAR;
if (conn->refCount > 0)
conn->refCount--;
else {
- MUTEX_ENTER(&rx_stats_mutex);
- rxi_lowConnRefCount++;
- MUTEX_EXIT(&rx_stats_mutex);
+ if (rx_stats_active) {
+ MUTEX_ENTER(&rx_stats_mutex);
+ rxi_lowConnRefCount++;
+ MUTEX_EXIT(&rx_stats_mutex);
+ }
}
if ((conn->refCount > 0) || (conn->flags & RX_CONN_BUSY)) {
/* Check for extant references to this connection */
for (i = 0; i < RX_MAXCALLS; i++) {
- register struct rx_call *call = conn->call[i];
+ struct rx_call *call = conn->call[i];
if (call) {
havecalls = 1;
if (conn->type == RX_CLIENT_CONNECTION) {
return;
}
+ if (conn->natKeepAliveEvent) {
+ rxi_NatKeepAliveOff(conn);
+ }
+
if (conn->delayedAbortEvent) {
rxevent_Cancel(conn->delayedAbortEvent, (struct rx_call *)0, 0);
packet = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL);
rxevent_Cancel(conn->challengeEvent, (struct rx_call *)0, 0);
if (conn->checkReachEvent)
rxevent_Cancel(conn->checkReachEvent, (struct rx_call *)0, 0);
+ if (conn->natKeepAliveEvent)
+ rxevent_Cancel(conn->natKeepAliveEvent, (struct rx_call *)0, 0);
/* Add the connection to the list of destroyed connections that
* need to be cleaned up. This is necessary to avoid deadlocks
/* Externally available version */
void
-rx_DestroyConnection(register struct rx_connection *conn)
+rx_DestroyConnection(struct rx_connection *conn)
{
SPLVAR;
}
void
-rx_GetConnection(register struct rx_connection *conn)
+rx_GetConnection(struct rx_connection *conn)
{
SPLVAR;
USERPRI;
}
+#ifdef AFS_GLOBAL_RXLOCK_KERNEL
/* Wait for the transmit queue to no longer be busy.
* requires the call->lock to be held */
static void rxi_WaitforTQBusy(struct rx_call *call) {
}
}
}
+#endif
+
/* Start a new rx remote procedure call, on the specified connection.
* If wait is set to 1, wait for a free call channel; otherwise return
* 0. Maxtime gives the maximum number of seconds this call may take,
* state and before we go to sleep.
*/
struct rx_call *
-rx_NewCall(register struct rx_connection *conn)
+rx_NewCall(struct rx_connection *conn)
{
- register int i;
- register struct rx_call *call;
+ int i;
+ struct rx_call *call;
struct clock queueTime;
SPLVAR;
clock_NewTime();
- dpf(("rx_NewCall(conn %x)\n", conn));
+ dpf(("rx_NewCall(conn %"AFS_PTR_FMT")\n", conn));
NETPRI;
clock_GetTime(&queueTime);
- MUTEX_ENTER(&conn->conn_call_lock);
-
/*
* Check if there are others waiting for a new call.
* If so, let them go first to avoid starving them.
* RX_CONN_MAKECALL_WAITING flag bit is used to
* indicate that there are indeed calls waiting.
* The flag is set when the waiter is incremented.
- * It is only cleared in rx_EndCall when
- * makeCallWaiters is 0. This prevents us from
- * accidently destroying the connection while it
- * is potentially about to be used.
+ * It is only cleared when makeCallWaiters is 0.
+ * This prevents us from accidently destroying the
+ * connection while it is potentially about to be used.
*/
+ MUTEX_ENTER(&conn->conn_call_lock);
MUTEX_ENTER(&conn->conn_data_lock);
if (conn->makeCallWaiters) {
- conn->flags |= RX_CONN_MAKECALL_WAITING;
conn->makeCallWaiters++;
MUTEX_EXIT(&conn->conn_data_lock);
#endif
MUTEX_ENTER(&conn->conn_data_lock);
conn->makeCallWaiters--;
+ if (conn->makeCallWaiters == 0)
+ conn->flags &= ~RX_CONN_MAKECALL_WAITING;
}
MUTEX_EXIT(&conn->conn_data_lock);
for (i = 0; i < RX_MAXCALLS; i++) {
call = conn->call[i];
if (call) {
- MUTEX_ENTER(&call->lock);
if (call->state == RX_STATE_DALLY) {
- rxi_ResetCall(call, 0);
- (*call->callNumber)++;
- break;
- }
- MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&call->lock);
+ if (call->state == RX_STATE_DALLY) {
+ call->state = RX_STATE_RESET;
+ MUTEX_EXIT(&conn->conn_call_lock);
+ rxi_ResetCall(call, 0);
+ MUTEX_ENTER(&conn->conn_call_lock);
+ (*call->callNumber)++;
+ break;
+ }
+ MUTEX_EXIT(&call->lock);
+ }
} else {
+ /* rxi_NewCall returns with mutex locked */
call = rxi_NewCall(conn, i);
break;
}
#endif
MUTEX_ENTER(&conn->conn_data_lock);
conn->makeCallWaiters--;
+ if (conn->makeCallWaiters == 0)
+ conn->flags &= ~RX_CONN_MAKECALL_WAITING;
MUTEX_EXIT(&conn->conn_data_lock);
}
/*
/* Turn on busy protocol. */
rxi_KeepAliveOn(call);
-
- MUTEX_EXIT(&call->lock);
MUTEX_EXIT(&conn->conn_call_lock);
- USERPRI;
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- /* Now, if TQ wasn't cleared earlier, do it now. */
- MUTEX_ENTER(&call->lock);
- rxi_WaitforTQBusy(call);
- if (call->flags & RX_CALL_TQ_CLEARME) {
- rxi_ClearTransmitQueue(call, 0);
- queue_Init(&call->tq);
+ if (call->flags & (RX_CALL_TQ_BUSY | RX_CALL_TQ_CLEARME)) {
+ osi_Panic("rx_NewCall call about to be used without an empty tq");
}
- MUTEX_EXIT(&call->lock);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- dpf(("rx_NewCall(call %x)\n", call));
+ MUTEX_EXIT(&call->lock);
+ USERPRI;
+
+ dpf(("rx_NewCall(call %"AFS_PTR_FMT")\n", call));
return call;
}
int
-rxi_HasActiveCalls(register struct rx_connection *aconn)
+rxi_HasActiveCalls(struct rx_connection *aconn)
{
- register int i;
- register struct rx_call *tcall;
+ int i;
+ struct rx_call *tcall;
SPLVAR;
NETPRI;
}
int
-rxi_GetCallNumberVector(register struct rx_connection *aconn,
- register afs_int32 * aint32s)
+rxi_GetCallNumberVector(struct rx_connection *aconn,
+ afs_int32 * aint32s)
{
- register int i;
- register struct rx_call *tcall;
+ int i;
+ struct rx_call *tcall;
SPLVAR;
NETPRI;
}
int
-rxi_SetCallNumberVector(register struct rx_connection *aconn,
- register afs_int32 * aint32s)
+rxi_SetCallNumberVector(struct rx_connection *aconn,
+ afs_int32 * aint32s)
{
- register int i;
- register struct rx_call *tcall;
+ int i;
+ struct rx_call *tcall;
SPLVAR;
NETPRI;
afs_int32(*serviceProc) (struct rx_call * acall))
{
osi_socket socket = OSI_NULLSOCKET;
- register struct rx_service *tservice;
- register int i;
+ struct rx_service *tservice;
+ int i;
SPLVAR;
clock_NewTime();
tservice = rxi_AllocService();
NETPRI;
for (i = 0; i < RX_MAX_SERVICES; i++) {
- register struct rx_service *service = rx_services[i];
+ struct rx_service *service = rx_services[i];
if (service) {
if (port == service->servicePort && host == service->serviceHost) {
if (service->serviceId == serviceId) {
if (socket == OSI_NULLSOCKET) {
/* If we don't already have a socket (from another
* service on same port) get a new one */
- socket = rxi_GetHostUDPSocket(htonl(INADDR_ANY), port);
+ socket = rxi_GetHostUDPSocket(host, port);
if (socket == OSI_NULLSOCKET) {
USERPRI;
rxi_FreeService(tservice);
service->minProcs = 0;
service->maxProcs = 1;
service->idleDeadTime = 60;
+ service->idleDeadErr = 0;
service->connDeadTime = rx_connDeadTime;
service->executeRequestProc = serviceProc;
service->checkReach = 0;
void
rxi_ServerProc(int threadID, struct rx_call *newcall, osi_socket * socketp)
{
- register struct rx_call *call;
- register afs_int32 code;
- register struct rx_service *tservice = NULL;
+ struct rx_call *call;
+ afs_int32 code;
+ struct rx_service *tservice = NULL;
for (;;) {
if (newcall) {
(*tservice->afterProc) (call, code);
rx_EndCall(call, code);
- MUTEX_ENTER(&rx_stats_mutex);
- rxi_nCalls++;
- MUTEX_EXIT(&rx_stats_mutex);
+ if (rx_stats_active) {
+ MUTEX_ENTER(&rx_stats_mutex);
+ rxi_nCalls++;
+ MUTEX_EXIT(&rx_stats_mutex);
+ }
}
}
rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)
{
struct rx_serverQueueEntry *sq;
- register struct rx_call *call = (struct rx_call *)0;
+ struct rx_call *call = (struct rx_call *)0;
struct rx_service *service = NULL;
SPLVAR;
}
while (1) {
if (queue_IsNotEmpty(&rx_incomingCallQueue)) {
- register struct rx_call *tcall, *ncall, *choice2 = NULL;
+ struct rx_call *tcall, *ncall, *choice2 = NULL;
/* Scan for eligible incoming calls. A call is not eligible
* if the maximum number of calls for its service type are
if (!QuotaOK(service)) {
continue;
}
+ MUTEX_ENTER(&rx_pthread_mutex);
if (tno == rxi_fcfs_thread_num
|| !tcall->queue_item_header.next) {
+ MUTEX_EXIT(&rx_pthread_mutex);
/* If we're the fcfs thread , then we'll just use
* this call. If we haven't been able to find an optimal
* choice, and we're at the end of the list, then use a
* 2d choice if one has been identified. Otherwise... */
call = (choice2 ? choice2 : tcall);
service = call->conn->service;
- } else if (!queue_IsEmpty(&tcall->rq)) {
- struct rx_packet *rp;
- rp = queue_First(&tcall->rq, rx_packet);
- if (rp->header.seq == 1) {
- if (!meltdown_1pkt
- || (rp->header.flags & RX_LAST_PACKET)) {
- call = tcall;
- } else if (rxi_2dchoice && !choice2
- && !(tcall->flags & RX_CALL_CLEARED)
- && (tcall->rprev > rxi_HardAckRate)) {
- choice2 = tcall;
- } else
- rxi_md2cnt++;
+ } else {
+ MUTEX_EXIT(&rx_pthread_mutex);
+ if (!queue_IsEmpty(&tcall->rq)) {
+ struct rx_packet *rp;
+ rp = queue_First(&tcall->rq, rx_packet);
+ if (rp->header.seq == 1) {
+ if (!meltdown_1pkt
+ || (rp->header.flags & RX_LAST_PACKET)) {
+ call = tcall;
+ } else if (rxi_2dchoice && !choice2
+ && !(tcall->flags & RX_CALL_CLEARED)
+ && (tcall->rprev > rxi_HardAckRate)) {
+ choice2 = tcall;
+ } else
+ rxi_md2cnt++;
+ }
}
}
if (call) {
if (call->flags & RX_CALL_WAIT_PROC) {
call->flags &= ~RX_CALL_WAIT_PROC;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_nWaiting--;
- MUTEX_EXIT(&rx_stats_mutex);
+ MUTEX_ENTER(&rx_waiting_mutex);
+ rx_nWaiting--;
+ MUTEX_EXIT(&rx_waiting_mutex);
}
if (call->state != RX_STATE_PRECALL || call->error) {
#endif
rxi_calltrace(RX_CALL_START, call);
- dpf(("rx_GetCall(port=%d, service=%d) ==> call %x\n",
+ dpf(("rx_GetCall(port=%d, service=%d) ==> call %"AFS_PTR_FMT"\n",
call->conn->service->servicePort, call->conn->service->serviceId,
call));
CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
MUTEX_EXIT(&call->lock);
} else {
- dpf(("rx_GetCall(socketp=0x%x, *socketp=0x%x)\n", socketp, *socketp));
+ dpf(("rx_GetCall(socketp=%p, *socketp=0x%x)\n", socketp, *socketp));
}
return call;
rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)
{
struct rx_serverQueueEntry *sq;
- register struct rx_call *call = (struct rx_call *)0, *choice2;
+ struct rx_call *call = (struct rx_call *)0, *choice2;
struct rx_service *service = NULL;
SPLVAR;
rxi_availProcs++;
}
if (queue_IsNotEmpty(&rx_incomingCallQueue)) {
- register struct rx_call *tcall, *ncall;
+ struct rx_call *tcall, *ncall;
/* Scan for eligible incoming calls. A call is not eligible
* if the maximum number of calls for its service type are
* already executing */
for (queue_Scan(&rx_incomingCallQueue, tcall, ncall, rx_call)) {
service = tcall->conn->service;
if (QuotaOK(service)) {
+ MUTEX_ENTER(&rx_pthread_mutex);
if (tno == rxi_fcfs_thread_num
|| !tcall->queue_item_header.next) {
+ MUTEX_EXIT(&rx_pthread_mutex);
/* If we're the fcfs thread, then we'll just use
* this call. If we haven't been able to find an optimal
* choice, and we're at the end of the list, then use a
* 2d choice if one has been identified. Otherwise... */
call = (choice2 ? choice2 : tcall);
service = call->conn->service;
- } else if (!queue_IsEmpty(&tcall->rq)) {
- struct rx_packet *rp;
- rp = queue_First(&tcall->rq, rx_packet);
- if (rp->header.seq == 1
- && (!meltdown_1pkt
- || (rp->header.flags & RX_LAST_PACKET))) {
- call = tcall;
- } else if (rxi_2dchoice && !choice2
- && !(tcall->flags & RX_CALL_CLEARED)
- && (tcall->rprev > rxi_HardAckRate)) {
- choice2 = tcall;
- } else
- rxi_md2cnt++;
+ } else {
+ MUTEX_EXIT(&rx_pthread_mutex);
+ if (!queue_IsEmpty(&tcall->rq)) {
+ struct rx_packet *rp;
+ rp = queue_First(&tcall->rq, rx_packet);
+ if (rp->header.seq == 1
+ && (!meltdown_1pkt
+ || (rp->header.flags & RX_LAST_PACKET))) {
+ call = tcall;
+ } else if (rxi_2dchoice && !choice2
+ && !(tcall->flags & RX_CALL_CLEARED)
+ && (tcall->rprev > rxi_HardAckRate)) {
+ choice2 = tcall;
+ } else
+ rxi_md2cnt++;
+ }
}
}
if (call)
#endif
rxi_calltrace(RX_CALL_START, call);
- dpf(("rx_GetCall(port=%d, service=%d) ==> call %x\n",
+ dpf(("rx_GetCall(port=%d, service=%d) ==> call %p\n",
call->conn->service->servicePort, call->conn->service->serviceId,
call));
} else {
- dpf(("rx_GetCall(socketp=0x%x, *socketp=0x%x)\n", socketp, *socketp));
+ dpf(("rx_GetCall(socketp=%p, *socketp=0x%x)\n", socketp, *socketp));
}
USERPRI;
* and (2) only use it once. Other uses currently void your warranty
*/
void
-rx_SetArrivalProc(register struct rx_call *call,
- register void (*proc) (register struct rx_call * call,
- register void * mh,
- register int index),
- register void * handle, register int arg)
+rx_SetArrivalProc(struct rx_call *call,
+ void (*proc) (struct rx_call * call,
+ void * mh,
+ int index),
+ void * handle, int arg)
{
call->arrivalProc = proc;
call->arrivalProcHandle = handle;
* to the caller */
afs_int32
-rx_EndCall(register struct rx_call *call, afs_int32 rc)
+rx_EndCall(struct rx_call *call, afs_int32 rc)
{
- register struct rx_connection *conn = call->conn;
- register struct rx_service *service;
+ struct rx_connection *conn = call->conn;
+ struct rx_service *service;
afs_int32 error;
SPLVAR;
-
-
- dpf(("rx_EndCall(call %x rc %d error %d abortCode %d)\n", call, rc, call->error, call->abortCode));
+ dpf(("rx_EndCall(call %"AFS_PTR_FMT" rc %d error %d abortCode %d)\n",
+ call, rc, call->error, call->abortCode));
NETPRI;
MUTEX_ENTER(&call->lock);
* rx_NewCall is in a stable state. Otherwise, rx_NewCall may
* have checked this call, found it active and by the time it
* goes to sleep, will have missed the signal.
- *
- * Do not clear the RX_CONN_MAKECALL_WAITING flag as long as
- * there are threads waiting to use the conn object.
*/
- MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_call_lock);
- MUTEX_ENTER(&call->lock);
MUTEX_ENTER(&conn->conn_data_lock);
conn->flags |= RX_CONN_BUSY;
if (conn->flags & RX_CONN_MAKECALL_WAITING) {
- if (conn->makeCallWaiters == 0)
- conn->flags &= (~RX_CONN_MAKECALL_WAITING);
MUTEX_EXIT(&conn->conn_data_lock);
#ifdef RX_ENABLE_LOCKS
CV_BROADCAST(&conn->conn_call_cv);
* kernel version, and may interrupt the macros rx_Read or
* rx_Write, which run at normal priority for efficiency. */
if (call->currentPacket) {
- queue_Prepend(&call->iovq, call->currentPacket);
+ call->currentPacket->flags &= ~RX_PKTFLAG_CP;
+ rxi_FreePacket(call->currentPacket);
call->currentPacket = (struct rx_packet *)0;
}
call->nLeft = call->nFree = call->curlen = 0;
/* Free any packets from the last call to ReadvProc/WritevProc */
- rxi_FreePackets(0, &call->iovq);
+#ifdef RXDEBUG_PACKET
+ call->iovqc -=
+#endif /* RXDEBUG_PACKET */
+ rxi_FreePackets(0, &call->iovq);
CALL_RELE(call, RX_CALL_REFCOUNT_BEGIN);
MUTEX_EXIT(&call->lock);
if (conn->type == RX_CLIENT_CONNECTION) {
- MUTEX_EXIT(&conn->conn_call_lock);
conn->flags &= ~RX_CONN_BUSY;
}
USERPRI;
void
rx_Finalize(void)
{
- register struct rx_connection **conn_ptr, **conn_end;
+ struct rx_connection **conn_ptr, **conn_end;
INIT_PTHREAD_LOCKS;
LOCK_RX_INIT;
/* Return this process's service structure for the
* specified socket and service */
struct rx_service *
-rxi_FindService(register osi_socket socket, register u_short serviceId)
+rxi_FindService(osi_socket socket, u_short serviceId)
{
- register struct rx_service **sp;
+ struct rx_service **sp;
for (sp = &rx_services[0]; *sp; sp++) {
if ((*sp)->serviceId == serviceId && (*sp)->socket == socket)
return *sp;
return 0;
}
+/* RXDEBUG_PACKET only: head of a singly linked list that threads every
+ * rx_call ever allocated (rxi_NewCall pushes each new call on via
+ * call->allNextp), so packet-accounting debug code can walk all calls.
+ * Under KDUMP_RX_LOCK the lock-instrumented call type is used instead. */
+#ifdef RXDEBUG_PACKET
+#ifdef KDUMP_RX_LOCK
+static struct rx_call_rx_lock *rx_allCallsp = 0;
+#else
+static struct rx_call *rx_allCallsp = 0;
+#endif
+#endif /* RXDEBUG_PACKET */
+
/* Allocate a call structure, for the indicated channel of the
* supplied connection. The mode and state of the call must be set by
* the caller. Returns the call with mutex locked. */
struct rx_call *
-rxi_NewCall(register struct rx_connection *conn, register int channel)
+rxi_NewCall(struct rx_connection *conn, int channel)
{
- register struct rx_call *call;
+ struct rx_call *call;
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- register struct rx_call *cp; /* Call pointer temp */
- register struct rx_call *nxp; /* Next call pointer, for queue_Scan */
+ struct rx_call *cp; /* Call pointer temp */
+ struct rx_call *nxp; /* Next call pointer, for queue_Scan */
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- dpf(("rxi_NewCall(conn %x, channel %d)\n", conn, channel));
+ dpf(("rxi_NewCall(conn %"AFS_PTR_FMT", channel %d)\n", conn, channel));
/* Grab an existing call structure, or allocate a new one.
* Existing call structures are assumed to have been left reset by
call = queue_First(&rx_freeCallQueue, rx_call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
queue_Remove(call);
- rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_ENTER(&call->lock);
CLEAR_CALL_QUEUE_LOCK(call);
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
/* Now, if TQ wasn't cleared earlier, do it now. */
+ rxi_WaitforTQBusy(call);
if (call->flags & RX_CALL_TQ_CLEARME) {
- rxi_ClearTransmitQueue(call, 0);
- queue_Init(&call->tq);
+ rxi_ClearTransmitQueue(call, 1);
+ /*queue_Init(&call->tq);*/
}
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
/* Bind the call to its connection structure */
call->conn = conn;
rxi_ResetCall(call, 1);
} else {
- call = (struct rx_call *)rxi_Alloc(sizeof(struct rx_call));
- MUTEX_EXIT(&rx_freeCallQueue_lock);
+ call = (struct rx_call *)rxi_Alloc(sizeof(struct rx_call));
+#ifdef RXDEBUG_PACKET
+ call->allNextp = rx_allCallsp;
+ rx_allCallsp = call;
+ call->call_id =
+#endif /* RXDEBUG_PACKET */
+ rx_MutexIncrement(rx_stats.nCallStructs, rx_stats_mutex);
+
+ MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_INIT(&call->lock, "call lock", MUTEX_DEFAULT, NULL);
MUTEX_ENTER(&call->lock);
CV_INIT(&call->cv_twind, "call twind", CV_DEFAULT, 0);
CV_INIT(&call->cv_rq, "call rq", CV_DEFAULT, 0);
CV_INIT(&call->cv_tq, "call tq", CV_DEFAULT, 0);
- rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
/* Initialize once-only items */
queue_Init(&call->tq);
queue_Init(&call->rq);
queue_Init(&call->iovq);
+#ifdef RXDEBUG_PACKET
+ call->rqc = call->tqc = call->iovqc = 0;
+#endif /* RXDEBUG_PACKET */
/* Bind the call to its connection structure (prereq for reset) */
call->conn = conn;
rxi_ResetCall(call, 1);
*/
#ifdef RX_ENABLE_LOCKS
void
-rxi_FreeCall(register struct rx_call *call, int haveCTLock)
+rxi_FreeCall(struct rx_call *call, int haveCTLock)
#else /* RX_ENABLE_LOCKS */
void
-rxi_FreeCall(register struct rx_call *call)
+rxi_FreeCall(struct rx_call *call)
#endif /* RX_ENABLE_LOCKS */
{
- register int channel = call->channel;
- register struct rx_connection *conn = call->conn;
+ int channel = call->channel;
+ struct rx_connection *conn = call->conn;
if (call->state == RX_STATE_DALLY || call->state == RX_STATE_HOLD)
#else /* AFS_GLOBAL_RXLOCK_KERNEL */
queue_Append(&rx_freeCallQueue, call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
MUTEX_EXIT(&rx_freeCallQueue_lock);
/* Destroy the connection if it was previously slated for
afs_int32 rxi_Alloccnt = 0, rxi_Allocsize = 0;
+/* Allocate and zero a size-byte buffer, folding the request into the
+ * rxi_Allocsize/rxi_Alloccnt statistics when stats collection is on.
+ * Panics on allocation failure, so callers never see NULL.
+ * NOTE(review): the trailing "return p;" context line is elided by this
+ * diff view — confirm against the full file. */
char *
-rxi_Alloc(register size_t size)
+rxi_Alloc(size_t size)
{
-    register char *p;
+    char *p;

-    rx_MutexAdd1Increment2(rxi_Allocsize, (afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
-    p = (char *)osi_Alloc(size);
+    /* Only touch the mutex-protected counters when statistics are enabled. */
+    if (rx_stats_active)
+        rx_MutexAdd1Increment2(rxi_Allocsize, (afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
+/* FreeBSD 8.0 kernel: use the no-sleep allocator in this context. */
+p = (char *)
+#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
+    afs_osi_Alloc_NoSleep(size);
+#else
+    osi_Alloc(size);
+#endif
    if (!p)
	osi_Panic("rxi_Alloc error");
    memset(p, 0, size);
}
+/* Release a buffer obtained from rxi_Alloc, reversing its contribution
+ * to the rxi_Allocsize/rxi_Alloccnt statistics when stats collection
+ * is enabled. */
void
-rxi_Free(void *addr, register size_t size)
+rxi_Free(void *addr, size_t size)
{
-    rx_MutexAdd1Decrement2(rxi_Allocsize, -(afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
+    if (rx_stats_active)
+        rx_MutexAdd1Decrement2(rxi_Allocsize, -(afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
    osi_Free(addr, size);
}
+/* Clamp one peer's interface MTU to a newly learned bound and rederive
+ * its natural MTU from the clamped value.  Takes peer->peer_lock; the
+ * caller must not already hold it. */
+static void
+rxi_ClampPeerMtu(struct rx_peer *peer, int mtu)
+{
+    MUTEX_ENTER(&peer->peer_lock);
+    peer->ifMTU = MIN(mtu, peer->ifMTU);
+    peer->natMTU = rxi_AdjustIfMTU(peer->ifMTU);
+    MUTEX_EXIT(&peer->peer_lock);
+}
+
+/* Record an MTU bound for the peer(s) at the given host.  A non-zero
+ * port selects the single matching peer; port 0 applies the bound to
+ * every peer structure for that host, regardless of port. */
+void
+rxi_SetPeerMtu(afs_uint32 host, afs_uint32 port, int mtu)
+{
+    struct rx_peer *p;
+
+    MUTEX_ENTER(&rx_peerHashTable_lock);
+    if (port != 0) {
+        /* Exact (host, port) lookup: probe just one hash chain. */
+        for (p = rx_peerHashTable[PEER_HASH(host, port)]; p; p = p->next) {
+            if (p->host == host && p->port == port)
+                rxi_ClampPeerMtu(p, mtu);
+        }
+    } else {
+        /* Wildcard port: sweep the entire peer hash table. */
+        int bucket;
+
+        for (bucket = 0; bucket < rx_hashTableSize; bucket++) {
+            struct rx_peer *nextp;
+
+            for (p = rx_peerHashTable[bucket]; p; p = nextp) {
+                nextp = p->next;
+                if (p->host == host)
+                    rxi_ClampPeerMtu(p, mtu);
+            }
+        }
+    }
+    MUTEX_EXIT(&rx_peerHashTable_lock);
+}
+
/* Find the peer process represented by the supplied (host,port)
* combination. If there is no appropriate active peer structure, a
* new one will be allocated and initialized
* refcount will be be decremented. This is used to replace the peer
* structure hanging off a connection structure */
struct rx_peer *
-rxi_FindPeer(register afs_uint32 host, register u_short port,
- struct rx_peer *origPeer, int create)
+rxi_FindPeer(afs_uint32 host, u_short port,
+ struct rx_peer *origPeer, int create)
{
- register struct rx_peer *pp;
+ struct rx_peer *pp;
int hashIndex;
hashIndex = PEER_HASH(host, port);
MUTEX_ENTER(&rx_peerHashTable_lock);
for (pp = rx_peerHashTable[hashIndex]; pp; pp = pp->next) {
if ((pp->host == host) && (pp->port == port))
- break;
+ break;
}
if (!pp) {
- if (create) {
- pp = rxi_AllocPeer(); /* This bzero's *pp */
- pp->host = host; /* set here or in InitPeerParams is zero */
+ if (create) {
+ pp = rxi_AllocPeer(); /* This bzero's *pp */
+ pp->host = host; /* set here or in InitPeerParams is zero */
pp->port = port;
MUTEX_INIT(&pp->peer_lock, "peer_lock", MUTEX_DEFAULT, 0);
queue_Init(&pp->congestionQueue);
pp->next = rx_peerHashTable[hashIndex];
rx_peerHashTable[hashIndex] = pp;
rxi_InitPeerParams(pp);
- rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
}
}
if (pp && create) {
* server connection is created, it will be created using the supplied
* index, if the index is valid for this service */
struct rx_connection *
-rxi_FindConnection(osi_socket socket, register afs_int32 host,
- register u_short port, u_short serviceId, afs_uint32 cid,
+rxi_FindConnection(osi_socket socket, afs_int32 host,
+ u_short port, u_short serviceId, afs_uint32 cid,
afs_uint32 epoch, int type, u_int securityIndex)
{
int hashindex, flag, i;
- register struct rx_connection *conn;
+ struct rx_connection *conn;
hashindex = CONN_HASH(host, port, cid, epoch, type);
MUTEX_ENTER(&rx_connHashTable_lock);
rxLastConn ? (conn = rxLastConn, flag = 0) : (conn =
for (; conn;) {
if ((conn->type == type) && ((cid & RX_CIDMASK) == conn->cid)
&& (epoch == conn->epoch)) {
- register struct rx_peer *pp = conn->peer;
+ struct rx_peer *pp = conn->peer;
if (securityIndex != conn->securityIndex) {
/* this isn't supposed to happen, but someone could forge a packet
* like this, and there seems to be some CM bug that makes this
conn->specific = NULL;
rx_SetConnDeadTime(conn, service->connDeadTime);
rx_SetConnIdleDeadTime(conn, service->idleDeadTime);
+ rx_SetServerConnIdleDeadErr(conn, service->idleDeadErr);
for (i = 0; i < RX_MAXCALLS; i++) {
conn->twind[i] = rx_initSendWindow;
conn->rwind[i] = rx_initReceiveWindow;
/* XXXX Connection timeout? */
if (service->newConnProc)
(*service->newConnProc) (conn);
- rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
}
MUTEX_ENTER(&conn->conn_data_lock);
* containing the network address. Both can be modified. The return value, if
* non-zero, indicates that the packet should be dropped. */
-int (*rx_justReceived) () = 0;
-int (*rx_almostSent) () = 0;
+int (*rx_justReceived) (struct rx_packet *, struct sockaddr_in *) = 0;
+int (*rx_almostSent) (struct rx_packet *, struct sockaddr_in *) = 0;
/* A packet has been received off the interface. Np is the packet, socket is
* the socket number it was received from (useful in determining which service
* it, rather than de-allocating it, just as a small performance hack */
struct rx_packet *
-rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
+rxi_ReceivePacket(struct rx_packet *np, osi_socket socket,
afs_uint32 host, u_short port, int *tnop,
struct rx_call **newcallp)
{
- register struct rx_call *call;
- register struct rx_connection *conn;
+ struct rx_call *call;
+ struct rx_connection *conn;
int channel;
afs_uint32 currentCallNumber;
int type;
* this is the first time the packet has been seen */
packetType = (np->header.type > 0 && np->header.type < RX_N_PACKET_TYPES)
? rx_packetTypes[np->header.type - 1] : "*UNKNOWN*";
- dpf(("R %d %s: %x.%d.%d.%d.%d.%d.%d flags %d, packet %x",
+ dpf(("R %d %s: %x.%d.%d.%d.%d.%d.%d flags %d, packet %"AFS_PTR_FMT,
np->header.serial, packetType, ntohl(host), ntohs(port), np->header.serviceId,
np->header.epoch, np->header.cid, np->header.callNumber,
np->header.seq, np->header.flags, np));
* then, since this is a client connection we're getting data for
* it must be for the previous call.
*/
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
MUTEX_ENTER(&conn->conn_data_lock);
conn->refCount--;
MUTEX_EXIT(&conn->conn_data_lock);
if (type == RX_SERVER_CONNECTION) { /* We're the server */
if (np->header.callNumber < currentCallNumber) {
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call)
MUTEX_EXIT(&call->lock);
call = rxi_NewCall(conn, channel);
MUTEX_EXIT(&conn->conn_call_lock);
*call->callNumber = np->header.callNumber;
+#ifdef RXDEBUG
if (np->header.callNumber == 0)
- dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port), np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq, np->header.flags, (unsigned long)np, np->retryTime.sec, np->retryTime.usec / 1000, np->length));
-
+ dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%.06d len %d",
+ np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port),
+ np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq,
+ np->header.flags, np, np->retryTime.sec, np->retryTime.usec / 1000, np->length));
+#endif
call->state = RX_STATE_PRECALL;
clock_GetTime(&call->queueTime);
hzero(call->bytesSent);
MUTEX_ENTER(&conn->conn_data_lock);
conn->refCount--;
MUTEX_EXIT(&conn->conn_data_lock);
- rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
return tp;
}
rxi_KeepAliveOn(call);
}
rxi_ResetCall(call, 0);
*call->callNumber = np->header.callNumber;
+#ifdef RXDEBUG
if (np->header.callNumber == 0)
- dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port), np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq, np->header.flags, (unsigned long)np, np->retryTime.sec, np->retryTime.usec / 1000, np->length));
-
+ dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%06d len %d",
+ np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port),
+ np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq,
+ np->header.flags, np, np->retryTime.sec, np->retryTime.usec, np->length));
+#endif
call->state = RX_STATE_PRECALL;
clock_GetTime(&call->queueTime);
hzero(call->bytesSent);
MUTEX_ENTER(&conn->conn_data_lock);
conn->refCount--;
MUTEX_EXIT(&conn->conn_data_lock);
- rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
return tp;
}
rxi_KeepAliveOn(call);
/* Ignore all incoming acknowledgements for calls in DALLY state */
if (call && (call->state == RX_STATE_DALLY)
&& (np->header.type == RX_PACKET_TYPE_ACK)) {
- rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
/* Ignore anything that's not relevant to the current call. If there
* isn't a current call, then no packet is relevant. */
if (!call || (np->header.callNumber != currentCallNumber)) {
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
* XXX interact badly with the server-restart detection
* XXX code in receiveackpacket. */
if (ntohl(rx_GetInt32(np, FIRSTACKOFFSET)) < call->tfirst) {
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
MUTEX_EXIT(&call->lock);
MUTEX_ENTER(&conn->conn_data_lock);
conn->refCount--;
conn->lastSerial = np->header.serial;
MUTEX_EXIT(&conn->conn_data_lock);
if (skew > 0) {
- register struct rx_peer *peer;
+ struct rx_peer *peer;
peer = conn->peer;
if (skew > peer->inPacketSkew) {
- dpf(("*** In skew changed from %d to %d\n", peer->inPacketSkew,
- skew));
+ dpf(("*** In skew changed from %d to %d\n",
+ peer->inPacketSkew, skew));
peer->inPacketSkew = skew;
}
}
}
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
rxi_ClearTransmitQueue(call, 0);
+ rxevent_Cancel(call->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE);
break;
default:
/* Should not reach here, unless the peer is broken: send an abort
int
rxi_IsConnInteresting(struct rx_connection *aconn)
{
- register int i;
- register struct rx_call *tcall;
+ int i;
+ struct rx_call *tcall;
if (aconn->flags & (RX_CONN_MAKECALL_WAITING | RX_CONN_DESTROY_ME))
return 1;
TooLow(struct rx_packet *ap, struct rx_call *acall)
{
int rc = 0;
- MUTEX_ENTER(&rx_stats_mutex);
+
+ MUTEX_ENTER(&rx_quota_mutex);
if (((ap->header.seq != 1) && (acall->flags & RX_CALL_CLEARED)
&& (acall->state == RX_STATE_PRECALL))
|| ((rx_nFreePackets < rxi_dataQuota + 2)
&& (acall->flags & RX_CALL_READER_WAIT)))) {
rc = 1;
}
- MUTEX_EXIT(&rx_stats_mutex);
+ MUTEX_EXIT(&rx_quota_mutex);
return rc;
}
#endif /* KERNEL */
static void
-rxi_CheckReachEvent(struct rxevent *event, struct rx_connection *conn,
- struct rx_call *acall)
+rxi_CheckReachEvent(struct rxevent *event, void *arg1, void *arg2)
{
+ struct rx_connection *conn = arg1;
+ struct rx_call *acall = arg2;
struct rx_call *call = acall;
struct clock when, now;
int i, waiting;
/* try to attach call, if authentication is complete */
static void
-TryAttach(register struct rx_call *acall, register osi_socket socket,
- register int *tnop, register struct rx_call **newcallp,
+TryAttach(struct rx_call *acall, osi_socket socket,
+ int *tnop, struct rx_call **newcallp,
int reachOverride)
{
struct rx_connection *conn = acall->conn;
* routine can return a packet to the caller, for re-use */
struct rx_packet *
-rxi_ReceiveDataPacket(register struct rx_call *call,
- register struct rx_packet *np, int istack,
+rxi_ReceiveDataPacket(struct rx_call *call,
+ struct rx_packet *np, int istack,
osi_socket socket, afs_uint32 host, u_short port,
int *tnop, struct rx_call **newcallp)
{
int newPackets = 0;
int didHardAck = 0;
int haveLast = 0;
- afs_uint32 seq, serial, flags;
+ afs_uint32 seq;
+ afs_uint32 serial=0, flags=0;
int isFirst;
struct rx_packet *tnp;
struct clock when, now;
- rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
#ifdef KERNEL
/* If there are no packet buffers, drop this new packet, unless we can find
MUTEX_ENTER(&rx_freePktQ_lock);
rxi_NeedMorePackets = TRUE;
MUTEX_EXIT(&rx_freePktQ_lock);
- rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
call->rprev = np->header.serial;
rxi_calltrace(RX_TRACE_DROP, call);
- dpf(("packet %x dropped on receipt - quota problems", np));
+ dpf(("packet %"AFS_PTR_FMT" dropped on receipt - quota problems", np));
if (rxi_doreclaim)
rxi_ClearReceiveQueue(call);
clock_GetTime(&now);
/* Check to make sure it is not a duplicate of one already queued */
if (queue_IsNotEmpty(&call->rq)
&& queue_First(&call->rq, rx_packet)->header.seq == seq) {
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
- dpf(("packet %x dropped on receipt - duplicate", np));
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ dpf(("packet %"AFS_PTR_FMT" dropped on receipt - duplicate", np));
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
/* It's the next packet. Stick it on the receive queue
* for this call. Set newPackets to make sure we wake
* the reader once all packets have been processed */
+ np->flags |= RX_PKTFLAG_RQ;
queue_Prepend(&call->rq, np);
+#ifdef RXDEBUG_PACKET
+ call->rqc++;
+#endif /* RXDEBUG_PACKET */
call->nSoftAcks++;
np = NULL; /* We can't use this anymore */
newPackets = 1;
/* If the new packet's sequence number has been sent to the
* application already, then this is a duplicate */
if (seq < call->rnext) {
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
0, queue_Scan(&call->rq, tp, nxp, rx_packet)) {
/*Check for duplicate packet */
if (seq == tp->header.seq) {
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE,
* packet before which to insert the new packet, or at the
* queue head if the queue is empty or the packet should be
* appended. */
+ np->flags |= RX_PKTFLAG_RQ;
+#ifdef RXDEBUG_PACKET
+ call->rqc++;
+#endif /* RXDEBUG_PACKET */
queue_InsertBefore(tp, np);
call->nSoftAcks++;
np = NULL;
MUTEX_EXIT(&conn->conn_data_lock);
}
+#if defined(RXDEBUG) && defined(AFS_NT40_ENV)
static const char *
rx_ack_reason(int reason)
{
return "unknown!!";
}
}
+#endif
/* rxi_ComputePeerNetStats
{
struct rx_peer *peer = call->conn->peer;
- /* Use RTT if not delayed by client. */
- if (ap->reason != RX_ACK_DELAY)
+ /* Use RTT if not delayed by client and
+ * ignore packets that were retransmitted. */
+ if (!(p->flags & RX_PKTFLAG_ACKED) &&
+ ap->reason != RX_ACK_DELAY &&
+ clock_Eq(&p->timeSent, &p->firstSent))
rxi_ComputeRoundTripTime(p, &p->timeSent, peer);
#ifdef ADAPT_WINDOW
rxi_ComputeRate(peer, call, p, np, ap->reason);
/* The real smarts of the whole thing. */
struct rx_packet *
-rxi_ReceiveAckPacket(register struct rx_call *call, struct rx_packet *np,
+rxi_ReceiveAckPacket(struct rx_call *call, struct rx_packet *np,
int istack)
{
struct rx_ackPacket *ap;
int nAcks;
- register struct rx_packet *tp;
- register struct rx_packet *nxp; /* Next packet pointer for queue_Scan */
- register struct rx_connection *conn = call->conn;
+ struct rx_packet *tp;
+ struct rx_packet *nxp; /* Next packet pointer for queue_Scan */
+ struct rx_connection *conn = call->conn;
struct rx_peer *peer = conn->peer;
afs_uint32 first;
afs_uint32 serial;
afs_uint32 skew = 0;
int nbytes;
int missing;
+ int backedOff = 0;
int acked;
int nNacked = 0;
int newAckCount = 0;
u_short maxMTU = 0; /* Set if peer supports AFS 3.4a jumbo datagrams */
int maxDgramPackets = 0; /* Set if peer supports AFS 3.5 jumbo datagrams */
- rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
ap = (struct rx_ackPacket *)rx_DataOf(np);
nbytes = rx_Contiguous(np) - (int)((ap->acks) - (u_char *) ap);
if (nbytes < 0)
if (tp->header.seq >= first)
break;
call->tfirst = tp->header.seq + 1;
- if (serial
- && (tp->header.serial == serial || tp->firstSerial == serial))
- rxi_ComputePeerNetStats(call, tp, ap, np);
+ rxi_ComputePeerNetStats(call, tp, ap, np);
if (!(tp->flags & RX_PKTFLAG_ACKED)) {
newAckCount++;
}
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
{
queue_Remove(tp);
+ tp->flags &= ~RX_PKTFLAG_TQ;
+#ifdef RXDEBUG_PACKET
+ call->tqc--;
+#endif /* RXDEBUG_PACKET */
rxi_FreePacket(tp); /* rxi_FreePacket mustn't wake up anyone, preemptively. */
}
}
if (tp->header.seq >= first)
#endif /* RX_ENABLE_LOCKS */
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- if (serial
- && (tp->header.serial == serial || tp->firstSerial == serial))
- rxi_ComputePeerNetStats(call, tp, ap, np);
+ rxi_ComputePeerNetStats(call, tp, ap, np);
/* Set the acknowledge flag per packet based on the
* information in the ack packet. An acknowlegded packet can
missing = 1;
}
+ /*
+ * Following the suggestion of Phil Kern, we back off the peer's
+ * timeout value for future packets until a successful response
+ * is received for an initial transmission.
+ */
+ if (missing && !backedOff) {
+ struct clock c = peer->timeout;
+ struct clock max_to = {3, 0};
+
+ clock_Add(&peer->timeout, &c);
+ if (clock_Gt(&peer->timeout, &max_to))
+ peer->timeout = max_to;
+ backedOff = 1;
+ }
+
/* If packet isn't yet acked, and it has been transmitted at least
* once, reset retransmit time using latest timeout
* ie, this should readjust the retransmit timer for all outstanding
* packets... So we don't just retransmit when we should know better*/
if (!(tp->flags & RX_PKTFLAG_ACKED) && !clock_IsZero(&tp->retryTime)) {
- tp->retryTime = tp->timeSent;
+ tp->retryTime = tp->timeSent;
clock_Add(&tp->retryTime, &peer->timeout);
/* shift by eight because one quarter-sec ~ 256 milliseconds */
clock_Addmsec(&(tp->retryTime), ((afs_uint32) tp->backoff) << 8);
/* If the ack packet has a "recommended" size that is less than
* what I am using now, reduce my size to match */
- rx_packetread(np, rx_AckDataSize(ap->nAcks) + sizeof(afs_int32),
+ rx_packetread(np, rx_AckDataSize(ap->nAcks) + (int)sizeof(afs_int32),
(int)sizeof(afs_int32), &tSize);
tSize = (afs_uint32) ntohl(tSize);
peer->natMTU = rxi_AdjustIfMTU(MIN(tSize, peer->ifMTU));
if (np->length == rx_AckDataSize(ap->nAcks) + 3 * sizeof(afs_int32)) {
/* AFS 3.4a */
rx_packetread(np,
- rx_AckDataSize(ap->nAcks) + 2 * sizeof(afs_int32),
+ rx_AckDataSize(ap->nAcks) + 2 * (int)sizeof(afs_int32),
(int)sizeof(afs_int32), &tSize);
tSize = (afs_uint32) ntohl(tSize); /* peer's receive window, if it's */
if (tSize < call->twind) { /* smaller than our send */
rx_AckDataSize(ap->nAcks) + 4 * sizeof(afs_int32)) {
/* AFS 3.5 */
rx_packetread(np,
- rx_AckDataSize(ap->nAcks) + 2 * sizeof(afs_int32),
+ rx_AckDataSize(ap->nAcks) + 2 * (int)sizeof(afs_int32),
sizeof(afs_int32), &tSize);
tSize = (afs_uint32) ntohl(tSize);
/*
* larger than the natural MTU.
*/
rx_packetread(np,
- rx_AckDataSize(ap->nAcks) + 3 * sizeof(afs_int32),
- sizeof(afs_int32), &tSize);
+ rx_AckDataSize(ap->nAcks) + 3 * (int)sizeof(afs_int32),
+ (int)sizeof(afs_int32), &tSize);
maxDgramPackets = (afs_uint32) ntohl(tSize);
maxDgramPackets = MIN(maxDgramPackets, rxi_nDgramPackets);
- maxDgramPackets = MIN(maxDgramPackets, peer->ifDgramPackets);
- if (peer->natMTU < peer->ifMTU)
- maxDgramPackets = MIN(maxDgramPackets, rxi_AdjustDgramPackets(1, peer->natMTU));
+ maxDgramPackets =
+ MIN(maxDgramPackets, (int)(peer->ifDgramPackets));
+ maxDgramPackets = MIN(maxDgramPackets, tSize);
if (maxDgramPackets > 1) {
peer->maxDgramPackets = maxDgramPackets;
call->MTU = RX_JUMBOBUFFERSIZE + RX_HEADER_SIZE;
&& call->tfirst + call->nSoftAcked >= call->tnext) {
call->state = RX_STATE_DALLY;
rxi_ClearTransmitQueue(call, 0);
+ rxevent_Cancel(call->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE);
} else if (!queue_IsEmpty(&call->tq)) {
rxi_Start(0, call, 0, istack);
}
/* Received a response to a challenge packet */
struct rx_packet *
-rxi_ReceiveResponsePacket(register struct rx_connection *conn,
- register struct rx_packet *np, int istack)
+rxi_ReceiveResponsePacket(struct rx_connection *conn,
+ struct rx_packet *np, int istack)
{
int error;
* challenge if it fails to get a response. */
struct rx_packet *
-rxi_ReceiveChallengePacket(register struct rx_connection *conn,
- register struct rx_packet *np, int istack)
+rxi_ReceiveChallengePacket(struct rx_connection *conn,
+ struct rx_packet *np, int istack)
{
int error;
* the given call structure. If one isn't available, queue up this
* call so it eventually gets one */
void
-rxi_AttachServerProc(register struct rx_call *call,
- register osi_socket socket, register int *tnop,
- register struct rx_call **newcallp)
+rxi_AttachServerProc(struct rx_call *call,
+ osi_socket socket, int *tnop,
+ struct rx_call **newcallp)
{
- register struct rx_serverQueueEntry *sq;
- register struct rx_service *service = call->conn->service;
- register int haveQuota = 0;
+ struct rx_serverQueueEntry *sq;
+ struct rx_service *service = call->conn->service;
+ int haveQuota = 0;
/* May already be attached */
if (call->state == RX_STATE_ACTIVE)
if (!(call->flags & RX_CALL_WAIT_PROC)) {
call->flags |= RX_CALL_WAIT_PROC;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_nWaiting++;
- rx_nWaited++;
- MUTEX_EXIT(&rx_stats_mutex);
+ MUTEX_ENTER(&rx_waiting_mutex);
+ rx_nWaiting++;
+ rx_nWaited++;
+ MUTEX_EXIT(&rx_waiting_mutex);
rxi_calltrace(RX_CALL_ARRIVAL, call);
SET_CALL_QUEUE_LOCK(call, &rx_serverPool_lock);
queue_Append(&rx_incomingCallQueue, call);
call->flags &= ~RX_CALL_WAIT_PROC;
if (queue_IsOnQueue(call)) {
queue_Remove(call);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_nWaiting--;
- MUTEX_EXIT(&rx_stats_mutex);
+
+ MUTEX_ENTER(&rx_waiting_mutex);
+ rx_nWaiting--;
+ MUTEX_EXIT(&rx_waiting_mutex);
}
}
call->state = RX_STATE_ACTIVE;
* is being prepared (in the case of a server). Rather than sending
* an ack packet, an ACKALL packet is sent. */
void
-rxi_AckAll(struct rxevent *event, register struct rx_call *call, char *dummy)
+rxi_AckAll(struct rxevent *event, struct rx_call *call, char *dummy)
{
#ifdef RX_ENABLE_LOCKS
if (event) {
}
void
-rxi_SendDelayedAck(struct rxevent *event, register struct rx_call *call,
- char *dummy)
+rxi_SendDelayedAck(struct rxevent *event, void *arg1, void *unused)
{
+ struct rx_call *call = arg1;
#ifdef RX_ENABLE_LOCKS
if (event) {
MUTEX_ENTER(&call->lock);
* clearing them out.
*/
static void
-rxi_SetAcksInTransmitQueue(register struct rx_call *call)
+rxi_SetAcksInTransmitQueue(struct rx_call *call)
{
- register struct rx_packet *p, *tp;
+ struct rx_packet *p, *tp;
int someAcked = 0;
for (queue_Scan(&call->tq, p, tp, rx_packet)) {
}
rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
- rxevent_Cancel(call->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE);
call->tfirst = call->tnext;
call->nSoftAcked = 0;
/* Clear out the transmit queue for the current call (all packets have
* been received by peer) */
void
-rxi_ClearTransmitQueue(register struct rx_call *call, register int force)
+rxi_ClearTransmitQueue(struct rx_call *call, int force)
{
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- register struct rx_packet *p, *tp;
+ struct rx_packet *p, *tp;
if (!force && (call->flags & RX_CALL_TQ_BUSY)) {
int someAcked = 0;
}
} else {
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- rxi_FreePackets(0, &call->tq);
+#ifdef RXDEBUG_PACKET
+ call->tqc -=
+#endif /* RXDEBUG_PACKET */
+ rxi_FreePackets(0, &call->tq);
+ if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
+#ifdef RX_ENABLE_LOCKS
+ CV_BROADCAST(&call->cv_tq);
+#else /* RX_ENABLE_LOCKS */
+ osi_rxWakeup(&call->tq);
+#endif /* RX_ENABLE_LOCKS */
+ }
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
call->flags &= ~RX_CALL_TQ_CLEARME;
}
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
- rxevent_Cancel(call->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE);
call->tfirst = call->tnext; /* implicitly acknowledge all data already sent */
call->nSoftAcked = 0;
}
void
-rxi_ClearReceiveQueue(register struct rx_call *call)
+rxi_ClearReceiveQueue(struct rx_call *call)
{
if (queue_IsNotEmpty(&call->rq)) {
- rx_packetReclaims += rxi_FreePackets(0, &call->rq);
+ u_short count;
+
+ count = rxi_FreePackets(0, &call->rq);
+ rx_packetReclaims += count;
+#ifdef RXDEBUG_PACKET
+ call->rqc -= count;
+ if ( call->rqc != 0 )
+ dpf(("rxi_ClearReceiveQueue call %"AFS_PTR_FMT" rqc %u != 0", call, call->rqc));
+#endif
call->flags &= ~(RX_CALL_RECEIVE_DONE | RX_CALL_HAVE_LAST);
}
if (call->state == RX_STATE_PRECALL) {
/* Send an abort packet for the specified call */
struct rx_packet *
-rxi_SendCallAbort(register struct rx_call *call, struct rx_packet *packet,
+rxi_SendCallAbort(struct rx_call *call, struct rx_packet *packet,
int istack, int force)
{
afs_int32 error;
* to send the abort packet.
*/
struct rx_packet *
-rxi_SendConnectionAbort(register struct rx_connection *conn,
+rxi_SendConnectionAbort(struct rx_connection *conn,
struct rx_packet *packet, int istack, int force)
{
afs_int32 error;
* error at this point, so that future packets received will be
* rejected. */
void
-rxi_ConnectionError(register struct rx_connection *conn,
- register afs_int32 error)
+rxi_ConnectionError(struct rx_connection *conn,
+ afs_int32 error)
{
if (error) {
- register int i;
+ int i;
- dpf(("rxi_ConnectionError conn %x error %d", conn, error));
+ dpf(("rxi_ConnectionError conn %"AFS_PTR_FMT" error %d", conn, error));
MUTEX_ENTER(&conn->conn_data_lock);
if (conn->challengeEvent)
rxevent_Cancel(conn->challengeEvent, (struct rx_call *)0, 0);
+ if (conn->natKeepAliveEvent)
+ rxevent_Cancel(conn->natKeepAliveEvent, (struct rx_call *)0, 0);
if (conn->checkReachEvent) {
rxevent_Cancel(conn->checkReachEvent, (struct rx_call *)0, 0);
conn->checkReachEvent = 0;
}
}
conn->error = error;
- rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
}
}
void
-rxi_CallError(register struct rx_call *call, afs_int32 error)
+rxi_CallError(struct rx_call *call, afs_int32 error)
{
- dpf(("rxi_CallError call %x error %d call->error %d", call, error, call->error));
+#ifdef DEBUG
+ osirx_AssertMine(&call->lock, "rxi_CallError");
+#endif
+ dpf(("rxi_CallError call %"AFS_PTR_FMT" error %d call->error %d", call, error, call->error));
if (call->error)
error = call->error;
-#ifdef RX_GLOBAL_RXLOCK_KERNEL
+#ifdef AFS_GLOBAL_RXLOCK_KERNEL
if (!((call->flags & RX_CALL_TQ_BUSY) || (call->tqWaiters > 0))) {
rxi_ResetCall(call, 0);
}
#endif /* ADAPT_WINDOW */
void
-rxi_ResetCall(register struct rx_call *call, register int newcall)
+rxi_ResetCall(struct rx_call *call, int newcall)
{
- register int flags;
- register struct rx_peer *peer;
+ int flags;
+ struct rx_peer *peer;
struct rx_packet *packet;
-
- dpf(("rxi_ResetCall(call %x, newcall %d)\n", call, newcall));
+#ifdef DEBUG
+ osirx_AssertMine(&call->lock, "rxi_ResetCall");
+#endif
+ dpf(("rxi_ResetCall(call %"AFS_PTR_FMT", newcall %d)\n", call, newcall));
/* Notify anyone who is waiting for asynchronous packet arrival */
if (call->arrivalProc) {
MUTEX_EXIT(&peer->peer_lock);
flags = call->flags;
- rxi_ClearReceiveQueue(call);
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- if (flags & RX_CALL_TQ_BUSY) {
- call->flags = RX_CALL_TQ_CLEARME | RX_CALL_TQ_BUSY;
- call->flags |= (flags & RX_CALL_TQ_WAIT);
- } else
+ rxi_WaitforTQBusy(call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- {
- rxi_ClearTransmitQueue(call, 0);
- queue_Init(&call->tq);
- if (call->tqWaiters || (flags & RX_CALL_TQ_WAIT)) {
- dpf(("rcall %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
- }
- call->flags = 0;
- while (call->tqWaiters) {
-#ifdef RX_ENABLE_LOCKS
- CV_BROADCAST(&call->cv_tq);
-#else /* RX_ENABLE_LOCKS */
- osi_rxWakeup(&call->tq);
-#endif /* RX_ENABLE_LOCKS */
- call->tqWaiters--;
- }
+
+ rxi_ClearTransmitQueue(call, 1);
+ if (call->tqWaiters || (flags & RX_CALL_TQ_WAIT)) {
+ dpf(("rcall %"AFS_PTR_FMT" has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
}
- queue_Init(&call->rq);
+ call->flags = 0;
+
+ rxi_ClearReceiveQueue(call);
+ /* why init the queue if you just emptied it? queue_Init(&call->rq); */
+
+ if (call->currentPacket) {
+ call->currentPacket->flags &= ~RX_PKTFLAG_CP;
+ call->currentPacket->flags |= RX_PKTFLAG_IOVQ;
+ queue_Prepend(&call->iovq, call->currentPacket);
+#ifdef RXDEBUG_PACKET
+ call->iovqc++;
+#endif /* RXDEBUG_PACKET */
+ call->currentPacket = (struct rx_packet *)0;
+ }
+ call->curlen = call->nLeft = call->nFree = 0;
+
+#ifdef RXDEBUG_PACKET
+ call->iovqc -=
+#endif
+ rxi_FreePackets(0, &call->iovq);
+
call->error = 0;
call->twind = call->conn->twind[call->channel];
call->rwind = call->conn->rwind[call->channel];
if (queue_IsOnQueue(call)) {
queue_Remove(call);
if (flags & RX_CALL_WAIT_PROC) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_nWaiting--;
- MUTEX_EXIT(&rx_stats_mutex);
+
+ MUTEX_ENTER(&rx_waiting_mutex);
+ rx_nWaiting--;
+ MUTEX_EXIT(&rx_waiting_mutex);
}
}
MUTEX_EXIT(call->call_queue_lock);
* ignored by mundanes, which indicates the maximum size packet this
* host can swallow. */
/*
- register struct rx_packet *optionalPacket; use to send ack (or null)
+ struct rx_packet *optionalPacket; use to send ack (or null)
int seq; Sequence number of the packet we are acking
int serial; Serial number of the packet
int pflags; Flags field from packet header
*/
struct rx_packet *
-rxi_SendAck(register struct rx_call *call,
- register struct rx_packet *optionalPacket, int serial, int reason,
+rxi_SendAck(struct rx_call *call,
+ struct rx_packet *optionalPacket, int serial, int reason,
int istack)
{
struct rx_ackPacket *ap;
- register struct rx_packet *rqp;
- register struct rx_packet *nxp; /* For queue_Scan */
- register struct rx_packet *p;
+ struct rx_packet *rqp;
+ struct rx_packet *nxp; /* For queue_Scan */
+ struct rx_packet *p;
u_char offset;
afs_int32 templ;
#ifdef RX_ENABLE_TSFPQ
#endif /* AFS_NT40_ENV */
#endif
{
- register int i, nbytes = p->length;
+ int i, nbytes = p->length;
for (i = 1; i < p->niovecs; i++) { /* vec 0 is ALWAYS header */
if (nbytes <= p->wirevec[i].iov_len) {
- register int savelen, saven;
+ int savelen, saven;
savelen = p->wirevec[i].iov_len;
saven = p->niovecs;
nbytes -= p->wirevec[i].iov_len;
}
}
- rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
#ifndef RX_ENABLE_TSFPQ
if (!optionalPacket)
rxi_FreePacket(p);
peer->nSent += len;
if (resending)
peer->reSends += len;
- rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexAdd(rx_stats.dataPacketsSent, len, rx_stats_mutex);
MUTEX_EXIT(&peer->peer_lock);
if (list[len - 1]->header.flags & RX_LAST_PACKET) {
* packet until the congestion window reaches the ack rate. */
if (list[i]->header.serial) {
requestAck = 1;
- rx_MutexIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
} else {
/* improved RTO calculation- not Karn */
list[i]->firstSent = *now;
}
}
- MUTEX_ENTER(&peer->peer_lock);
- peer->nSent++;
- if (resending)
- peer->reSends++;
- rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
- MUTEX_EXIT(&peer->peer_lock);
-
/* Tag this packet as not being the last in this group,
* for the receiver's benefit */
if (i < len - 1 || moreFlag) {
/* Update last send time for this call (for keep-alive
* processing), and for the connection (so that we can discover
* idle connections) */
- conn->lastSendTime = call->lastSendTime = clock_Sec();
+ call->lastSendData = conn->lastSendTime = call->lastSendTime = clock_Sec();
}
/* When sending packets we need to follow these rules:
#ifdef RX_ENABLE_LOCKS
/* Call rxi_Start, below, but with the call lock held. */
void
-rxi_StartUnlocked(struct rxevent *event, register struct rx_call *call,
- void *arg1, int istack)
+rxi_StartUnlocked(struct rxevent *event,
+ void *arg0, void *arg1, int istack)
{
+ struct rx_call *call = arg0;
+
MUTEX_ENTER(&call->lock);
rxi_Start(event, call, arg1, istack);
MUTEX_EXIT(&call->lock);
* better optimized for new packets, the usual case, now that we've
* got rid of queues of send packets. XXXXXXXXXXX */
void
-rxi_Start(struct rxevent *event, register struct rx_call *call,
- void *arg1, int istack)
+rxi_Start(struct rxevent *event,
+ void *arg0, void *arg1, int istack)
{
+ struct rx_call *call = arg0;
+
struct rx_packet *p;
- register struct rx_packet *nxp; /* Next pointer for queue_Scan */
+ struct rx_packet *nxp; /* Next pointer for queue_Scan */
struct rx_peer *peer = call->conn->peer;
struct clock now, usenow, retryTime;
int haveEvent;
}
if (call->error) {
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- rx_MutexIncrement(rx_tq_debug.rxi_start_in_error, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_tq_debug.rxi_start_in_error, rx_stats_mutex);
#endif
return;
}
* some of them have been retransmitted more times than more
* recent additions.
* Do a dance to avoid blocking after setting now. */
- clock_Zero(&retryTime);
MUTEX_ENTER(&peer->peer_lock);
- clock_Add(&retryTime, &peer->timeout);
+ retryTime = peer->timeout;
MUTEX_EXIT(&peer->peer_lock);
clock_GetTime(&now);
clock_Add(&retryTime, &now);
nXmitPackets = 0;
maxXmitPackets = MIN(call->twind, call->cwind);
xmitList = (struct rx_packet **)
- osi_Alloc(maxXmitPackets * sizeof(struct rx_packet *));
+#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
+ /* XXXX else we must drop any mtx we hold */
+ afs_osi_Alloc_NoSleep(maxXmitPackets * sizeof(struct rx_packet *));
+#else
+ osi_Alloc(maxXmitPackets * sizeof(struct rx_packet *));
+#endif
if (xmitList == NULL)
osi_Panic("rxi_Start, failed to allocate xmit list");
for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
/* We shouldn't be sending packets if a thread is waiting
* to initiate congestion recovery */
+ dpf(("call %d waiting to initiate fast recovery\n",
+ *(call->callNumber)));
break;
}
if ((nXmitPackets)
&& (call->flags & RX_CALL_FAST_RECOVER)) {
/* Only send one packet during fast recovery */
+ dpf(("call %d restricted to one packet per send during fast recovery\n",
+ *(call->callNumber)));
break;
}
if ((p->flags & RX_PKTFLAG_FREE)
if (p->flags & RX_PKTFLAG_ACKED) {
/* Since we may block, don't trust this */
usenow.sec = usenow.usec = 0;
- rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
continue; /* Ignore this packet if it has been acknowledged */
}
/* Note: if we're waiting for more window space, we can
* still send retransmits; hence we don't return here, but
* break out to schedule a retransmit event */
- dpf(("call %d waiting for window",
- *(call->callNumber)));
+ dpf(("call %d waiting for window (seq %d, twind %d, nSoftAcked %d, cwind %d)\n",
+ *(call->callNumber), p->header.seq, call->twind, call->nSoftAcked,
+ call->cwind));
break;
}
sizeof(struct rx_packet *));
goto restart;
}
+ dpf(("call %d xmit packet %"AFS_PTR_FMT" now %u.%06u retryTime %u.%06u nextRetry %u.%06u\n",
+ *(call->callNumber), p,
+ now.sec, now.usec,
+ p->retryTime.sec, p->retryTime.usec,
+ retryTime.sec, retryTime.usec));
xmitList[nXmitPackets++] = p;
}
}
if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
call->flags &= ~RX_CALL_TQ_BUSY;
if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
- dpf(("call %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
+ dpf(("call %"AFS_PTR_FMT" has %d waiters and flags %d\n",
+ call, call->tqWaiters, call->flags));
#ifdef RX_ENABLE_LOCKS
osirx_AssertMine(&call->lock, "rxi_Start start");
CV_BROADCAST(&call->cv_tq);
* the time to reset the call. This will also inform the using
* process that the call is in an error state.
*/
- rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
call->flags &= ~RX_CALL_TQ_BUSY;
if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
- dpf(("call %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
+ dpf(("call error %d while xmit %p has %d waiters and flags %d\n",
+ call->error, call, call->tqWaiters, call->flags));
#ifdef RX_ENABLE_LOCKS
osirx_AssertMine(&call->lock, "rxi_Start middle");
CV_BROADCAST(&call->cv_tq);
}
#ifdef RX_ENABLE_LOCKS
if (call->flags & RX_CALL_TQ_SOME_ACKED) {
- register int missing;
+ int missing;
call->flags &= ~RX_CALL_TQ_SOME_ACKED;
/* Some packets have received acks. If they all have, we can clear
* the transmit queue.
if (p->header.seq < call->tfirst
&& (p->flags & RX_PKTFLAG_ACKED)) {
queue_Remove(p);
+ p->flags &= ~RX_PKTFLAG_TQ;
+#ifdef RXDEBUG_PACKET
+ call->tqc--;
+#endif
rxi_FreePacket(p);
} else
missing = 1;
*/
call->flags &= ~RX_CALL_TQ_BUSY;
if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
- dpf(("call %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
+ dpf(("call %"AFS_PTR_FMT" has %d waiters and flags %d\n",
+ call, call->tqWaiters, call->flags));
#ifdef RX_ENABLE_LOCKS
osirx_AssertMine(&call->lock, "rxi_Start end");
CV_BROADCAST(&call->cv_tq);
* that we have just sent a packet (so keep alives aren't sent
* immediately) */
void
-rxi_Send(register struct rx_call *call, register struct rx_packet *p,
+rxi_Send(struct rx_call *call, struct rx_packet *p,
int istack)
{
- register struct rx_connection *conn = call->conn;
+ struct rx_connection *conn = call->conn;
/* Stamp each packet with the user supplied status */
p->header.userStatus = call->localStatus;
* processing), and for the connection (so that we can discover
* idle connections) */
conn->lastSendTime = call->lastSendTime = clock_Sec();
+ /* Don't count keepalives here, so idleness can be tracked. */
+ if ((p->header.type != RX_PACKET_TYPE_ACK) || (((struct rx_ackPacket *)rx_DataOf(p))->reason != RX_ACK_PING))
+ call->lastSendData = call->lastSendTime;
}
*/
#ifdef RX_ENABLE_LOCKS
int
-rxi_CheckCall(register struct rx_call *call, int haveCTLock)
+rxi_CheckCall(struct rx_call *call, int haveCTLock)
#else /* RX_ENABLE_LOCKS */
int
-rxi_CheckCall(register struct rx_call *call)
+rxi_CheckCall(struct rx_call *call)
#endif /* RX_ENABLE_LOCKS */
{
- register struct rx_connection *conn = call->conn;
+ struct rx_connection *conn = call->conn;
afs_uint32 now;
afs_uint32 deadTime;
-#ifdef RX_GLOBAL_RXLOCK_KERNEL
+#ifdef AFS_GLOBAL_RXLOCK_KERNEL
if (call->flags & RX_CALL_TQ_BUSY) {
/* Call is active and will be reset by rxi_Start if it's
* in an error state.
* number of seconds. */
if (now > (call->lastReceiveTime + deadTime)) {
if (call->state == RX_STATE_ACTIVE) {
+#ifdef ADAPT_PMTU
+#if defined(KERNEL) && defined(AFS_SUN57_ENV)
+ ire_t *ire;
+#if defined(AFS_SUN510_ENV) && defined(GLOBAL_NETSTACKID)
+ netstack_t *ns = netstack_find_by_stackid(GLOBAL_NETSTACKID);
+ ip_stack_t *ipst = ns->netstack_ip;
+#endif
+ ire = ire_cache_lookup(call->conn->peer->host
+#if defined(AFS_SUN510_ENV) && defined(ALL_ZONES)
+ , ALL_ZONES
+#if defined(AFS_SUN510_ENV) && (defined(ICL_3_ARG) || defined(GLOBAL_NETSTACKID))
+ , NULL
+#if defined(AFS_SUN510_ENV) && defined(GLOBAL_NETSTACKID)
+ , ipst
+#endif
+#endif
+#endif
+ );
+
+ if (ire && ire->ire_max_frag > 0)
+ rxi_SetPeerMtu(call->conn->peer->host, 0, ire->ire_max_frag);
+#if defined(GLOBAL_NETSTACKID)
+ netstack_rele(ns);
+#endif
+#endif
+#endif /* ADAPT_PMTU */
rxi_CallError(call, RX_CALL_DEAD);
return -1;
} else {
}
/* see if we have a non-activity timeout */
if (call->startWait && conn->idleDeadTime
- && ((call->startWait + conn->idleDeadTime) < now)) {
+ && ((call->startWait + conn->idleDeadTime) < now) &&
+ (call->flags & RX_CALL_READER_WAIT)) {
if (call->state == RX_STATE_ACTIVE) {
rxi_CallError(call, RX_CALL_TIMEOUT);
return -1;
}
}
+ if (call->lastSendData && conn->idleDeadTime && (conn->idleDeadErr != 0)
+ && ((call->lastSendData + conn->idleDeadTime) < now)) {
+ if (call->state == RX_STATE_ACTIVE) {
+ rxi_CallError(call, conn->idleDeadErr);
+ return -1;
+ }
+ }
/* see if we have a hard timeout */
if (conn->hardDeadTime
&& (now > (conn->hardDeadTime + call->startTime.sec))) {
return 0;
}
+/*
+ * Event handler: transmit a tiny RX "version" packet to the peer so that
+ * NAT/firewall address-mapping state for this connection does not expire.
+ * Runs off the rx event queue; arg1 is the connection that scheduled it
+ * (a connection reference was taken at schedule time and is released or
+ * recycled below).
+ */
+void
+rxi_NatKeepAliveEvent(struct rxevent *event, void *arg1, void *dummy)
+{
+    struct rx_connection *conn = arg1;
+    struct rx_header theader;
+    char tbuffer[1500];		/* scratch datagram; only header + 1 byte used */
+    struct sockaddr_in taddr;
+    char *tp;
+    char a[1] = { 0 };		/* single zero payload byte after the header */
+    struct iovec tmpiov[2];	/* only tmpiov[0] is filled and sent */
+    /* Server-side connections send on their service's socket; client
+     * connections use the shared rx_socket. */
+    osi_socket socket =
+        (conn->type ==
+         RX_CLIENT_CONNECTION ? rx_socket : conn->service->socket);
+
+
+    tp = &tbuffer[sizeof(struct rx_header)];
+    taddr.sin_family = AF_INET;
+    taddr.sin_port = rx_PortOf(rx_PeerOf(conn));
+    taddr.sin_addr.s_addr = rx_HostOf(rx_PeerOf(conn));
+#ifdef STRUCT_SOCKADDR_HAS_SA_LEN
+    taddr.sin_len = sizeof(struct sockaddr_in);
+#endif
+    memset(&theader, 0, sizeof(theader));
+    /* NOTE(review): epoch 999 + RX_PACKET_TYPE_VERSION presumably mirrors
+     * the rx debug/version-probe convention so the peer treats this as a
+     * harmless probe rather than call traffic -- confirm against the
+     * debug-port handling in MakeDebugCall/rxdebug. */
+    theader.epoch = htonl(999);
+    theader.cid = 0;
+    theader.callNumber = 0;
+    theader.seq = 0;
+    theader.serial = 0;
+    theader.type = RX_PACKET_TYPE_VERSION;
+    theader.flags = RX_LAST_PACKET;
+    theader.serviceId = 0;
+
+    memcpy(tbuffer, &theader, sizeof(theader));
+    memcpy(tp, &a, sizeof(a));
+    tmpiov[0].iov_base = tbuffer;
+    tmpiov[0].iov_len = 1 + sizeof(struct rx_header);
+
+    osi_NetSend(socket, &taddr, tmpiov, 1, 1 + sizeof(struct rx_header), 1);
+
+    MUTEX_ENTER(&conn->conn_data_lock);
+    /* Only reschedule ourselves if the connection would not be destroyed */
+    if (conn->refCount <= 1) {
+	/* We hold the last reference: dropping it destroys the connection,
+	 * so do not reschedule.  rx_DestroyConnection takes its own locks,
+	 * hence conn_data_lock is released first. */
+        conn->natKeepAliveEvent = NULL;
+        MUTEX_EXIT(&conn->conn_data_lock);
+        rx_DestroyConnection(conn); /* drop the reference for this */
+    } else {
+        conn->natKeepAliveEvent = NULL;
+        conn->refCount--; /* drop the reference for this */
+        rxi_ScheduleNatKeepAliveEvent(conn);	/* takes a fresh reference */
+        MUTEX_EXIT(&conn->conn_data_lock);
+    }
+}
+
+/*
+ * Post a NAT ping event conn->secondsUntilNatPing seconds from now,
+ * unless one is already pending or NAT pings are disabled (interval 0).
+ * All call sites in this file hold conn->conn_data_lock around this
+ * call.  A connection reference is taken here and released (or recycled)
+ * by rxi_NatKeepAliveEvent when the event fires.
+ */
+void
+rxi_ScheduleNatKeepAliveEvent(struct rx_connection *conn)
+{
+    if (!conn->natKeepAliveEvent && conn->secondsUntilNatPing) {
+        struct clock when, now;
+        clock_GetTime(&now);
+        when = now;
+        when.sec += conn->secondsUntilNatPing;
+        conn->refCount++; /* hold a reference for this */
+        conn->natKeepAliveEvent =
+            rxevent_PostNow(&when, &now, rxi_NatKeepAliveEvent, conn, 0);
+    }
+}
+
+/*
+ * Public knob: set the NAT keep-alive interval (seconds) for a
+ * connection.  A non-zero interval arms the ping cycle immediately;
+ * zero records the setting but does not cancel an already-posted event
+ * (the handler simply will not reschedule once the interval is 0).
+ */
+void
+rx_SetConnSecondsUntilNatPing(struct rx_connection *conn, afs_int32 seconds)
+{
+    MUTEX_ENTER(&conn->conn_data_lock);
+    conn->secondsUntilNatPing = seconds;
+    if (seconds != 0)
+        rxi_ScheduleNatKeepAliveEvent(conn);
+    MUTEX_EXIT(&conn->conn_data_lock);
+}
+
+/*
+ * (Re)arm NAT keep-alives for a connection using its currently
+ * configured interval; a no-op when the interval is zero or an event
+ * is already pending (both checks live in the scheduler).
+ */
+void
+rxi_NatKeepAliveOn(struct rx_connection *conn)
+{
+    MUTEX_ENTER(&conn->conn_data_lock);
+    rxi_ScheduleNatKeepAliveEvent(conn);
+    MUTEX_EXIT(&conn->conn_data_lock);
+}
/* When a call is in progress, this routine is called occasionally to
* make sure that some traffic has arrived (or been sent to) the peer.
* keep-alive packet (if we're actually trying to keep the call alive)
*/
void
-rxi_KeepAliveEvent(struct rxevent *event, register struct rx_call *call,
- char *dummy)
+rxi_KeepAliveEvent(struct rxevent *event, void *arg1, void *dummy)
{
+ struct rx_call *call = arg1;
struct rx_connection *conn;
afs_uint32 now;
void
-rxi_ScheduleKeepAliveEvent(register struct rx_call *call)
+rxi_ScheduleKeepAliveEvent(struct rx_call *call)
{
if (!call->keepAliveEvent) {
struct clock when, now;
/* N.B. rxi_KeepAliveOff: is defined earlier as a macro */
void
-rxi_KeepAliveOn(register struct rx_call *call)
+rxi_KeepAliveOn(struct rx_call *call)
{
/* Pretend last packet received was received now--i.e. if another
* packet isn't received within the keep alive time, then the call
* that have been delayed to throttle looping clients. */
void
rxi_SendDelayedConnAbort(struct rxevent *event,
- register struct rx_connection *conn, char *dummy)
+ void *arg1, void *unused)
{
+ struct rx_connection *conn = arg1;
+
afs_int32 error;
struct rx_packet *packet;
/* This routine is called to send call abort messages
* that have been delayed to throttle looping clients. */
void
-rxi_SendDelayedCallAbort(struct rxevent *event, register struct rx_call *call,
- char *dummy)
+rxi_SendDelayedCallAbort(struct rxevent *event,
+ void *arg1, void *dummy)
{
+ struct rx_call *call = arg1;
+
afs_int32 error;
struct rx_packet *packet;
* issues a challenge to the client, which is obtained from the
* security object associated with the connection */
void
-rxi_ChallengeEvent(struct rxevent *event, register struct rx_connection *conn,
- void *arg1, int tries)
+rxi_ChallengeEvent(struct rxevent *event,
+ void *arg0, void *arg1, int tries)
{
+ struct rx_connection *conn = arg0;
+
conn->challengeEvent = NULL;
if (RXS_CheckAuthentication(conn->securityObject, conn) != 0) {
- register struct rx_packet *packet;
+ struct rx_packet *packet;
struct clock when, now;
if (tries <= 0) {
* the challenge at this time. N.B. rxi_ChallengeOff is a macro,
* defined earlier. */
void
-rxi_ChallengeOn(register struct rx_connection *conn)
+rxi_ChallengeOn(struct rx_connection *conn)
{
if (!conn->challengeEvent) {
RXS_CreateChallenge(conn->securityObject, conn);
/* rxi_ComputeRoundTripTime is called with peer locked. */
/* sentp and/or peer may be null */
void
-rxi_ComputeRoundTripTime(register struct rx_packet *p,
- register struct clock *sentp,
- register struct rx_peer *peer)
+rxi_ComputeRoundTripTime(struct rx_packet *p,
+ struct clock *sentp,
+ struct rx_peer *peer)
{
struct clock thisRtt, *rttp = &thisRtt;
- register int rtt_timeout;
+ int rtt_timeout;
clock_GetTime(rttp);
return; /* somebody set the clock back, don't count this time. */
}
clock_Sub(rttp, sentp);
- MUTEX_ENTER(&rx_stats_mutex);
- if (clock_Lt(rttp, &rx_stats.minRtt))
- rx_stats.minRtt = *rttp;
- if (clock_Gt(rttp, &rx_stats.maxRtt)) {
- if (rttp->sec > 60) {
- MUTEX_EXIT(&rx_stats_mutex);
- return; /* somebody set the clock ahead */
- }
- rx_stats.maxRtt = *rttp;
+ dpf(("rxi_ComputeRoundTripTime(call=%d packet=%"AFS_PTR_FMT" rttp=%d.%06d sec)\n",
+ p->header.callNumber, p, rttp->sec, rttp->usec));
+
+ if (rttp->sec == 0 && rttp->usec == 0) {
+ /*
+ * The actual round trip time is shorter than the
+ * clock_GetTime resolution. It is most likely 1ms or 100ns.
+ * Since we can't tell which at the moment we will assume 1ms.
+ */
+ rttp->usec = 1000;
+ }
+
+ if (rx_stats_active) {
+ MUTEX_ENTER(&rx_stats_mutex);
+ if (clock_Lt(rttp, &rx_stats.minRtt))
+ rx_stats.minRtt = *rttp;
+ if (clock_Gt(rttp, &rx_stats.maxRtt)) {
+ if (rttp->sec > 60) {
+ MUTEX_EXIT(&rx_stats_mutex);
+ return; /* somebody set the clock ahead */
+ }
+ rx_stats.maxRtt = *rttp;
+ }
+ clock_Add(&rx_stats.totalRtt, rttp);
+ rx_stats.nRttSamples++;
+ MUTEX_EXIT(&rx_stats_mutex);
}
- clock_Add(&rx_stats.totalRtt, rttp);
- rx_stats.nRttSamples++;
- MUTEX_EXIT(&rx_stats_mutex);
/* better rtt calculation courtesy of UMich crew (dave,larry,peter,?) */
/* Apply VanJacobson round-trip estimations */
if (peer->rtt) {
- register int delta;
+ int delta;
/*
* srtt (peer->rtt) is in units of one-eighth-milliseconds.
* srtt is stored as fixed point with 3 bits after the binary
* point (i.e., scaled by 8). The following magic is
* equivalent to the smoothing algorithm in rfc793 with an
- * alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed point).
- * srtt*8 = srtt*8 + rtt - srtt
- * srtt = srtt + rtt/8 - srtt/8
+ * alpha of .875 (srtt' = rtt/8 + srtt*7/8 in fixed point).
+ * srtt'*8 = rtt + srtt*7
+ * srtt'*8 = srtt*8 + rtt - srtt
+ * srtt' = srtt + rtt/8 - srtt/8
+ * srtt' = srtt + (rtt - srtt)/8
*/
- delta = MSEC(rttp) - (peer->rtt >> 3);
- peer->rtt += delta;
+ delta = _8THMSEC(rttp) - peer->rtt;
+ peer->rtt += (delta >> 3);
/*
* We accumulate a smoothed rtt variance (actually, a smoothed
* rttvar is stored as
* fixed point with 2 bits after the binary point (scaled by
* 4). The following is equivalent to rfc793 smoothing with
- * an alpha of .75 (rttvar = rttvar*3/4 + |delta| / 4). This
- * replaces rfc793's wired-in beta.
+ * an alpha of .75 (rttvar' = rttvar*3/4 + |delta| / 4).
+ * rttvar'*4 = rttvar*3 + |delta|
+ * rttvar'*4 = rttvar*4 + |delta| - rttvar
+ * rttvar' = rttvar + |delta|/4 - rttvar/4
+ * rttvar' = rttvar + (|delta| - rttvar)/4
+ * This replaces rfc793's wired-in beta.
* dev*4 = dev*4 + (|actual - expected| - dev)
*/
if (delta < 0)
delta = -delta;
- delta -= (peer->rtt_dev >> 2);
- peer->rtt_dev += delta;
+ delta -= (peer->rtt_dev << 1);
+ peer->rtt_dev += (delta >> 3);
} else {
/* I don't have a stored RTT so I start with this value. Since I'm
* probably just starting a call, and will be pushing more data down
* little, and I set deviance to half the rtt. In practice,
* deviance tends to approach something a little less than
* half the smoothed rtt. */
- peer->rtt = (MSEC(rttp) << 3) + 8;
+ peer->rtt = _8THMSEC(rttp) + 8;
peer->rtt_dev = peer->rtt >> 2; /* rtt/2: they're scaled differently */
}
- /* the timeout is RTT + 4*MDEV + 0.35 sec This is because one end or
- * the other of these connections is usually in a user process, and can
- * be switched and/or swapped out. So on fast, reliable networks, the
- * timeout would otherwise be too short.
- */
- rtt_timeout = (peer->rtt >> 3) + peer->rtt_dev + 350;
+ /* the timeout is RTT + 4*MDEV but no less than rx_minPeerTimeout msec.
+ * This is because one end or the other of these connections is usually
+ * in a user process, and can be switched and/or swapped out. So on fast,
+ * reliable networks, the timeout would otherwise be too short. */
+ rtt_timeout = MAX(((peer->rtt >> 3) + peer->rtt_dev), rx_minPeerTimeout);
clock_Zero(&(peer->timeout));
clock_Addmsec(&(peer->timeout), rtt_timeout);
- dpf(("rxi_ComputeRoundTripTime(rtt=%d ms, srtt=%d ms, rtt_dev=%d ms, timeout=%d.%0.3d sec)\n", MSEC(rttp), peer->rtt >> 3, peer->rtt_dev >> 2, (peer->timeout.sec), (peer->timeout.usec)));
+ dpf(("rxi_ComputeRoundTripTime(call=%d packet=%"AFS_PTR_FMT" rtt=%d ms, srtt=%d ms, rtt_dev=%d ms, timeout=%d.%06d sec)\n",
+ p->header.callNumber, p, MSEC(rttp), peer->rtt >> 3, peer->rtt_dev >> 2, (peer->timeout.sec), (peer->timeout.usec)));
}
/* Find all server connections that have not been active for a long time, and
* toss them */
void
-rxi_ReapConnections(void)
+rxi_ReapConnections(struct rxevent *unused, void *unused1, void *unused2)
{
struct clock now, when;
clock_GetTime(&now);
for (i = 0; i < RX_MAXCALLS; i++) {
call = conn->call[i];
if (call) {
+ int code;
havecalls = 1;
- MUTEX_ENTER(&call->lock);
+ code = MUTEX_TRYENTER(&call->lock);
+ if (!code)
+ continue;
#ifdef RX_ENABLE_LOCKS
result = rxi_CheckCall(call, 1);
#else /* RX_ENABLE_LOCKS */
rxi_rpc_peer_stat_cnt -= num_funcs;
}
rxi_FreePeer(peer);
- rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
if (peer == *peer_ptr) {
*peer_ptr = next;
prev = next;
*/
static void
-rxi_ComputeRate(register struct rx_peer *peer, register struct rx_call *call,
+rxi_ComputeRate(struct rx_peer *peer, struct rx_call *call,
struct rx_packet *p, struct rx_packet *ackp, u_char ackReason)
{
afs_int32 xferSize, xferMs;
- register afs_int32 minTime;
+ afs_int32 minTime;
struct clock newTO;
/* Count down packets */
return;
}
- dpf(("CONG peer %lx/%u: sample (%s) size %ld, %ld ms (to %lu.%06lu, rtt %u, ps %u)", ntohl(peer->host), ntohs(peer->port), (ackReason == RX_ACK_REQUESTED ? "dataack" : "pingack"), xferSize, xferMs, peer->timeout.sec, peer->timeout.usec, peer->smRtt, peer->ifMTU));
+ dpf(("CONG peer %lx/%u: sample (%s) size %ld, %ld ms (to %d.%06d, rtt %u, ps %u)",
+ ntohl(peer->host), ntohs(peer->port), (ackReason == RX_ACK_REQUESTED ? "dataack" : "pingack"),
+ xferSize, xferMs, peer->timeout.sec, peer->timeout.usec, peer->smRtt, peer->ifMTU));
/* Track only packets that are big enough. */
if ((p->length + RX_HEADER_SIZE + call->conn->securityMaxTrailerSize) <
* one packet exchange */
if (clock_Gt(&newTO, &peer->timeout)) {
- dpf(("CONG peer %lx/%u: timeout %lu.%06lu ==> %lu.%06lu (rtt %u, ps %u)", ntohl(peer->host), ntohs(peer->port), peer->timeout.sec, peer->timeout.usec, newTO.sec, newTO.usec, peer->smRtt, peer->packetSize));
+ dpf(("CONG peer %lx/%u: timeout %d.%06d ==> %ld.%06d (rtt %u, ps %u)",
+ ntohl(peer->host), ntohs(peer->port), peer->timeout.sec, peer->timeout.usec,
+ newTO.sec, newTO.usec, peer->smRtt, peer->packetSize));
peer->timeout = newTO;
}
/* calculate estimate for transmission interval in milliseconds */
minTime = rx_Window * peer->smRtt;
if (minTime < 1000) {
- dpf(("CONG peer %lx/%u: cut TO %lu.%06lu by 0.5 (rtt %u, ps %u)",
+ dpf(("CONG peer %lx/%u: cut TO %d.%06d by 0.5 (rtt %u, ps %u)",
ntohl(peer->host), ntohs(peer->port), peer->timeout.sec,
peer->timeout.usec, peer->smRtt, peer->packetSize));
#endif /* ADAPT_WINDOW */
-#ifdef RXDEBUG
void
rxi_DebugInit(void)
{
+#ifdef RXDEBUG
#ifdef AFS_NT40_ENV
-#define TRACE_OPTION_DEBUGLOG 4
+#define TRACE_OPTION_RX_DEBUG 16
HKEY parmKey;
DWORD dummyLen;
DWORD TraceOption;
code = RegQueryValueEx(parmKey, "TraceOption", NULL, NULL,
(BYTE *) &TraceOption, &dummyLen);
if (code == ERROR_SUCCESS) {
- rxdebug_active = (TraceOption & TRACE_OPTION_DEBUGLOG) ? 1 : 0;
+ rxdebug_active = (TraceOption & TRACE_OPTION_RX_DEBUG) ? 1 : 0;
}
RegCloseKey (parmKey);
#endif /* AFS_NT40_ENV */
+#endif
}
-#ifdef AFS_NT40_ENV
void
rx_DebugOnOff(int on)
{
+#ifdef RXDEBUG
+#ifdef AFS_NT40_ENV
rxdebug_active = on;
+#endif
+#endif
+}
+
+/*
+ * Enable or disable collection of global rx statistics; gates the
+ * "if (rx_stats_active)" checks wrapped around the rx_MutexIncrement /
+ * rx_MutexAdd sites elsewhere in this file.  Compiles to a no-op when
+ * RXDEBUG is not defined.
+ */
+void
+rx_StatsOnOff(int on)
+{
+#ifdef RXDEBUG
+    rx_stats_active = on;
+#endif
+}
-#endif /* AFS_NT40_ENV */
/* Don't call this debugging routine directly; use dpf */
void
-rxi_DebugPrint(char *format, int a1, int a2, int a3, int a4, int a5, int a6,
- int a7, int a8, int a9, int a10, int a11, int a12, int a13,
- int a14, int a15)
+rxi_DebugPrint(char *format, ...)
{
+#ifdef RXDEBUG
+ va_list ap;
#ifdef AFS_NT40_ENV
char msg[512];
char tformat[256];
size_t len;
+ va_start(ap, format);
+
len = _snprintf(tformat, sizeof(tformat), "tid[%d] %s", GetCurrentThreadId(), format);
if (len > 0) {
- len = _snprintf(msg, sizeof(msg)-2,
- tformat, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
- a11, a12, a13, a14, a15);
+ len = _vsnprintf(msg, sizeof(msg)-2, tformat, ap);
if (len > 0) {
if (msg[len-1] != '\n') {
msg[len] = '\n';
OutputDebugString(msg);
}
}
+ va_end(ap);
#else
struct clock now;
+
+ va_start(ap, format);
+
clock_GetTime(&now);
- fprintf(rx_Log, " %u.%.3u:", (unsigned int)now.sec,
- (unsigned int)now.usec / 1000);
- fprintf(rx_Log, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12,
- a13, a14, a15);
+ fprintf(rx_Log, " %d.%06d:", (unsigned int)now.sec,
+ (unsigned int)now.usec);
+ vfprintf(rx_Log, format, ap);
putc('\n', rx_Log);
+ va_end(ap);
+#endif
#endif
}
+#ifndef KERNEL
/*
* This function is used to process the rx_stats structure that is local
* to a process as well as an rx_stats structure received from a remote
* checking.
*/
void
-rx_PrintTheseStats(FILE * file, struct rx_stats *s, int size,
+rx_PrintTheseStats(FILE * file, struct rx_statistics *s, int size,
afs_int32 freePackets, char version)
{
+#ifdef RXDEBUG
int i;
- if (size != sizeof(struct rx_stats)) {
+ if (size != sizeof(struct rx_statistics)) {
fprintf(file,
- "Unexpected size of stats structure: was %d, expected %d\n",
- size, sizeof(struct rx_stats));
+ "Unexpected size of stats structure: was %d, expected %" AFS_SIZET_FMT "\n",
+ size, sizeof(struct rx_statistics));
}
fprintf(file, "rx stats: free packets %d, allocs %d, ", (int)freePackets,
s->packetRequests);
if (version >= RX_DEBUGI_VERSION_W_NEWPACKETTYPES) {
- fprintf(file, "alloc-failures(rcv %d/%d,send %d/%d,ack %d)\n",
+ fprintf(file, "alloc-failures(rcv %u/%u,send %u/%u,ack %u)\n",
s->receivePktAllocFailures, s->receiveCbufPktAllocFailures,
s->sendPktAllocFailures, s->sendCbufPktAllocFailures,
s->specialPktAllocFailures);
} else {
- fprintf(file, "alloc-failures(rcv %d,send %d,ack %d)\n",
+ fprintf(file, "alloc-failures(rcv %u,send %u,ack %u)\n",
s->receivePktAllocFailures, s->sendPktAllocFailures,
s->specialPktAllocFailures);
}
fprintf(file,
- " greedy %d, " "bogusReads %d (last from host %x), "
- "noPackets %d, " "noBuffers %d, " "selects %d, "
- "sendSelects %d\n", s->socketGreedy, s->bogusPacketOnRead,
+ " greedy %u, " "bogusReads %u (last from host %x), "
+ "noPackets %u, " "noBuffers %u, " "selects %u, "
+ "sendSelects %u\n", s->socketGreedy, s->bogusPacketOnRead,
s->bogusHost, s->noPacketOnRead, s->noPacketBuffersOnRead,
s->selects, s->sendSelects);
fprintf(file, " packets read: ");
for (i = 0; i < RX_N_PACKET_TYPES; i++) {
- fprintf(file, "%s %d ", rx_packetTypes[i], s->packetsRead[i]);
+ fprintf(file, "%s %u ", rx_packetTypes[i], s->packetsRead[i]);
}
fprintf(file, "\n");
fprintf(file,
- " other read counters: data %d, " "ack %d, " "dup %d "
- "spurious %d " "dally %d\n", s->dataPacketsRead,
+ " other read counters: data %u, " "ack %u, " "dup %u "
+ "spurious %u " "dally %u\n", s->dataPacketsRead,
s->ackPacketsRead, s->dupPacketsRead, s->spuriousPacketsRead,
s->ignorePacketDally);
fprintf(file, " packets sent: ");
for (i = 0; i < RX_N_PACKET_TYPES; i++) {
- fprintf(file, "%s %d ", rx_packetTypes[i], s->packetsSent[i]);
+ fprintf(file, "%s %u ", rx_packetTypes[i], s->packetsSent[i]);
}
fprintf(file, "\n");
fprintf(file,
- " other send counters: ack %d, " "data %d (not resends), "
- "resends %d, " "pushed %d, " "acked&ignored %d\n",
+ " other send counters: ack %u, " "data %u (not resends), "
+ "resends %u, " "pushed %u, " "acked&ignored %u\n",
s->ackPacketsSent, s->dataPacketsSent, s->dataPacketsReSent,
s->dataPacketsPushed, s->ignoreAckedPacket);
fprintf(file,
- " \t(these should be small) sendFailed %d, " "fatalErrors %d\n",
+ " \t(these should be small) sendFailed %u, " "fatalErrors %u\n",
s->netSendFailures, (int)s->fatalErrors);
if (s->nRttSamples) {
#if !defined(AFS_PTHREAD_ENV) && !defined(AFS_USE_GETTIMEOFDAY)
fprintf(file, " %d clock updates\n", clock_nUpdates);
#endif
-
+#else
+ fprintf(file, "ERROR: compiled without RXDEBUG\n");
+#endif
}
/* for backward compatibility */
void
rx_PrintPeerStats(FILE * file, struct rx_peer *peer)
{
- fprintf(file, "Peer %x.%d. " "Burst size %d, " "burst wait %u.%d.\n",
+ fprintf(file, "Peer %x.%d. " "Burst size %d, " "burst wait %d.%06d.\n",
ntohl(peer->host), (int)peer->port, (int)peer->burstSize,
(int)peer->burstWait.sec, (int)peer->burstWait.usec);
"max out packet skew %d\n", peer->ifMTU, (int)peer->inPacketSkew,
(int)peer->outPacketSkew);
}
+#endif
-#ifdef AFS_PTHREAD_ENV
+#if defined(AFS_PTHREAD_ENV) && defined(RXDEBUG)
/*
* This mutex protects the following static variables:
* counter
*/
-#define LOCK_RX_DEBUG assert(pthread_mutex_lock(&rx_debug_mutex)==0)
-#define UNLOCK_RX_DEBUG assert(pthread_mutex_unlock(&rx_debug_mutex)==0)
+#define LOCK_RX_DEBUG MUTEX_ENTER(&rx_debug_mutex)
+#define UNLOCK_RX_DEBUG MUTEX_EXIT(&rx_debug_mutex)
#else
#define LOCK_RX_DEBUG
#define UNLOCK_RX_DEBUG
#endif /* AFS_PTHREAD_ENV */
+#ifdef RXDEBUG
static int
MakeDebugCall(osi_socket socket, afs_uint32 remoteAddr, afs_uint16 remotePort,
u_char type, void *inputData, size_t inputLength,
time_t waitTime, waitCount, startTime;
struct rx_header theader;
char tbuffer[1500];
- register afs_int32 code;
+ afs_int32 code;
struct timeval tv_now, tv_wake, tv_delta;
struct sockaddr_in taddr, faddr;
+#ifdef AFS_NT40_ENV
int faddrLen;
+#else
+ socklen_t faddrLen;
+#endif
fd_set imask;
- register char *tp;
+ char *tp;
startTime = time(0);
waitTime = 1;
}
tv_delta.tv_sec -= tv_now.tv_sec;
+#ifdef AFS_NT40_ENV
+ code = select(0, &imask, 0, 0, &tv_delta);
+#else /* AFS_NT40_ENV */
code = select(socket + 1, &imask, 0, 0, &tv_delta);
+#endif /* AFS_NT40_ENV */
if (code == 1 && FD_ISSET(socket, &imask)) {
/* now receive a packet */
faddrLen = sizeof(struct sockaddr_in);
memcpy(outputData, tp, code);
return code;
}
+#endif /* RXDEBUG */
afs_int32
rx_GetServerDebug(osi_socket socket, afs_uint32 remoteAddr,
afs_uint16 remotePort, struct rx_debugStats * stat,
afs_uint32 * supportedValues)
{
- struct rx_debugIn in;
+#ifndef RXDEBUG
+ afs_int32 rc = -1;
+#else
afs_int32 rc = 0;
+ struct rx_debugIn in;
*supportedValues = 0;
in.type = htonl(RX_DEBUGI_GETSTATS);
if (stat->version >= RX_DEBUGI_VERSION_W_WAITED) {
*supportedValues |= RX_SERVER_DEBUG_WAITED_CNT;
}
-
+ if (stat->version >= RX_DEBUGI_VERSION_W_PACKETS) {
+ *supportedValues |= RX_SERVER_DEBUG_PACKETS_CNT;
+ }
stat->nFreePackets = ntohl(stat->nFreePackets);
stat->packetReclaims = ntohl(stat->packetReclaims);
stat->callsExecuted = ntohl(stat->callsExecuted);
stat->nWaiting = ntohl(stat->nWaiting);
stat->idleThreads = ntohl(stat->idleThreads);
+ stat->nWaited = ntohl(stat->nWaited);
+ stat->nPackets = ntohl(stat->nPackets);
}
-
+#endif
return rc;
}
afs_int32
rx_GetServerStats(osi_socket socket, afs_uint32 remoteAddr,
- afs_uint16 remotePort, struct rx_stats * stat,
+ afs_uint16 remotePort, struct rx_statistics * stat,
afs_uint32 * supportedValues)
{
+#ifndef RXDEBUG
+ afs_int32 rc = -1;
+#else
+ afs_int32 rc = 0;
struct rx_debugIn in;
afs_int32 *lp = (afs_int32 *) stat;
int i;
- afs_int32 rc = 0;
/*
* supportedValues is currently unused, but added to allow future
*lp = ntohl(*lp);
}
}
-
+#endif
return rc;
}
afs_uint16 remotePort, size_t version_length,
char *version)
{
+#ifdef RXDEBUG
char a[1] = { 0 };
return MakeDebugCall(socket, remoteAddr, remotePort,
RX_PACKET_TYPE_VERSION, a, 1, version,
version_length);
+#else
+ return -1;
+#endif
}
afs_int32
struct rx_debugConn * conn,
afs_uint32 * supportedValues)
{
- struct rx_debugIn in;
+#ifndef RXDEBUG
+ afs_int32 rc = -1;
+#else
afs_int32 rc = 0;
+ struct rx_debugIn in;
int i;
/*
conn->epoch = ntohl(conn->epoch);
conn->natMTU = ntohl(conn->natMTU);
}
-
+#endif
return rc;
}
afs_uint32 debugSupportedValues, struct rx_debugPeer * peer,
afs_uint32 * supportedValues)
{
- struct rx_debugIn in;
+#ifndef RXDEBUG
+ afs_int32 rc = -1;
+#else
afs_int32 rc = 0;
+ struct rx_debugIn in;
/*
* supportedValues is currently unused, but added to allow future
peer->bytesReceived.high = ntohl(peer->bytesReceived.high);
peer->bytesReceived.low = ntohl(peer->bytesReceived.low);
}
-
+#endif
return rc;
}
-#endif /* RXDEBUG */
+
+/*
+ * rx_GetLocalPeers -- look up the locally cached rx_peer matching
+ * (peerHost, peerPort) and copy its statistics into *peerStats.
+ *
+ * peerHost/peerPort must be in the same byte order as the values
+ * stored in tp->host / tp->port (network order, as used by PEER_HASH).
+ * Returns 0 on success, 1 if no matching peer is found.
+ */
+afs_int32
+rx_GetLocalPeers(afs_uint32 peerHost, afs_uint16 peerPort,
+		struct rx_debugPeer * peerStats)
+{
+	struct rx_peer *tp;
+	afs_int32 error = 1; /* default to "did not succeed" */
+	afs_uint32 hashValue = PEER_HASH(peerHost, peerPort);
+
+	MUTEX_ENTER(&rx_peerHashTable_lock);
+	for(tp = rx_peerHashTable[hashValue];
+	    tp != NULL; tp = tp->next) {
+		/* The chain is hashed on both host and port, so the match
+		 * must test both; matching on host alone could return the
+		 * stats of a different port on the same host. */
+		if (tp->host == peerHost && tp->port == peerPort)
+			break;
+	}
+
+	if (tp) {
+		error = 0;
+
+		/* Copy under rx_peerHashTable_lock so the peer cannot be
+		 * freed out from under us while we read its fields. */
+		peerStats->host = tp->host;
+		peerStats->port = tp->port;
+		peerStats->ifMTU = tp->ifMTU;
+		peerStats->idleWhen = tp->idleWhen;
+		peerStats->refCount = tp->refCount;
+		peerStats->burstSize = tp->burstSize;
+		peerStats->burst = tp->burst;
+		peerStats->burstWait.sec = tp->burstWait.sec;
+		peerStats->burstWait.usec = tp->burstWait.usec;
+		peerStats->rtt = tp->rtt;
+		peerStats->rtt_dev = tp->rtt_dev;
+		peerStats->timeout.sec = tp->timeout.sec;
+		peerStats->timeout.usec = tp->timeout.usec;
+		peerStats->nSent = tp->nSent;
+		peerStats->reSends = tp->reSends;
+		peerStats->inPacketSkew = tp->inPacketSkew;
+		peerStats->outPacketSkew = tp->outPacketSkew;
+		peerStats->rateFlag = tp->rateFlag;
+		peerStats->natMTU = tp->natMTU;
+		peerStats->maxMTU = tp->maxMTU;
+		peerStats->maxDgramPackets = tp->maxDgramPackets;
+		peerStats->ifDgramPackets = tp->ifDgramPackets;
+		peerStats->MTU = tp->MTU;
+		peerStats->cwind = tp->cwind;
+		peerStats->nDgramPackets = tp->nDgramPackets;
+		peerStats->congestSeq = tp->congestSeq;
+		peerStats->bytesSent.high = tp->bytesSent.high;
+		peerStats->bytesSent.low = tp->bytesSent.low;
+		peerStats->bytesReceived.high = tp->bytesReceived.high;
+		peerStats->bytesReceived.low = tp->bytesReceived.low;
+	}
+	MUTEX_EXIT(&rx_peerHashTable_lock);
+
+	return error;
+}
void
shutdown_rx(void)
{
struct rx_serverQueueEntry *np;
- register int i, j;
+ int i, j;
#ifndef KERNEL
- register struct rx_call *call;
- register struct rx_serverQueueEntry *sq;
+ struct rx_call *call;
+ struct rx_serverQueueEntry *sq;
#endif /* KERNEL */
LOCK_RX_INIT;
}
next = peer->next;
rxi_FreePeer(peer);
- rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
}
}
}
rxi_Free(rx_services[i], sizeof(*rx_services[i]));
}
for (i = 0; i < rx_hashTableSize; i++) {
- register struct rx_connection *tc, *ntc;
+ struct rx_connection *tc, *ntc;
MUTEX_ENTER(&rx_connHashTable_lock);
for (tc = rx_connHashTable[i]; tc; tc = ntc) {
ntc = tc->next;
rxi_FreeAllPackets();
- MUTEX_ENTER(&rx_stats_mutex);
+ MUTEX_ENTER(&rx_quota_mutex);
rxi_dataQuota = RX_MAX_QUOTA;
rxi_availProcs = rxi_totalMin = rxi_minDeficit = 0;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ MUTEX_EXIT(&rx_quota_mutex);
rxinit_status = 1;
UNLOCK_RX_INIT;
}
return FALSE;
}
}
+#endif /* AFS_NT40_ENV */
+
+#ifndef KERNEL
+/*
+ * rx_DumpCalls -- write a one-line summary of every rx_call on the
+ * global rx_allCallsp list to 'outputFile', each line prefixed with
+ * 'cookie'.  Compiled to a no-op returning 0 unless RXDEBUG_PACKET
+ * is defined.
+ *
+ * On AFS_NT40_ENV the text is formatted into a local buffer with
+ * sprintf and flushed with WriteFile (outputFile is used as a Win32
+ * handle); elsewhere it is written directly with fprintf.
+ *
+ * NOTE(review): rx_allCallsp is walked without holding a list lock
+ * (only each call's own c->lock is taken) -- presumably callers run
+ * this only when the call list is stable; confirm before relying on
+ * it in a live server.
+ */
+int rx_DumpCalls(FILE *outputFile, char *cookie)
+{
+#ifdef RXDEBUG_PACKET
+#ifdef KDUMP_RX_LOCK
+    struct rx_call_rx_lock *c;
+#else
+    struct rx_call *c;
+#endif
+#ifdef AFS_NT40_ENV
+    int zilch;
+    char output[2048];
+#define RXDPRINTF sprintf
+#define RXDPRINTOUT output
+#else
+#define RXDPRINTF fprintf
+#define RXDPRINTOUT outputFile
#endif
+    RXDPRINTF(RXDPRINTOUT, "%s - Start dumping all Rx Calls - count=%u\r\n", cookie, rx_stats.nCallStructs);
+#ifdef AFS_NT40_ENV
+    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
+
+    for (c = rx_allCallsp; c; c = c->allNextp) {
+        u_short rqc, tqc, iovqc;
+        struct rx_packet *p, *np;
+
+        /* Hold the call lock while counting its queues and reading its
+         * fields so the snapshot for this line is self-consistent. */
+        MUTEX_ENTER(&c->lock);
+        queue_Count(&c->rq, p, np, rx_packet, rqc);
+        queue_Count(&c->tq, p, np, rx_packet, tqc);
+        queue_Count(&c->iovq, p, np, rx_packet, iovqc);
+
+        /* The format string and argument list below have matching
+         * conditional tails (RX_ENABLE_LOCKS, RX_REFCOUNT_CHECK); any
+         * edit must keep the two #ifdef sequences in lockstep. */
+        RXDPRINTF(RXDPRINTOUT, "%s - call=0x%p, id=%u, state=%u, mode=%u, conn=%p, epoch=%u, cid=%u, callNum=%u, connFlags=0x%x, flags=0x%x, "
+                "rqc=%u,%u, tqc=%u,%u, iovqc=%u,%u, "
+                "lstatus=%u, rstatus=%u, error=%d, timeout=%u, "
+                "resendEvent=%d, timeoutEvt=%d, keepAliveEvt=%d, delayedAckEvt=%d, delayedAbortEvt=%d, abortCode=%d, abortCount=%d, "
+                "lastSendTime=%u, lastRecvTime=%u, lastSendData=%u"
+#ifdef RX_ENABLE_LOCKS
+                ", refCount=%u"
+#endif
+#ifdef RX_REFCOUNT_CHECK
+                ", refCountBegin=%u, refCountResend=%u, refCountDelay=%u, "
+                "refCountAlive=%u, refCountPacket=%u, refCountSend=%u, refCountAckAll=%u, refCountAbort=%u"
+#endif
+                "\r\n",
+                cookie, c, c->call_id, (afs_uint32)c->state, (afs_uint32)c->mode, c->conn, c->conn?c->conn->epoch:0, c->conn?c->conn->cid:0,
+                c->callNumber?*c->callNumber:0, c->conn?c->conn->flags:0, c->flags,
+                (afs_uint32)c->rqc, (afs_uint32)rqc, (afs_uint32)c->tqc, (afs_uint32)tqc, (afs_uint32)c->iovqc, (afs_uint32)iovqc,
+                (afs_uint32)c->localStatus, (afs_uint32)c->remoteStatus, c->error, c->timeout,
+                c->resendEvent?1:0, c->timeoutEvent?1:0, c->keepAliveEvent?1:0, c->delayedAckEvent?1:0, c->delayedAbortEvent?1:0,
+                c->abortCode, c->abortCount, c->lastSendTime, c->lastReceiveTime, c->lastSendData
+#ifdef RX_ENABLE_LOCKS
+                , (afs_uint32)c->refCount
+#endif
+#ifdef RX_REFCOUNT_CHECK
+                , c->refCDebug[0],c->refCDebug[1],c->refCDebug[2],c->refCDebug[3],c->refCDebug[4],c->refCDebug[5],c->refCDebug[6],c->refCDebug[7]
+#endif
+                );
+        MUTEX_EXIT(&c->lock);
+
+#ifdef AFS_NT40_ENV
+        WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
+    }
+    RXDPRINTF(RXDPRINTOUT, "%s - End dumping all Rx Calls\r\n", cookie);
+#ifdef AFS_NT40_ENV
+    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
+#endif /* RXDEBUG_PACKET */
+    return 0;
+}
+#endif