/* RX: Extended Remote Procedure Call */
+#include <afsconfig.h>
#ifdef KERNEL
-#include "../afs/param.h"
-#include "../afs/sysincludes.h"
-#include "../afs/afsincludes.h"
+#include "afs/param.h"
+#else
+#include <afs/param.h>
+#endif
+
+RCSID("$Header$");
+
+#ifdef KERNEL
+#include "afs/sysincludes.h"
+#include "afsincludes.h"
#ifndef UKERNEL
-#include "../h/types.h"
-#include "../h/time.h"
-#include "../h/stat.h"
+#include "h/types.h"
+#include "h/time.h"
+#include "h/stat.h"
#ifdef AFS_OSF_ENV
#include <net/net_globals.h>
#endif /* AFS_OSF_ENV */
#ifdef AFS_LINUX20_ENV
-#include "../h/socket.h"
+#include "h/socket.h"
#endif
-#include "../netinet/in.h"
-#include "../afs/afs_args.h"
-#include "../afs/afs_osi.h"
+#include "netinet/in.h"
+#include "afs/afs_args.h"
+#include "afs/afs_osi.h"
#if (defined(AFS_AUX_ENV) || defined(AFS_AIX_ENV))
-#include "../h/systm.h"
+#include "h/systm.h"
#endif
#ifdef RXDEBUG
#undef RXDEBUG /* turn off debugging */
#endif /* RXDEBUG */
#if defined(AFS_SGI_ENV)
-#include "../sys/debug.h"
+#include "sys/debug.h"
#endif
-#include "../afsint/afsint.h"
+#include "afsint.h"
#ifdef AFS_ALPHA_ENV
#undef kmem_alloc
#undef kmem_free
#undef register
#endif /* AFS_ALPHA_ENV */
#else /* !UKERNEL */
-#include "../afs/sysincludes.h"
-#include "../afs/afsincludes.h"
+#include "afs/sysincludes.h"
+#include "afsincludes.h"
#endif /* !UKERNEL */
-#include "../afs/lock.h"
-#include "../rx/rx_kmutex.h"
-#include "../rx/rx_kernel.h"
-#include "../rx/rx_clock.h"
-#include "../rx/rx_queue.h"
-#include "../rx/rx.h"
-#include "../rx/rx_globals.h"
-#include "../rx/rx_trace.h"
+#include "afs/lock.h"
+#include "rx_kmutex.h"
+#include "rx_kernel.h"
+#include "rx_clock.h"
+#include "rx_queue.h"
+#include "rx.h"
+#include "rx_globals.h"
+#include "rx_trace.h"
#define AFSOP_STOP_RXCALLBACK 210 /* Stop CALLBACK process */
#define AFSOP_STOP_AFS 211 /* Stop AFS process */
#define AFSOP_STOP_BKG 212 /* Stop BKG process */
-#include "../afsint/afsint.h"
+#include "afsint.h"
extern afs_int32 afs_termState;
#ifdef AFS_AIX41_ENV
#include "sys/lockl.h"
#include "sys/lock_def.h"
#endif /* AFS_AIX41_ENV */
-# include "../afsint/rxgen_consts.h"
+# include "rxgen_consts.h"
#else /* KERNEL */
-# include <afs/param.h>
# include <sys/types.h>
# include <errno.h>
#ifdef AFS_NT40_ENV
# include <netinet/in.h>
# include <sys/time.h>
#endif
+#ifdef HAVE_STRING_H
+#include <string.h>
+#else
+#ifdef HAVE_STRINGS_H
+#include <strings.h>
+#endif
+#endif
# include "rx.h"
# include "rx_user.h"
# include "rx_clock.h"
# include "rx_queue.h"
# include "rx_globals.h"
# include "rx_trace.h"
-# include "rx_internal.h"
# include <afs/rxgen_consts.h>
#endif /* KERNEL */
-#ifdef RXDEBUG
-extern afs_uint32 LWP_ThreadId();
-#endif /* RXDEBUG */
-
int (*registerProgram)() = 0;
int (*swapNameProgram)() = 0;
+/* Local static routines */
+static void rxi_DestroyConnectionNoLock(register struct rx_connection *conn);
+#ifdef RX_ENABLE_LOCKS
+static void rxi_SetAcksInTransmitQueue(register struct rx_call *call);
+#endif
+
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
struct rx_tq_debug {
afs_int32 rxi_start_aborted; /* rxi_start awoke after rxi_Send in error. */
#define INIT_PTHREAD_LOCKS
#endif
-extern void rxi_DeleteCachedConnections(void);
-
/* Variables for handling the minProcs implementation. availProcs gives the
 * number of threads available in the pool at this moment (not counting dudes
 * currently executing requests).
*/
-extern void rxi_Delay(int);
-
-static int rxi_ServerThreadSelectingCall;
-
#ifdef RX_ENABLE_LOCKS
+static int rxi_ServerThreadSelectingCall;
static afs_kmutex_t rx_rpc_stats;
void rxi_StartUnlocked();
#endif
struct rx_connection *rxLastConn = 0;
#ifdef RX_ENABLE_LOCKS
-/* The locking hierarchy for rx fine grain locking is composed of five
+/* The locking hierarchy for rx fine grain locking is composed of these
* tiers:
+ *
+ * rx_connHashTable_lock - synchronizes conn creation, rx_connHashTable access
 * conn_call_lock - used to synchronize rx_EndCall and rx_NewCall
* call->lock - locks call data fields.
- * Most any other lock - these are all independent of each other.....
- * rx_freePktQ_lock
+ * These are independent of each other:
* rx_freeCallQueue_lock
- * freeSQEList_lock
- * rx_connHashTable_lock
- * rx_serverPool_lock
* rxi_keyCreate_lock
+ * rx_serverPool_lock
+ * freeSQEList_lock
+ *
+ * serverQueueEntry->lock
+ * rx_rpc_stats
* rx_peerHashTable_lock - locked under rx_connHashTable_lock
-
+ * peer->lock - locks peer data fields.
+ * conn_data_lock - ensures no more than one thread updates a conn data
+ *	field at the same time.
+ * rx_freePktQ_lock
+ *
* lowest level:
- * peer_lock - locks peer data fields.
- * conn_data_lock - that more than one thread is not updating a conn data
- * field at the same time.
+ * multi_handle->lock
+ * rxevent_lock
+ * rx_stats_mutex
+ *
* Do we need a lock to protect the peer field in the conn structure?
 * conn->peer was previously a constant for all intents and purposes, and so has no
* lock protecting this field. The multihomed client delta introduced
/* rxdb_fileID is used to identify the lock location, along with line#. */
static int rxdb_fileID = RXDB_FILE_RX;
#endif /* RX_LOCKS_DB */
-static void rxi_SetAcksInTransmitQueue();
-void osirx_AssertMine(afs_kmutex_t *lockaddr, char *msg);
#else /* RX_ENABLE_LOCKS */
#define SET_CALL_QUEUE_LOCK(C, L)
#define CLEAR_CALL_QUEUE_LOCK(C)
#endif /* RX_ENABLE_LOCKS */
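/* As an illustrative sketch of the ordering above (assuming fine grain
 * locking), a path that needs all three call-related locks, such as ending
 * a call, acquires them top-down and releases them in the reverse order:
 *
 *	MUTEX_ENTER(&conn->conn_call_lock);
 *	MUTEX_ENTER(&call->lock);
 *	MUTEX_ENTER(&conn->conn_data_lock);
 *	... update conn->flags and the call state ...
 *	MUTEX_EXIT(&conn->conn_data_lock);
 *	MUTEX_EXIT(&call->lock);
 *	MUTEX_EXIT(&conn->conn_call_lock);
 *
 * rx_EndCall below follows this pattern.
 */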
-static void rxi_DestroyConnectionNoLock();
-void rxi_DestroyConnection();
-void rxi_CleanupConnection();
struct rx_serverQueueEntry *rx_waitForPacket = 0;
/* ------------Exported Interfaces------------- */
#define UNLOCK_EPOCH
#endif /* AFS_PTHREAD_ENV */
-void rx_SetEpoch (epoch)
- afs_uint32 epoch;
+void rx_SetEpoch (afs_uint32 epoch)
{
LOCK_EPOCH
rx_epoch = epoch;
char *htable, *ptable;
int tmp_status;
+#if defined(AFS_DJGPP_ENV) && !defined(DEBUG)
+ __djgpp_set_quiet_socket(1);
+#endif
+
SPLVAR;
INIT_PTHREAD_LOCKS
#ifdef RX_LOCKS_DB
rxdb_init();
#endif /* RX_LOCKS_DB */
- MUTEX_INIT(&rx_stats_mutex, "rx_stats_mutex",MUTEX_DEFAULT,0);
- MUTEX_INIT(&rx_rpc_stats, "rx_rpc_stats",MUTEX_DEFAULT,0);
- MUTEX_INIT(&rx_freePktQ_lock, "rx_freePktQ_lock",MUTEX_DEFAULT,0);
+ MUTEX_INIT(&rx_stats_mutex, "rx_stats_mutex",MUTEX_DEFAULT,0);
+ MUTEX_INIT(&rx_rpc_stats, "rx_rpc_stats",MUTEX_DEFAULT,0);
+ MUTEX_INIT(&rx_freePktQ_lock, "rx_freePktQ_lock",MUTEX_DEFAULT,0);
MUTEX_INIT(&freeSQEList_lock, "freeSQEList lock",MUTEX_DEFAULT,0);
MUTEX_INIT(&rx_freeCallQueue_lock, "rx_freeCallQueue_lock",
MUTEX_DEFAULT,0);
rx_sleepLock = alloc_spinlock(LAST_HELD_ORDER-10, "rx_sleepLock");
#endif /* KERNEL && AFS_HPUX110_ENV */
#else /* RX_ENABLE_LOCKS */
-#if defined(KERNEL) && defined(AFS_GLOBAL_SUNLOCK) && !defined(AFS_HPUX_ENV)
+#if defined(KERNEL) && defined(AFS_GLOBAL_SUNLOCK) && !defined(AFS_HPUX_ENV) && !defined(AFS_OBSD_ENV)
mutex_init(&afs_rxglobal_lock, "afs_rxglobal_lock", MUTEX_DEFAULT, NULL);
#endif /* AFS_GLOBAL_SUNLOCK */
#endif /* RX_ENABLE_LOCKS */
rxi_nCalls = 0;
rx_connDeadTime = 12;
rx_tranquil = 0; /* reset flag */
- bzero((char *)&rx_stats, sizeof(struct rx_stats));
+ memset((char *)&rx_stats, 0, sizeof(struct rx_stats));
htable = (char *)
osi_Alloc(rx_hashTableSize*sizeof(struct rx_connection *));
PIN(htable, rx_hashTableSize*sizeof(struct rx_connection *)); /* XXXXX */
- bzero(htable, rx_hashTableSize*sizeof(struct rx_connection *));
+ memset(htable, 0, rx_hashTableSize*sizeof(struct rx_connection *));
ptable = (char *) osi_Alloc(rx_hashTableSize*sizeof(struct rx_peer *));
PIN(ptable, rx_hashTableSize*sizeof(struct rx_peer *)); /* XXXXX */
- bzero(ptable, rx_hashTableSize*sizeof(struct rx_peer *));
+ memset(ptable, 0, rx_hashTableSize*sizeof(struct rx_peer *));
/* Malloc up a bunch of packets & buffers */
rx_nFreePackets = 0;
/* This version of QuotaOK reserves quota if it's ok while the
* rx_serverPool_lock is held. Return quota using ReturnToServerPool().
*/
-static int QuotaOK(aservice)
-register struct rx_service *aservice;
+static int QuotaOK(register struct rx_service *aservice)
{
/* check if over max quota */
if (aservice->nRequestsRunning >= aservice->maxProcs) {
return 0;
}
-static void ReturnToServerPool(aservice)
-register struct rx_service *aservice;
+
+static void ReturnToServerPool(register struct rx_service *aservice)
{
aservice->nRequestsRunning--;
MUTEX_ENTER(&rx_stats_mutex);
}
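/* A sketch of how QuotaOK and ReturnToServerPool are meant to pair up
 * (illustrative only): the thread that successfully reserves quota for a
 * service must give it back once the request has been run, e.g.
 *
 *	if (QuotaOK(service)) {
 *		... attach the call and execute the request ...
 *		ReturnToServerPool(service);
 *	}
 */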
#else /* RX_ENABLE_LOCKS */
-static QuotaOK(aservice)
-register struct rx_service *aservice; {
+static int QuotaOK(register struct rx_service *aservice)
+{
int rc=0;
/* under min quota, we're OK */
if (aservice->nRequestsRunning < aservice->minProcs) return 1;
/* Called by rx_StartServer to start up lwp's to service calls.
NExistingProcs gives the number of procs already existing, and which
therefore needn't be created. */
-void rxi_StartServerProcs(nExistingProcs)
- int nExistingProcs;
+void rxi_StartServerProcs(int nExistingProcs)
{
register struct rx_service *service;
register int i;
/* This routine must be called if any services are exported. If the
* donateMe flag is set, the calling process is donated to the server
* process pool */
-void rx_StartServer(donateMe)
+void rx_StartServer(int donateMe)
{
register struct rx_service *service;
- register int i, nProcs;
+ register int i, nProcs=0;
SPLVAR;
clock_NewTime();
if (donateMe) {
#ifndef AFS_NT40_ENV
#ifndef KERNEL
- int code;
char name[32];
#ifdef AFS_PTHREAD_ENV
pid_t pid;
- pid = pthread_self();
+ pid = (pid_t) pthread_self();
#else /* AFS_PTHREAD_ENV */
PROCESS pid;
- code = LWP_CurrentProcess(&pid);
+ LWP_CurrentProcess(&pid);
#endif /* AFS_PTHREAD_ENV */
sprintf(name,"srv_%d", ++nProcs);
/* Create a new client connection to the specified service, using the
* specified security object to implement the security model for this
* connection. */
-struct rx_connection *
-rx_NewConnection(shost, sport, sservice, securityObject, serviceSecurityIndex)
- register afs_uint32 shost; /* Server host */
- u_short sport; /* Server port */
- u_short sservice; /* Server service id */
- register struct rx_securityClass *securityObject;
- int serviceSecurityIndex;
+struct rx_connection *rx_NewConnection(register afs_uint32 shost,
+ u_short sport, u_short sservice,
+ register struct rx_securityClass *securityObject, int serviceSecurityIndex)
{
int hashindex;
afs_int32 cid;
conn->ackRate = RX_FAST_ACK_RATE;
conn->nSpecific = 0;
conn->specific = NULL;
- conn->challengeEvent = (struct rxevent *)0;
- conn->delayedAbortEvent = (struct rxevent *)0;
+ conn->challengeEvent = NULL;
+ conn->delayedAbortEvent = NULL;
conn->abortCount = 0;
conn->error = 0;
return conn;
}
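/* A minimal client-side usage sketch for rx_NewConnection (illustrative
 * only; assumes the null security class from rxnull.c, a host address and
 * port already in network byte order, and service id 1):
 */
#if 0
static void example_client_call(afs_uint32 shost, u_short sport)
{
    struct rx_securityClass *secobj;
    struct rx_connection *tconn;
    struct rx_call *tcall;
    afs_int32 code;

    rx_Init(0);					/* any local port */
    secobj = rxnull_NewClientSecurityObject();
    tconn = rx_NewConnection(shost, sport, 1, secobj, 0);
    tcall = rx_NewCall(tconn);
    /* ... rx_Write the request, rx_Read the reply ... */
    code = rx_EndCall(tcall, 0);
    rx_DestroyConnection(tconn);
}
#endif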
-void rx_SetConnDeadTime(conn, seconds)
- register struct rx_connection *conn;
- register int seconds;
+void rx_SetConnDeadTime(register struct rx_connection *conn, register int seconds)
{
/* The idea is to set the dead time to a value that allows several
* keepalives to be dropped without timing out the connection. */
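    /* For example (assuming the usual secondsUntilPing = secondsUntilDead / 6
     * relationship), the default 12 second dead time yields a 2 second ping
     * interval, so on the order of five consecutive keepalives can be lost
     * before the connection is declared dead. */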
 * Cleanup a connection that was destroyed in rxi_DestroyConnectionNoLock.
* NOTE: must not be called with rx_connHashTable_lock held.
*/
-void rxi_CleanupConnection(conn)
- struct rx_connection *conn;
+void rxi_CleanupConnection(struct rx_connection *conn)
{
- int i;
-
/* Notify the service exporter, if requested, that this connection
* is being destroyed */
if (conn->type == RX_SERVER_CONNECTION && conn->service->destroyConnProc)
#ifndef KERNEL
if (conn->specific) {
+ int i;
for (i = 0 ; i < conn->nSpecific ; i++) {
if (conn->specific[i] && rxi_keyCreate_destructor[i])
(*rxi_keyCreate_destructor[i])(conn->specific[i]);
}
/* Destroy the specified connection */
-void rxi_DestroyConnection(conn)
- register struct rx_connection *conn;
+void rxi_DestroyConnection(register struct rx_connection *conn)
{
MUTEX_ENTER(&rx_connHashTable_lock);
rxi_DestroyConnectionNoLock(conn);
#endif /* RX_ENABLE_LOCKS */
}
-static void rxi_DestroyConnectionNoLock(conn)
- register struct rx_connection *conn;
+static void rxi_DestroyConnectionNoLock(register struct rx_connection *conn)
{
register struct rx_connection **conn_ptr;
register int havecalls = 0;
MUTEX_EXIT(&rx_stats_mutex);
}
- if (conn->refCount > 0) {
+ if ((conn->refCount > 0) || (conn->flags & RX_CONN_BUSY)) {
/* Busy; wait till the last guy before proceeding */
MUTEX_EXIT(&conn->conn_data_lock);
USERPRI;
* last reply packets */
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
- rxi_AckAll((struct rxevent *)0, call, 0);
+ if (call->state == RX_STATE_PRECALL ||
+ call->state == RX_STATE_ACTIVE) {
+ rxi_SendAck(call, 0, 0, 0, 0, RX_ACK_DELAY, 0);
+ } else {
+ rxi_AckAll(NULL, call, 0);
+ }
}
MUTEX_EXIT(&call->lock);
}
/* Make sure the connection is completely reset before deleting it. */
/* get rid of pending events that could zap us later */
- if (conn->challengeEvent) {
+ if (conn->challengeEvent)
rxevent_Cancel(conn->challengeEvent, (struct rx_call*)0, 0);
- }
+ if (conn->checkReachEvent)
+ rxevent_Cancel(conn->checkReachEvent, (struct rx_call*)0, 0);
/* Add the connection to the list of destroyed connections that
* need to be cleaned up. This is necessary to avoid deadlocks
}
/* Externally available version */
-void rx_DestroyConnection(conn)
- register struct rx_connection *conn;
+void rx_DestroyConnection(register struct rx_connection *conn)
{
SPLVAR;
 * to ensure that we don't get signalled after we found a call in an active
* state and before we go to sleep.
*/
-struct rx_call *rx_NewCall(conn)
- register struct rx_connection *conn;
+struct rx_call *rx_NewCall(register struct rx_connection *conn)
{
register int i;
register struct rx_call *call;
clock_GetTime(&queueTime);
AFS_RXGLOCK();
MUTEX_ENTER(&conn->conn_call_lock);
+
+ /*
+ * Check if there are others waiting for a new call.
+ * If so, let them go first to avoid starving them.
+ * This is a fairly simple scheme, and might not be
+ * a complete solution for large numbers of waiters.
+ */
+ if (conn->makeCallWaiters) {
+#ifdef RX_ENABLE_LOCKS
+ CV_WAIT(&conn->conn_call_cv, &conn->conn_call_lock);
+#else
+ osi_rxSleep(conn);
+#endif
+ }
+
for (;;) {
for (i=0; i<RX_MAXCALLS; i++) {
call = conn->call[i];
}
else {
call = rxi_NewCall(conn, i);
- MUTEX_ENTER(&call->lock);
break;
}
}
MUTEX_ENTER(&conn->conn_data_lock);
conn->flags |= RX_CONN_MAKECALL_WAITING;
MUTEX_EXIT(&conn->conn_data_lock);
+
+ conn->makeCallWaiters++;
#ifdef RX_ENABLE_LOCKS
CV_WAIT(&conn->conn_call_cv, &conn->conn_call_lock);
#else
osi_rxSleep(conn);
#endif
+ conn->makeCallWaiters--;
}
+ /*
+ * Wake up anyone else who might be giving us a chance to
+ * run (see code above that avoids resource starvation).
+ */
+#ifdef RX_ENABLE_LOCKS
+ CV_BROADCAST(&conn->conn_call_cv);
+#else
+ osi_rxWakeup(conn);
+#endif
CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
return call;
}
-rxi_HasActiveCalls(aconn)
-register struct rx_connection *aconn; {
+int rxi_HasActiveCalls(register struct rx_connection *aconn)
+{
register int i;
register struct rx_call *tcall;
SPLVAR;
NETPRI;
for(i=0; i<RX_MAXCALLS; i++) {
- if (tcall = aconn->call[i]) {
+ if ((tcall = aconn->call[i])) {
if ((tcall->state == RX_STATE_ACTIVE)
|| (tcall->state == RX_STATE_PRECALL)) {
USERPRI;
return 0;
}
-rxi_GetCallNumberVector(aconn, aint32s)
-register struct rx_connection *aconn;
-register afs_int32 *aint32s; {
+int rxi_GetCallNumberVector(register struct rx_connection *aconn,
+ register afs_int32 *aint32s)
+{
register int i;
register struct rx_call *tcall;
SPLVAR;
return 0;
}
-rxi_SetCallNumberVector(aconn, aint32s)
-register struct rx_connection *aconn;
-register afs_int32 *aint32s; {
+int rxi_SetCallNumberVector(register struct rx_connection *aconn,
+ register afs_int32 *aint32s)
+{
register int i;
register struct rx_call *tcall;
SPLVAR;
/* Advertise a new service. A service is named locally by a UDP port
* number plus a 16-bit service id. Returns (struct rx_service *) 0
- * on a failure. */
-struct rx_service *
-rx_NewService(port, serviceId, serviceName, securityObjects,
- nSecurityObjects, serviceProc)
- u_short port;
- u_short serviceId;
- char *serviceName; /* Name for identification purposes (e.g. the
- * service name might be used for probing for
- * statistics) */
- struct rx_securityClass **securityObjects;
- int nSecurityObjects;
- afs_int32 (*serviceProc)();
+ * on a failure.
+ *
+ * serviceName: name for identification purposes (e.g. the service name
+ * might be used for probing for statistics).
+ */
+struct rx_service *rx_NewService(u_short port, u_short serviceId,
+ char *serviceName,
+ struct rx_securityClass **securityObjects,
+ int nSecurityObjects, afs_int32 (*serviceProc)(struct rx_call *acall))
{
osi_socket socket = OSI_NULLSOCKET;
register struct rx_service *tservice;
service->idleDeadTime = 60;
service->connDeadTime = rx_connDeadTime;
service->executeRequestProc = serviceProc;
+ service->checkReach = 0;
rx_services[i] = service; /* not visible until now */
AFS_RXGUNLOCK();
USERPRI;
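/* A minimal server-side usage sketch for rx_NewService (illustrative only;
 * assumes a null security class and an application-supplied request handler
 * named ExampleExecuteRequest):
 */
#if 0
static void example_start_server(u_short port)
{
    struct rx_securityClass *secobjs[1];
    struct rx_service *tservice;

    rx_Init(port);			/* port in network byte order */
    secobjs[0] = rxnull_NewServerSecurityObject();
    tservice = rx_NewService(0, 1, "example", secobjs, 1,
			     ExampleExecuteRequest);
    if (tservice)
	rx_StartServer(1);		/* donate this thread to the pool */
}
#endif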
* non-null, it will be set to the file descriptor that this thread
* is now listening on. If socketp is null, this routine will never
 * return. */
-void rxi_ServerProc(threadID, newcall, socketp)
-int threadID;
-struct rx_call *newcall;
-osi_socket *socketp;
+void rxi_ServerProc(int threadID, struct rx_call *newcall, osi_socket *socketp)
{
register struct rx_call *call;
register afs_int32 code;
}
-void rx_WakeupServerProcs()
+void rx_WakeupServerProcs(void)
{
struct rx_serverQueueEntry *np, *tqp;
SPLVAR;
/* Sleep until a call arrives. Returns a pointer to the call, ready
* for an rx_Read. */
#ifdef RX_ENABLE_LOCKS
-struct rx_call *
-rx_GetCall(tno, cur_service, socketp)
-int tno;
-struct rx_service *cur_service;
-osi_socket *socketp;
+struct rx_call *rx_GetCall(int tno, struct rx_service *cur_service, osi_socket *socketp)
{
struct rx_serverQueueEntry *sq;
register struct rx_call *call = (struct rx_call *) 0, *choice2;
- struct rx_service *service;
+ struct rx_service *service = NULL;
SPLVAR;
MUTEX_ENTER(&freeSQEList_lock);
- if (sq = rx_FreeSQEList) {
+ if ((sq = rx_FreeSQEList)) {
rx_FreeSQEList = *(struct rx_serverQueueEntry **)sq;
MUTEX_EXIT(&freeSQEList_lock);
} else { /* otherwise allocate a new one and return that */
MUTEX_EXIT(&freeSQEList_lock);
sq = (struct rx_serverQueueEntry *) rxi_Alloc(sizeof(struct rx_serverQueueEntry));
- MUTEX_INIT(&sq->lock, "server Queue lock",MUTEX_DEFAULT,0);
+ MUTEX_INIT(&sq->lock, "server Queue lock",MUTEX_DEFAULT,0);
CV_INIT(&sq->cv, "server Queue lock", CV_DEFAULT, 0);
}
return call;
}
#else /* RX_ENABLE_LOCKS */
-struct rx_call *
-rx_GetCall(tno, cur_service, socketp)
- int tno;
- struct rx_service *cur_service;
- osi_socket *socketp;
+struct rx_call *rx_GetCall(int tno, struct rx_service *cur_service, osi_socket *socketp)
{
struct rx_serverQueueEntry *sq;
register struct rx_call *call = (struct rx_call *) 0, *choice2;
- struct rx_service *service;
+ struct rx_service *service = NULL;
SPLVAR;
NETPRI;
AFS_RXGLOCK();
MUTEX_ENTER(&freeSQEList_lock);
- if (sq = rx_FreeSQEList) {
+ if ((sq = rx_FreeSQEList)) {
rx_FreeSQEList = *(struct rx_serverQueueEntry **)sq;
MUTEX_EXIT(&freeSQEList_lock);
} else { /* otherwise allocate a new one and return that */
MUTEX_EXIT(&freeSQEList_lock);
sq = (struct rx_serverQueueEntry *) rxi_Alloc(sizeof(struct rx_serverQueueEntry));
- MUTEX_INIT(&sq->lock, "server Queue lock",MUTEX_DEFAULT,0);
+ MUTEX_INIT(&sq->lock, "server Queue lock",MUTEX_DEFAULT,0);
CV_INIT(&sq->cv, "server Queue lock", CV_DEFAULT, 0);
}
MUTEX_ENTER(&sq->lock);
* good idea to (1) use it immediately after a newcall (clients only)
* and (2) only use it once. Other uses currently void your warranty
*/
-void rx_SetArrivalProc(call, proc, handle, arg)
- register struct rx_call *call;
- register VOID (*proc)();
- register VOID *handle;
- register VOID *arg;
+void rx_SetArrivalProc(register struct rx_call *call,
+ register VOID (*proc)(register struct rx_call *call,
+ register struct multi_handle *mh, register int index),
+ register VOID *handle, register VOID *arg)
{
call->arrivalProc = proc;
call->arrivalProcHandle = handle;
* appropriate, and return the final error code from the conversation
* to the caller */
-afs_int32 rx_EndCall(call, rc)
- register struct rx_call *call;
- afs_int32 rc;
+afs_int32 rx_EndCall(register struct rx_call *call, afs_int32 rc)
{
register struct rx_connection *conn = call->conn;
register struct rx_service *service;
|| (call->mode == RX_MODE_RECEIVING && call->rnext == 1)) {
(void) rxi_ReadProc(call, &dummy, 1);
}
+
+ /* If we had an outstanding delayed ack, be nice to the server
+ * and force-send it now.
+ */
+ if (call->delayedAckEvent) {
+ rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
+ call->delayedAckEvent = NULL;
+ rxi_SendDelayedAck(NULL, call, NULL);
+ }
+
/* We need to release the call lock since it's lower than the
* conn_call_lock and we don't want to hold the conn_call_lock
* over the rx_ReadProc call. The conn_call_lock needs to be held
MUTEX_ENTER(&conn->conn_call_lock);
MUTEX_ENTER(&call->lock);
MUTEX_ENTER(&conn->conn_data_lock);
+ conn->flags |= RX_CONN_BUSY;
if (conn->flags & RX_CONN_MAKECALL_WAITING) {
conn->flags &= (~RX_CONN_MAKECALL_WAITING);
MUTEX_EXIT(&conn->conn_data_lock);
CALL_RELE(call, RX_CALL_REFCOUNT_BEGIN);
MUTEX_EXIT(&call->lock);
- if (conn->type == RX_CLIENT_CONNECTION)
+ if (conn->type == RX_CLIENT_CONNECTION) {
MUTEX_EXIT(&conn->conn_call_lock);
+ conn->flags &= ~RX_CONN_BUSY;
+ }
AFS_RXGUNLOCK();
USERPRI;
/*
* make to a dead client.
* This is not quite right, since some calls may still be ongoing and
* we can't lock them to destroy them. */
-void rx_Finalize() {
+void rx_Finalize(void)
+{
register struct rx_connection **conn_ptr, **conn_end;
INIT_PTHREAD_LOCKS
/* If we wake up the packet waiter too often, we can get into a loop with two
   AllocSendPackets each waking the other up (from ReclaimPacket calls) */
-void
-rxi_PacketsUnWait() {
-
+void rxi_PacketsUnWait(void)
+{
if (!rx_waitingForPackets) {
return;
}
/* Return this process's service structure for the
* specified socket and service */
-struct rx_service *rxi_FindService(socket, serviceId)
- register osi_socket socket;
- register u_short serviceId;
+struct rx_service *rxi_FindService(register osi_socket socket,
+ register u_short serviceId)
{
register struct rx_service **sp;
for (sp = &rx_services[0]; *sp; sp++) {
/* Allocate a call structure, for the indicated channel of the
* supplied connection. The mode and state of the call must be set by
- * the caller. */
-struct rx_call *rxi_NewCall(conn, channel)
- register struct rx_connection *conn;
- register int channel;
+ * the caller. Returns the call with mutex locked. */
+struct rx_call *rxi_NewCall(register struct rx_connection *conn,
+ register int channel)
{
register struct rx_call *call;
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
the call number is valid from the last time this channel was used */
if (*call->callNumber == 0) *call->callNumber = 1;
- MUTEX_EXIT(&call->lock);
return call;
}
* state, including the call structure, which is placed on the call
* free list.
* Call is locked upon entry.
+ * haveCTLock set if called from rxi_ReapConnections
*/
#ifdef RX_ENABLE_LOCKS
-void rxi_FreeCall(call, haveCTLock)
- int haveCTLock; /* Set if called from rxi_ReapConnections */
+void rxi_FreeCall(register struct rx_call *call, int haveCTLock)
#else /* RX_ENABLE_LOCKS */
-void rxi_FreeCall(call)
+void rxi_FreeCall(register struct rx_call *call)
#endif /* RX_ENABLE_LOCKS */
- register struct rx_call *call;
{
register int channel = call->channel;
register struct rx_connection *conn = call->conn;
}
afs_int32 rxi_Alloccnt = 0, rxi_Allocsize = 0;
-char *rxi_Alloc(size)
-register size_t size;
+char *rxi_Alloc(register size_t size)
{
register char *p;
p = (char *) osi_Alloc(size);
#endif
if (!p) osi_Panic("rxi_Alloc error");
- bzero(p, size);
+ memset(p, 0, size);
return p;
}
-void rxi_Free(addr, size)
-void *addr;
-register size_t size;
+void rxi_Free(void *addr, register size_t size)
{
#if defined(AFS_AIX41_ENV) && defined(KERNEL)
/* Grab the AFS filesystem lock. See afs/osi.h for the lock
* The origPeer, if set, is a pointer to a peer structure on which the
 * refcount will be decremented. This is used to replace the peer
* structure hanging off a connection structure */
-struct rx_peer *rxi_FindPeer(host, port, origPeer, create)
- register afs_uint32 host;
- register u_short port;
- struct rx_peer *origPeer;
- int create;
+struct rx_peer *rxi_FindPeer(register afs_uint32 host,
+ register u_short port, struct rx_peer *origPeer, int create)
{
register struct rx_peer *pp;
int hashIndex;
* parameter must match the existing index for the connection. If a
* server connection is created, it will be created using the supplied
* index, if the index is valid for this service */
-struct rx_connection *
-rxi_FindConnection(socket, host, port, serviceId, cid,
- epoch, type, securityIndex)
- osi_socket socket;
- register afs_int32 host;
- register u_short port;
- u_short serviceId;
- afs_uint32 cid;
- afs_uint32 epoch;
- int type;
- u_int securityIndex;
+struct rx_connection *rxi_FindConnection(osi_socket socket,
+ register afs_int32 host, register u_short port, u_short serviceId,
+ afs_uint32 cid, afs_uint32 epoch, int type, u_int securityIndex)
{
int hashindex, flag;
register struct rx_connection *conn;
- struct rx_peer *peer;
hashindex = CONN_HASH(host, port, cid, epoch, type);
MUTEX_ENTER(&rx_connHashTable_lock);
rxLastConn ? (conn = rxLastConn, flag = 0) :
MUTEX_EXIT(&rx_connHashTable_lock);
return (struct rx_connection *) 0;
}
- /* epoch's high order bits mean route for security reasons only on
- * the cid, not the host and port fields.
- */
- if (conn->epoch & 0x80000000) break;
- if (((type == RX_CLIENT_CONNECTION)
- || (pp->host == host)) && (pp->port == port))
- break;
+ if (pp->host == host && pp->port == port)
+ break;
+ if (type == RX_CLIENT_CONNECTION && pp->port == port)
+ break;
+ if (type == RX_CLIENT_CONNECTION && (conn->epoch & 0x80000000))
+ break;
}
if ( !flag )
{
CV_INIT(&conn->conn_call_cv, "conn call cv", CV_DEFAULT, 0);
conn->next = rx_connHashTable[hashindex];
rx_connHashTable[hashindex] = conn;
- peer = conn->peer = rxi_FindPeer(host, port, 0, 1);
+ conn->peer = rxi_FindPeer(host, port, 0, 1);
conn->type = RX_SERVER_CONNECTION;
conn->lastSendTime = clock_Sec(); /* don't GC immediately */
conn->epoch = epoch;
rx_stats.nServerConns++;
MUTEX_EXIT(&rx_stats_mutex);
}
- else
- {
- /* Ensure that the peer structure is set up in such a way that
- ** replies in this connection go back to that remote interface
- ** from which the last packet was sent out. In case, this packet's
- ** source IP address does not match the peer struct for this conn,
- ** then drop the refCount on conn->peer and get a new peer structure.
- ** We can check the host,port field in the peer structure without the
- ** rx_peerHashTable_lock because the peer structure has its refCount
- ** incremented and the only time the host,port in the peer struct gets
- ** updated is when the peer structure is created.
- */
- if (conn->peer->host == host )
- peer = conn->peer; /* no change to the peer structure */
- else
- peer = rxi_FindPeer(host, port, conn->peer, 1);
- }
MUTEX_ENTER(&conn->conn_data_lock);
conn->refCount++;
- conn->peer = peer;
MUTEX_EXIT(&conn->conn_data_lock);
rxLastConn = conn; /* store this connection as the last conn used */
* sender. This call returns the packet to the caller if it is finished with
* it, rather than de-allocating it, just as a small performance hack */
-struct rx_packet *rxi_ReceivePacket(np, socket, host, port, tnop, newcallp)
- register struct rx_packet *np;
- osi_socket socket;
- afs_uint32 host;
- u_short port;
- int *tnop;
- struct rx_call **newcallp;
+struct rx_packet *rxi_ReceivePacket(register struct rx_packet *np,
+ osi_socket socket, afs_uint32 host, u_short port,
+ int *tnop, struct rx_call **newcallp)
{
register struct rx_call *call;
register struct rx_connection *conn;
addr.sin_family = AF_INET;
addr.sin_port = port;
addr.sin_addr.s_addr = host;
-#if defined(AFS_OSF_ENV) && defined(_KERNEL)
+#ifdef STRUCT_SOCKADDR_HAS_SA_LEN
addr.sin_len = sizeof(addr);
-#endif /* AFS_OSF_ENV */
+#endif /* STRUCT_SOCKADDR_HAS_SA_LEN */
drop = (*rx_justReceived) (np, &addr);
MUTEX_ENTER(&conn->conn_data_lock);
if (conn->maxSerial < np->header.serial)
- conn->maxSerial = np->header.serial;
+ conn->maxSerial = np->header.serial;
MUTEX_EXIT(&conn->conn_data_lock);
/* If the connection is in an error state, send an abort packet and ignore
return np;
}
if (!call) {
+ MUTEX_ENTER(&conn->conn_call_lock);
call = rxi_NewCall(conn, channel);
- MUTEX_ENTER(&call->lock);
+ MUTEX_EXIT(&conn->conn_call_lock);
*call->callNumber = np->header.callNumber;
call->state = RX_STATE_PRECALL;
clock_GetTime(&call->queueTime);
struct rx_packet *tp;
rxi_CallError(call, RX_CALL_DEAD);
- tp = rxi_SendSpecial(call, conn, np, RX_PACKET_TYPE_BUSY, (char *) 0, 0, 1);
+ tp = rxi_SendSpecial(call, conn, np, RX_PACKET_TYPE_BUSY, NULL, 0, 1);
MUTEX_EXIT(&call->lock);
MUTEX_ENTER(&conn->conn_data_lock);
conn->refCount--;
/* Respond immediately to ack packets requesting acknowledgement
* (ping packets) */
if (np->header.flags & RX_REQUEST_ACK) {
- if (call->error) (void) rxi_SendCallAbort(call, 0, 1, 0);
- else (void) rxi_SendAck(call, 0, 0, 0, 0, RX_ACK_PING_RESPONSE, 1);
+ if (call->error)
+ (void) rxi_SendCallAbort(call, 0, 1, 0);
+ else
+ (void) rxi_SendAck(call, 0, 0, np->header.serial, 0,
+ RX_ACK_PING_RESPONSE, 1);
}
np = rxi_ReceiveAckPacket(call, np, 1);
break;
the precall state then ignore all subsequent packets until the call
is assigned to a thread. */
-static TooLow(ap, acall)
- struct rx_call *acall;
- struct rx_packet *ap; {
+static int TooLow(struct rx_packet *ap, struct rx_call *acall)
+{
int rc=0;
MUTEX_ENTER(&rx_stats_mutex);
if (((ap->header.seq != 1) &&
}
#endif /* KERNEL */
+static void rxi_CheckReachEvent(struct rxevent *event,
+ struct rx_connection *conn, struct rx_call *acall)
+{
+ struct rx_call *call = acall;
+ struct clock when;
+ int i, waiting;
+
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->checkReachEvent = NULL;
+ waiting = conn->flags & RX_CONN_ATTACHWAIT;
+ if (event) conn->refCount--;
+ MUTEX_EXIT(&conn->conn_data_lock);
+
+ if (waiting) {
+ if (!call) {
+ MUTEX_ENTER(&conn->conn_call_lock);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ for (i=0; i<RX_MAXCALLS; i++) {
+ struct rx_call *tc = conn->call[i];
+ if (tc && tc->state == RX_STATE_PRECALL) {
+ call = tc;
+ break;
+ }
+ }
+ if (!call)
+ /* Indicate that rxi_CheckReachEvent is no longer running by
+ * clearing the flag. Must be atomic under conn_data_lock to
+ * avoid a new call slipping by: rxi_CheckConnReach holds
+ * conn_data_lock while checking RX_CONN_ATTACHWAIT.
+ */
+ conn->flags &= ~RX_CONN_ATTACHWAIT;
+ MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&conn->conn_call_lock);
+ }
+
+ if (call) {
+ if (call != acall) MUTEX_ENTER(&call->lock);
+ rxi_SendAck(call, NULL, 0, 0, 0, RX_ACK_PING, 0);
+ if (call != acall) MUTEX_EXIT(&call->lock);
+
+ clock_GetTime(&when);
+ when.sec += RX_CHECKREACH_TIMEOUT;
+ MUTEX_ENTER(&conn->conn_data_lock);
+ if (!conn->checkReachEvent) {
+ conn->refCount++;
+ conn->checkReachEvent =
+ rxevent_Post(&when, rxi_CheckReachEvent, conn, NULL);
+ }
+ MUTEX_EXIT(&conn->conn_data_lock);
+ }
+ }
+}
+
+static int rxi_CheckConnReach(struct rx_connection *conn, struct rx_call *call)
+{
+ struct rx_service *service = conn->service;
+ struct rx_peer *peer = conn->peer;
+ afs_uint32 now, lastReach;
+
+ if (service->checkReach == 0)
+ return 0;
+
+ now = clock_Sec();
+ MUTEX_ENTER(&peer->peer_lock);
+ lastReach = peer->lastReachTime;
+ MUTEX_EXIT(&peer->peer_lock);
+ if (now - lastReach < RX_CHECKREACH_TTL)
+ return 0;
+
+ MUTEX_ENTER(&conn->conn_data_lock);
+ if (conn->flags & RX_CONN_ATTACHWAIT) {
+ MUTEX_EXIT(&conn->conn_data_lock);
+ return 1;
+ }
+ conn->flags |= RX_CONN_ATTACHWAIT;
+ MUTEX_EXIT(&conn->conn_data_lock);
+ if (!conn->checkReachEvent)
+ rxi_CheckReachEvent(NULL, conn, call);
+
+ return 1;
+}
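/* The reachability check is opt-in per service: checkReach is initialized
 * to 0 in rx_NewService. A server that wants client addresses verified as
 * reachable before a thread is committed to the call can enable it after
 * creating the service, e.g. (illustrative only):
 *
 *	tservice = rx_NewService(...);
 *	tservice->checkReach = 1;
 */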
+
/* try to attach call, if authentication is complete */
-static void TryAttach(acall, socket, tnop, newcallp)
-register struct rx_call *acall;
-register osi_socket socket;
-register int *tnop;
-register struct rx_call **newcallp; {
- register struct rx_connection *conn;
- conn = acall->conn;
- if ((conn->type==RX_SERVER_CONNECTION) && (acall->state == RX_STATE_PRECALL)) {
+static void TryAttach(register struct rx_call *acall,
+ register osi_socket socket, register int *tnop,
+ register struct rx_call **newcallp, int reachOverride)
+{
+ struct rx_connection *conn = acall->conn;
+
+ if (conn->type==RX_SERVER_CONNECTION && acall->state==RX_STATE_PRECALL) {
/* Don't attach until we have any req'd. authentication. */
if (RXS_CheckAuthentication(conn->securityObject, conn) == 0) {
- rxi_AttachServerProc(acall, socket, tnop, newcallp);
- /* Note: this does not necessarily succeed; there
- may not any proc available */
+ if (reachOverride || rxi_CheckConnReach(conn, acall) == 0)
+ rxi_AttachServerProc(acall, socket, tnop, newcallp);
+ /* Note: this does not necessarily succeed; there
+	     * may not be any proc available
+ */
}
else {
rxi_ChallengeOn(acall->conn);
* appropriate to the call (the call is in the right state, etc.). This
* routine can return a packet to the caller, for re-use */
-struct rx_packet *rxi_ReceiveDataPacket(call, np, istack, socket, host,
- port, tnop, newcallp)
- register struct rx_call *call;
- register struct rx_packet *np;
- int istack;
- osi_socket socket;
- afs_uint32 host;
- u_short port;
- int *tnop;
- struct rx_call **newcallp;
+struct rx_packet *rxi_ReceiveDataPacket(register struct rx_call *call,
+ register struct rx_packet *np, int istack, osi_socket socket,
+ afs_uint32 host, u_short port, int *tnop, struct rx_call **newcallp)
{
int ackNeeded = 0;
int newPackets = 0;
* (e.g. multi rx) */
if (call->arrivalProc) {
(*call->arrivalProc)(call, call->arrivalProcHandle,
- call->arrivalProcArg);
+ (int) call->arrivalProcArg);
call->arrivalProc = (VOID (*)()) 0;
}
* server thread is available, this thread becomes a server
* thread and the server thread becomes a listener thread. */
if (isFirst) {
- TryAttach(call, socket, tnop, newcallp);
+ TryAttach(call, socket, tnop, newcallp, 0);
}
}
/* This is not the expected next packet. */
static void rxi_ComputeRate();
#endif
+static void rxi_UpdatePeerReach(struct rx_connection *conn, struct rx_call *acall)
+{
+ struct rx_peer *peer = conn->peer;
+
+ MUTEX_ENTER(&peer->peer_lock);
+ peer->lastReachTime = clock_Sec();
+ MUTEX_EXIT(&peer->peer_lock);
+
+ MUTEX_ENTER(&conn->conn_data_lock);
+ if (conn->flags & RX_CONN_ATTACHWAIT) {
+ int i;
+
+ conn->flags &= ~RX_CONN_ATTACHWAIT;
+ MUTEX_EXIT(&conn->conn_data_lock);
+
+ for (i=0; i<RX_MAXCALLS; i++) {
+ struct rx_call *call = conn->call[i];
+ if (call) {
+ if (call != acall) MUTEX_ENTER(&call->lock);
+ TryAttach(call, (osi_socket) -1, NULL, NULL, 1);
+ if (call != acall) MUTEX_EXIT(&call->lock);
+ }
+ }
+ } else
+ MUTEX_EXIT(&conn->conn_data_lock);
+}
+
+/* rxi_ComputePeerNetStats
+ *
+ * Called exclusively by rxi_ReceiveAckPacket to compute network link
+ * estimates (like RTT and throughput) based on ack packets. Caller
+ * must ensure that the packet in question is the right one (i.e.
+ * serial number matches).
+ */
+static void
+rxi_ComputePeerNetStats(struct rx_call *call, struct rx_packet *p,
+ struct rx_ackPacket *ap, struct rx_packet *np)
+{
+ struct rx_peer *peer = call->conn->peer;
+
+ /* Use RTT if not delayed by client. */
+ if (ap->reason != RX_ACK_DELAY)
+ rxi_ComputeRoundTripTime(p, &p->timeSent, peer);
+#ifdef ADAPT_WINDOW
+ rxi_ComputeRate(peer, call, p, np, ap->reason);
+#endif
+}
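/* For reference, rxi_ComputeRoundTripTime feeds these samples into the usual
 * Jacobson-style smoothed estimator; schematically (the exact shift counts
 * live in that routine, not here):
 *
 *	err     = sample - srtt;
 *	srtt   += err / 8;
 *	rttvar += (abs(err) - rttvar) / 4;
 *	timeout = srtt + 4 * rttvar;
 */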
+
/* The real smarts of the whole thing. */
-struct rx_packet *rxi_ReceiveAckPacket(call, np, istack)
- register struct rx_call *call;
- struct rx_packet *np;
- int istack;
+struct rx_packet *rxi_ReceiveAckPacket(register struct rx_call *call,
+ struct rx_packet *np, int istack)
{
struct rx_ackPacket *ap;
int nAcks;
afs_uint32 serial;
/* because there are CM's that are bogus, sending weird values for this. */
afs_uint32 skew = 0;
- int needRxStart = 0;
int nbytes;
int missing;
int acked;
if (np->header.flags & RX_SLOW_START_OK) {
call->flags |= RX_CALL_SLOW_START_OK;
}
+
+ if (ap->reason == RX_ACK_PING_RESPONSE)
+ rxi_UpdatePeerReach(conn, call);
#ifdef RXDEBUG
if (rx_Log) {
fprintf( rx_Log,
"RACK: reason %x previous %u seq %u serial %u skew %d first %u",
- ap->reason, ntohl(ap->previousPacket), np->header.seq, serial,
- skew, ntohl(ap->firstPacket));
+ ap->reason, ntohl(ap->previousPacket),
+ (unsigned int) np->header.seq, (unsigned int) serial,
+ (unsigned int) skew, ntohl(ap->firstPacket));
if (nAcks) {
int offset;
for (offset = 0; offset < nAcks; offset++)
}
#endif
- /* if a server connection has been re-created, it doesn't remember what
- serial # it was up to. An ack will tell us, since the serial field
- contains the largest serial received by the other side */
- MUTEX_ENTER(&conn->conn_data_lock);
- if ((conn->type == RX_SERVER_CONNECTION) && (conn->serial < serial)) {
- conn->serial = serial+1;
- }
- MUTEX_EXIT(&conn->conn_data_lock);
-
/* Update the outgoing packet skew value to the latest value of
* the peer's incoming packet skew value. The ack packet, of
* course, could arrive out of order, but that won't affect things
for (queue_Scan(&call->tq, tp, nxp, rx_packet)) {
if (tp->header.seq >= first) break;
call->tfirst = tp->header.seq + 1;
- if (tp->header.serial == serial) {
- /* Use RTT if not delayed by client. */
- if (ap->reason != RX_ACK_DELAY)
- rxi_ComputeRoundTripTime(tp, &tp->timeSent, peer);
-#ifdef ADAPT_WINDOW
- rxi_ComputeRate(peer, call, tp, np, ap->reason);
-#endif
- }
- else if (tp->firstSerial == serial) {
- /* Use RTT if not delayed by client. */
- if (ap->reason != RX_ACK_DELAY)
- rxi_ComputeRoundTripTime(tp, &tp->firstSent, peer);
-#ifdef ADAPT_WINDOW
- rxi_ComputeRate(peer, call, tp, np, ap->reason);
-#endif
- }
+ if (serial && (tp->header.serial == serial ||
+ tp->firstSerial == serial))
+ rxi_ComputePeerNetStats(call, tp, ap, np);
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
/* XXX Hack. Because we have to release the global rx lock when sending
* packets (osi_NetSend) we drop all acks while we're traversing the tq
* set the ack bits in the packets and have rxi_Start remove the packets
* when it's done transmitting.
*/
- if (!tp->acked) {
+ if (!(tp->flags & RX_PKTFLAG_ACKED)) {
newAckCount++;
}
if (call->flags & RX_CALL_TQ_BUSY) {
#ifdef RX_ENABLE_LOCKS
- tp->acked = 1;
+ tp->flags |= RX_PKTFLAG_ACKED;
call->flags |= RX_CALL_TQ_SOME_ACKED;
#else /* RX_ENABLE_LOCKS */
break;
* of this packet */
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
#ifdef RX_ENABLE_LOCKS
- if (tp->header.seq >= first) {
-#endif /* RX_ENABLE_LOCKS */
-#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- if (tp->header.serial == serial) {
- /* Use RTT if not delayed by client. */
- if (ap->reason != RX_ACK_DELAY)
- rxi_ComputeRoundTripTime(tp, &tp->timeSent, peer);
-#ifdef ADAPT_WINDOW
- rxi_ComputeRate(peer, call, tp, np, ap->reason);
-#endif
- }
- else if ((tp->firstSerial == serial)) {
- /* Use RTT if not delayed by client. */
- if (ap->reason != RX_ACK_DELAY)
- rxi_ComputeRoundTripTime(tp, &tp->firstSent, peer);
-#ifdef ADAPT_WINDOW
- rxi_ComputeRate(peer, call, tp, np, ap->reason);
-#endif
- }
-#ifdef AFS_GLOBAL_RXLOCK_KERNEL
-#ifdef RX_ENABLE_LOCKS
- }
+ if (tp->header.seq >= first)
#endif /* RX_ENABLE_LOCKS */
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
+ if (serial && (tp->header.serial == serial ||
+ tp->firstSerial == serial))
+ rxi_ComputePeerNetStats(call, tp, ap, np);
/* Set the acknowledge flag per packet based on the
 * information in the ack packet. An acknowledged packet can
* out of sequence. */
if (tp->header.seq < first) {
/* Implicit ack information */
- if (!tp->acked) {
+ if (!(tp->flags & RX_PKTFLAG_ACKED)) {
newAckCount++;
}
- tp->acked = 1;
+ tp->flags |= RX_PKTFLAG_ACKED;
}
else if (tp->header.seq < first + nAcks) {
/* Explicit ack information: set it in the packet appropriately */
if (ap->acks[tp->header.seq - first] == RX_ACK_TYPE_ACK) {
- if (!tp->acked) {
+ if (!(tp->flags & RX_PKTFLAG_ACKED)) {
newAckCount++;
- tp->acked = 1;
+ tp->flags |= RX_PKTFLAG_ACKED;
}
if (missing) {
nNacked++;
call->nSoftAcked++;
}
} else {
- tp->acked = 0;
+ tp->flags &= ~RX_PKTFLAG_ACKED;
missing = 1;
}
}
else {
- tp->acked = 0;
+ tp->flags &= ~RX_PKTFLAG_ACKED;
missing = 1;
}
* ie, this should readjust the retransmit timer for all outstanding
* packets... So we don't just retransmit when we should know better*/
- if (!tp->acked && !clock_IsZero(&tp->retryTime)) {
+ if (!(tp->flags & RX_PKTFLAG_ACKED) && !clock_IsZero(&tp->retryTime)) {
tp->retryTime = tp->timeSent;
clock_Add(&tp->retryTime, &peer->timeout);
/* shift by eight because one quarter-sec ~ 256 milliseconds */
/* if the ack packet has a receivelen field hanging off it,
* update our state */
- if ( np->length >= rx_AckDataSize(ap->nAcks) +sizeof(afs_int32)) {
+ if ( np->length >= rx_AckDataSize(ap->nAcks) + 2*sizeof(afs_int32)) {
afs_uint32 tSize;
/* If the ack packet has a "recommended" size that is less than
* so we will retransmit as soon as the window permits*/
for(acked = 0, queue_ScanBackwards(&call->tq, tp, nxp, rx_packet)) {
if (acked) {
- if (!tp->acked) {
+ if (!(tp->flags & RX_PKTFLAG_ACKED)) {
clock_Zero(&tp->retryTime);
}
- } else if (tp->acked) {
+ } else if (tp->flags & RX_PKTFLAG_ACKED) {
acked = 1;
}
}
}
/* Received a response to a challenge packet */
-struct rx_packet *rxi_ReceiveResponsePacket(conn, np, istack)
- register struct rx_connection *conn;
- register struct rx_packet *np;
- int istack;
+struct rx_packet *rxi_ReceiveResponsePacket(register struct rx_connection *conn,
+ register struct rx_packet *np, int istack)
{
int error;
}
else {
/* If the response is valid, any calls waiting to attach
- * servers can now do so */
+ * servers can now do so */
int i;
+
for (i=0; i<RX_MAXCALLS; i++) {
struct rx_call *call = conn->call[i];
if (call) {
MUTEX_ENTER(&call->lock);
if (call->state == RX_STATE_PRECALL)
- rxi_AttachServerProc(call, -1, NULL, NULL);
+ rxi_AttachServerProc(call, (osi_socket) -1, NULL, NULL);
MUTEX_EXIT(&call->lock);
}
}
+
+ /* Update the peer reachability information, just in case
+ * some calls went into attach-wait while we were waiting
+     * for authentication.
+ */
+ rxi_UpdatePeerReach(conn, NULL);
}
return np;
}
* back to the server. The server is responsible for retrying the
* challenge if it fails to get a response. */
-struct rx_packet *
-rxi_ReceiveChallengePacket(conn, np, istack)
- register struct rx_connection *conn;
- register struct rx_packet *np;
- int istack;
+struct rx_packet *rxi_ReceiveChallengePacket(register struct rx_connection *conn,
+ register struct rx_packet *np, int istack)
{
int error;
}
else {
np = rxi_SendSpecial((struct rx_call *)0, conn, np,
- RX_PACKET_TYPE_RESPONSE, (char *) 0, -1, istack);
+ RX_PACKET_TYPE_RESPONSE, NULL, -1, istack);
}
return np;
}
/* Find an available server process to service the current request in
* the given call structure. If one isn't available, queue up this
* call so it eventually gets one */
-void
-rxi_AttachServerProc(call, socket, tnop, newcallp)
-register struct rx_call *call;
-register osi_socket socket;
-register int *tnop;
-register struct rx_call **newcallp;
+void rxi_AttachServerProc(register struct rx_call *call,
+ register osi_socket socket, register int *tnop, register struct rx_call **newcallp)
{
register struct rx_serverQueueEntry *sq;
register struct rx_service *service = call->conn->service;
* a new call is being prepared (in the case of a client) or a reply
* is being prepared (in the case of a server). Rather than sending
* an ack packet, an ACKALL packet is sent. */
-void rxi_AckAll(event, call, dummy)
-struct rxevent *event;
-register struct rx_call *call;
-char *dummy;
+void rxi_AckAll(struct rxevent *event, register struct rx_call *call, char *dummy)
{
#ifdef RX_ENABLE_LOCKS
if (event) {
MUTEX_ENTER(&call->lock);
- call->delayedAckEvent = (struct rxevent *) 0;
+ call->delayedAckEvent = NULL;
CALL_RELE(call, RX_CALL_REFCOUNT_ACKALL);
}
rxi_SendSpecial(call, call->conn, (struct rx_packet *) 0,
- RX_PACKET_TYPE_ACKALL, (char *) 0, 0, 0);
+ RX_PACKET_TYPE_ACKALL, NULL, 0, 0);
if (event)
MUTEX_EXIT(&call->lock);
#else /* RX_ENABLE_LOCKS */
- if (event) call->delayedAckEvent = (struct rxevent *) 0;
+ if (event) call->delayedAckEvent = NULL;
rxi_SendSpecial(call, call->conn, (struct rx_packet *) 0,
- RX_PACKET_TYPE_ACKALL, (char *) 0, 0, 0);
+ RX_PACKET_TYPE_ACKALL, NULL, 0, 0);
#endif /* RX_ENABLE_LOCKS */
}
-void rxi_SendDelayedAck(event, call, dummy)
-struct rxevent *event;
-register struct rx_call *call;
-char *dummy;
+void rxi_SendDelayedAck(struct rxevent *event, register struct rx_call *call, char *dummy)
{
#ifdef RX_ENABLE_LOCKS
if (event) {
MUTEX_ENTER(&call->lock);
if (event == call->delayedAckEvent)
- call->delayedAckEvent = (struct rxevent *) 0;
+ call->delayedAckEvent = NULL;
CALL_RELE(call, RX_CALL_REFCOUNT_DELAY);
}
(void) rxi_SendAck(call, 0, 0, 0, 0, RX_ACK_DELAY, 0);
if (event)
MUTEX_EXIT(&call->lock);
#else /* RX_ENABLE_LOCKS */
- if (event) call->delayedAckEvent = (struct rxevent *) 0;
+ if (event) call->delayedAckEvent = NULL;
(void) rxi_SendAck(call, 0, 0, 0, 0, RX_ACK_DELAY, 0);
#endif /* RX_ENABLE_LOCKS */
}
/* Set ack in all packets in transmit queue. rxi_Start will deal with
* clearing them out.
*/
-static void rxi_SetAcksInTransmitQueue(call)
- register struct rx_call *call;
+static void rxi_SetAcksInTransmitQueue(register struct rx_call *call)
{
register struct rx_packet *p, *tp;
int someAcked = 0;
for (queue_Scan(&call->tq, p, tp, rx_packet)) {
if (!p)
break;
- p->acked = 1;
+ p->flags |= RX_PKTFLAG_ACKED;
someAcked = 1;
}
if (someAcked) {
/* Clear out the transmit queue for the current call (all packets have
* been received by peer) */
-void rxi_ClearTransmitQueue(call, force)
- register struct rx_call *call;
- register int force;
+void rxi_ClearTransmitQueue(register struct rx_call *call, register int force)
{
register struct rx_packet *p, *tp;
for (queue_Scan(&call->tq, p, tp, rx_packet)) {
if (!p)
break;
- p->acked = 1;
+ p->flags |= RX_PKTFLAG_ACKED;
someAcked = 1;
}
if (someAcked) {
#endif
}
-void rxi_ClearReceiveQueue(call)
- register struct rx_call *call;
+void rxi_ClearReceiveQueue(register struct rx_call *call)
{
register struct rx_packet *p, *tp;
if (queue_IsNotEmpty(&call->rq)) {
}
/* Send an abort packet for the specified call */
-struct rx_packet *rxi_SendCallAbort(call, packet, istack, force)
- register struct rx_call *call;
- struct rx_packet *packet;
- int istack;
- int force;
+struct rx_packet *rxi_SendCallAbort(register struct rx_call *call,
+ struct rx_packet *packet, int istack, int force)
{
afs_int32 error;
struct clock when;
* NOTE: Called with conn_data_lock held. conn_data_lock is dropped
* to send the abort packet.
*/
-struct rx_packet *rxi_SendConnectionAbort(conn, packet, istack, force)
- register struct rx_connection *conn;
- struct rx_packet *packet;
- int istack;
- int force;
+struct rx_packet *rxi_SendConnectionAbort(register struct rx_connection *conn,
+ struct rx_packet *packet, int istack, int force)
{
afs_int32 error;
struct clock when;
* bad authentication responses. The connection itself is set in
* error at this point, so that future packets received will be
* rejected. */
-void rxi_ConnectionError(conn, error)
- register struct rx_connection *conn;
- register afs_int32 error;
+void rxi_ConnectionError(register struct rx_connection *conn,
+ register afs_int32 error)
{
if (error) {
register int i;
+ MUTEX_ENTER(&conn->conn_data_lock);
if (conn->challengeEvent)
rxevent_Cancel(conn->challengeEvent, (struct rx_call*)0, 0);
+ if (conn->checkReachEvent) {
+ rxevent_Cancel(conn->checkReachEvent, (struct rx_call*)0, 0);
+ conn->checkReachEvent = 0;
+ conn->flags &= ~RX_CONN_ATTACHWAIT;
+ conn->refCount--;
+ }
+ MUTEX_EXIT(&conn->conn_data_lock);
for (i=0; i<RX_MAXCALLS; i++) {
struct rx_call *call = conn->call[i];
if (call) {
}
}
-void rxi_CallError(call, error)
- register struct rx_call *call;
- afs_int32 error;
+void rxi_CallError(register struct rx_call *call, afs_int32 error)
{
if (call->error) error = call->error;
#ifdef RX_GLOBAL_RXLOCK_KERNEL
/* this code requires that call->conn be set properly as a pre-condition. */
#endif /* ADAPT_WINDOW */
-void rxi_ResetCall(call, newcall)
- register struct rx_call *call;
- register int newcall;
+void rxi_ResetCall(register struct rx_call *call, register int newcall)
{
register int flags;
register struct rx_peer *peer;
/* Notify anyone who is waiting for asynchronous packet arrival */
if (call->arrivalProc) {
- (*call->arrivalProc)(call, call->arrivalProcHandle, call->arrivalProcArg);
+ (*call->arrivalProc)(call, call->arrivalProcHandle, (int) call->arrivalProcArg);
call->arrivalProc = (VOID (*)()) 0;
}
* NOW there is a trailer field, after the ack where it will safely be
* ignored by mundanes, which indicates the maximum size packet this
* host can swallow. */
-struct rx_packet *rxi_SendAck(call, optionalPacket, seq, serial, pflags, reason, istack)
- register struct rx_call *call;
- register struct rx_packet *optionalPacket; /* use to send ack (or null) */
- int seq; /* Sequence number of the packet we are acking */
- int serial; /* Serial number of the packet */
- int pflags; /* Flags field from packet header */
- int reason; /* Reason an acknowledge was prompted */
- int istack;
+/*
+ * optionalPacket: packet to use to send the ack (or NULL)
+ * seq:            sequence number of the packet we are acking
+ * serial:         serial number of the packet
+ * pflags:         flags field from the packet header
+ * reason:         reason an acknowledge was prompted
+ */
+
+struct rx_packet *rxi_SendAck(register struct rx_call *call,
+ register struct rx_packet *optionalPacket, int seq, int serial,
+ int pflags, int reason, int istack)
{
struct rx_ackPacket *ap;
register struct rx_packet *rqp;
/* The skew computation used to be bogus, I think it's better now. */
/* We should start paying attention to skew. XXX */
- ap->serial = htonl(call->conn->maxSerial);
+ ap->serial = htonl(serial);
ap->maxSkew = 0; /* used to be peer->inPacketSkew */
ap->firstPacket = htonl(call->rnext); /* First packet not yet forwarded to reader */
#ifdef RXDEBUG
if (rx_Log) {
fprintf(rx_Log, "SACK: reason %x previous %u seq %u first %u",
- ap->reason, ntohl(ap->previousPacket), p->header.seq,
- ntohl(ap->firstPacket));
+ ap->reason, ntohl(ap->previousPacket),
+ (unsigned int) p->header.seq, ntohl(ap->firstPacket));
if (ap->nAcks) {
for (offset = 0; offset < ap->nAcks; offset++)
putc(ap->acks[offset] == RX_ACK_TYPE_NACK? '-' : '*', rx_Log);
}
/* Send all of the packets in the list in single datagram */
-static void rxi_SendList(call, list, len, istack, moreFlag, now, retryTime, resending)
- struct rx_call *call;
- struct rx_packet **list;
- int len;
- int istack;
- int moreFlag;
- struct clock *now;
- struct clock *retryTime;
- int resending;
+static void rxi_SendList(struct rx_call *call, struct rx_packet **list,
+ int len, int istack, int moreFlag, struct clock *now,
+ struct clock *retryTime, int resending)
{
int i;
int requestAck = 0;
* We always keep the last list we should have sent so we
* can set the RX_MORE_PACKETS flags correctly.
*/
-static void rxi_SendXmitList(call, list, len, istack, now, retryTime, resending)
- struct rx_call *call;
- struct rx_packet **list;
- int len;
- int istack;
- struct clock *now;
- struct clock *retryTime;
- int resending;
+static void rxi_SendXmitList(struct rx_call *call, struct rx_packet **list,
+ int len, int istack, struct clock *now, struct clock *retryTime,
+ int resending)
{
int i, cnt, lastCnt = 0;
struct rx_packet **listP, **lastP = 0;
/* Does the current packet force us to flush the current list? */
if (cnt > 0
&& (list[i]->header.serial
- || list[i]->acked
+ || (list[i]->flags & RX_PKTFLAG_ACKED)
|| list[i]->length > RX_JUMBOBUFFERSIZE)) {
if (lastCnt > 0) {
rxi_SendList(call, lastP, lastCnt, istack, 1, now, retryTime, resending);
}
/* Add the current packet to the list if it hasn't been acked.
* Otherwise adjust the list pointer to skip the current packet. */
- if (!list[i]->acked) {
+ if (!(list[i]->flags & RX_PKTFLAG_ACKED)) {
cnt++;
/* Do we need to flush the list? */
if (cnt >= (int)peer->maxDgramPackets
* an acked packet. Since we always send retransmissions
* in a separate packet, we only need to check the first
* packet in the list */
- if (cnt > 0 && !listP[0]->acked) {
+ if (cnt > 0 && !(listP[0]->flags & RX_PKTFLAG_ACKED)) {
morePackets = 1;
}
if (lastCnt > 0) {
#ifdef RX_ENABLE_LOCKS
/* Call rxi_Start, below, but with the call lock held. */
-void rxi_StartUnlocked(event, call, istack)
- struct rxevent *event;
- register struct rx_call *call;
- int istack;
+void rxi_StartUnlocked(struct rxevent *event, register struct rx_call *call,
+ int istack)
{
MUTEX_ENTER(&call->lock);
rxi_Start(event, call, istack);
* transmission window or burst count are favourable. This should be
* better optimized for new packets, the usual case, now that we've
* got rid of queues of send packets. XXXXXXXXXXX */
-void rxi_Start(event, call, istack)
- struct rxevent *event;
- register struct rx_call *call;
- int istack;
+void rxi_Start(struct rxevent *event, register struct rx_call *call,
+ int istack)
{
struct rx_packet *p;
register struct rx_packet *nxp; /* Next pointer for queue_Scan */
* than recovery rates.
*/
for(queue_Scan(&call->tq, p, nxp, rx_packet)) {
- if (!p->acked) {
+ if (!(p->flags & RX_PKTFLAG_ACKED)) {
clock_Zero(&p->retryTime);
}
}
/* Only send one packet during fast recovery */
break;
}
- if ((p->header.flags == RX_FREE_PACKET) ||
+ if ((p->flags & RX_PKTFLAG_FREE) ||
(!queue_IsEnd(&call->tq, nxp)
- && (nxp->header.flags == RX_FREE_PACKET)) ||
+ && (nxp->flags & RX_PKTFLAG_FREE)) ||
(p == (struct rx_packet *)&rx_freePacketQueue) ||
(nxp == (struct rx_packet *)&rx_freePacketQueue)) {
osi_Panic("rxi_Start: xmit queue clobbered");
}
- if (p->acked) {
+ if (p->flags & RX_PKTFLAG_ACKED) {
MUTEX_ENTER(&rx_stats_mutex);
rx_stats.ignoreAckedPacket++;
MUTEX_EXIT(&rx_stats_mutex);
* the transmit queue.
*/
for (missing = 0, queue_Scan(&call->tq, p, nxp, rx_packet)) {
- if (p->header.seq < call->tfirst && p->acked) {
+ if (p->header.seq < call->tfirst && (p->flags & RX_PKTFLAG_ACKED)) {
queue_Remove(p);
rxi_FreePacket(p);
}
break;
}
- if (!p->acked && !clock_IsZero(&p->retryTime)) {
+ if (!(p->flags & RX_PKTFLAG_ACKED) && !clock_IsZero(&p->retryTime)) {
haveEvent = 1;
retryTime = p->retryTime;
break;
CALL_HOLD(call, RX_CALL_REFCOUNT_RESEND);
call->resendEvent = rxevent_Post(&retryTime,
rxi_StartUnlocked,
- (char *)call, istack);
+ (void *)call, (void *)istack);
#else /* RX_ENABLE_LOCKS */
call->resendEvent = rxevent_Post(&retryTime, rxi_Start,
- (char *)call, (void*)(long)istack);
+ (void *)call, (void *)istack);
#endif /* RX_ENABLE_LOCKS */
}
}
/* Also adjusts the keep alive parameters for the call, to reflect
* that we have just sent a packet (so keep alives aren't sent
* immediately) */
-void rxi_Send(call, p, istack)
- register struct rx_call *call;
- register struct rx_packet *p;
- int istack;
+void rxi_Send(register struct rx_call *call, register struct rx_packet *p,
+ int istack)
{
register struct rx_connection *conn = call->conn;
* falls through the cracks (e.g. (error + dally) connections have keepalive
 * turned off). Returns 0 if conn is well, -1 otherwise. If otherwise, call
* may be freed!
+ * haveCTLock Set if calling from rxi_ReapConnections
*/
#ifdef RX_ENABLE_LOCKS
-int rxi_CheckCall(call, haveCTLock)
- int haveCTLock; /* Set if calling from rxi_ReapConnections */
+int rxi_CheckCall(register struct rx_call *call, int haveCTLock)
#else /* RX_ENABLE_LOCKS */
-int rxi_CheckCall(call)
+int rxi_CheckCall(register struct rx_call *call)
#endif /* RX_ENABLE_LOCKS */
- register struct rx_call *call;
{
register struct rx_connection *conn = call->conn;
register struct rx_service *tservice;
* declared dead; if nothing has been sent for a while, we send a
* keep-alive packet (if we're actually trying to keep the call alive)
*/
-void rxi_KeepAliveEvent(event, call, dummy)
- struct rxevent *event;
- register struct rx_call *call;
+void rxi_KeepAliveEvent(struct rxevent *event, register struct rx_call *call,
+ char *dummy)
{
struct rx_connection *conn;
afs_uint32 now;
MUTEX_ENTER(&call->lock);
CALL_RELE(call, RX_CALL_REFCOUNT_ALIVE);
if (event == call->keepAliveEvent)
- call->keepAliveEvent = (struct rxevent *) 0;
+ call->keepAliveEvent = NULL;
now = clock_Sec();
#ifdef RX_ENABLE_LOCKS
}
-void rxi_ScheduleKeepAliveEvent(call)
- register struct rx_call *call;
+void rxi_ScheduleKeepAliveEvent(register struct rx_call *call)
{
if (!call->keepAliveEvent) {
struct clock when;
}
/* N.B. rxi_KeepAliveOff: is defined earlier as a macro */
-void rxi_KeepAliveOn(call)
- register struct rx_call *call;
+void rxi_KeepAliveOn(register struct rx_call *call)
{
/* Pretend last packet received was received now--i.e. if another
* packet isn't received within the keep alive time, then the call
/* This routine is called to send connection abort messages
* that have been delayed to throttle looping clients. */
-void rxi_SendDelayedConnAbort(event, conn, dummy)
- struct rxevent *event;
- register struct rx_connection *conn;
- char *dummy;
+void rxi_SendDelayedConnAbort(struct rxevent *event, register struct rx_connection *conn,
+ char *dummy)
{
afs_int32 error;
struct rx_packet *packet;
MUTEX_ENTER(&conn->conn_data_lock);
- conn->delayedAbortEvent = (struct rxevent *) 0;
+ conn->delayedAbortEvent = NULL;
error = htonl(conn->error);
conn->abortCount++;
MUTEX_EXIT(&conn->conn_data_lock);
/* This routine is called to send call abort messages
* that have been delayed to throttle looping clients. */
-void rxi_SendDelayedCallAbort(event, call, dummy)
- struct rxevent *event;
- register struct rx_call *call;
- char *dummy;
+void rxi_SendDelayedCallAbort(struct rxevent *event, register struct rx_call *call,
+ char *dummy)
{
afs_int32 error;
struct rx_packet *packet;
MUTEX_ENTER(&call->lock);
- call->delayedAbortEvent = (struct rxevent *) 0;
+ call->delayedAbortEvent = NULL;
error = htonl(call->error);
call->abortCount++;
packet = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL);
* seconds) to ask the client to authenticate itself. The routine
* issues a challenge to the client, which is obtained from the
* security object associated with the connection */
-void rxi_ChallengeEvent(event, conn, dummy)
- struct rxevent *event;
- register struct rx_connection *conn;
- char *dummy;
+void rxi_ChallengeEvent(struct rxevent *event, register struct rx_connection *conn,
+ void *atries)
{
- conn->challengeEvent = (struct rxevent *) 0;
+ int tries = (int)(long) atries;
+ conn->challengeEvent = NULL;
if (RXS_CheckAuthentication(conn->securityObject, conn) != 0) {
register struct rx_packet *packet;
struct clock when;
+
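+ /* A challenge is re-posted every RX_CHALLENGE_TIMEOUT seconds until the
+ * client authenticates. "tries" is the remaining retry budget, seeded
+ * with RX_CHALLENGE_MAXTRIES by rxi_ChallengeOn; once it runs out we stop
+ * challenging and abort any calls still waiting in RX_STATE_PRECALL. */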
+ if (tries <= 0) {
+ /* We've failed to authenticate for too long.
+ * Reset any calls waiting for authentication;
+ * they are all in RX_STATE_PRECALL.
+ */
+ int i;
+
+ MUTEX_ENTER(&conn->conn_call_lock);
+ for (i=0; i<RX_MAXCALLS; i++) {
+ struct rx_call *call = conn->call[i];
+ if (call) {
+ MUTEX_ENTER(&call->lock);
+ if (call->state == RX_STATE_PRECALL) {
+ rxi_CallError(call, RX_CALL_DEAD);
+ rxi_SendCallAbort(call, NULL, 0, 0);
+ }
+ MUTEX_EXIT(&call->lock);
+ }
+ }
+ MUTEX_EXIT(&conn->conn_call_lock);
+ return;
+ }
+
packet = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL);
if (packet) {
/* If there's no packet available, do this later. */
RXS_GetChallenge(conn->securityObject, conn, packet);
rxi_SendSpecial((struct rx_call *) 0, conn, packet,
- RX_PACKET_TYPE_CHALLENGE, (char *) 0, -1, 0);
+ RX_PACKET_TYPE_CHALLENGE, NULL, -1, 0);
rxi_FreePacket(packet);
}
clock_GetTime(&when);
when.sec += RX_CHALLENGE_TIMEOUT;
- conn->challengeEvent = rxevent_Post(&when, rxi_ChallengeEvent, conn, 0);
+ conn->challengeEvent =
+ rxevent_Post(&when, rxi_ChallengeEvent, conn, (void *)(long)(tries - 1));
}
}
* security object associated with the connection is asked to create
* the challenge at this time. N.B. rxi_ChallengeOff is a macro,
* defined earlier. */
-void rxi_ChallengeOn(conn)
- register struct rx_connection *conn;
+void rxi_ChallengeOn(register struct rx_connection *conn)
{
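+ /* Only start a challenge sequence if one is not already pending; passing
+ * a null event sends the first challenge immediately. */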
if (!conn->challengeEvent) {
RXS_CreateChallenge(conn->securityObject, conn);
- rxi_ChallengeEvent((struct rxevent *)0, conn, NULL);
+ rxi_ChallengeEvent(NULL, conn, (void *)(long) RX_CHALLENGE_MAXTRIES);
};
}
*/
/* rxi_ComputeRoundTripTime is called with peer locked. */
-void rxi_ComputeRoundTripTime(p, sentp, peer)
- register struct clock *sentp; /* may be null */
- register struct rx_peer *peer; /* may be null */
- register struct rx_packet *p;
+/* sentp and/or peer may be null */
+void rxi_ComputeRoundTripTime(register struct rx_packet *p,
+ register struct clock *sentp, register struct rx_peer *peer)
{
struct clock thisRtt, *rttp = &thisRtt;
struct timeval temptime;
#endif
register int rtt_timeout;
- static char id[]="@(#)adaptive RTO";
#if defined(AFS_ALPHA_LINUX20_ENV) && defined(AFS_PTHREAD_ENV) && !defined(KERNEL)
/* yet again. This was the worst Heisenbug of the port - stroucki */
/* Find all server connections that have not been active for a long time, and
* toss them */
-void rxi_ReapConnections()
+void rxi_ReapConnections(void)
{
struct clock now;
clock_GetTime(&now);
* This is the only rxs module call. A hold could also be written but no one
* needs it. */
-int rxs_Release (aobj)
- struct rx_securityClass *aobj;
+int rxs_Release (struct rx_securityClass *aobj)
{
return RXS_Close (aobj);
}
* Called with peer and call locked.
*/
-static void rxi_ComputeRate(peer, call, p, ackp, ackReason)
- register struct rx_peer *peer;
- register struct rx_call *call;
- struct rx_packet *p, *ackp;
- u_char ackReason;
+static void rxi_ComputeRate(register struct rx_peer *peer,
+ register struct rx_call *call, struct rx_packet *p,
+ struct rx_packet *ackp, u_char ackReason)
{
afs_int32 xferSize, xferMs;
register afs_int32 minTime;
#ifdef RXDEBUG
/* Don't call this debugging routine directly; use dpf */
void
-rxi_DebugPrint(format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
- a11, a12, a13, a14, a15)
- char *format;
+rxi_DebugPrint(char *format, int a1, int a2, int a3, int a4, int a5, int a6, int a7, int a8, int a9, int a10,
+ int a11, int a12, int a13, int a14, int a15)
{
struct clock now;
clock_GetTime(&now);
- fprintf(rx_Log, " %u.%.3u:", now.sec, now.usec/1000);
+ fprintf(rx_Log, " %u.%.3u:", (unsigned int) now.sec, (unsigned int) now.usec/1000);
fprintf(rx_Log, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15);
putc('\n', rx_Log);
}
* process (via rxdebug). Therefore, it needs to do minimal version
* checking.
*/
-void rx_PrintTheseStats (file, s, size, freePackets, version)
- FILE *file;
- struct rx_stats *s;
- int size; /* some idea of version control */
- afs_int32 freePackets;
- char version;
+void rx_PrintTheseStats (FILE *file, struct rx_stats *s, int size,
+ afs_int32 freePackets, char version)
{
int i;
}
fprintf(file,
- "rx stats: free packets %d, "
- "allocs %d, ",
- freePackets,
+ "rx stats: free packets %d, allocs %d, ",
+ (int) freePackets,
s->packetRequests);
if (version >= RX_DEBUGI_VERSION_W_NEWPACKETTYPES) {
" \t(these should be small) sendFailed %d, "
"fatalErrors %d\n",
s->netSendFailures,
- s->fatalErrors);
+ (int) s->fatalErrors);
if (s->nRttSamples) {
fprintf(file,
}
/* for backward compatibility */
-void rx_PrintStats(file)
- FILE *file;
+void rx_PrintStats(FILE *file)
{
MUTEX_ENTER(&rx_stats_mutex);
rx_PrintTheseStats (file, &rx_stats, sizeof(rx_stats), rx_nFreePackets, RX_DEBUGI_VERSION);
MUTEX_EXIT(&rx_stats_mutex);
}
-void rx_PrintPeerStats(file, peer)
-FILE *file;
-struct rx_peer *peer;
+void rx_PrintPeerStats(FILE *file, struct rx_peer *peer)
{
fprintf(file,
"Peer %x.%d. "
"Burst size %d, "
"burst wait %u.%d.\n",
ntohl(peer->host),
- peer->port,
- peer->burstSize,
- peer->burstWait.sec,
- peer->burstWait.usec);
+ (int) peer->port,
+ (int) peer->burstSize,
+ (int) peer->burstWait.sec,
+ (int) peer->burstWait.usec);
fprintf(file,
" Rtt %d, "
"total sent %d, "
"resent %d\n",
peer->rtt,
- peer->timeout.sec,
- peer->timeout.usec,
+ (int) peer->timeout.sec,
+ (int) peer->timeout.usec,
peer->nSent,
peer->reSends);
"max in packet skew %d, "
"max out packet skew %d\n",
peer->ifMTU,
- peer->inPacketSkew,
- peer->outPacketSkew);
+ (int) peer->inPacketSkew,
+ (int) peer->outPacketSkew);
}
#ifdef AFS_PTHREAD_ENV
#define UNLOCK_RX_DEBUG
#endif /* AFS_PTHREAD_ENV */
-static int MakeDebugCall(
- int socket,
- afs_uint32 remoteAddr,
- afs_uint16 remotePort,
- u_char type,
- void *inputData,
- size_t inputLength,
- void *outputData,
- size_t outputLength
-)
+static int MakeDebugCall(osi_socket socket, afs_uint32 remoteAddr,
+ afs_uint16 remotePort, u_char type, void *inputData, size_t inputLength,
+ void *outputData, size_t outputLength)
{
static afs_int32 counter = 100;
afs_int32 endTime;
taddr.sin_family = AF_INET;
taddr.sin_port = remotePort;
taddr.sin_addr.s_addr = remoteAddr;
+#ifdef STRUCT_SOCKADDR_HAS_SA_LEN
+ taddr.sin_len = sizeof(struct sockaddr_in);
+#endif
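+ /* On stacks whose sockaddr carries a length field (BSD style), sin_len
+ * is filled in as well; STRUCT_SOCKADDR_HAS_SA_LEN is presumably defined
+ * by the configure checks. */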
while(1) {
memset(&theader, 0, sizeof(theader));
theader.epoch = htonl(999);
theader.flags = RX_CLIENT_INITIATED | RX_LAST_PACKET;
theader.serviceId = 0;
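+ /* rx header first, then the caller's request data, into the transmit buffer */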
- bcopy(&theader, tbuffer, sizeof(theader));
- bcopy(inputData, tp, inputLength);
+ memcpy(tbuffer, &theader, sizeof(theader));
+ memcpy(tp, inputData, inputLength);
code = sendto(socket, tbuffer, inputLength+sizeof(struct rx_header), 0,
(struct sockaddr *) &taddr, sizeof(struct sockaddr_in));
code = recvfrom(socket, tbuffer, sizeof(tbuffer), 0,
(struct sockaddr *) &faddr, &faddrLen);
- bcopy(tbuffer, &theader, sizeof(struct rx_header));
+ memcpy(&theader, tbuffer, sizeof(struct rx_header));
if (counter == ntohl(theader.callNumber)) break;
}
}
code -= sizeof(struct rx_header);
if (code > outputLength) code = outputLength;
- bcopy(tp, outputData, code);
+ memcpy(outputData, tp, code);
return code;
}
-afs_int32 rx_GetServerDebug(
- int socket,
- afs_uint32 remoteAddr,
- afs_uint16 remotePort,
- struct rx_debugStats *stat,
- afs_uint32 *supportedValues
-)
+afs_int32 rx_GetServerDebug(osi_socket socket, afs_uint32 remoteAddr,
+ afs_uint16 remotePort, struct rx_debugStats *stat, afs_uint32 *supportedValues)
{
struct rx_debugIn in;
afs_int32 rc = 0;
return rc;
}
-afs_int32 rx_GetServerStats(
- int socket,
- afs_uint32 remoteAddr,
- afs_uint16 remotePort,
- struct rx_stats *stat,
- afs_uint32 *supportedValues
-)
+afs_int32 rx_GetServerStats(osi_socket socket, afs_uint32 remoteAddr,
+ afs_uint16 remotePort, struct rx_stats *stat, afs_uint32 *supportedValues)
{
struct rx_debugIn in;
afs_int32 *lp = (afs_int32 *) stat;
return rc;
}
-afs_int32 rx_GetServerVersion(
- int socket,
- afs_uint32 remoteAddr,
- afs_uint16 remotePort,
- size_t version_length,
- char *version
-)
+afs_int32 rx_GetServerVersion(osi_socket socket, afs_uint32 remoteAddr,
+ afs_uint16 remotePort, size_t version_length, char *version)
{
char a[1] = {0};
return MakeDebugCall(socket,
version_length);
}
-afs_int32 rx_GetServerConnections(
- int socket,
- afs_uint32 remoteAddr,
- afs_uint16 remotePort,
- afs_int32 *nextConnection,
- int allConnections,
- afs_uint32 debugSupportedValues,
- struct rx_debugConn *conn,
- afs_uint32 *supportedValues
-)
+afs_int32 rx_GetServerConnections(osi_socket socket, afs_uint32 remoteAddr,
+ afs_uint16 remotePort, afs_int32 *nextConnection, int allConnections,
+ afs_uint32 debugSupportedValues, struct rx_debugConn *conn, afs_uint32 *supportedValues)
{
struct rx_debugIn in;
afs_int32 rc = 0;
return rc;
}
-afs_int32 rx_GetServerPeers(
- int socket,
- afs_uint32 remoteAddr,
- afs_uint16 remotePort,
- afs_int32 *nextPeer,
- afs_uint32 debugSupportedValues,
- struct rx_debugPeer *peer,
- afs_uint32 *supportedValues
-)
+afs_int32 rx_GetServerPeers(osi_socket socket, afs_uint32 remoteAddr, afs_uint16 remotePort,
+ afs_int32 *nextPeer, afs_uint32 debugSupportedValues, struct rx_debugPeer *peer,
+ afs_uint32 *supportedValues)
{
struct rx_debugIn in;
afs_int32 rc = 0;
- int i;
/*
* supportedValues is currently unused, but added to allow future
{
struct rx_serverQueueEntry *np;
register int i, j;
+#ifndef KERNEL
register struct rx_call *call;
register struct rx_serverQueueEntry *sq;
+#endif /* KERNEL */
LOCK_RX_INIT
if (rxinit_status == 1) {
MUTEX_ENTER(&freeSQEList_lock);
- while (np = rx_FreeSQEList) {
+ while ((np = rx_FreeSQEList)) {
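+ /* the next-free pointer is stored in the first word of each entry */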
rx_FreeSQEList = *(struct rx_serverQueueEntry **)np;
MUTEX_DESTROY(&np->lock);
rxi_Free(np, sizeof(*np));
* queue.
*/
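+ /* If no matching entry was found, rpc_stat may be the queue head itself,
+ * so check queue_IsEnd before looking at stats[0]. */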
- if ((rpc_stat == NULL) ||
+ if (queue_IsEnd(stats, rpc_stat) ||
+ (rpc_stat == NULL) ||
(rpc_stat->stats[0].interfaceId != rxInterface) ||
(rpc_stat->stats[0].remote_is_server != isServer)) {
int i;
* Returns void.
*/
-void rx_IncrementTimeAndCount(
- struct rx_peer *peer,
- afs_uint32 rxInterface,
- afs_uint32 currentFunc,
- afs_uint32 totalFunc,
- struct clock *queueTime,
- struct clock *execTime,
- afs_hyper_t *bytesSent,
- afs_hyper_t *bytesRcvd,
- int isServer)
+void rx_IncrementTimeAndCount(struct rx_peer *peer, afs_uint32 rxInterface,
+ afs_uint32 currentFunc, afs_uint32 totalFunc, struct clock *queueTime,
+ struct clock *execTime, afs_hyper_t *bytesSent, afs_hyper_t *bytesRcvd, int isServer)
{
MUTEX_ENTER(&rx_rpc_stats);
*
* Returns void.
*/
-void rx_MarshallProcessRPCStats(
- afs_uint32 callerVersion,
- int count,
- rx_function_entry_v1_t *stats,
- afs_uint32 **ptrP)
+void rx_MarshallProcessRPCStats(afs_uint32 callerVersion,
+ int count, rx_function_entry_v1_t *stats, afs_uint32 **ptrP)
{
int i;
afs_uint32 *ptr;
* Returns void. If successful, stats will != NULL.
*/
-int rx_RetrieveProcessRPCStats(
- afs_uint32 callerVersion,
- afs_uint32 *myVersion,
- afs_uint32 *clock_sec,
- afs_uint32 *clock_usec,
- size_t *allocSize,
- afs_uint32 *statCount,
- afs_uint32 **stats)
+int rx_RetrieveProcessRPCStats(afs_uint32 callerVersion,
+ afs_uint32 *myVersion, afs_uint32 *clock_sec, afs_uint32 *clock_usec,
+ size_t *allocSize, afs_uint32 *statCount, afs_uint32 **stats)
{
size_t space = 0;
afs_uint32 *ptr;
ptr = *stats = (afs_uint32 *) rxi_Alloc(space);
if (ptr != NULL) {
- register struct rx_peer *pp;
- int i;
- int num_copied = 0;
rx_interface_stat_p rpc_stat, nrpc_stat;
* Returns void. If successful, stats will != NULL.
*/
-int rx_RetrievePeerRPCStats(
- afs_uint32 callerVersion,
- afs_uint32 *myVersion,
- afs_uint32 *clock_sec,
- afs_uint32 *clock_usec,
- size_t *allocSize,
- afs_uint32 *statCount,
- afs_uint32 **stats)
+int rx_RetrievePeerRPCStats(afs_uint32 callerVersion,
+ afs_uint32 *myVersion, afs_uint32 *clock_sec, afs_uint32 *clock_usec,
+ size_t *allocSize, afs_uint32 *statCount, afs_uint32 **stats)
{
size_t space = 0;
afs_uint32 *ptr;
ptr = *stats = (afs_uint32 *) rxi_Alloc(space);
if (ptr != NULL) {
- int i;
- int num_copied = 0;
rx_interface_stat_p rpc_stat, nrpc_stat;
char *fix_offset;
* Returns void.
*/
-void rx_FreeRPCStats(
- afs_uint32 *stats,
- size_t allocSize)
+void rx_FreeRPCStats(afs_uint32 *stats, size_t allocSize)
{
rxi_Free(stats, allocSize);
}
* Returns 0 if stats are not enabled, != 0 otherwise
*/
-int rx_queryProcessRPCStats()
+int rx_queryProcessRPCStats(void)
{
int rc;
MUTEX_ENTER(&rx_rpc_stats);
* Returns 0 if stats are not enabled, != 0 otherwise
*/
-int rx_queryPeerRPCStats()
+int rx_queryPeerRPCStats(void)
{
int rc;
MUTEX_ENTER(&rx_rpc_stats);
* Returns void.
*/
-void rx_enableProcessRPCStats()
+void rx_enableProcessRPCStats(void)
{
MUTEX_ENTER(&rx_rpc_stats);
rx_enable_stats = 1;
* Returns void.
*/
-void rx_enablePeerRPCStats()
+void rx_enablePeerRPCStats(void)
{
MUTEX_ENTER(&rx_rpc_stats);
rx_enable_stats = 1;
* Returns void.
*/
-void rx_disableProcessRPCStats()
+void rx_disableProcessRPCStats(void)
{
rx_interface_stat_p rpc_stat, nrpc_stat;
size_t space;
* Returns void.
*/
-void rx_disablePeerRPCStats()
+void rx_disablePeerRPCStats(void)
{
struct rx_peer **peer_ptr, **peer_end;
int code;
* Returns void.
*/
-void rx_clearProcessRPCStats(
- afs_uint32 clearFlag)
+void rx_clearProcessRPCStats(afs_uint32 clearFlag)
{
rx_interface_stat_p rpc_stat, nrpc_stat;
* Returns void.
*/
-void rx_clearPeerRPCStats(
- afs_uint32 clearFlag)
+void rx_clearPeerRPCStats(afs_uint32 clearFlag)
{
rx_interface_stat_p rpc_stat, nrpc_stat;
*/
static int (*rxi_rxstat_userok)(struct rx_call *call) = NULL;
-void rx_SetRxStatUserOk(
- int (*proc)(struct rx_call *call))
+void rx_SetRxStatUserOk(int (*proc)(struct rx_call *call))
{
rxi_rxstat_userok = proc;
}
-int rx_RxStatUserOk(
- struct rx_call *call)
+int rx_RxStatUserOk(struct rx_call *call)
{
if (!rxi_rxstat_userok)
return 0;