#include "sys/lock_def.h"
#endif /* AFS_AIX41_ENV */
# include "rxgen_consts.h"
-# include "afs/magic.h"
#else /* KERNEL */
# include <sys/types.h>
+# include <string.h>
# include <errno.h>
#ifdef AFS_NT40_ENV
# include <stdlib.h>
# include <netinet/in.h>
# include <sys/time.h>
#endif
-#ifdef HAVE_STRING_H
-#include <string.h>
-#else
-#ifdef HAVE_STRINGS_H
-#include <strings.h>
-#endif
-#endif
# include "rx.h"
# include "rx_user.h"
# include "rx_clock.h"
# include "rx_globals.h"
# include "rx_trace.h"
# include <afs/rxgen_consts.h>
-# include <afs/magic.h>
#endif /* KERNEL */
int (*registerProgram) () = 0;
#define UNLOCK_RX_INIT
#endif
-/*
- * Now, rx_InitHost is just a stub for rx_InitAddrs
- * Parameters are in network byte order.
- */
-
int
rx_InitHost(u_int host, u_int port)
{
- struct sockaddr_storage saddr;
- int type = SOCK_DGRAM, len = sizeof(struct sockaddr_in);
-
- memset((void *) &saddr, 0, sizeof(saddr));
- rx_ssfamily(&saddr) = AF_INET;
- ((struct sockaddr_in *) &saddr)->sin_addr.s_addr = host;
- ((struct sockaddr_in *) &saddr)->sin_port = (u_short)port;
-#ifdef STRUCT_SOCKADDR_HAS_SA_LEN
- ((struct sockaddr_in *) &saddr)->sin_len = sizeof(struct sockaddr_in);
-#endif
- return rx_InitAddrs(&saddr, &type, &len, 1);
-}
-
-/*
- * New API: rx_InitAddrs(struct sockaddr_storage *, int *, int)
- *
- * Arguments:
- *
- * struct sockaddr_storage - array of struct sockaddr_storage elements,
- * each one listing an interface/protocol to
- * be listened on.
- * int * - array of integers listing the socket type
- * (SOCK_STREAM or SOCK_DGRAM) to be used
- * by the corresponding struct sockaddr_storage
- * int * - array of integers listing saddr sizes
- * int - Number of elements in sockaddr_storage array.
- *
- * Note that in general only servers should call this function; clients
- * should (for now) continue to call rx_Init().
- */
-
-int rx_InitAddrs(struct sockaddr_storage *saddrs, int *types, int *salens,
- int nelem)
-{
#ifdef KERNEL
osi_timeval_t tv;
#else /* KERNEL */
struct timeval tv;
#endif /* KERNEL */
char *htable, *ptable;
- int tmp_status, i;
-
-#if defined(AFS_DJGPP_ENV) && !defined(DEBUG)
- __djgpp_set_quiet_socket(1);
-#endif
-
+ int tmp_status;
+
SPLVAR;
-
+
INIT_PTHREAD_LOCKS;
LOCK_RX_INIT;
if (rxinit_status == 0) {
if (afs_winsockInit() < 0)
return -1;
#endif
-
+
#ifndef KERNEL
/*
* Initialize anything necessary to provide a non-premptive threading
*/
rxi_InitializeThreadSupport();
#endif
-
+
/* Allocate and initialize a socket for client and perhaps server
* connections. */
-
- rx_socket = OSI_NULLSOCKET;
- rx_port = 0;
-
- for (i = 0; i < nelem; i++) {
- switch (types[i]) {
- case SOCK_DGRAM:
- rx_socket = rxi_GetHostUDPSocket(&saddrs[i], salens[i]);
- if (rx_socket == OSI_NULLSOCKET) {
- UNLOCK_RX_INIT;
- return RX_ADDRINUSE;
- }
- rx_port = rx_ss2pn(&saddrs[i]);
- break;
- default:
- return RX_INVALID_OPERATION;
- }
-
+
+ rx_socket = rxi_GetHostUDPSocket(host, (u_short) port);
+ if (rx_socket == OSI_NULLSOCKET) {
+ UNLOCK_RX_INIT;
+ return RX_ADDRINUSE;
}
-
#ifdef RX_ENABLE_LOCKS
#ifdef RX_LOCKS_DB
rxdb_init();
#else
osi_GetTime(&tv);
#endif
-
- if (! rx_port) {
+ if (port) {
+ rx_port = port;
+ } else {
#if defined(KERNEL) && !defined(UKERNEL)
/* Really, this should never happen in a real kernel */
rx_port = 0;
#else
- struct sockaddr_storage sn;
- socklen_t addrlen = sizeof(sn);
- if (getsockname((int)rx_socket, (struct sockaddr *)&sn, &addrlen)) {
+ struct sockaddr_in addr;
+ int addrlen = sizeof(addr);
+ if (getsockname((int)rx_socket, (struct sockaddr *)&addr, &addrlen)) {
rx_Finalize();
return -1;
}
- rx_port = rx_ss2pn(&sn);
+ rx_port = addr.sin_port;
#endif
}
rx_stats.minRtt.sec = 9999999;
return rx_InitHost(htonl(INADDR_ANY), port);
}
-
/* called with unincremented nRequestsRunning to see if it is OK to start
* a new thread in this service. Could be "no" for two reasons: over the
* max quota, or would prevent others from reaching their min quota.
rx_StartClientThread(void)
{
#ifdef AFS_PTHREAD_ENV
- int pid;
- pid = (int) pthread_self();
+ pthread_t pid;
+ pid = pthread_self();
#endif /* AFS_PTHREAD_ENV */
}
#endif /* AFS_NT40_ENV */
(*registerProgram) (pid, name);
#endif /* KERNEL */
#endif /* AFS_NT40_ENV */
- rx_ServerProc(); /* Never returns */
+ rx_ServerProc(NULL); /* Never returns */
}
#ifdef RX_ENABLE_TSFPQ
/* no use leaving packets around in this thread's local queue if
return;
}
-/*
- * Now, rx_NewConnection is just a stub for rx_NewConnectionAddrs()
- */
-
+/* Create a new client connection to the specified service, using the
+ * specified security object to implement the security model for this
+ * connection. */
struct rx_connection *
rx_NewConnection(register afs_uint32 shost, u_short sport, u_short sservice,
register struct rx_securityClass *securityObject,
int serviceSecurityIndex)
{
- struct sockaddr_in sin;
- int len = sizeof(sin), type = SOCK_DGRAM;
-
- memset((void *) &sin, 0, sizeof(sin));
-
- sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = shost;
- sin.sin_port = sport;
-
- return rx_NewConnectionAddrs((struct sockaddr_storage *) &sin, &type,
- &len, 1, sservice, securityObject,
- serviceSecurityIndex);
-}
-
-/* Create a new client connection to the specified service, using the
- * specified security object to implement the security model for this
- * connection
- *
- * This follows the same logic as rx_InitAddrs() for the first four
- * arguments.
- */
-struct rx_connection *
-rx_NewConnectionAddrs(struct sockaddr_storage *saddr, int *type, int *slen,
- int nelem, u_short sservice,
- struct rx_securityClass *securityObject,
- int serviceSecurityIndex)
-{
- int hashindex, i;
+ int hashindex;
afs_int32 cid;
register struct rx_connection *conn;
SPLVAR;
- osi_Assert(securityObject->magic == MAGIC_RXSECURITY);
clock_NewTime();
- dpf(("rx_NewConnection(host %x, port %u, service %u, securityObject %x, serviceSecurityIndex %d)\n", ntohl(rx_ss2v4addr(saddr)), ntohs(rx_ss2pn(saddr)), sservice, securityObject, serviceSecurityIndex));
+ dpf(("rx_NewConnection(host %x, port %u, service %u, securityObject %x, serviceSecurityIndex %d)\n", ntohl(shost), ntohs(sport), sservice, securityObject, serviceSecurityIndex));
/* Vasilsi said: "NETPRI protects Cid and Alloc", but can this be true in
* the case of kmem_alloc? */
NETPRI;
MUTEX_ENTER(&rx_connHashTable_lock);
cid = (rx_nextCid += RX_MAXCALLS);
- conn->magic = MAGIC_RXCONN;
conn->type = RX_CLIENT_CONNECTION;
conn->cid = cid;
conn->epoch = rx_epoch;
- /*
- * Right now we're going to just call rxi_FindPeer for UDP connections
- * We're only going to support one.
- */
- for (i = 0; i < nelem; i++) {
- if (type[i] == SOCK_DGRAM) {
- conn->peer = rxi_FindPeer(&saddr[i], slen[i], type[i], 0, 1);
- break;
- }
- }
+ conn->peer = rxi_FindPeer(shost, sport, 0, 1);
conn->serviceId = sservice;
conn->securityObject = securityObject;
- /* This doesn't work in all compilers with void (they're buggy), so fake it
- * with VOID */
- conn->securityData = (VOID *) 0;
+ conn->securityData = (void *) 0;
conn->securityIndex = serviceSecurityIndex;
rx_SetConnDeadTime(conn, rx_connDeadTime);
conn->ackRate = RX_FAST_ACK_RATE;
conn->refCount++; /* no lock required since only this thread knows... */
conn->next = rx_connHashTable[hashindex];
rx_connHashTable[hashindex] = conn;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nClientConns++;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
MUTEX_EXIT(&rx_connHashTable_lock);
USERPRI;
return conn;
void
rxi_CleanupConnection(struct rx_connection *conn)
{
- osi_Assert(conn->magic == MAGIC_RXCONN);
-
/* Notify the service exporter, if requested, that this connection
* is being destroyed */
if (conn->type == RX_SERVER_CONNECTION && conn->service->destroyConnProc)
(*conn->service->destroyConnProc) (conn);
- conn->magic = MAGIC_RXCONN_FREE;
-
/* Notify the security module that this connection is being destroyed */
RXS_DestroyConnection(conn->securityObject, conn);
conn->peer->refCount--;
MUTEX_EXIT(&rx_peerHashTable_lock);
- MUTEX_ENTER(&rx_stats_mutex);
if (conn->type == RX_SERVER_CONNECTION)
- rx_stats.nServerConns--;
+ rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
else
- rx_stats.nClientConns--;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
#ifndef KERNEL
if (conn->specific) {
int i;
void
rxi_DestroyConnection(register struct rx_connection *conn)
{
- osi_Assert(conn->magic == MAGIC_RXCONN);
-
MUTEX_ENTER(&rx_connHashTable_lock);
rxi_DestroyConnectionNoLock(conn);
/* conn should be at the head of the cleanup list */
{
SPLVAR;
- osi_Assert(conn->magic == MAGIC_RXCONN);
NETPRI;
MUTEX_ENTER(&conn->conn_data_lock);
conn->refCount++;
USERPRI;
}
+/* Wait for the transmit queue to no longer be busy.
+ *
+ * Must be called with call->lock held.  Loops until another thread
+ * clears RX_CALL_TQ_BUSY; sets RX_CALL_TQ_WAIT and bumps tqWaiters so
+ * the thread that clears the busy flag knows waiters must be woken.
+ * The last waiter to leave clears RX_CALL_TQ_WAIT. */
+static void rxi_WaitforTQBusy(struct rx_call *call) {
+    while (call->flags & RX_CALL_TQ_BUSY) {
+	call->flags |= RX_CALL_TQ_WAIT;
+	call->tqWaiters++;
+#ifdef RX_ENABLE_LOCKS
+	/* CV_WAIT presumably drops call->lock while sleeping and
+	 * re-acquires it before returning — standard condvar semantics;
+	 * confirm against the CV_WAIT macro definition. */
+	osirx_AssertMine(&call->lock, "rxi_WaitforTQ lock");
+	CV_WAIT(&call->cv_tq, &call->lock);
+#else /* RX_ENABLE_LOCKS */
+	osi_rxSleep(&call->tq);
+#endif /* RX_ENABLE_LOCKS */
+	call->tqWaiters--;
+	if (call->tqWaiters == 0) {
+	    /* No waiters remain, so the wakeup request flag can go. */
+	    call->flags &= ~RX_CALL_TQ_WAIT;
+	}
+    }
+}
/* Start a new rx remote procedure call, on the specified connection.
* If wait is set to 1, wait for a free call channel; otherwise return
* 0. Maxtime gives the maximum number of seconds this call may take,
- * after rx_MakeCall returns. After this time interval, a call to any
+ * after rx_NewCall returns. After this time interval, a call to any
* of rx_SendData, rx_ReadData, etc. will fail with RX_CALL_TIMEOUT.
* For fine grain locking, we hold the conn_call_lock in order to
* to ensure that we don't get signalle after we found a call in an active
struct clock queueTime;
SPLVAR;
- osi_Assert(conn->magic == MAGIC_RXCONN);
clock_NewTime();
- dpf(("rx_MakeCall(conn %x)\n", conn));
+ dpf(("rx_NewCall(conn %x)\n", conn));
NETPRI;
clock_GetTime(&queueTime);
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
/* Now, if TQ wasn't cleared earlier, do it now. */
MUTEX_ENTER(&call->lock);
- while (call->flags & RX_CALL_TQ_BUSY) {
- call->flags |= RX_CALL_TQ_WAIT;
- call->tqWaiters++;
-#ifdef RX_ENABLE_LOCKS
- osirx_AssertMine(&call->lock, "rxi_Start lock4");
- CV_WAIT(&call->cv_tq, &call->lock);
-#else /* RX_ENABLE_LOCKS */
- osi_rxSleep(&call->tq);
-#endif /* RX_ENABLE_LOCKS */
- call->tqWaiters--;
- if (call->tqWaiters == 0) {
- call->flags &= ~RX_CALL_TQ_WAIT;
- }
- }
+ rxi_WaitforTQBusy(call);
if (call->flags & RX_CALL_TQ_CLEARME) {
rxi_ClearTransmitQueue(call, 0);
queue_Init(&call->tq);
MUTEX_EXIT(&call->lock);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
+ dpf(("rx_NewCall(call %x)\n", call));
return call;
}
service name might be used for probing for
statistics) */
struct rx_service *
-rx_NewService(u_short port, u_short serviceId, char *serviceName,
- struct rx_securityClass **securityObjects, int nSecurityObjects,
- afs_int32(*serviceProc) (struct rx_call * acall))
+rx_NewServiceHost(afs_uint32 host, u_short port, u_short serviceId,
+ char *serviceName, struct rx_securityClass **securityObjects,
+ int nSecurityObjects,
+ afs_int32(*serviceProc) (struct rx_call * acall))
{
osi_socket socket = OSI_NULLSOCKET;
register struct rx_service *tservice;
for (i = 0; i < RX_MAX_SERVICES; i++) {
register struct rx_service *service = rx_services[i];
if (service) {
- if (port == service->servicePort) {
+ if (port == service->servicePort && host == service->serviceHost) {
if (service->serviceId == serviceId) {
/* The identical service has already been
* installed; if the caller was intending to
if (socket == OSI_NULLSOCKET) {
/* If we don't already have a socket (from another
* service on same port) get a new one */
- struct sockaddr_in sin;
-
- memset((void *) &sin, 0, sizeof(sin));
- sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = htonl(INADDR_ANY);
- sin.sin_port = port;
-#ifdef STRUCT_SOCKADDR_HAS_SA_LEN
- sin.sin_len = sizeof(sin);
-#endif
- socket = rxi_GetHostUDPSocket((struct sockaddr_storage *) &sin,
- sizeof(sin));
+ socket = rxi_GetHostUDPSocket(htonl(INADDR_ANY), port);
if (socket == OSI_NULLSOCKET) {
USERPRI;
rxi_FreeService(tservice);
}
}
service = tservice;
- service->magic = MAGIC_RXSVC;
service->socket = socket;
+ service->serviceHost = host;
service->servicePort = port;
service->serviceId = serviceId;
service->serviceName = serviceName;
return 0;
}
+/* Set a configuration option on all of a service's security objects.
+ *
+ * service - service whose security objects are to be configured
+ * type    - which configuration variable to set
+ * value   - opaque value for that variable, interpreted by the
+ *           security class (see RXS_SetConfiguration)
+ *
+ * Empty (NULL) security-object slots are skipped.  Always returns 0;
+ * per-object RXS_SetConfiguration results are not propagated. */
+
+afs_int32
+rx_SetSecurityConfiguration(struct rx_service *service,
+			    rx_securityConfigVariables type,
+			    void *value)
+{
+    int i;
+    for (i = 0; i<service->nSecurityObjects; i++) {
+	if (service->securityObjects[i]) {
+	    RXS_SetConfiguration(service->securityObjects[i], NULL, type,
+				 value, NULL);
+	}
+    }
+    return 0;
+}
+
+/* Back-compatible wrapper around rx_NewServiceHost: create a service
+ * listening on all local interfaces (INADDR_ANY) at the given port.
+ * All other arguments are passed through unchanged. */
+struct rx_service *
+rx_NewService(u_short port, u_short serviceId, char *serviceName,
+	      struct rx_securityClass **securityObjects, int nSecurityObjects,
+	      afs_int32(*serviceProc) (struct rx_call * acall))
+{
+    return rx_NewServiceHost(htonl(INADDR_ANY), port, serviceId, serviceName, securityObjects, nSecurityObjects, serviceProc);
+}
+
/* Generic request processing loop. This routine should be called
* by the implementation dependent rx_ServerProc. If socketp is
* non-null, it will be set to the file descriptor that this thread
struct rx_service *service = NULL;
SPLVAR;
- osi_Assert(cur_service->magic == MAGIC_RXSVC);
-
MUTEX_ENTER(&freeSQEList_lock);
if ((sq = rx_FreeSQEList)) {
struct rx_service *service = NULL;
SPLVAR;
- osi_Assert(cur_service->magic == MAGIC_RXSVC);
-
NETPRI;
MUTEX_ENTER(&freeSQEList_lock);
void
rx_SetArrivalProc(register struct rx_call *call,
register void (*proc) (register struct rx_call * call,
- register VOID * mh,
+ register void * mh,
register int index),
- register VOID * handle, register int arg)
+ register void * handle, register int arg)
{
call->arrivalProc = proc;
call->arrivalProcHandle = handle;
register struct rx_service *service;
afs_int32 error;
SPLVAR;
- osi_Assert(call->magic == MAGIC_RXCALL);
}
rxi_flushtrace();
+#ifdef AFS_NT40_ENV
+ afs_winsockCleanup();
+#endif
+
rxinit_status = 1;
UNLOCK_RX_INIT;
}
register struct rx_call *nxp; /* Next call pointer, for queue_Scan */
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- osi_Assert(conn->magic == MAGIC_RXCONN);
+ dpf(("rxi_NewCall(conn %x, channel %d)\n", conn, channel));
/* Grab an existing call structure, or allocate a new one.
* Existing call structures are assumed to have been left reset by
call = queue_First(&rx_freeCallQueue, rx_call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
queue_Remove(call);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nFreeCallStructs--;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_ENTER(&call->lock);
CLEAR_CALL_QUEUE_LOCK(call);
CV_INIT(&call->cv_rq, "call rq", CV_DEFAULT, 0);
CV_INIT(&call->cv_tq, "call tq", CV_DEFAULT, 0);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nCallStructs++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
/* Initialize once-only items */
queue_Init(&call->tq);
queue_Init(&call->rq);
if (*call->callNumber == 0)
*call->callNumber = 1;
- call->magic = MAGIC_RXCALL;
-
return call;
}
register struct rx_connection *conn = call->conn;
- osi_Assert(call->magic == MAGIC_RXCALL);
-
if (call->state == RX_STATE_DALLY || call->state == RX_STATE_HOLD)
(*call->callNumber)++;
rxi_ResetCall(call, 0);
call->conn->call[channel] = (struct rx_call *)0;
- call->magic = MAGIC_RXCALL_FREE;
-
MUTEX_ENTER(&rx_freeCallQueue_lock);
SET_CALL_QUEUE_LOCK(call, &rx_freeCallQueue_lock);
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
#else /* AFS_GLOBAL_RXLOCK_KERNEL */
queue_Append(&rx_freeCallQueue, call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nFreeCallStructs++;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
MUTEX_EXIT(&rx_freeCallQueue_lock);
/* Destroy the connection if it was previously slated for
{
register char *p;
- MUTEX_ENTER(&rx_stats_mutex);
- rxi_Alloccnt++;
- rxi_Allocsize += (afs_int32)size;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexAdd1Increment2(rxi_Allocsize, (afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
p = (char *)osi_Alloc(size);
if (!p)
void
rxi_Free(void *addr, register size_t size)
{
- MUTEX_ENTER(&rx_stats_mutex);
- rxi_Alloccnt--;
- rxi_Allocsize -= (afs_int32)size;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexAdd1Decrement2(rxi_Allocsize, -(afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
osi_Free(addr, size);
}
* refcount will be be decremented. This is used to replace the peer
* structure hanging off a connection structure */
struct rx_peer *
-rxi_FindPeer(struct sockaddr_storage *saddr, int slen, int stype,
- struct rx_peer *origPeer, int create)
+rxi_FindPeer(register afs_uint32 host, register u_short port,
+ struct rx_peer *origPeer, int create)
{
register struct rx_peer *pp;
- int hashIndex, i, j;
- for (i = 0, j = 0; i < slen; i++)
- j += ((unsigned char *) saddr)[i];
- hashIndex = j % rx_hashTableSize;
+ int hashIndex;
+ hashIndex = PEER_HASH(host, port);
MUTEX_ENTER(&rx_peerHashTable_lock);
for (pp = rx_peerHashTable[hashIndex]; pp; pp = pp->next) {
- if (memcmp(saddr, &pp->saddr, slen) == 0 && stype == pp->socktype)
- break;
+ if ((pp->host == host) && (pp->port == port))
+ break;
}
if (!pp) {
- if (create) {
- pp = rxi_AllocPeer(); /* This bzero's *pp */
- pp->magic = MAGIC_RXPEER;
- memcpy(&pp->saddr, saddr, slen);
- pp->saddrlen = slen;
- pp->socktype = stype;
- switch (rx_ssfamily(saddr)) {
- case AF_INET:
- /*
- * Should be enough storage for a dotted quad
- */
- snprintf(pp->addrstring, sizeof pp->addrstring, "%d.%d.%d.%d",
- rx_ss2addrp(saddr)[0], rx_ss2addrp(saddr)[1],
- rx_ss2addrp(saddr)[2], rx_ss2addrp(saddr)[3]);
- break;
-#ifdef AF_INET6
- case AF_INET6:
- /*
- * This gets more complicated, unfortunately
- */
- if (IN6_IS_ADDR_V4COMPAT(&(rx_ss2sin6(saddr)->sin6_addr))) {
- snprintf(pp->addrstring,
- sizeof pp->addrstring, "%d.%d.%d.%d",
- rx_ss2addrp(saddr)[12], rx_ss2addrp(saddr)[13],
- rx_ss2addrp(saddr)[14], rx_ss2addrp(saddr)[15]);
- } else {
- snprintf(pp->addrstring,
- sizeof pp->addrstring, "%x:%x:%x:%x:%x:%x:%x:%x",
- ntohs(rx_ss2addrp6(saddr)[0]),
- ntohs(rx_ss2addrp6(saddr)[1]),
- ntohs(rx_ss2addrp6(saddr)[2]),
- ntohs(rx_ss2addrp6(saddr)[3]),
- ntohs(rx_ss2addrp6(saddr)[4]),
- ntohs(rx_ss2addrp6(saddr)[5]),
- ntohs(rx_ss2addrp6(saddr)[6]),
- ntohs(rx_ss2addrp6(saddr)[7]));
- }
- break;
-#endif /* AF_INET6 */
- default:
- strcpy(pp->addrstring, "??.??.??.??");
- break;
- }
+ if (create) {
+ pp = rxi_AllocPeer(); /* This bzero's *pp */
+ pp->host = host; /* set here or in InitPeerParams is zero */
+ pp->port = port;
MUTEX_INIT(&pp->peer_lock, "peer_lock", MUTEX_DEFAULT, 0);
queue_Init(&pp->congestionQueue);
queue_Init(&pp->rpcStats);
pp->next = rx_peerHashTable[hashIndex];
rx_peerHashTable[hashIndex] = pp;
rxi_InitPeerParams(pp);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nPeerStructs++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
}
}
if (pp && create) {
pp->refCount++;
}
- if (origPeer) {
- osi_Assert(origPeer->magic == MAGIC_RXPEER);
+ if (origPeer)
origPeer->refCount--;
- }
MUTEX_EXIT(&rx_peerHashTable_lock);
return pp;
}
* server connection is created, it will be created using the supplied
* index, if the index is valid for this service */
struct rx_connection *
-rxi_FindConnection(osi_socket socket, struct sockaddr_storage *saddr,
- int slen, int socktype, u_short serviceId, afs_uint32 cid,
+rxi_FindConnection(osi_socket socket, register afs_int32 host,
+ register u_short port, u_short serviceId, afs_uint32 cid,
afs_uint32 epoch, int type, u_int securityIndex)
{
int hashindex, flag;
MUTEX_EXIT(&rx_connHashTable_lock);
return (struct rx_connection *)0;
}
- if (memcmp(&pp->saddr, saddr, slen) == 0 &&
- socktype == pp->socktype)
+ if (pp->host == host && pp->port == port)
break;
- if (type == RX_CLIENT_CONNECTION &&
- rx_ss2pn(&pp->saddr) == rx_ss2pn(saddr))
+ if (type == RX_CLIENT_CONNECTION && pp->port == port)
break;
/* So what happens when it's a callback connection? */
if ( /*type == RX_CLIENT_CONNECTION && */
CV_INIT(&conn->conn_call_cv, "conn call cv", CV_DEFAULT, 0);
conn->next = rx_connHashTable[hashindex];
rx_connHashTable[hashindex] = conn;
- conn->magic = MAGIC_RXCONN;
- conn->peer = rxi_FindPeer(saddr, slen, socktype, 0, 1);
+ conn->peer = rxi_FindPeer(host, port, 0, 1);
conn->type = RX_SERVER_CONNECTION;
conn->lastSendTime = clock_Sec(); /* don't GC immediately */
conn->epoch = epoch;
/* XXXX Connection timeout? */
if (service->newConnProc)
(*service->newConnProc) (conn);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nServerConns++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
}
MUTEX_ENTER(&conn->conn_data_lock);
struct rx_packet *
rxi_ReceivePacket(register struct rx_packet *np, osi_socket socket,
- struct sockaddr_storage *saddr, int slen, int *tnop,
+ afs_uint32 host, u_short port, int *tnop,
struct rx_call **newcallp)
{
register struct rx_call *call;
packetType = (np->header.type > 0 && np->header.type < RX_N_PACKET_TYPES)
? rx_packetTypes[np->header.type - 1] : "*UNKNOWN*";
dpf(("R %d %s: %x.%d.%d.%d.%d.%d.%d flags %d, packet %x",
- np->header.serial, packetType, ntohl(rx_ss2v4addr(saddr)),
- ntohs(rx_ss2pn(saddr)), np->header.serviceId,
+ np->header.serial, packetType, ntohl(host), ntohs(port), np->header.serviceId,
np->header.epoch, np->header.cid, np->header.callNumber,
np->header.seq, np->header.flags, np));
#endif
if (np->header.type == RX_PACKET_TYPE_VERSION) {
- return rxi_ReceiveVersionPacket(np, socket, saddr, slen, 1);
+ return rxi_ReceiveVersionPacket(np, socket, host, port, 1);
}
if (np->header.type == RX_PACKET_TYPE_DEBUG) {
- return rxi_ReceiveDebugPacket(np, socket, saddr, slen, 1);
+ return rxi_ReceiveDebugPacket(np, socket, host, port, 1);
}
#ifdef RXDEBUG
/* If an input tracer function is defined, call it with the packet and
* network address. Note this function may modify its arguments. */
if (rx_justReceived) {
- struct sockaddr_in *addr = (struct sockaddr_in *) saddr;
+ struct sockaddr_in addr;
int drop;
- drop = (*rx_justReceived) (np, addr);
+ addr.sin_family = AF_INET;
+ addr.sin_port = port;
+ addr.sin_addr.s_addr = host;
+#ifdef STRUCT_SOCKADDR_HAS_SA_LEN
+ addr.sin_len = sizeof(addr);
+#endif /* AFS_OSF_ENV */
+ drop = (*rx_justReceived) (np, &addr);
/* drop packet if return value is non-zero */
if (drop)
return np;
+ port = addr.sin_port; /* in case fcn changed addr */
+ host = addr.sin_addr.s_addr;
}
#endif
/* Find the connection (or fabricate one, if we're the server & if
* necessary) associated with this packet */
conn =
- rxi_FindConnection(socket, saddr, slen, SOCK_DGRAM,
- np->header.serviceId, np->header.cid,
- np->header.epoch, type, np->header.securityIndex);
+ rxi_FindConnection(socket, host, port, np->header.serviceId,
+ np->header.cid, np->header.epoch, type,
+ np->header.securityIndex);
if (!conn) {
/* If no connection found or fabricated, just ignore the packet.
* then, since this is a client connection we're getting data for
* it must be for the previous call.
*/
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.spuriousPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
MUTEX_ENTER(&conn->conn_data_lock);
conn->refCount--;
MUTEX_EXIT(&conn->conn_data_lock);
if (type == RX_SERVER_CONNECTION) { /* We're the server */
if (np->header.callNumber < currentCallNumber) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.spuriousPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call)
MUTEX_EXIT(&call->lock);
MUTEX_EXIT(&conn->conn_call_lock);
*call->callNumber = np->header.callNumber;
if (np->header.callNumber == 0)
- dpf(("RecPacket call 0 %d %s: %s.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", np->header.serial, rx_packetTypes[np->header.type - 1], rx_AddrStringOf(conn->peer), ntohs(rx_PortOf(conn->peer)), np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq, np->header.flags, (unsigned long)np, np->retryTime.sec, np->retryTime.usec / 1000, np->length));
+ dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port), np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq, np->header.flags, (unsigned long)np, np->retryTime.sec, np->retryTime.usec / 1000, np->length));
call->state = RX_STATE_PRECALL;
clock_GetTime(&call->queueTime);
MUTEX_ENTER(&conn->conn_data_lock);
conn->refCount--;
MUTEX_EXIT(&conn->conn_data_lock);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nBusies++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
return tp;
}
rxi_KeepAliveOn(call);
rxi_ResetCall(call, 0);
*call->callNumber = np->header.callNumber;
if (np->header.callNumber == 0)
- dpf(("RecPacket call 0 %d %s: %s.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", np->header.serial, rx_packetTypes[np->header.type - 1], rx_AddrStringOf(conn->peer), ntohs(rx_PortOf(conn->peer)), np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq, np->header.flags, (unsigned long)np, np->retryTime.sec, np->retryTime.usec / 1000, np->length));
+ dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port), np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq, np->header.flags, (unsigned long)np, np->retryTime.sec, np->retryTime.usec / 1000, np->length));
call->state = RX_STATE_PRECALL;
clock_GetTime(&call->queueTime);
MUTEX_ENTER(&conn->conn_data_lock);
conn->refCount--;
MUTEX_EXIT(&conn->conn_data_lock);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nBusies++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
return tp;
}
rxi_KeepAliveOn(call);
/* Ignore all incoming acknowledgements for calls in DALLY state */
if (call && (call->state == RX_STATE_DALLY)
&& (np->header.type == RX_PACKET_TYPE_ACK)) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.ignorePacketDally++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
/* Ignore anything that's not relevant to the current call. If there
* isn't a current call, then no packet is relevant. */
if (!call || (np->header.callNumber != currentCallNumber)) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.spuriousPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
#ifdef RX_ENABLE_LOCKS
if (call) {
MUTEX_EXIT(&call->lock);
* XXX interact badly with the server-restart detection
* XXX code in receiveackpacket. */
if (ntohl(rx_GetInt32(np, FIRSTACKOFFSET)) < call->tfirst) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.spuriousPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
MUTEX_EXIT(&call->lock);
MUTEX_ENTER(&conn->conn_data_lock);
conn->refCount--;
/* Now do packet type-specific processing */
switch (np->header.type) {
case RX_PACKET_TYPE_DATA:
- np = rxi_ReceiveDataPacket(call, np, 1, socket, saddr, slen, tnop,
+ np = rxi_ReceiveDataPacket(call, np, 1, socket, host, port, tnop,
newcallp);
break;
case RX_PACKET_TYPE_ACK:
struct rx_packet *
rxi_ReceiveDataPacket(register struct rx_call *call,
register struct rx_packet *np, int istack,
- osi_socket socket, struct sockaddr_storage *saddr,
- int slen, int *tnop, struct rx_call **newcallp)
+ osi_socket socket, afs_uint32 host, u_short port,
+ int *tnop, struct rx_call **newcallp)
{
int ackNeeded = 0; /* 0 means no, otherwise ack_reason */
int newPackets = 0;
int isFirst;
struct rx_packet *tnp;
struct clock when;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.dataPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
#ifdef KERNEL
/* If there are no packet buffers, drop this new packet, unless we can find
MUTEX_ENTER(&rx_freePktQ_lock);
rxi_NeedMorePackets = TRUE;
MUTEX_EXIT(&rx_freePktQ_lock);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.noPacketBuffersOnRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.nPacketBuffersOnRead, rx_stats_mutex);
call->rprev = np->header.serial;
rxi_calltrace(RX_TRACE_DROP, call);
dpf(("packet %x dropped on receipt - quota problems", np));
/* The RX_JUMBO_PACKET is set in all but the last packet in each
* AFS 3.5 jumbogram. */
if (flags & RX_JUMBO_PACKET) {
- tnp = rxi_SplitJumboPacket(np, saddr, slen, isFirst);
+ tnp = rxi_SplitJumboPacket(np, host, port, isFirst);
} else {
tnp = NULL;
}
/* Check to make sure it is not a duplicate of one already queued */
if (queue_IsNotEmpty(&call->rq)
&& queue_First(&call->rq, rx_packet)->header.seq == seq) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.dupPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
dpf(("packet %x dropped on receipt - duplicate", np));
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
/* If the new packet's sequence number has been sent to the
* application already, then this is a duplicate */
if (seq < call->rnext) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.dupPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
0, queue_Scan(&call->rq, tp, nxp, rx_packet)) {
/*Check for duplicate packet */
if (seq == tp->header.seq) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.dupPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE,
/* We need to send an ack of the packet is out of sequence,
* or if an ack was requested by the peer. */
- if (seq != prev + 1 || missing || (flags & RX_REQUEST_ACK)) {
+ if (seq != prev + 1 || missing) {
ackNeeded = RX_ACK_OUT_OF_SEQUENCE;
- }
+ } else if (flags & RX_REQUEST_ACK) {
+ ackNeeded = RX_ACK_REQUESTED;
+ }
/* Acknowledge the last packet for each call */
if (flags & RX_LAST_PACKET) {
u_short maxMTU = 0; /* Set if peer supports AFS 3.4a jumbo datagrams */
int maxDgramPackets = 0; /* Set if peer supports AFS 3.5 jumbo datagrams */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.ackPacketsRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
ap = (struct rx_ackPacket *)rx_DataOf(np);
nbytes = rx_Contiguous(np) - (int)((ap->acks) - (u_char *) ap);
if (nbytes < 0)
if (serial
&& (tp->header.serial == serial || tp->firstSerial == serial))
rxi_ComputePeerNetStats(call, tp, ap, np);
+ if (!(tp->flags & RX_PKTFLAG_ACKED)) {
+ newAckCount++;
+ }
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
/* XXX Hack. Because we have to release the global rx lock when sending
* packets (osi_NetSend) we drop all acks while we're traversing the tq
* set the ack bits in the packets and have rxi_Start remove the packets
* when it's done transmitting.
*/
- if (!(tp->flags & RX_PKTFLAG_ACKED)) {
- newAckCount++;
- }
if (call->flags & RX_CALL_TQ_BUSY) {
#ifdef RX_ENABLE_LOCKS
tp->flags |= RX_PKTFLAG_ACKED;
* be unable to accept packets of the size that prior AFS versions would
* send without asking. */
if (peer->maxMTU != tSize) {
+ if (peer->maxMTU > tSize) /* possible cong., maxMTU decreased */
+ peer->congestSeq++;
peer->maxMTU = tSize;
peer->MTU = MIN(tSize, peer->MTU);
call->MTU = MIN(call->MTU, tSize);
- peer->congestSeq++;
}
if (np->length == rx_AckDataSize(ap->nAcks) + 3 * sizeof(afs_int32)) {
sizeof(afs_int32), &tSize);
maxDgramPackets = (afs_uint32) ntohl(tSize);
maxDgramPackets = MIN(maxDgramPackets, rxi_nDgramPackets);
- maxDgramPackets =
- MIN(maxDgramPackets, (int)(peer->ifDgramPackets));
- maxDgramPackets = MIN(maxDgramPackets, tSize);
+ maxDgramPackets = MIN(maxDgramPackets, peer->ifDgramPackets);
+ if (peer->natMTU < peer->ifMTU)
+ maxDgramPackets = MIN(maxDgramPackets, rxi_AdjustDgramPackets(1, peer->natMTU));
if (maxDgramPackets > 1) {
peer->maxDgramPackets = maxDgramPackets;
call->MTU = RX_JUMBOBUFFERSIZE + RX_HEADER_SIZE;
return np;
}
call->flags |= RX_CALL_FAST_RECOVER_WAIT;
- while (call->flags & RX_CALL_TQ_BUSY) {
- call->flags |= RX_CALL_TQ_WAIT;
- call->tqWaiters++;
-#ifdef RX_ENABLE_LOCKS
- osirx_AssertMine(&call->lock, "rxi_Start lock2");
- CV_WAIT(&call->cv_tq, &call->lock);
-#else /* RX_ENABLE_LOCKS */
- osi_rxSleep(&call->tq);
-#endif /* RX_ENABLE_LOCKS */
- call->tqWaiters--;
- if (call->tqWaiters == 0)
- call->flags &= ~RX_CALL_TQ_WAIT;
- }
+ rxi_WaitforTQBusy(call);
MUTEX_ENTER(&peer->peer_lock);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
call->flags &= ~RX_CALL_FAST_RECOVER_WAIT;
}
}
conn->error = error;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.fatalErrors++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
}
}
register struct rx_peer *peer;
struct rx_packet *packet;
+ dpf(("rxi_ResetCall(call %x, newcall %d)\n", call, newcall));
+
/* Notify anyone who is waiting for asynchronous packet arrival */
if (call->arrivalProc) {
(*call->arrivalProc) (call, call->arrivalProcHandle,
nbytes -= p->wirevec[i].iov_len;
}
}
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.ackPacketsSent++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
#ifndef RX_ENABLE_TSFPQ
if (!optionalPacket)
rxi_FreePacket(p);
peer->nSent += len;
if (resending)
peer->reSends += len;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.dataPacketsSent += len;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
MUTEX_EXIT(&peer->peer_lock);
if (list[len - 1]->header.flags & RX_LAST_PACKET) {
* packet until the congestion window reaches the ack rate. */
if (list[i]->header.serial) {
requestAck = 1;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.dataPacketsReSent++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dataPacketsReSent, rx_stats_mutex);
} else {
/* improved RTO calculation- not Karn */
list[i]->firstSent = *now;
peer->nSent++;
if (resending)
peer->reSends++;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.dataPacketsSent++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.dataPacketsSent, rx_stats_mutex);
MUTEX_EXIT(&peer->peer_lock);
/* Tag this packet as not being the last in this group,
return;
}
call->flags |= RX_CALL_FAST_RECOVER_WAIT;
- while (call->flags & RX_CALL_TQ_BUSY) {
- call->flags |= RX_CALL_TQ_WAIT;
- call->tqWaiters++;
-#ifdef RX_ENABLE_LOCKS
- osirx_AssertMine(&call->lock, "rxi_Start lock1");
- CV_WAIT(&call->cv_tq, &call->lock);
-#else /* RX_ENABLE_LOCKS */
- osi_rxSleep(&call->tq);
-#endif /* RX_ENABLE_LOCKS */
- call->tqWaiters--;
- if (call->tqWaiters == 0)
- call->flags &= ~RX_CALL_TQ_WAIT;
- }
+ rxi_WaitforTQBusy(call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
call->flags &= ~RX_CALL_FAST_RECOVER_WAIT;
call->flags |= RX_CALL_FAST_RECOVER;
}
if (call->error) {
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- MUTEX_ENTER(&rx_stats_mutex);
- rx_tq_debug.rxi_start_in_error++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_tq_debug.rxi_start_in_error, rx_stats_mutex);
#endif
return;
}
osi_Panic("rxi_Start: xmit queue clobbered");
}
if (p->flags & RX_PKTFLAG_ACKED) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.ignoreAckedPacket++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
continue; /* Ignore this packet if it has been acknowledged */
}
* the time to reset the call. This will also inform the using
* process that the call is in an error state.
*/
- MUTEX_ENTER(&rx_stats_mutex);
- rx_tq_debug.rxi_start_aborted++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
call->flags &= ~RX_CALL_TQ_BUSY;
if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
dpf(("call %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
(char *)&error, sizeof(error), 0);
rxi_FreePacket(packet);
}
+ CALL_RELE(call, RX_CALL_REFCOUNT_ABORT);
MUTEX_EXIT(&call->lock);
}
peer_ptr++) {
struct rx_peer *peer, *next, *prev;
for (prev = peer = *peer_ptr; peer; peer = next) {
- osi_Assert(peer->magic == MAGIC_RXPEER);
next = peer->next;
code = MUTEX_TRYENTER(&peer->peer_lock);
if ((code) && (peer->refCount == 0)
rxi_rpc_peer_stat_cnt -= num_funcs;
}
rxi_FreePeer(peer);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nPeerStructs--;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
if (peer == *peer_ptr) {
*peer_ptr = next;
prev = next;
int
rxs_Release(struct rx_securityClass *aobj)
{
- osi_Assert(aobj->magic == MAGIC_RXSECURITY);
-
return RXS_Close(aobj);
}
void
rx_PrintPeerStats(FILE * file, struct rx_peer *peer)
{
-/* fprintf(file, "Peer %x.%d. " "Burst size %d, " "burst wait %u.%d.\n",
+ fprintf(file, "Peer %x.%d. " "Burst size %d, " "burst wait %u.%d.\n",
ntohl(peer->host), (int)peer->port, (int)peer->burstSize,
- (int)peer->burstWait.sec, (int)peer->burstWait.usec); */
+ (int)peer->burstWait.sec, (int)peer->burstWait.usec);
fprintf(file,
" Rtt %d, " "retry time %u.%06d, " "total sent %d, "
void *outputData, size_t outputLength)
{
static afs_int32 counter = 100;
- time_t endTime;
+ time_t waitTime, waitCount, startTime;
struct rx_header theader;
char tbuffer[1500];
register afs_int32 code;
- struct timeval tv;
+ struct timeval tv_now, tv_wake, tv_delta;
struct sockaddr_in taddr, faddr;
int faddrLen;
fd_set imask;
register char *tp;
- endTime = time(0) + 20; /* try for 20 seconds */
+ startTime = time(0);
+ waitTime = 1;
+ waitCount = 5;
LOCK_RX_DEBUG;
counter++;
UNLOCK_RX_DEBUG;
(struct sockaddr *)&taddr, sizeof(struct sockaddr_in));
/* see if there's a packet available */
- FD_ZERO(&imask);
- FD_SET(socket, &imask);
- tv.tv_sec = 1;
- tv.tv_usec = 0;
- code = select((int)(socket + 1), &imask, 0, 0, &tv);
- if (code == 1 && FD_ISSET(socket, &imask)) {
- /* now receive a packet */
- faddrLen = sizeof(struct sockaddr_in);
- code =
- recvfrom(socket, tbuffer, sizeof(tbuffer), 0,
- (struct sockaddr *)&faddr, &faddrLen);
-
- if (code > 0) {
- memcpy(&theader, tbuffer, sizeof(struct rx_header));
- if (counter == ntohl(theader.callNumber))
- break;
+ gettimeofday(&tv_wake,0);
+ tv_wake.tv_sec += waitTime;
+ for (;;) {
+ FD_ZERO(&imask);
+ FD_SET(socket, &imask);
+ tv_delta.tv_sec = tv_wake.tv_sec;
+ tv_delta.tv_usec = tv_wake.tv_usec;
+ gettimeofday(&tv_now, 0);
+
+ if (tv_delta.tv_usec < tv_now.tv_usec) {
+ /* borrow */
+ tv_delta.tv_usec += 1000000;
+ tv_delta.tv_sec--;
}
+ tv_delta.tv_usec -= tv_now.tv_usec;
+
+ if (tv_delta.tv_sec < tv_now.tv_sec) {
+ /* time expired */
+ break;
+ }
+ tv_delta.tv_sec -= tv_now.tv_sec;
+
+ code = select(socket + 1, &imask, 0, 0, &tv_delta);
+ if (code == 1 && FD_ISSET(socket, &imask)) {
+ /* now receive a packet */
+ faddrLen = sizeof(struct sockaddr_in);
+ code =
+ recvfrom(socket, tbuffer, sizeof(tbuffer), 0,
+ (struct sockaddr *)&faddr, &faddrLen);
+
+ if (code > 0) {
+ memcpy(&theader, tbuffer, sizeof(struct rx_header));
+ if (counter == ntohl(theader.callNumber))
+ goto success;
+ continue;
+ }
+ }
+ break;
}
/* see if we've timed out */
- if (endTime < time(0))
- return -1;
+ if (!--waitCount) {
+ return -1;
+ }
+ waitTime <<= 1;
}
+
+ success:
code -= sizeof(struct rx_header);
if (code > outputLength)
code = outputLength;
}
next = peer->next;
rxi_FreePeer(peer);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.nPeerStructs--;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
}
}
}
rx_SetSpecific(struct rx_connection *conn, int key, void *ptr)
{
int i;
- osi_Assert(conn->magic == MAGIC_RXCONN);
MUTEX_ENTER(&conn->conn_data_lock);
- osi_Assert(conn->magic == MAGIC_RXCONN);
if (!conn->specific) {
conn->specific = (void **)malloc((key + 1) * sizeof(void *));
for (i = 0; i < key; i++)
rx_GetSpecific(struct rx_connection *conn, int key)
{
void *ptr;
- osi_Assert(conn->magic == MAGIC_RXCONN);
MUTEX_ENTER(&conn->conn_data_lock);
- osi_Assert(conn->magic == MAGIC_RXCONN);
if (key >= conn->nSpecific)
ptr = NULL;
else
afs_uint32 currentFunc, afs_uint32 totalFunc,
struct clock *queueTime, struct clock *execTime,
afs_hyper_t * bytesSent, afs_hyper_t * bytesRcvd, int isServer,
- struct sockaddr_storage *saddr,
+ afs_uint32 remoteHost, afs_uint32 remotePort,
int addToPeerList, unsigned int *counter)
{
int rc = 0;
}
*counter += totalFunc;
for (i = 0; i < totalFunc; i++) {
- switch (rx_ssfamily(saddr)) {
- case AF_INET:
- rpc_stat->stats[i].remote_peer =
- rx_ss2sin(saddr)->sin_addr.s_addr;
- break;
- default:
-#ifdef AF_INET6
- case AF_INET6:
- rpc_stat->stats[i].remote_peer = 0xffffffff;
- break;
-#endif /* AF_INET6 */
- }
- rpc_stat->stats[i].remote_port = rx_ss2pn(saddr);
+ rpc_stat->stats[i].remote_peer = remoteHost;
+ rpc_stat->stats[i].remote_port = remotePort;
rpc_stat->stats[i].remote_is_server = isServer;
rpc_stat->stats[i].interfaceId = rxInterface;
rpc_stat->stats[i].func_total = totalFunc;
int isServer)
{
+ if (!(rxi_monitor_peerStats || rxi_monitor_processStats))
+ return;
+
MUTEX_ENTER(&rx_rpc_stats);
MUTEX_ENTER(&peer->peer_lock);
if (rxi_monitor_peerStats) {
rxi_AddRpcStat(&peer->rpcStats, rxInterface, currentFunc, totalFunc,
queueTime, execTime, bytesSent, bytesRcvd, isServer,
- &peer->saddr, 1, &rxi_rpc_peer_stat_cnt);
+ peer->host, peer->port, 1, &rxi_rpc_peer_stat_cnt);
}
if (rxi_monitor_processStats) {
- struct sockaddr_in sin;
- sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = 0xffffffff;
- sin.sin_port = 0xffff;
rxi_AddRpcStat(&processStats, rxInterface, currentFunc, totalFunc,
queueTime, execTime, bytesSent, bytesRcvd, isServer,
- (struct sockaddr_storage *) &sin, 0,
- &rxi_rpc_process_stat_cnt);
+ 0xffffffff, 0xffffffff, 0, &rxi_rpc_process_stat_cnt);
}
MUTEX_EXIT(&peer->peer_lock);
return 0;
return rxi_rxstat_userok(call);
}
+
+#ifdef AFS_NT40_ENV
+/*
+ * DllMain() -- Entry-point function called by the DllMainCRTStartup()
+ * function in the MSVC runtime DLL (msvcrt.dll).
+ *
+ * Note: the system serializes calls to this function.
+ */
+BOOL WINAPI
+DllMain(HINSTANCE dllInstHandle, /* instance handle for this DLL module */
+ DWORD reason, /* reason function is being called */
+ LPVOID reserved) /* reserved for future use */
+{
+ switch (reason) {
+ case DLL_PROCESS_ATTACH:
+ /* library is being attached to a process */
+ INIT_PTHREAD_LOCKS;
+ return TRUE;
+
+ case DLL_PROCESS_DETACH:
+ return TRUE;
+
+ default:
+ return FALSE;
+ }
+}
+#endif
+