#include "h/socket.h"
#endif
#include "netinet/in.h"
+#ifdef AFS_SUN57_ENV
+#include "inet/common.h"
+#include "inet/ip.h"
+#include "inet/ip_ire.h"
+#endif
#include "afs/afs_args.h"
#include "afs/afs_osi.h"
#ifdef RX_KERNEL_TRACE
#else /* KERNEL */
# include <sys/types.h>
# include <string.h>
+# include <stdarg.h>
# include <errno.h>
#ifdef AFS_NT40_ENV
# include <stdlib.h>
# include <afs/rxgen_consts.h>
#endif /* KERNEL */
-int (*registerProgram) () = 0;
-int (*swapNameProgram) () = 0;
+#ifndef KERNEL
+#ifdef AFS_PTHREAD_ENV
+#ifndef AFS_NT40_ENV
+int (*registerProgram) (pid_t, char *) = 0;
+int (*swapNameProgram) (pid_t, const char *, char *) = 0;
+#endif
+#else
+int (*registerProgram) (PROCESS, char *) = 0;
+int (*swapNameProgram) (PROCESS, const char *, char *) = 0;
+#endif
+#endif
/* Local static routines */
static void rxi_DestroyConnectionNoLock(register struct rx_connection *conn);
static pthread_mutex_t epoch_mutex;
static pthread_mutex_t rx_init_mutex;
static pthread_mutex_t rx_debug_mutex;
+static pthread_mutex_t rx_rpc_stats;
static void
rxi_InitPthread(void)
assert(pthread_key_create(&rx_ts_info_key, NULL) == 0);
rxkad_global_stats_init();
+
+ MUTEX_INIT(&rx_rpc_stats, "rx_rpc_stats", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_freePktQ_lock, "rx_freePktQ_lock", MUTEX_DEFAULT, 0);
+#ifdef RX_ENABLE_LOCKS
+#ifdef RX_LOCKS_DB
+ rxdb_init();
+#endif /* RX_LOCKS_DB */
+ MUTEX_INIT(&freeSQEList_lock, "freeSQEList lock", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_freeCallQueue_lock, "rx_freeCallQueue_lock", MUTEX_DEFAULT,
+ 0);
+ CV_INIT(&rx_waitingForPackets_cv, "rx_waitingForPackets_cv", CV_DEFAULT,
+ 0);
+ MUTEX_INIT(&rx_peerHashTable_lock, "rx_peerHashTable_lock", MUTEX_DEFAULT,
+ 0);
+ MUTEX_INIT(&rx_connHashTable_lock, "rx_connHashTable_lock", MUTEX_DEFAULT,
+ 0);
+ MUTEX_INIT(&rx_serverPool_lock, "rx_serverPool_lock", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rxi_keyCreate_lock, "rxi_keyCreate_lock", MUTEX_DEFAULT, 0);
+#endif /* RX_ENABLE_LOCKS */
}
pthread_once_t rx_once_init = PTHREAD_ONCE_INIT;
* to manipulate the queue.
*/
-#ifdef RX_ENABLE_LOCKS
+#if defined(RX_ENABLE_LOCKS) && defined(KERNEL)
static afs_kmutex_t rx_rpc_stats;
-void rxi_StartUnlocked();
+void rxi_StartUnlocked(struct rxevent *event, void *arg0,
+		       void *arg1, int istack);
#endif
/* We keep a "last conn pointer" in rxi_FindConnection. The odds are
* by the kernel. Whether this will ever overlap anything in
* /etc/services is anybody's guess... Returns 0 on success, -1 on
* error. */
-static int rxinit_status = 1;
+#ifndef AFS_NT40_ENV
+static
+#endif
+int rxinit_status = 1;
#ifdef AFS_PTHREAD_ENV
/*
* This mutex protects the following global variables:
UNLOCK_RX_INIT;
return RX_ADDRINUSE;
}
-#ifdef RX_ENABLE_LOCKS
+#if defined(RX_ENABLE_LOCKS) && defined(KERNEL)
#ifdef RX_LOCKS_DB
rxdb_init();
#endif /* RX_LOCKS_DB */
MUTEX_INIT(&rx_connHashTable_lock, "rx_connHashTable_lock", MUTEX_DEFAULT,
0);
MUTEX_INIT(&rx_serverPool_lock, "rx_serverPool_lock", MUTEX_DEFAULT, 0);
-#ifndef KERNEL
- MUTEX_INIT(&rxi_keyCreate_lock, "rxi_keyCreate_lock", MUTEX_DEFAULT, 0);
-#endif /* !KERNEL */
-#if defined(KERNEL) && defined(AFS_HPUX110_ENV)
+#if defined(AFS_HPUX110_ENV)
if (!uniprocessor)
rx_sleepLock = alloc_spinlock(LAST_HELD_ORDER - 10, "rx_sleepLock");
-#endif /* KERNEL && AFS_HPUX110_ENV */
-#endif /* RX_ENABLE_LOCKS */
+#endif /* AFS_HPUX110_ENV */
+#endif /* RX_ENABLE_LOCKS && KERNEL */
rxi_nCalls = 0;
rx_connDeadTime = 12;
rx_tranquil = 0; /* reset flag */
- memset((char *)&rx_stats, 0, sizeof(struct rx_stats));
+ memset((char *)&rx_stats, 0, sizeof(struct rx_statistics));
htable = (char *)
osi_Alloc(rx_hashTableSize * sizeof(struct rx_connection *));
PIN(htable, rx_hashTableSize * sizeof(struct rx_connection *)); /* XXXXX */
rx_port = 0;
#else
struct sockaddr_in addr;
- int addrlen = sizeof(addr);
+#ifdef AFS_NT40_ENV
+ int addrlen = sizeof(addr);
+#else
+ socklen_t addrlen = sizeof(addr);
+#endif
if (getsockname((int)rx_socket, (struct sockaddr *)&addr, &addrlen)) {
rx_Finalize();
return -1;
}
/* Turn on reaping of idle server connections */
- rxi_ReapConnections();
+ rxi_ReapConnections(NULL, NULL, NULL);
USERPRI;
register struct rx_securityClass *securityObject,
int serviceSecurityIndex)
{
- int hashindex;
- afs_int32 cid;
- register struct rx_connection *conn;
+ int hashindex, i;
+ afs_int32 cid, cix, nclones;
+ register struct rx_connection *conn, *tconn, *ptconn;
SPLVAR;
clock_NewTime();
dpf(("rx_NewConnection(host %x, port %u, service %u, securityObject %x, serviceSecurityIndex %d)\n", ntohl(shost), ntohs(sport), sservice, securityObject, serviceSecurityIndex));
+ conn = tconn = 0;
+ nclones = rx_max_clones_per_connection;
+
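+    /* A single rx_connection carries at most RX_MAXCALLS concurrent calls.
+     * To allow more calls to be in flight at once, a chain of clone
+     * connections is allocated alongside the master connection; call
+     * channels are then handed out from the master or from any clone
+     * (see rx_NewCall). */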
/* Vasilsi said: "NETPRI protects Cid and Alloc", but can this be true in
* the case of kmem_alloc? */
- conn = rxi_AllocConnection();
-#ifdef RX_ENABLE_LOCKS
- MUTEX_INIT(&conn->conn_call_lock, "conn call lock", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&conn->conn_data_lock, "conn call lock", MUTEX_DEFAULT, 0);
- CV_INIT(&conn->conn_call_cv, "conn call cv", CV_DEFAULT, 0);
-#endif
+
NETPRI;
MUTEX_ENTER(&rx_connHashTable_lock);
- cid = (rx_nextCid += RX_MAXCALLS);
- conn->type = RX_CLIENT_CONNECTION;
- conn->cid = cid;
- conn->epoch = rx_epoch;
- conn->peer = rxi_FindPeer(shost, sport, 0, 1);
- conn->serviceId = sservice;
- conn->securityObject = securityObject;
- conn->securityData = (void *) 0;
- conn->securityIndex = serviceSecurityIndex;
- rx_SetConnDeadTime(conn, rx_connDeadTime);
- conn->ackRate = RX_FAST_ACK_RATE;
- conn->nSpecific = 0;
- conn->specific = NULL;
- conn->challengeEvent = NULL;
- conn->delayedAbortEvent = NULL;
- conn->abortCount = 0;
- conn->error = 0;
-
- RXS_NewConnection(securityObject, conn);
- hashindex =
- CONN_HASH(shost, sport, conn->cid, conn->epoch, RX_CLIENT_CONNECTION);
- conn->refCount++; /* no lock required since only this thread knows... */
- conn->next = rx_connHashTable[hashindex];
- rx_connHashTable[hashindex] = conn;
- rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
+    /* allocate the master connection and each of its requested clones */
+    for (cix = 0; cix <= nclones; ++cix) {
+
+ ptconn = tconn;
+ tconn = rxi_AllocConnection();
+ tconn->type = RX_CLIENT_CONNECTION;
+ tconn->epoch = rx_epoch;
+ tconn->peer = rxi_FindPeer(shost, sport, 0, 1);
+ tconn->serviceId = sservice;
+ tconn->securityObject = securityObject;
+ tconn->securityData = (void *) 0;
+ tconn->securityIndex = serviceSecurityIndex;
+ tconn->ackRate = RX_FAST_ACK_RATE;
+ tconn->nSpecific = 0;
+ tconn->specific = NULL;
+ tconn->challengeEvent = NULL;
+ tconn->delayedAbortEvent = NULL;
+ tconn->abortCount = 0;
+ tconn->error = 0;
+ for (i = 0; i < RX_MAXCALLS; i++) {
+ tconn->twind[i] = rx_initSendWindow;
+ tconn->rwind[i] = rx_initReceiveWindow;
+ }
+ tconn->parent = 0;
+ tconn->next_clone = 0;
+ tconn->nclones = nclones;
+ rx_SetConnDeadTime(tconn, rx_connDeadTime);
+
+ if(cix == 0) {
+ conn = tconn;
+ } else {
+ tconn->flags |= RX_CLONED_CONNECTION;
+ tconn->parent = conn;
+ ptconn->next_clone = tconn;
+ }
+
+ /* generic connection setup */
+#ifdef RX_ENABLE_LOCKS
+ MUTEX_INIT(&tconn->conn_call_lock, "conn call lock", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&tconn->conn_data_lock, "conn data lock", MUTEX_DEFAULT, 0);
+ CV_INIT(&tconn->conn_call_cv, "conn call cv", CV_DEFAULT, 0);
+#endif
+ cid = (rx_nextCid += RX_MAXCALLS);
+ tconn->cid = cid;
+ RXS_NewConnection(securityObject, tconn);
+ hashindex =
+ CONN_HASH(shost, sport, tconn->cid, tconn->epoch,
+ RX_CLIENT_CONNECTION);
+ tconn->refCount++; /* no lock required since only this thread knows */
+ tconn->next = rx_connHashTable[hashindex];
+ rx_connHashTable[hashindex] = tconn;
+ rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
+ }
+
MUTEX_EXIT(&rx_connHashTable_lock);
USERPRI;
return conn;
void
rx_SetConnDeadTime(register struct rx_connection *conn, register int seconds)
{
- /* The idea is to set the dead time to a value that allows several
- * keepalives to be dropped without timing out the connection. */
- conn->secondsUntilDead = MAX(seconds, 6);
- conn->secondsUntilPing = conn->secondsUntilDead / 6;
+ /* The idea is to set the dead time to a value that allows several
+ * keepalives to be dropped without timing out the connection. */
+ struct rx_connection *tconn;
+ tconn = conn;
+ do {
+ tconn->secondsUntilDead = MAX(seconds, 6);
+ tconn->secondsUntilPing = tconn->secondsUntilDead / 6;
+ } while(tconn->next_clone && (tconn = tconn->next_clone));
}
int rxi_lowPeerRefCount = 0;
void
rxi_DestroyConnection(register struct rx_connection *conn)
{
- MUTEX_ENTER(&rx_connHashTable_lock);
- rxi_DestroyConnectionNoLock(conn);
- /* conn should be at the head of the cleanup list */
- if (conn == rx_connCleanup_list) {
+ register struct rx_connection *tconn, *dtconn;
+
+ MUTEX_ENTER(&rx_connHashTable_lock);
+
+    if (!(conn->flags & RX_CLONED_CONNECTION)) {
+	/* this is a master connection: tear down its clones first,
+	 * detaching the clone list so it is only walked once */
+	tconn = conn->next_clone;
+	conn->next_clone = 0;
+	while (tconn) {
+	    dtconn = tconn;
+	    tconn = tconn->next_clone;
+	    rxi_DestroyConnectionNoLock(dtconn);
+	    /* if the clone is now at the head of the cleanup list, its
+	     * last reference is gone; finish tearing it down */
+	    if (dtconn == rx_connCleanup_list) {
+		rx_connCleanup_list = rx_connCleanup_list->next;
+		MUTEX_EXIT(&rx_connHashTable_lock);
+		/* rxi_CleanupConnection will free dtconn */
+		rxi_CleanupConnection(dtconn);
+		MUTEX_ENTER(&rx_connHashTable_lock);
+		(conn->nclones)--;
+	    }
+	}
+    }
+
+ rxi_DestroyConnectionNoLock(conn);
+ /* conn should be at the head of the cleanup list */
+ if (conn == rx_connCleanup_list) {
rx_connCleanup_list = rx_connCleanup_list->next;
MUTEX_EXIT(&rx_connHashTable_lock);
rxi_CleanupConnection(conn);
- }
+ }
#ifdef RX_ENABLE_LOCKS
- else {
+ else {
MUTEX_EXIT(&rx_connHashTable_lock);
- }
+ }
#endif /* RX_ENABLE_LOCKS */
}
USERPRI;
}
+#ifdef AFS_GLOBAL_RXLOCK_KERNEL
/* Wait for the transmit queue to no longer be busy.
* requires the call->lock to be held */
static void rxi_WaitforTQBusy(struct rx_call *call) {
}
}
}
+#endif
+
/* Start a new rx remote procedure call, on the specified connection.
* If wait is set to 1, wait for a free call channel; otherwise return
* 0. Maxtime gives the maximum number of seconds this call may take,
{
register int i;
register struct rx_call *call;
+ register struct rx_connection *tconn;
struct clock queueTime;
SPLVAR;
}
MUTEX_EXIT(&conn->conn_data_lock);
+ /* search for next free call on this connection or
+ * its clones, if any */
for (;;) {
- for (i = 0; i < RX_MAXCALLS; i++) {
- call = conn->call[i];
- if (call) {
- MUTEX_ENTER(&call->lock);
- if (call->state == RX_STATE_DALLY) {
- rxi_ResetCall(call, 0);
- (*call->callNumber)++;
- break;
+ tconn = conn;
+ do {
+ for (i = 0; i < RX_MAXCALLS; i++) {
+ call = tconn->call[i];
+ if (call) {
+ MUTEX_ENTER(&call->lock);
+ if (call->state == RX_STATE_DALLY) {
+ rxi_ResetCall(call, 0);
+ (*call->callNumber)++;
+ goto f_call;
+ }
+ MUTEX_EXIT(&call->lock);
+ } else {
+ call = rxi_NewCall(tconn, i);
+ goto f_call;
+ }
+ } /* for i < RX_MAXCALLS */
+ } while (tconn->next_clone && (tconn = tconn->next_clone));
+
+ f_call:
+
+ if (i < RX_MAXCALLS) {
+ break;
}
- MUTEX_EXIT(&call->lock);
- } else {
- call = rxi_NewCall(conn, i);
- break;
- }
- }
- if (i < RX_MAXCALLS) {
- break;
- }
- MUTEX_ENTER(&conn->conn_data_lock);
- conn->flags |= RX_CONN_MAKECALL_WAITING;
- conn->makeCallWaiters++;
- MUTEX_EXIT(&conn->conn_data_lock);
+
+    /* if we get here, every call channel on this connection (and on
+     * all of its clones) is currently in use, so wait for one to free up */
+
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->flags |= RX_CONN_MAKECALL_WAITING;
+ conn->makeCallWaiters++;
+ MUTEX_EXIT(&conn->conn_data_lock);
#ifdef RX_ENABLE_LOCKS
- CV_WAIT(&conn->conn_call_cv, &conn->conn_call_lock);
+ CV_WAIT(&conn->conn_call_cv, &conn->conn_call_lock);
#else
- osi_rxSleep(conn);
+ osi_rxSleep(conn);
#endif
- MUTEX_ENTER(&conn->conn_data_lock);
- conn->makeCallWaiters--;
- MUTEX_EXIT(&conn->conn_data_lock);
- }
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->makeCallWaiters--;
+ MUTEX_EXIT(&conn->conn_data_lock);
+ } /* for ;; */
/*
* Wake up anyone else who might be giving us a chance to
* run (see code above that avoids resource starvation).
MUTEX_ENTER(&call->lock);
rxi_WaitforTQBusy(call);
if (call->flags & RX_CALL_TQ_CLEARME) {
- rxi_ClearTransmitQueue(call, 0);
+ rxi_ClearTransmitQueue(call, 1);
queue_Init(&call->tq);
}
MUTEX_EXIT(&call->lock);
service->minProcs = 0;
service->maxProcs = 1;
service->idleDeadTime = 60;
+ service->idleDeadErr = 0;
service->connDeadTime = rx_connDeadTime;
service->executeRequestProc = serviceProc;
service->checkReach = 0;
* kernel version, and may interrupt the macros rx_Read or
* rx_Write, which run at normal priority for efficiency. */
if (call->currentPacket) {
- queue_Prepend(&call->iovq, call->currentPacket);
+ call->currentPacket->flags &= ~RX_PKTFLAG_CP;
+ rxi_FreePacket(call->currentPacket);
call->currentPacket = (struct rx_packet *)0;
}
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
/* Now, if TQ wasn't cleared earlier, do it now. */
if (call->flags & RX_CALL_TQ_CLEARME) {
- rxi_ClearTransmitQueue(call, 0);
+ rxi_ClearTransmitQueue(call, 1);
queue_Init(&call->tq);
}
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
call->conn = conn;
rxi_ResetCall(call, 1);
} else {
+
call = (struct rx_call *)rxi_Alloc(sizeof(struct rx_call));
MUTEX_EXIT(&rx_freeCallQueue_lock);
}
call->channel = channel;
call->callNumber = &conn->callNumber[channel];
+ call->rwind = conn->rwind[channel];
+ call->twind = conn->twind[channel];
/* Note that the next expected call number is retained (in
* conn->callNumber[i]), even if we reallocate the call structure
*/
register char *p;
rx_MutexAdd1Increment2(rxi_Allocsize, (afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
- p = (char *)osi_Alloc(size);
+    p = (char *)
+#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
+ afs_osi_Alloc_NoSleep(size);
+#else
+ osi_Alloc(size);
+#endif
if (!p)
osi_Panic("rxi_Alloc error");
memset(p, 0, size);
osi_Free(addr, size);
}
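+/* Update the interface MTU recorded for a peer.  If port is zero, every
+ * peer structure whose host address matches is updated; otherwise only the
+ * exact (host, port) peer is touched.  ifMTU is only ever reduced (clamped
+ * to MIN(mtu, ifMTU)), and natMTU is re-derived from it via
+ * rxi_AdjustIfMTU. */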
+void
+rxi_SetPeerMtu(register afs_uint32 host, register afs_uint32 port, int mtu)
+{
+ struct rx_peer **peer_ptr, **peer_end;
+ int hashIndex;
+
+ MUTEX_ENTER(&rx_peerHashTable_lock);
+ if (port == 0) {
+ for (peer_ptr = &rx_peerHashTable[0], peer_end =
+ &rx_peerHashTable[rx_hashTableSize]; peer_ptr < peer_end;
+ peer_ptr++) {
+ struct rx_peer *peer, *next;
+ for (peer = *peer_ptr; peer; peer = next) {
+ next = peer->next;
+ if (host == peer->host) {
+ MUTEX_ENTER(&peer->peer_lock);
+ peer->ifMTU=MIN(mtu, peer->ifMTU);
+ peer->natMTU = rxi_AdjustIfMTU(peer->ifMTU);
+ MUTEX_EXIT(&peer->peer_lock);
+ }
+ }
+ }
+ } else {
+ struct rx_peer *peer;
+ hashIndex = PEER_HASH(host, port);
+ for (peer = rx_peerHashTable[hashIndex]; peer; peer = peer->next) {
+ if ((peer->host == host) && (peer->port == port)) {
+ MUTEX_ENTER(&peer->peer_lock);
+ peer->ifMTU=MIN(mtu, peer->ifMTU);
+ peer->natMTU = rxi_AdjustIfMTU(peer->ifMTU);
+ MUTEX_EXIT(&peer->peer_lock);
+ }
+ }
+ }
+ MUTEX_EXIT(&rx_peerHashTable_lock);
+}
+
/* Find the peer process represented by the supplied (host,port)
* combination. If there is no appropriate active peer structure, a
* new one will be allocated and initialized
register u_short port, u_short serviceId, afs_uint32 cid,
afs_uint32 epoch, int type, u_int securityIndex)
{
- int hashindex, flag;
+ int hashindex, flag, i;
register struct rx_connection *conn;
hashindex = CONN_HASH(host, port, cid, epoch, type);
MUTEX_ENTER(&rx_connHashTable_lock);
conn->specific = NULL;
rx_SetConnDeadTime(conn, service->connDeadTime);
rx_SetConnIdleDeadTime(conn, service->idleDeadTime);
+ rx_SetServerConnIdleDeadErr(conn, service->idleDeadErr);
+ for (i = 0; i < RX_MAXCALLS; i++) {
+ conn->twind[i] = rx_initSendWindow;
+ conn->rwind[i] = rx_initReceiveWindow;
+ }
/* Notify security object of the new connection */
RXS_NewConnection(conn->securityObject, conn);
/* XXXX Connection timeout? */
* containing the network address. Both can be modified. The return value, if
* non-zero, indicates that the packet should be dropped. */
-int (*rx_justReceived) () = 0;
-int (*rx_almostSent) () = 0;
+int (*rx_justReceived) (struct rx_packet *, struct sockaddr_in *) = 0;
+int (*rx_almostSent) (struct rx_packet *, struct sockaddr_in *) = 0;
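+/* An illustrative (hypothetical) hook, showing the expected shape of such a
+ * filter; this one drops everything arriving from 127.0.0.1:
+ *
+ *     static int
+ *     example_drop_filter(struct rx_packet *pkt, struct sockaddr_in *addr)
+ *     {
+ *         if (addr->sin_addr.s_addr == htonl(0x7f000001))
+ *             return 1;    // non-zero: rx discards the packet
+ *         return 0;        // zero: rx processes the packet normally
+ *     }
+ *
+ *     rx_justReceived = example_drop_filter;
+ */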
/* A packet has been received off the interface. Np is the packet, socket is
* the socket number it was received from (useful in determining which service
}
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
rxi_ClearTransmitQueue(call, 0);
+ rxevent_Cancel(call->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE);
break;
default:
/* Should not reach here, unless the peer is broken: send an abort
#endif /* KERNEL */
static void
-rxi_CheckReachEvent(struct rxevent *event, struct rx_connection *conn,
- struct rx_call *acall)
+rxi_CheckReachEvent(struct rxevent *event, void *arg1, void *arg2)
{
+ struct rx_connection *conn = arg1;
+ struct rx_call *acall = arg2;
struct rx_call *call = acall;
struct clock when, now;
int i, waiting;
/* It's the next packet. Stick it on the receive queue
* for this call. Set newPackets to make sure we wake
* the reader once all packets have been processed */
+ np->flags |= RX_PKTFLAG_RQ;
queue_Prepend(&call->rq, np);
call->nSoftAcks++;
np = NULL; /* We can't use this anymore */
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
{
queue_Remove(tp);
+ tp->flags &= ~RX_PKTFLAG_TQ;
rxi_FreePacket(tp); /* rxi_FreePacket mustn't wake up anyone, preemptively. */
}
}
if (tSize < call->twind) { /* smaller than our send */
call->twind = tSize; /* window, we must send less... */
call->ssthresh = MIN(call->twind, call->ssthresh);
+ call->conn->twind[call->channel] = call->twind;
}
/* Only send jumbograms to 3.4a fileservers. 3.3a RX gets the
*/
if (tSize < call->twind) {
call->twind = tSize;
+ call->conn->twind[call->channel] = call->twind;
call->ssthresh = MIN(call->twind, call->ssthresh);
} else if (tSize > call->twind) {
call->twind = tSize;
+ call->conn->twind[call->channel] = call->twind;
}
/*
call->nNacks = nNacked;
}
} else {
- if (newAckCount) {
- call->nAcks++;
- }
+ call->nAcks += newAckCount;
call->nNacks = 0;
}
&& call->tfirst + call->nSoftAcked >= call->tnext) {
call->state = RX_STATE_DALLY;
rxi_ClearTransmitQueue(call, 0);
+ rxevent_Cancel(call->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE);
} else if (!queue_IsEmpty(&call->tq)) {
rxi_Start(0, call, 0, istack);
}
}
void
-rxi_SendDelayedAck(struct rxevent *event, register struct rx_call *call,
- char *dummy)
+rxi_SendDelayedAck(struct rxevent *event, void *arg1, void *unused)
{
+ struct rx_call *call = arg1;
#ifdef RX_ENABLE_LOCKS
if (event) {
MUTEX_ENTER(&call->lock);
}
rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
- rxevent_Cancel(call->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE);
call->tfirst = call->tnext;
call->nSoftAcked = 0;
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
- rxevent_Cancel(call->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE);
call->tfirst = call->tnext; /* implicitly acknowledge all data already sent */
call->nSoftAcked = 0;
if (call->error)
error = call->error;
-#ifdef RX_GLOBAL_RXLOCK_KERNEL
+#ifdef AFS_GLOBAL_RXLOCK_KERNEL
if (!((call->flags & RX_CALL_TQ_BUSY) || (call->tqWaiters > 0))) {
rxi_ResetCall(call, 0);
}
MUTEX_EXIT(&peer->peer_lock);
flags = call->flags;
- rxi_ClearReceiveQueue(call);
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
if (flags & RX_CALL_TQ_BUSY) {
call->flags = RX_CALL_TQ_CLEARME | RX_CALL_TQ_BUSY;
} else
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
{
- rxi_ClearTransmitQueue(call, 0);
- queue_Init(&call->tq);
+ rxi_ClearTransmitQueue(call, 1);
+	    /* rxi_ClearTransmitQueue leaves call->tq initialized, so there
+	     * is no need to queue_Init it again here */
if (call->tqWaiters || (flags & RX_CALL_TQ_WAIT)) {
dpf(("rcall %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
}
call->tqWaiters--;
}
}
- queue_Init(&call->rq);
+
+ rxi_ClearReceiveQueue(call);
+    /* rxi_ClearReceiveQueue leaves call->rq initialized, so there is no
+     * need to queue_Init it again here */
+
+ if (call->currentPacket) {
+ call->currentPacket->flags &= ~RX_PKTFLAG_CP;
+ rxi_FreePacket(call->currentPacket);
+ call->currentPacket = (struct rx_packet *)0;
+ }
+ call->curlen = call->nLeft = call->nFree = 0;
+
+ rxi_FreePackets(0, &call->iovq);
+
call->error = 0;
- call->rwind = rx_initReceiveWindow;
- call->twind = rx_initSendWindow;
+ call->twind = call->conn->twind[call->channel];
+ call->rwind = call->conn->rwind[call->channel];
call->nSoftAcked = 0;
call->nextCwind = 0;
call->nAcks = 0;
* Open the receive window once a thread starts reading packets
*/
if (call->rnext > 1) {
- call->rwind = rx_maxReceiveWindow;
+ call->conn->rwind[call->channel] = call->rwind = rx_maxReceiveWindow;
}
call->nHardAcks = 0;
/* Update last send time for this call (for keep-alive
* processing), and for the connection (so that we can discover
* idle connections) */
- conn->lastSendTime = call->lastSendTime = clock_Sec();
+ call->lastSendData = conn->lastSendTime = call->lastSendTime = clock_Sec();
}
/* When sending packets we need to follow these rules:
#ifdef RX_ENABLE_LOCKS
/* Call rxi_Start, below, but with the call lock held. */
void
-rxi_StartUnlocked(struct rxevent *event, register struct rx_call *call,
- void *arg1, int istack)
+rxi_StartUnlocked(struct rxevent *event,
+ void *arg0, void *arg1, int istack)
{
+ struct rx_call *call = arg0;
+
MUTEX_ENTER(&call->lock);
rxi_Start(event, call, arg1, istack);
MUTEX_EXIT(&call->lock);
* better optimized for new packets, the usual case, now that we've
* got rid of queues of send packets. XXXXXXXXXXX */
void
-rxi_Start(struct rxevent *event, register struct rx_call *call,
- void *arg1, int istack)
+rxi_Start(struct rxevent *event,
+ void *arg0, void *arg1, int istack)
{
+ struct rx_call *call = arg0;
+
struct rx_packet *p;
register struct rx_packet *nxp; /* Next pointer for queue_Scan */
struct rx_peer *peer = call->conn->peer;
nXmitPackets = 0;
maxXmitPackets = MIN(call->twind, call->cwind);
xmitList = (struct rx_packet **)
- osi_Alloc(maxXmitPackets * sizeof(struct rx_packet *));
+#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
+ /* XXXX else we must drop any mtx we hold */
+ afs_osi_Alloc_NoSleep(maxXmitPackets * sizeof(struct rx_packet *));
+#else
+ osi_Alloc(maxXmitPackets * sizeof(struct rx_packet *));
+#endif
if (xmitList == NULL)
osi_Panic("rxi_Start, failed to allocate xmit list");
for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
if (p->header.seq < call->tfirst
&& (p->flags & RX_PKTFLAG_ACKED)) {
queue_Remove(p);
+ p->flags &= ~RX_PKTFLAG_TQ;
rxi_FreePacket(p);
} else
missing = 1;
* processing), and for the connection (so that we can discover
* idle connections) */
conn->lastSendTime = call->lastSendTime = clock_Sec();
+ /* Don't count keepalives here, so idleness can be tracked. */
+ if (p->header.type != RX_PACKET_TYPE_ACK)
+ call->lastSendData = call->lastSendTime;
}
afs_uint32 now;
afs_uint32 deadTime;
-#ifdef RX_GLOBAL_RXLOCK_KERNEL
+#ifdef AFS_GLOBAL_RXLOCK_KERNEL
if (call->flags & RX_CALL_TQ_BUSY) {
/* Call is active and will be reset by rxi_Start if it's
* in an error state.
* number of seconds. */
if (now > (call->lastReceiveTime + deadTime)) {
if (call->state == RX_STATE_ACTIVE) {
+#ifdef ADAPT_PMTU
+#if defined(KERNEL) && defined(AFS_SUN57_ENV)
+ ire_t *ire;
+#if defined(AFS_SUN510_ENV) && defined(GLOBAL_NETSTACKID)
+ netstack_t *ns = netstack_find_by_stackid(GLOBAL_NETSTACKID);
+ ip_stack_t *ipst = ns->netstack_ip;
+#endif
+ ire = ire_cache_lookup(call->conn->peer->host
+#if defined(AFS_SUN510_ENV) && defined(ALL_ZONES)
+ , ALL_ZONES
+#if defined(AFS_SUN510_ENV) && (defined(ICL_3_ARG) || defined(GLOBAL_NETSTACKID))
+ , NULL
+#if defined(AFS_SUN510_ENV) && defined(GLOBAL_NETSTACKID)
+ , ipst
+#endif
+#endif
+#endif
+ );
+
+ if (ire && ire->ire_max_frag > 0)
+ rxi_SetPeerMtu(call->conn->peer->host, 0, ire->ire_max_frag);
+#if defined(AFS_SUN510_ENV) && defined(GLOBAL_NETSTACKID)
+ netstack_rele(ns);
+#endif
+#endif
+#endif /* ADAPT_PMTU */
rxi_CallError(call, RX_CALL_DEAD);
return -1;
} else {
return -1;
}
}
+ if (call->lastSendData && conn->idleDeadTime && (conn->idleDeadErr != 0)
+ && ((call->lastSendData + conn->idleDeadTime) < now)) {
+ if (call->state == RX_STATE_ACTIVE) {
+ rxi_CallError(call, conn->idleDeadErr);
+ return -1;
+ }
+ }
/* see if we have a hard timeout */
if (conn->hardDeadTime
&& (now > (conn->hardDeadTime + call->startTime.sec))) {
* keep-alive packet (if we're actually trying to keep the call alive)
*/
void
-rxi_KeepAliveEvent(struct rxevent *event, register struct rx_call *call,
- char *dummy)
+rxi_KeepAliveEvent(struct rxevent *event, void *arg1, void *dummy)
{
+ struct rx_call *call = arg1;
struct rx_connection *conn;
afs_uint32 now;
* that have been delayed to throttle looping clients. */
void
rxi_SendDelayedConnAbort(struct rxevent *event,
- register struct rx_connection *conn, char *dummy)
+ void *arg1, void *unused)
{
+ struct rx_connection *conn = arg1;
+
afs_int32 error;
struct rx_packet *packet;
/* This routine is called to send call abort messages
* that have been delayed to throttle looping clients. */
void
-rxi_SendDelayedCallAbort(struct rxevent *event, register struct rx_call *call,
- char *dummy)
+rxi_SendDelayedCallAbort(struct rxevent *event,
+ void *arg1, void *dummy)
{
+ struct rx_call *call = arg1;
+
afs_int32 error;
struct rx_packet *packet;
* issues a challenge to the client, which is obtained from the
* security object associated with the connection */
void
-rxi_ChallengeEvent(struct rxevent *event, register struct rx_connection *conn,
- void *arg1, int tries)
+rxi_ChallengeEvent(struct rxevent *event,
+ void *arg0, void *arg1, int tries)
{
+ struct rx_connection *conn = arg0;
+
conn->challengeEvent = NULL;
if (RXS_CheckAuthentication(conn->securityObject, conn) != 0) {
register struct rx_packet *packet;
/* Find all server connections that have not been active for a long time, and
* toss them */
void
-rxi_ReapConnections(void)
+rxi_ReapConnections(struct rxevent *unused, void *unused1, void *unused2)
{
struct clock now, when;
clock_GetTime(&now);
/* Don't call this debugging routine directly; use dpf */
void
-rxi_DebugPrint(char *format, int a1, int a2, int a3, int a4, int a5, int a6,
- int a7, int a8, int a9, int a10, int a11, int a12, int a13,
- int a14, int a15)
+rxi_DebugPrint(char *format, ...)
{
#ifdef AFS_NT40_ENV
char msg[512];
len = _snprintf(tformat, sizeof(tformat), "tid[%d] %s", GetCurrentThreadId(), format);
if (len > 0) {
- len = _snprintf(msg, sizeof(msg)-2,
- tformat, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10,
- a11, a12, a13, a14, a15);
+ len = _vsnprintf(msg, sizeof(msg)-2, tformat, ap);
if (len > 0) {
if (msg[len-1] != '\n') {
msg[len] = '\n';
OutputDebugString(msg);
}
}
+ va_end(ap);
#else
struct clock now;
+ va_list ap;
+
+ va_start(ap, format);
+
clock_GetTime(&now);
fprintf(rx_Log, " %u.%.3u:", (unsigned int)now.sec,
(unsigned int)now.usec / 1000);
- fprintf(rx_Log, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12,
- a13, a14, a15);
+ vfprintf(rx_Log, format, ap);
putc('\n', rx_Log);
+ va_end(ap);
#endif
}
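+/* Example of the dpf() usage referred to above (illustrative only):
+ *
+ *     dpf(("rx: resetting call %x on channel %d\n", call, call->channel));
+ *
+ * The double parentheses pass the whole argument list through as a single
+ * macro argument, so it can be discarded entirely when debugging output is
+ * compiled out. */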
* checking.
*/
void
-rx_PrintTheseStats(FILE * file, struct rx_stats *s, int size,
+rx_PrintTheseStats(FILE * file, struct rx_statistics *s, int size,
afs_int32 freePackets, char version)
{
int i;
- if (size != sizeof(struct rx_stats)) {
+ if (size != sizeof(struct rx_statistics)) {
fprintf(file,
- "Unexpected size of stats structure: was %d, expected %d\n",
- size, sizeof(struct rx_stats));
+		"Unexpected size of stats structure: was %d, expected %lu\n",
+		size, (unsigned long)sizeof(struct rx_statistics));
}
fprintf(file, "rx stats: free packets %d, allocs %d, ", (int)freePackets,
register afs_int32 code;
struct timeval tv_now, tv_wake, tv_delta;
struct sockaddr_in taddr, faddr;
+#ifdef AFS_NT40_ENV
int faddrLen;
+#else
+ socklen_t faddrLen;
+#endif
fd_set imask;
register char *tp;
afs_int32
rx_GetServerStats(osi_socket socket, afs_uint32 remoteAddr,
- afs_uint16 remotePort, struct rx_stats * stat,
+ afs_uint16 remotePort, struct rx_statistics * stat,
afs_uint32 * supportedValues)
{
struct rx_debugIn in;