/*
* Copyright 2000, International Business Machines Corporation and others.
* All Rights Reserved.
- *
+ *
* This software has been released under the terms of the IBM Public
* License. For details, see the LICENSE file in the top-level source
* directory or online at http://www.openafs.org/dl/license10.html
#undef kmem_free
#undef mem_alloc
#undef mem_free
-#undef register
#endif /* AFS_OSF_ENV */
#else /* !UKERNEL */
#include "afs/sysincludes.h"
== 0);
assert(pthread_key_create(&rx_thread_id_key, NULL) == 0);
assert(pthread_key_create(&rx_ts_info_key, NULL) == 0);
-
+
rxkad_global_stats_init();
MUTEX_INIT(&rx_rpc_stats, "rx_rpc_stats", MUTEX_DEFAULT, 0);
* rxi_totalMin
*/
-/*
+/*
* The rx_freePktQ_lock protects the following global variables:
- * rx_nFreePackets
+ * rx_nFreePackets
*/
/*
* are locked. To this end, the code has been modified under #ifdef
* RX_ENABLE_LOCKS so that quota checks and reservation occur at the
* same time. A new function, ReturnToServerPool() returns the allocation.
- *
+ *
 * A call can be on several queues (but only one at a time). When
* rxi_ResetCall wants to remove the call from a queue, it has to ensure
* that no one else is touching the queue. To this end, we store the address
void *arg1, int istack);
#endif
-/* We keep a "last conn pointer" in rxi_FindConnection. The odds are
-** pretty good that the next packet coming in is from the same connection
+/* We keep a "last conn pointer" in rxi_FindConnection. The odds are
+** pretty good that the next packet coming in is from the same connection
** as the last packet, since we're sending multiple packets in a transmit window.
*/
struct rx_connection *rxLastConn = 0;
#endif /* KERNEL */
char *htable, *ptable;
int tmp_status;
-
+
SPLVAR;
-
+
INIT_PTHREAD_LOCKS;
LOCK_RX_INIT;
if (rxinit_status == 0) {
rx_nFreePackets = 0;
queue_Init(&rx_freePacketQueue);
rxi_NeedMorePackets = FALSE;
+ rx_nPackets = 0; /* rx_nPackets is managed by rxi_MorePackets* */
+
+ /* enforce a minimum number of allocated packets */
+ if (rx_extraPackets < rxi_nSendFrags * rx_maxSendWindow)
+ rx_extraPackets = rxi_nSendFrags * rx_maxSendWindow;
+
+ /* allocate the initial free packet pool */
#ifdef RX_ENABLE_TSFPQ
- rx_nPackets = 0; /* in TSFPQ version, rx_nPackets is managed by rxi_MorePackets* */
rxi_MorePacketsTSFPQ(rx_extraPackets + RX_MAX_QUOTA + 2, RX_TS_FPQ_FLUSH_GLOBAL, 0);
#else /* RX_ENABLE_TSFPQ */
- rx_nPackets = rx_extraPackets + RX_MAX_QUOTA + 2; /* fudge */
- rxi_MorePackets(rx_nPackets);
+ rxi_MorePackets(rx_extraPackets + RX_MAX_QUOTA + 2); /* fudge */
#endif /* RX_ENABLE_TSFPQ */
rx_CheckPackets();
/* otherwise, can use only if there are enough to allow everyone
* to go to their min quota after this guy starts.
*/
+ MUTEX_ENTER(&rx_quota_mutex);
if (rxi_availProcs > rxi_minDeficit)
rc = 1;
+ MUTEX_EXIT(&rx_quota_mutex);
return rc;
}
#endif /* RX_ENABLE_LOCKS */
}
#ifdef RX_ENABLE_TSFPQ
/* no use leaving packets around in this thread's local queue if
- * it isn't getting donated to the server thread pool.
+ * it isn't getting donated to the server thread pool.
*/
rxi_FlushLocalPacketsTSFPQ();
#endif /* RX_ENABLE_TSFPQ */
}
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
-/* Wait for the transmit queue to no longer be busy.
+/* Wait for the transmit queue to no longer be busy.
* requires the call->lock to be held */
static void rxi_WaitforTQBusy(struct rx_call *call) {
while (call->flags & RX_CALL_TQ_BUSY) {
* 0. Maxtime gives the maximum number of seconds this call may take,
* after rx_NewCall returns. After this time interval, a call to any
* of rx_SendData, rx_ReadData, etc. will fail with RX_CALL_TIMEOUT.
- * For fine grain locking, we hold the conn_call_lock in order to
+ * For fine grain locking, we hold the conn_call_lock in order to
 * ensure that we don't get signalled after we found a call in an active
* state and before we go to sleep.
*/
* If so, let them go first to avoid starving them.
* This is a fairly simple scheme, and might not be
* a complete solution for large numbers of waiters.
- *
- * makeCallWaiters keeps track of the number of
- * threads waiting to make calls and the
- * RX_CONN_MAKECALL_WAITING flag bit is used to
+ *
+ * makeCallWaiters keeps track of the number of
+ * threads waiting to make calls and the
+ * RX_CONN_MAKECALL_WAITING flag bit is used to
* indicate that there are indeed calls waiting.
* The flag is set when the waiter is incremented.
* It is only cleared when makeCallWaiters is 0.
conn->makeCallWaiters--;
if (conn->makeCallWaiters == 0)
conn->flags &= ~RX_CONN_MAKECALL_WAITING;
- }
+ }
/* We are now the active thread in rx_NewCall */
conn->flags |= RX_CONN_MAKECALL_ACTIVE;
call->mode = RX_MODE_ERROR;
else
call->mode = RX_MODE_SENDING;
-
+
/* remember start time for call in case we have hard dead time limit */
call->queueTime = queueTime;
clock_GetTime(&call->startTime);
/* Turn on busy protocol. */
rxi_KeepAliveOn(call);
+ /* Attempt MTU discovery */
+ rxi_GrowMTUOn(call);
+
/*
* We are no longer the active thread in rx_NewCall
*/
/* Advertise a new service. A service is named locally by a UDP port
* number plus a 16-bit service id. Returns (struct rx_service *) 0
- * on a failure.
+ * on a failure.
*
char *serviceName; Name for identification purposes (e.g. the
service name might be used for probing for
statistics) */
struct rx_service *
-rx_NewServiceHost(afs_uint32 host, u_short port, u_short serviceId,
+rx_NewServiceHost(afs_uint32 host, u_short port, u_short serviceId,
char *serviceName, struct rx_securityClass **securityObjects,
- int nSecurityObjects,
+ int nSecurityObjects,
afs_int32(*serviceProc) (struct rx_call * acall))
{
osi_socket socket = OSI_NULLSOCKET;
tservice = rxi_AllocService();
NETPRI;
+
+#ifdef RX_ENABLE_LOCKS
+ MUTEX_INIT(&tservice->svc_data_lock, "svc data lock", MUTEX_DEFAULT, 0);
+#endif
+
for (i = 0; i < RX_MAX_SERVICES; i++) {
struct rx_service *service = rx_services[i];
if (service) {
service->connDeadTime = rx_connDeadTime;
service->executeRequestProc = serviceProc;
service->checkReach = 0;
+ service->nSpecific = 0;
+ service->specific = NULL;
rx_services[i] = service; /* not visible until now */
USERPRI;
return service;
/* Set configuration options for all of a service's security objects */
-afs_int32
-rx_SetSecurityConfiguration(struct rx_service *service,
+afs_int32
+rx_SetSecurityConfiguration(struct rx_service *service,
rx_securityConfigVariables type,
void *value)
{
int i;
for (i = 0; i<service->nSecurityObjects; i++) {
if (service->securityObjects[i]) {
- RXS_SetConfiguration(service->securityObjects[i], NULL, type,
+ RXS_SetConfiguration(service->securityObjects[i], NULL, type,
value, NULL);
}
}
/* meltdown:
* One thing that seems to happen is that all the server threads get
* tied up on some empty or slow call, and then a whole bunch of calls
- * arrive at once, using up the packet pool, so now there are more
+ * arrive at once, using up the packet pool, so now there are more
* empty calls. The most critical resources here are server threads
* and the free packet pool. The "doreclaim" code seems to help in
* general. I think that eventually we arrive in this state: there
* are lots of pending calls which do have all their packets present,
* so they won't be reclaimed, are multi-packet calls, so they won't
- * be scheduled until later, and thus are tying up most of the free
+ * be scheduled until later, and thus are tying up most of the free
* packet pool for a very long time.
* future options:
- * 1. schedule multi-packet calls if all the packets are present.
- * Probably CPU-bound operation, useful to return packets to pool.
+ * 1. schedule multi-packet calls if all the packets are present.
+ * Probably CPU-bound operation, useful to return packets to pool.
* Do what if there is a full window, but the last packet isn't here?
* 3. preserve one thread which *only* runs "best" calls, otherwise
* it sleeps and waits for that type of call.
- * 4. Don't necessarily reserve a whole window for each thread. In fact,
+ * 4. Don't necessarily reserve a whole window for each thread. In fact,
* the current dataquota business is badly broken. The quota isn't adjusted
* to reflect how many packets are presently queued for a running call.
* So, when we schedule a queued call with a full window of packets queued
MUTEX_EXIT(&freeSQEList_lock);
} else { /* otherwise allocate a new one and return that */
MUTEX_EXIT(&freeSQEList_lock);
- sq = (struct rx_serverQueueEntry *)
- rxi_Alloc(sizeof(struct rx_serverQueueEntry));
+ sq = rxi_Alloc(sizeof(struct rx_serverQueueEntry));
MUTEX_INIT(&sq->lock, "server Queue lock", MUTEX_DEFAULT, 0);
CV_INIT(&sq->cv, "server Queue lock", CV_DEFAULT, 0);
}
* already executing */
/* One thread will process calls FCFS (to prevent starvation),
* while the other threads may run ahead looking for calls which
- * have all their input data available immediately. This helps
+ * have all their input data available immediately. This helps
* keep threads from blocking, waiting for data from the client. */
for (queue_Scan(&rx_incomingCallQueue, tcall, ncall, rx_call)) {
service = tcall->conn->service;
if (tno == rxi_fcfs_thread_num
|| !tcall->queue_item_header.next) {
MUTEX_EXIT(&rx_pthread_mutex);
- /* If we're the fcfs thread , then we'll just use
- * this call. If we haven't been able to find an optimal
- * choice, and we're at the end of the list, then use a
+ /* If we're the fcfs thread , then we'll just use
+ * this call. If we haven't been able to find an optimal
+ * choice, and we're at the end of the list, then use a
* 2d choice if one has been identified. Otherwise... */
call = (choice2 ? choice2 : tcall);
service = call->conn->service;
MUTEX_EXIT(&freeSQEList_lock);
} else { /* otherwise allocate a new one and return that */
MUTEX_EXIT(&freeSQEList_lock);
- sq = (struct rx_serverQueueEntry *)
- rxi_Alloc(sizeof(struct rx_serverQueueEntry));
+ sq = rxi_Alloc(sizeof(struct rx_serverQueueEntry));
MUTEX_INIT(&sq->lock, "server Queue lock", MUTEX_DEFAULT, 0);
CV_INIT(&sq->cv, "server Queue lock", CV_DEFAULT, 0);
}
if (cur_service != NULL) {
cur_service->nRequestsRunning--;
+ MUTEX_ENTER(&rx_quota_mutex);
if (cur_service->nRequestsRunning < cur_service->minProcs)
rxi_minDeficit++;
rxi_availProcs++;
+ MUTEX_EXIT(&rx_quota_mutex);
}
if (queue_IsNotEmpty(&rx_incomingCallQueue)) {
struct rx_call *tcall, *ncall;
* already executing */
/* One thread will process calls FCFS (to prevent starvation),
* while the other threads may run ahead looking for calls which
- * have all their input data available immediately. This helps
+ * have all their input data available immediately. This helps
* keep threads from blocking, waiting for data from the client. */
choice2 = (struct rx_call *)0;
for (queue_Scan(&rx_incomingCallQueue, tcall, ncall, rx_call)) {
if (tno == rxi_fcfs_thread_num
|| !tcall->queue_item_header.next) {
MUTEX_EXIT(&rx_pthread_mutex);
- /* If we're the fcfs thread, then we'll just use
- * this call. If we haven't been able to find an optimal
- * choice, and we're at the end of the list, then use a
+ /* If we're the fcfs thread, then we'll just use
+ * this call. If we haven't been able to find an optimal
+ * choice, and we're at the end of the list, then use a
* 2d choice if one has been identified. Otherwise... */
call = (choice2 ? choice2 : tcall);
service = call->conn->service;
queue_Remove(call);
/* we can't schedule a call if there's no data!!! */
/* send an ack if there's no data, if we're missing the
- * first packet, or we're missing something between first
+ * first packet, or we're missing something between first
* and last -- there's a "hole" in the incoming data. */
if (queue_IsEmpty(&call->rq)
|| queue_First(&call->rq, rx_packet)->header.seq != 1
service->nRequestsRunning++;
/* just started call in minProcs pool, need fewer to maintain
* guarantee */
+ MUTEX_ENTER(&rx_quota_mutex);
if (service->nRequestsRunning <= service->minProcs)
rxi_minDeficit--;
rxi_availProcs--;
+ MUTEX_EXIT(&rx_quota_mutex);
rx_nWaiting--;
/* MUTEX_EXIT(&call->lock); */
} else {
 * and will also be called if there is an error condition on the connection or
* the call is complete. Used by multi rx to build a selection
* function which determines which of several calls is likely to be a
- * good one to read from.
+ * good one to read from.
* NOTE: the way this is currently implemented it is probably only a
* good idea to (1) use it immediately after a newcall (clients only)
* and (2) only use it once. Other uses currently void your warranty
rx_EndCall(struct rx_call *call, afs_int32 rc)
{
struct rx_connection *conn = call->conn;
- struct rx_service *service;
afs_int32 error;
SPLVAR;
rxi_CallError(call, rc);
/* Send an abort message to the peer if this error code has
* only just been set. If it was set previously, assume the
- * peer has already been sent the error code or will request it
+ * peer has already been sent the error code or will request it
*/
rxi_SendCallAbort(call, (struct rx_packet *)0, 0, 0);
}
if (call->mode == RX_MODE_SENDING) {
rxi_FlushWrite(call);
}
- service = conn->service;
rxi_calltrace(RX_CALL_END, call);
/* Call goes to hold state until reply packets are acknowledged */
if (call->tfirst + call->nSoftAcked < call->tnext) {
* kernel version, and may interrupt the macros rx_Read or
* rx_Write, which run at normal priority for efficiency. */
if (call->currentPacket) {
+#ifdef RX_TRACK_PACKETS
call->currentPacket->flags &= ~RX_PKTFLAG_CP;
+#endif
rxi_FreePacket(call->currentPacket);
call->currentPacket = (struct rx_packet *)0;
}
-
+
call->nLeft = call->nFree = call->curlen = 0;
/* Free any packets from the last call to ReadvProc/WritevProc */
rxi_ResetCall(call, 1);
} else {
- call = (struct rx_call *)rxi_Alloc(sizeof(struct rx_call));
+ call = rxi_Alloc(sizeof(struct rx_call));
#ifdef RXDEBUG_PACKET
call->allNextp = rx_allCallsp;
rx_allCallsp = call;
- call->call_id =
+ call->call_id =
#endif /* RXDEBUG_PACKET */
rx_MutexIncrement(rx_stats.nCallStructs, rx_stats_mutex);
-
+
MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_INIT(&call->lock, "call lock", MUTEX_DEFAULT, NULL);
MUTEX_ENTER(&call->lock);
}
afs_int32 rxi_Alloccnt = 0, rxi_Allocsize = 0;
-char *
+void *
rxi_Alloc(size_t size)
{
char *p;
osi_Free(addr, size);
}
-void
-rxi_SetPeerMtu(afs_uint32 host, afs_uint32 port, int mtu)
+void
+rxi_SetPeerMtu(struct rx_peer *peer, afs_uint32 host, afs_uint32 port, int mtu)
{
- struct rx_peer **peer_ptr, **peer_end;
- struct rx_peer *peer = NULL;
+ struct rx_peer **peer_ptr = NULL, **peer_end = NULL;
+ struct rx_peer *next = NULL;
int hashIndex;
- MUTEX_ENTER(&rx_peerHashTable_lock);
- if (port == 0) {
- for (peer_ptr = &rx_peerHashTable[0], peer_end =
- &rx_peerHashTable[rx_hashTableSize]; peer_ptr < peer_end;
- peer_ptr++) {
- struct rx_peer *next;
- for (peer = *peer_ptr; peer; peer = next) {
- next = peer->next;
- if (host == peer->host)
- break;
- }
- }
+ if (!peer) {
+ MUTEX_ENTER(&rx_peerHashTable_lock);
+ if (port == 0) {
+ peer_ptr = &rx_peerHashTable[0];
+ peer_end = &rx_peerHashTable[rx_hashTableSize];
+ next = NULL;
+ resume:
+ for ( ; peer_ptr < peer_end; peer_ptr++) {
+ if (!peer)
+ peer = *peer_ptr;
+ for ( ; peer; peer = next) {
+ next = peer->next;
+ if (host == peer->host)
+ break;
+ }
+ }
+ } else {
+ hashIndex = PEER_HASH(host, port);
+ for (peer = rx_peerHashTable[hashIndex]; peer; peer = peer->next) {
+ if ((peer->host == host) && (peer->port == port))
+ break;
+ }
+ }
} else {
- hashIndex = PEER_HASH(host, port);
- for (peer = rx_peerHashTable[hashIndex]; peer; peer = peer->next) {
- if ((peer->host == host) && (peer->port == port))
- break;
- }
+ MUTEX_ENTER(&rx_peerHashTable_lock);
}
if (peer) {
MUTEX_EXIT(&rx_peerHashTable_lock);
MUTEX_ENTER(&peer->peer_lock);
+ /* We don't handle dropping below min, so don't */
+ mtu = MAX(mtu, RX_MIN_PACKET_SIZE);
peer->ifMTU=MIN(mtu, peer->ifMTU);
peer->natMTU = rxi_AdjustIfMTU(peer->ifMTU);
+ /* if we tweaked this down, need to tune our peer MTU too */
+ peer->MTU = MIN(peer->MTU, peer->natMTU);
+ /* if we discovered a sub-1500 mtu, degrade */
+ if (peer->ifMTU < OLD_MAX_PACKET_SIZE)
+ peer->maxDgramPackets = 1;
+ /* We no longer have valid peer packet information */
+ if (peer->maxPacketSize-RX_IPUDP_SIZE > peer->ifMTU)
+ peer->maxPacketSize = 0;
MUTEX_EXIT(&peer->peer_lock);
MUTEX_ENTER(&rx_peerHashTable_lock);
- peer->refCount++;
+ peer->refCount--;
+ if (host && !port) {
+ peer = next;
+ /* pick up where we left off */
+ goto resume;
+ }
}
MUTEX_EXIT(&rx_peerHashTable_lock);
}
/* Find the peer process represented by the supplied (host,port)
* combination. If there is no appropriate active peer structure, a
- * new one will be allocated and initialized
+ * new one will be allocated and initialized
* The origPeer, if set, is a pointer to a peer structure on which the
 * refcount will be decremented. This is used to replace the peer
* structure hanging off a connection structure */
* server connection is created, it will be created using the supplied
* index, if the index is valid for this service */
struct rx_connection *
-rxi_FindConnection(osi_socket socket, afs_int32 host,
+rxi_FindConnection(osi_socket socket, afs_uint32 host,
u_short port, u_short serviceId, afs_uint32 cid,
afs_uint32 epoch, int type, u_int securityIndex)
{
MUTEX_EXIT(&conn->conn_call_lock);
*call->callNumber = np->header.callNumber;
#ifdef RXDEBUG
- if (np->header.callNumber == 0)
+ if (np->header.callNumber == 0)
dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%.06d len %d",
np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port),
np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq,
*/
if ((rx_BusyThreshold > 0) && (rx_nWaiting > rx_BusyThreshold)) {
struct rx_packet *tp;
-
+
rxi_CallError(call, rx_BusyError);
tp = rxi_SendCallAbort(call, np, 1, 0);
MUTEX_EXIT(&call->lock);
rxi_ResetCall(call, 0);
*call->callNumber = np->header.callNumber;
#ifdef RXDEBUG
- if (np->header.callNumber == 0)
+ if (np->header.callNumber == 0)
dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%06d len %d",
np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port),
np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq,
* traversing the tq in rxi_Start sending packets out because
* packets may move to the freePacketQueue as result of being here!
* So we drop these packets until we're safely out of the
- * traversing. Really ugly!
+ * traversing. Really ugly!
* For fine grain RX locking, we set the acked field in the
* packets and let rxi_Start remove them from the transmit queue.
*/
/* XXX I'm not sure this is exactly right, since tfirst **IS**
* XXX unacknowledged. I think that this is off-by-one, but
* XXX I don't dare change it just yet, since it will
- * XXX interact badly with the server-restart detection
+ * XXX interact badly with the server-restart detection
* XXX code in receiveackpacket. */
if (ntohl(rx_GetInt32(np, FIRSTACKOFFSET)) < call->tfirst) {
if (rx_stats_active)
* so this will be quite important with very large window sizes.
* Skew is checked against 0 here to avoid any dependence on the type of
* inPacketSkew (which may be unsigned). In C, -1 > (unsigned) 0 is always
- * true!
+ * true!
* The inPacketSkew should be a smoothed running value, not just a maximum. MTUXXX
* see CalculateRoundTripTime for an example of how to keep smoothed values.
* I think using a beta of 1/8 is probably appropriate. 93.04.21
* traversing the tq in rxi_Start sending packets out because
* packets may move to the freePacketQueue as result of being
* here! So we drop these packets until we're safely out of the
- * traversing. Really ugly!
+ * traversing. Really ugly!
* For fine grain RX locking, we set the acked field in the packets
* and let rxi_Start remove the packets from the transmit queue.
*/
if (!conn->checkReachEvent) {
conn->refCount++;
conn->checkReachEvent =
- rxevent_PostNow(&when, &now, rxi_CheckReachEvent, conn,
+ rxevent_PostNow(&when, &now, rxi_CheckReachEvent, conn,
NULL);
}
MUTEX_EXIT(&conn->conn_data_lock);
int newPackets = 0;
int didHardAck = 0;
int haveLast = 0;
- afs_uint32 seq;
+ afs_uint32 seq;
afs_uint32 serial=0, flags=0;
int isFirst;
struct rx_packet *tnp;
/* It's the next packet. Stick it on the receive queue
* for this call. Set newPackets to make sure we wake
* the reader once all packets have been processed */
+#ifdef RX_TRACK_PACKETS
np->flags |= RX_PKTFLAG_RQ;
+#endif
queue_Prepend(&call->rq, np);
#ifdef RXDEBUG_PACKET
call->rqc++;
* packet before which to insert the new packet, or at the
* queue head if the queue is empty or the packet should be
* appended. */
+#ifdef RX_TRACK_PACKETS
np->flags |= RX_PKTFLAG_RQ;
+#endif
#ifdef RXDEBUG_PACKET
call->rqc++;
#endif /* RXDEBUG_PACKET */
}
}
- /* We need to send an ack of the packet is out of sequence,
+    /* We need to send an ack if the packet is out of sequence,
* or if an ack was requested by the peer. */
if (seq != prev + 1 || missing) {
ackNeeded = RX_ACK_OUT_OF_SEQUENCE;
int acked;
int nNacked = 0;
int newAckCount = 0;
- u_short maxMTU = 0; /* Set if peer supports AFS 3.4a jumbo datagrams */
int maxDgramPackets = 0; /* Set if peer supports AFS 3.5 jumbo datagrams */
+ int pktsize = 0; /* Set if we need to update the peer mtu */
if (rx_stats_active)
rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
nAcks = MIN((unsigned)nbytes, (unsigned)ap->nAcks);
first = ntohl(ap->firstPacket);
serial = ntohl(ap->serial);
- /* temporarily disabled -- needs to degrade over time
+ /* temporarily disabled -- needs to degrade over time
* skew = ntohs(ap->maxSkew); */
/* Ignore ack packets received out of order */
if (ap->reason == RX_ACK_PING_RESPONSE)
rxi_UpdatePeerReach(conn, call);
+ if (conn->lastPacketSizeSeq) {
+ MUTEX_ENTER(&conn->conn_data_lock);
+ if ((first > conn->lastPacketSizeSeq) && (conn->lastPacketSize)) {
+ pktsize = conn->lastPacketSize;
+ conn->lastPacketSize = conn->lastPacketSizeSeq = 0;
+ }
+ MUTEX_EXIT(&conn->conn_data_lock);
+ }
+ if ((ap->reason == RX_ACK_PING_RESPONSE) && (conn->lastPingSizeSer)) {
+ MUTEX_ENTER(&conn->conn_data_lock);
+ if ((conn->lastPingSizeSer == serial) && (conn->lastPingSize)) {
+ /* process mtu ping ack */
+ pktsize = conn->lastPingSize;
+ conn->lastPingSizeSer = conn->lastPingSize = 0;
+ }
+ MUTEX_EXIT(&conn->conn_data_lock);
+ }
+
+ if (pktsize) {
+ MUTEX_ENTER(&peer->peer_lock);
+ /*
+ * Start somewhere. Can't assume we can send what we can receive,
+ * but we are clearly receiving.
+ */
+ if (!peer->maxPacketSize)
+ peer->maxPacketSize = RX_MIN_PACKET_SIZE+RX_IPUDP_SIZE;
+
+ if (pktsize > peer->maxPacketSize) {
+ peer->maxPacketSize = pktsize;
+ if ((pktsize-RX_IPUDP_SIZE > peer->ifMTU)) {
+ peer->ifMTU=pktsize-RX_IPUDP_SIZE;
+ peer->natMTU = rxi_AdjustIfMTU(peer->ifMTU);
+ rxi_ScheduleGrowMTUEvent(call, 1);
+ }
+ }
+ MUTEX_EXIT(&peer->peer_lock);
+ }
+
#ifdef RXDEBUG
#ifdef AFS_NT40_ENV
if (rxdebug_active) {
len = _snprintf(msg, sizeof(msg),
"tid[%d] RACK: reason %s serial %u previous %u seq %u skew %d first %u acks %u space %u ",
- GetCurrentThreadId(), rx_ack_reason(ap->reason),
+ GetCurrentThreadId(), rx_ack_reason(ap->reason),
ntohl(ap->serial), ntohl(ap->previousPacket),
- (unsigned int)np->header.seq, (unsigned int)skew,
+ (unsigned int)np->header.seq, (unsigned int)skew,
ntohl(ap->firstPacket), ap->nAcks, ntohs(ap->bufferSpace) );
if (nAcks) {
int offset;
- for (offset = 0; offset < nAcks && len < sizeof(msg); offset++)
+ for (offset = 0; offset < nAcks && len < sizeof(msg); offset++)
msg[len++] = (ap->acks[offset] == RX_ACK_TYPE_NACK ? '-' : '*');
}
msg[len++]='\n';
* packets (osi_NetSend) we drop all acks while we're traversing the tq
* in rxi_Start sending packets out because packets may move to the
* freePacketQueue as result of being here! So we drop these packets until
- * we're safely out of the traversing. Really ugly!
+ * we're safely out of the traversing. Really ugly!
* To make it even uglier, if we're using fine grain locking, we can
* set the ack bits in the packets and have rxi_Start remove the packets
* when it's done transmitting.
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
{
queue_Remove(tp);
+#ifdef RX_TRACK_PACKETS
tp->flags &= ~RX_PKTFLAG_TQ;
+#endif
#ifdef RXDEBUG_PACKET
call->tqc--;
#endif /* RXDEBUG_PACKET */
backedOff = 1;
}
- /* If packet isn't yet acked, and it has been transmitted at least
- * once, reset retransmit time using latest timeout
- * ie, this should readjust the retransmit timer for all outstanding
+ /* If packet isn't yet acked, and it has been transmitted at least
+ * once, reset retransmit time using latest timeout
+ * ie, this should readjust the retransmit timer for all outstanding
* packets... So we don't just retransmit when we should know better*/
if (!(tp->flags & RX_PKTFLAG_ACKED) && !clock_IsZero(&tp->retryTime)) {
if (np->length >= rx_AckDataSize(ap->nAcks) + 2 * sizeof(afs_int32)) {
afs_uint32 tSize;
- /* If the ack packet has a "recommended" size that is less than
+ /* If the ack packet has a "recommended" size that is less than
* what I am using now, reduce my size to match */
rx_packetread(np, rx_AckDataSize(ap->nAcks) + (int)sizeof(afs_int32),
(int)sizeof(afs_int32), &tSize);
tSize = rxi_AdjustMaxMTU(peer->natMTU, tSize);
/* sanity check - peer might have restarted with different params.
- * If peer says "send less", dammit, send less... Peer should never
+ * If peer says "send less", dammit, send less... Peer should never
* be unable to accept packets of the size that prior AFS versions would
* send without asking. */
if (peer->maxMTU != tSize) {
* network MTU confused with the loopback MTU. Calculate the
* maximum MTU here for use in the slow start code below.
*/
- maxMTU = peer->maxMTU;
/* Did peer restart with older RX version? */
if (peer->maxDgramPackets > 1) {
peer->maxDgramPackets = 1;
sizeof(afs_int32), &tSize);
tSize = (afs_uint32) ntohl(tSize);
/*
- * As of AFS 3.5 we set the send window to match the receive window.
+ * As of AFS 3.5 we set the send window to match the receive window.
*/
if (tSize < call->twind) {
call->twind = tSize;
}
call->MTU = RX_HEADER_SIZE + RX_JUMBOBUFFERSIZE;
} else if (call->MTU < peer->maxMTU) {
- call->MTU += peer->natMTU;
- call->MTU = MIN(call->MTU, peer->maxMTU);
+ /* don't upgrade if we can't handle it */
+ if ((call->nDgramPackets == 1) && (call->MTU >= peer->ifMTU))
+ call->MTU = peer->ifMTU;
+ else {
+ call->MTU += peer->natMTU;
+ call->MTU = MIN(call->MTU, peer->maxMTU);
+ }
}
call->nAcks = 0;
}
call->flags &= ~RX_CALL_WAIT_PROC;
if (queue_IsOnQueue(call)) {
queue_Remove(call);
-
+
MUTEX_ENTER(&rx_waiting_mutex);
rx_nWaiting--;
MUTEX_EXIT(&rx_waiting_mutex);
CV_SIGNAL(&sq->cv);
#else
service->nRequestsRunning++;
+ MUTEX_ENTER(&rx_quota_mutex);
if (service->nRequestsRunning <= service->minProcs)
rxi_minDeficit--;
rxi_availProcs--;
+ MUTEX_EXIT(&rx_quota_mutex);
osi_rxWakeup(sq);
#endif
}
{
if (queue_IsNotEmpty(&call->rq)) {
u_short count;
-
+
count = rxi_FreePackets(0, &call->rq);
rx_packetReclaims += count;
#ifdef RXDEBUG_PACKET
call->rqc -= count;
- if ( call->rqc != 0 )
+ if ( call->rqc != 0 )
dpf(("rxi_ClearReceiveQueue call %"AFS_PTR_FMT" rqc %u != 0", call, call->rqc));
#endif
call->flags &= ~(RX_CALL_RECEIVE_DONE | RX_CALL_HAVE_LAST);
rxi_ClearReceiveQueue(call);
/* why init the queue if you just emptied it? queue_Init(&call->rq); */
-
+
if (call->currentPacket) {
+#ifdef RX_TRACK_PACKETS
call->currentPacket->flags &= ~RX_PKTFLAG_CP;
call->currentPacket->flags |= RX_PKTFLAG_IOVQ;
+#endif
queue_Prepend(&call->iovq, call->currentPacket);
#ifdef RXDEBUG_PACKET
call->iovqc++;
call->curlen = call->nLeft = call->nFree = 0;
#ifdef RXDEBUG_PACKET
- call->iovqc -=
+ call->iovqc -=
#endif
rxi_FreePackets(0, &call->iovq);
if (queue_IsOnQueue(call)) {
queue_Remove(call);
if (flags & RX_CALL_WAIT_PROC) {
-
+
MUTEX_ENTER(&rx_waiting_mutex);
rx_nWaiting--;
MUTEX_EXIT(&rx_waiting_mutex);
* higher level yet (unless, of course, the sender decides to abort
* the call altogether). Any of p, seq, serial, pflags, or reason may
* be set to zero without ill effect. That is, if they are zero, they
- * will not convey any information.
+ * will not convey any information.
* NOW there is a trailer field, after the ack where it will safely be
- * ignored by mundanes, which indicates the maximum size packet this
+ * ignored by mundanes, which indicates the maximum size packet this
* host can swallow. */
/*
- struct rx_packet *optionalPacket; use to send ack (or null)
- int seq; Sequence number of the packet we are acking
- int serial; Serial number of the packet
- int pflags; Flags field from packet header
- int reason; Reason an acknowledge was prompted
+ struct rx_packet *optionalPacket; use to send ack (or null)
+ int seq; Sequence number of the packet we are acking
+ int serial; Serial number of the packet
+ int pflags; Flags field from packet header
+ int reason; Reason an acknowledge was prompted
*/
struct rx_packet *
struct rx_packet *p;
u_char offset;
afs_int32 templ;
+ afs_uint32 padbytes = 0;
#ifdef RX_ENABLE_TSFPQ
struct rx_ts_info_t * rx_ts_info;
#endif
call->conn->rwind[call->channel] = call->rwind = rx_maxReceiveWindow;
}
+ /* Don't attempt to grow MTU if this is a critical ping */
+ if (reason == RX_ACK_MTU) {
+ /* keep track of per-call attempts, if we're over max, do in small
+ * otherwise in larger? set a size to increment by, decrease
+ * on failure, here?
+ */
+ if (call->conn->peer->maxPacketSize &&
+ (call->conn->peer->maxPacketSize < OLD_MAX_PACKET_SIZE
+ +RX_IPUDP_SIZE))
+ padbytes = call->conn->peer->maxPacketSize+16;
+ else
+ padbytes = call->conn->peer->maxMTU + 128;
+
+ /* do always try a minimum size ping */
+ padbytes = MAX(padbytes, RX_MIN_PACKET_SIZE+RX_IPUDP_SIZE+4);
+
+ /* subtract the ack payload */
+ padbytes -= (rx_AckDataSize(call->rwind) + 4 * sizeof(afs_int32));
+ reason = RX_ACK_PING;
+ }
+
call->nHardAcks = 0;
call->nSoftAcks = 0;
if (call->rnext > call->lastAcked)
}
#endif
- templ =
+ templ = padbytes +
rx_AckDataSize(call->rwind) + 4 * sizeof(afs_int32) -
rx_GetDataSize(p);
if (templ > 0) {
ap->previousPacket = htonl(call->rprev); /* Previous packet received */
/* No fear of running out of ack packet here because there can only be at most
- * one window full of unacknowledged packets. The window size must be constrained
+ * one window full of unacknowledged packets. The window size must be constrained
* to be less than the maximum ack size, of course. Also, an ack should always
* fit into a single packet -- it should not ever be fragmented. */
for (offset = 0, queue_Scan(&call->rq, rqp, nxp, rx_packet)) {
#ifdef ADAPT_WINDOW
clock_GetTime(&call->pingRequestTime);
#endif
+ if (padbytes) {
+ p->length = padbytes +
+ rx_AckDataSize(call->rwind) + 4 * sizeof(afs_int32);
+
+ while (padbytes--)
+ /* not fast but we can potentially use this if truncated
+ * fragments are delivered to figure out the mtu.
+ */
+ rx_packetwrite(p, rx_AckDataSize(offset) + 4 *
+ sizeof(afs_int32), sizeof(afs_int32),
+ &padbytes);
+ }
}
if (call->conn->type == RX_CLIENT_CONNECTION)
p->header.flags |= RX_CLIENT_INITIATED;
len = _snprintf(msg, sizeof(msg),
"tid[%d] SACK: reason %s serial %u previous %u seq %u first %u acks %u space %u ",
- GetCurrentThreadId(), rx_ack_reason(ap->reason),
+ GetCurrentThreadId(), rx_ack_reason(ap->reason),
ntohl(ap->serial), ntohl(ap->previousPacket),
(unsigned int)p->header.seq, ntohl(ap->firstPacket),
ap->nAcks, ntohs(ap->bufferSpace) );
if (ap->nAcks) {
int offset;
- for (offset = 0; offset < ap->nAcks && len < sizeof(msg); offset++)
+ for (offset = 0; offset < ap->nAcks && len < sizeof(msg); offset++)
msg[len++] = (ap->acks[offset] == RX_ACK_TYPE_NACK ? '-' : '*');
}
msg[len++]='\n';
/* Update last send time for this call (for keep-alive
* processing), and for the connection (so that we can discover
* idle connections) */
- call->lastSendData = conn->lastSendTime = call->lastSendTime = clock_Sec();
+ conn->lastSendTime = call->lastSendTime = clock_Sec();
+ /* Let a set of retransmits trigger an idle timeout */
+ if (!resending)
+ call->lastSendData = call->lastSendTime;
}
/* When sending packets we need to follow these rules:
#ifdef RX_ENABLE_LOCKS
/* Call rxi_Start, below, but with the call lock held. */
void
-rxi_StartUnlocked(struct rxevent *event,
+rxi_StartUnlocked(struct rxevent *event,
void *arg0, void *arg1, int istack)
{
struct rx_call *call = arg0;
-
+
MUTEX_ENTER(&call->lock);
rxi_Start(event, call, arg1, istack);
MUTEX_EXIT(&call->lock);
* better optimized for new packets, the usual case, now that we've
* got rid of queues of send packets. XXXXXXXXXXX */
void
-rxi_Start(struct rxevent *event,
+rxi_Start(struct rxevent *event,
void *arg0, void *arg1, int istack)
{
struct rx_call *call = arg0;
-
+
struct rx_packet *p;
struct rx_packet *nxp; /* Next pointer for queue_Scan */
struct rx_peer *peer = call->conn->peer;
*(call->callNumber)));
break;
}
+#ifdef RX_TRACK_PACKETS
if ((p->flags & RX_PKTFLAG_FREE)
|| (!queue_IsEnd(&call->tq, nxp)
&& (nxp->flags & RX_PKTFLAG_FREE))
|| (nxp == (struct rx_packet *)&rx_freePacketQueue)) {
osi_Panic("rxi_Start: xmit queue clobbered");
}
+#endif
if (p->flags & RX_PKTFLAG_ACKED) {
/* Since we may block, don't trust this */
usenow.sec = usenow.usec = 0;
/* Transmit the packet if it needs to be sent. */
if (!clock_Lt(&now, &p->retryTime)) {
if (nXmitPackets == maxXmitPackets) {
- rxi_SendXmitList(call, xmitList, nXmitPackets,
- istack, &now, &retryTime,
+ rxi_SendXmitList(call, xmitList, nXmitPackets,
+ istack, &now, &retryTime,
resending);
- osi_Free(xmitList, maxXmitPackets *
+ osi_Free(xmitList, maxXmitPackets *
sizeof(struct rx_packet *));
goto restart;
}
if (p->header.seq < call->tfirst
&& (p->flags & RX_PKTFLAG_ACKED)) {
queue_Remove(p);
+#ifdef RX_TRACK_PACKETS
p->flags &= ~RX_PKTFLAG_TQ;
+#endif
#ifdef RXDEBUG_PACKET
call->tqc--;
#endif
#ifdef RX_ENABLE_LOCKS
CALL_HOLD(call, RX_CALL_REFCOUNT_RESEND);
call->resendEvent =
- rxevent_PostNow2(&retryTime, &usenow,
+ rxevent_PostNow2(&retryTime, &usenow,
rxi_StartUnlocked,
(void *)call, 0, istack);
#else /* RX_ENABLE_LOCKS */
call->resendEvent =
- rxevent_PostNow2(&retryTime, &usenow, rxi_Start,
+ rxevent_PostNow2(&retryTime, &usenow, rxi_Start,
(void *)call, 0, istack);
#endif /* RX_ENABLE_LOCKS */
}
/* Update last send time for this call (for keep-alive
* processing), and for the connection (so that we can discover
* idle connections) */
- conn->lastSendTime = call->lastSendTime = clock_Sec();
- /* Don't count keepalives here, so idleness can be tracked. */
- if ((p->header.type != RX_PACKET_TYPE_ACK) || (((struct rx_ackPacket *)rx_DataOf(p))->reason != RX_ACK_PING))
- call->lastSendData = call->lastSendTime;
+ if ((p->header.type != RX_PACKET_TYPE_ACK) ||
+ (((struct rx_ackPacket *)rx_DataOf(p))->reason == RX_ACK_PING) ||
+ (p->length <= (rx_AckDataSize(call->rwind) + 4 * sizeof(afs_int32))))
+ {
+ conn->lastSendTime = call->lastSendTime = clock_Sec();
+ /* Don't count keepalive ping/acks here, so idleness can be tracked. */
+ if ((p->header.type != RX_PACKET_TYPE_ACK) ||
+ ((((struct rx_ackPacket *)rx_DataOf(p))->reason != RX_ACK_PING) &&
+ (((struct rx_ackPacket *)rx_DataOf(p))->reason !=
+ RX_ACK_PING_RESPONSE)))
+ call->lastSendData = call->lastSendTime;
+ }
}
-
/* Check if a call needs to be destroyed. Called by keep-alive code to ensure
* that things are fine. Also called periodically to guarantee that nothing
* falls through the cracks (e.g. (error + dally) connections have keepalive
struct rx_connection *conn = call->conn;
afs_uint32 now;
afs_uint32 deadTime;
+ int cerror = 0;
+ int newmtu = 0;
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
if (call->flags & RX_CALL_TQ_BUSY) {
netstack_t *ns = netstack_find_by_stackid(GLOBAL_NETSTACKID);
ip_stack_t *ipst = ns->netstack_ip;
#endif
- ire = ire_cache_lookup(call->conn->peer->host
+ ire = ire_cache_lookup(conn->peer->host
#if defined(AFS_SUN510_ENV) && defined(ALL_ZONES)
, ALL_ZONES
#if defined(AFS_SUN510_ENV) && (defined(ICL_3_ARG) || defined(GLOBAL_NETSTACKID))
#endif
#endif
);
-
+
if (ire && ire->ire_max_frag > 0)
- rxi_SetPeerMtu(call->conn->peer->host, 0, ire->ire_max_frag);
+ rxi_SetPeerMtu(NULL, conn->peer->host, 0,
+ ire->ire_max_frag);
#if defined(GLOBAL_NETSTACKID)
netstack_rele(ns);
#endif
#endif
#endif /* ADAPT_PMTU */
- rxi_CallError(call, RX_CALL_DEAD);
- return -1;
+ cerror = RX_CALL_DEAD;
+ goto mtuout;
} else {
#ifdef RX_ENABLE_LOCKS
/* Cancel pending events */
&& ((call->startWait + conn->idleDeadTime) < now) &&
(call->flags & RX_CALL_READER_WAIT)) {
if (call->state == RX_STATE_ACTIVE) {
- rxi_CallError(call, RX_CALL_TIMEOUT);
- return -1;
+ cerror = RX_CALL_TIMEOUT;
+ goto mtuout;
}
}
if (call->lastSendData && conn->idleDeadTime && (conn->idleDeadErr != 0)
&& ((call->lastSendData + conn->idleDeadTime) < now)) {
if (call->state == RX_STATE_ACTIVE) {
- rxi_CallError(call, conn->idleDeadErr);
- return -1;
+ cerror = conn->idleDeadErr;
+ goto mtuout;
}
}
/* see if we have a hard timeout */
return -1;
}
return 0;
+mtuout:
+ if (conn->msgsizeRetryErr && cerror != RX_CALL_TIMEOUT) {
+ int oldMTU = conn->peer->ifMTU;
+
+ /* if we thought we could send more, perhaps things got worse */
+ if (call->conn->peer->maxPacketSize > conn->lastPacketSize)
+ /* maxpacketsize will be cleared in rxi_SetPeerMtu */
+ newmtu = MAX(conn->peer->maxPacketSize-RX_IPUDP_SIZE,
+ conn->lastPacketSize-(128+RX_IPUDP_SIZE));
+ else
+ newmtu = conn->lastPacketSize-(128+RX_IPUDP_SIZE);
+
+ /* minimum capped in SetPeerMtu */
+ rxi_SetPeerMtu(conn->peer, 0, 0, newmtu);
+
+ /* clean up */
+ conn->lastPacketSize = 0;
+
+ /* needed so ResetCall doesn't clobber us. */
+ call->MTU = conn->peer->ifMTU;
+
+ /* if we never succeeded, let the error pass out as-is */
+ if (conn->peer->maxPacketSize && oldMTU != conn->peer->ifMTU)
+ cerror = conn->msgsizeRetryErr;
+
+ }
+ rxi_CallError(call, cerror);
+ return -1;
}
void
conn = call->conn;
if ((now - call->lastSendTime) > conn->secondsUntilPing) {
/* Don't try to send keepalives if there is unacknowledged data */
- /* the rexmit code should be good enough, this little hack
+ /* the rexmit code should be good enough, this little hack
* doesn't quite work XXX */
(void)rxi_SendAck(call, NULL, 0, RX_ACK_PING, 0);
}
MUTEX_EXIT(&call->lock);
}
+/*
+ * Event handler that periodically probes whether the path MTU to the
+ * peer can be grown.  Runs as an rxevent callback (hence the unused
+ * 'dummy' argument); arg1 is the rx_call being probed.
+ *
+ * Under call->lock it:
+ *  - drops the ALIVE reference taken when the event was scheduled, and
+ *    clears call->growMTUEvent if this is still the pending event;
+ *  - bails out if rxi_CheckCall() says the call is in trouble, or if
+ *    the call is dallying;
+ *  - sends an RX_ACK_MTU ack to probe a larger packet size, but only
+ *    when the peer has a recorded maxPacketSize, its natMTU is still
+ *    below RX_MAX_PACKET_SIZE, and the connection has an idle-dead
+ *    error configured (needed to handle a failed probe);
+ *  - reschedules itself via rxi_ScheduleGrowMTUEvent() so probing
+ *    continues for the life of the call.
+ *
+ * NOTE(review): in the !RX_ENABLE_LOCKS branch the early return after
+ * rxi_CheckCall() skips MUTEX_EXIT; presumably the mutex macros are
+ * no-ops in that configuration -- confirm.
+ */
+void
+rxi_GrowMTUEvent(struct rxevent *event, void *arg1, void *dummy)
+{
+    struct rx_call *call = arg1;
+    struct rx_connection *conn;
+
+    MUTEX_ENTER(&call->lock);
+    CALL_RELE(call, RX_CALL_REFCOUNT_ALIVE);
+    if (event == call->growMTUEvent)
+        call->growMTUEvent = NULL;
+
+#ifdef RX_ENABLE_LOCKS
+    if (rxi_CheckCall(call, 0)) {
+        MUTEX_EXIT(&call->lock);
+        return;
+    }
+#else /* RX_ENABLE_LOCKS */
+    if (rxi_CheckCall(call))
+        return;
+#endif /* RX_ENABLE_LOCKS */
+
+    /* Don't bother with dallying calls */
+    if (call->state == RX_STATE_DALLY) {
+        MUTEX_EXIT(&call->lock);
+        return;
+    }
+
+    conn = call->conn;
+
+    /*
+     * keep being scheduled, just don't do anything if we're at peak,
+     * or we're not set up to be properly handled (idle timeout required)
+     */
+    if ((conn->peer->maxPacketSize != 0) &&
+        (conn->peer->natMTU < RX_MAX_PACKET_SIZE) &&
+        (conn->idleDeadErr))
+        (void)rxi_SendAck(call, NULL, 0, RX_ACK_MTU, 0);
+    rxi_ScheduleGrowMTUEvent(call, 0);
+    MUTEX_EXIT(&call->lock);
+}
void
rxi_ScheduleKeepAliveEvent(struct rx_call *call)
}
}
+/*
+ * Schedule the next MTU-grow probe for 'call', unless one is already
+ * pending.  'secs' is the delay in seconds; if 0, a default is derived
+ * from the connection's keepalive interval (6*secondsUntilPing - 1),
+ * capped at secondsUntilDead - 1 when a dead-time is configured.
+ * Takes an ALIVE reference on the call, released in rxi_GrowMTUEvent().
+ *
+ * NOTE(review): if secondsUntilPing is 0 but secondsUntilDead is set,
+ * MIN(0, secondsUntilDead - 1) leaves secs == 0 and the event fires
+ * immediately -- confirm that is intended.
+ */
+void
+rxi_ScheduleGrowMTUEvent(struct rx_call *call, int secs)
+{
+    if (!call->growMTUEvent) {
+        struct clock when, now;
+
+        clock_GetTime(&now);
+        when = now;
+        if (!secs) {
+            if (call->conn->secondsUntilPing)
+                secs = (6*call->conn->secondsUntilPing)-1;
+
+            if (call->conn->secondsUntilDead)
+                secs = MIN(secs, (call->conn->secondsUntilDead-1));
+        }
+
+        when.sec += secs;
+        CALL_HOLD(call, RX_CALL_REFCOUNT_ALIVE);
+        call->growMTUEvent =
+            rxevent_PostNow(&when, &now, rxi_GrowMTUEvent, call, 0);
+    }
+}
+
/* N.B. rxi_KeepAliveOff: is defined earlier as a macro */
void
rxi_KeepAliveOn(struct rx_call *call)
rxi_ScheduleKeepAliveEvent(call);
}
+/*
+ * Enable MTU-grow probing on a call: reset the connection's record of
+ * the last ping probe (size and serial) under conn_data_lock, then
+ * schedule the first probe one second out.
+ */
+void
+rxi_GrowMTUOn(struct rx_call *call)
+{
+    struct rx_connection *conn = call->conn;
+    MUTEX_ENTER(&conn->conn_data_lock);
+    conn->lastPingSizeSer = conn->lastPingSize = 0;
+    MUTEX_EXIT(&conn->conn_data_lock);
+    rxi_ScheduleGrowMTUEvent(call, 1);
+}
+
/* This routine is called to send connection abort messages
* that have been delayed to throttle looping clients. */
void
void *arg1, void *unused)
{
struct rx_connection *conn = arg1;
-
+
afs_int32 error;
struct rx_packet *packet;
/* This routine is called to send call abort messages
* that have been delayed to throttle looping clients. */
void
-rxi_SendDelayedCallAbort(struct rxevent *event,
+rxi_SendDelayedCallAbort(struct rxevent *event,
void *arg1, void *dummy)
{
struct rx_call *call = arg1;
-
+
afs_int32 error;
struct rx_packet *packet;
* issues a challenge to the client, which is obtained from the
* security object associated with the connection */
void
-rxi_ChallengeEvent(struct rxevent *event,
+rxi_ChallengeEvent(struct rxevent *event,
void *arg0, void *arg1, int tries)
{
struct rx_connection *conn = arg0;
-
+
conn->challengeEvent = NULL;
if (RXS_CheckAuthentication(conn->securityObject, conn) != 0) {
struct rx_packet *packet;
} else {
/* I don't have a stored RTT so I start with this value. Since I'm
* probably just starting a call, and will be pushing more data down
- * this, I expect congestion to increase rapidly. So I fudge a
+ * this, I expect congestion to increase rapidly. So I fudge a
* little, and I set deviance to half the rtt. In practice,
* deviance tends to approach something a little less than
* half the smoothed rtt. */
peer->timeout.sec, peer->timeout.usec, peer->smRtt,
peer->packetSize));
peer->maxWindow = minTime;
- elide... call->twind = minTime;
+ elide... call->twind = minTime;
}
*/
va_end(ap);
#else
struct clock now;
-
+
va_start(ap, format);
clock_GetTime(&now);
void *outputData, size_t outputLength)
{
static afs_int32 counter = 100;
- time_t waitTime, waitCount, startTime;
+ time_t waitTime, waitCount;
struct rx_header theader;
char tbuffer[1500];
afs_int32 code;
fd_set imask;
char *tp;
- startTime = time(0);
waitTime = 1;
waitCount = 5;
LOCK_RX_DEBUG;
tv_delta.tv_sec = tv_wake.tv_sec;
tv_delta.tv_usec = tv_wake.tv_usec;
gettimeofday(&tv_now, 0);
-
+
if (tv_delta.tv_usec < tv_now.tv_usec) {
/* borrow */
tv_delta.tv_usec += 1000000;
tv_delta.tv_sec--;
}
tv_delta.tv_usec -= tv_now.tv_usec;
-
+
if (tv_delta.tv_sec < tv_now.tv_sec) {
/* time expired */
break;
}
tv_delta.tv_sec -= tv_now.tv_sec;
-
+
#ifdef AFS_NT40_ENV
code = select(0, &imask, 0, 0, &tv_delta);
#else /* AFS_NT40_ENV */
code =
recvfrom(socket, tbuffer, sizeof(tbuffer), 0,
(struct sockaddr *)&faddr, &faddrLen);
-
+
if (code > 0) {
memcpy(&theader, tbuffer, sizeof(struct rx_header));
if (counter == ntohl(theader.callNumber))
}
waitTime <<= 1;
}
-
+
success:
code -= sizeof(struct rx_header);
if (code > outputLength)
return rc;
}
-afs_int32
+afs_int32
rx_GetLocalPeers(afs_uint32 peerHost, afs_uint16 peerPort,
struct rx_debugPeer * peerStats)
{
afs_uint32 hashValue = PEER_HASH(peerHost, peerPort);
MUTEX_ENTER(&rx_peerHashTable_lock);
- for(tp = rx_peerHashTable[hashValue];
+ for(tp = rx_peerHashTable[hashValue];
tp != NULL; tp = tp->next) {
if (tp->host == peerHost)
break;
MUTEX_EXIT(&conn->conn_data_lock);
}
+/*
+ * Store 'ptr' in slot 'key' of the service's specific-data array,
+ * creating or growing svc->specific as needed; newly exposed slots
+ * below 'key' are NULL-filled and svc->nSpecific is updated.  If the
+ * slot already holds a value and a destructor was registered for the
+ * key (rxi_keyCreate_destructor), the old value is destroyed first.
+ * All access is serialized by svc->svc_data_lock.
+ *
+ * NOTE(review): malloc/realloc results are not checked, and the
+ * realloc return value overwrites svc->specific directly, so on
+ * allocation failure the old array is leaked and subsequent writes
+ * dereference NULL.  The NULL-fill loops stop at key-1; slot 'key'
+ * itself is written immediately afterwards, so no slot is left
+ * uninitialized.
+ */
+void
+rx_SetServiceSpecific(struct rx_service *svc, int key, void *ptr)
+{
+    int i;
+    MUTEX_ENTER(&svc->svc_data_lock);
+    if (!svc->specific) {
+        svc->specific = (void **)malloc((key + 1) * sizeof(void *));
+        for (i = 0; i < key; i++)
+            svc->specific[i] = NULL;
+        svc->nSpecific = key + 1;
+        svc->specific[key] = ptr;
+    } else if (key >= svc->nSpecific) {
+        svc->specific = (void **)
+            realloc(svc->specific, (key + 1) * sizeof(void *));
+        for (i = svc->nSpecific; i < key; i++)
+            svc->specific[i] = NULL;
+        svc->nSpecific = key + 1;
+        svc->specific[key] = ptr;
+    } else {
+        if (svc->specific[key] && rxi_keyCreate_destructor[key])
+            (*rxi_keyCreate_destructor[key]) (svc->specific[key]);
+        svc->specific[key] = ptr;
+    }
+    MUTEX_EXIT(&svc->svc_data_lock);
+}
+
void *
rx_GetSpecific(struct rx_connection *conn, int key)
{
return ptr;
}
+/*
+ * Return the value stored for 'key' by rx_SetServiceSpecific(), or
+ * NULL if the key is beyond the allocated range.  Serialized by
+ * svc->svc_data_lock.
+ */
+void *
+rx_GetServiceSpecific(struct rx_service *svc, int key)
+{
+    void *ptr;
+    MUTEX_ENTER(&svc->svc_data_lock);
+    if (key >= svc->nSpecific)
+        ptr = NULL;
+    else
+        ptr = svc->specific[key];
+    MUTEX_EXIT(&svc->svc_data_lock);
+    return ptr;
+}
+
+
#endif /* !KERNEL */
/*
sizeof(rx_interface_stat_t) +
totalFunc * sizeof(rx_function_entry_v1_t);
- rpc_stat = (rx_interface_stat_p) rxi_Alloc(space);
+ rpc_stat = rxi_Alloc(space);
if (rpc_stat == NULL) {
rc = 1;
goto fail;
if (space > (size_t) 0) {
*allocSize = space;
- ptr = *stats = (afs_uint32 *) rxi_Alloc(space);
+ ptr = *stats = rxi_Alloc(space);
if (ptr != NULL) {
rx_interface_stat_p rpc_stat, nrpc_stat;
if (space > (size_t) 0) {
*allocSize = space;
- ptr = *stats = (afs_uint32 *) rxi_Alloc(space);
+ ptr = *stats = rxi_Alloc(space);
if (ptr != NULL) {
rx_interface_stat_p rpc_stat, nrpc_stat;
"\r\n",
cookie, c, c->call_id, (afs_uint32)c->state, (afs_uint32)c->mode, c->conn, c->conn?c->conn->epoch:0, c->conn?c->conn->cid:0,
c->callNumber?*c->callNumber:0, c->conn?c->conn->flags:0, c->flags,
- (afs_uint32)c->rqc, (afs_uint32)rqc, (afs_uint32)c->tqc, (afs_uint32)tqc, (afs_uint32)c->iovqc, (afs_uint32)iovqc,
- (afs_uint32)c->localStatus, (afs_uint32)c->remoteStatus, c->error, c->timeout,
+ (afs_uint32)c->rqc, (afs_uint32)rqc, (afs_uint32)c->tqc, (afs_uint32)tqc, (afs_uint32)c->iovqc, (afs_uint32)iovqc,
+ (afs_uint32)c->localStatus, (afs_uint32)c->remoteStatus, c->error, c->timeout,
c->resendEvent?1:0, c->timeoutEvent?1:0, c->keepAliveEvent?1:0, c->delayedAckEvent?1:0, c->delayedAbortEvent?1:0,
c->abortCode, c->abortCount, c->lastSendTime, c->lastReceiveTime, c->lastSendData
#ifdef RX_ENABLE_LOCKS