 * Copyright 2000, International Business Machines Corporation and others.
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
/* RX:  Extended Remote Procedure Call */
#include <afsconfig.h>
#include <afs/param.h>
# include "afs/sysincludes.h"
# include "afsincludes.h"
# ifdef AFS_LINUX20_ENV
#  include "h/socket.h"
# include "netinet/in.h"
# include "netinet/ip6.h"
# include "inet/common.h"
# include "inet/ip_ire.h"
# include "afs/afs_args.h"
# include "afs/afs_osi.h"
# ifdef RX_KERNEL_TRACE
#  include "rx_kcommon.h"
# if defined(AFS_AIX_ENV)
#  undef RXDEBUG    /* turn off debugging */
# if defined(AFS_SGI_ENV)
#  include "sys/debug.h"
#  include "afs/sysincludes.h"
#  include "afsincludes.h"
# endif /* !UKERNEL */
# include "afs/lock.h"
# include "rx_kmutex.h"
# include "rx_kernel.h"
# define AFSOP_STOP_RXCALLBACK 210    /* Stop CALLBACK process */
# define AFSOP_STOP_AFS 211           /* Stop AFS process */
# define AFSOP_STOP_BKG 212           /* Stop BKG process */
extern afs_int32 afs_termState;
# include "sys/lockl.h"
# include "sys/lock_def.h"
# endif /* AFS_AIX41_ENV */
# include "afs/rxgen_consts.h"
# include <afs/afsutil.h>
# include <WINNT\afsreg.h>
#include <opr/queue.h>
#include <hcrypto/rand.h>
#include "rx_atomic.h"
#include "rx_globals.h"
#include "rx_internal.h"
#include "rx_packet.h"
#include "rx_server.h"
#include <afs/rxgen_consts.h>
#ifdef AFS_PTHREAD_ENV
int (*registerProgram) (pid_t, char *) = 0;
int (*swapNameProgram) (pid_t, const char *, char *) = 0;
int (*registerProgram) (PROCESS, char *) = 0;
int (*swapNameProgram) (PROCESS, const char *, char *) = 0;
/* Local static routines */
static void rxi_DestroyConnectionNoLock(struct rx_connection *conn);
static void rxi_ComputeRoundTripTime(struct rx_packet *, struct rx_ackPacket *,
                                     struct rx_call *, struct rx_peer *,
static void rxi_Resend(struct rxevent *event, void *arg0, void *arg1,
static void rxi_SendDelayedAck(struct rxevent *event, void *call,
                               void *dummy, int dummy2);
static void rxi_SendDelayedCallAbort(struct rxevent *event, void *arg1,
                                     void *dummy, int dummy2);
static void rxi_SendDelayedConnAbort(struct rxevent *event, void *arg1,
                                     void *unused, int unused2);
static void rxi_ReapConnections(struct rxevent *unused, void *unused1,
                                void *unused2, int unused3);
static struct rx_packet *rxi_SendCallAbort(struct rx_call *call,
                                           struct rx_packet *packet,
                                           int istack, int force);
static void rxi_AckAll(struct rx_call *call);
static struct rx_connection
    *rxi_FindConnection(osi_socket socket, afs_uint32 host, u_short port,
                        u_short serviceId, afs_uint32 cid,
                        afs_uint32 epoch, int type, u_int securityIndex,
                        int *unknownService);
static struct rx_packet
    *rxi_ReceiveDataPacket(struct rx_call *call, struct rx_packet *np,
                           int istack, osi_socket socket,
                           afs_uint32 host, u_short port, int *tnop,
                           struct rx_call **newcallp);
static struct rx_packet
    *rxi_ReceiveAckPacket(struct rx_call *call, struct rx_packet *np,
static struct rx_packet
    *rxi_ReceiveResponsePacket(struct rx_connection *conn,
                               struct rx_packet *np, int istack);
static struct rx_packet
    *rxi_ReceiveChallengePacket(struct rx_connection *conn,
                                struct rx_packet *np, int istack);
static void rxi_AttachServerProc(struct rx_call *call, osi_socket socket,
                                 int *tnop, struct rx_call **newcallp);
static void rxi_ClearTransmitQueue(struct rx_call *call, int force);
static void rxi_ClearReceiveQueue(struct rx_call *call);
static void rxi_ResetCall(struct rx_call *call, int newcall);
static void rxi_ScheduleKeepAliveEvent(struct rx_call *call);
static void rxi_ScheduleNatKeepAliveEvent(struct rx_connection *conn);
static void rxi_ScheduleGrowMTUEvent(struct rx_call *call, int secs);
static void rxi_KeepAliveOn(struct rx_call *call);
static void rxi_GrowMTUOn(struct rx_call *call);
static void rxi_ChallengeOn(struct rx_connection *conn);
static int rxi_CheckCall(struct rx_call *call, int haveCTLock);
static void rxi_AckAllInTransmitQueue(struct rx_call *call);
static void rxi_CancelKeepAliveEvent(struct rx_call *call);
static void rxi_CancelDelayedAbortEvent(struct rx_call *call);
static void rxi_CancelGrowMTUEvent(struct rx_call *call);
static void update_nextCid(void);
static void rxi_Finalize_locked(void);
#elif defined(UKERNEL)
# define rxi_Finalize_locked() do { } while (0)
#ifdef RX_ENABLE_LOCKS
rx_atomic_t rxi_start_aborted;    /* rxi_start awoke after rxi_Send in error. */
rx_atomic_t rxi_start_in_error;
#endif /* RX_ENABLE_LOCKS */
/* Constant delay time before sending an acknowledge of the last packet
 * received.  This is to avoid sending an extra acknowledge when the
 * client is about to make another call, anyway, or the server is
 * The lastAckDelay may not exceed 400ms without causing peers to
 * unnecessarily timeout.
struct clock rx_lastAckDelay = {0, 400000};
/* Constant delay time before sending a soft ack when none was requested.
 * This is to make sure we send soft acks before the sender times out.
 * Normally we wait and send a hard ack when the receiver consumes the packet
 * This value has been 100ms in all shipping versions of OpenAFS. Changing it
 * will require changes to the peer's RTT calculations.
struct clock rx_softAckDelay = {0, 100000};
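/*
 * Illustrative note (not from the original source): struct clock holds
 * {seconds, microseconds}, so the constants above decode as
 *
 *     rx_lastAckDelay = {0, 400000}   ->  400ms
 *     rx_softAckDelay = {0, 100000}   ->  100ms
 */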
 * rxi_rpc_peer_stat_cnt counts the total number of peer stat structures
 * currently allocated within rx.  This number is used to allocate the
 * memory required to return the statistics when queried.
 * Protected by the rx_rpc_stats mutex.
static unsigned int rxi_rpc_peer_stat_cnt;
 * rxi_rpc_process_stat_cnt counts the total number of local process stat
 * structures currently allocated within rx.  The number is used to allocate
 * the memory required to return the statistics when queried.
 * Protected by the rx_rpc_stats mutex.
static unsigned int rxi_rpc_process_stat_cnt;
rx_atomic_t rx_nWaiting = RX_ATOMIC_INIT(0);
rx_atomic_t rx_nWaited = RX_ATOMIC_INIT(0);
/* Incoming calls wait on this queue when there are no available
 * server processes */
struct opr_queue rx_incomingCallQueue;
/* Server processes wait on this queue when there are no appropriate
 * calls to process */
struct opr_queue rx_idleServerQueue;
#if !defined(offsetof)
#include <stddef.h>    /* for definition of offsetof() */
#ifdef RX_ENABLE_LOCKS
afs_kmutex_t rx_atomic_mutex;
/* Forward prototypes */
static struct rx_call * rxi_NewCall(struct rx_connection *, int);
putConnection (struct rx_connection *conn) {
    MUTEX_ENTER(&rx_refcnt_mutex);
    MUTEX_EXIT(&rx_refcnt_mutex);
#ifdef AFS_PTHREAD_ENV
 * Use procedural initialization of mutexes/condition variables
extern afs_kmutex_t rx_quota_mutex;
extern afs_kmutex_t rx_pthread_mutex;
extern afs_kmutex_t rx_packets_mutex;
extern afs_kmutex_t rx_refcnt_mutex;
extern afs_kmutex_t des_init_mutex;
extern afs_kmutex_t des_random_mutex;
extern afs_kmutex_t rx_clock_mutex;
extern afs_kmutex_t rxi_connCacheMutex;
extern afs_kmutex_t event_handler_mutex;
extern afs_kmutex_t listener_mutex;
extern afs_kmutex_t rx_if_init_mutex;
extern afs_kmutex_t rx_if_mutex;
extern afs_kcondvar_t rx_event_handler_cond;
extern afs_kcondvar_t rx_listener_cond;
static afs_kmutex_t epoch_mutex;
static afs_kmutex_t rx_init_mutex;
static afs_kmutex_t rx_debug_mutex;
static afs_kmutex_t rx_rpc_stats;
rxi_InitPthread(void)
    MUTEX_INIT(&rx_quota_mutex, "quota", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_pthread_mutex, "pthread", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_packets_mutex, "packets", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_refcnt_mutex, "refcnts", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_clock_mutex, "clock", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rxi_connCacheMutex, "conn cache", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&event_handler_mutex, "event handler", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&listener_mutex, "listener", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_if_init_mutex, "if init", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_if_mutex, "if", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_stats_mutex, "stats", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_atomic_mutex, "atomic", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&epoch_mutex, "epoch", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_init_mutex, "init", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_debug_mutex, "debug", MUTEX_DEFAULT, 0);
    CV_INIT(&rx_event_handler_cond, "evhand", CV_DEFAULT, 0);
    CV_INIT(&rx_listener_cond, "rxlisten", CV_DEFAULT, 0);
    osi_Assert(pthread_key_create(&rx_thread_id_key, NULL) == 0);
    osi_Assert(pthread_key_create(&rx_ts_info_key, NULL) == 0);
    MUTEX_INIT(&rx_rpc_stats, "rx_rpc_stats", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_freePktQ_lock, "rx_freePktQ_lock", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_mallocedPktQ_lock, "rx_mallocedPktQ_lock", MUTEX_DEFAULT,
#ifdef RX_ENABLE_LOCKS
#endif /* RX_LOCKS_DB */
    MUTEX_INIT(&freeSQEList_lock, "freeSQEList lock", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_freeCallQueue_lock, "rx_freeCallQueue_lock", MUTEX_DEFAULT,
    CV_INIT(&rx_waitingForPackets_cv, "rx_waitingForPackets_cv", CV_DEFAULT,
    MUTEX_INIT(&rx_peerHashTable_lock, "rx_peerHashTable_lock", MUTEX_DEFAULT,
    MUTEX_INIT(&rx_connHashTable_lock, "rx_connHashTable_lock", MUTEX_DEFAULT,
    MUTEX_INIT(&rx_serverPool_lock, "rx_serverPool_lock", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rxi_keyCreate_lock, "rxi_keyCreate_lock", MUTEX_DEFAULT, 0);
#endif /* RX_ENABLE_LOCKS */
pthread_once_t rx_once_init = PTHREAD_ONCE_INIT;
#define INIT_PTHREAD_LOCKS osi_Assert(pthread_once(&rx_once_init, rxi_InitPthread)==0)
 * The rx_stats_mutex mutex protects the following global variables:
 * rxi_lowConnRefCount
 * rxi_lowPeerRefCount
 * The rx_quota_mutex mutex protects the following global variables:
 * The rx_freePktQ_lock protects the following global variables:
 * The rx_packets_mutex mutex protects the following global variables:
 * The rx_pthread_mutex mutex protects the following global variables:
 * rxi_fcfs_thread_num
#define INIT_PTHREAD_LOCKS
/* Variables for handling the minProcs implementation.  availProcs gives the
 * number of threads available in the pool at this moment (not counting dudes
 * executing right now).  totalMin gives the total number of procs required
 * for handling all minProcs requests.  minDeficit is a dynamic variable
 * tracking the # of procs required to satisfy all of the remaining minProcs
 * For fine grain locking to work, the quota check and the reservation of
 * a server thread has to come while rxi_availProcs and rxi_minDeficit
 * are locked.  To this end, the code has been modified under #ifdef
 * RX_ENABLE_LOCKS so that quota checks and reservation occur at the
 * same time.  A new function, ReturnToServerPool(), returns the allocation.
 *
 * A call can be on several queues (but only on one at a time).  When
 * rxi_ResetCall wants to remove the call from a queue, it has to ensure
 * that no one else is touching the queue.  To this end, we store the address
 * of the queue lock in the call structure (under the call lock) when we
 * put the call on a queue, and we clear the call_queue_lock when the
 * call is removed from a queue (once the call lock has been obtained).
 * This allows rxi_ResetCall to safely synchronize with others wishing
 * to manipulate the queue (a sketch follows this comment).
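/*
 * Sketch of the queue-lock protocol described above (illustrative only; in
 * the real code the lock recorded for the incoming-call queue is
 * rx_serverPool_lock):
 *
 *     MUTEX_ENTER(&call->lock);
 *     SET_CALL_QUEUE_LOCK(call, &rx_serverPool_lock);
 *     opr_queue_Append(&rx_incomingCallQueue, &call->entry);
 *     ...
 *     CLEAR_CALL_QUEUE_LOCK(call);    -- once removed, under call->lock
 */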
#if defined(RX_ENABLE_LOCKS)
static afs_kmutex_t rx_rpc_stats;
/* We keep a "last conn pointer" in rxi_FindConnection. The odds are
** pretty good that the next packet coming in is from the same connection
** as the last packet, since we send multiple packets in a transmit window.
struct rx_connection *rxLastConn = 0;
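/*
 * Illustrative fast-path check (a sketch of how the one-behind cache is
 * consulted; the real logic lives in rxi_FindConnection):
 *
 *     conn = rxLastConn;
 *     if (conn && conn->epoch == epoch && conn->cid == (cid & RX_CIDMASK))
 *         return conn;        -- cache hit: skip the hash lookup
 */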
#ifdef RX_ENABLE_LOCKS
/* The locking hierarchy for rx fine grain locking is composed of these
 * (an ordering sketch follows this block):
 * rx_connHashTable_lock - synchronizes conn creation, rx_connHashTable access
 * also protects updates to rx_nextCid
 * conn_call_lock - used to synchronize rx_EndCall and rx_NewCall
 * call->lock - locks call data fields.
 * These are independent of each other:
 * rx_freeCallQueue_lock
 * serverQueueEntry->lock
 * rx_peerHashTable_lock - locked under rx_connHashTable_lock
 * peer->lock - locks peer data fields.
 * conn_data_lock - ensures that no more than one thread updates a conn data
 * field at the same time.
 * Do we need a lock to protect the peer field in the conn structure?
 * conn->peer was previously a constant for all intents and purposes and so
 * had no lock protecting the field.  The multihomed client delta introduced
 * an RX code change: the peer field in the connection structure is now set
 * to the remote interface from which the last packet for this
 * connection was sent out.  This may become an issue if further changes
#define SET_CALL_QUEUE_LOCK(C, L) (C)->call_queue_lock = (L)
#define CLEAR_CALL_QUEUE_LOCK(C) (C)->call_queue_lock = NULL
/* rxdb_fileID is used to identify the lock location, along with line#. */
static int rxdb_fileID = RXDB_FILE_RX;
#endif /* RX_LOCKS_DB */
#else /* RX_ENABLE_LOCKS */
#define SET_CALL_QUEUE_LOCK(C, L)
#define CLEAR_CALL_QUEUE_LOCK(C)
#endif /* RX_ENABLE_LOCKS */
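/*
 * Example of the lock ordering implied by the hierarchy above (a sketch,
 * not a code path copied from this file):
 *
 *     MUTEX_ENTER(&rx_connHashTable_lock);   -- outermost
 *     MUTEX_ENTER(&conn->conn_call_lock);
 *     MUTEX_ENTER(&call->lock);              -- innermost of the three
 *     ...
 *     release in the reverse order
 */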
struct rx_serverQueueEntry *rx_waitForPacket = 0;
 * This mutex serializes calls to our initialization and shutdown routines
 * (rx_InitHost, rx_Finalize and shutdown_rx).  Only one thread can be running
 * these at any time; all other threads must wait for it to finish running, and
 * then examine the value of rxi_running afterwards.
#ifdef AFS_PTHREAD_ENV
# define LOCK_RX_INIT MUTEX_ENTER(&rx_init_mutex)
# define UNLOCK_RX_INIT MUTEX_EXIT(&rx_init_mutex)
# define LOCK_RX_INIT
# define UNLOCK_RX_INIT
/* ------------Exported Interfaces------------- */
static rx_atomic_t rxi_running = RX_ATOMIC_INIT(0);
    return rx_atomic_read(&rxi_running);
/* Initialize rx.  A port number may be mentioned, in which case this
 * becomes the default port number for any service installed later.
 * If 0 is provided for the port number, a random port will be chosen
 * by the kernel.  Whether this will ever overlap anything in
 * /etc/services is anybody's guess...  Returns 0 on success, -1 on
rx_InitHost(u_int host, u_int port)
    char *htable, *ptable;
    if (rxi_IsRunning()) {
        return 0;    /* already started */
    if (afs_winsockInit() < 0)
     * Initialize anything necessary to provide a non-preemptive threading
    rxi_InitializeThreadSupport();
    /* Allocate and initialize a socket for client and perhaps server
    rx_socket = rxi_GetHostUDPSocket(host, (u_short) port);
    if (rx_socket == OSI_NULLSOCKET) {
#if defined(RX_ENABLE_LOCKS) && defined(KERNEL)
#endif /* RX_LOCKS_DB */
    MUTEX_INIT(&rx_stats_mutex, "rx_stats_mutex", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_quota_mutex, "rx_quota_mutex", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_atomic_mutex, "rx_atomic_mutex", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_pthread_mutex, "rx_pthread_mutex", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_packets_mutex, "rx_packets_mutex", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_refcnt_mutex, "rx_refcnt_mutex", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_rpc_stats, "rx_rpc_stats", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_freePktQ_lock, "rx_freePktQ_lock", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&freeSQEList_lock, "freeSQEList lock", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_freeCallQueue_lock, "rx_freeCallQueue_lock", MUTEX_DEFAULT,
    CV_INIT(&rx_waitingForPackets_cv, "rx_waitingForPackets_cv", CV_DEFAULT,
    MUTEX_INIT(&rx_peerHashTable_lock, "rx_peerHashTable_lock", MUTEX_DEFAULT,
    MUTEX_INIT(&rx_connHashTable_lock, "rx_connHashTable_lock", MUTEX_DEFAULT,
    MUTEX_INIT(&rx_serverPool_lock, "rx_serverPool_lock", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_mallocedPktQ_lock, "rx_mallocedPktQ_lock", MUTEX_DEFAULT,
#if defined(AFS_HPUX110_ENV)
    rx_sleepLock = alloc_spinlock(LAST_HELD_ORDER - 10, "rx_sleepLock");
#endif /* AFS_HPUX110_ENV */
#endif /* RX_ENABLE_LOCKS && KERNEL */
    rx_connDeadTime = 12;
    rx_tranquil = 0;    /* reset flag */
    rxi_ResetStatistics();
    htable = osi_Alloc(rx_hashTableSize * sizeof(struct rx_connection *));
    PIN(htable, rx_hashTableSize * sizeof(struct rx_connection *));    /* XXXXX */
    memset(htable, 0, rx_hashTableSize * sizeof(struct rx_connection *));
    ptable = osi_Alloc(rx_hashTableSize * sizeof(struct rx_peer *));
    PIN(ptable, rx_hashTableSize * sizeof(struct rx_peer *));    /* XXXXX */
    memset(ptable, 0, rx_hashTableSize * sizeof(struct rx_peer *));
    /* Malloc up a bunch of packets & buffers */
    opr_queue_Init(&rx_freePacketQueue);
    rxi_NeedMorePackets = FALSE;
    rx_nPackets = 0;    /* rx_nPackets is managed by rxi_MorePackets* */
    opr_queue_Init(&rx_mallocedPacketQueue);
    /* enforce a minimum number of allocated packets */
    if (rx_extraPackets < rxi_nSendFrags * rx_maxSendWindow)
        rx_extraPackets = rxi_nSendFrags * rx_maxSendWindow;
    /* allocate the initial free packet pool */
#ifdef RX_ENABLE_TSFPQ
    rxi_MorePacketsTSFPQ(rx_extraPackets + RX_MAX_QUOTA + 2, RX_TS_FPQ_FLUSH_GLOBAL, 0);
#else /* RX_ENABLE_TSFPQ */
    rxi_MorePackets(rx_extraPackets + RX_MAX_QUOTA + 2);    /* fudge */
#endif /* RX_ENABLE_TSFPQ */
#if defined(AFS_NT40_ENV) && !defined(AFS_PTHREAD_ENV)
    tv.tv_sec = clock_now.sec;
    tv.tv_usec = clock_now.usec;
    srand((unsigned int)tv.tv_usec);
#if defined(KERNEL) && !defined(UKERNEL)
    /* Really, this should never happen in a real kernel */
        struct sockaddr_in addr;
        int addrlen = sizeof(addr);
        socklen_t addrlen = sizeof(addr);
        if (getsockname((intptr_t)rx_socket, (struct sockaddr *)&addr, &addrlen)) {
            rxi_Finalize_locked();
            osi_Free(htable, rx_hashTableSize * sizeof(struct rx_connection *));
        rx_port = addr.sin_port;
    rx_stats.minRtt.sec = 9999999;
    if (RAND_bytes(&rx_epoch, sizeof(rx_epoch)) != 1)
    rx_epoch = (rx_epoch & ~0x40000000) | 0x80000000;
    if (RAND_bytes(&rx_nextCid, sizeof(rx_nextCid)) != 1)
    rx_nextCid &= RX_CIDMASK;
    MUTEX_ENTER(&rx_quota_mutex);
    rxi_dataQuota += rx_extraQuota;    /* + extra pkts caller asked to rsrv */
    MUTEX_EXIT(&rx_quota_mutex);
    /* *Slightly* random start time for the cid.  This is just to help
     * out with the hashing function at the peer */
    rx_nextCid = ((tv.tv_sec ^ tv.tv_usec) << RX_CIDSHIFT);
    rx_connHashTable = (struct rx_connection **)htable;
    rx_peerHashTable = (struct rx_peer **)ptable;
    rx_hardAckDelay.sec = 0;
    rx_hardAckDelay.usec = 100000;    /* 100 milliseconds */
    rxevent_Init(20, rxi_ReScheduleEvents);
    /* Initialize various global queues */
    opr_queue_Init(&rx_idleServerQueue);
    opr_queue_Init(&rx_incomingCallQueue);
    opr_queue_Init(&rx_freeCallQueue);
#if defined(AFS_NT40_ENV) && !defined(KERNEL)
    /* Initialize our list of usable IP addresses. */
    /* Start listener process (exact function is dependent on the
     * implementation environment--kernel or user space) */
    rx_atomic_set(&rxi_running, 1);
    return rx_InitHost(htonl(INADDR_ANY), port);
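/*
 * Usage sketch (illustrative, error handling elided): a process initializes
 * the library once before creating services or connections.
 *
 *     if (rx_Init(0) != 0)        -- 0 lets the kernel choose the UDP port
 *         exit(1);
 */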
 * The rxi_rto functions implement a TCP (RFC2988) style algorithm for
 * maintaining the round trip timer.
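/*
 * Illustrative sketch (not code from this file): an RFC 2988 style
 * estimator keeps a smoothed RTT and a variance and derives the
 * retransmission timeout roughly as
 *
 *     rttvar = (3/4) * rttvar + (1/4) * |srtt - sample|
 *     srtt   = (7/8) * srtt   + (1/8) * sample
 *     rto    = srtt + 4 * rttvar
 *
 * rx performs an equivalent fixed-point update in rxi_ComputeRoundTripTime.
 */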
 * Start a new RTT timer for a given call and packet.
 * There must be no resendEvent already listed for this call, otherwise this
 * will leak events - intended for internal use within the RTO code only
 *    the RX call to start the timer for
 * @param[in] lastPacket
 *    a flag indicating whether the last packet has been sent or not
 * @pre call must be locked before calling this function
rxi_rto_startTimer(struct rx_call *call, int lastPacket, int istack)
    struct clock now, retryTime;
    MUTEX_ASSERT(&call->lock);
    clock_Add(&retryTime, &call->rto);
    /* If we're sending the last packet, and we're the client, then the server
     * may wait for an additional 400ms before returning the ACK, wait for it
     * rather than hitting a timeout */
    if (lastPacket && call->conn->type == RX_CLIENT_CONNECTION)
        clock_Addmsec(&retryTime, 400);
    CALL_HOLD(call, RX_CALL_REFCOUNT_RESEND);
    call->resendEvent = rxevent_Post(&retryTime, &now, rxi_Resend,
 * Cancel an RTT timer for a given call.
 *    the RX call to cancel the timer for
 * @pre call must be locked before calling this function
rxi_rto_cancel(struct rx_call *call)
    MUTEX_ASSERT(&call->lock);
    if (rxevent_Cancel(&call->resendEvent))
        CALL_RELE(call, RX_CALL_REFCOUNT_RESEND);
 * Tell the RTO timer that we have sent a packet.
 * If the timer isn't already running, then start it. If the timer is running,
 *    the RX call that the packet has been sent on
 * @param[in] lastPacket
 *    A flag which is true if this is the last packet for the call
 * @pre The call must be locked before calling this function
rxi_rto_packet_sent(struct rx_call *call, int lastPacket, int istack)
    if (call->resendEvent)
    rxi_rto_startTimer(call, lastPacket, istack);
 * Tell the RTO timer that we have received a new ACK message
 * This function should be called whenever a call receives an ACK that
 * acknowledges new packets. Whatever happens, we stop the current timer.
 * If there are unacked packets in the queue which have been sent, then
 * we restart the timer from now. Otherwise, we leave it stopped.
 *    the RX call that the ACK has been received on
rxi_rto_packet_acked(struct rx_call *call, int istack)
    struct opr_queue *cursor;
    rxi_rto_cancel(call);
    if (opr_queue_IsEmpty(&call->tq))
    for (opr_queue_Scan(&call->tq, cursor)) {
        struct rx_packet *p = opr_queue_Entry(cursor, struct rx_packet, entry);
        if (p->header.seq > call->tfirst + call->twind)
        if (!(p->flags & RX_PKTFLAG_ACKED) && p->flags & RX_PKTFLAG_SENT) {
            rxi_rto_startTimer(call, p->header.flags & RX_LAST_PACKET, istack);
 * Set an initial round trip timeout for a peer connection
 * @param[in] secs The timeout to set in seconds
rx_rto_setPeerTimeoutSecs(struct rx_peer *peer, int secs) {
    /* peer->rtt is kept in eighths of a millisecond, hence secs * 8000 */
    peer->rtt = secs * 8000;
 * Set a delayed ack event on the specified call for the given time
 * @param[in] call - the call on which to set the event
 * @param[in] offset - the delay from now after which the event fires
rxi_PostDelayedAckEvent(struct rx_call *call, struct clock *offset)
    struct clock now, when;
    MUTEX_ASSERT(&call->lock);
    clock_Add(&when, offset);
    if (clock_Gt(&call->delayedAckTime, &when) &&
        rxevent_Cancel(&call->delayedAckEvent)) {
        /* We successfully cancelled an event too far in the future to install
         * our new one; we can reuse the reference on the call. */
        call->delayedAckEvent = rxevent_Post(&when, &now, rxi_SendDelayedAck,
        call->delayedAckTime = when;
    } else if (call->delayedAckEvent == NULL) {
        CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
        call->delayedAckEvent = rxevent_Post(&when, &now,
        call->delayedAckTime = when;
rxi_CancelDelayedAckEvent(struct rx_call *call)
    MUTEX_ASSERT(&call->lock);
    /* Only drop the ref if we cancelled it before it could run. */
    if (rxevent_Cancel(&call->delayedAckEvent))
        CALL_RELE(call, RX_CALL_REFCOUNT_DELAY);
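/*
 * Usage sketch (illustrative): callers schedule a soft ack using one of the
 * delay constants defined near the top of this file, e.g.
 *
 *     rxi_PostDelayedAckEvent(call, &rx_softAckDelay);
 */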
/* called with unincremented nRequestsRunning to see if it is OK to start
 * a new thread in this service.  Could be "no" for two reasons: over the
 * max quota, or would prevent others from reaching their min quota.
#ifdef RX_ENABLE_LOCKS
/* This version of QuotaOK reserves quota if it's ok while the
 * rx_serverPool_lock is held.  Return quota using ReturnToServerPool().
QuotaOK(struct rx_service *aservice)
    /* check if over max quota */
    if (aservice->nRequestsRunning >= aservice->maxProcs) {
    /* under min quota, we're OK */
    /* otherwise, can use only if there are enough to allow everyone
     * to go to their min quota after this guy starts.
    MUTEX_ENTER(&rx_quota_mutex);
    if ((aservice->nRequestsRunning < aservice->minProcs)
        || (rxi_availProcs > rxi_minDeficit)) {
        aservice->nRequestsRunning++;
        /* just started call in minProcs pool, need fewer to maintain
        if (aservice->nRequestsRunning <= aservice->minProcs)
        MUTEX_EXIT(&rx_quota_mutex);
    MUTEX_EXIT(&rx_quota_mutex);
ReturnToServerPool(struct rx_service *aservice)
    aservice->nRequestsRunning--;
    MUTEX_ENTER(&rx_quota_mutex);
    if (aservice->nRequestsRunning < aservice->minProcs)
    MUTEX_EXIT(&rx_quota_mutex);
#else /* RX_ENABLE_LOCKS */
QuotaOK(struct rx_service *aservice)
    /* under min quota, we're OK */
    if (aservice->nRequestsRunning < aservice->minProcs)
    /* check if over max quota */
    if (aservice->nRequestsRunning >= aservice->maxProcs)
    /* otherwise, can use only if there are enough to allow everyone
     * to go to their min quota after this guy starts.
    MUTEX_ENTER(&rx_quota_mutex);
    if (rxi_availProcs > rxi_minDeficit)
    MUTEX_EXIT(&rx_quota_mutex);
#endif /* RX_ENABLE_LOCKS */
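/*
 * Worked example (illustrative): suppose service A has minProcs = 2 and
 * maxProcs = 4, and service B has minProcs = 1.  A new request for A is
 * admitted only if A is under its max and either A is still under its own
 * min or enough idle threads remain (rxi_availProcs > rxi_minDeficit) for
 * B to still reach its min.
 */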
/* Called by rx_StartServer to start up lwp's to service calls.
   NExistingProcs gives the number of procs already existing, and which
   therefore needn't be created. */
rxi_StartServerProcs(int nExistingProcs)
    struct rx_service *service;
    /* For each service, reserve N processes, where N is the "minimum"
     * number of processes that MUST be able to execute a request in parallel,
     * at any time, for that service.  Also compute the maximum difference
     * between any service's maximum number of processes that can run
     * (i.e. the maximum number that ever will be run, and a guarantee
     * that this number will run if other services aren't running), and its
     * minimum number.  The result is the extra number of processes that
     * we need in order to provide the latter guarantee */
    for (i = 0; i < RX_MAX_SERVICES; i++) {
        service = rx_services[i];
        if (service == (struct rx_service *)0)
        nProcs += service->minProcs;
        diff = service->maxProcs - service->minProcs;
    nProcs += maxdiff;    /* Extra processes needed to allow max number requested to run in any given service, under good conditions */
    nProcs -= nExistingProcs;    /* Subtract the number of procs that were previously created for use as server procs */
    for (i = 0; i < nProcs; i++) {
        rxi_StartServerProc(rx_ServerProc, rx_stackSize);
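/*
 * Worked example (illustrative): with services A (minProcs 2, maxProcs 5)
 * and B (minProcs 1, maxProcs 3), nProcs = 2 + 1 + max(5-2, 3-1) = 6; if
 * one proc already exists, five new server procs are started.
 */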
/* This routine is only required on Windows */
rx_StartClientThread(void)
#ifdef AFS_PTHREAD_ENV
    pid = pthread_self();
#endif /* AFS_PTHREAD_ENV */
#endif /* AFS_NT40_ENV */
/* This routine must be called if any services are exported.  If the
 * donateMe flag is set, the calling process is donated to the server
rx_StartServer(int donateMe)
    struct rx_service *service;
    /* Start server processes, if necessary (exact function is dependent
     * on the implementation environment--kernel or user space).  donateMe
     * will be 1 if there is 1 pre-existing proc, i.e. this one.  In this
     * case, one less new proc will be created by rx_StartServerProcs.
    rxi_StartServerProcs(donateMe);
    /* count up the # of threads in minProcs, and set the min deficit to
     * be that value, too.
    for (i = 0; i < RX_MAX_SERVICES; i++) {
        service = rx_services[i];
        if (service == (struct rx_service *)0)
        MUTEX_ENTER(&rx_quota_mutex);
        rxi_totalMin += service->minProcs;
        /* below works even if a thread is running, since minDeficit would
         * still have been decremented and later re-incremented.
        rxi_minDeficit += service->minProcs;
        MUTEX_EXIT(&rx_quota_mutex);
    /* Turn on reaping of idle server connections */
    rxi_ReapConnections(NULL, NULL, NULL, 0);
#ifndef AFS_NT40_ENV
#ifdef AFS_PTHREAD_ENV
        pid = afs_pointer_to_int(pthread_self());
#else /* AFS_PTHREAD_ENV */
        LWP_CurrentProcess(&pid);
#endif /* AFS_PTHREAD_ENV */
        sprintf(name, "srv_%d", ++nProcs);
        if (registerProgram)
            (*registerProgram) (pid, name);
#endif /* AFS_NT40_ENV */
        rx_ServerProc(NULL);    /* Never returns */
#ifdef RX_ENABLE_TSFPQ
    /* no use leaving packets around in this thread's local queue if
     * it isn't getting donated to the server thread pool.
    rxi_FlushLocalPacketsTSFPQ();
#endif /* RX_ENABLE_TSFPQ */
/* Create a new client connection to the specified service, using the
 * specified security object to implement the security model for this
struct rx_connection *
rx_NewConnection(afs_uint32 shost, u_short sport, u_short sservice,
                 struct rx_securityClass *securityObject,
                 int serviceSecurityIndex)
    struct rx_connection *conn;
    dpf(("rx_NewConnection(host %x, port %u, service %u, securityObject %p, "
         "serviceSecurityIndex %d)\n",
         ntohl(shost), ntohs(sport), sservice, securityObject,
         serviceSecurityIndex));
    /* Vasilsi said: "NETPRI protects Cid and Alloc", but can this be true in
     * the case of kmem_alloc? */
    conn = rxi_AllocConnection();
#ifdef RX_ENABLE_LOCKS
    MUTEX_INIT(&conn->conn_call_lock, "conn call lock", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&conn->conn_data_lock, "conn data lock", MUTEX_DEFAULT, 0);
    CV_INIT(&conn->conn_call_cv, "conn call cv", CV_DEFAULT, 0);
    MUTEX_ENTER(&rx_connHashTable_lock);
    conn->type = RX_CLIENT_CONNECTION;
    conn->epoch = rx_epoch;
    conn->cid = rx_nextCid;
    conn->peer = rxi_FindPeer(shost, sport, 1);
    conn->serviceId = sservice;
    conn->securityObject = securityObject;
    conn->securityData = (void *) 0;
    conn->securityIndex = serviceSecurityIndex;
    rx_SetConnDeadTime(conn, rx_connDeadTime);
    rx_SetConnSecondsUntilNatPing(conn, 0);
    conn->ackRate = RX_FAST_ACK_RATE;
    conn->nSpecific = 0;
    conn->specific = NULL;
    conn->challengeEvent = NULL;
    conn->delayedAbortEvent = NULL;
    conn->abortCount = 0;
    for (i = 0; i < RX_MAXCALLS; i++) {
        conn->twind[i] = rx_initSendWindow;
        conn->rwind[i] = rx_initReceiveWindow;
        conn->lastBusy[i] = 0;
    RXS_NewConnection(securityObject, conn);
        CONN_HASH(shost, sport, conn->cid, conn->epoch, RX_CLIENT_CONNECTION);
    conn->refCount++;    /* no lock required since only this thread knows... */
    conn->next = rx_connHashTable[hashindex];
    rx_connHashTable[hashindex] = conn;
    if (rx_stats_active)
        rx_atomic_inc(&rx_stats.nClientConns);
    MUTEX_EXIT(&rx_connHashTable_lock);
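/*
 * Usage sketch (illustrative; the port, service id, and security index
 * shown are hypothetical):
 *
 *     struct rx_securityClass *secobj = rxnull_NewClientSecurityObject();
 *     struct rx_connection *conn =
 *         rx_NewConnection(host, htons(7000), 52, secobj, 0);
 */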
 * Ensure a connection's timeout values are valid.
 * @param[in] conn The connection to check
 * @post conn->secondsUntilDead <= conn->idleDeadTime <= conn->hardDeadTime,
 *       unless idleDeadTime and/or hardDeadTime are not set
rxi_CheckConnTimeouts(struct rx_connection *conn)
    /* a connection's timeouts must have the relationship
     * deadTime <= idleDeadTime <= hardDeadTime. Otherwise, for example, a
     * total loss of network to a peer may cause an idle timeout instead of a
     * dead timeout, simply because the idle timeout gets hit first. Also set
     * a minimum deadTime of 6, just to ensure it doesn't get set too low. */
    /* this logic is slightly complicated by the fact that
     * idleDeadTime/hardDeadTime may not be set at all, but it's not too bad.
    conn->secondsUntilDead = MAX(conn->secondsUntilDead, 6);
    if (conn->idleDeadTime) {
        conn->idleDeadTime = MAX(conn->idleDeadTime, conn->secondsUntilDead);
    if (conn->hardDeadTime) {
        if (conn->idleDeadTime) {
            conn->hardDeadTime = MAX(conn->idleDeadTime, conn->hardDeadTime);
            conn->hardDeadTime = MAX(conn->secondsUntilDead, conn->hardDeadTime);
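/*
 * Worked example (illustrative): with secondsUntilDead = 50, idleDeadTime =
 * 30 and hardDeadTime = 40, the checks above raise idleDeadTime and
 * hardDeadTime to 50, restoring deadTime <= idleDeadTime <= hardDeadTime.
 */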
rx_SetConnDeadTime(struct rx_connection *conn, int seconds)
    /* The idea is to set the dead time to a value that allows several
     * keepalives to be dropped without timing out the connection. */
    conn->secondsUntilDead = seconds;
    rxi_CheckConnTimeouts(conn);
    conn->secondsUntilPing = conn->secondsUntilDead / 6;
rx_SetConnHardDeadTime(struct rx_connection *conn, int seconds)
    conn->hardDeadTime = seconds;
    rxi_CheckConnTimeouts(conn);
rx_SetConnIdleDeadTime(struct rx_connection *conn, int seconds)
    conn->idleDeadTime = seconds;
    rxi_CheckConnTimeouts(conn);
int rxi_lowPeerRefCount = 0;
int rxi_lowConnRefCount = 0;
 * Cleanup a connection that was destroyed in rxi_DestroyConnectionNoLock.
 * NOTE: must not be called with rx_connHashTable_lock held.
rxi_CleanupConnection(struct rx_connection *conn)
    /* Notify the service exporter, if requested, that this connection
     * is being destroyed */
    if (conn->type == RX_SERVER_CONNECTION && conn->service->destroyConnProc)
        (*conn->service->destroyConnProc) (conn);
    /* Notify the security module that this connection is being destroyed */
    RXS_DestroyConnection(conn->securityObject, conn);
    /* If this is the last connection using the rx_peer struct, set its
     * idle time to now. rxi_ReapConnections will reap it if it's still
     * idle (refCount == 0) after rx_idlePeerTime (60 seconds) have passed.
    MUTEX_ENTER(&rx_peerHashTable_lock);
    if (conn->peer->refCount < 2) {
        conn->peer->idleWhen = clock_Sec();
        if (conn->peer->refCount < 1) {
            conn->peer->refCount = 1;
            if (rx_stats_active) {
                MUTEX_ENTER(&rx_stats_mutex);
                rxi_lowPeerRefCount++;
                MUTEX_EXIT(&rx_stats_mutex);
    conn->peer->refCount--;
    MUTEX_EXIT(&rx_peerHashTable_lock);
    if (rx_stats_active)
        if (conn->type == RX_SERVER_CONNECTION)
            rx_atomic_dec(&rx_stats.nServerConns);
            rx_atomic_dec(&rx_stats.nClientConns);
    if (conn->specific) {
        for (i = 0; i < conn->nSpecific; i++) {
            if (conn->specific[i] && rxi_keyCreate_destructor[i])
                (*rxi_keyCreate_destructor[i]) (conn->specific[i]);
            conn->specific[i] = NULL;
        free(conn->specific);
    conn->specific = NULL;
    conn->nSpecific = 0;
#endif /* !KERNEL */
    MUTEX_DESTROY(&conn->conn_call_lock);
    MUTEX_DESTROY(&conn->conn_data_lock);
    CV_DESTROY(&conn->conn_call_cv);
    rxi_FreeConnection(conn);
/* Destroy the specified connection */
rxi_DestroyConnection(struct rx_connection *conn)
    MUTEX_ENTER(&rx_connHashTable_lock);
    rxi_DestroyConnectionNoLock(conn);
    /* conn should be at the head of the cleanup list */
    if (conn == rx_connCleanup_list) {
        rx_connCleanup_list = rx_connCleanup_list->next;
        MUTEX_EXIT(&rx_connHashTable_lock);
        rxi_CleanupConnection(conn);
#ifdef RX_ENABLE_LOCKS
        MUTEX_EXIT(&rx_connHashTable_lock);
#endif /* RX_ENABLE_LOCKS */
rxi_DestroyConnectionNoLock(struct rx_connection *conn)
    struct rx_connection **conn_ptr;
    MUTEX_ENTER(&conn->conn_data_lock);
    MUTEX_ENTER(&rx_refcnt_mutex);
    if (conn->refCount > 0)
#ifdef RX_REFCOUNT_CHECK
        osi_Assert(conn->refCount == 0);
        if (rx_stats_active) {
            MUTEX_ENTER(&rx_stats_mutex);
            rxi_lowConnRefCount++;
            MUTEX_EXIT(&rx_stats_mutex);
    if ((conn->refCount > 0) || (conn->flags & RX_CONN_BUSY)) {
        /* Busy; wait till the last guy before proceeding */
        MUTEX_EXIT(&rx_refcnt_mutex);
        MUTEX_EXIT(&conn->conn_data_lock);
    /* If the client previously called rx_NewCall, but it is still
     * waiting, treat this as a running call, and wait to destroy the
     * connection later when the call completes. */
    if ((conn->type == RX_CLIENT_CONNECTION)
        && (conn->flags & (RX_CONN_MAKECALL_WAITING|RX_CONN_MAKECALL_ACTIVE))) {
        conn->flags |= RX_CONN_DESTROY_ME;
        MUTEX_EXIT(&rx_refcnt_mutex);
        MUTEX_EXIT(&conn->conn_data_lock);
    MUTEX_EXIT(&rx_refcnt_mutex);
    MUTEX_EXIT(&conn->conn_data_lock);
    /* Check for extant references to this connection */
    MUTEX_ENTER(&conn->conn_call_lock);
    for (i = 0; i < RX_MAXCALLS; i++) {
        struct rx_call *call = conn->call[i];
            if (conn->type == RX_CLIENT_CONNECTION) {
                MUTEX_ENTER(&call->lock);
                if (call->delayedAckEvent) {
                    /* Push the final acknowledgment out now--there
                     * won't be a subsequent call to acknowledge the
                     * last reply packets */
                    rxi_CancelDelayedAckEvent(call);
                    if (call->state == RX_STATE_PRECALL
                        || call->state == RX_STATE_ACTIVE) {
                        rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
                MUTEX_EXIT(&call->lock);
    MUTEX_EXIT(&conn->conn_call_lock);
#ifdef RX_ENABLE_LOCKS
        if (MUTEX_TRYENTER(&conn->conn_data_lock)) {
            MUTEX_EXIT(&conn->conn_data_lock);
            /* Someone is accessing a packet right now. */
#endif /* RX_ENABLE_LOCKS */
        /* Don't destroy the connection if there are any call
         * structures still in use */
        MUTEX_ENTER(&conn->conn_data_lock);
        conn->flags |= RX_CONN_DESTROY_ME;
        MUTEX_EXIT(&conn->conn_data_lock);
    /* Remove from connection hash table before proceeding */
        &rx_connHashTable[CONN_HASH
                          (peer->host, peer->port, conn->cid, conn->epoch,
    for (; *conn_ptr; conn_ptr = &(*conn_ptr)->next) {
        if (*conn_ptr == conn) {
            *conn_ptr = conn->next;
    /* if the conn that we are destroying was the last connection, then we
     * clear rxLastConn as well */
    if (rxLastConn == conn)
    /* Make sure the connection is completely reset before deleting it. */
     * Pending events hold a refcount, so we can't get here if they are
    osi_Assert(conn->challengeEvent == NULL);
    osi_Assert(conn->delayedAbortEvent == NULL);
    osi_Assert(conn->natKeepAliveEvent == NULL);
    osi_Assert(conn->checkReachEvent == NULL);
    /* Add the connection to the list of destroyed connections that
     * need to be cleaned up. This is necessary to avoid deadlocks
     * in the routines we call to inform others that this connection is
     * being destroyed. */
    conn->next = rx_connCleanup_list;
    rx_connCleanup_list = conn;
/* Externally available version */
rx_DestroyConnection(struct rx_connection *conn)
    rxi_DestroyConnection(conn);
rx_GetConnection(struct rx_connection *conn)
    MUTEX_ENTER(&rx_refcnt_mutex);
    MUTEX_EXIT(&rx_refcnt_mutex);
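/*
 * Usage sketch (illustrative): rx_GetConnection and rx_DestroyConnection
 * form a reference-count pair, so a thread that stashes a conn pointer
 * typically does:
 *
 *     rx_GetConnection(conn);        -- take a reference
 *     ...use conn...
 *     rx_DestroyConnection(conn);    -- drop it
 */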
#ifdef RX_ENABLE_LOCKS
/* Wait for the transmit queue to no longer be busy.
 * requires the call->lock to be held */
rxi_WaitforTQBusy(struct rx_call *call) {
    while (!call->error && (call->flags & RX_CALL_TQ_BUSY)) {
        call->flags |= RX_CALL_TQ_WAIT;
        MUTEX_ASSERT(&call->lock);
        CV_WAIT(&call->cv_tq, &call->lock);
        if (call->tqWaiters == 0) {
            call->flags &= ~RX_CALL_TQ_WAIT;
rxi_WakeUpTransmitQueue(struct rx_call *call)
    if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
        dpf(("call %"AFS_PTR_FMT" has %d waiters and flags %d\n",
             call, call->tqWaiters, call->flags));
#ifdef RX_ENABLE_LOCKS
        MUTEX_ASSERT(&call->lock);
        CV_BROADCAST(&call->cv_tq);
#else /* RX_ENABLE_LOCKS */
        osi_rxWakeup(&call->tq);
#endif /* RX_ENABLE_LOCKS */
/* Start a new rx remote procedure call, on the specified connection.
 * If wait is set to 1, wait for a free call channel; otherwise return
 * 0.  Maxtime gives the maximum number of seconds this call may take,
 * after rx_NewCall returns.  After this time interval, a call to any
 * of rx_SendData, rx_ReadData, etc. will fail with RX_CALL_TIMEOUT.
 * For fine grain locking, we hold the conn_call_lock in order to
 * ensure that we don't get signalled after we find a call in an active
 * state and before we go to sleep.
rx_NewCall(struct rx_connection *conn)
    int i, wait, ignoreBusy = 1;
    struct rx_call *call;
    struct clock queueTime;
    afs_uint32 leastBusy = 0;
    dpf(("rx_NewCall(conn %"AFS_PTR_FMT")\n", conn));
    clock_GetTime(&queueTime);
     * Check if there are others waiting for a new call.
     * If so, let them go first to avoid starving them.
     * This is a fairly simple scheme, and might not be
     * a complete solution for large numbers of waiters.
     *
     * makeCallWaiters keeps track of the number of
     * threads waiting to make calls and the
     * RX_CONN_MAKECALL_WAITING flag bit is used to
     * indicate that there are indeed calls waiting.
     * The flag is set when the waiter is incremented.
     * It is only cleared when makeCallWaiters is 0.
     * This prevents us from accidentally destroying the
     * connection while it is potentially about to be used.
    MUTEX_ENTER(&conn->conn_call_lock);
    MUTEX_ENTER(&conn->conn_data_lock);
    while (conn->flags & RX_CONN_MAKECALL_ACTIVE) {
        conn->flags |= RX_CONN_MAKECALL_WAITING;
        conn->makeCallWaiters++;
        MUTEX_EXIT(&conn->conn_data_lock);
#ifdef RX_ENABLE_LOCKS
        CV_WAIT(&conn->conn_call_cv, &conn->conn_call_lock);
        MUTEX_ENTER(&conn->conn_data_lock);
        conn->makeCallWaiters--;
        if (conn->makeCallWaiters == 0)
            conn->flags &= ~RX_CONN_MAKECALL_WAITING;
    /* We are now the active thread in rx_NewCall */
    conn->flags |= RX_CONN_MAKECALL_ACTIVE;
    MUTEX_EXIT(&conn->conn_data_lock);
    for (i = 0; i < RX_MAXCALLS; i++) {
        call = conn->call[i];
        if (!ignoreBusy && conn->lastBusy[i] != leastBusy) {
            /* we're not ignoring busy call slots; only look at the
             * call slot that is the "least" busy */
        if (call->state == RX_STATE_DALLY) {
            MUTEX_ENTER(&call->lock);
            if (call->state == RX_STATE_DALLY) {
                if (ignoreBusy && conn->lastBusy[i]) {
                    /* if we're ignoring busy call slots, skip any ones that
                     * have lastBusy set */
                    if (leastBusy == 0 || conn->lastBusy[i] < leastBusy) {
                        leastBusy = conn->lastBusy[i];
                    MUTEX_EXIT(&call->lock);
                 * We are setting the state to RX_STATE_RESET to
                 * ensure that no one else will attempt to use this
                 * call once we drop the conn->conn_call_lock and
                 * call->lock.  We must drop the conn->conn_call_lock
                 * before calling rxi_ResetCall because the process
                 * of clearing the transmit queue can block for an
                 * extended period of time.  If we block while holding
                 * the conn->conn_call_lock, then all rx_EndCall
                 * processing will block as well.  This has a detrimental
                 * effect on overall system performance.
                call->state = RX_STATE_RESET;
                (*call->callNumber)++;
                MUTEX_EXIT(&conn->conn_call_lock);
                CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
                rxi_ResetCall(call, 0);
                if (MUTEX_TRYENTER(&conn->conn_call_lock))
                 * If we failed to be able to safely obtain the
                 * conn->conn_call_lock we will have to drop the
                 * call->lock to avoid a deadlock.  When the call->lock
                 * is released the state of the call can change.  If it
                 * is no longer RX_STATE_RESET then some other thread is
                MUTEX_EXIT(&call->lock);
                MUTEX_ENTER(&conn->conn_call_lock);
                MUTEX_ENTER(&call->lock);
                if (call->state == RX_STATE_RESET)
                 * If we get here it means that after dropping
                 * the conn->conn_call_lock and call->lock that
                 * the call is no longer ours.  If we can't find
                 * a free call in the remaining slots we should
                 * not go immediately to RX_CONN_MAKECALL_WAITING
                 * because by dropping the conn->conn_call_lock
                 * we have given up synchronization with rx_EndCall.
                 * Instead, cycle through one more time to see if
                 * we can find a call we can make our own.
                CALL_RELE(call, RX_CALL_REFCOUNT_BEGIN);
            MUTEX_EXIT(&call->lock);
            if (ignoreBusy && conn->lastBusy[i]) {
                /* if we're ignoring busy call slots, skip any ones that
                 * have lastBusy set */
                if (leastBusy == 0 || conn->lastBusy[i] < leastBusy) {
                    leastBusy = conn->lastBusy[i];
            /* rxi_NewCall returns with mutex locked */
            call = rxi_NewCall(conn, i);
            CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
    if (i < RX_MAXCALLS) {
        conn->lastBusy[i] = 0;
    if (leastBusy && ignoreBusy) {
        /* we didn't find a usable call slot, but we did see at least one
         * 'busy' slot; look again and only use a slot with the 'least
    MUTEX_ENTER(&conn->conn_data_lock);
    conn->flags |= RX_CONN_MAKECALL_WAITING;
    conn->makeCallWaiters++;
    MUTEX_EXIT(&conn->conn_data_lock);
#ifdef RX_ENABLE_LOCKS
    CV_WAIT(&conn->conn_call_cv, &conn->conn_call_lock);
    MUTEX_ENTER(&conn->conn_data_lock);
    conn->makeCallWaiters--;
    if (conn->makeCallWaiters == 0)
        conn->flags &= ~RX_CONN_MAKECALL_WAITING;
    MUTEX_EXIT(&conn->conn_data_lock);
    /* Client is initially in send mode */
    call->state = RX_STATE_ACTIVE;
    call->error = conn->error;
        call->app.mode = RX_MODE_ERROR;
        call->app.mode = RX_MODE_SENDING;
#ifdef AFS_RXERRQ_ENV
    /* remember how many network errors the peer had when we started, so if
     * more errors are encountered after the call starts, we know the other
     * endpoint won't be responding to us */
    call->neterr_gen = rx_atomic_read(&conn->peer->neterrs);
    /* remember start time for call in case we have hard dead time limit */
    call->queueTime = queueTime;
    clock_GetTime(&call->startTime);
    call->app.bytesSent = 0;
    call->app.bytesRcvd = 0;
    /* Turn on busy protocol. */
    rxi_KeepAliveOn(call);
    /* Attempt MTU discovery */
    rxi_GrowMTUOn(call);
     * We are no longer the active thread in rx_NewCall
    MUTEX_ENTER(&conn->conn_data_lock);
    conn->flags &= ~RX_CONN_MAKECALL_ACTIVE;
    MUTEX_EXIT(&conn->conn_data_lock);
     * Wake up anyone else who might be giving us a chance to
     * run (see code above that avoids resource starvation).
#ifdef RX_ENABLE_LOCKS
    if (call->flags & (RX_CALL_TQ_BUSY | RX_CALL_TQ_CLEARME)) {
        osi_Panic("rx_NewCall call about to be used without an empty tq");
    CV_BROADCAST(&conn->conn_call_cv);
    MUTEX_EXIT(&conn->conn_call_lock);
    MUTEX_EXIT(&call->lock);
    dpf(("rx_NewCall(call %"AFS_PTR_FMT")\n", call));
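/*
 * Usage sketch (illustrative RPC skeleton; marshalling and error handling
 * elided):
 *
 *     struct rx_call *call = rx_NewCall(conn);
 *     ...rx_Write(call, buf, len);  rx_Read(call, buf, len);...
 *     code = rx_EndCall(call, code);
 */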
rxi_HasActiveCalls(struct rx_connection *aconn)
    struct rx_call *tcall;
    for (i = 0; i < RX_MAXCALLS; i++) {
        if ((tcall = aconn->call[i])) {
            if ((tcall->state == RX_STATE_ACTIVE)
                || (tcall->state == RX_STATE_PRECALL)) {
rxi_GetCallNumberVector(struct rx_connection *aconn,
                        afs_int32 * aint32s)
    struct rx_call *tcall;
    MUTEX_ENTER(&aconn->conn_call_lock);
    for (i = 0; i < RX_MAXCALLS; i++) {
        if ((tcall = aconn->call[i]) && (tcall->state == RX_STATE_DALLY))
            aint32s[i] = aconn->callNumber[i] + 1;
            aint32s[i] = aconn->callNumber[i];
    MUTEX_EXIT(&aconn->conn_call_lock);
rxi_SetCallNumberVector(struct rx_connection *aconn,
                        afs_int32 * aint32s)
    struct rx_call *tcall;
    MUTEX_ENTER(&aconn->conn_call_lock);
    for (i = 0; i < RX_MAXCALLS; i++) {
        if ((tcall = aconn->call[i]) && (tcall->state == RX_STATE_DALLY))
            aconn->callNumber[i] = aint32s[i] - 1;
            aconn->callNumber[i] = aint32s[i];
    MUTEX_EXIT(&aconn->conn_call_lock);
/* Advertise a new service.  A service is named locally by a UDP port
 * number plus a 16-bit service id.  Returns (struct rx_service *) 0
    char *serviceName;     Name for identification purposes (e.g. the
                           service name might be used for probing for
rx_NewServiceHost(afs_uint32 host, u_short port, u_short serviceId,
                  char *serviceName, struct rx_securityClass **securityObjects,
                  int nSecurityObjects,
                  afs_int32(*serviceProc) (struct rx_call * acall))
    osi_socket socket = OSI_NULLSOCKET;
    struct rx_service *tservice;
    if (serviceId == 0) {
         "rx_NewService: service id for service %s must be non-zero.\n",
         "rx_NewService: A non-zero port must be specified on this call if a non-zero port was not provided at Rx initialization (service %s).\n",
    tservice = rxi_AllocService();
    MUTEX_INIT(&tservice->svc_data_lock, "svc data lock", MUTEX_DEFAULT, 0);
    for (i = 0; i < RX_MAX_SERVICES; i++) {
        struct rx_service *service = rx_services[i];
            if (port == service->servicePort && host == service->serviceHost) {
                if (service->serviceId == serviceId) {
                    /* The identical service has already been
                     * installed; if the caller was intending to
                     * change the security classes used by this
                     * service, he/she loses. */
                     "rx_NewService: tried to install service %s with service id %d, which is already in use for service %s\n",
                     serviceName, serviceId, service->serviceName);
                    rxi_FreeService(tservice);
                /* Different service, same port: re-use the socket
                 * which is bound to the same port */
                socket = service->socket;
            if (socket == OSI_NULLSOCKET) {
                /* If we don't already have a socket (from another
                 * service on same port) get a new one */
                socket = rxi_GetHostUDPSocket(host, port);
                if (socket == OSI_NULLSOCKET) {
                    rxi_FreeService(tservice);
            service->socket = socket;
            service->serviceHost = host;
            service->servicePort = port;
            service->serviceId = serviceId;
            service->serviceName = serviceName;
            service->nSecurityObjects = nSecurityObjects;
            service->securityObjects = securityObjects;
            service->minProcs = 0;
            service->maxProcs = 1;
            service->idleDeadTime = 60;
            service->connDeadTime = rx_connDeadTime;
            service->executeRequestProc = serviceProc;
            service->checkReach = 0;
            service->nSpecific = 0;
            service->specific = NULL;
            rx_services[i] = service;    /* not visible until now */
    rxi_FreeService(tservice);
    (osi_Msg "rx_NewService: cannot support > %d services\n",
/* Set configuration options for all of a service's security objects */
rx_SetSecurityConfiguration(struct rx_service *service,
                            rx_securityConfigVariables type,
    for (i = 0; i < service->nSecurityObjects; i++) {
        if (service->securityObjects[i]) {
            RXS_SetConfiguration(service->securityObjects[i], NULL, type,
rx_NewService(u_short port, u_short serviceId, char *serviceName,
              struct rx_securityClass **securityObjects, int nSecurityObjects,
              afs_int32(*serviceProc) (struct rx_call * acall))
    return rx_NewServiceHost(htonl(INADDR_ANY), port, serviceId, serviceName,
                             securityObjects, nSecurityObjects, serviceProc);
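/*
 * Usage sketch (illustrative; the service id, name, and quotas shown are
 * hypothetical):
 *
 *     struct rx_service *svc =
 *         rx_NewService(0, 52, "example", secObjs, nSecObjs, ExampleProc);
 *     if (svc != NULL) {
 *         rx_SetMinProcs(svc, 2);
 *         rx_SetMaxProcs(svc, 4);
 *         rx_StartServer(1);    -- donate this thread; does not return
 *     }
 */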
/* Generic request processing loop. This routine should be called
 * by the implementation dependent rx_ServerProc.  If socketp is
 * non-null, it will be set to the file descriptor that this thread
 * is now listening on. If socketp is null, this routine will never
rxi_ServerProc(int threadID, struct rx_call *newcall, osi_socket * socketp)
    struct rx_call *call;
    struct rx_service *tservice = NULL;
        call = rx_GetCall(threadID, tservice, socketp);
        if (socketp && *socketp != OSI_NULLSOCKET) {
            /* We are now a listener thread */
        if (afs_termState == AFSOP_STOP_RXCALLBACK) {
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
            afs_termState = AFSOP_STOP_AFS;
            afs_osi_Wakeup(&afs_termState);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
        /* if the server is restarting (typically a smooth shutdown) then do
         * not allow any new calls.
        if (rx_tranquil && (call != NULL)) {
            MUTEX_ENTER(&call->lock);
            rxi_CallError(call, RX_RESTARTING);
            rxi_SendCallAbort(call, (struct rx_packet *)0, 0, 0);
            MUTEX_EXIT(&call->lock);
        tservice = call->conn->service;
        if (tservice->beforeProc)
            (*tservice->beforeProc) (call);
        code = tservice->executeRequestProc(call);
        if (tservice->afterProc)
            (*tservice->afterProc) (call, code);
        rx_EndCall(call, code);
        if (tservice->postProc)
            (*tservice->postProc) (code);
        if (rx_stats_active) {
            MUTEX_ENTER(&rx_stats_mutex);
            MUTEX_EXIT(&rx_stats_mutex);
rx_WakeupServerProcs(void)
    struct rx_serverQueueEntry *np, *tqp;
    struct opr_queue *cursor;
    MUTEX_ENTER(&rx_serverPool_lock);
#ifdef RX_ENABLE_LOCKS
    if (rx_waitForPacket)
        CV_BROADCAST(&rx_waitForPacket->cv);
#else /* RX_ENABLE_LOCKS */
    if (rx_waitForPacket)
        osi_rxWakeup(rx_waitForPacket);
#endif /* RX_ENABLE_LOCKS */
    MUTEX_ENTER(&freeSQEList_lock);
    for (np = rx_FreeSQEList; np; np = tqp) {
        tqp = *(struct rx_serverQueueEntry **)np;
#ifdef RX_ENABLE_LOCKS
        CV_BROADCAST(&np->cv);
#else /* RX_ENABLE_LOCKS */
#endif /* RX_ENABLE_LOCKS */
    MUTEX_EXIT(&freeSQEList_lock);
    for (opr_queue_Scan(&rx_idleServerQueue, cursor)) {
        np = opr_queue_Entry(cursor, struct rx_serverQueueEntry, entry);
#ifdef RX_ENABLE_LOCKS
        CV_BROADCAST(&np->cv);
#else /* RX_ENABLE_LOCKS */
#endif /* RX_ENABLE_LOCKS */
    MUTEX_EXIT(&rx_serverPool_lock);
 * One thing that seems to happen is that all the server threads get
 * tied up on some empty or slow call, and then a whole bunch of calls
 * arrive at once, using up the packet pool, so now there are more
 * empty calls.  The most critical resources here are server threads
 * and the free packet pool.  The "doreclaim" code seems to help in
 * general.  I think that eventually we arrive in this state: there
 * are lots of pending calls which do have all their packets present,
 * so they won't be reclaimed, are multi-packet calls, so they won't
 * be scheduled until later, and thus are tying up most of the free
 * packet pool for a very long time.
 * 1.  schedule multi-packet calls if all the packets are present.
 * Probably CPU-bound operation, useful to return packets to pool.
 * But what do we do if there is a full window and the last packet isn't here?
 * 3.  preserve one thread which *only* runs "best" calls, otherwise
 * it sleeps and waits for that type of call.
 * 4.  Don't necessarily reserve a whole window for each thread.  In fact,
 * the current dataquota business is badly broken.  The quota isn't adjusted
 * to reflect how many packets are presently queued for a running call.
 * So, when we schedule a queued call with a full window of packets queued
 * up for it, that *should* free up a window full of packets for other 2d-class
 * calls to be able to use from the packet pool.  But it doesn't.
 * NB. Most of the time, this code doesn't run -- since idle server threads
 * sit on the idle server queue and are assigned by "...ReceivePacket" as soon
 * as a new call arrives.
/* Sleep until a call arrives.  Returns a pointer to the call, ready
 * for an rx_Read. */
#ifdef RX_ENABLE_LOCKS
rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)
    struct rx_serverQueueEntry *sq;
    struct rx_call *call = (struct rx_call *)0;
    struct rx_service *service = NULL;
    MUTEX_ENTER(&freeSQEList_lock);
    if ((sq = rx_FreeSQEList)) {
        rx_FreeSQEList = *(struct rx_serverQueueEntry **)sq;
        MUTEX_EXIT(&freeSQEList_lock);
    } else {    /* otherwise allocate a new one and return that */
        MUTEX_EXIT(&freeSQEList_lock);
        sq = rxi_Alloc(sizeof(struct rx_serverQueueEntry));
        MUTEX_INIT(&sq->lock, "server Queue lock", MUTEX_DEFAULT, 0);
        CV_INIT(&sq->cv, "server Queue lock", CV_DEFAULT, 0);
    MUTEX_ENTER(&rx_serverPool_lock);
    if (cur_service != NULL) {
        ReturnToServerPool(cur_service);
        if (!opr_queue_IsEmpty(&rx_incomingCallQueue)) {
            struct rx_call *tcall, *choice2 = NULL;
            struct opr_queue *cursor;
            /* Scan for eligible incoming calls.  A call is not eligible
             * if the maximum number of calls for its service type are
             * already executing */
            /* One thread will process calls FCFS (to prevent starvation),
             * while the other threads may run ahead looking for calls which
             * have all their input data available immediately.  This helps
             * keep threads from blocking, waiting for data from the client. */
            for (opr_queue_Scan(&rx_incomingCallQueue, cursor)) {
                tcall = opr_queue_Entry(cursor, struct rx_call, entry);
                service = tcall->conn->service;
                if (!QuotaOK(service)) {
                MUTEX_ENTER(&rx_pthread_mutex);
                if (tno == rxi_fcfs_thread_num
                    || opr_queue_IsEnd(&rx_incomingCallQueue, cursor)) {
                    MUTEX_EXIT(&rx_pthread_mutex);
                    /* If we're the fcfs thread, then we'll just use
                     * this call. If we haven't been able to find an optimal
                     * choice, and we're at the end of the list, then use a
                     * 2d choice if one has been identified.  Otherwise... */
                    call = (choice2 ? choice2 : tcall);
                    service = call->conn->service;
                    MUTEX_EXIT(&rx_pthread_mutex);
                    if (!opr_queue_IsEmpty(&tcall->rq)) {
                        struct rx_packet *rp;
                        rp = opr_queue_First(&tcall->rq, struct rx_packet,
                        if (rp->header.seq == 1) {
                            || (rp->header.flags & RX_LAST_PACKET)) {
                        } else if (rxi_2dchoice && !choice2
                                   && !(tcall->flags & RX_CALL_CLEARED)
                                   && (tcall->rprev > rxi_HardAckRate)) {
                    ReturnToServerPool(service);
            opr_queue_Remove(&call->entry);
            MUTEX_EXIT(&rx_serverPool_lock);
            MUTEX_ENTER(&call->lock);
            if (call->flags & RX_CALL_WAIT_PROC) {
                call->flags &= ~RX_CALL_WAIT_PROC;
                rx_atomic_dec(&rx_nWaiting);
            if (call->state != RX_STATE_PRECALL || call->error) {
                MUTEX_EXIT(&call->lock);
                MUTEX_ENTER(&rx_serverPool_lock);
                ReturnToServerPool(service);
            if (opr_queue_IsEmpty(&call->rq)
                || opr_queue_First(&call->rq, struct rx_packet, entry)->header.seq != 1)
                rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
            CLEAR_CALL_QUEUE_LOCK(call);
2110 /* If there are no eligible incoming calls, add this process
2111 * to the idle server queue, to wait for one */
2115 *socketp = OSI_NULLSOCKET;
2117 sq->socketp = socketp;
2118 opr_queue_Append(&rx_idleServerQueue, &sq->entry);
2119 #ifndef AFS_AIX41_ENV
2120 rx_waitForPacket = sq;
2121 #endif /* AFS_AIX41_ENV */
2123 CV_WAIT(&sq->cv, &rx_serverPool_lock);
2125 if (afs_termState == AFSOP_STOP_RXCALLBACK) {
2126 MUTEX_EXIT(&rx_serverPool_lock);
2127 return (struct rx_call *)0;
2130 } while (!(call = sq->newcall)
2131 && !(socketp && *socketp != OSI_NULLSOCKET));
2132 MUTEX_EXIT(&rx_serverPool_lock);
2134 MUTEX_ENTER(&call->lock);
2140 MUTEX_ENTER(&freeSQEList_lock);
2141 *(struct rx_serverQueueEntry **)sq = rx_FreeSQEList;
2142 rx_FreeSQEList = sq;
2143 MUTEX_EXIT(&freeSQEList_lock);
2146 clock_GetTime(&call->startTime);
2147 call->state = RX_STATE_ACTIVE;
2148 call->app.mode = RX_MODE_RECEIVING;
2149 #ifdef RX_KERNEL_TRACE
2150 if (ICL_SETACTIVE(afs_iclSetp)) {
2151 int glockOwner = ISAFS_GLOCK();
2154 afs_Trace3(afs_iclSetp, CM_TRACE_WASHERE, ICL_TYPE_STRING,
2155 __FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER,
2162 rxi_calltrace(RX_CALL_START, call);
2163 dpf(("rx_GetCall(port=%d, service=%d) ==> call %"AFS_PTR_FMT"\n",
2164 call->conn->service->servicePort, call->conn->service->serviceId,
2167 MUTEX_EXIT(&call->lock);
2168 CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
2170 dpf(("rx_GetCall(socketp=%p, *socketp=0x%x)\n", socketp, *socketp));
2175 #else /* RX_ENABLE_LOCKS */
2177 rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)
2179 struct rx_serverQueueEntry *sq;
2180 struct rx_call *call = (struct rx_call *)0, *choice2;
2181 struct rx_service *service = NULL;
2185 MUTEX_ENTER(&freeSQEList_lock);
2187 if ((sq = rx_FreeSQEList)) {
2188 rx_FreeSQEList = *(struct rx_serverQueueEntry **)sq;
2189 MUTEX_EXIT(&freeSQEList_lock);
2190 } else { /* otherwise allocate a new one and return that */
2191 MUTEX_EXIT(&freeSQEList_lock);
2192 sq = rxi_Alloc(sizeof(struct rx_serverQueueEntry));
2193 MUTEX_INIT(&sq->lock, "server Queue lock", MUTEX_DEFAULT, 0);
2194 CV_INIT(&sq->cv, "server Queue lock", CV_DEFAULT, 0);
2196 MUTEX_ENTER(&sq->lock);
2198 if (cur_service != NULL) {
2199 cur_service->nRequestsRunning--;
2200 MUTEX_ENTER(&rx_quota_mutex);
2201 if (cur_service->nRequestsRunning < cur_service->minProcs)
2204 MUTEX_EXIT(&rx_quota_mutex);
2206 if (!opr_queue_IsEmpty(&rx_incomingCallQueue)) {
2207 struct rx_call *tcall;
2208 struct opr_queue *cursor;
2209 /* Scan for eligible incoming calls. A call is not eligible
2210 * if the maximum number of calls for its service type are
2211 * already executing */
2212 /* One thread will process calls FCFS (to prevent starvation),
2213 * while the other threads may run ahead looking for calls which
2214 * have all their input data available immediately. This helps
2215 * keep threads from blocking, waiting for data from the client. */
2216 choice2 = (struct rx_call *)0;
2217 for (opr_queue_Scan(&rx_incomingCallQueue, cursor)) {
2218 tcall = opr_queue_Entry(cursor, struct rx_call, entry);
2219 service = tcall->conn->service;
2220 if (QuotaOK(service)) {
2221 MUTEX_ENTER(&rx_pthread_mutex);
2222 /* XXX - If tcall->entry.next is NULL, then we're no longer
2223 * on a queue at all. This shouldn't happen. */
2224 if (tno == rxi_fcfs_thread_num || !tcall->entry.next) {
2225 MUTEX_EXIT(&rx_pthread_mutex);
2226 /* If we're the fcfs thread, then we'll just use
2227 * this call. If we haven't been able to find an optimal
2228 * choice, and we're at the end of the list, then use a
2229 * 2d choice if one has been identified. Otherwise... */
2230 call = (choice2 ? choice2 : tcall);
2231 service = call->conn->service;
2233 MUTEX_EXIT(&rx_pthread_mutex);
2234 if (!opr_queue_IsEmpty(&tcall->rq)) {
2235 struct rx_packet *rp;
2236 rp = opr_queue_First(&tcall->rq, struct rx_packet,
2238 if (rp->header.seq == 1
2240 || (rp->header.flags & RX_LAST_PACKET))) {
2242 } else if (rxi_2dchoice && !choice2
2243 && !(tcall->flags & RX_CALL_CLEARED)
2244 && (tcall->rprev > rxi_HardAckRate)) {
2257 opr_queue_Remove(&call->entry);
2258 /* we can't schedule a call if there's no data!!! */
2259 /* send an ack if there's no data, if we're missing the
2260 * first packet, or we're missing something between first
2261 * and last -- there's a "hole" in the incoming data. */
2262 if (opr_queue_IsEmpty(&call->rq)
2263 || opr_queue_First(&call->rq, struct rx_packet, entry)->header.seq != 1
2264 || call->rprev != opr_queue_Last(&call->rq, struct rx_packet, entry)->header.seq)
2265 rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
2267 call->flags &= (~RX_CALL_WAIT_PROC);
2268 service->nRequestsRunning++;
2269 /* just started call in minProcs pool, need fewer to maintain
2271 MUTEX_ENTER(&rx_quota_mutex);
2272 if (service->nRequestsRunning <= service->minProcs)
2275 MUTEX_EXIT(&rx_quota_mutex);
2276 rx_atomic_dec(&rx_nWaiting);
2277 /* MUTEX_EXIT(&call->lock); */
2279 /* If there are no eligible incoming calls, add this process
2280 * to the idle server queue, to wait for one */
2283 *socketp = OSI_NULLSOCKET;
2285 sq->socketp = socketp;
2286 opr_queue_Append(&rx_idleServerQueue, &sq->entry);
2290 if (afs_termState == AFSOP_STOP_RXCALLBACK) {
2292 rxi_Free(sq, sizeof(struct rx_serverQueueEntry));
2293 return (struct rx_call *)0;
2296 } while (!(call = sq->newcall)
2297 && !(socketp && *socketp != OSI_NULLSOCKET));
2299 MUTEX_EXIT(&sq->lock);
2301 MUTEX_ENTER(&freeSQEList_lock);
2302 *(struct rx_serverQueueEntry **)sq = rx_FreeSQEList;
2303 rx_FreeSQEList = sq;
2304 MUTEX_EXIT(&freeSQEList_lock);
2307 clock_GetTime(&call->startTime);
2308 call->state = RX_STATE_ACTIVE;
2309 call->app.mode = RX_MODE_RECEIVING;
2310 #ifdef RX_KERNEL_TRACE
2311 if (ICL_SETACTIVE(afs_iclSetp)) {
2312 int glockOwner = ISAFS_GLOCK();
2315 afs_Trace3(afs_iclSetp, CM_TRACE_WASHERE, ICL_TYPE_STRING,
2316 __FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER,
2323 rxi_calltrace(RX_CALL_START, call);
2324 dpf(("rx_GetCall(port=%d, service=%d) ==> call %p\n",
2325 call->conn->service->servicePort, call->conn->service->serviceId,
2328 dpf(("rx_GetCall(socketp=%p, *socketp=0x%x)\n", socketp, *socketp));
2335 #endif /* RX_ENABLE_LOCKS */
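/* Usage sketch (illustrative, not part of Rx): the shape of a server worker
 * thread built on rx_GetCall/rx_EndCall. threadId and process_request are
 * hypothetical; passing the previous call's service back as cur_service is
 * what lets rx_GetCall return that service's quota slot to the pool.
 *
 *     struct rx_service *svc = NULL;
 *     osi_socket sock = OSI_NULLSOCKET;
 *     struct rx_call *call;
 *
 *     while ((call = rx_GetCall(threadId, svc, &sock)) != NULL) {
 *         svc = call->conn->service;
 *         process_request(call);     // rx_Read the args, rx_Write the reply
 *         rx_EndCall(call, 0);       // 0: nothing to report to the peer
 *     }
 */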
2339 /* Establish a procedure to be called when a packet arrives for a
2340 * call. This routine will be called at most once after each call,
2341 * and will also be called if there is an error condition on the call or
2342 * the call is complete. Used by multi rx to build a selection
2343 * function which determines which of several calls is likely to be a
2344 * good one to read from.
2345 * NOTE: the way this is currently implemented it is probably only a
2346 * good idea to (1) use it immediately after a newcall (clients only)
2347 * and (2) only use it once. Other uses currently void your warranty
2350 rx_SetArrivalProc(struct rx_call *call,
2351 void (*proc) (struct rx_call * call,
2354 void * handle, int arg)
2356 call->arrivalProc = proc;
2357 call->arrivalProcHandle = handle;
2358 call->arrivalProcArg = arg;
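/* Usage sketch (illustrative, not part of Rx): per the NOTE above, install
 * the arrival procedure immediately after creating a client call, and only
 * once; myArrivalProc, myHandle and myIndex are hypothetical.
 *
 *     struct rx_call *call = rx_NewCall(conn);
 *     rx_SetArrivalProc(call, myArrivalProc, myHandle, myIndex);
 */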
2361 /* Call is finished (possibly prematurely). Return rc to the peer, if
2362 * appropriate, and return the final error code from the conversation
2366 rx_EndCall(struct rx_call *call, afs_int32 rc)
2368 struct rx_connection *conn = call->conn;
2372 dpf(("rx_EndCall(call %"AFS_PTR_FMT" rc %d error %d abortCode %d)\n",
2373 call, rc, call->error, call->abortCode));
2376 MUTEX_ENTER(&call->lock);
2378 if (rc == 0 && call->error == 0) {
2379 call->abortCode = 0;
2380 call->abortCount = 0;
2383 call->arrivalProc = (void (*)())0;
2384 if (rc && call->error == 0) {
2385 rxi_CallError(call, rc);
2386 call->app.mode = RX_MODE_ERROR;
2387 /* Send an abort message to the peer if this error code has
2388 * only just been set. If it was set previously, assume the
2389 * peer has already been sent the error code or will request it
2391 rxi_SendCallAbort(call, (struct rx_packet *)0, 0, 0);
2393 if (conn->type == RX_SERVER_CONNECTION) {
2394 /* Make sure reply or at least dummy reply is sent */
2395 if (call->app.mode == RX_MODE_RECEIVING) {
2396 MUTEX_EXIT(&call->lock);
2397 rxi_WriteProc(call, 0, 0);
2398 MUTEX_ENTER(&call->lock);
2400 if (call->app.mode == RX_MODE_SENDING) {
2401 rxi_FlushWriteLocked(call);
2403 rxi_calltrace(RX_CALL_END, call);
2404 /* Call goes to hold state until reply packets are acknowledged */
2405 if (call->tfirst + call->nSoftAcked < call->tnext) {
2406 call->state = RX_STATE_HOLD;
2408 call->state = RX_STATE_DALLY;
2409 rxi_ClearTransmitQueue(call, 0);
2410 rxi_rto_cancel(call);
2411 rxi_CancelKeepAliveEvent(call);
2413 } else { /* Client connection */
2415 /* Make sure server receives input packets, in the case where
2416 * no reply arguments are expected */
2418 if ((call->app.mode == RX_MODE_SENDING)
2419 || (call->app.mode == RX_MODE_RECEIVING && call->rnext == 1)) {
2420 MUTEX_EXIT(&call->lock);
2421 (void)rxi_ReadProc(call, &dummy, 1);
2422 MUTEX_ENTER(&call->lock);
2425 /* If we had an outstanding delayed ack, be nice to the server
2426 * and force-send it now.
2428 if (call->delayedAckEvent) {
2429 rxi_CancelDelayedAckEvent(call);
2430 rxi_SendDelayedAck(NULL, call, NULL, 0);
2433 /* We need to release the call lock since it's lower than the
2434 * conn_call_lock and we don't want to hold the conn_call_lock
2435 * over the rx_ReadProc call. The conn_call_lock needs to be held
2436 * here for the case where rx_NewCall is perusing the calls on
2437 * the connection structure. We don't want to signal until
2438 * rx_NewCall is in a stable state. Otherwise, rx_NewCall may
2439 * have checked this call, found it active and by the time it
2440 * goes to sleep, will have missed the signal.
2442 MUTEX_EXIT(&call->lock);
2443 MUTEX_ENTER(&conn->conn_call_lock);
2444 MUTEX_ENTER(&call->lock);
2447 /* While there are some circumstances where a call with an error is
2448 * obviously not on a "busy" channel, be conservative (clearing
2449 * lastBusy is just best-effort to possibly speed up rx_NewCall).
2450 * The call channel is definitely not busy if we just successfully
2451 * completed a call on it. */
2452 conn->lastBusy[call->channel] = 0;
2454 } else if (call->error == RX_CALL_TIMEOUT) {
2455 /* The call is still probably running on the server side, so try to
2456 * avoid this call channel in the future. */
2457 conn->lastBusy[call->channel] = clock_Sec();
2460 MUTEX_ENTER(&conn->conn_data_lock);
2461 conn->flags |= RX_CONN_BUSY;
2462 if (conn->flags & RX_CONN_MAKECALL_WAITING) {
2463 MUTEX_EXIT(&conn->conn_data_lock);
2464 #ifdef RX_ENABLE_LOCKS
2465 CV_BROADCAST(&conn->conn_call_cv);
2470 #ifdef RX_ENABLE_LOCKS
2472 MUTEX_EXIT(&conn->conn_data_lock);
2474 #endif /* RX_ENABLE_LOCKS */
2475 call->state = RX_STATE_DALLY;
2477 error = call->error;
2479 /* currentPacket, nLeft, and nFree must be zeroed here, because
2480 * ResetCall cannot: ResetCall may be called at splnet(), in the
2481 * kernel version, and may interrupt the macros rx_Read or
2482 * rx_Write, which run at normal priority for efficiency. */
2483 if (call->app.currentPacket) {
2484 #ifdef RX_TRACK_PACKETS
2485 call->app.currentPacket->flags &= ~RX_PKTFLAG_CP;
2487 rxi_FreePacket(call->app.currentPacket);
2488 call->app.currentPacket = (struct rx_packet *)0;
2491 call->app.nLeft = call->app.nFree = call->app.curlen = 0;
2493 /* Free any packets from the last call to ReadvProc/WritevProc */
2494 #ifdef RXDEBUG_PACKET
2496 #endif /* RXDEBUG_PACKET */
2497 rxi_FreePackets(0, &call->app.iovq);
2498 MUTEX_EXIT(&call->lock);
2500 CALL_RELE(call, RX_CALL_REFCOUNT_BEGIN);
2501 if (conn->type == RX_CLIENT_CONNECTION) {
2502 MUTEX_ENTER(&conn->conn_data_lock);
2503 conn->flags &= ~RX_CONN_BUSY;
2504 MUTEX_EXIT(&conn->conn_data_lock);
2505 MUTEX_EXIT(&conn->conn_call_lock);
2509 * Map errors to the local host's errno.h format.
2511 error = ntoh_syserr_conv(error);
2513 /* If the caller said the call failed with some error, we had better
2514 * return an error code. */
2515 osi_Assert(!rc || error);
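/* Usage sketch (illustrative, not part of Rx): a client treats the value
 * returned by rx_EndCall as the final error for the whole conversation; it
 * has already been converted by ntoh_syserr_conv above. do_rpc stands in
 * for a hypothetical marshalling step.
 *
 *     code = do_rpc(call);            // any rx_Read/rx_Write failure lands here
 *     code = rx_EndCall(call, code);  // 0 on success, else the final error
 */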
2519 #if !defined(KERNEL)
2521 /* Call this routine when shutting down a server or client (especially
2522 * clients). This will allow Rx to gracefully garbage collect server
2523 * connections, and reduce the number of retries that a server might
2524 * make to a dead client.
2525 * This is not quite right, since some calls may still be ongoing and
2526 * we can't lock them to destroy them. */
2532 if (!rxi_IsRunning()) {
2534 return; /* Already shutdown. */
2536 rxi_Finalize_locked();
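/* Usage sketch (illustrative, not part of Rx): typical client teardown,
 * finishing calls and connections before letting Rx garbage collect.
 *
 *     code = rx_EndCall(call, code);
 *     rx_DestroyConnection(conn);
 *     rx_Finalize();    // safe to call again later; it returns if already shut down
 */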
2541 rxi_Finalize_locked(void)
2543 struct rx_connection **conn_ptr, **conn_end;
2544 rx_atomic_set(&rxi_running, 0);
2545 rxi_DeleteCachedConnections();
2546 if (rx_connHashTable) {
2547 MUTEX_ENTER(&rx_connHashTable_lock);
2548 for (conn_ptr = &rx_connHashTable[0], conn_end =
2549 &rx_connHashTable[rx_hashTableSize]; conn_ptr < conn_end;
2551 struct rx_connection *conn, *next;
2552 for (conn = *conn_ptr; conn; conn = next) {
2554 if (conn->type == RX_CLIENT_CONNECTION) {
2555 rx_GetConnection(conn);
2556 #ifdef RX_ENABLE_LOCKS
2557 rxi_DestroyConnectionNoLock(conn);
2558 #else /* RX_ENABLE_LOCKS */
2559 rxi_DestroyConnection(conn);
2560 #endif /* RX_ENABLE_LOCKS */
2564 #ifdef RX_ENABLE_LOCKS
2565 while (rx_connCleanup_list) {
2566 struct rx_connection *conn;
2567 conn = rx_connCleanup_list;
2568 rx_connCleanup_list = rx_connCleanup_list->next;
2569 MUTEX_EXIT(&rx_connHashTable_lock);
2570 rxi_CleanupConnection(conn);
2571 MUTEX_ENTER(&rx_connHashTable_lock);
2573 MUTEX_EXIT(&rx_connHashTable_lock);
2574 #endif /* RX_ENABLE_LOCKS */
2579 afs_winsockCleanup();
2584 /* if we wake up the packet waiter too often, we can get into a loop with two
2585 AllocSendPackets each waking the other up (from ReclaimPacket calls) */
2587 rxi_PacketsUnWait(void)
2589 if (!rx_waitingForPackets) {
2593 if (rxi_OverQuota(RX_PACKET_CLASS_SEND)) {
2594 return; /* still over quota */
2597 rx_waitingForPackets = 0;
2598 #ifdef RX_ENABLE_LOCKS
2599 CV_BROADCAST(&rx_waitingForPackets_cv);
2601 osi_rxWakeup(&rx_waitingForPackets);
2607 /* ------------------Internal interfaces------------------------- */
2609 /* Return this process's service structure for the
2610 * specified socket and service */
2611 static struct rx_service *
2612 rxi_FindService(osi_socket socket, u_short serviceId)
2614 struct rx_service **sp;
2615 for (sp = &rx_services[0]; *sp; sp++) {
2616 if ((*sp)->serviceId == serviceId && (*sp)->socket == socket)
2622 #ifdef RXDEBUG_PACKET
2623 #ifdef KDUMP_RX_LOCK
2624 static struct rx_call_rx_lock *rx_allCallsp = 0;
2626 static struct rx_call *rx_allCallsp = 0;
2628 #endif /* RXDEBUG_PACKET */
2630 /* Allocate a call structure, for the indicated channel of the
2631 * supplied connection. The mode and state of the call must be set by
2632 * the caller. Returns the call with mutex locked. */
2633 static struct rx_call *
2634 rxi_NewCall(struct rx_connection *conn, int channel)
2636 struct rx_call *call;
2637 #ifdef RX_ENABLE_LOCKS
2638 struct rx_call *cp; /* Call pointer temp */
2639 struct opr_queue *cursor;
2642 dpf(("rxi_NewCall(conn %"AFS_PTR_FMT", channel %d)\n", conn, channel));
2644 /* Grab an existing call structure, or allocate a new one.
2645 * Existing call structures are assumed to have been left reset by
2647 MUTEX_ENTER(&rx_freeCallQueue_lock);
2649 #ifdef RX_ENABLE_LOCKS
2651 * EXCEPT that the TQ might not yet be cleared out.
2652 * Skip over those with in-use TQs.
2655 for (opr_queue_Scan(&rx_freeCallQueue, cursor)) {
2656 cp = opr_queue_Entry(cursor, struct rx_call, entry);
2657 if (!(cp->flags & RX_CALL_TQ_BUSY)) {
2663 #else /* RX_ENABLE_LOCKS */
2664 if (!opr_queue_IsEmpty(&rx_freeCallQueue)) {
2665 call = opr_queue_First(&rx_freeCallQueue, struct rx_call, entry);
2666 #endif /* RX_ENABLE_LOCKS */
2667 opr_queue_Remove(&call->entry);
2668 if (rx_stats_active)
2669 rx_atomic_dec(&rx_stats.nFreeCallStructs);
2670 MUTEX_EXIT(&rx_freeCallQueue_lock);
2671 MUTEX_ENTER(&call->lock);
2672 CLEAR_CALL_QUEUE_LOCK(call);
2673 #ifdef RX_ENABLE_LOCKS
2674 /* Now, if TQ wasn't cleared earlier, do it now. */
2675 rxi_WaitforTQBusy(call);
2676 if (call->flags & RX_CALL_TQ_CLEARME) {
2677 rxi_ClearTransmitQueue(call, 1);
2678 /*queue_Init(&call->tq);*/
2680 #endif /* RX_ENABLE_LOCKS */
2681 /* Bind the call to its connection structure */
2683 rxi_ResetCall(call, 1);
2686 call = rxi_Alloc(sizeof(struct rx_call));
2687 #ifdef RXDEBUG_PACKET
2688 call->allNextp = rx_allCallsp;
2689 rx_allCallsp = call;
2691 rx_atomic_inc_and_read(&rx_stats.nCallStructs);
2692 #else /* RXDEBUG_PACKET */
2693 rx_atomic_inc(&rx_stats.nCallStructs);
2694 #endif /* RXDEBUG_PACKET */
2696 MUTEX_EXIT(&rx_freeCallQueue_lock);
2697 MUTEX_INIT(&call->lock, "call lock", MUTEX_DEFAULT, NULL);
2698 MUTEX_ENTER(&call->lock);
2699 CV_INIT(&call->cv_twind, "call twind", CV_DEFAULT, 0);
2700 CV_INIT(&call->cv_rq, "call rq", CV_DEFAULT, 0);
2701 CV_INIT(&call->cv_tq, "call tq", CV_DEFAULT, 0);
2703 /* Initialize once-only items */
2704 opr_queue_Init(&call->tq);
2705 opr_queue_Init(&call->rq);
2706 opr_queue_Init(&call->app.iovq);
2707 #ifdef RXDEBUG_PACKET
2708 call->rqc = call->tqc = call->iovqc = 0;
2709 #endif /* RXDEBUG_PACKET */
2710 /* Bind the call to its connection structure (prereq for reset) */
2712 rxi_ResetCall(call, 1);
2714 call->channel = channel;
2715 call->callNumber = &conn->callNumber[channel];
2716 call->rwind = conn->rwind[channel];
2717 call->twind = conn->twind[channel];
2718 /* Note that the next expected call number is retained (in
2719 * conn->callNumber[i]), even if we reallocate the call structure
2721 conn->call[channel] = call;
2722 /* if the channel's never been used (== 0), we should start at 1, otherwise
2723 * the call number is valid from the last time this channel was used */
2724 if (*call->callNumber == 0)
2725 *call->callNumber = 1;
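/* Worked example of the rule above (illustrative): the first call on a
 * never-used channel runs as call number 1. If the channel's retained
 * conn->callNumber[channel] has advanced to, say, 7, a freshly allocated
 * call structure for that channel continues from 7 rather than restarting
 * at 1, so stale packets from earlier calls remain distinguishable.
 */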
2730 /* A call has been inactive long enough that we can throw away
2731 * state, including the call structure, which is placed on the call
2734 * call->lock and rx_refcnt_mutex are held upon entry.
2735 * haveCTLock is set when called from rxi_ReapConnections.
2737 * return 1 if the call is freed, 0 if not.
2740 rxi_FreeCall(struct rx_call *call, int haveCTLock)
2742 int channel = call->channel;
2743 struct rx_connection *conn = call->conn;
2744 u_char state = call->state;
2747 * We are setting the state to RX_STATE_RESET to
2748 * ensure that no one else will attempt to use this
2749 * call once we drop the refcnt lock. We must drop
2750 * the refcnt lock before calling rxi_ResetCall
2751 * because it cannot be held across acquiring the
2752 * freepktQ lock. NewCall does the same.
2754 call->state = RX_STATE_RESET;
2755 MUTEX_EXIT(&rx_refcnt_mutex);
2756 rxi_ResetCall(call, 0);
2758 if (MUTEX_TRYENTER(&conn->conn_call_lock))
2760 if (state == RX_STATE_DALLY || state == RX_STATE_HOLD)
2761 (*call->callNumber)++;
2763 if (call->conn->call[channel] == call)
2764 call->conn->call[channel] = 0;
2765 MUTEX_EXIT(&conn->conn_call_lock);
2768 * We couldn't obtain the conn_call_lock so we can't
2769 * disconnect the call from the connection. Set the
2770 * call state to dally so that the call can be reused.
2772 MUTEX_ENTER(&rx_refcnt_mutex);
2773 call->state = RX_STATE_DALLY;
2777 MUTEX_ENTER(&rx_freeCallQueue_lock);
2778 SET_CALL_QUEUE_LOCK(call, &rx_freeCallQueue_lock);
2779 #ifdef RX_ENABLE_LOCKS
2780 /* A call may be free even though its transmit queue is still in use.
2781 * Since we search the call list from head to tail, put busy calls at
2782 * the head of the list, and idle calls at the tail.
2784 if (call->flags & RX_CALL_TQ_BUSY)
2785 opr_queue_Prepend(&rx_freeCallQueue, &call->entry);
2787 opr_queue_Append(&rx_freeCallQueue, &call->entry);
2788 #else /* RX_ENABLE_LOCKS */
2789 opr_queue_Append(&rx_freeCallQueue, &call->entry);
2790 #endif /* RX_ENABLE_LOCKS */
2791 if (rx_stats_active)
2792 rx_atomic_inc(&rx_stats.nFreeCallStructs);
2793 MUTEX_EXIT(&rx_freeCallQueue_lock);
2795 /* Destroy the connection if it was previously slated for
2796 * destruction, i.e. the Rx client code previously called
2797 * rx_DestroyConnection (client connections), or
2798 * rxi_ReapConnections called the same routine (server
2799 * connections). Only do this, however, if there are no
2800 * outstanding calls. Note that for fine grain locking, there appears
2801 * to be a deadlock in that rxi_FreeCall has a call locked and
2802 * DestroyConnectionNoLock locks each call in the conn. But note a
2803 * few lines up where we have removed this call from the conn.
2804 * If someone else destroys a connection, they either have no
2805 * call lock held or are going through this section of code.
2807 MUTEX_ENTER(&conn->conn_data_lock);
2808 if (conn->flags & RX_CONN_DESTROY_ME && !(conn->flags & RX_CONN_MAKECALL_WAITING)) {
2809 rx_GetConnection(conn);
2810 MUTEX_EXIT(&conn->conn_data_lock);
2811 #ifdef RX_ENABLE_LOCKS
2813 rxi_DestroyConnectionNoLock(conn);
2815 rxi_DestroyConnection(conn);
2816 #else /* RX_ENABLE_LOCKS */
2817 rxi_DestroyConnection(conn);
2818 #endif /* RX_ENABLE_LOCKS */
2820 MUTEX_EXIT(&conn->conn_data_lock);
2822 MUTEX_ENTER(&rx_refcnt_mutex);
2826 rx_atomic_t rxi_Allocsize = RX_ATOMIC_INIT(0);
2827 rx_atomic_t rxi_Alloccnt = RX_ATOMIC_INIT(0);
2830 rxi_Alloc(size_t size)
2834 if (rx_stats_active) {
2835 rx_atomic_add(&rxi_Allocsize, (int) size);
2836 rx_atomic_inc(&rxi_Alloccnt);
2840 #if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
2841 afs_osi_Alloc_NoSleep(size);
2846 osi_Panic("rxi_Alloc error");
2852 rxi_Free(void *addr, size_t size)
2854 if (rx_stats_active) {
2855 rx_atomic_sub(&rxi_Allocsize, (int) size);
2856 rx_atomic_dec(&rxi_Alloccnt);
2858 osi_Free(addr, size);
2862 rxi_SetPeerMtu(struct rx_peer *peer, afs_uint32 host, afs_uint32 port, int mtu)
2864 struct rx_peer **peer_ptr = NULL, **peer_end = NULL;
2865 struct rx_peer *next = NULL;
2869 MUTEX_ENTER(&rx_peerHashTable_lock);
2871 peer_ptr = &rx_peerHashTable[0];
2872 peer_end = &rx_peerHashTable[rx_hashTableSize];
2875 for ( ; peer_ptr < peer_end; peer_ptr++) {
2878 for ( ; peer; peer = next) {
2880 if (host == peer->host)
2885 hashIndex = PEER_HASH(host, port);
2886 for (peer = rx_peerHashTable[hashIndex]; peer; peer = peer->next) {
2887 if ((peer->host == host) && (peer->port == port))
2892 MUTEX_ENTER(&rx_peerHashTable_lock);
2897 MUTEX_EXIT(&rx_peerHashTable_lock);
2899 MUTEX_ENTER(&peer->peer_lock);
2900 /* We don't handle dropping below the minimum, so don't go below it */
2901 mtu = MAX(mtu, RX_MIN_PACKET_SIZE);
2902 peer->ifMTU=MIN(mtu, peer->ifMTU);
2903 peer->natMTU = rxi_AdjustIfMTU(peer->ifMTU);
2904 /* if we tweaked this down, need to tune our peer MTU too */
2905 peer->MTU = MIN(peer->MTU, peer->natMTU);
2906 /* if we discovered a sub-1500 mtu, degrade */
2907 if (peer->ifMTU < OLD_MAX_PACKET_SIZE)
2908 peer->maxDgramPackets = 1;
2909 /* We no longer have valid peer packet information */
2910 if (peer->maxPacketSize + RX_HEADER_SIZE > peer->ifMTU)
2911 peer->maxPacketSize = 0;
2912 MUTEX_EXIT(&peer->peer_lock);
2914 MUTEX_ENTER(&rx_peerHashTable_lock);
2916 if (host && !port) {
2918 /* pick up where we left off */
2922 MUTEX_EXIT(&rx_peerHashTable_lock);
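/* Worked example (illustrative): suppose an EMSGSIZE report arrives with
 * ee_info == 1400. rxi_ProcessNetError below calls this routine with
 * mtu = 1400 - RX_IPUDP_SIZE, i.e. the payload left after IP+UDP headers.
 * The code above clamps that up to RX_MIN_PACKET_SIZE if necessary, lowers
 * ifMTU only if the new value is smaller, recomputes natMTU and MTU from
 * it, and, since a 1400-byte path is a sub-1500 MTU, falls back to
 * single-packet datagrams (maxDgramPackets = 1) and clears maxPacketSize
 * if it no longer fits in ifMTU.
 */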
2925 #ifdef AFS_RXERRQ_ENV
2927 rxi_SetPeerDead(struct sock_extended_err *err, afs_uint32 host, afs_uint16 port)
2929 int hashIndex = PEER_HASH(host, port);
2930 struct rx_peer *peer;
2932 MUTEX_ENTER(&rx_peerHashTable_lock);
2934 for (peer = rx_peerHashTable[hashIndex]; peer; peer = peer->next) {
2935 if (peer->host == host && peer->port == port) {
2941 MUTEX_EXIT(&rx_peerHashTable_lock);
2944 rx_atomic_inc(&peer->neterrs);
2945 MUTEX_ENTER(&peer->peer_lock);
2946 peer->last_err_origin = RX_NETWORK_ERROR_ORIGIN_ICMP;
2947 peer->last_err_type = err->ee_type;
2948 peer->last_err_code = err->ee_code;
2949 MUTEX_EXIT(&peer->peer_lock);
2951 MUTEX_ENTER(&rx_peerHashTable_lock);
2953 MUTEX_EXIT(&rx_peerHashTable_lock);
2958 rxi_ProcessNetError(struct sock_extended_err *err, afs_uint32 addr, afs_uint16 port)
2960 # ifdef AFS_ADAPT_PMTU
2961 if (err->ee_errno == EMSGSIZE && err->ee_info >= 68) {
2962 rxi_SetPeerMtu(NULL, addr, port, err->ee_info - RX_IPUDP_SIZE);
2966 if (err->ee_origin == SO_EE_ORIGIN_ICMP && err->ee_type == ICMP_DEST_UNREACH) {
2967 switch (err->ee_code) {
2968 case ICMP_NET_UNREACH:
2969 case ICMP_HOST_UNREACH:
2970 case ICMP_PORT_UNREACH:
2973 rxi_SetPeerDead(err, addr, port);
2980 rxi_TranslateICMP(int type, int code)
2983 case ICMP_DEST_UNREACH:
2985 case ICMP_NET_UNREACH:
2986 return "Destination Net Unreachable";
2987 case ICMP_HOST_UNREACH:
2988 return "Destination Host Unreachable";
2989 case ICMP_PROT_UNREACH:
2990 return "Destination Protocol Unreachable";
2991 case ICMP_PORT_UNREACH:
2992 return "Destination Port Unreachable";
2994 return "Destination Net Prohibited";
2996 return "Destination Host Prohibited";
3002 #endif /* AFS_RXERRQ_ENV */
3005 * Get the last network error for a connection
3007 * A "network error" here means an error retrieved from ICMP, or some other
3008 * mechanism outside of Rx that informs us of errors in network reachability.
3010 * If a peer associated with the given Rx connection has received a network
3011 * error recently, this function allows the caller to know what error
3012 * specifically occurred. This can be useful to know, since e.g. ICMP errors
3013 * can cause calls to that peer to be quickly aborted. So, this function can
3014 * help see why a call was aborted due to network errors.
3016 * If we have received traffic from a peer since the last network error, we
3017 * treat that peer as if we had not received a network error for it.
3019 * @param[in] conn The Rx connection to examine
3020 * @param[out] err_origin The origin of the last network error (e.g. ICMP);
3021 * one of the RX_NETWORK_ERROR_ORIGIN_* constants
3022 * @param[out] err_type The type of the last error
3023 * @param[out] err_code The code of the last error
3024 * @param[out] msg Human-readable error message, if applicable; NULL otherwise
3026 * @return If we have an error
3027 * @retval -1 No error to get; 'out' params are undefined
3028 * @retval 0 We have an error; 'out' params contain the last error
3031 rx_GetNetworkError(struct rx_connection *conn, int *err_origin, int *err_type,
3032 int *err_code, const char **msg)
3034 #ifdef AFS_RXERRQ_ENV
3035 struct rx_peer *peer = conn->peer;
3036 if (rx_atomic_read(&peer->neterrs)) {
3037 MUTEX_ENTER(&peer->peer_lock);
3038 *err_origin = peer->last_err_origin;
3039 *err_type = peer->last_err_type;
3040 *err_code = peer->last_err_code;
3041 MUTEX_EXIT(&peer->peer_lock);
3044 if (*err_origin == RX_NETWORK_ERROR_ORIGIN_ICMP) {
3045 *msg = rxi_TranslateICMP(*err_type, *err_code);
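/* Usage sketch (illustrative, not part of Rx): after a call aborts, ask
 * whether a network error to this peer was the likely cause. Variable
 * names are hypothetical.
 *
 *     int origin, type, code;
 *     const char *msg;
 *
 *     if (rx_GetNetworkError(conn, &origin, &type, &code, &msg) == 0)
 *         fprintf(stderr, "peer net error: type %d code %d (%s)\n",
 *                 type, code, msg != NULL ? msg : "no detail");
 */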
3054 /* Find the peer process represented by the supplied (host,port)
3055 * combination. If there is no appropriate active peer structure, a
3056 * new one will be allocated and initialized
3059 rxi_FindPeer(afs_uint32 host, u_short port, int create)
3063 hashIndex = PEER_HASH(host, port);
3064 MUTEX_ENTER(&rx_peerHashTable_lock);
3065 for (pp = rx_peerHashTable[hashIndex]; pp; pp = pp->next) {
3066 if ((pp->host == host) && (pp->port == port))
3071 pp = rxi_AllocPeer(); /* This bzero's *pp */
3072 pp->host = host; /* set here or in InitPeerParams is zero */
3074 #ifdef AFS_RXERRQ_ENV
3075 rx_atomic_set(&pp->neterrs, 0);
3077 MUTEX_INIT(&pp->peer_lock, "peer_lock", MUTEX_DEFAULT, 0);
3078 opr_queue_Init(&pp->rpcStats);
3079 pp->next = rx_peerHashTable[hashIndex];
3080 rx_peerHashTable[hashIndex] = pp;
3081 rxi_InitPeerParams(pp);
3082 if (rx_stats_active)
3083 rx_atomic_inc(&rx_stats.nPeerStructs);
3089 MUTEX_EXIT(&rx_peerHashTable_lock);
3094 /* Find the connection at (host, port) started at epoch, and with the
3095 * given connection id. Creates the server connection if necessary.
3096 * The type specifies whether a client connection or a server
3097 * connection is desired. In both cases, (host, port) specify the
3098 * peer's (host, port) pair. Client connections are not made
3099 * automatically by this routine. The parameter socket gives the
3100 * socket descriptor on which the packet was received. This is used,
3101 * in the case of server connections, to check that *new* connections
3102 * come via a valid (port, serviceId). Finally, the securityIndex
3103 * parameter must match the existing index for the connection. If a
3104 * server connection is created, it will be created using the supplied
3105 * index, if the index is valid for this service */
3106 static struct rx_connection *
3107 rxi_FindConnection(osi_socket socket, afs_uint32 host,
3108 u_short port, u_short serviceId, afs_uint32 cid,
3109 afs_uint32 epoch, int type, u_int securityIndex,
3110 int *unknownService)
3112 int hashindex, flag, i;
3113 struct rx_connection *conn;
3114 *unknownService = 0;
3115 hashindex = CONN_HASH(host, port, cid, epoch, type);
3116 MUTEX_ENTER(&rx_connHashTable_lock);
3117 rxLastConn ? (conn = rxLastConn, flag = 0) : (conn =
3118 rx_connHashTable[hashindex],
3121 if ((conn->type == type) && ((cid & RX_CIDMASK) == conn->cid)
3122 && (epoch == conn->epoch)) {
3123 struct rx_peer *pp = conn->peer;
3124 if (securityIndex != conn->securityIndex) {
3125 /* this isn't supposed to happen, but someone could forge a packet
3126 * like this, and there seems to be some CM bug that makes this
3127 * happen from time to time -- in which case, the fileserver
3129 MUTEX_EXIT(&rx_connHashTable_lock);
3130 return (struct rx_connection *)0;
3132 if (pp->host == host && pp->port == port)
3134 if (type == RX_CLIENT_CONNECTION && pp->port == port)
3136 /* So what happens when it's a callback connection? */
3137 if ( /*type == RX_CLIENT_CONNECTION && */
3138 (conn->epoch & 0x80000000))
3142 /* the connection rxLastConn that was used the last time is not the
3143 ** one we are looking for now. Hence, start searching in the hash table */
3145 conn = rx_connHashTable[hashindex];
3150 struct rx_service *service;
3151 if (type == RX_CLIENT_CONNECTION) {
3152 MUTEX_EXIT(&rx_connHashTable_lock);
3153 return (struct rx_connection *)0;
3155 service = rxi_FindService(socket, serviceId);
3156 if (!service || (securityIndex >= service->nSecurityObjects)
3157 || (service->securityObjects[securityIndex] == 0)) {
3158 MUTEX_EXIT(&rx_connHashTable_lock);
3159 *unknownService = 1;
3160 return (struct rx_connection *)0;
3162 conn = rxi_AllocConnection(); /* This bzero's the connection */
3163 MUTEX_INIT(&conn->conn_call_lock, "conn call lock", MUTEX_DEFAULT, 0);
3164 MUTEX_INIT(&conn->conn_data_lock, "conn data lock", MUTEX_DEFAULT, 0);
3165 CV_INIT(&conn->conn_call_cv, "conn call cv", CV_DEFAULT, 0);
3166 conn->next = rx_connHashTable[hashindex];
3167 rx_connHashTable[hashindex] = conn;
3168 conn->peer = rxi_FindPeer(host, port, 1);
3169 conn->type = RX_SERVER_CONNECTION;
3170 conn->lastSendTime = clock_Sec(); /* don't GC immediately */
3171 conn->epoch = epoch;
3172 conn->cid = cid & RX_CIDMASK;
3173 conn->ackRate = RX_FAST_ACK_RATE;
3174 conn->service = service;
3175 conn->serviceId = serviceId;
3176 conn->securityIndex = securityIndex;
3177 conn->securityObject = service->securityObjects[securityIndex];
3178 conn->nSpecific = 0;
3179 conn->specific = NULL;
3180 rx_SetConnDeadTime(conn, service->connDeadTime);
3181 rx_SetConnIdleDeadTime(conn, service->idleDeadTime);
3182 for (i = 0; i < RX_MAXCALLS; i++) {
3183 conn->twind[i] = rx_initSendWindow;
3184 conn->rwind[i] = rx_initReceiveWindow;
3186 /* Notify security object of the new connection */
3187 RXS_NewConnection(conn->securityObject, conn);
3188 /* XXXX Connection timeout? */
3189 if (service->newConnProc)
3190 (*service->newConnProc) (conn);
3191 if (rx_stats_active)
3192 rx_atomic_inc(&rx_stats.nServerConns);
3195 rx_GetConnection(conn);
3197 rxLastConn = conn; /* store this connection as the last conn used */
3198 MUTEX_EXIT(&rx_connHashTable_lock);
3203 * Abort the call if the server is over the busy threshold. This
3204 * can be used without requiring a call structure be initialised,
3205 * or connected to a particular channel
3208 rxi_AbortIfServerBusy(osi_socket socket, struct rx_connection *conn,
3209 struct rx_packet *np)
3213 if ((rx_BusyThreshold > 0) &&
3214 (rx_atomic_read(&rx_nWaiting) > rx_BusyThreshold)) {
3215 MUTEX_ENTER(&conn->conn_data_lock);
3216 serial = ++conn->serial;
3217 MUTEX_EXIT(&conn->conn_data_lock);
3218 rxi_SendRawAbort(socket, conn->peer->host, conn->peer->port,
3219 serial, rx_BusyError, np, 0);
3220 if (rx_stats_active)
3221 rx_atomic_inc(&rx_stats.nBusies);
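/* Illustrative note: with rx_BusyThreshold set to, say, 100, once more than
 * 100 calls are waiting for server threads every further incoming call is
 * refused with a raw abort carrying rx_BusyError, before any call structure
 * is attached to the connection.
 */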
3228 static_inline struct rx_call *
3229 rxi_ReceiveClientCall(struct rx_packet *np, struct rx_connection *conn)
3232 struct rx_call *call;
3234 channel = np->header.cid & RX_CHANNELMASK;
3235 MUTEX_ENTER(&conn->conn_call_lock);
3236 call = conn->call[channel];
3237 if (np->header.type == RX_PACKET_TYPE_BUSY) {
3238 conn->lastBusy[channel] = clock_Sec();
3240 if (!call || conn->callNumber[channel] != np->header.callNumber) {
3241 MUTEX_EXIT(&conn->conn_call_lock);
3242 if (rx_stats_active)
3243 rx_atomic_inc(&rx_stats.spuriousPacketsRead);
3247 MUTEX_ENTER(&call->lock);
3248 MUTEX_EXIT(&conn->conn_call_lock);
3250 if ((call->state == RX_STATE_DALLY)
3251 && np->header.type == RX_PACKET_TYPE_ACK) {
3252 if (rx_stats_active)
3253 rx_atomic_inc(&rx_stats.ignorePacketDally);
3254 MUTEX_EXIT(&call->lock);
3261 static_inline struct rx_call *
3262 rxi_ReceiveServerCall(osi_socket socket, struct rx_packet *np,
3263 struct rx_connection *conn)
3266 struct rx_call *call;
3268 channel = np->header.cid & RX_CHANNELMASK;
3269 MUTEX_ENTER(&conn->conn_call_lock);
3270 call = conn->call[channel];
3273 if (rxi_AbortIfServerBusy(socket, conn, np)) {
3274 MUTEX_EXIT(&conn->conn_call_lock);
3278 call = rxi_NewCall(conn, channel); /* returns locked call */
3279 *call->callNumber = np->header.callNumber;
3280 MUTEX_EXIT(&conn->conn_call_lock);
3282 call->state = RX_STATE_PRECALL;
3283 clock_GetTime(&call->queueTime);
3284 call->app.bytesSent = 0;
3285 call->app.bytesRcvd = 0;
3286 rxi_KeepAliveOn(call);
3291 if (np->header.callNumber == conn->callNumber[channel]) {
3292 MUTEX_ENTER(&call->lock);
3293 MUTEX_EXIT(&conn->conn_call_lock);
3297 if (np->header.callNumber < conn->callNumber[channel]) {
3298 MUTEX_EXIT(&conn->conn_call_lock);
3299 if (rx_stats_active)
3300 rx_atomic_inc(&rx_stats.spuriousPacketsRead);
3304 MUTEX_ENTER(&call->lock);
3305 MUTEX_EXIT(&conn->conn_call_lock);
3307 /* Wait until the transmit queue is idle before deciding
3308 * whether to reset the current call. Chances are that the
3309 * call will be in either DALLY or HOLD state once the TQ_BUSY
3312 #ifdef RX_ENABLE_LOCKS
3313 if (call->state == RX_STATE_ACTIVE && !call->error) {
3314 rxi_WaitforTQBusy(call);
3315 /* If we entered error state while waiting,
3316 * must call rxi_CallError to permit rxi_ResetCall
3317 * to be processed when the tqWaiter count hits zero.
3320 rxi_CallError(call, call->error);
3321 MUTEX_EXIT(&call->lock);
3325 #endif /* RX_ENABLE_LOCKS */
3326 /* If the new call cannot be taken right now, send a busy and set
3327 * the error condition in this call, so that it terminates as
3328 * quickly as possible */
3329 if (call->state == RX_STATE_ACTIVE) {
3330 rxi_CallError(call, RX_CALL_DEAD);
3331 rxi_SendSpecial(call, conn, NULL, RX_PACKET_TYPE_BUSY,
3333 MUTEX_EXIT(&call->lock);
3337 if (rxi_AbortIfServerBusy(socket, conn, np)) {
3338 MUTEX_EXIT(&call->lock);
3342 rxi_ResetCall(call, 0);
3343 /* The conn_call_lock is not held but no one else should be
3344 * using this call channel while we are processing this incoming
3345 * packet. This assignment should be safe.
3347 *call->callNumber = np->header.callNumber;
3348 call->state = RX_STATE_PRECALL;
3349 clock_GetTime(&call->queueTime);
3350 call->app.bytesSent = 0;
3351 call->app.bytesRcvd = 0;
3352 rxi_KeepAliveOn(call);
3358 /* There are two packet tracing routines available for testing and monitoring
3359 * Rx. One is called just after every packet is received and the other is
3360 * called just before every packet is sent. Received packets have had their
3361 * headers decoded, and packets to be sent have not yet had their headers
3362 * encoded. Both take two parameters: a pointer to the packet and a sockaddr
3363 * containing the network address. Both can be modified. The return value, if
3364 * non-zero, indicates that the packet should be dropped. */
3366 int (*rx_justReceived) (struct rx_packet *, struct sockaddr_in *) = 0;
3367 int (*rx_almostSent) (struct rx_packet *, struct sockaddr_in *) = 0;
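/* Illustrative tracer (not part of Rx): a receive-side hook that logs each
 * packet and keeps them all; my_justReceived is hypothetical. Returning
 * non-zero from either hook drops the packet, per the comment above.
 *
 *     static int
 *     my_justReceived(struct rx_packet *p, struct sockaddr_in *addr)
 *     {
 *         dpf(("tracer: serial %d from host %x\n",
 *              p->header.serial, ntohl(addr->sin_addr.s_addr)));
 *         return 0;   // keep the packet
 *     }
 *
 *     rx_justReceived = my_justReceived;
 */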
3369 /* A packet has been received off the interface. Np is the packet, socket is
3370 * the socket number it was received from (useful in determining which service
3371 * this packet corresponds to), and (host, port) reflect the host,port of the
3372 * sender. This call returns the packet to the caller if it is finished with
3373 * it, rather than de-allocating it, just as a small performance hack */
3376 rxi_ReceivePacket(struct rx_packet *np, osi_socket socket,
3377 afs_uint32 host, u_short port, int *tnop,
3378 struct rx_call **newcallp)
3380 struct rx_call *call;
3381 struct rx_connection *conn;
3383 int unknownService = 0;
3387 struct rx_packet *tnp;
3390 /* We don't print out the packet until now because (1) the time may not be
3391 * accurate enough until now in the lwp implementation (rx_Listener only gets
3392 * the time after the packet is read) and (2) from a protocol point of view,
3393 * this is the first time the packet has been seen */
3394 packetType = (np->header.type > 0 && np->header.type < RX_N_PACKET_TYPES)
3395 ? rx_packetTypes[np->header.type - 1] : "*UNKNOWN*";
3396 dpf(("R %d %s: %x.%d.%d.%d.%d.%d.%d flags %d, packet %"AFS_PTR_FMT"\n",
3397 np->header.serial, packetType, ntohl(host), ntohs(port), np->header.serviceId,
3398 np->header.epoch, np->header.cid, np->header.callNumber,
3399 np->header.seq, np->header.flags, np));
3402 /* Account for connectionless packets */
3403 if (rx_stats_active &&
3404 ((np->header.type == RX_PACKET_TYPE_VERSION) ||
3405 (np->header.type == RX_PACKET_TYPE_DEBUG))) {
3406 struct rx_peer *peer;
3408 /* Try to look up the peer structure, but don't create one */
3409 peer = rxi_FindPeer(host, port, 0);
3411 /* Since this may not be associated with a connection, it may have
3412 * no refCount, meaning we could race with ReapConnections
3415 if (peer && (peer->refCount > 0)) {
3416 #ifdef AFS_RXERRQ_ENV
3417 if (rx_atomic_read(&peer->neterrs)) {
3418 rx_atomic_set(&peer->neterrs, 0);
3421 MUTEX_ENTER(&peer->peer_lock);
3422 peer->bytesReceived += np->length;
3423 MUTEX_EXIT(&peer->peer_lock);
3427 if (np->header.type == RX_PACKET_TYPE_VERSION) {
3428 return rxi_ReceiveVersionPacket(np, socket, host, port, 1);
3431 if (np->header.type == RX_PACKET_TYPE_DEBUG) {
3432 return rxi_ReceiveDebugPacket(np, socket, host, port, 1);
3435 /* If an input tracer function is defined, call it with the packet and
3436 * network address. Note this function may modify its arguments. */
3437 if (rx_justReceived) {
3438 struct sockaddr_in addr;
3440 addr.sin_family = AF_INET;
3441 addr.sin_port = port;
3442 addr.sin_addr.s_addr = host;
3443 memset(&addr.sin_zero, 0, sizeof(addr.sin_zero));
3444 #ifdef STRUCT_SOCKADDR_HAS_SA_LEN
3445 addr.sin_len = sizeof(addr);
3447 drop = (*rx_justReceived) (np, &addr);
3448 /* drop packet if return value is non-zero */
3451 port = addr.sin_port; /* in case fcn changed addr */
3452 host = addr.sin_addr.s_addr;
3456 /* If packet was not sent by the client, then *we* must be the client */
3457 type = ((np->header.flags & RX_CLIENT_INITIATED) != RX_CLIENT_INITIATED)
3458 ? RX_CLIENT_CONNECTION : RX_SERVER_CONNECTION;
3460 /* Find the connection (or fabricate one, if we're the server & if
3461 * necessary) associated with this packet */
3463 rxi_FindConnection(socket, host, port, np->header.serviceId,
3464 np->header.cid, np->header.epoch, type,
3465 np->header.securityIndex, &unknownService);
3467 /* To avoid having 2 connections just abort at each other,
3468 don't abort an abort. */
3470 if (unknownService && (np->header.type != RX_PACKET_TYPE_ABORT))
3471 rxi_SendRawAbort(socket, host, port, 0, RX_INVALID_OPERATION,
3476 #ifdef AFS_RXERRQ_ENV
3477 if (rx_atomic_read(&conn->peer->neterrs)) {
3478 rx_atomic_set(&conn->peer->neterrs, 0);
3482 /* If we're doing statistics, then account for the incoming packet */
3483 if (rx_stats_active) {
3484 MUTEX_ENTER(&conn->peer->peer_lock);
3485 conn->peer->bytesReceived += np->length;
3486 MUTEX_EXIT(&conn->peer->peer_lock);
3489 /* If the connection is in an error state, send an abort packet and ignore
3490 * the incoming packet */
3492 /* Don't respond to an abort packet--we don't want loops! */
3493 MUTEX_ENTER(&conn->conn_data_lock);
3494 if (np->header.type != RX_PACKET_TYPE_ABORT)
3495 np = rxi_SendConnectionAbort(conn, np, 1, 0);
3496 putConnection(conn);
3497 MUTEX_EXIT(&conn->conn_data_lock);
3501 /* Check for connection-only requests (i.e. not call specific). */
3502 if (np->header.callNumber == 0) {
3503 switch (np->header.type) {
3504 case RX_PACKET_TYPE_ABORT: {
3505 /* What if the supplied error is zero? */
3506 afs_int32 errcode = ntohl(rx_GetInt32(np, 0));
3507 dpf(("rxi_ReceivePacket ABORT rx_GetInt32 = %d\n", errcode));
3508 rxi_ConnectionError(conn, errcode);
3509 putConnection(conn);
3512 case RX_PACKET_TYPE_CHALLENGE:
3513 tnp = rxi_ReceiveChallengePacket(conn, np, 1);
3514 putConnection(conn);
3516 case RX_PACKET_TYPE_RESPONSE:
3517 tnp = rxi_ReceiveResponsePacket(conn, np, 1);
3518 putConnection(conn);
3520 case RX_PACKET_TYPE_PARAMS:
3521 case RX_PACKET_TYPE_PARAMS + 1:
3522 case RX_PACKET_TYPE_PARAMS + 2:
3523 /* ignore these packet types for now */
3524 putConnection(conn);
3528 /* Should not reach here, unless the peer is broken: send an
3530 rxi_ConnectionError(conn, RX_PROTOCOL_ERROR);
3531 MUTEX_ENTER(&conn->conn_data_lock);
3532 tnp = rxi_SendConnectionAbort(conn, np, 1, 0);
3533 putConnection(conn);
3534 MUTEX_EXIT(&conn->conn_data_lock);
3539 if (type == RX_SERVER_CONNECTION)
3540 call = rxi_ReceiveServerCall(socket, np, conn);
3542 call = rxi_ReceiveClientCall(np, conn);
3545 putConnection(conn);
3549 MUTEX_ASSERT(&call->lock);
3550 /* Set remote user defined status from packet */
3551 call->remoteStatus = np->header.userStatus;
3553 /* Now do packet type-specific processing */
3554 switch (np->header.type) {
3555 case RX_PACKET_TYPE_DATA:
3556 /* If we're a client, and receiving a response, then all the packets
3557 * we transmitted are implicitly acknowledged. */
3558 if (type == RX_CLIENT_CONNECTION && !opr_queue_IsEmpty(&call->tq))
3559 rxi_AckAllInTransmitQueue(call);
3561 np = rxi_ReceiveDataPacket(call, np, 1, socket, host, port, tnop,
3564 case RX_PACKET_TYPE_ACK:
3565 /* Respond immediately to ack packets requesting acknowledgement
3567 if (np->header.flags & RX_REQUEST_ACK) {
3569 (void)rxi_SendCallAbort(call, 0, 1, 0);
3571 (void)rxi_SendAck(call, 0, np->header.serial,
3572 RX_ACK_PING_RESPONSE, 1);
3574 np = rxi_ReceiveAckPacket(call, np, 1);
3576 case RX_PACKET_TYPE_ABORT: {
3577 /* An abort packet: reset the call, passing the error up to the user. */
3578 /* What if error is zero? */
3579 /* What if the error is -1? The application will treat it as a timeout. */
3580 afs_int32 errdata = ntohl(*(afs_int32 *) rx_DataOf(np));
3581 dpf(("rxi_ReceivePacket ABORT rx_DataOf = %d\n", errdata));
3582 rxi_CallError(call, errdata);
3583 MUTEX_EXIT(&call->lock);
3584 putConnection(conn);
3585 return np; /* xmitting; drop packet */
3587 case RX_PACKET_TYPE_BUSY:
3588 /* Mostly ignore BUSY packets. We will update lastReceiveTime below,
3589 * so we don't think the endpoint is completely dead, but otherwise
3590 * just act as if we never saw anything. If all we get are BUSY packets
3591 * back, then we will eventually error out with RX_CALL_TIMEOUT if the
3592 * connection is configured with idle/hard timeouts. */
3595 case RX_PACKET_TYPE_ACKALL:
3596 /* All packets acknowledged, so we can drop all packets previously
3597 * readied for sending */
3598 rxi_AckAllInTransmitQueue(call);
3601 /* Should not reach here, unless the peer is broken: send an abort
3603 rxi_CallError(call, RX_PROTOCOL_ERROR);
3604 np = rxi_SendCallAbort(call, np, 1, 0);
3607 /* Note when this last legitimate packet was received, for keep-alive
3608 * processing. Note, we delay getting the time until now in the hope that
3609 * the packet will be delivered to the user before any get time is required
3610 * (if not, then the time won't actually be re-evaluated here). */
3611 call->lastReceiveTime = clock_Sec();
3612 MUTEX_EXIT(&call->lock);
3613 putConnection(conn);
3617 /* return true if this is an "interesting" connection from the point of view
3618 of someone trying to debug the system */
3620 rxi_IsConnInteresting(struct rx_connection *aconn)
3623 struct rx_call *tcall;
3625 if (aconn->flags & (RX_CONN_MAKECALL_WAITING | RX_CONN_DESTROY_ME))
3628 for (i = 0; i < RX_MAXCALLS; i++) {
3629 tcall = aconn->call[i];
3631 if ((tcall->state == RX_STATE_PRECALL)
3632 || (tcall->state == RX_STATE_ACTIVE))
3634 if ((tcall->app.mode == RX_MODE_SENDING)
3635 || (tcall->app.mode == RX_MODE_RECEIVING))
3643 /* if this is one of the last few packets AND it wouldn't be used by the
3644 receiving call to immediately satisfy a read request, then drop it on
3645 the floor, since accepting it might prevent a lock-holding thread from
3646 making progress in its reading. If a call has been cleared while in
3647 the precall state then ignore all subsequent packets until the call
3648 is assigned to a thread. */
3651 TooLow(struct rx_packet *ap, struct rx_call *acall)
3655 MUTEX_ENTER(&rx_quota_mutex);
3656 if (((ap->header.seq != 1) && (acall->flags & RX_CALL_CLEARED)
3657 && (acall->state == RX_STATE_PRECALL))
3658 || ((rx_nFreePackets < rxi_dataQuota + 2)
3659 && !((ap->header.seq < acall->rnext + rx_initSendWindow)
3660 && (acall->flags & RX_CALL_READER_WAIT)))) {
3663 MUTEX_EXIT(&rx_quota_mutex);
3669 * Clear the attach wait flag on a connection and proceed.
3671 * Any processing waiting for a connection to be attached should be
3672 * unblocked. We clear the flag and do any other needed tasks.
3675 * the conn to unmark waiting for attach
3677 * @pre conn's conn_data_lock must be locked before calling this function
3681 rxi_ConnClearAttachWait(struct rx_connection *conn)
3683 /* Indicate that rxi_CheckReachEvent is no longer running by
3684 * clearing the flag. Must be atomic under conn_data_lock to
3685 * avoid a new call slipping by: rxi_CheckConnReach holds
3686 * conn_data_lock while checking RX_CONN_ATTACHWAIT.
3688 conn->flags &= ~RX_CONN_ATTACHWAIT;
3689 if (conn->flags & RX_CONN_NAT_PING) {
3690 conn->flags &= ~RX_CONN_NAT_PING;
3691 rxi_ScheduleNatKeepAliveEvent(conn);
3696 * Event handler function for connection-specific events for checking
3697 * reachability. Also called directly from main code with |event| == NULL
3698 * in order to trigger the initial reachability check.
3700 * When |event| == NULL, must be called with the connection data lock held,
3701 * but returns with the lock unlocked.
3704 rxi_CheckReachEvent(struct rxevent *event, void *arg1, void *arg2, int dummy)
3706 struct rx_connection *conn = arg1;
3707 struct rx_call *acall = arg2;
3708 struct rx_call *call = acall;
3709 struct clock when, now;
3713 MUTEX_ENTER(&conn->conn_data_lock);
3715 MUTEX_ASSERT(&conn->conn_data_lock);
3717 if (event != NULL && event == conn->checkReachEvent)
3718 rxevent_Put(&conn->checkReachEvent);
3719 waiting = conn->flags & RX_CONN_ATTACHWAIT;
3720 MUTEX_EXIT(&conn->conn_data_lock);
3724 MUTEX_ENTER(&conn->conn_call_lock);
3725 MUTEX_ENTER(&conn->conn_data_lock);
3726 for (i = 0; i < RX_MAXCALLS; i++) {
3727 struct rx_call *tc = conn->call[i];
3728 if (tc && tc->state == RX_STATE_PRECALL) {
3734 rxi_ConnClearAttachWait(conn);
3735 MUTEX_EXIT(&conn->conn_data_lock);
3736 MUTEX_EXIT(&conn->conn_call_lock);
3741 MUTEX_ENTER(&call->lock);
3742 rxi_SendAck(call, NULL, 0, RX_ACK_PING, 0);
3744 MUTEX_EXIT(&call->lock);
3746 clock_GetTime(&now);
3748 when.sec += RX_CHECKREACH_TIMEOUT;
3749 MUTEX_ENTER(&conn->conn_data_lock);
3750 if (!conn->checkReachEvent) {
3751 rx_GetConnection(conn);
3752 conn->checkReachEvent = rxevent_Post(&when, &now,
3753 rxi_CheckReachEvent, conn,
3756 MUTEX_EXIT(&conn->conn_data_lock);
3759 /* If fired as an event handler, drop our refcount on the connection. */
3761 putConnection(conn);
3765 rxi_CheckConnReach(struct rx_connection *conn, struct rx_call *call)
3767 struct rx_service *service = conn->service;
3768 struct rx_peer *peer = conn->peer;
3769 afs_uint32 now, lastReach;
3771 if (service->checkReach == 0)
3775 MUTEX_ENTER(&peer->peer_lock);
3776 lastReach = peer->lastReachTime;
3777 MUTEX_EXIT(&peer->peer_lock);
3778 if (now - lastReach < RX_CHECKREACH_TTL)
3781 MUTEX_ENTER(&conn->conn_data_lock);
3782 if (conn->flags & RX_CONN_ATTACHWAIT) {
3783 MUTEX_EXIT(&conn->conn_data_lock);