/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/* RX:  Extended Remote Procedure Call */
#include <afsconfig.h>
#include <afs/param.h>

# include "afs/sysincludes.h"
# include "afsincludes.h"
# ifdef AFS_LINUX20_ENV
#  include "h/socket.h"
# endif
# include "netinet/in.h"
# include "netinet/ip6.h"
# include "inet/common.h"
# include "inet/ip_ire.h"
# include "afs/afs_args.h"
# include "afs/afs_osi.h"
# ifdef RX_KERNEL_TRACE
#  include "rx_kcommon.h"
# endif
# if defined(AFS_AIX_ENV)
#  undef RXDEBUG			/* turn off debugging */
# endif
# if defined(AFS_SGI_ENV)
#  include "sys/debug.h"
# endif
# include "afs/sysincludes.h"
# include "afsincludes.h"
# endif /* !UKERNEL */
# include "afs/lock.h"
# include "rx_kmutex.h"
# include "rx_kernel.h"
# define	AFSOP_STOP_RXCALLBACK	210	/* Stop CALLBACK process */
# define	AFSOP_STOP_AFS		211	/* Stop AFS process */
# define	AFSOP_STOP_BKG		212	/* Stop BKG process */
extern afs_int32 afs_termState;
# ifdef AFS_AIX41_ENV
#  include "sys/lockl.h"
#  include "sys/lock_def.h"
# endif /* AFS_AIX41_ENV */
# include "afs/rxgen_consts.h"
# include <afs/afsutil.h>
# include <WINNT\afsreg.h>
#include "rx_atomic.h"
#include "rx_globals.h"
#include "rx_internal.h"
#include <afs/rxgen_consts.h>

#ifdef AFS_PTHREAD_ENV
int (*registerProgram) (pid_t, char *) = 0;
int (*swapNameProgram) (pid_t, const char *, char *) = 0;
#else
int (*registerProgram) (PROCESS, char *) = 0;
int (*swapNameProgram) (PROCESS, const char *, char *) = 0;
#endif
/* Local static routines */
static void rxi_DestroyConnectionNoLock(struct rx_connection *conn);
static void rxi_ComputeRoundTripTime(struct rx_packet *, struct rx_ackPacket *,
				     struct rx_peer *, struct clock *);

#ifdef RX_ENABLE_LOCKS
static void rxi_SetAcksInTransmitQueue(struct rx_call *call);
#endif

#ifdef AFS_GLOBAL_RXLOCK_KERNEL
rx_atomic_t rxi_start_aborted;	/* rxi_start awoke after rxi_Send in error.*/
rx_atomic_t rxi_start_in_error;
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
/* Constant delay time before sending an acknowledge of the last packet
 * received.  This is to avoid sending an extra acknowledge when the
 * client is about to make another call, anyway, or the server is
 * about to respond.
 *
 * The lastAckDelay may not exceed 400ms without causing peers to
 * time out unnecessarily.
 */
struct clock rx_lastAckDelay = {0, 400000};

/* Constant delay time before sending a soft ack when none was requested.
 * This is to make sure we send soft acks before the sender times out.
 * Normally we wait and send a hard ack when the receiver consumes the packet.
 *
 * This value has been 100ms in all shipping versions of OpenAFS. Changing it
 * will require changes to the peer's RTT calculations.
 */
struct clock rx_softAckDelay = {0, 100000};
/*
 * rxi_rpc_peer_stat_cnt counts the total number of peer stat structures
 * currently allocated within rx.  This number is used to allocate the
 * memory required to return the statistics when queried.
 * Protected by the rx_rpc_stats mutex.
 */

static unsigned int rxi_rpc_peer_stat_cnt;

/*
 * rxi_rpc_process_stat_cnt counts the total number of local process stat
 * structures currently allocated within rx.  The number is used to allocate
 * the memory required to return the statistics when queried.
 * Protected by the rx_rpc_stats mutex.
 */

static unsigned int rxi_rpc_process_stat_cnt;

/*
 * rxi_busyChannelError is the error to return to the application when a call
 * channel appears busy (inferred from the receipt of RX_PACKET_TYPE_BUSY
 * packets on the channel), and there are other call channels in the
 * connection that are not busy.  If 0, we do not return errors upon receiving
 * busy packets; we just keep trying on the same call channel until we hit a
 * timeout.
 */
static afs_int32 rxi_busyChannelError = 0;

rx_atomic_t rx_nWaiting = RX_ATOMIC_INIT(0);
rx_atomic_t rx_nWaited = RX_ATOMIC_INIT(0);

#if !defined(offsetof)
#include <stddef.h>		/* for definition of offsetof() */
#endif

#ifdef RX_ENABLE_LOCKS
afs_kmutex_t rx_atomic_mutex;
#endif

/* Forward prototypes */
static struct rx_call *rxi_NewCall(struct rx_connection *, int);
#ifdef AFS_PTHREAD_ENV

/*
 * Use procedural initialization of mutexes/condition variables
 * to ease NT porting
 */

extern afs_kmutex_t rx_quota_mutex;
extern afs_kmutex_t rx_pthread_mutex;
extern afs_kmutex_t rx_packets_mutex;
extern afs_kmutex_t rx_refcnt_mutex;
extern afs_kmutex_t des_init_mutex;
extern afs_kmutex_t des_random_mutex;
extern afs_kmutex_t rx_clock_mutex;
extern afs_kmutex_t rxi_connCacheMutex;
extern afs_kmutex_t rx_event_mutex;
extern afs_kmutex_t event_handler_mutex;
extern afs_kmutex_t listener_mutex;
extern afs_kmutex_t rx_if_init_mutex;
extern afs_kmutex_t rx_if_mutex;
extern afs_kmutex_t rxkad_client_uid_mutex;
extern afs_kmutex_t rxkad_random_mutex;

extern afs_kcondvar_t rx_event_handler_cond;
extern afs_kcondvar_t rx_listener_cond;

static afs_kmutex_t epoch_mutex;
static afs_kmutex_t rx_init_mutex;
static afs_kmutex_t rx_debug_mutex;
static afs_kmutex_t rx_rpc_stats;
static void
rxi_InitPthread(void)
{
    MUTEX_INIT(&rx_clock_mutex, "clock", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_stats_mutex, "stats", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_atomic_mutex, "atomic", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_quota_mutex, "quota", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_pthread_mutex, "pthread", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_packets_mutex, "packets", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_refcnt_mutex, "refcnts", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&epoch_mutex, "epoch", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_init_mutex, "init", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_event_mutex, "event", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&event_handler_mutex, "event handler", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rxi_connCacheMutex, "conn cache", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&listener_mutex, "listener", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_if_init_mutex, "if init", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_if_mutex, "if", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rxkad_client_uid_mutex, "uid", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rxkad_random_mutex, "rxkad random", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_debug_mutex, "debug", MUTEX_DEFAULT, 0);

    CV_INIT(&rx_event_handler_cond, "evhand", CV_DEFAULT, 0);
    CV_INIT(&rx_listener_cond, "rxlisten", CV_DEFAULT, 0);

    osi_Assert(pthread_key_create(&rx_thread_id_key, NULL) == 0);
    osi_Assert(pthread_key_create(&rx_ts_info_key, NULL) == 0);

    rxkad_global_stats_init();

    MUTEX_INIT(&rx_rpc_stats, "rx_rpc_stats", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_freePktQ_lock, "rx_freePktQ_lock", MUTEX_DEFAULT, 0);
#ifdef RX_ENABLE_LOCKS
#ifdef RX_LOCKS_DB
    rxdb_init();
#endif /* RX_LOCKS_DB */
    MUTEX_INIT(&freeSQEList_lock, "freeSQEList lock", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_freeCallQueue_lock, "rx_freeCallQueue_lock", MUTEX_DEFAULT,
	       0);
    CV_INIT(&rx_waitingForPackets_cv, "rx_waitingForPackets_cv", CV_DEFAULT,
	    0);
    MUTEX_INIT(&rx_peerHashTable_lock, "rx_peerHashTable_lock", MUTEX_DEFAULT,
	       0);
    MUTEX_INIT(&rx_connHashTable_lock, "rx_connHashTable_lock", MUTEX_DEFAULT,
	       0);
    MUTEX_INIT(&rx_serverPool_lock, "rx_serverPool_lock", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rxi_keyCreate_lock, "rxi_keyCreate_lock", MUTEX_DEFAULT, 0);
#endif /* RX_ENABLE_LOCKS */
}

pthread_once_t rx_once_init = PTHREAD_ONCE_INIT;
#define INIT_PTHREAD_LOCKS osi_Assert(pthread_once(&rx_once_init, rxi_InitPthread)==0)
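/*
 * Illustrative sketch (not part of rx; all names below are hypothetical):
 * the pthread_once pattern used by INIT_PTHREAD_LOCKS above.  pthread_once
 * guarantees the init routine runs exactly once no matter how many threads
 * race into the entry point, which is why rxi_InitPthread needs no lock of
 * its own.
 */
#if 0
#include <pthread.h>

static pthread_once_t example_once = PTHREAD_ONCE_INIT;

static void
example_init_once(void)
{
    /* one-time setup (mutex/condvar creation) goes here */
}

static void
example_entry_point(void)
{
    pthread_once(&example_once, example_init_once);
    /* ... from here on, all locks are guaranteed initialized ... */
}
#endif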
/*
 * The rx_stats_mutex mutex protects the following global variables:
 * rxi_lowConnRefCount
 * rxi_lowPeerRefCount
 */

/*
 * The rx_quota_mutex mutex protects the following global variables:
 */

/*
 * The rx_freePktQ_lock protects the following global variables:
 */

/*
 * The rx_packets_mutex mutex protects the following global variables:
 */

/*
 * The rx_pthread_mutex mutex protects the following global variables:
 * rxi_fcfs_thread_num
 */
#else
#define INIT_PTHREAD_LOCKS
#endif /* AFS_PTHREAD_ENV */
/* Variables for handling the minProcs implementation.  availProcs gives the
 * number of threads available in the pool at this moment (not counting dudes
 * executing right now).  totalMin gives the total number of procs required
 * for handling all minProcs requests.  minDeficit is a dynamic variable
 * tracking the # of procs required to satisfy all of the remaining minProcs
 * requests.
 *
 * For fine grain locking to work, the quota check and the reservation of
 * a server thread has to come while rxi_availProcs and rxi_minDeficit
 * are locked.  To this end, the code has been modified under #ifdef
 * RX_ENABLE_LOCKS so that quota checks and reservation occur at the
 * same time.  A new function, ReturnToServerPool() returns the allocation.
 *
 * A call can be on several queues (but only one at a time).  When
 * rxi_ResetCall wants to remove the call from a queue, it has to ensure
 * that no one else is touching the queue.  To this end, we store the address
 * of the queue lock in the call structure (under the call lock) when we
 * put the call on a queue, and we clear the call_queue_lock when the
 * call is removed from a queue (once the call lock has been obtained).
 * This allows rxi_ResetCall to safely synchronize with others wishing
 * to manipulate the queue.
 */

#if defined(RX_ENABLE_LOCKS) && defined(KERNEL)
static afs_kmutex_t rx_rpc_stats;
static void rxi_StartUnlocked(struct rxevent *event, void *call,
			      void *arg1, int istack);
#endif
/* We keep a "last conn pointer" in rxi_FindConnection. The odds are
** pretty good that the next packet coming in is from the same connection
** as the last packet, since we're sending multiple packets in a transmit window.
*/
struct rx_connection *rxLastConn = 0;
#ifdef RX_ENABLE_LOCKS
/* The locking hierarchy for rx fine grain locking is composed of these
 * tiers:
 *
 * rx_connHashTable_lock - synchronizes conn creation, rx_connHashTable access
 * conn_call_lock - used to synchronize rx_EndCall and rx_NewCall
 * call->lock - locks call data fields.
 * These are independent of each other:
 *	rx_freeCallQueue_lock
 *
 *	serverQueueEntry->lock
 * rx_peerHashTable_lock - locked under rx_connHashTable_lock
 *
 * peer->lock - locks peer data fields.
 * conn_data_lock - ensures that no more than one thread updates a conn data
 *		    field at the same time.
 *
 * Do we need a lock to protect the peer field in the conn structure?
 * conn->peer was previously a constant for all intents and so has no
 * lock protecting this field.  The multihomed client delta introduced
 * a RX code change: change the peer field in the connection structure
 * to that remote interface from which the last packet for this
 * connection was sent out.  This may become an issue if further changes
 * are made.
 */
#define SET_CALL_QUEUE_LOCK(C, L) (C)->call_queue_lock = (L)
#define CLEAR_CALL_QUEUE_LOCK(C) (C)->call_queue_lock = NULL
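/*
 * Illustrative sketch (hypothetical helper, not part of rx): the intended
 * use of the SET/CLEAR_CALL_QUEUE_LOCK protocol described above.  The queue
 * lock is recorded while the call is enqueued, so rxi_ResetCall can later
 * find and take the right lock before dequeueing the call.
 */
#if 0
static void
example_enqueue_call(struct rx_call *call, struct rx_queue *q,
		     afs_kmutex_t *qlock)
{
    /* caller holds call->lock */
    MUTEX_ENTER(qlock);
    queue_Append(q, call);		/* put the call on the queue */
    SET_CALL_QUEUE_LOCK(call, qlock);	/* remember which lock guards it */
    MUTEX_EXIT(qlock);
}
#endif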
#ifdef RX_LOCKS_DB
/* rxdb_fileID is used to identify the lock location, along with line#. */
static int rxdb_fileID = RXDB_FILE_RX;
#endif /* RX_LOCKS_DB */
#else /* RX_ENABLE_LOCKS */
#define SET_CALL_QUEUE_LOCK(C, L)
#define CLEAR_CALL_QUEUE_LOCK(C)
#endif /* RX_ENABLE_LOCKS */
struct rx_serverQueueEntry *rx_waitForPacket = 0;
struct rx_serverQueueEntry *rx_waitingForPacket = 0;
/* ------------Exported Interfaces------------- */

/* This function allows rxkad to set the epoch to a suitably random number
 * which rx_NewConnection will use in the future.  The principal purpose is to
 * get rxnull connections to use the same epoch as the rxkad connections do, at
 * least once the first rxkad connection is established.  This is important now
 * that the host/port addresses aren't used in FindConnection: the uniqueness
 * of epoch/cid matters and the start time won't do. */

#ifdef AFS_PTHREAD_ENV
/*
 * This mutex protects the following global variables:
 * rx_epoch
 */

#define LOCK_EPOCH MUTEX_ENTER(&epoch_mutex)
#define UNLOCK_EPOCH MUTEX_EXIT(&epoch_mutex)
#else
#define LOCK_EPOCH
#define UNLOCK_EPOCH
#endif /* AFS_PTHREAD_ENV */

void
rx_SetEpoch(afs_uint32 epoch)
{
    LOCK_EPOCH;
    rx_epoch = epoch;
    UNLOCK_EPOCH;
}
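/*
 * Usage sketch (assumptions flagged inline): how a security layer might
 * seed the epoch before creating connections, so rxnull and rxkad
 * connections share one.  "example_random_uint32" is hypothetical; rxkad
 * uses its own entropy source in practice.
 */
#if 0
static void
example_seed_epoch(void)
{
    /* high bit set, mirroring the kernel path in rx_InitHost below */
    rx_SetEpoch(example_random_uint32() | 0x80000000);
}
#endif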
/* Initialize rx.  A port number may be mentioned, in which case this
 * becomes the default port number for any service installed later.
 * If 0 is provided for the port number, a random port will be chosen
 * by the kernel.  Whether this will ever overlap anything in
 * /etc/services is anybody's guess...  Returns 0 on success, -1 on
 * failure. */
int rxinit_status = 1;
#ifdef AFS_PTHREAD_ENV
/*
 * This mutex protects the following global variables:
 * rxinit_status
 */

#define LOCK_RX_INIT MUTEX_ENTER(&rx_init_mutex)
#define UNLOCK_RX_INIT MUTEX_EXIT(&rx_init_mutex)
#else
#define LOCK_RX_INIT
#define UNLOCK_RX_INIT
#endif
int
rx_InitHost(u_int host, u_int port)
{
    int tmp_status;
    struct timeval tv;
    char *htable, *ptable;

    INIT_PTHREAD_LOCKS;
    LOCK_RX_INIT;
    if (rxinit_status == 0) {
	tmp_status = rxinit_status;
	UNLOCK_RX_INIT;
	return tmp_status;	/* Already started; return previous error code. */
    }
#ifdef AFS_NT40_ENV
    if (afs_winsockInit() < 0)
	return -1;
#endif

#ifndef KERNEL
    /*
     * Initialize anything necessary to provide a non-preemptive threading
     * environment.
     */
    rxi_InitializeThreadSupport();
#endif

    /* Allocate and initialize a socket for client and perhaps server
     * connections. */

    rx_socket = rxi_GetHostUDPSocket(host, (u_short) port);
    if (rx_socket == OSI_NULLSOCKET) {
	UNLOCK_RX_INIT;
	return RX_ADDRINUSE;
    }
#if defined(RX_ENABLE_LOCKS) && defined(KERNEL)
#ifdef RX_LOCKS_DB
    rxdb_init();
#endif /* RX_LOCKS_DB */
    MUTEX_INIT(&rx_stats_mutex, "rx_stats_mutex", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_quota_mutex, "rx_quota_mutex", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_pthread_mutex, "rx_pthread_mutex", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_packets_mutex, "rx_packets_mutex", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_refcnt_mutex, "rx_refcnt_mutex", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_rpc_stats, "rx_rpc_stats", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_freePktQ_lock, "rx_freePktQ_lock", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&freeSQEList_lock, "freeSQEList lock", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_freeCallQueue_lock, "rx_freeCallQueue_lock", MUTEX_DEFAULT,
	       0);
    CV_INIT(&rx_waitingForPackets_cv, "rx_waitingForPackets_cv", CV_DEFAULT,
	    0);
    MUTEX_INIT(&rx_peerHashTable_lock, "rx_peerHashTable_lock", MUTEX_DEFAULT,
	       0);
    MUTEX_INIT(&rx_connHashTable_lock, "rx_connHashTable_lock", MUTEX_DEFAULT,
	       0);
    MUTEX_INIT(&rx_serverPool_lock, "rx_serverPool_lock", MUTEX_DEFAULT, 0);
#if defined(AFS_HPUX110_ENV)
    rx_sleepLock = alloc_spinlock(LAST_HELD_ORDER - 10, "rx_sleepLock");
#endif /* AFS_HPUX110_ENV */
#endif /* RX_ENABLE_LOCKS && KERNEL */

    rx_connDeadTime = 12;
    rx_tranquil = 0;		/* reset flag */
    rxi_ResetStatistics();
    htable = (char *)
	osi_Alloc(rx_hashTableSize * sizeof(struct rx_connection *));
    PIN(htable, rx_hashTableSize * sizeof(struct rx_connection *));	/* XXXXX */
    memset(htable, 0, rx_hashTableSize * sizeof(struct rx_connection *));
    ptable = (char *)osi_Alloc(rx_hashTableSize * sizeof(struct rx_peer *));
    PIN(ptable, rx_hashTableSize * sizeof(struct rx_peer *));	/* XXXXX */
    memset(ptable, 0, rx_hashTableSize * sizeof(struct rx_peer *));

    /* Malloc up a bunch of packets & buffers */
    rx_nFreePackets = 0;
    queue_Init(&rx_freePacketQueue);
    rxi_NeedMorePackets = FALSE;
    rx_nPackets = 0;	/* rx_nPackets is managed by rxi_MorePackets* */

    /* enforce a minimum number of allocated packets */
    if (rx_extraPackets < rxi_nSendFrags * rx_maxSendWindow)
	rx_extraPackets = rxi_nSendFrags * rx_maxSendWindow;

    /* allocate the initial free packet pool */
#ifdef RX_ENABLE_TSFPQ
    rxi_MorePacketsTSFPQ(rx_extraPackets + RX_MAX_QUOTA + 2, RX_TS_FPQ_FLUSH_GLOBAL, 0);
#else /* RX_ENABLE_TSFPQ */
    rxi_MorePackets(rx_extraPackets + RX_MAX_QUOTA + 2);	/* fudge */
#endif /* RX_ENABLE_TSFPQ */

#if defined(AFS_NT40_ENV) && !defined(AFS_PTHREAD_ENV)
    tv.tv_sec = clock_now.sec;
    tv.tv_usec = clock_now.usec;
    srand((unsigned int)tv.tv_usec);
#else
    osi_GetTime(&tv);
#endif
    if (port) {
	rx_port = port;
    } else {
#if defined(KERNEL) && !defined(UKERNEL)
	/* Really, this should never happen in a real kernel */
	rx_port = 0;
#else
	struct sockaddr_in addr;
#ifdef AFS_NT40_ENV
	int addrlen = sizeof(addr);
#else
	socklen_t addrlen = sizeof(addr);
#endif
	if (getsockname((intptr_t)rx_socket, (struct sockaddr *)&addr, &addrlen)) {
	    return -1;
	}
	rx_port = addr.sin_port;
#endif
    }
    rx_stats.minRtt.sec = 9999999;
#ifdef KERNEL
    rx_SetEpoch(tv.tv_sec | 0x80000000);
#else
    rx_SetEpoch(tv.tv_sec);	/* Start time of this package, rxkad
				 * will provide a more random value. */
#endif
    MUTEX_ENTER(&rx_quota_mutex);
    rxi_dataQuota += rx_extraQuota;	/* + extra pkts caller asked to rsrv */
    MUTEX_EXIT(&rx_quota_mutex);
    /* *Slightly* random start time for the cid.  This is just to help
     * out with the hashing function at the peer */
    rx_nextCid = ((tv.tv_sec ^ tv.tv_usec) << RX_CIDSHIFT);
    rx_connHashTable = (struct rx_connection **)htable;
    rx_peerHashTable = (struct rx_peer **)ptable;

    rx_hardAckDelay.sec = 0;
    rx_hardAckDelay.usec = 100000;	/* 100 milliseconds */

    rxevent_Init(20, rxi_ReScheduleEvents);

    /* Initialize various global queues */
    queue_Init(&rx_idleServerQueue);
    queue_Init(&rx_incomingCallQueue);
    queue_Init(&rx_freeCallQueue);

#if defined(AFS_NT40_ENV) && !defined(KERNEL)
    /* Initialize our list of usable IP addresses. */
    rx_GetIFInfo();
#endif

#if defined(RXK_LISTENER_ENV) || !defined(KERNEL)
    /* Start listener process (exact function is dependent on the
     * implementation environment--kernel or user space) */
    rxi_StartListener();
#endif

    tmp_status = rxinit_status = 0;
    UNLOCK_RX_INIT;
    return tmp_status;
}

int
rx_Init(u_int port)
{
    return rx_InitHost(htonl(INADDR_ANY), port);
}
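/*
 * Usage sketch (client side, error handling abbreviated): initialize rx on
 * an ephemeral port.  Passing 0 lets the kernel pick the UDP port; a fixed
 * port would be passed in network byte order, e.g. htons(7001).
 */
#if 0
static int
example_client_init(void)
{
    if (rx_Init(0) != 0)
	return -1;		/* rx failed to initialize */
    return 0;
}
#endif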
/**
 * Sets the error generated when a busy call channel is detected.
 *
 * @param[in] error The error to return for a call on a busy channel.
 *
 * @pre Neither rx_Init nor rx_InitHost have been called yet.
 */
void
rx_SetBusyChannelError(afs_int32 error)
{
    osi_Assert(rxinit_status != 0);
    rxi_busyChannelError = error;
}
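/*
 * Usage sketch: a client that prefers an immediate error over waiting on a
 * busy call channel.  RX_CALL_TIMEOUT is just one plausible choice of error
 * code; any nonzero value works.  Must run before rx_Init (see the assert).
 */
#if 0
static void
example_enable_busy_errors(void)
{
    rx_SetBusyChannelError(RX_CALL_TIMEOUT);
    rx_Init(0);
}
#endif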
/* called with unincremented nRequestsRunning to see if it is OK to start
 * a new thread in this service.  Could be "no" for two reasons: over the
 * max quota, or would prevent others from reaching their min quota.
 */
#ifdef RX_ENABLE_LOCKS
/* This version of QuotaOK reserves quota if it's ok while the
 * rx_serverPool_lock is held.  Return quota using ReturnToServerPool().
 */
static int
QuotaOK(struct rx_service *aservice)
{
    /* check if over max quota */
    if (aservice->nRequestsRunning >= aservice->maxProcs) {
	return 0;
    }

    /* under min quota, we're OK */
    /* otherwise, can use only if there are enough to allow everyone
     * to go to their min quota after this guy starts.
     */
    MUTEX_ENTER(&rx_quota_mutex);
    if ((aservice->nRequestsRunning < aservice->minProcs)
	|| (rxi_availProcs > rxi_minDeficit)) {
	aservice->nRequestsRunning++;
	/* just started call in minProcs pool, need fewer to maintain
	 * guarantee */
	if (aservice->nRequestsRunning <= aservice->minProcs)
	    rxi_minDeficit--;
	rxi_availProcs--;
	MUTEX_EXIT(&rx_quota_mutex);
	return 1;
    }
    MUTEX_EXIT(&rx_quota_mutex);

    return 0;
}

static void
ReturnToServerPool(struct rx_service *aservice)
{
    aservice->nRequestsRunning--;
    MUTEX_ENTER(&rx_quota_mutex);
    if (aservice->nRequestsRunning < aservice->minProcs)
	rxi_minDeficit++;
    rxi_availProcs++;
    MUTEX_EXIT(&rx_quota_mutex);
}

#else /* RX_ENABLE_LOCKS */
static int
QuotaOK(struct rx_service *aservice)
{
    int rc = 0;
    /* under min quota, we're OK */
    if (aservice->nRequestsRunning < aservice->minProcs)
	return 1;

    /* check if over max quota */
    if (aservice->nRequestsRunning >= aservice->maxProcs)
	return 0;

    /* otherwise, can use only if there are enough to allow everyone
     * to go to their min quota after this guy starts.
     */
    MUTEX_ENTER(&rx_quota_mutex);
    if (rxi_availProcs > rxi_minDeficit)
	rc = 1;
    MUTEX_EXIT(&rx_quota_mutex);
    return rc;
}
#endif /* RX_ENABLE_LOCKS */
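/*
 * The admission rule above, restated as a pure function over invented
 * inputs (a reasoning sketch, not rx code): a request is admitted when it
 * is under the service's guaranteed minimum, or when enough spare threads
 * remain to honor every other service's minimum.
 */
#if 0
static int
example_quota_ok(int nRunning, int minProcs, int maxProcs,
		 int availProcs, int minDeficit)
{
    if (nRunning >= maxProcs)
	return 0;		/* over max quota */
    if (nRunning < minProcs)
	return 1;		/* still under this service's guarantee */
    return availProcs > minDeficit;	/* don't starve other services' minima */
}
#endif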
/* Called by rx_StartServer to start up lwp's to service calls.
   NExistingProcs gives the number of procs already existing, and which
   therefore needn't be created. */
void
rxi_StartServerProcs(int nExistingProcs)
{
    struct rx_service *service;
    int i;
    int maxdiff = 0;
    int nProcs = 0;

    /* For each service, reserve N processes, where N is the "minimum"
     * number of processes that MUST be able to execute a request in parallel,
     * at any time, for that process.  Also compute the maximum difference
     * between any service's maximum number of processes that can run
     * (i.e. the maximum number that ever will be run, and a guarantee
     * that this number will run if other services aren't running), and its
     * minimum number.  The result is the extra number of processes that
     * we need in order to provide the latter guarantee */
    for (i = 0; i < RX_MAX_SERVICES; i++) {
	int diff;
	service = rx_services[i];
	if (service == (struct rx_service *)0)
	    break;
	nProcs += service->minProcs;
	diff = service->maxProcs - service->minProcs;
	if (diff > maxdiff)
	    maxdiff = diff;
    }
    nProcs += maxdiff;		/* Extra processes needed to allow max number requested to run in any given service, under good conditions */
    nProcs -= nExistingProcs;	/* Subtract the number of procs that were previously created for use as server procs */
    for (i = 0; i < nProcs; i++) {
	rxi_StartServerProc(rx_ServerProc, rx_stackSize);
    }
}

#ifdef AFS_NT40_ENV
/* This routine is only required on Windows */
void
rx_StartClientThread(void)
{
#ifdef AFS_PTHREAD_ENV
    pthread_t pid;
    pid = pthread_self();
#endif /* AFS_PTHREAD_ENV */
}
#endif /* AFS_NT40_ENV */
/* This routine must be called if any services are exported.  If the
 * donateMe flag is set, the calling process is donated to the server
 * process pool */
void
rx_StartServer(int donateMe)
{
    struct rx_service *service;
    int i;

    /* Start server processes, if necessary (exact function is dependent
     * on the implementation environment--kernel or user space).  DonateMe
     * will be 1 if there is 1 pre-existing proc, i.e. this one.  In this
     * case, one less new proc will be created by rx_StartServerProcs.
     */
    rxi_StartServerProcs(donateMe);

    /* count up the # of threads in minProcs, and set the min deficit to
     * be that value, too.
     */
    for (i = 0; i < RX_MAX_SERVICES; i++) {
	service = rx_services[i];
	if (service == (struct rx_service *)0)
	    break;
	MUTEX_ENTER(&rx_quota_mutex);
	rxi_totalMin += service->minProcs;
	/* below works even if a thread is running, since minDeficit would
	 * still have been decremented and later re-incremented.
	 */
	rxi_minDeficit += service->minProcs;
	MUTEX_EXIT(&rx_quota_mutex);
    }

    /* Turn on reaping of idle server connections */
    rxi_ReapConnections(NULL, NULL, NULL);

    if (donateMe) {
#ifndef AFS_NT40_ENV
#ifndef KERNEL
	char name[32];
	static int nProcs;
#ifdef AFS_PTHREAD_ENV
	pid_t pid;
	pid = afs_pointer_to_int(pthread_self());
#else /* AFS_PTHREAD_ENV */
	PROCESS pid;
	LWP_CurrentProcess(&pid);
#endif /* AFS_PTHREAD_ENV */

	sprintf(name, "srv_%d", ++nProcs);
	if (registerProgram)
	    (*registerProgram) (pid, name);
#endif /* KERNEL */
#endif /* AFS_NT40_ENV */
	rx_ServerProc(NULL);	/* Never returns */
    }
#ifdef RX_ENABLE_TSFPQ
    /* no use leaving packets around in this thread's local queue if
     * it isn't getting donated to the server thread pool.
     */
    rxi_FlushLocalPacketsTSFPQ();
#endif /* RX_ENABLE_TSFPQ */
    return;
}
/* Create a new client connection to the specified service, using the
 * specified security object to implement the security model for this
 * connection. */
struct rx_connection *
rx_NewConnection(afs_uint32 shost, u_short sport, u_short sservice,
		 struct rx_securityClass *securityObject,
		 int serviceSecurityIndex)
{
    int hashindex;
    afs_int32 cid;
    struct rx_connection *conn;
    int i;
    SPLVAR;

    clock_NewTime();
    dpf(("rx_NewConnection(host %x, port %u, service %u, securityObject %p, "
	 "serviceSecurityIndex %d)\n",
	 ntohl(shost), ntohs(sport), sservice, securityObject,
	 serviceSecurityIndex));

    /* Vasilsi said: "NETPRI protects Cid and Alloc", but can this be true in
     * the case of kmem_alloc? */
    conn = rxi_AllocConnection();
#ifdef RX_ENABLE_LOCKS
    MUTEX_INIT(&conn->conn_call_lock, "conn call lock", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&conn->conn_data_lock, "conn data lock", MUTEX_DEFAULT, 0);
    CV_INIT(&conn->conn_call_cv, "conn call cv", CV_DEFAULT, 0);
#endif
    NETPRI;
    MUTEX_ENTER(&rx_connHashTable_lock);
    cid = (rx_nextCid += RX_MAXCALLS);
    conn->type = RX_CLIENT_CONNECTION;
    conn->cid = cid;
    conn->epoch = rx_epoch;
    conn->peer = rxi_FindPeer(shost, sport, 0, 1);
    conn->serviceId = sservice;
    conn->securityObject = securityObject;
    conn->securityData = (void *) 0;
    conn->securityIndex = serviceSecurityIndex;
    rx_SetConnDeadTime(conn, rx_connDeadTime);
    rx_SetConnSecondsUntilNatPing(conn, 0);
    conn->ackRate = RX_FAST_ACK_RATE;
    conn->nSpecific = 0;
    conn->specific = NULL;
    conn->challengeEvent = NULL;
    conn->delayedAbortEvent = NULL;
    conn->abortCount = 0;
    conn->error = 0;
    for (i = 0; i < RX_MAXCALLS; i++) {
	conn->twind[i] = rx_initSendWindow;
	conn->rwind[i] = rx_initReceiveWindow;
	conn->lastBusy[i] = 0;
    }

    RXS_NewConnection(securityObject, conn);
    hashindex =
	CONN_HASH(shost, sport, conn->cid, conn->epoch, RX_CLIENT_CONNECTION);

    conn->refCount++;		/* no lock required since only this thread knows... */
    conn->next = rx_connHashTable[hashindex];
    rx_connHashTable[hashindex] = conn;
    if (rx_stats_active)
	rx_atomic_inc(&rx_stats.nClientConns);
    MUTEX_EXIT(&rx_connHashTable_lock);
    USERPRI;
    return conn;
}
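/*
 * Usage sketch: creating a client connection with the rxnull security
 * class.  EXAMPLE_SERVICE_ID is a placeholder; host and port are expected
 * in network byte order, as rx_NewConnection takes them.
 */
#if 0
#define EXAMPLE_SERVICE_ID 42
static struct rx_connection *
example_connect(afs_uint32 host_nbo, u_short port_nbo)
{
    struct rx_securityClass *so = rxnull_NewClientSecurityObject();
    return rx_NewConnection(host_nbo, port_nbo, EXAMPLE_SERVICE_ID,
			    so, 0 /* security index for rxnull */);
}
#endif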
/**
 * Ensure a connection's timeout values are valid.
 *
 * @param[in] conn The connection to check
 *
 * @post conn->secondsUntilDead <= conn->idleDeadTime <= conn->hardDeadTime,
 *       unless idleDeadTime and/or hardDeadTime are not set
 * @internal
 */
static void
rxi_CheckConnTimeouts(struct rx_connection *conn)
{
    /* a connection's timeouts must have the relationship
     * deadTime <= idleDeadTime <= hardDeadTime. Otherwise, for example, a
     * total loss of network to a peer may cause an idle timeout instead of a
     * dead timeout, simply because the idle timeout gets hit first. Also set
     * a minimum deadTime of 6, just to ensure it doesn't get set too low. */
    /* this logic is slightly complicated by the fact that
     * idleDeadTime/hardDeadTime may not be set at all, but it's not too bad.
     */
    conn->secondsUntilDead = MAX(conn->secondsUntilDead, 6);
    if (conn->idleDeadTime) {
	conn->idleDeadTime = MAX(conn->idleDeadTime, conn->secondsUntilDead);
    }
    if (conn->hardDeadTime) {
	if (conn->idleDeadTime) {
	    conn->hardDeadTime = MAX(conn->idleDeadTime, conn->hardDeadTime);
	} else {
	    conn->hardDeadTime = MAX(conn->secondsUntilDead, conn->hardDeadTime);
	}
    }
}

void
rx_SetConnDeadTime(struct rx_connection *conn, int seconds)
{
    /* The idea is to set the dead time to a value that allows several
     * keepalives to be dropped without timing out the connection. */
    conn->secondsUntilDead = seconds;
    rxi_CheckConnTimeouts(conn);
    conn->secondsUntilPing = conn->secondsUntilDead / 6;
}

void
rx_SetConnHardDeadTime(struct rx_connection *conn, int seconds)
{
    conn->hardDeadTime = seconds;
    rxi_CheckConnTimeouts(conn);
}

void
rx_SetConnIdleDeadTime(struct rx_connection *conn, int seconds)
{
    conn->idleDeadTime = seconds;
    rxi_CheckConnTimeouts(conn);
}
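/*
 * Usage sketch: configuring the three timeouts so the invariant enforced by
 * rxi_CheckConnTimeouts (dead <= idle <= hard) holds by construction.  The
 * specific values are illustrative only.
 */
#if 0
static void
example_set_timeouts(struct rx_connection *conn)
{
    rx_SetConnDeadTime(conn, 50);	/* tolerates several dropped keepalives */
    rx_SetConnIdleDeadTime(conn, 60);	/* >= secondsUntilDead */
    rx_SetConnHardDeadTime(conn, 120);	/* >= idleDeadTime */
}
#endif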
int rxi_lowPeerRefCount = 0;
int rxi_lowConnRefCount = 0;

/*
 * Cleanup a connection that was destroyed in rxi_DestroyConnectionNoLock.
 * NOTE: must not be called with rx_connHashTable_lock held.
 */
static void
rxi_CleanupConnection(struct rx_connection *conn)
{
    /* Notify the service exporter, if requested, that this connection
     * is being destroyed */
    if (conn->type == RX_SERVER_CONNECTION && conn->service->destroyConnProc)
	(*conn->service->destroyConnProc) (conn);

    /* Notify the security module that this connection is being destroyed */
    RXS_DestroyConnection(conn->securityObject, conn);

    /* If this is the last connection using the rx_peer struct, set its
     * idle time to now. rxi_ReapConnections will reap it if it's still
     * idle (refCount == 0) after rx_idlePeerTime (60 seconds) have passed.
     */
    MUTEX_ENTER(&rx_peerHashTable_lock);
    if (conn->peer->refCount < 2) {
	conn->peer->idleWhen = clock_Sec();
	if (conn->peer->refCount < 1) {
	    conn->peer->refCount = 1;
	    if (rx_stats_active) {
		MUTEX_ENTER(&rx_stats_mutex);
		rxi_lowPeerRefCount++;
		MUTEX_EXIT(&rx_stats_mutex);
	    }
	}
    }
    conn->peer->refCount--;
    MUTEX_EXIT(&rx_peerHashTable_lock);

    if (rx_stats_active) {
	if (conn->type == RX_SERVER_CONNECTION)
	    rx_atomic_dec(&rx_stats.nServerConns);
	else
	    rx_atomic_dec(&rx_stats.nClientConns);
    }
#ifndef KERNEL
    if (conn->specific) {
	int i;
	for (i = 0; i < conn->nSpecific; i++) {
	    if (conn->specific[i] && rxi_keyCreate_destructor[i])
		(*rxi_keyCreate_destructor[i]) (conn->specific[i]);
	    conn->specific[i] = NULL;
	}
	free(conn->specific);
    }
    conn->specific = NULL;
    conn->nSpecific = 0;
#endif /* !KERNEL */

    MUTEX_DESTROY(&conn->conn_call_lock);
    MUTEX_DESTROY(&conn->conn_data_lock);
    CV_DESTROY(&conn->conn_call_cv);

    rxi_FreeConnection(conn);
}
/* Destroy the specified connection */
void
rxi_DestroyConnection(struct rx_connection *conn)
{
    MUTEX_ENTER(&rx_connHashTable_lock);
    rxi_DestroyConnectionNoLock(conn);
    /* conn should be at the head of the cleanup list */
    if (conn == rx_connCleanup_list) {
	rx_connCleanup_list = rx_connCleanup_list->next;
	MUTEX_EXIT(&rx_connHashTable_lock);
	rxi_CleanupConnection(conn);
    }
#ifdef RX_ENABLE_LOCKS
    else {
	MUTEX_EXIT(&rx_connHashTable_lock);
    }
#endif /* RX_ENABLE_LOCKS */
}
static void
rxi_DestroyConnectionNoLock(struct rx_connection *conn)
{
    struct rx_connection **conn_ptr;
    int havecalls = 0;
    struct rx_packet *packet;
    int i;
    SPLVAR;

    clock_NewTime();

    NETPRI;
    MUTEX_ENTER(&conn->conn_data_lock);
    MUTEX_ENTER(&rx_refcnt_mutex);
    if (conn->refCount > 0)
	conn->refCount--;
    else {
	if (rx_stats_active) {
	    MUTEX_ENTER(&rx_stats_mutex);
	    rxi_lowConnRefCount++;
	    MUTEX_EXIT(&rx_stats_mutex);
	}
    }

    if ((conn->refCount > 0) || (conn->flags & RX_CONN_BUSY)) {
	/* Busy; wait till the last guy before proceeding */
	MUTEX_EXIT(&rx_refcnt_mutex);
	MUTEX_EXIT(&conn->conn_data_lock);
	USERPRI;
	return;
    }

    /* If the client previously called rx_NewCall, but it is still
     * waiting, treat this as a running call, and wait to destroy the
     * connection later when the call completes. */
    if ((conn->type == RX_CLIENT_CONNECTION)
	&& (conn->flags & (RX_CONN_MAKECALL_WAITING|RX_CONN_MAKECALL_ACTIVE))) {
	conn->flags |= RX_CONN_DESTROY_ME;
	MUTEX_EXIT(&conn->conn_data_lock);
	USERPRI;
	return;
    }
    MUTEX_EXIT(&rx_refcnt_mutex);
    MUTEX_EXIT(&conn->conn_data_lock);

    /* Check for extant references to this connection */
    for (i = 0; i < RX_MAXCALLS; i++) {
	struct rx_call *call = conn->call[i];
	if (call) {
	    havecalls = 1;
	    if (conn->type == RX_CLIENT_CONNECTION) {
		MUTEX_ENTER(&call->lock);
		if (call->delayedAckEvent) {
		    /* Push the final acknowledgment out now--there
		     * won't be a subsequent call to acknowledge the
		     * last reply packets */
		    rxevent_Cancel(call->delayedAckEvent, call,
				   RX_CALL_REFCOUNT_DELAY);
		    if (call->state == RX_STATE_PRECALL
			|| call->state == RX_STATE_ACTIVE) {
			rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
		    } else {
			rxi_AckAll(NULL, call, 0);
		    }
		}
		MUTEX_EXIT(&call->lock);
	    }
	}
    }
#ifdef RX_ENABLE_LOCKS
    if (!havecalls) {
	if (MUTEX_TRYENTER(&conn->conn_data_lock)) {
	    MUTEX_EXIT(&conn->conn_data_lock);
	} else {
	    /* Someone is accessing a packet right now. */
	    havecalls = 1;
	}
    }
#endif /* RX_ENABLE_LOCKS */

    if (havecalls) {
	/* Don't destroy the connection if there are any call
	 * structures still in use */
	MUTEX_ENTER(&conn->conn_data_lock);
	conn->flags |= RX_CONN_DESTROY_ME;
	MUTEX_EXIT(&conn->conn_data_lock);
	USERPRI;
	return;
    }

    if (conn->natKeepAliveEvent) {
	rxi_NatKeepAliveOff(conn);
    }

    if (conn->delayedAbortEvent) {
	rxevent_Cancel(conn->delayedAbortEvent, (struct rx_call *)0, 0);
	packet = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL);
	if (packet) {
	    MUTEX_ENTER(&conn->conn_data_lock);
	    rxi_SendConnectionAbort(conn, packet, 0, 1);
	    MUTEX_EXIT(&conn->conn_data_lock);
	    rxi_FreePacket(packet);
	}
    }

    /* Remove from connection hash table before proceeding */
    conn_ptr =
	&rx_connHashTable[CONN_HASH
			  (peer->host, peer->port, conn->cid, conn->epoch,
			   conn->type)];
    for (; *conn_ptr; conn_ptr = &(*conn_ptr)->next) {
	if (*conn_ptr == conn) {
	    *conn_ptr = conn->next;
	    break;
	}
    }
    /* if the conn that we are destroying was the last connection, then we
     * clear rxLastConn as well */
    if (rxLastConn == conn)
	rxLastConn = 0;

    /* Make sure the connection is completely reset before deleting it. */
    /* get rid of pending events that could zap us later */
    if (conn->challengeEvent)
	rxevent_Cancel(conn->challengeEvent, (struct rx_call *)0, 0);
    if (conn->checkReachEvent)
	rxevent_Cancel(conn->checkReachEvent, (struct rx_call *)0, 0);
    if (conn->natKeepAliveEvent)
	rxevent_Cancel(conn->natKeepAliveEvent, (struct rx_call *)0, 0);

    /* Add the connection to the list of destroyed connections that
     * need to be cleaned up. This is necessary to avoid deadlocks
     * in the routines we call to inform others that this connection is
     * being destroyed. */
    conn->next = rx_connCleanup_list;
    rx_connCleanup_list = conn;
}
/* Externally available version */
void
rx_DestroyConnection(struct rx_connection *conn)
{
    SPLVAR;

    NETPRI;
    rxi_DestroyConnection(conn);
    USERPRI;
}

void
rx_GetConnection(struct rx_connection *conn)
{
    SPLVAR;

    NETPRI;
    MUTEX_ENTER(&rx_refcnt_mutex);
    conn->refCount++;
    MUTEX_EXIT(&rx_refcnt_mutex);
    USERPRI;
}
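/*
 * Usage sketch: rx_GetConnection and rx_DestroyConnection behave as a
 * reference-count up/down pair; every extra reference taken must eventually
 * be returned, or the connection is never reaped.
 */
#if 0
static void
example_share_conn(struct rx_connection *conn)
{
    rx_GetConnection(conn);	/* take a reference before handing off */
    /* ... another thread uses conn ... */
    rx_DestroyConnection(conn);	/* give the reference back */
}
#endif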
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
/* Wait for the transmit queue to no longer be busy.
 * requires the call->lock to be held */
void
rxi_WaitforTQBusy(struct rx_call *call) {
    while (!call->error && (call->flags & RX_CALL_TQ_BUSY)) {
	call->flags |= RX_CALL_TQ_WAIT;
	call->tqWaiters++;
#ifdef RX_ENABLE_LOCKS
	osirx_AssertMine(&call->lock, "rxi_WaitforTQ lock");
	CV_WAIT(&call->cv_tq, &call->lock);
#else /* RX_ENABLE_LOCKS */
	osi_rxSleep(&call->tq);
#endif /* RX_ENABLE_LOCKS */
	call->tqWaiters--;
	if (call->tqWaiters == 0) {
	    call->flags &= ~RX_CALL_TQ_WAIT;
	}
    }
}
#endif

static void
rxi_WakeUpTransmitQueue(struct rx_call *call)
{
    if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
	dpf(("call %"AFS_PTR_FMT" has %d waiters and flags %d\n",
	     call, call->tqWaiters, call->flags));
#ifdef RX_ENABLE_LOCKS
	osirx_AssertMine(&call->lock, "rxi_Start start");
	CV_BROADCAST(&call->cv_tq);
#else /* RX_ENABLE_LOCKS */
	osi_rxWakeup(&call->tq);
#endif /* RX_ENABLE_LOCKS */
    }
}
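/*
 * Illustrative sketch (RX_ENABLE_LOCKS path only, hypothetical helper): the
 * flag-plus-condvar protocol used by rxi_WaitforTQBusy and
 * rxi_WakeUpTransmitQueue.  The flag is always re-tested after waking,
 * since a condition-variable wait can return spuriously.
 */
#if 0
static void
example_wait_until_tq_idle(struct rx_call *call)
{
    MUTEX_ENTER(&call->lock);
    while (call->flags & RX_CALL_TQ_BUSY)
	CV_WAIT(&call->cv_tq, &call->lock);	/* unlock, sleep, relock */
    MUTEX_EXIT(&call->lock);
}
#endif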
/* Start a new rx remote procedure call, on the specified connection.
 * If wait is set to 1, wait for a free call channel; otherwise return
 * 0.  Maxtime gives the maximum number of seconds this call may take,
 * after rx_NewCall returns.  After this time interval, a call to any
 * of rx_SendData, rx_ReadData, etc. will fail with RX_CALL_TIMEOUT.
 * For fine grain locking, we hold the conn_call_lock in order to
 * ensure that we don't get signalled after we found a call in an active
 * state and before we go to sleep.
 */
struct rx_call *
rx_NewCall(struct rx_connection *conn)
{
    int i, wait, ignoreBusy = 1;
    struct rx_call *call;
    struct clock queueTime;
    afs_uint32 leastBusy = 0;
    SPLVAR;

    clock_NewTime();
    dpf(("rx_NewCall(conn %"AFS_PTR_FMT")\n", conn));

    NETPRI;
    clock_GetTime(&queueTime);
    /*
     * Check if there are others waiting for a new call.
     * If so, let them go first to avoid starving them.
     * This is a fairly simple scheme, and might not be
     * a complete solution for large numbers of waiters.
     *
     * makeCallWaiters keeps track of the number of
     * threads waiting to make calls and the
     * RX_CONN_MAKECALL_WAITING flag bit is used to
     * indicate that there are indeed calls waiting.
     * The flag is set when the waiter is incremented.
     * It is only cleared when makeCallWaiters is 0.
     * This prevents us from accidentally destroying the
     * connection while it is potentially about to be used.
     */
    MUTEX_ENTER(&conn->conn_call_lock);
    MUTEX_ENTER(&conn->conn_data_lock);
    while (conn->flags & RX_CONN_MAKECALL_ACTIVE) {
	conn->flags |= RX_CONN_MAKECALL_WAITING;
	conn->makeCallWaiters++;
	MUTEX_EXIT(&conn->conn_data_lock);

#ifdef RX_ENABLE_LOCKS
	CV_WAIT(&conn->conn_call_cv, &conn->conn_call_lock);
#else
	osi_rxSleep(conn);
#endif
	MUTEX_ENTER(&conn->conn_data_lock);
	conn->makeCallWaiters--;
	if (conn->makeCallWaiters == 0)
	    conn->flags &= ~RX_CONN_MAKECALL_WAITING;
    }

    /* We are now the active thread in rx_NewCall */
    conn->flags |= RX_CONN_MAKECALL_ACTIVE;
    MUTEX_EXIT(&conn->conn_data_lock);

    for (;;) {
	wait = 1;

	for (i = 0; i < RX_MAXCALLS; i++) {
	    call = conn->call[i];
	    if (call) {
		if (!ignoreBusy && conn->lastBusy[i] != leastBusy) {
		    /* we're not ignoring busy call slots; only look at the
		     * call slot that is the "least" busy */
		    continue;
		}

		if (call->state == RX_STATE_DALLY) {
		    MUTEX_ENTER(&call->lock);
		    if (call->state == RX_STATE_DALLY) {
			if (ignoreBusy && conn->lastBusy[i]) {
			    /* if we're ignoring busy call slots, skip any ones that
			     * have lastBusy set */
			    if (leastBusy == 0 || conn->lastBusy[i] < leastBusy) {
				leastBusy = conn->lastBusy[i];
			    }
			    MUTEX_EXIT(&call->lock);
			    continue;
			}

			/*
			 * We are setting the state to RX_STATE_RESET to
			 * ensure that no one else will attempt to use this
			 * call once we drop the conn->conn_call_lock and
			 * call->lock.  We must drop the conn->conn_call_lock
			 * before calling rxi_ResetCall because the process
			 * of clearing the transmit queue can block for an
			 * extended period of time.  If we block while holding
			 * the conn->conn_call_lock, then all rx_EndCall
			 * processing will block as well.  This has a detrimental
			 * effect on overall system performance.
			 */
			call->state = RX_STATE_RESET;
			MUTEX_EXIT(&conn->conn_call_lock);
			MUTEX_ENTER(&rx_refcnt_mutex);
			CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
			MUTEX_EXIT(&rx_refcnt_mutex);
			rxi_ResetCall(call, 0);
			(*call->callNumber)++;
			if (MUTEX_TRYENTER(&conn->conn_call_lock))
			    break;

			/*
			 * If we failed to be able to safely obtain the
			 * conn->conn_call_lock we will have to drop the
			 * call->lock to avoid a deadlock.  When the call->lock
			 * is released the state of the call can change.  If it
			 * is no longer RX_STATE_RESET then some other thread is
			 * using the call.
			 */
			MUTEX_EXIT(&call->lock);
			MUTEX_ENTER(&conn->conn_call_lock);
			MUTEX_ENTER(&call->lock);

			if (call->state == RX_STATE_RESET)
			    break;

			/*
			 * If we get here it means that after dropping
			 * the conn->conn_call_lock and call->lock that
			 * the call is no longer ours.  If we can't find
			 * a free call in the remaining slots we should
			 * not go immediately to RX_CONN_MAKECALL_WAITING
			 * because by dropping the conn->conn_call_lock
			 * we have given up synchronization with rx_EndCall.
			 * Instead, cycle through one more time to see if
			 * we can find a call that we can call our own.
			 */
			MUTEX_ENTER(&rx_refcnt_mutex);
			CALL_RELE(call, RX_CALL_REFCOUNT_BEGIN);
			MUTEX_EXIT(&rx_refcnt_mutex);
		    }
		    MUTEX_EXIT(&call->lock);
		}
	    } else {
		if (ignoreBusy && conn->lastBusy[i]) {
		    /* if we're ignoring busy call slots, skip any ones that
		     * have lastBusy set */
		    if (leastBusy == 0 || conn->lastBusy[i] < leastBusy) {
			leastBusy = conn->lastBusy[i];
		    }
		    continue;
		}

		/* rxi_NewCall returns with mutex locked */
		call = rxi_NewCall(conn, i);
		MUTEX_ENTER(&rx_refcnt_mutex);
		CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
		MUTEX_EXIT(&rx_refcnt_mutex);
		break;
	    }
	}
	if (i < RX_MAXCALLS) {
	    conn->lastBusy[i] = 0;
	    break;
	}
	if (!wait)
	    continue;

	if (leastBusy && ignoreBusy) {
	    /* we didn't find a usable call slot, but we did see at least one
	     * 'busy' slot; look again and only use a slot with the 'least
	     * busy' time */
	    ignoreBusy = 0;
	    continue;
	}

	MUTEX_ENTER(&conn->conn_data_lock);
	conn->flags |= RX_CONN_MAKECALL_WAITING;
	conn->makeCallWaiters++;
	MUTEX_EXIT(&conn->conn_data_lock);

#ifdef RX_ENABLE_LOCKS
	CV_WAIT(&conn->conn_call_cv, &conn->conn_call_lock);
#else
	osi_rxSleep(conn);
#endif
	MUTEX_ENTER(&conn->conn_data_lock);
	conn->makeCallWaiters--;
	if (conn->makeCallWaiters == 0)
	    conn->flags &= ~RX_CONN_MAKECALL_WAITING;
	MUTEX_EXIT(&conn->conn_data_lock);
    }
    /* Client is initially in send mode */
    call->state = RX_STATE_ACTIVE;
    call->error = conn->error;
    if (call->error)
	call->mode = RX_MODE_ERROR;
    else
	call->mode = RX_MODE_SENDING;

    /* remember start time for call in case we have hard dead time limit */
    call->queueTime = queueTime;
    clock_GetTime(&call->startTime);
    hzero(call->bytesSent);
    hzero(call->bytesRcvd);

    /* Turn on busy protocol. */
    rxi_KeepAliveOn(call);

    /* Attempt MTU discovery */
    rxi_GrowMTUOn(call);

    /*
     * We are no longer the active thread in rx_NewCall
     */
    MUTEX_ENTER(&conn->conn_data_lock);
    conn->flags &= ~RX_CONN_MAKECALL_ACTIVE;
    MUTEX_EXIT(&conn->conn_data_lock);

    /*
     * Wake up anyone else who might be giving us a chance to
     * run (see code above that avoids resource starvation).
     */
#ifdef RX_ENABLE_LOCKS
    CV_BROADCAST(&conn->conn_call_cv);
#else
    osi_rxWakeup(conn);
#endif
    MUTEX_EXIT(&conn->conn_call_lock);

#ifdef AFS_GLOBAL_RXLOCK_KERNEL
    if (call->flags & (RX_CALL_TQ_BUSY | RX_CALL_TQ_CLEARME)) {
	osi_Panic("rx_NewCall call about to be used without an empty tq");
    }
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */

    MUTEX_EXIT(&call->lock);
    USERPRI;

    dpf(("rx_NewCall(call %"AFS_PTR_FMT")\n", call));
    return call;
}
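/*
 * Usage sketch (client RPC round trip, error handling abbreviated): pair
 * every rx_NewCall with rx_EndCall, which returns the call's final error.
 * The request/reply format here is entirely invented.
 */
#if 0
static afs_int32
example_rpc(struct rx_connection *conn)
{
    struct rx_call *call = rx_NewCall(conn);
    char request[] = "ping";
    char reply[128];
    afs_int32 code = 0;

    if (rx_Write(call, request, sizeof(request)) != sizeof(request))
	code = rx_Error(call);
    else if (rx_Read(call, reply, sizeof(reply)) <= 0)
	code = rx_Error(call);
    return rx_EndCall(call, code);	/* final conversation error */
}
#endif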
int
rxi_HasActiveCalls(struct rx_connection *aconn)
{
    int i;
    struct rx_call *tcall;
    SPLVAR;

    NETPRI;
    for (i = 0; i < RX_MAXCALLS; i++) {
	if ((tcall = aconn->call[i])) {
	    if ((tcall->state == RX_STATE_ACTIVE)
		|| (tcall->state == RX_STATE_PRECALL)) {
		USERPRI;
		return 1;
	    }
	}
    }
    USERPRI;
    return 0;
}

int
rxi_GetCallNumberVector(struct rx_connection *aconn,
			afs_int32 * aint32s)
{
    int i;
    struct rx_call *tcall;
    SPLVAR;

    NETPRI;
    for (i = 0; i < RX_MAXCALLS; i++) {
	if ((tcall = aconn->call[i]) && (tcall->state == RX_STATE_DALLY))
	    aint32s[i] = aconn->callNumber[i] + 1;
	else
	    aint32s[i] = aconn->callNumber[i];
    }
    USERPRI;
    return 0;
}

int
rxi_SetCallNumberVector(struct rx_connection *aconn,
			afs_int32 * aint32s)
{
    int i;
    struct rx_call *tcall;
    SPLVAR;

    NETPRI;
    for (i = 0; i < RX_MAXCALLS; i++) {
	if ((tcall = aconn->call[i]) && (tcall->state == RX_STATE_DALLY))
	    aconn->callNumber[i] = aint32s[i] - 1;
	else
	    aconn->callNumber[i] = aint32s[i];
    }
    USERPRI;
    return 0;
}
/* Advertise a new service.  A service is named locally by a UDP port
 * number plus a 16-bit service id.  Returns (struct rx_service *) 0
 * on failure.
 *	char *serviceName;	Name for identification purposes (e.g. the
 *				service name might be used for probing for
 *				statistics) */
struct rx_service *
rx_NewServiceHost(afs_uint32 host, u_short port, u_short serviceId,
		  char *serviceName, struct rx_securityClass **securityObjects,
		  int nSecurityObjects,
		  afs_int32(*serviceProc) (struct rx_call * acall))
{
    osi_socket socket = OSI_NULLSOCKET;
    struct rx_service *tservice;
    int i;
    SPLVAR;

    clock_NewTime();

    if (serviceId == 0) {
	(osi_Msg
	 "rx_NewService: service id for service %s must be non-zero.\n",
	 serviceName);
	return 0;
    }
    if (port == 0) {
	if (rx_port == 0) {
	    (osi_Msg
	     "rx_NewService: A non-zero port must be specified on this call if a non-zero port was not provided at Rx initialization (service %s).\n",
	     serviceName);
	    return 0;
	}
	port = rx_port;
	socket = rx_socket;
    }

    tservice = rxi_AllocService();
    NETPRI;

#ifdef RX_ENABLE_LOCKS
    MUTEX_INIT(&tservice->svc_data_lock, "svc data lock", MUTEX_DEFAULT, 0);
#endif

    for (i = 0; i < RX_MAX_SERVICES; i++) {
	struct rx_service *service = rx_services[i];
	if (service) {
	    if (port == service->servicePort && host == service->serviceHost) {
		if (service->serviceId == serviceId) {
		    /* The identical service has already been
		     * installed; if the caller was intending to
		     * change the security classes used by this
		     * service, he/she loses. */
		    (osi_Msg
		     "rx_NewService: tried to install service %s with service id %d, which is already in use for service %s\n",
		     serviceName, serviceId, service->serviceName);
		    USERPRI;
		    rxi_FreeService(tservice);
		    return service;
		}
		/* Different service, same port: re-use the socket
		 * which is bound to the same port */
		socket = service->socket;
	    }
	} else {
	    if (socket == OSI_NULLSOCKET) {
		/* If we don't already have a socket (from another
		 * service on same port) get a new one */
		socket = rxi_GetHostUDPSocket(host, port);
		if (socket == OSI_NULLSOCKET) {
		    USERPRI;
		    rxi_FreeService(tservice);
		    return 0;
		}
	    }
	    service = tservice;
	    service->socket = socket;
	    service->serviceHost = host;
	    service->servicePort = port;
	    service->serviceId = serviceId;
	    service->serviceName = serviceName;
	    service->nSecurityObjects = nSecurityObjects;
	    service->securityObjects = securityObjects;
	    service->minProcs = 0;
	    service->maxProcs = 1;
	    service->idleDeadTime = 60;
	    service->idleDeadErr = 0;
	    service->connDeadTime = rx_connDeadTime;
	    service->executeRequestProc = serviceProc;
	    service->checkReach = 0;
	    service->nSpecific = 0;
	    service->specific = NULL;
	    rx_services[i] = service;	/* not visible until now */
	    USERPRI;
	    return service;
	}
    }
    USERPRI;
    rxi_FreeService(tservice);
    (osi_Msg "rx_NewService: cannot support > %d services\n",
     RX_MAX_SERVICES);
    return 0;
}

/* Set configuration options for all of a service's security objects */

afs_int32
rx_SetSecurityConfiguration(struct rx_service *service,
			    rx_securityConfigVariables type,
			    void *value)
{
    int i;
    for (i = 0; i < service->nSecurityObjects; i++) {
	if (service->securityObjects[i]) {
	    RXS_SetConfiguration(service->securityObjects[i], NULL, type,
				 value, NULL);
	}
    }
    return 0;
}

struct rx_service *
rx_NewService(u_short port, u_short serviceId, char *serviceName,
	      struct rx_securityClass **securityObjects, int nSecurityObjects,
	      afs_int32(*serviceProc) (struct rx_call * acall))
{
    return rx_NewServiceHost(htonl(INADDR_ANY), port, serviceId, serviceName, securityObjects, nSecurityObjects, serviceProc);
}
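/*
 * Usage sketch (server side): register one service and donate this thread
 * to the pool.  The service id, name, and request handler are placeholders.
 * Note that the server loop (rxi_ServerProc below) issues rx_EndCall; the
 * handler must not.
 */
#if 0
static afs_int32
example_ExecuteRequest(struct rx_call *call)
{
    char buf[128];
    (void)rx_Read(call, buf, sizeof(buf));	/* consume the request */
    return 0;			/* rx_EndCall is issued by the server loop */
}

static void
example_server_main(void)
{
    struct rx_securityClass *so[1];
    struct rx_service *svc;

    if (rx_Init(htons(1234)) != 0)	/* fixed port, network byte order */
	return;
    so[0] = rxnull_NewServerSecurityObject();
    svc = rx_NewService(0, 42 /* service id */, "example", so, 1,
			example_ExecuteRequest);
    if (svc)
	rx_StartServer(1);	/* donateMe = 1: this call never returns */
}
#endif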
/* Generic request processing loop. This routine should be called
 * by the implementation dependent rx_ServerProc. If socketp is
 * non-null, it will be set to the file descriptor that this thread
 * is now listening on. If socketp is null, this routine will never
 * return. */
void
rxi_ServerProc(int threadID, struct rx_call *newcall, osi_socket * socketp)
{
    struct rx_call *call;
    afs_int32 code;
    struct rx_service *tservice = NULL;

    for (;;) {
	if (newcall) {
	    call = newcall;
	    newcall = NULL;
	} else {
	    call = rx_GetCall(threadID, tservice, socketp);
	    if (socketp && *socketp != OSI_NULLSOCKET) {
		/* We are now a listener thread */
		return;
	    }
	}

	/* if server is restarting (typically smooth shutdown) then do not
	 * allow any new calls.
	 */

	if (rx_tranquil && (call != NULL)) {
	    SPLVAR;

	    NETPRI;
	    MUTEX_ENTER(&call->lock);

	    rxi_CallError(call, RX_RESTARTING);
	    rxi_SendCallAbort(call, (struct rx_packet *)0, 0, 0);

	    MUTEX_EXIT(&call->lock);
	    USERPRI;
	    continue;
	}
#ifdef KERNEL
	if (afs_termState == AFSOP_STOP_RXCALLBACK) {
#ifdef RX_ENABLE_LOCKS
	    AFS_GLOCK();
#endif /* RX_ENABLE_LOCKS */
	    afs_termState = AFSOP_STOP_AFS;
	    afs_osi_Wakeup(&afs_termState);
#ifdef RX_ENABLE_LOCKS
	    AFS_GUNLOCK();
#endif /* RX_ENABLE_LOCKS */
	    return;
	}
#endif
	tservice = call->conn->service;

	if (tservice->beforeProc)
	    (*tservice->beforeProc) (call);

	code = tservice->executeRequestProc(call);

	if (tservice->afterProc)
	    (*tservice->afterProc) (call, code);

	rx_EndCall(call, code);
	if (rx_stats_active) {
	    MUTEX_ENTER(&rx_stats_mutex);
	    rxi_nCalls++;
	    MUTEX_EXIT(&rx_stats_mutex);
	}
    }
}
void
rx_WakeupServerProcs(void)
{
    struct rx_serverQueueEntry *np, *tqp;
    SPLVAR;

    NETPRI;
    MUTEX_ENTER(&rx_serverPool_lock);

#ifdef RX_ENABLE_LOCKS
    if (rx_waitForPacket)
	CV_BROADCAST(&rx_waitForPacket->cv);
#else /* RX_ENABLE_LOCKS */
    if (rx_waitForPacket)
	osi_rxWakeup(rx_waitForPacket);
#endif /* RX_ENABLE_LOCKS */
    MUTEX_ENTER(&freeSQEList_lock);
    for (np = rx_FreeSQEList; np; np = tqp) {
	tqp = *(struct rx_serverQueueEntry **)np;
#ifdef RX_ENABLE_LOCKS
	CV_BROADCAST(&np->cv);
#else /* RX_ENABLE_LOCKS */
	osi_rxWakeup(np);
#endif /* RX_ENABLE_LOCKS */
    }
    MUTEX_EXIT(&freeSQEList_lock);
    for (queue_Scan(&rx_idleServerQueue, np, tqp, rx_serverQueueEntry)) {
#ifdef RX_ENABLE_LOCKS
	CV_BROADCAST(&np->cv);
#else /* RX_ENABLE_LOCKS */
	osi_rxWakeup(np);
#endif /* RX_ENABLE_LOCKS */
    }
    MUTEX_EXIT(&rx_serverPool_lock);
    USERPRI;
}
/*
 * One thing that seems to happen is that all the server threads get
 * tied up on some empty or slow call, and then a whole bunch of calls
 * arrive at once, using up the packet pool, so now there are more
 * empty calls.  The most critical resources here are server threads
 * and the free packet pool.  The "doreclaim" code seems to help in
 * general.  I think that eventually we arrive in this state: there
 * are lots of pending calls which do have all their packets present,
 * so they won't be reclaimed, are multi-packet calls, so they won't
 * be scheduled until later, and thus are tying up most of the free
 * packet pool for a very long time.
 * future options:
 * 1.  schedule multi-packet calls if all the packets are present.
 * Probably CPU-bound operation, useful to return packets to pool.
 * Do what if there is a full window, but the last packet isn't here?
 * 3.  preserve one thread which *only* runs "best" calls, otherwise
 * it sleeps and waits for that type of call.
 * 4.  Don't necessarily reserve a whole window for each thread.  In fact,
 * the current dataquota business is badly broken.  The quota isn't adjusted
 * to reflect how many packets are presently queued for a running call.
 * So, when we schedule a queued call with a full window of packets queued
 * up for it, that *should* free up a window full of packets for other 2d-class
 * calls to be able to use from the packet pool.  But it doesn't.
 *
 * NB.  Most of the time, this code doesn't run -- since idle server threads
 * sit on the idle server queue and are assigned by "...ReceivePacket" as soon
 * as a new call arrives.
 */
/* Sleep until a call arrives.  Returns a pointer to the call, ready
 * for an rx_Read. */
#ifdef RX_ENABLE_LOCKS
struct rx_call *
rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)
{
    struct rx_serverQueueEntry *sq;
    struct rx_call *call = (struct rx_call *)0;
    struct rx_service *service = NULL;

    MUTEX_ENTER(&freeSQEList_lock);

    if ((sq = rx_FreeSQEList)) {
	rx_FreeSQEList = *(struct rx_serverQueueEntry **)sq;
	MUTEX_EXIT(&freeSQEList_lock);
    } else {			/* otherwise allocate a new one and return that */
	MUTEX_EXIT(&freeSQEList_lock);
	sq = rxi_Alloc(sizeof(struct rx_serverQueueEntry));
	MUTEX_INIT(&sq->lock, "server Queue lock", MUTEX_DEFAULT, 0);
	CV_INIT(&sq->cv, "server Queue lock", CV_DEFAULT, 0);
    }

    MUTEX_ENTER(&rx_serverPool_lock);
    if (cur_service != NULL) {
	ReturnToServerPool(cur_service);
    }
    while (1) {
	if (queue_IsNotEmpty(&rx_incomingCallQueue)) {
	    struct rx_call *tcall, *ncall, *choice2 = NULL;

	    /* Scan for eligible incoming calls.  A call is not eligible
	     * if the maximum number of calls for its service type are
	     * already executing */
	    /* One thread will process calls FCFS (to prevent starvation),
	     * while the other threads may run ahead looking for calls which
	     * have all their input data available immediately.  This helps
	     * keep threads from blocking, waiting for data from the client. */
	    for (queue_Scan(&rx_incomingCallQueue, tcall, ncall, rx_call)) {
		service = tcall->conn->service;
		if (!QuotaOK(service)) {
		    continue;
		}
		MUTEX_ENTER(&rx_pthread_mutex);
		if (tno == rxi_fcfs_thread_num
		    || !tcall->queue_item_header.next) {
		    MUTEX_EXIT(&rx_pthread_mutex);
		    /* If we're the fcfs thread, then we'll just use
		     * this call. If we haven't been able to find an optimal
		     * choice, and we're at the end of the list, then use a
		     * 2d choice if one has been identified.  Otherwise... */
		    call = (choice2 ? choice2 : tcall);
		    service = call->conn->service;
		} else {
		    MUTEX_EXIT(&rx_pthread_mutex);
		    if (!queue_IsEmpty(&tcall->rq)) {
			struct rx_packet *rp;
			rp = queue_First(&tcall->rq, rx_packet);
			if (rp->header.seq == 1) {
			    if (!meltdown_1pkt
				|| (rp->header.flags & RX_LAST_PACKET)) {
				call = tcall;
			    } else if (rxi_2dchoice && !choice2
				       && !(tcall->flags & RX_CALL_CLEARED)
				       && (tcall->rprev > rxi_HardAckRate)) {
				choice2 = tcall;
			    } else
				rxi_md2cnt++;
			}
		    }
		}
		if (call) {
		    break;
		} else {
		    ReturnToServerPool(service);
		}
	    }
	}

	if (call) {
	    queue_Remove(call);
	    MUTEX_EXIT(&rx_serverPool_lock);
	    MUTEX_ENTER(&call->lock);

	    if (call->flags & RX_CALL_WAIT_PROC) {
		call->flags &= ~RX_CALL_WAIT_PROC;
		rx_atomic_dec(&rx_nWaiting);
	    }

	    if (call->state != RX_STATE_PRECALL || call->error) {
		MUTEX_EXIT(&call->lock);
		MUTEX_ENTER(&rx_serverPool_lock);
		ReturnToServerPool(service);
		call = NULL;
		continue;
	    }

	    if (queue_IsEmpty(&call->rq)
		|| queue_First(&call->rq, rx_packet)->header.seq != 1)
		rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);

	    CLEAR_CALL_QUEUE_LOCK(call);
	    break;
	} else {
	    /* If there are no eligible incoming calls, add this process
	     * to the idle server queue, to wait for one */
	    sq->newcall = 0;
	    sq->tno = tno;
	    if (socketp) {
		*socketp = OSI_NULLSOCKET;
	    }
	    sq->socketp = socketp;
	    queue_Append(&rx_idleServerQueue, sq);
#ifndef AFS_AIX41_ENV
	    rx_waitForPacket = sq;
#else
	    rx_waitingForPacket = sq;
#endif /* AFS_AIX41_ENV */

	    do {
		CV_WAIT(&sq->cv, &rx_serverPool_lock);
#ifdef KERNEL
		if (afs_termState == AFSOP_STOP_RXCALLBACK) {
		    MUTEX_EXIT(&rx_serverPool_lock);
		    return (struct rx_call *)0;
		}
#endif
	    } while (!(call = sq->newcall)
		     && !(socketp && *socketp != OSI_NULLSOCKET));
	    MUTEX_EXIT(&rx_serverPool_lock);
	    if (call) {
		MUTEX_ENTER(&call->lock);
	    }
	    break;
	}
    }

    MUTEX_ENTER(&freeSQEList_lock);
    *(struct rx_serverQueueEntry **)sq = rx_FreeSQEList;
    rx_FreeSQEList = sq;
    MUTEX_EXIT(&freeSQEList_lock);

    if (call) {
	clock_GetTime(&call->startTime);
	call->state = RX_STATE_ACTIVE;
	call->mode = RX_MODE_RECEIVING;
#ifdef RX_KERNEL_TRACE
	if (ICL_SETACTIVE(afs_iclSetp)) {
	    int glockOwner = ISAFS_GLOCK();
	    if (!glockOwner)
		AFS_GLOCK();
	    afs_Trace3(afs_iclSetp, CM_TRACE_WASHERE, ICL_TYPE_STRING,
		       __FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER,
		       call);
	    if (!glockOwner)
		AFS_GUNLOCK();
	}
#endif

	rxi_calltrace(RX_CALL_START, call);
	dpf(("rx_GetCall(port=%d, service=%d) ==> call %"AFS_PTR_FMT"\n",
	     call->conn->service->servicePort, call->conn->service->serviceId,
	     call));

	MUTEX_EXIT(&call->lock);
	MUTEX_ENTER(&rx_refcnt_mutex);
	CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
	MUTEX_EXIT(&rx_refcnt_mutex);
    } else {
	dpf(("rx_GetCall(socketp=%p, *socketp=0x%x)\n", socketp, *socketp));
    }

    return call;
}
#else /* RX_ENABLE_LOCKS */
struct rx_call *
rx_GetCall(int tno, struct rx_service *cur_service, osi_socket * socketp)
{
    struct rx_serverQueueEntry *sq;
    struct rx_call *call = (struct rx_call *)0, *choice2;
    struct rx_service *service = NULL;
    SPLVAR;

    NETPRI;
    MUTEX_ENTER(&freeSQEList_lock);

    if ((sq = rx_FreeSQEList)) {
	rx_FreeSQEList = *(struct rx_serverQueueEntry **)sq;
	MUTEX_EXIT(&freeSQEList_lock);
    } else {			/* otherwise allocate a new one and return that */
	MUTEX_EXIT(&freeSQEList_lock);
	sq = rxi_Alloc(sizeof(struct rx_serverQueueEntry));
	MUTEX_INIT(&sq->lock, "server Queue lock", MUTEX_DEFAULT, 0);
	CV_INIT(&sq->cv, "server Queue lock", CV_DEFAULT, 0);
    }
    MUTEX_ENTER(&sq->lock);

    if (cur_service != NULL) {
	cur_service->nRequestsRunning--;
	MUTEX_ENTER(&rx_quota_mutex);
	if (cur_service->nRequestsRunning < cur_service->minProcs)
	    rxi_minDeficit++;
	rxi_availProcs++;
	MUTEX_EXIT(&rx_quota_mutex);
    }
    if (queue_IsNotEmpty(&rx_incomingCallQueue)) {
	struct rx_call *tcall, *ncall;
	/* Scan for eligible incoming calls.  A call is not eligible
	 * if the maximum number of calls for its service type are
	 * already executing */
	/* One thread will process calls FCFS (to prevent starvation),
	 * while the other threads may run ahead looking for calls which
	 * have all their input data available immediately.  This helps
	 * keep threads from blocking, waiting for data from the client. */
	choice2 = (struct rx_call *)0;
	for (queue_Scan(&rx_incomingCallQueue, tcall, ncall, rx_call)) {
	    service = tcall->conn->service;
	    if (QuotaOK(service)) {
		MUTEX_ENTER(&rx_pthread_mutex);
		if (tno == rxi_fcfs_thread_num
		    || !tcall->queue_item_header.next) {
		    MUTEX_EXIT(&rx_pthread_mutex);
		    /* If we're the fcfs thread, then we'll just use
		     * this call. If we haven't been able to find an optimal
		     * choice, and we're at the end of the list, then use a
		     * 2d choice if one has been identified.  Otherwise... */
		    call = (choice2 ? choice2 : tcall);
		    service = call->conn->service;
		} else {
		    MUTEX_EXIT(&rx_pthread_mutex);
		    if (!queue_IsEmpty(&tcall->rq)) {
			struct rx_packet *rp;
			rp = queue_First(&tcall->rq, rx_packet);
			if (rp->header.seq == 1
			    && (!meltdown_1pkt
				|| (rp->header.flags & RX_LAST_PACKET))) {
			    call = tcall;
			} else if (rxi_2dchoice && !choice2
				   && !(tcall->flags & RX_CALL_CLEARED)
				   && (tcall->rprev > rxi_HardAckRate)) {
			    choice2 = tcall;
			} else
			    rxi_md2cnt++;
		    }
		}
	    }
	    if (call)
		break;
	}
    }

    if (call) {
	queue_Remove(call);
	/* we can't schedule a call if there's no data!!! */
	/* send an ack if there's no data, if we're missing the
	 * first packet, or we're missing something between first
	 * and last -- there's a "hole" in the incoming data. */
	if (queue_IsEmpty(&call->rq)
	    || queue_First(&call->rq, rx_packet)->header.seq != 1
	    || call->rprev != queue_Last(&call->rq, rx_packet)->header.seq)
	    rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);

	call->flags &= (~RX_CALL_WAIT_PROC);
	service->nRequestsRunning++;
	/* just started call in minProcs pool, need fewer to maintain
	 * guarantee */
	MUTEX_ENTER(&rx_quota_mutex);
	if (service->nRequestsRunning <= service->minProcs)
	    rxi_minDeficit--;
	MUTEX_EXIT(&rx_quota_mutex);
	rx_atomic_dec(&rx_nWaiting);
	/* MUTEX_EXIT(&call->lock); */
    } else {
	/* If there are no eligible incoming calls, add this process
	 * to the idle server queue, to wait for one */
	sq->newcall = 0;
	if (socketp) {
	    *socketp = OSI_NULLSOCKET;
	}
	sq->socketp = socketp;
	queue_Append(&rx_idleServerQueue, sq);
	do {
	    osi_rxSleep(sq);
#ifdef KERNEL
	    if (afs_termState == AFSOP_STOP_RXCALLBACK) {
		USERPRI;
		rxi_Free(sq, sizeof(struct rx_serverQueueEntry));
		return (struct rx_call *)0;
	    }
#endif
	} while (!(call = sq->newcall)
		 && !(socketp && *socketp != OSI_NULLSOCKET));
    }
    MUTEX_EXIT(&sq->lock);

    MUTEX_ENTER(&freeSQEList_lock);
    *(struct rx_serverQueueEntry **)sq = rx_FreeSQEList;
    rx_FreeSQEList = sq;
    MUTEX_EXIT(&freeSQEList_lock);

    if (call) {
	clock_GetTime(&call->startTime);
	call->state = RX_STATE_ACTIVE;
	call->mode = RX_MODE_RECEIVING;
#ifdef RX_KERNEL_TRACE
	if (ICL_SETACTIVE(afs_iclSetp)) {
	    int glockOwner = ISAFS_GLOCK();
	    if (!glockOwner)
		AFS_GLOCK();
	    afs_Trace3(afs_iclSetp, CM_TRACE_WASHERE, ICL_TYPE_STRING,
		       __FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER,
		       call);
	    if (!glockOwner)
		AFS_GUNLOCK();
	}
#endif

	rxi_calltrace(RX_CALL_START, call);
	dpf(("rx_GetCall(port=%d, service=%d) ==> call %p\n",
	     call->conn->service->servicePort, call->conn->service->serviceId,
	     call));
    } else {
	dpf(("rx_GetCall(socketp=%p, *socketp=0x%x)\n", socketp, *socketp));
    }

    USERPRI;
    return call;
}
#endif /* RX_ENABLE_LOCKS */
2128 /* Establish a procedure to be called when a packet arrives for a
2129 * call. This routine will be called at most once after each call,
2130 * and will also be called if there is an error condition on the call or
2131 * the call is complete. Used by multi rx to build a selection
2132 * function which determines which of several calls is likely to be a
2133 * good one to read from.
2134 * NOTE: the way this is currently implemented, it is probably only a
2135 * good idea to (1) use it immediately after a newcall (clients only)
2136 * and (2) only use it once. Other uses currently void your warranty
2139 rx_SetArrivalProc(struct rx_call *call,
2140 void (*proc) (struct rx_call * call,
2143 void * handle, int arg)
2145 call->arrivalProc = proc;
2146 call->arrivalProcHandle = handle;
2147 call->arrivalProcArg = arg;
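/*
 * Illustrative sketch, not part of the original source: one plausible
 * client-side registration against the hook stored above, in the spirit
 * of the multi rx selection function described in the comment. The
 * callback body, readyFlags, and slot are hypothetical.
 */
#if 0 /* example only, never compiled */
static void
example_arrival(struct rx_call *call, void *handle, int arg)
{
    /* Fires at most once per call -- on first arrival, on error, or on
     * completion -- so just record readiness in caller-owned state. */
    ((int *)handle)[arg] = 1;
}

call = rx_NewCall(conn);
rx_SetArrivalProc(call, example_arrival, (void *)readyFlags, slot);
#endif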
2150 /* Call is finished (possibly prematurely). Return rc to the peer, if
2151 * appropriate, and return the final error code from the conversation
2155 rx_EndCall(struct rx_call *call, afs_int32 rc)
2157 struct rx_connection *conn = call->conn;
2161 dpf(("rx_EndCall(call %"AFS_PTR_FMT" rc %d error %d abortCode %d)\n",
2162 call, rc, call->error, call->abortCode));
2165 MUTEX_ENTER(&call->lock);
2167 if (rc == 0 && call->error == 0) {
2168 call->abortCode = 0;
2169 call->abortCount = 0;
2172 call->arrivalProc = (void (*)())0;
2173 if (rc && call->error == 0) {
2174 rxi_CallError(call, rc);
2175 call->mode = RX_MODE_ERROR;
2176 /* Send an abort message to the peer if this error code has
2177 * only just been set. If it was set previously, assume the
2178 * peer has already been sent the error code or will request it
2180 rxi_SendCallAbort(call, (struct rx_packet *)0, 0, 0);
2182 if (conn->type == RX_SERVER_CONNECTION) {
2183 /* Make sure reply or at least dummy reply is sent */
2184 if (call->mode == RX_MODE_RECEIVING) {
2185 MUTEX_EXIT(&call->lock);
2186 rxi_WriteProc(call, 0, 0);
2187 MUTEX_ENTER(&call->lock);
2189 if (call->mode == RX_MODE_SENDING) {
2190 MUTEX_EXIT(&call->lock);
2191 rxi_FlushWrite(call);
2192 MUTEX_ENTER(&call->lock);
2194 rxi_calltrace(RX_CALL_END, call);
2195 /* Call goes to hold state until reply packets are acknowledged */
2196 if (call->tfirst + call->nSoftAcked < call->tnext) {
2197 call->state = RX_STATE_HOLD;
2199 call->state = RX_STATE_DALLY;
2200 rxi_ClearTransmitQueue(call, 0);
2201 rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
2202 rxevent_Cancel(call->keepAliveEvent, call,
2203 RX_CALL_REFCOUNT_ALIVE);
2205 } else { /* Client connection */
2207 /* Make sure server receives input packets, in the case where
2208 * no reply arguments are expected */
2209 if ((call->mode == RX_MODE_SENDING)
2210 || (call->mode == RX_MODE_RECEIVING && call->rnext == 1)) {
2211 MUTEX_EXIT(&call->lock);
2212 (void)rxi_ReadProc(call, &dummy, 1);
2213 MUTEX_ENTER(&call->lock);
2216 /* If we had an outstanding delayed ack, be nice to the server
2217 * and force-send it now.
2219 if (call->delayedAckEvent) {
2220 rxevent_Cancel(call->delayedAckEvent, call,
2221 RX_CALL_REFCOUNT_DELAY);
2222 call->delayedAckEvent = NULL;
2223 rxi_SendDelayedAck(NULL, call, NULL);
2226 /* We need to release the call lock since it's lower than the
2227 * conn_call_lock and we don't want to hold the conn_call_lock
2228 * over the rx_ReadProc call. The conn_call_lock needs to be held
2229 * here for the case where rx_NewCall is perusing the calls on
2230 * the connection structure. We don't want to signal until
2231 * rx_NewCall is in a stable state. Otherwise, rx_NewCall may
2232 * have checked this call, found it active and by the time it
2233 * goes to sleep, will have missed the signal.
2235 MUTEX_EXIT(&call->lock);
2236 MUTEX_ENTER(&conn->conn_call_lock);
2237 MUTEX_ENTER(&call->lock);
2239 if (!(call->flags & RX_CALL_PEER_BUSY)) {
2240 conn->lastBusy[call->channel] = 0;
2243 MUTEX_ENTER(&conn->conn_data_lock);
2244 conn->flags |= RX_CONN_BUSY;
2245 if (conn->flags & RX_CONN_MAKECALL_WAITING) {
2246 MUTEX_EXIT(&conn->conn_data_lock);
2247 #ifdef RX_ENABLE_LOCKS
2248 CV_BROADCAST(&conn->conn_call_cv);
2253 #ifdef RX_ENABLE_LOCKS
2255 MUTEX_EXIT(&conn->conn_data_lock);
2257 #endif /* RX_ENABLE_LOCKS */
2258 call->state = RX_STATE_DALLY;
2260 error = call->error;
2262 /* currentPacket, nLeft, and nFree must be zeroed here, because
2263 * ResetCall cannot: ResetCall may be called at splnet(), in the
2264 * kernel version, and may interrupt the macros rx_Read or
2265 * rx_Write, which run at normal priority for efficiency. */
2266 if (call->currentPacket) {
2267 #ifdef RX_TRACK_PACKETS
2268 call->currentPacket->flags &= ~RX_PKTFLAG_CP;
2270 rxi_FreePacket(call->currentPacket);
2271 call->currentPacket = (struct rx_packet *)0;
2274 call->nLeft = call->nFree = call->curlen = 0;
2276 /* Free any packets from the last call to ReadvProc/WritevProc */
2277 #ifdef RXDEBUG_PACKET
2279 #endif /* RXDEBUG_PACKET */
2280 rxi_FreePackets(0, &call->iovq);
2281 MUTEX_EXIT(&call->lock);
2283 MUTEX_ENTER(&rx_refcnt_mutex);
2284 CALL_RELE(call, RX_CALL_REFCOUNT_BEGIN);
2285 MUTEX_EXIT(&rx_refcnt_mutex);
2286 if (conn->type == RX_CLIENT_CONNECTION) {
2287 MUTEX_ENTER(&conn->conn_data_lock);
2288 conn->flags &= ~RX_CONN_BUSY;
2289 MUTEX_EXIT(&conn->conn_data_lock);
2290 MUTEX_EXIT(&conn->conn_call_lock);
2294 * Map errors to the local host's errno.h format.
2296 error = ntoh_syserr_conv(error);
2300 #if !defined(KERNEL)
2302 /* Call this routine when shutting down a server or client (especially
2303 * clients). This will allow Rx to gracefully garbage collect server
2304 * connections, and reduce the number of retries that a server might
2305 * make to a dead client.
2306 * This is not quite right, since some calls may still be ongoing and
2307 * we can't lock them to destroy them. */
2311 struct rx_connection **conn_ptr, **conn_end;
2315 if (rxinit_status == 1) {
2317 return; /* Already shutdown. */
2319 rxi_DeleteCachedConnections();
2320 if (rx_connHashTable) {
2321 MUTEX_ENTER(&rx_connHashTable_lock);
2322 for (conn_ptr = &rx_connHashTable[0], conn_end =
2323 &rx_connHashTable[rx_hashTableSize]; conn_ptr < conn_end;
2325 struct rx_connection *conn, *next;
2326 for (conn = *conn_ptr; conn; conn = next) {
2328 if (conn->type == RX_CLIENT_CONNECTION) {
2329 MUTEX_ENTER(&rx_refcnt_mutex);
2331 MUTEX_EXIT(&rx_refcnt_mutex);
2332 #ifdef RX_ENABLE_LOCKS
2333 rxi_DestroyConnectionNoLock(conn);
2334 #else /* RX_ENABLE_LOCKS */
2335 rxi_DestroyConnection(conn);
2336 #endif /* RX_ENABLE_LOCKS */
2340 #ifdef RX_ENABLE_LOCKS
2341 while (rx_connCleanup_list) {
2342 struct rx_connection *conn;
2343 conn = rx_connCleanup_list;
2344 rx_connCleanup_list = rx_connCleanup_list->next;
2345 MUTEX_EXIT(&rx_connHashTable_lock);
2346 rxi_CleanupConnection(conn);
2347 MUTEX_ENTER(&rx_connHashTable_lock);
2349 MUTEX_EXIT(&rx_connHashTable_lock);
2350 #endif /* RX_ENABLE_LOCKS */
2355 afs_winsockCleanup();
2363 /* if we wake up the packet waiter too often, we can get in a loop with two
2364 AllocSendPackets each waking the other up (from ReclaimPacket calls) */
2366 rxi_PacketsUnWait(void)
2368 if (!rx_waitingForPackets) {
2372 if (rxi_OverQuota(RX_PACKET_CLASS_SEND)) {
2373 return; /* still over quota */
2376 rx_waitingForPackets = 0;
2377 #ifdef RX_ENABLE_LOCKS
2378 CV_BROADCAST(&rx_waitingForPackets_cv);
2380 osi_rxWakeup(&rx_waitingForPackets);
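/*
 * Illustrative sketch, not original code: the waiter side that the
 * wakeup above pairs with, roughly what the send-packet allocator does.
 * The exact loop condition and which lock guards the CV are assumed.
 */
#if 0 /* example only */
MUTEX_ENTER(&rx_freePktQ_lock);
while (rxi_OverQuota(RX_PACKET_CLASS_SEND)) {
    rx_waitingForPackets = 1;   /* ask rxi_PacketsUnWait for a wakeup */
#ifdef RX_ENABLE_LOCKS
    CV_WAIT(&rx_waitingForPackets_cv, &rx_freePktQ_lock);
#else
    osi_rxSleep(&rx_waitingForPackets);
#endif
}
MUTEX_EXIT(&rx_freePktQ_lock);
#endif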
2386 /* ------------------Internal interfaces------------------------- */
2388 /* Return this process's service structure for the
2389 * specified socket and service */
2390 static struct rx_service *
2391 rxi_FindService(osi_socket socket, u_short serviceId)
2393 struct rx_service **sp;
2394 for (sp = &rx_services[0]; *sp; sp++) {
2395 if ((*sp)->serviceId == serviceId && (*sp)->socket == socket)
2401 #ifdef RXDEBUG_PACKET
2402 #ifdef KDUMP_RX_LOCK
2403 static struct rx_call_rx_lock *rx_allCallsp = 0;
2405 static struct rx_call *rx_allCallsp = 0;
2407 #endif /* RXDEBUG_PACKET */
2409 /* Allocate a call structure, for the indicated channel of the
2410 * supplied connection. The mode and state of the call must be set by
2411 * the caller. Returns the call with mutex locked. */
2412 static struct rx_call *
2413 rxi_NewCall(struct rx_connection *conn, int channel)
2415 struct rx_call *call;
2416 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
2417 struct rx_call *cp; /* Call pointer temp */
2418 struct rx_call *nxp; /* Next call pointer, for queue_Scan */
2419 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
2421 dpf(("rxi_NewCall(conn %"AFS_PTR_FMT", channel %d)\n", conn, channel));
2423 /* Grab an existing call structure, or allocate a new one.
2424 * Existing call structures are assumed to have been left reset by
2426 MUTEX_ENTER(&rx_freeCallQueue_lock);
2428 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
2430 * EXCEPT that the TQ might not yet be cleared out.
2431 * Skip over those with in-use TQs.
2434 for (queue_Scan(&rx_freeCallQueue, cp, nxp, rx_call)) {
2435 if (!(cp->flags & RX_CALL_TQ_BUSY)) {
2441 #else /* AFS_GLOBAL_RXLOCK_KERNEL */
2442 if (queue_IsNotEmpty(&rx_freeCallQueue)) {
2443 call = queue_First(&rx_freeCallQueue, rx_call);
2444 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
2446 if (rx_stats_active)
2447 rx_atomic_dec(&rx_stats.nFreeCallStructs);
2448 MUTEX_EXIT(&rx_freeCallQueue_lock);
2449 MUTEX_ENTER(&call->lock);
2450 CLEAR_CALL_QUEUE_LOCK(call);
2451 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
2452 /* Now, if TQ wasn't cleared earlier, do it now. */
2453 rxi_WaitforTQBusy(call);
2454 if (call->flags & RX_CALL_TQ_CLEARME) {
2455 rxi_ClearTransmitQueue(call, 1);
2456 /*queue_Init(&call->tq);*/
2458 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
2459 /* Bind the call to its connection structure */
2461 rxi_ResetCall(call, 1);
2464 call = rxi_Alloc(sizeof(struct rx_call));
2465 #ifdef RXDEBUG_PACKET
2466 call->allNextp = rx_allCallsp;
2467 rx_allCallsp = call;
2469 rx_atomic_inc_and_read(&rx_stats.nCallStructs);
2470 #else /* RXDEBUG_PACKET */
2471 rx_atomic_inc(&rx_stats.nCallStructs);
2472 #endif /* RXDEBUG_PACKET */
2474 MUTEX_EXIT(&rx_freeCallQueue_lock);
2475 MUTEX_INIT(&call->lock, "call lock", MUTEX_DEFAULT, NULL);
2476 MUTEX_ENTER(&call->lock);
2477 CV_INIT(&call->cv_twind, "call twind", CV_DEFAULT, 0);
2478 CV_INIT(&call->cv_rq, "call rq", CV_DEFAULT, 0);
2479 CV_INIT(&call->cv_tq, "call tq", CV_DEFAULT, 0);
2481 /* Initialize once-only items */
2482 queue_Init(&call->tq);
2483 queue_Init(&call->rq);
2484 queue_Init(&call->iovq);
2485 #ifdef RXDEBUG_PACKET
2486 call->rqc = call->tqc = call->iovqc = 0;
2487 #endif /* RXDEBUG_PACKET */
2488 /* Bind the call to its connection structure (prereq for reset) */
2490 rxi_ResetCall(call, 1);
2492 call->channel = channel;
2493 call->callNumber = &conn->callNumber[channel];
2494 call->rwind = conn->rwind[channel];
2495 call->twind = conn->twind[channel];
2496 /* Note that the next expected call number is retained (in
2497 * conn->callNumber[i]), even if we reallocate the call structure
2499 conn->call[channel] = call;
2500 /* if the channel's never been used (== 0), we should start at 1, otherwise
2501 * the call number is valid from the last time this channel was used */
2502 if (*call->callNumber == 0)
2503 *call->callNumber = 1;
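/*
 * Illustrative sketch, not original code: the call-number retention
 * described above, with hypothetical values. Channels keep their
 * sequence across call-structure reallocation; only a never-used
 * channel (callNumber == 0) is bumped to 1.
 */
#if 0 /* example only */
/* assume conn->callNumber[2] == 18 because channel 2 already ran a call */
call = rxi_NewCall(conn, 2);          /* returned with call->lock held */
osi_Assert(*call->callNumber == 18);  /* retained, not reset */

/* assume conn->callNumber[3] == 0 because channel 3 was never used */
call = rxi_NewCall(conn, 3);
osi_Assert(*call->callNumber == 1);   /* bumped from 0 to 1 */
#endif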
2508 /* A call has been inactive long enough that we can throw away
2509 * state, including the call structure, which is placed on the call
2512 * call->lock and rx_refcnt_mutex are held upon entry.
2513 * haveCTLock is set when called from rxi_ReapConnections.
2516 rxi_FreeCall(struct rx_call *call, int haveCTLock)
2518 int channel = call->channel;
2519 struct rx_connection *conn = call->conn;
2522 if (call->state == RX_STATE_DALLY || call->state == RX_STATE_HOLD)
2523 (*call->callNumber)++;
2525 * We are setting the state to RX_STATE_RESET to
2526 * ensure that no one else will attempt to use this
2527 * call once we drop the refcnt lock. We must drop
2528 * the refcnt lock before calling rxi_ResetCall
2529 * because it cannot be held across acquiring the
2530 * freepktQ lock. NewCall does the same.
2532 call->state = RX_STATE_RESET;
2533 MUTEX_EXIT(&rx_refcnt_mutex);
2534 rxi_ResetCall(call, 0);
2535 call->conn->call[channel] = (struct rx_call *)0;
2537 MUTEX_ENTER(&rx_freeCallQueue_lock);
2538 SET_CALL_QUEUE_LOCK(call, &rx_freeCallQueue_lock);
2539 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
2540 /* A call may be free even though its transmit queue is still in use.
2541 * Since we search the call list from head to tail, put busy calls at
2542 * the head of the list, and idle calls at the tail.
2544 if (call->flags & RX_CALL_TQ_BUSY)
2545 queue_Prepend(&rx_freeCallQueue, call);
2547 queue_Append(&rx_freeCallQueue, call);
2548 #else /* AFS_GLOBAL_RXLOCK_KERNEL */
2549 queue_Append(&rx_freeCallQueue, call);
2550 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
2551 if (rx_stats_active)
2552 rx_atomic_inc(&rx_stats.nFreeCallStructs);
2553 MUTEX_EXIT(&rx_freeCallQueue_lock);
2555 /* Destroy the connection if it was previously slated for
2556 * destruction, i.e. the Rx client code previously called
2557 * rx_DestroyConnection (client connections), or
2558 * rxi_ReapConnections called the same routine (server
2559 * connections). Only do this, however, if there are no
2560 * outstanding calls. Note that for fine grain locking, there appears
2561 * to be a deadlock in that rxi_FreeCall has a call locked and
2562 * DestroyConnectionNoLock locks each call in the conn. But note a
2563 * few lines up where we have removed this call from the conn.
2564 * If someone else destroys a connection, they either have no
2565 * call lock held or are going through this section of code.
2567 MUTEX_ENTER(&conn->conn_data_lock);
2568 if (conn->flags & RX_CONN_DESTROY_ME && !(conn->flags & RX_CONN_MAKECALL_WAITING)) {
2569 MUTEX_ENTER(&rx_refcnt_mutex);
2571 MUTEX_EXIT(&rx_refcnt_mutex);
2572 MUTEX_EXIT(&conn->conn_data_lock);
2573 #ifdef RX_ENABLE_LOCKS
2575 rxi_DestroyConnectionNoLock(conn);
2577 rxi_DestroyConnection(conn);
2578 #else /* RX_ENABLE_LOCKS */
2579 rxi_DestroyConnection(conn);
2580 #endif /* RX_ENABLE_LOCKS */
2582 MUTEX_EXIT(&conn->conn_data_lock);
2584 MUTEX_ENTER(&rx_refcnt_mutex);
2587 rx_atomic_t rxi_Allocsize = RX_ATOMIC_INIT(0);
2588 rx_atomic_t rxi_Alloccnt = RX_ATOMIC_INIT(0);
2591 rxi_Alloc(size_t size)
2595 if (rx_stats_active) {
2596 rx_atomic_add(&rxi_Allocsize, (int) size);
2597 rx_atomic_inc(&rxi_Alloccnt);
2601 #if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
2602 afs_osi_Alloc_NoSleep(size);
2607 osi_Panic("rxi_Alloc error");
2613 rxi_Free(void *addr, size_t size)
2615 if (rx_stats_active) {
2616 rx_atomic_sub(&rxi_Allocsize, (int) size);
2617 rx_atomic_dec(&rxi_Alloccnt);
2619 osi_Free(addr, size);
2623 rxi_SetPeerMtu(struct rx_peer *peer, afs_uint32 host, afs_uint32 port, int mtu)
2625 struct rx_peer **peer_ptr = NULL, **peer_end = NULL;
2626 struct rx_peer *next = NULL;
2630 MUTEX_ENTER(&rx_peerHashTable_lock);
2632 peer_ptr = &rx_peerHashTable[0];
2633 peer_end = &rx_peerHashTable[rx_hashTableSize];
2636 for ( ; peer_ptr < peer_end; peer_ptr++) {
2639 for ( ; peer; peer = next) {
2641 if (host == peer->host)
2646 hashIndex = PEER_HASH(host, port);
2647 for (peer = rx_peerHashTable[hashIndex]; peer; peer = peer->next) {
2648 if ((peer->host == host) && (peer->port == port))
2653 MUTEX_ENTER(&rx_peerHashTable_lock);
2658 MUTEX_EXIT(&rx_peerHashTable_lock);
2660 MUTEX_ENTER(&peer->peer_lock);
2661 /* We don't handle dropping below min, so don't */
2662 mtu = MAX(mtu, RX_MIN_PACKET_SIZE);
2663 peer->ifMTU=MIN(mtu, peer->ifMTU);
2664 peer->natMTU = rxi_AdjustIfMTU(peer->ifMTU);
2665 /* if we tweaked this down, need to tune our peer MTU too */
2666 peer->MTU = MIN(peer->MTU, peer->natMTU);
2667 /* if we discovered a sub-1500 mtu, degrade */
2668 if (peer->ifMTU < OLD_MAX_PACKET_SIZE)
2669 peer->maxDgramPackets = 1;
2670 /* We no longer have valid peer packet information */
2671 if (peer->maxPacketSize-RX_IPUDP_SIZE > peer->ifMTU)
2672 peer->maxPacketSize = 0;
2673 MUTEX_EXIT(&peer->peer_lock);
2675 MUTEX_ENTER(&rx_peerHashTable_lock);
2677 if (host && !port) {
2679 /* pick up where we left off */
2683 MUTEX_EXIT(&rx_peerHashTable_lock);
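/*
 * Illustrative walk-through, not original code: the clamping above with
 * hypothetical numbers -- a path probe reports mtu = 1400 to a peer
 * whose ifMTU was 1500.
 */
#if 0 /* example only */
mtu = MAX(1400, RX_MIN_PACKET_SIZE);           /* still 1400 */
peer->ifMTU = MIN(1400, 1500);                 /* ifMTU drops to 1400 */
peer->natMTU = rxi_AdjustIfMTU(peer->ifMTU);   /* re-derived from ifMTU */
peer->MTU = MIN(peer->MTU, peer->natMTU);      /* never above natMTU */
/* 1400 is sub-1500, so maxDgramPackets degrades to 1 (no jumbograms),
 * and a stale maxPacketSize is zeroed by the test above. */
#endif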
2686 /* Find the peer process represented by the supplied (host,port)
2687 * combination. If there is no appropriate active peer structure, a
2688 * new one will be allocated and initialized
2689 * The origPeer, if set, is a pointer to a peer structure on which the
2690 * refcount will be decremented. This is used to replace the peer
2691 * structure hanging off a connection structure */
2693 rxi_FindPeer(afs_uint32 host, u_short port,
2694 struct rx_peer *origPeer, int create)
2698 hashIndex = PEER_HASH(host, port);
2699 MUTEX_ENTER(&rx_peerHashTable_lock);
2700 for (pp = rx_peerHashTable[hashIndex]; pp; pp = pp->next) {
2701 if ((pp->host == host) && (pp->port == port))
2706 pp = rxi_AllocPeer(); /* This bzero's *pp */
2707 pp->host = host; /* set here or in InitPeerParams is zero */
2709 MUTEX_INIT(&pp->peer_lock, "peer_lock", MUTEX_DEFAULT, 0);
2710 queue_Init(&pp->congestionQueue);
2711 queue_Init(&pp->rpcStats);
2712 pp->next = rx_peerHashTable[hashIndex];
2713 rx_peerHashTable[hashIndex] = pp;
2714 rxi_InitPeerParams(pp);
2715 if (rx_stats_active)
2716 rx_atomic_inc(&rx_stats.nPeerStructs);
2723 origPeer->refCount--;
2724 MUTEX_EXIT(&rx_peerHashTable_lock);
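/*
 * Illustrative sketch, not original code: the origPeer argument is a
 * refcount handoff for rehoming a connection onto a new peer -- the new
 * peer's refCount is raised and the old one's dropped under a single
 * acquisition of rx_peerHashTable_lock. newHost/newPort are placeholders.
 */
#if 0 /* example only */
conn->peer = rxi_FindPeer(newHost, newPort, conn->peer, 1);
#endif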
2729 /* Find the connection at (host, port) started at epoch, and with the
2730 * given connection id. Creates the server connection if necessary.
2731 * The type specifies whether a client connection or a server
2732 * connection is desired. In both cases, (host, port) specify the
2733 * peer's (host, port) pair. Client connections are not made
2734 * automatically by this routine. The parameter socket gives the
2735 * socket descriptor on which the packet was received. This is used,
2736 * in the case of server connections, to check that *new* connections
2737 * come via a valid (port, serviceId). Finally, the securityIndex
2738 * parameter must match the existing index for the connection. If a
2739 * server connection is created, it will be created using the supplied
2740 * index, if the index is valid for this service */
2741 struct rx_connection *
2742 rxi_FindConnection(osi_socket socket, afs_uint32 host,
2743 u_short port, u_short serviceId, afs_uint32 cid,
2744 afs_uint32 epoch, int type, u_int securityIndex)
2746 int hashindex, flag, i;
2747 struct rx_connection *conn;
2748 hashindex = CONN_HASH(host, port, cid, epoch, type);
2749 MUTEX_ENTER(&rx_connHashTable_lock);
2750 rxLastConn ? (conn = rxLastConn, flag = 0) : (conn =
2751 rx_connHashTable[hashindex],
2754 if ((conn->type == type) && ((cid & RX_CIDMASK) == conn->cid)
2755 && (epoch == conn->epoch)) {
2756 struct rx_peer *pp = conn->peer;
2757 if (securityIndex != conn->securityIndex) {
2758 /* this isn't supposed to happen, but someone could forge a packet
2759 * like this, and there seems to be some CM bug that makes this
2760 * happen from time to time -- in which case, the fileserver
2762 MUTEX_EXIT(&rx_connHashTable_lock);
2763 return (struct rx_connection *)0;
2765 if (pp->host == host && pp->port == port)
2767 if (type == RX_CLIENT_CONNECTION && pp->port == port)
2769 /* So what happens when it's a callback connection? */
2770 if ( /*type == RX_CLIENT_CONNECTION && */
2771 (conn->epoch & 0x80000000))
2775 /* the connection rxLastConn that was used the last time is not the
2776 ** one we are looking for now. Hence, start searching in the hash */
2778 conn = rx_connHashTable[hashindex];
2783 struct rx_service *service;
2784 if (type == RX_CLIENT_CONNECTION) {
2785 MUTEX_EXIT(&rx_connHashTable_lock);
2786 return (struct rx_connection *)0;
2788 service = rxi_FindService(socket, serviceId);
2789 if (!service || (securityIndex >= service->nSecurityObjects)
2790 || (service->securityObjects[securityIndex] == 0)) {
2791 MUTEX_EXIT(&rx_connHashTable_lock);
2792 return (struct rx_connection *)0;
2794 conn = rxi_AllocConnection(); /* This bzero's the connection */
2795 MUTEX_INIT(&conn->conn_call_lock, "conn call lock", MUTEX_DEFAULT, 0);
2796 MUTEX_INIT(&conn->conn_data_lock, "conn data lock", MUTEX_DEFAULT, 0);
2797 CV_INIT(&conn->conn_call_cv, "conn call cv", CV_DEFAULT, 0);
2798 conn->next = rx_connHashTable[hashindex];
2799 rx_connHashTable[hashindex] = conn;
2800 conn->peer = rxi_FindPeer(host, port, 0, 1);
2801 conn->type = RX_SERVER_CONNECTION;
2802 conn->lastSendTime = clock_Sec(); /* don't GC immediately */
2803 conn->epoch = epoch;
2804 conn->cid = cid & RX_CIDMASK;
2805 /* conn->serial = conn->lastSerial = 0; */
2806 /* conn->timeout = 0; */
2807 conn->ackRate = RX_FAST_ACK_RATE;
2808 conn->service = service;
2809 conn->serviceId = serviceId;
2810 conn->securityIndex = securityIndex;
2811 conn->securityObject = service->securityObjects[securityIndex];
2812 conn->nSpecific = 0;
2813 conn->specific = NULL;
2814 rx_SetConnDeadTime(conn, service->connDeadTime);
2815 rx_SetConnIdleDeadTime(conn, service->idleDeadTime);
2816 rx_SetServerConnIdleDeadErr(conn, service->idleDeadErr);
2817 for (i = 0; i < RX_MAXCALLS; i++) {
2818 conn->twind[i] = rx_initSendWindow;
2819 conn->rwind[i] = rx_initReceiveWindow;
2821 /* Notify security object of the new connection */
2822 RXS_NewConnection(conn->securityObject, conn);
2823 /* XXXX Connection timeout? */
2824 if (service->newConnProc)
2825 (*service->newConnProc) (conn);
2826 if (rx_stats_active)
2827 rx_atomic_inc(&rx_stats.nServerConns);
2830 MUTEX_ENTER(&rx_refcnt_mutex);
2832 MUTEX_EXIT(&rx_refcnt_mutex);
2834 rxLastConn = conn; /* store this connection as the last conn used */
2835 MUTEX_EXIT(&rx_connHashTable_lock);
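/*
 * Illustrative expansion, not original code: the rxLastConn fast path at
 * the top of rxi_FindConnection unrolled -- a one-entry cache of the
 * last connection used is tried before walking the hash chain. The
 * flag = 1 branch is inferred from the surrounding logic.
 */
#if 0 /* example only -- equivalent to the ?: expression above */
if (rxLastConn) {
    conn = rxLastConn;                  /* try the cached connection */
    flag = 0;                           /* not on the hash chain yet */
} else {
    conn = rx_connHashTable[hashindex];
    flag = 1;                           /* scanning the chain directly */
}
#endif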
2840 * Timeout a call on a busy call channel if appropriate.
2842 * @param[in] call The busy call.
2844 * @pre 'call' is marked as busy (namely,
2845 * call->conn->lastBusy[call->channel] != 0)
2847 * @pre call->lock is held
2848 * @pre rxi_busyChannelError is nonzero
2850 * @note call->lock is dropped and reacquired
2853 rxi_CheckBusy(struct rx_call *call)
2855 struct rx_connection *conn = call->conn;
2856 int channel = call->channel;
2857 int freechannel = 0;
2859 afs_uint32 callNumber = *call->callNumber;
2861 MUTEX_EXIT(&call->lock);
2863 MUTEX_ENTER(&conn->conn_call_lock);
2865 /* Are there any other call slots on this conn that we should try? Look for
2866 * slots that are empty and are either non-busy, or were marked as busy
2867 * longer than conn->secondsUntilDead seconds before this call started. */
2869 for (i = 0; i < RX_MAXCALLS && !freechannel; i++) {
2871 /* only look at channels that aren't us */
2875 if (conn->lastBusy[i]) {
2876 /* if this channel looked busy too recently, don't look at it */
2877 if (conn->lastBusy[i] >= call->startTime.sec) {
2880 if (call->startTime.sec - conn->lastBusy[i] < conn->secondsUntilDead) {
2885 if (conn->call[i]) {
2886 struct rx_call *tcall = conn->call[i];
2887 MUTEX_ENTER(&tcall->lock);
2888 if (tcall->state == RX_STATE_DALLY) {
2891 MUTEX_EXIT(&tcall->lock);
2897 MUTEX_EXIT(&conn->conn_call_lock);
2899 MUTEX_ENTER(&call->lock);
2901 /* Since the call->lock and conn->conn_call_lock have been released it is
2902 * possible that (1) the call may no longer be busy and/or (2) the call may
2903 * have been reused by another waiting thread. Therefore, we must confirm
2904 * that the call state has not changed when deciding whether or not to
2905 * force this application thread to retry by forcing a Timeout error. */
2907 if (freechannel && *call->callNumber == callNumber &&
2908 (call->flags & RX_CALL_PEER_BUSY)) {
2909 /* Since 'freechannel' is set, there exists another channel in this
2910 * rx_conn that the application thread might be able to use. We know
2911 * that we have the correct call since callNumber is unchanged, and we
2912 * know that the call is still busy. So, set the call error state to
2913 * rxi_busyChannelError so the application can retry the request,
2914 * presumably on a less-busy call channel. */
2916 rxi_CallError(call, rxi_busyChannelError);
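/*
 * Illustrative sketch, not original code: how an application might opt
 * in and retry. rx_SetBusyChannelError stores the value used above as
 * rxi_busyChannelError; the choice of RX_CALL_TIMEOUT and the RPC stub
 * name are hypothetical.
 */
#if 0 /* example only */
rx_SetBusyChannelError(RX_CALL_TIMEOUT);   /* once, at startup */

code = EXAMPLE_Op(conn, args);
if (code == RX_CALL_TIMEOUT)        /* raised by rxi_CheckBusy: another */
    code = EXAMPLE_Op(conn, args);  /* channel looked free, so retry */
#endif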
2920 /* There are two packet tracing routines available for testing and monitoring
2921 * Rx. One is called just after every packet is received and the other is
2922 * called just before every packet is sent. Received packets have had their
2923 * headers decoded, and packets to be sent have not yet had their headers
2924 * encoded. Both take two parameters: a pointer to the packet and a sockaddr
2925 * containing the network address. Both can be modified. The return value, if
2926 * non-zero, indicates that the packet should be dropped. */
2928 int (*rx_justReceived) (struct rx_packet *, struct sockaddr_in *) = 0;
2929 int (*rx_almostSent) (struct rx_packet *, struct sockaddr_in *) = 0;
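/*
 * Illustrative sketch, not original code: a hypothetical tracer hung on
 * the receive-side hook declared above. A non-zero return asks
 * rxi_ReceivePacket to drop the packet.
 */
#if 0 /* example only */
static int
example_justReceived(struct rx_packet *pkt, struct sockaddr_in *addr)
{
    /* Headers are already decoded at this point, per the comment above. */
    if (pkt->header.type == RX_PACKET_TYPE_DEBUG)
        return 1;               /* e.g. drop debug queries */
    return 0;                   /* let everything else through */
}

rx_justReceived = example_justReceived;    /* install before traffic */
#endif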
2931 /* A packet has been received off the interface. Np is the packet, socket is
2932 * the socket number it was received from (useful in determining which service
2933 * this packet corresponds to), and (host, port) reflect the host,port of the
2934 * sender. This call returns the packet to the caller if it is finished with
2935 * it, rather than de-allocating it, just as a small performance hack */
2938 rxi_ReceivePacket(struct rx_packet *np, osi_socket socket,
2939 afs_uint32 host, u_short port, int *tnop,
2940 struct rx_call **newcallp)
2942 struct rx_call *call;
2943 struct rx_connection *conn;
2945 afs_uint32 currentCallNumber;
2951 struct rx_packet *tnp;
2954 /* We don't print out the packet until now because (1) the time may not be
2955 * accurate enough until now in the lwp implementation (rx_Listener only gets
2956 * the time after the packet is read) and (2) from a protocol point of view,
2957 * this is the first time the packet has been seen */
2958 packetType = (np->header.type > 0 && np->header.type < RX_N_PACKET_TYPES)
2959 ? rx_packetTypes[np->header.type - 1] : "*UNKNOWN*";
2960 dpf(("R %d %s: %x.%d.%d.%d.%d.%d.%d flags %d, packet %"AFS_PTR_FMT"\n",
2961 np->header.serial, packetType, ntohl(host), ntohs(port), np->header.serviceId,
2962 np->header.epoch, np->header.cid, np->header.callNumber,
2963 np->header.seq, np->header.flags, np));
2966 if (np->header.type == RX_PACKET_TYPE_VERSION) {
2967 return rxi_ReceiveVersionPacket(np, socket, host, port, 1);
2970 if (np->header.type == RX_PACKET_TYPE_DEBUG) {
2971 return rxi_ReceiveDebugPacket(np, socket, host, port, 1);
2974 /* If an input tracer function is defined, call it with the packet and
2975 * network address. Note this function may modify its arguments. */
2976 if (rx_justReceived) {
2977 struct sockaddr_in addr;
2979 addr.sin_family = AF_INET;
2980 addr.sin_port = port;
2981 addr.sin_addr.s_addr = host;
2982 #ifdef STRUCT_SOCKADDR_HAS_SA_LEN
2983 addr.sin_len = sizeof(addr);
2984 #endif /* AFS_OSF_ENV */
2985 drop = (*rx_justReceived) (np, &addr);
2986 /* drop packet if return value is non-zero */
2989 port = addr.sin_port; /* in case fcn changed addr */
2990 host = addr.sin_addr.s_addr;
2994 /* If packet was not sent by the client, then *we* must be the client */
2995 type = ((np->header.flags & RX_CLIENT_INITIATED) != RX_CLIENT_INITIATED)
2996 ? RX_CLIENT_CONNECTION : RX_SERVER_CONNECTION;
2998 /* Find the connection (or fabricate one, if we're the server & if
2999 * necessary) associated with this packet */
3001 rxi_FindConnection(socket, host, port, np->header.serviceId,
3002 np->header.cid, np->header.epoch, type,
3003 np->header.securityIndex);
3006 /* If no connection found or fabricated, just ignore the packet.
3007 * (An argument could be made for sending an abort packet for
3012 /* If the connection is in an error state, send an abort packet and ignore
3013 * the incoming packet */
3015 /* Don't respond to an abort packet--we don't want loops! */
3016 MUTEX_ENTER(&conn->conn_data_lock);
3017 if (np->header.type != RX_PACKET_TYPE_ABORT)
3018 np = rxi_SendConnectionAbort(conn, np, 1, 0);
3019 MUTEX_ENTER(&rx_refcnt_mutex);
3021 MUTEX_EXIT(&rx_refcnt_mutex);
3022 MUTEX_EXIT(&conn->conn_data_lock);
3026 /* Check for connection-only requests (i.e. not call specific). */
3027 if (np->header.callNumber == 0) {
3028 switch (np->header.type) {
3029 case RX_PACKET_TYPE_ABORT: {
3030 /* What if the supplied error is zero? */
3031 afs_int32 errcode = ntohl(rx_GetInt32(np, 0));
3032 dpf(("rxi_ReceivePacket ABORT rx_GetInt32 = %d\n", errcode));
3033 rxi_ConnectionError(conn, errcode);
3034 MUTEX_ENTER(&rx_refcnt_mutex);
3036 MUTEX_EXIT(&rx_refcnt_mutex);
3039 case RX_PACKET_TYPE_CHALLENGE:
3040 tnp = rxi_ReceiveChallengePacket(conn, np, 1);
3041 MUTEX_ENTER(&rx_refcnt_mutex);
3043 MUTEX_EXIT(&rx_refcnt_mutex);
3045 case RX_PACKET_TYPE_RESPONSE:
3046 tnp = rxi_ReceiveResponsePacket(conn, np, 1);
3047 MUTEX_ENTER(&rx_refcnt_mutex);
3049 MUTEX_EXIT(&rx_refcnt_mutex);
3051 case RX_PACKET_TYPE_PARAMS:
3052 case RX_PACKET_TYPE_PARAMS + 1:
3053 case RX_PACKET_TYPE_PARAMS + 2:
3054 /* ignore these packet types for now */
3055 MUTEX_ENTER(&rx_refcnt_mutex);
3057 MUTEX_EXIT(&rx_refcnt_mutex);
3062 /* Should not reach here, unless the peer is broken: send an
3064 rxi_ConnectionError(conn, RX_PROTOCOL_ERROR);
3065 MUTEX_ENTER(&conn->conn_data_lock);
3066 tnp = rxi_SendConnectionAbort(conn, np, 1, 0);
3067 MUTEX_ENTER(&rx_refcnt_mutex);
3069 MUTEX_EXIT(&rx_refcnt_mutex);
3070 MUTEX_EXIT(&conn->conn_data_lock);
3075 channel = np->header.cid & RX_CHANNELMASK;
3076 call = conn->call[channel];
3077 #ifdef RX_ENABLE_LOCKS
3079 MUTEX_ENTER(&call->lock);
3080 /* Test to see if call struct is still attached to conn. */
3081 if (call != conn->call[channel]) {
3083 MUTEX_EXIT(&call->lock);
3084 if (type == RX_SERVER_CONNECTION) {
3085 call = conn->call[channel];
3086 /* If we started with no call attached and there is one now,
3087 * another thread is also running this routine and has gotten
3088 * the connection channel. We should drop this packet in the tests
3089 * below. If there was a call on this connection and it's now
3090 * gone, then we'll be making a new call below.
3091 * If there was previously a call and it's now different then
3092 * the old call was freed and another thread running this routine
3093 * has created a call on this channel. One of these two threads
3094 * has a packet for the old call and the code below handles those
3098 MUTEX_ENTER(&call->lock);
3100 /* This packet can't be for this call. If the new call address is
3101 * 0 then no call is running on this channel. If there is a call,
3102 * then since this is a client connection we're getting data for,
3103 * it must be for the previous call.
3105 if (rx_stats_active)
3106 rx_atomic_inc(&rx_stats.spuriousPacketsRead);
3107 MUTEX_ENTER(&rx_refcnt_mutex);
3109 MUTEX_EXIT(&rx_refcnt_mutex);
3114 currentCallNumber = conn->callNumber[channel];
3116 if (type == RX_SERVER_CONNECTION) { /* We're the server */
3117 if (np->header.callNumber < currentCallNumber) {
3118 if (rx_stats_active)
3119 rx_atomic_inc(&rx_stats.spuriousPacketsRead);
3120 #ifdef RX_ENABLE_LOCKS
3122 MUTEX_EXIT(&call->lock);
3124 MUTEX_ENTER(&rx_refcnt_mutex);
3126 MUTEX_EXIT(&rx_refcnt_mutex);
3130 MUTEX_ENTER(&conn->conn_call_lock);
3131 call = rxi_NewCall(conn, channel);
3132 MUTEX_EXIT(&conn->conn_call_lock);
3133 *call->callNumber = np->header.callNumber;
3135 if (np->header.callNumber == 0)
3136 dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%06d len %d\n",
3137 np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port),
3138 np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq,
3139 np->header.flags, np, np->retryTime.sec, np->retryTime.usec, np->length));
3141 call->state = RX_STATE_PRECALL;
3142 clock_GetTime(&call->queueTime);
3143 hzero(call->bytesSent);
3144 hzero(call->bytesRcvd);
3146 * If the number of queued calls exceeds the overload
3147 * threshold then abort this call.
3149 if ((rx_BusyThreshold > 0) &&
3150 (rx_atomic_read(&rx_nWaiting) > rx_BusyThreshold)) {
3151 struct rx_packet *tp;
3153 rxi_CallError(call, rx_BusyError);
3154 tp = rxi_SendCallAbort(call, np, 1, 0);
3155 MUTEX_EXIT(&call->lock);
3156 MUTEX_ENTER(&rx_refcnt_mutex);
3158 MUTEX_EXIT(&rx_refcnt_mutex);
3159 if (rx_stats_active)
3160 rx_atomic_inc(&rx_stats.nBusies);
3163 rxi_KeepAliveOn(call);
3164 } else if (np->header.callNumber != currentCallNumber) {
3165 /* Wait until the transmit queue is idle before deciding
3166 * whether to reset the current call. Chances are that the
3167 * call will be in either DALLY or HOLD state once the TQ_BUSY
3170 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
3171 if (call->state == RX_STATE_ACTIVE) {
3172 rxi_WaitforTQBusy(call);
3174 * If we entered error state while waiting,
3175 * we must call rxi_CallError to permit rxi_ResetCall
3176 * to proceed when the tqWaiter count hits zero.
3179 rxi_CallError(call, call->error);
3180 MUTEX_EXIT(&call->lock);
3181 MUTEX_ENTER(&rx_refcnt_mutex);
3183 MUTEX_EXIT(&rx_refcnt_mutex);
3187 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
3188 /* If the new call cannot be taken right now send a busy and set
3189 * the error condition in this call, so that it terminates as
3190 * quickly as possible */
3191 if (call->state == RX_STATE_ACTIVE) {
3192 struct rx_packet *tp;
3194 rxi_CallError(call, RX_CALL_DEAD);
3195 tp = rxi_SendSpecial(call, conn, np, RX_PACKET_TYPE_BUSY,
3197 MUTEX_EXIT(&call->lock);
3198 MUTEX_ENTER(&rx_refcnt_mutex);
3200 MUTEX_EXIT(&rx_refcnt_mutex);
3203 rxi_ResetCall(call, 0);
3204 *call->callNumber = np->header.callNumber;
3206 if (np->header.callNumber == 0)
3207 dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%06d len %d\n",
3208 np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port),
3209 np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq,
3210 np->header.flags, np, np->retryTime.sec, np->retryTime.usec, np->length));
3212 call->state = RX_STATE_PRECALL;
3213 clock_GetTime(&call->queueTime);
3214 hzero(call->bytesSent);
3215 hzero(call->bytesRcvd);
3217 * If the number of queued calls exceeds the overload
3218 * threshold then abort this call.
3220 if ((rx_BusyThreshold > 0) &&
3221 (rx_atomic_read(&rx_nWaiting) > rx_BusyThreshold)) {
3222 struct rx_packet *tp;
3224 rxi_CallError(call, rx_BusyError);
3225 tp = rxi_SendCallAbort(call, np, 1, 0);
3226 MUTEX_EXIT(&call->lock);
3227 MUTEX_ENTER(&rx_refcnt_mutex);
3229 MUTEX_EXIT(&rx_refcnt_mutex);
3230 if (rx_stats_active)
3231 rx_atomic_inc(&rx_stats.nBusies);
3234 rxi_KeepAliveOn(call);
3236 /* Continuing call; do nothing here. */
3238 } else { /* we're the client */
3239 /* Ignore all incoming acknowledgements for calls in DALLY state */
3240 if (call && (call->state == RX_STATE_DALLY)
3241 && (np->header.type == RX_PACKET_TYPE_ACK)) {
3242 if (rx_stats_active)
3243 rx_atomic_inc(&rx_stats.ignorePacketDally);
3244 #ifdef RX_ENABLE_LOCKS
3246 MUTEX_EXIT(&call->lock);
3249 MUTEX_ENTER(&rx_refcnt_mutex);
3251 MUTEX_EXIT(&rx_refcnt_mutex);
3255 /* Ignore anything that's not relevant to the current call. If there
3256 * isn't a current call, then no packet is relevant. */
3257 if (!call || (np->header.callNumber != currentCallNumber)) {
3258 if (rx_stats_active)
3259 rx_atomic_inc(&rx_stats.spuriousPacketsRead);
3260 #ifdef RX_ENABLE_LOCKS
3262 MUTEX_EXIT(&call->lock);
3265 MUTEX_ENTER(&rx_refcnt_mutex);
3267 MUTEX_EXIT(&rx_refcnt_mutex);
3270 /* If the service security object index stamped in the packet does not
3271 * match the connection's security index, ignore the packet */
3272 if (np->header.securityIndex != conn->securityIndex) {
3273 #ifdef RX_ENABLE_LOCKS
3274 MUTEX_EXIT(&call->lock);
3276 MUTEX_ENTER(&rx_refcnt_mutex);
3278 MUTEX_EXIT(&rx_refcnt_mutex);
3282 /* If we're receiving the response, then all transmit packets are
3283 * implicitly acknowledged. Get rid of them. */
3284 if (np->header.type == RX_PACKET_TYPE_DATA) {
3285 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
3286 /* XXX Hack. Because we must release the global rx lock when
3287 * sending packets (osi_NetSend) we drop all acks while we're
3288 * traversing the tq in rxi_Start sending packets out because
3289 * packets may move to the freePacketQueue as a result of being here!
3290 * So we drop these packets until we're safely out of the
3291 * traversing. Really ugly!
3292 * For fine grain RX locking, we set the acked field in the
3293 * packets and let rxi_Start remove them from the transmit queue.
3295 if (call->flags & RX_CALL_TQ_BUSY) {
3296 #ifdef RX_ENABLE_LOCKS
3297 rxi_SetAcksInTransmitQueue(call);
3299 MUTEX_ENTER(&rx_refcnt_mutex);
3301 MUTEX_EXIT(&rx_refcnt_mutex);
3302 return np; /* xmitting; drop packet */
3305 rxi_ClearTransmitQueue(call, 0);
3307 #else /* AFS_GLOBAL_RXLOCK_KERNEL */
3308 rxi_ClearTransmitQueue(call, 0);
3309 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
3311 if (np->header.type == RX_PACKET_TYPE_ACK) {
3312 /* now check to see if this is an ack packet acknowledging that the
3313 * server actually *lost* some hard-acked data. If this happens we
3314 * ignore this packet, as it may indicate that the server restarted in
3315 * the middle of a call. It is also possible that this is an old ack
3316 * packet. We don't abort the connection in this case, because this
3317 * *might* just be an old ack packet. The right way to detect a server
3318 * restart in the midst of a call is to notice that the server epoch
3320 /* XXX I'm not sure this is exactly right, since tfirst **IS**
3321 * XXX unacknowledged. I think that this is off-by-one, but
3322 * XXX I don't dare change it just yet, since it will
3323 * XXX interact badly with the server-restart detection
3324 * XXX code in receiveackpacket. */
3325 if (ntohl(rx_GetInt32(np, FIRSTACKOFFSET)) < call->tfirst) {
3326 if (rx_stats_active)
3327 rx_atomic_inc(&rx_stats.spuriousPacketsRead);
3328 MUTEX_EXIT(&call->lock);
3329 MUTEX_ENTER(&rx_refcnt_mutex);
3331 MUTEX_EXIT(&rx_refcnt_mutex);
3335 } /* else not a data packet */
3338 osirx_AssertMine(&call->lock, "rxi_ReceivePacket middle");
3339 /* Set remote user defined status from packet */
3340 call->remoteStatus = np->header.userStatus;
3342 /* Note the gap between the expected next packet and the actual
3343 * packet that arrived, when the new packet has a smaller serial number
3344 * than expected. Rioses frequently reorder packets all by themselves,
3345 * so this will be quite important with very large window sizes.
3346 * Skew is checked against 0 here to avoid any dependence on the type of
3347 * inPacketSkew (which may be unsigned). In C, -1 > (unsigned) 0 is always
3349 * The inPacketSkew should be a smoothed running value, not just a maximum. MTUXXX
3350 * see CalculateRoundTripTime for an example of how to keep smoothed values.
3351 * I think using a beta of 1/8 is probably appropriate. 93.04.21
3353 MUTEX_ENTER(&conn->conn_data_lock);
3354 skew = conn->lastSerial - np->header.serial;
3355 conn->lastSerial = np->header.serial;
3356 MUTEX_EXIT(&conn->conn_data_lock);
3358 struct rx_peer *peer;
3360 if (skew > peer->inPacketSkew) {
3361 dpf(("*** In skew changed from %d to %d\n",
3362 peer->inPacketSkew, skew));
3363 peer->inPacketSkew = skew;
3367 /* Now do packet type-specific processing */
3368 switch (np->header.type) {
3369 case RX_PACKET_TYPE_DATA:
3370 np = rxi_ReceiveDataPacket(call, np, 1, socket, host, port, tnop,
3373 case RX_PACKET_TYPE_ACK:
3374 /* Respond immediately to ack packets requesting acknowledgement
3376 if (np->header.flags & RX_REQUEST_ACK) {
3378 (void)rxi_SendCallAbort(call, 0, 1, 0);
3380 (void)rxi_SendAck(call, 0, np->header.serial,
3381 RX_ACK_PING_RESPONSE, 1);
3383 np = rxi_ReceiveAckPacket(call, np, 1);
3385 case RX_PACKET_TYPE_ABORT: {
3386 /* An abort packet: reset the call, passing the error up to the user. */
3387 /* What if error is zero? */
3388 /* What if the error is -1? the application will treat it as a timeout. */
3389 afs_int32 errdata = ntohl(*(afs_int32 *) rx_DataOf(np));
3390 dpf(("rxi_ReceivePacket ABORT rx_DataOf = %d\n", errdata));
3391 rxi_CallError(call, errdata);
3392 MUTEX_EXIT(&call->lock);
3393 MUTEX_ENTER(&rx_refcnt_mutex);
3395 MUTEX_EXIT(&rx_refcnt_mutex);
3396 return np; /* xmitting; drop packet */
3398 case RX_PACKET_TYPE_BUSY: {
3399 struct clock busyTime;
3401 clock_GetTime(&busyTime);
3403 MUTEX_EXIT(&call->lock);
3405 MUTEX_ENTER(&conn->conn_call_lock);
3406 MUTEX_ENTER(&call->lock);
3407 conn->lastBusy[call->channel] = busyTime.sec;
3408 call->flags |= RX_CALL_PEER_BUSY;
3409 MUTEX_EXIT(&call->lock);
3410 MUTEX_EXIT(&conn->conn_call_lock);
3412 MUTEX_ENTER(&rx_refcnt_mutex);
3414 MUTEX_EXIT(&rx_refcnt_mutex);
3418 case RX_PACKET_TYPE_ACKALL:
3419 /* All packets acknowledged, so we can drop all packets previously
3420 * readied for sending */
3421 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
3422 /* XXX Hack. Because we can't release the global rx lock when
3423 * sending packets (osi_NetSend), we drop all ack pkts while we're
3424 * traversing the tq in rxi_Start sending packets out because
3425 * packets may move to the freePacketQueue as a result of being
3426 * here! So we drop these packets until we're safely out of the
3427 * traversing. Really ugly!
3428 * For fine grain RX locking, we set the acked field in the packets
3429 * and let rxi_Start remove the packets from the transmit queue.
3431 if (call->flags & RX_CALL_TQ_BUSY) {
3432 #ifdef RX_ENABLE_LOCKS
3433 rxi_SetAcksInTransmitQueue(call);
3435 #else /* RX_ENABLE_LOCKS */
3436 MUTEX_EXIT(&call->lock);
3437 MUTEX_ENTER(&rx_refcnt_mutex);
3439 MUTEX_EXIT(&rx_refcnt_mutex);
3440 return np; /* xmitting; drop packet */
3441 #endif /* RX_ENABLE_LOCKS */
3443 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
3444 rxi_ClearTransmitQueue(call, 0);
3447 /* Should not reach here, unless the peer is broken: send an abort
3449 rxi_CallError(call, RX_PROTOCOL_ERROR);
3450 np = rxi_SendCallAbort(call, np, 1, 0);
3453 /* Note when this last legitimate packet was received, for keep-alive
3454 * processing. Note, we delay getting the time until now in the hope that
3455 * the packet will be delivered to the user before any get time is required
3456 * (if not, then the time won't actually be re-evaluated here). */
3457 call->lastReceiveTime = clock_Sec();
3458 /* we've received a legit packet, so the channel is not busy */
3459 call->flags &= ~RX_CALL_PEER_BUSY;
3460 MUTEX_EXIT(&call->lock);
3461 MUTEX_ENTER(&rx_refcnt_mutex);
3463 MUTEX_EXIT(&rx_refcnt_mutex);
3467 /* return true if this is an "interesting" connection from the point of view
3468 of someone trying to debug the system */
3470 rxi_IsConnInteresting(struct rx_connection *aconn)
3473 struct rx_call *tcall;
3475 if (aconn->flags & (RX_CONN_MAKECALL_WAITING | RX_CONN_DESTROY_ME))
3478 for (i = 0; i < RX_MAXCALLS; i++) {
3479 tcall = aconn->call[i];
3481 if ((tcall->state == RX_STATE_PRECALL)
3482 || (tcall->state == RX_STATE_ACTIVE))
3484 if ((tcall->mode == RX_MODE_SENDING)
3485 || (tcall->mode == RX_MODE_RECEIVING))
3493 /* if this is one of the last few packets AND it wouldn't be used by the
3494 receiving call to immediately satisfy a read request, then drop it on
3495 the floor, since accepting it might prevent a lock-holding thread from
3496 making progress in its reading. If a call has been cleared while in
3497 the precall state then ignore all subsequent packets until the call
3498 is assigned to a thread. */
3501 TooLow(struct rx_packet *ap, struct rx_call *acall)
3505 MUTEX_ENTER(&rx_quota_mutex);
3506 if (((ap->header.seq != 1) && (acall->flags & RX_CALL_CLEARED)
3507 && (acall->state == RX_STATE_PRECALL))
3508 || ((rx_nFreePackets < rxi_dataQuota + 2)
3509 && !((ap->header.seq < acall->rnext + rx_initSendWindow)
3510 && (acall->flags & RX_CALL_READER_WAIT)))) {
3513 MUTEX_EXIT(&rx_quota_mutex);
3519 rxi_CheckReachEvent(struct rxevent *event, void *arg1, void *arg2)
3521 struct rx_connection *conn = arg1;
3522 struct rx_call *acall = arg2;
3523 struct rx_call *call = acall;
3524 struct clock when, now;
3527 MUTEX_ENTER(&conn->conn_data_lock);
3528 conn->checkReachEvent = NULL;
3529 waiting = conn->flags & RX_CONN_ATTACHWAIT;
3531 MUTEX_ENTER(&rx_refcnt_mutex);
3533 MUTEX_EXIT(&rx_refcnt_mutex);
3535 MUTEX_EXIT(&conn->conn_data_lock);
3539 MUTEX_ENTER(&conn->conn_call_lock);
3540 MUTEX_ENTER(&conn->conn_data_lock);
3541 for (i = 0; i < RX_MAXCALLS; i++) {
3542 struct rx_call *tc = conn->call[i];
3543 if (tc && tc->state == RX_STATE_PRECALL) {
3549 /* Indicate that rxi_CheckReachEvent is no longer running by
3550 * clearing the flag. Must be atomic under conn_data_lock to
3551 * avoid a new call slipping by: rxi_CheckConnReach holds
3552 * conn_data_lock while checking RX_CONN_ATTACHWAIT.
3554 conn->flags &= ~RX_CONN_ATTACHWAIT;
3555 MUTEX_EXIT(&conn->conn_data_lock);
3556 MUTEX_EXIT(&conn->conn_call_lock);
3561 MUTEX_ENTER(&call->lock);
3562 rxi_SendAck(call, NULL, 0, RX_ACK_PING, 0);
3564 MUTEX_EXIT(&call->lock);
3566 clock_GetTime(&now);
3568 when.sec += RX_CHECKREACH_TIMEOUT;
3569 MUTEX_ENTER(&conn->conn_data_lock);
3570 if (!conn->checkReachEvent) {
3571 MUTEX_ENTER(&rx_refcnt_mutex);
3573 MUTEX_EXIT(&rx_refcnt_mutex);
3574 conn->checkReachEvent =
3575 rxevent_PostNow(&when, &now, rxi_CheckReachEvent, conn,
3578 MUTEX_EXIT(&conn->conn_data_lock);
3584 rxi_CheckConnReach(struct rx_connection *conn, struct rx_call *call)
3586 struct rx_service *service = conn->service;
3587 struct rx_peer *peer = conn->peer;
3588 afs_uint32 now, lastReach;
3590 if (service->checkReach == 0)
3594 MUTEX_ENTER(&peer->peer_lock);
3595 lastReach = peer->lastReachTime;
3596 MUTEX_EXIT(&peer->peer_lock);
3597 if (now - lastReach < RX_CHECKREACH_TTL)
3600 MUTEX_ENTER(&conn->conn_data_lock);
3601 if (conn->flags & RX_CONN_ATTACHWAIT) {
3602 MUTEX_EXIT(&conn->conn_data_lock);
3605 conn->flags |= RX_CONN_ATTACHWAIT;
3606 MUTEX_EXIT(&conn->conn_data_lock);
3607 if (!conn->checkReachEvent)
3608 rxi_CheckReachEvent(NULL, conn, call);
3613 /* try to attach call, if authentication is complete */
3615 TryAttach(struct rx_call *acall, osi_socket socket,
3616 int *tnop, struct rx_call **newcallp,
3619 struct rx_connection *conn = acall->conn;
3621 if (conn->type == RX_SERVER_CONNECTION
3622 && acall->state == RX_STATE_PRECALL) {
3623 /* Don't attach until we have any req'd. authentication. */
3624 if (RXS_CheckAuthentication(conn->securityObject, conn) == 0) {
3625 if (reachOverride || rxi_CheckConnReach(conn, acall) == 0)
3626 rxi_AttachServerProc(acall, socket, tnop, newcallp);
3627 /* Note: this does not necessarily succeed; there
3628 * may not be any proc available
3631 rxi_ChallengeOn(acall->conn);
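/*
 * Illustrative sketch, not original code: the reachability gate used by
 * TryAttach is opt-in per service via the checkReach field (the
 * rx_SetCheckReach macro, where available, sets the same field). The
 * rx_NewService arguments shown are placeholders.
 */
#if 0 /* example only */
struct rx_service *svc;

svc = rx_NewService(0, EXAMPLE_SERVICE_ID, "example", secObjs, nSec,
                    ExampleExecuteRequest);
svc->checkReach = 1;    /* ping unproven clients before attaching */
#endif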
3636 /* A data packet has been received off the interface. This packet is
3637 * appropriate to the call (the call is in the right state, etc.). This
3638 * routine can return a packet to the caller, for re-use */
3641 rxi_ReceiveDataPacket(struct rx_call *call,
3642 struct rx_packet *np, int istack,
3643 osi_socket socket, afs_uint32 host, u_short port,
3644 int *tnop, struct rx_call **newcallp)
3646 int ackNeeded = 0; /* 0 means no, otherwise ack_reason */
3651 afs_uint32 serial=0, flags=0;
3653 struct rx_packet *tnp;
3654 struct clock when, now;
3655 if (rx_stats_active)
3656 rx_atomic_inc(&rx_stats.dataPacketsRead);
3659 /* If there are no packet buffers, drop this new packet, unless we can find
3660 * packet buffers from inactive calls */
3662 && (rxi_OverQuota(RX_PACKET_CLASS_RECEIVE) || TooLow(np, call))) {
3663 MUTEX_ENTER(&rx_freePktQ_lock);
3664 rxi_NeedMorePackets = TRUE;
3665 MUTEX_EXIT(&rx_freePktQ_lock);
3666 if (rx_stats_active)
3667 rx_atomic_inc(&rx_stats.noPacketBuffersOnRead);
3668 call->rprev = np->header.serial;
3669 rxi_calltrace(RX_TRACE_DROP, call);
3670 dpf(("packet %"AFS_PTR_FMT" dropped on receipt - quota problems\n", np));
3672 rxi_ClearReceiveQueue(call);
3673 clock_GetTime(&now);
3675 clock_Add(&when, &rx_softAckDelay);
3676 if (!call->delayedAckEvent
3677 || clock_Gt(&call->delayedAckEvent->eventTime, &when)) {
3678 rxevent_Cancel(call->delayedAckEvent, call,
3679 RX_CALL_REFCOUNT_DELAY);
3680 MUTEX_ENTER(&rx_refcnt_mutex);
3681 CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
3682 MUTEX_EXIT(&rx_refcnt_mutex);
3684 call->delayedAckEvent =
3685 rxevent_PostNow(&when, &now, rxi_SendDelayedAck, call, 0);
3687 /* we've damaged this call already, might as well do it in. */
3693 * New in AFS 3.5, if the RX_JUMBO_PACKET flag is set then this
3694 * packet is one of several packets transmitted as a single
3695 * datagram. Do not send any soft or hard acks until all packets
3696 * in a jumbogram have been processed. Send negative acks right away.
3698 for (isFirst = 1, tnp = NULL; isFirst || tnp; isFirst = 0) {
3699 /* tnp is non-null when there are more packets in the
3700 * current jumbogram */
3707 seq = np->header.seq;
3708 serial = np->header.serial;
3709 flags = np->header.flags;
3711 /* If the call is in an error state, send an abort message */
3713 return rxi_SendCallAbort(call, np, istack, 0);
3715 /* The RX_JUMBO_PACKET is set in all but the last packet in each
3716 * AFS 3.5 jumbogram. */
3717 if (flags & RX_JUMBO_PACKET) {
3718 tnp = rxi_SplitJumboPacket(np, host, port, isFirst);
3723 if (np->header.spare != 0) {
3724 MUTEX_ENTER(&call->conn->conn_data_lock);
3725 call->conn->flags |= RX_CONN_USING_PACKET_CKSUM;
3726 MUTEX_EXIT(&call->conn->conn_data_lock);
3729 /* The usual case is that this is the expected next packet */
3730 if (seq == call->rnext) {
3732 /* Check to make sure it is not a duplicate of one already queued */
3733 if (queue_IsNotEmpty(&call->rq)
3734 && queue_First(&call->rq, rx_packet)->header.seq == seq) {
3735 if (rx_stats_active)
3736 rx_atomic_inc(&rx_stats.dupPacketsRead);
3737 dpf(("packet %"AFS_PTR_FMT" dropped on receipt - duplicate\n", np));
3738 rxevent_Cancel(call->delayedAckEvent, call,
3739 RX_CALL_REFCOUNT_DELAY);
3740 np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
3746 /* It's the next packet. Stick it on the receive queue
3747 * for this call. Set newPackets to make sure we wake
3748 * the reader once all packets have been processed */
3749 #ifdef RX_TRACK_PACKETS
3750 np->flags |= RX_PKTFLAG_RQ;
3752 queue_Prepend(&call->rq, np);
3753 #ifdef RXDEBUG_PACKET
3755 #endif /* RXDEBUG_PACKET */
3757 np = NULL; /* We can't use this anymore */
3760 /* If an ack is requested then set a flag to make sure we
3761 * send an acknowledgement for this packet */
3762 if (flags & RX_REQUEST_ACK) {
3763 ackNeeded = RX_ACK_REQUESTED;
3766 /* Keep track of whether we have received the last packet */
3767 if (flags & RX_LAST_PACKET) {
3768 call->flags |= RX_CALL_HAVE_LAST;
3772 /* Check whether we have all of the packets for this call */
3773 if (call->flags & RX_CALL_HAVE_LAST) {
3774 afs_uint32 tseq; /* temporary sequence number */
3775 struct rx_packet *tp; /* Temporary packet pointer */
3776 struct rx_packet *nxp; /* Next pointer, for queue_Scan */
3778 for (tseq = seq, queue_Scan(&call->rq, tp, nxp, rx_packet)) {
3779 if (tseq != tp->header.seq)
3781 if (tp->header.flags & RX_LAST_PACKET) {
3782 call->flags |= RX_CALL_RECEIVE_DONE;
3789 /* Provide asynchronous notification for those who want it
3790 * (e.g. multi rx) */
3791 if (call->arrivalProc) {
3792 (*call->arrivalProc) (call, call->arrivalProcHandle,
3793 call->arrivalProcArg);
3794 call->arrivalProc = (void (*)())0;
3797 /* Update last packet received */
3800 /* If there is no server process serving this call, grab
3801 * one, if available. We only need to do this once. If a
3802 * server thread is available, this thread becomes a server
3803 * thread and the server thread becomes a listener thread. */
3805 TryAttach(call, socket, tnop, newcallp, 0);
3808 /* This is not the expected next packet. */
3810 /* Determine whether this is a new or old packet, and if it's
3811 * a new one, whether it fits into the current receive window.
3812 * Also figure out whether the packet was delivered in sequence.
3813 * We use the prev variable to determine whether the new packet
3814 * is the successor of its immediate predecessor in the
3815 * receive queue, and the missing flag to determine whether
3816 * any of this packet's predecessors are missing. */
3818 afs_uint32 prev; /* "Previous packet" sequence number */
3819 struct rx_packet *tp; /* Temporary packet pointer */
3820 struct rx_packet *nxp; /* Next pointer, for queue_Scan */
3821 int missing; /* Are any predecessors missing? */
3823 /* If the new packet's sequence number has been sent to the
3824 * application already, then this is a duplicate */
3825 if (seq < call->rnext) {
3826 if (rx_stats_active)
3827 rx_atomic_inc(&rx_stats.dupPacketsRead);
3828 rxevent_Cancel(call->delayedAckEvent, call,
3829 RX_CALL_REFCOUNT_DELAY);
3830 np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
3836 /* If the sequence number is greater than what can be
3837 * accommodated by the current window, then send a negative
3838 * acknowledge and drop the packet */
3839 if ((call->rnext + call->rwind) <= seq) {
3840 rxevent_Cancel(call->delayedAckEvent, call,
3841 RX_CALL_REFCOUNT_DELAY);
3842 np = rxi_SendAck(call, np, serial, RX_ACK_EXCEEDS_WINDOW,
3849 /* Look for the packet in the queue of old received packets */
3850 for (prev = call->rnext - 1, missing =
3851 0, queue_Scan(&call->rq, tp, nxp, rx_packet)) {
3852 /*Check for duplicate packet */
3853 if (seq == tp->header.seq) {
3854 if (rx_stats_active)
3855 rx_atomic_inc(&rx_stats.dupPacketsRead);
3856 rxevent_Cancel(call->delayedAckEvent, call,
3857 RX_CALL_REFCOUNT_DELAY);
3858 np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE,
3864 /* If we find a higher sequence packet, break out and
3865 * insert the new packet here. */
3866 if (seq < tp->header.seq)
3868 /* Check for missing packet */
3869 if (tp->header.seq != prev + 1) {
3873 prev = tp->header.seq;
3876 /* Keep track of whether we have received the last packet. */
3877 if (flags & RX_LAST_PACKET) {
3878 call->flags |= RX_CALL_HAVE_LAST;
3881 /* It's within the window: add it to the receive queue.
3882 * tp is left by the previous loop either pointing at the
3883 * packet before which to insert the new packet, or at the
3884 * queue head if the queue is empty or the packet should be
3886 #ifdef RX_TRACK_PACKETS
3887 np->flags |= RX_PKTFLAG_RQ;
3889 #ifdef RXDEBUG_PACKET
3891 #endif /* RXDEBUG_PACKET */
3892 queue_InsertBefore(tp, np);
3896 /* Check whether we have all of the packets for this call */
3897 if ((call->flags & RX_CALL_HAVE_LAST)
3898 && !(call->flags & RX_CALL_RECEIVE_DONE)) {
3899 afs_uint32 tseq; /* temporary sequence number */
3902 call->rnext, queue_Scan(&call->rq, tp, nxp, rx_packet)) {
3903 if (tseq != tp->header.seq)
3905 if (tp->header.flags & RX_LAST_PACKET) {
3906 call->flags |= RX_CALL_RECEIVE_DONE;
3913 /* We need to send an ack if the packet is out of sequence,
3914 * or if an ack was requested by the peer. */
3915 if (seq != prev + 1 || missing) {
3916 ackNeeded = RX_ACK_OUT_OF_SEQUENCE;
3917 } else if (flags & RX_REQUEST_ACK) {
3918 ackNeeded = RX_ACK_REQUESTED;
3921 /* Acknowledge the last packet for each call */
3922 if (flags & RX_LAST_PACKET) {
3933 * If the receiver is waiting for an iovec, fill the iovec
3934 * using the data from the receive queue */
3935 if (call->flags & RX_CALL_IOVEC_WAIT) {
3936 didHardAck = rxi_FillReadVec(call, serial);
3937 /* the call may have been aborted */
3946 /* Wakeup the reader if any */
3947 if ((call->flags & RX_CALL_READER_WAIT)
3948 && (!(call->flags & RX_CALL_IOVEC_WAIT) || !(call->iovNBytes)
3949 || (call->iovNext >= call->iovMax)
3950 || (call->flags & RX_CALL_RECEIVE_DONE))) {
3951 call->flags &= ~RX_CALL_READER_WAIT;
3952 #ifdef RX_ENABLE_LOCKS
3953 CV_BROADCAST(&call->cv_rq);
3955 osi_rxWakeup(&call->rq);
3961 * Send an ack when requested by the peer, or once every
3962 * rxi_SoftAckRate packets until the last packet has been
3963 * received. Always send a soft ack for the last packet in
3964 * the server's reply.
3966 * If there was more than one packet received for the call
3967 * and we have received all of them, immediately send an
3968 * RX_PACKET_TYPE_ACKALL packet so that the peer can empty
3969 * its packet transmit queue and cancel all resend events.
3971 * When there is only one packet in the call there is a
3972 * chance that we can race with Ping ACKs sent as part of
3973 * connection establishment if the udp packets are delivered
3974 * out of order. When the race occurs, a two second delay
3975 * will occur while waiting for a new Ping ACK to be sent.
3977 if (!isFirst && (call->flags & RX_CALL_RECEIVE_DONE)) {
3978 rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
3979 rxi_AckAll(NULL, call, 0);
3980 } else if (ackNeeded) {
3981 rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
3982 np = rxi_SendAck(call, np, serial, ackNeeded, istack);
3983 } else if (call->nSoftAcks > (u_short) rxi_SoftAckRate) {
3984 rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
3985 np = rxi_SendAck(call, np, serial, RX_ACK_IDLE, istack);
3986 } else if (call->nSoftAcks) {
3987 clock_GetTime(&now);
3989 if (haveLast && !(flags & RX_CLIENT_INITIATED)) {
3990 clock_Add(&when, &rx_lastAckDelay);
3992 clock_Add(&when, &rx_softAckDelay);
3994 if (!call->delayedAckEvent
3995 || clock_Gt(&call->delayedAckEvent->eventTime, &when)) {
3996 rxevent_Cancel(call->delayedAckEvent, call,
3997 RX_CALL_REFCOUNT_DELAY);
3998 MUTEX_ENTER(&rx_refcnt_mutex);
3999 CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
4000 MUTEX_EXIT(&rx_refcnt_mutex);
4001 call->delayedAckEvent =
4002 rxevent_PostNow(&when, &now, rxi_SendDelayedAck, call, 0);
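/* Editorial sketch (not from the original source): the ack policy above
 * can be read as a decision ladder --
 *   call complete (and not the first packet)     -> send ACKALL immediately
 *   out of sequence, or ack explicitly requested -> hard ack now
 *   soft-ack budget (rxi_SoftAckRate) exceeded   -> RX_ACK_IDLE now
 *   otherwise -> schedule a delayed ack: rx_lastAckDelay for the last
 *   packet of a server reply, rx_softAckDelay for everything else.
 */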
4010 static void rxi_ComputeRate();
4014 rxi_UpdatePeerReach(struct rx_connection *conn, struct rx_call *acall)
4016 struct rx_peer *peer = conn->peer;
4018 MUTEX_ENTER(&peer->peer_lock);
4019 peer->lastReachTime = clock_Sec();
4020 MUTEX_EXIT(&peer->peer_lock);
4022 MUTEX_ENTER(&conn->conn_data_lock);
4023 if (conn->flags & RX_CONN_ATTACHWAIT) {
4026 conn->flags &= ~RX_CONN_ATTACHWAIT;
4027 MUTEX_EXIT(&conn->conn_data_lock);
4029 for (i = 0; i < RX_MAXCALLS; i++) {
4030 struct rx_call *call = conn->call[i];
4033 MUTEX_ENTER(&call->lock);
4034 /* tnop can be null if newcallp is null */
4035 TryAttach(call, (osi_socket) - 1, NULL, NULL, 1);
4037 MUTEX_EXIT(&call->lock);
4041 MUTEX_EXIT(&conn->conn_data_lock);
4044 #if defined(RXDEBUG) && defined(AFS_NT40_ENV)
4046 rx_ack_reason(int reason)
4049 case RX_ACK_REQUESTED:
4051 case RX_ACK_DUPLICATE:
4053 case RX_ACK_OUT_OF_SEQUENCE:
4055 case RX_ACK_EXCEEDS_WINDOW:
4057 case RX_ACK_NOSPACE:
4061 case RX_ACK_PING_RESPONSE:
4074 /* The real smarts of the whole thing. */
4076 rxi_ReceiveAckPacket(struct rx_call *call, struct rx_packet *np,
4079 struct rx_ackPacket *ap;
4081 struct rx_packet *tp;
4082 struct rx_packet *nxp; /* Next packet pointer for queue_Scan */
4083 struct rx_connection *conn = call->conn;
4084 struct rx_peer *peer = conn->peer;
4085 struct clock now; /* Current time, for RTT calculations */
4089 /* because some CMs are bogus and send weird values for this, force it to zero. */
4090 afs_uint32 skew = 0;
4095 int newAckCount = 0;
4096 int maxDgramPackets = 0; /* Set if peer supports AFS 3.5 jumbo datagrams */
4097 int pktsize = 0; /* Set if we need to update the peer mtu */
4098 int conn_data_locked = 0;
4100 if (rx_stats_active)
4101 rx_atomic_inc(&rx_stats.ackPacketsRead);
4102 ap = (struct rx_ackPacket *)rx_DataOf(np);
4103 nbytes = rx_Contiguous(np) - (int)((ap->acks) - (u_char *) ap);
4105 return np; /* truncated ack packet */
4107 /* depends on ack packet struct */
4108 nAcks = MIN((unsigned)nbytes, (unsigned)ap->nAcks);
4109 first = ntohl(ap->firstPacket);
4110 prev = ntohl(ap->previousPacket);
4111 serial = ntohl(ap->serial);
4112 /* temporarily disabled -- needs to degrade over time
4113 * skew = ntohs(ap->maxSkew); */
4115 /* Ignore ack packets received out of order */
4116 if (first < call->tfirst ||
4117 (first == call->tfirst && prev < call->tprev)) {
4123 if (np->header.flags & RX_SLOW_START_OK) {
4124 call->flags |= RX_CALL_SLOW_START_OK;
4127 if (ap->reason == RX_ACK_PING_RESPONSE)
4128 rxi_UpdatePeerReach(conn, call);
4130 if (conn->lastPacketSizeSeq) {
4131 MUTEX_ENTER(&conn->conn_data_lock);
4132 conn_data_locked = 1;
4133 if ((first > conn->lastPacketSizeSeq) && (conn->lastPacketSize)) {
4134 pktsize = conn->lastPacketSize;
4135 conn->lastPacketSize = conn->lastPacketSizeSeq = 0;
4138 if ((ap->reason == RX_ACK_PING_RESPONSE) && (conn->lastPingSizeSer)) {
4139 if (!conn_data_locked) {
4140 MUTEX_ENTER(&conn->conn_data_lock);
4141 conn_data_locked = 1;
4143 if ((conn->lastPingSizeSer == serial) && (conn->lastPingSize)) {
4144 /* process mtu ping ack */
4145 pktsize = conn->lastPingSize;
4146 conn->lastPingSizeSer = conn->lastPingSize = 0;
4150 if (conn_data_locked) {
4151 MUTEX_EXIT(&conn->conn_data_lock);
4152 conn_data_locked = 0;
4156 if (rxdebug_active) {
4160 len = _snprintf(msg, sizeof(msg),
4161 "tid[%d] RACK: reason %s serial %u previous %u seq %u skew %d first %u acks %u space %u ",
4162 GetCurrentThreadId(), rx_ack_reason(ap->reason),
4163 ntohl(ap->serial), ntohl(ap->previousPacket),
4164 (unsigned int)np->header.seq, (unsigned int)skew,
4165 ntohl(ap->firstPacket), ap->nAcks, ntohs(ap->bufferSpace) );
4169 for (offset = 0; offset < nAcks && len < sizeof(msg); offset++)
4170 msg[len++] = (ap->acks[offset] == RX_ACK_TYPE_NACK ? '-' : '*');
4174 OutputDebugString(msg);
4176 #else /* AFS_NT40_ENV */
4179 "RACK: reason %x previous %u seq %u serial %u skew %d first %u",
4180 ap->reason, ntohl(ap->previousPacket),
4181 (unsigned int)np->header.seq, (unsigned int)serial,
4182 (unsigned int)skew, ntohl(ap->firstPacket));
4185 for (offset = 0; offset < nAcks; offset++)
4186 putc(ap->acks[offset] == RX_ACK_TYPE_NACK ? '-' : '*',
4191 #endif /* AFS_NT40_ENV */
4194 MUTEX_ENTER(&peer->peer_lock);
4197 * Start somewhere. Can't assume we can send what we can receive,
4198 * but we are clearly receiving.
4200 if (!peer->maxPacketSize)
4201 peer->maxPacketSize = RX_MIN_PACKET_SIZE+RX_IPUDP_SIZE;
4203 if (pktsize > peer->maxPacketSize) {
4204 peer->maxPacketSize = pktsize;
4205 if ((pktsize-RX_IPUDP_SIZE > peer->ifMTU)) {
4206 peer->ifMTU=pktsize-RX_IPUDP_SIZE;
4207 peer->natMTU = rxi_AdjustIfMTU(peer->ifMTU);
4208 rxi_ScheduleGrowMTUEvent(call, 1);
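/* Editorial note with illustrative numbers (not from the source): if a
 * padded MTU probe of pktsize = 1444 bytes is acked and RX_IPUDP_SIZE
 * is 28 (IPv4 + UDP headers), ifMTU grows to 1416 and another grow-MTU
 * event is scheduled to keep probing the path MTU upward. */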
4213 /* Update the outgoing packet skew value to the latest value of
4214 * the peer's incoming packet skew value. The ack packet, of
4215 * course, could arrive out of order, but that won't affect things
4217 peer->outPacketSkew = skew;
4220 clock_GetTime(&now);
4222 /* The transmit queue splits into 4 sections.
4224 * The first section is packets which have now been acknowledged
4225 * by a window size change in the ack. These have reached the
4226 * application layer, and may be discarded. These are packets
4227 * with sequence numbers < ap->firstPacket.
4229 * The second section is packets which have sequence numbers in
4230 * the range ap->firstPacket to ap->firstPacket + ap->nAcks. The
4231 * contents of the packet's ack array determine whether these
4232 * packets are acknowledged or not.
4234 * The third section is packets which fall above the range
4235 * addressed in the ack packet. These have not yet been received
4238 * The fourth section is packets which have not yet been transmitted.
4239 * These packets will have a retryTime of 0.
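/* Editorial sketch of the split (illustrative, not from the source):
 *
 *   seq <  firstPacket                implicitly acked; free the packets
 *   firstPacket <= seq < first+nAcks  acked/nacked per the acks[] array
 *   seq >= firstPacket + nAcks        no information; refresh retry times
 *   retryTime == 0                    never transmitted; ignored here
 */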
4242 /* First section - implicitly acknowledged packets that can be
4246 tp = queue_First(&call->tq, rx_packet);
4247 while(!queue_IsEnd(&call->tq, tp) && tp->header.seq < first) {
4248 struct rx_packet *next;
4250 next = queue_Next(tp, rx_packet);
4251 call->tfirst = tp->header.seq + 1;
4253 if (!(tp->flags & RX_PKTFLAG_ACKED)) {
4255 rxi_ComputeRoundTripTime(tp, ap, call->conn->peer, &now);
4259 rxi_ComputeRate(call->conn->peer, call, tp, np, ap->reason);
4262 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
4263 /* XXX Hack. Because we have to release the global rx lock when sending
4264 * packets (osi_NetSend) we drop all acks while we're traversing the tq
4265 * in rxi_Start sending packets out because packets may move to the
4266 * freePacketQueue as result of being here! So we drop these packets until
4267 * we're safely out of the traversing. Really ugly!
4268 * To make it even uglier, if we're using fine grain locking, we can
4269 * set the ack bits in the packets and have rxi_Start remove the packets
4270 * when it's done transmitting.
4272 if (call->flags & RX_CALL_TQ_BUSY) {
4273 #ifdef RX_ENABLE_LOCKS
4274 tp->flags |= RX_PKTFLAG_ACKED;
4275 call->flags |= RX_CALL_TQ_SOME_ACKED;
4276 #else /* RX_ENABLE_LOCKS */
4278 #endif /* RX_ENABLE_LOCKS */
4280 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
4283 #ifdef RX_TRACK_PACKETS
4284 tp->flags &= ~RX_PKTFLAG_TQ;
4286 #ifdef RXDEBUG_PACKET
4288 #endif /* RXDEBUG_PACKET */
4289 rxi_FreePacket(tp); /* rxi_FreePacket mustn't wake up anyone, preemptively. */
4295 /* Give rate detector a chance to respond to ping requests */
4296 if (ap->reason == RX_ACK_PING_RESPONSE) {
4297 rxi_ComputeRate(peer, call, 0, np, ap->reason);
4301 /* N.B. we don't turn off any timers here. They'll go away by themselves, anyway */
4303 /* Second section of the queue - packets for which we are receiving
4306 * Go through the explicit acks/nacks and record the results in
4307 * the waiting packets. These are packets that can't be released
4308 * yet, even with a positive acknowledge. This positive
4309 * acknowledge only means the packet has been received by the
4310 * peer, not that it will be retained long enough to be sent to
4311 * the peer's upper level. In addition, reset the transmit timers
4312 * of any missing packets (those packets that must be missing
4313 * because this packet was out of sequence) */
4315 call->nSoftAcked = 0;
4317 while (!queue_IsEnd(&call->tq, tp) && tp->header.seq < first + nAcks) {
4318 /* Set the acknowledge flag per packet based on the
4319 * information in the ack packet. An acknowledged packet can
4320 * be downgraded when the server has discarded a packet it
4321 * soft acked previously, or when an ack packet is received
4322 * out of sequence. */
4323 if (ap->acks[tp->header.seq - first] == RX_ACK_TYPE_ACK) {
4324 if (!(tp->flags & RX_PKTFLAG_ACKED)) {
4326 tp->flags |= RX_PKTFLAG_ACKED;
4328 rxi_ComputeRoundTripTime(tp, ap, call->conn->peer, &now);
4330 rxi_ComputeRate(call->conn->peer, call, tp, np, ap->reason);
4338 } else /* RX_ACK_TYPE_NACK */ {
4339 tp->flags &= ~RX_PKTFLAG_ACKED;
4343 /* If packet isn't yet acked, and it has been transmitted at least
4344 * once, reset retransmit time using latest timeout
4345 * i.e., this should readjust the retransmit timer for all outstanding
4346 * packets... So we don't just retransmit when we should know better*/
4348 if (!(tp->flags & RX_PKTFLAG_ACKED) && !clock_IsZero(&tp->retryTime)) {
4349 tp->retryTime = tp->timeSent;
4350 clock_Add(&tp->retryTime, &peer->timeout);
4351 /* shift left by eight: each backoff unit adds ~256ms, about a quarter second */
4352 clock_Addmsec(&(tp->retryTime), ((afs_uint32) tp->backoff) << 8);
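/* Worked example (illustrative values): with peer->timeout = 1.0s and
 * tp->backoff = 3, retryTime becomes timeSent + 1.0s + (3 << 8)ms,
 * i.e. timeSent + 1.768s. */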
4355 tp = queue_Next(tp, rx_packet);
4358 /* The third case, packets which the ack packet tells us
4359 * nothing about at all. We just need to adjust the retryTime to match
4360 * any new timeouts that have been calculated for this peer.
4361 * Since packets are sent in order, we can terminate this loop as soon as
4362 * we find a packet that has not been sent.
4365 while (!queue_IsEnd(&call->tq, tp) && !clock_IsZero(&tp->retryTime)) {
4366 tp->retryTime = tp->timeSent;
4367 clock_Add(&tp->retryTime, &peer->timeout);
4368 clock_Addmsec(&tp->retryTime, ((afs_uint32) tp->backoff) << 8);
4369 tp = queue_Next(tp, rx_packet);
4372 /* The fourth set of packets - those which have yet to be transmitted,
4373 * we don't care about at all here */
4375 /* If the window has been extended by this acknowledge packet,
4376 * then wakeup a sender waiting in alloc for window space, or try
4377 * sending packets now, if he's been sitting on packets due to
4378 * lack of window space */
4379 if (call->tnext < (call->tfirst + call->twind)) {
4380 #ifdef RX_ENABLE_LOCKS
4381 CV_SIGNAL(&call->cv_twind);
4383 if (call->flags & RX_CALL_WAIT_WINDOW_ALLOC) {
4384 call->flags &= ~RX_CALL_WAIT_WINDOW_ALLOC;
4385 osi_rxWakeup(&call->twind);
4388 if (call->flags & RX_CALL_WAIT_WINDOW_SEND) {
4389 call->flags &= ~RX_CALL_WAIT_WINDOW_SEND;
4393 /* if the ack packet has a receivelen field hanging off it,
4394 * update our state */
4395 if (np->length >= rx_AckDataSize(ap->nAcks) + 2 * sizeof(afs_int32)) {
4398 /* If the ack packet has a "recommended" size that is less than
4399 * what I am using now, reduce my size to match */
4400 rx_packetread(np, rx_AckDataSize(ap->nAcks) + (int)sizeof(afs_int32),
4401 (int)sizeof(afs_int32), &tSize);
4402 tSize = (afs_uint32) ntohl(tSize);
4403 peer->natMTU = rxi_AdjustIfMTU(MIN(tSize, peer->ifMTU));
4405 /* Get the maximum packet size to send to this peer */
4406 rx_packetread(np, rx_AckDataSize(ap->nAcks), (int)sizeof(afs_int32),
4408 tSize = (afs_uint32) ntohl(tSize);
4409 tSize = (afs_uint32) MIN(tSize, rx_MyMaxSendSize);
4410 tSize = rxi_AdjustMaxMTU(peer->natMTU, tSize);
4412 /* sanity check - peer might have restarted with different params.
4413 * If peer says "send less", dammit, send less... Peer should never
4414 * be unable to accept packets of the size that prior AFS versions would
4415 * send without asking. */
4416 if (peer->maxMTU != tSize) {
4417 if (peer->maxMTU > tSize) /* possible cong., maxMTU decreased */
4419 peer->maxMTU = tSize;
4420 peer->MTU = MIN(tSize, peer->MTU);
4421 call->MTU = MIN(call->MTU, tSize);
4424 if (np->length == rx_AckDataSize(ap->nAcks) + 3 * sizeof(afs_int32)) {
4427 rx_AckDataSize(ap->nAcks) + 2 * (int)sizeof(afs_int32),
4428 (int)sizeof(afs_int32), &tSize);
4429 tSize = (afs_uint32) ntohl(tSize); /* peer's receive window, if it's */
4430 if (tSize < call->twind) { /* smaller than our send */
4431 call->twind = tSize; /* window, we must send less... */
4432 call->ssthresh = MIN(call->twind, call->ssthresh);
4433 call->conn->twind[call->channel] = call->twind;
4436 /* Only send jumbograms to 3.4a fileservers. 3.3a RX gets the
4437 * network MTU confused with the loopback MTU. Calculate the
4438 * maximum MTU here for use in the slow start code below.
4440 /* Did peer restart with older RX version? */
4441 if (peer->maxDgramPackets > 1) {
4442 peer->maxDgramPackets = 1;
4444 } else if (np->length >=
4445 rx_AckDataSize(ap->nAcks) + 4 * sizeof(afs_int32)) {
4448 rx_AckDataSize(ap->nAcks) + 2 * (int)sizeof(afs_int32),
4449 sizeof(afs_int32), &tSize);
4450 tSize = (afs_uint32) ntohl(tSize);
4452 * As of AFS 3.5 we set the send window to match the receive window.
4454 if (tSize < call->twind) {
4455 call->twind = tSize;
4456 call->conn->twind[call->channel] = call->twind;
4457 call->ssthresh = MIN(call->twind, call->ssthresh);
4458 } else if (tSize > call->twind) {
4459 call->twind = tSize;
4460 call->conn->twind[call->channel] = call->twind;
4464 * As of AFS 3.5, a jumbogram is more than one fixed size
4465 * packet transmitted in a single UDP datagram. If the remote
4466 * MTU is smaller than our local MTU then never send a datagram
4467 * larger than the natural MTU.
4470 rx_AckDataSize(ap->nAcks) + 3 * (int)sizeof(afs_int32),
4471 (int)sizeof(afs_int32), &tSize);
4472 maxDgramPackets = (afs_uint32) ntohl(tSize);
4473 maxDgramPackets = MIN(maxDgramPackets, rxi_nDgramPackets);
4475 MIN(maxDgramPackets, (int)(peer->ifDgramPackets));
4476 if (maxDgramPackets > 1) {
4477 peer->maxDgramPackets = maxDgramPackets;
4478 call->MTU = RX_JUMBOBUFFERSIZE + RX_HEADER_SIZE;
4480 peer->maxDgramPackets = 1;
4481 call->MTU = peer->natMTU;
4483 } else if (peer->maxDgramPackets > 1) {
4484 /* Restarted with lower version of RX */
4485 peer->maxDgramPackets = 1;
4487 } else if (peer->maxDgramPackets > 1
4488 || peer->maxMTU != OLD_MAX_PACKET_SIZE) {
4489 /* Restarted with lower version of RX */
4490 peer->maxMTU = OLD_MAX_PACKET_SIZE;
4491 peer->natMTU = OLD_MAX_PACKET_SIZE;
4492 peer->MTU = OLD_MAX_PACKET_SIZE;
4493 peer->maxDgramPackets = 1;
4494 peer->nDgramPackets = 1;
4496 call->MTU = OLD_MAX_PACKET_SIZE;
4501 * Calculate how many datagrams were successfully received after
4502 * the first missing packet and adjust the negative ack counter
4507 nNacked = (nNacked + call->nDgramPackets - 1) / call->nDgramPackets;
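/* This is a ceiling division from packets to datagrams: with
 * nDgramPackets = 4, five nacked packets count as two datagrams. */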
4508 if (call->nNacks < nNacked) {
4509 call->nNacks = nNacked;
4512 call->nAcks += newAckCount;
4516 if (call->flags & RX_CALL_FAST_RECOVER) {
4518 call->cwind = MIN((int)(call->cwind + 1), rx_maxSendWindow);
4520 call->flags &= ~RX_CALL_FAST_RECOVER;
4521 call->cwind = call->nextCwind;
4522 call->nextCwind = 0;
4525 call->nCwindAcks = 0;
4526 } else if (nNacked && call->nNacks >= (u_short) rx_nackThreshold) {
4527 /* Three negative acks in a row trigger congestion recovery */
4528 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
4529 MUTEX_EXIT(&peer->peer_lock);
4530 if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
4531 /* someone else is waiting to start recovery */
4534 call->flags |= RX_CALL_FAST_RECOVER_WAIT;
4535 rxi_WaitforTQBusy(call);
4536 MUTEX_ENTER(&peer->peer_lock);
4537 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
4538 call->flags &= ~RX_CALL_FAST_RECOVER_WAIT;
4539 call->flags |= RX_CALL_FAST_RECOVER;
4540 call->ssthresh = MAX(4, MIN((int)call->cwind, (int)call->twind)) >> 1;
4542 MIN((int)(call->ssthresh + rx_nackThreshold), rx_maxSendWindow);
4543 call->nDgramPackets = MAX(2, (int)call->nDgramPackets) >> 1;
4544 call->nextCwind = call->ssthresh;
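/* Worked example (illustrative): with cwind = 16, twind = 32 and
 * rx_nackThreshold = 3, ssthresh = MAX(4, MIN(16, 32)) >> 1 = 8 and
 * the recovery cwind is MIN(8 + 3, rx_maxSendWindow) = 11 (assuming
 * rx_maxSendWindow >= 11); the datagram count is halved as well. */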
4547 peer->MTU = call->MTU;
4548 peer->cwind = call->nextCwind;
4549 peer->nDgramPackets = call->nDgramPackets;
4551 call->congestSeq = peer->congestSeq;
4552 /* Reset the resend times on the packets that were nacked
4553 * so we will retransmit as soon as the window permits*/
4554 for (acked = 0, queue_ScanBackwards(&call->tq, tp, nxp, rx_packet)) {
4556 if (!(tp->flags & RX_PKTFLAG_ACKED)) {
4557 clock_Zero(&tp->retryTime);
4559 } else if (tp->flags & RX_PKTFLAG_ACKED) {
4564 /* If cwind is smaller than ssthresh, then increase
4565 * the window one packet for each ack we receive (exponential growth).
4567 * If cwind is greater than or equal to ssthresh then increase
4568 * the congestion window by one packet for each cwind acks we
4569 * receive (linear growth). */
4570 if (call->cwind < call->ssthresh) {
4572 MIN((int)call->ssthresh, (int)(call->cwind + newAckCount));
4573 call->nCwindAcks = 0;
4575 call->nCwindAcks += newAckCount;
4576 if (call->nCwindAcks >= call->cwind) {
4577 call->nCwindAcks = 0;
4578 call->cwind = MIN((int)(call->cwind + 1), rx_maxSendWindow);
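/* Net effect (editorial summary): below ssthresh the window grows by
 * one per new ack (slow start); at or above ssthresh it grows by one
 * per full window of acks (congestion avoidance), e.g. a cwind of 8
 * needs 8 more acks to become 9. */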
4582 * If we have received several acknowledgements in a row then
4583 * it is time to increase the size of our datagrams
4585 if ((int)call->nAcks > rx_nDgramThreshold) {
4586 if (peer->maxDgramPackets > 1) {
4587 if (call->nDgramPackets < peer->maxDgramPackets) {
4588 call->nDgramPackets++;
4590 call->MTU = RX_HEADER_SIZE + RX_JUMBOBUFFERSIZE;
4591 } else if (call->MTU < peer->maxMTU) {
4592 /* don't upgrade if we can't handle it */
4593 if ((call->nDgramPackets == 1) && (call->MTU >= peer->ifMTU))
4594 call->MTU = peer->ifMTU;
4596 call->MTU += peer->natMTU;
4597 call->MTU = MIN(call->MTU, peer->maxMTU);
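/* Editorial note: the datagram size thus ratchets up by natMTU-sized
 * steps on sustained ack streaks and is clamped at peer->maxMTU. */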
4604 MUTEX_EXIT(&peer->peer_lock); /* rxi_Start will lock peer. */
4606 /* Servers need to hold the call until all response packets have
4607 * been acknowledged. Soft acks are good enough since clients
4608 * are not allowed to clear their receive queues. */
4609 if (call->state == RX_STATE_HOLD
4610 && call->tfirst + call->nSoftAcked >= call->tnext) {
4611 call->state = RX_STATE_DALLY;
4612 rxi_ClearTransmitQueue(call, 0);
4613 rxevent_Cancel(call->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE);
4614 } else if (!queue_IsEmpty(&call->tq)) {
4615 rxi_Start(0, call, 0, istack);
4620 /* Received a response to a challenge packet */
4622 rxi_ReceiveResponsePacket(struct rx_connection *conn,
4623 struct rx_packet *np, int istack)
4627 /* Ignore the packet if we're the client */
4628 if (conn->type == RX_CLIENT_CONNECTION)
4631 /* If already authenticated, ignore the packet (it's probably a retry) */
4632 if (RXS_CheckAuthentication(conn->securityObject, conn) == 0)
4635 /* Otherwise, have the security object evaluate the response packet */
4636 error = RXS_CheckResponse(conn->securityObject, conn, np);
4638 /* If the response is invalid, reset the connection, sending
4639 * an abort to the peer */
4643 rxi_ConnectionError(conn, error);
4644 MUTEX_ENTER(&conn->conn_data_lock);
4645 np = rxi_SendConnectionAbort(conn, np, istack, 0);
4646 MUTEX_EXIT(&conn->conn_data_lock);
4649 /* If the response is valid, any calls waiting to attach
4650 * servers can now do so */
4653 for (i = 0; i < RX_MAXCALLS; i++) {
4654 struct rx_call *call = conn->call[i];
4656 MUTEX_ENTER(&call->lock);
4657 if (call->state == RX_STATE_PRECALL)
4658 rxi_AttachServerProc(call, (osi_socket) - 1, NULL, NULL);
4659 /* tnop can be null if newcallp is null */
4660 MUTEX_EXIT(&call->lock);
4664 /* Update the peer reachability information, just in case
4665 * some calls went into attach-wait while we were waiting
4666 * for authentication. */
4668 rxi_UpdatePeerReach(conn, NULL);
4673 /* A client has received an authentication challenge: the security
4674 * object is asked to cough up a respectable response packet to send
4675 * back to the server. The server is responsible for retrying the
4676 * challenge if it fails to get a response. */
4679 rxi_ReceiveChallengePacket(struct rx_connection *conn,
4680 struct rx_packet *np, int istack)
4684 /* Ignore the challenge if we're the server */
4685 if (conn->type == RX_SERVER_CONNECTION)
4688 /* Ignore the challenge if the connection is otherwise idle; someone's
4689 * trying to use us as an oracle. */
4690 if (!rxi_HasActiveCalls(conn))
4693 /* Send the security object the challenge packet. It is expected to fill
4694 * in the response. */
4695 error = RXS_GetResponse(conn->securityObject, conn, np);
4697 /* If the security object is unable to return a valid response, reset the
4698 * connection and send an abort to the peer. Otherwise send the response
4699 * packet to the peer connection. */
4701 rxi_ConnectionError(conn, error);
4702 MUTEX_ENTER(&conn->conn_data_lock);
4703 np = rxi_SendConnectionAbort(conn, np, istack, 0);
4704 MUTEX_EXIT(&conn->conn_data_lock);
4706 np = rxi_SendSpecial((struct rx_call *)0, conn, np,
4707 RX_PACKET_TYPE_RESPONSE, NULL, -1, istack);
4713 /* Find an available server process to service the current request in
4714 * the given call structure. If one isn't available, queue up this
4715 * call so it eventually gets one */
4717 rxi_AttachServerProc(struct rx_call *call,
4718 osi_socket socket, int *tnop,
4719 struct rx_call **newcallp)
4721 struct rx_serverQueueEntry *sq;
4722 struct rx_service *service = call->conn->service;
4725 /* May already be attached */
4726 if (call->state == RX_STATE_ACTIVE)
4729 MUTEX_ENTER(&rx_serverPool_lock);
4731 haveQuota = QuotaOK(service);
4732 if ((!haveQuota) || queue_IsEmpty(&rx_idleServerQueue)) {
4733 /* If there are no processes available to service this call,
4734 * put the call on the incoming call queue (unless it's
4735 * already on the queue).
4737 #ifdef RX_ENABLE_LOCKS
4739 ReturnToServerPool(service);
4740 #endif /* RX_ENABLE_LOCKS */
4742 if (!(call->flags & RX_CALL_WAIT_PROC)) {
4743 call->flags |= RX_CALL_WAIT_PROC;
4744 rx_atomic_inc(&rx_nWaiting);
4745 rx_atomic_inc(&rx_nWaited);
4746 rxi_calltrace(RX_CALL_ARRIVAL, call);
4747 SET_CALL_QUEUE_LOCK(call, &rx_serverPool_lock);
4748 queue_Append(&rx_incomingCallQueue, call);
4751 sq = queue_First(&rx_idleServerQueue, rx_serverQueueEntry);
4753 /* If hot threads are enabled, and both newcallp and sq->socketp
4754 * are non-null, then this thread will process the call, and the
4755 * idle server thread will start listening on this thread's socket.
4758 if (rx_enable_hot_thread && newcallp && sq->socketp) {
4761 *sq->socketp = socket;
4762 clock_GetTime(&call->startTime);
4763 MUTEX_ENTER(&rx_refcnt_mutex);
4764 CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
4765 MUTEX_EXIT(&rx_refcnt_mutex);
4769 if (call->flags & RX_CALL_WAIT_PROC) {
4770 /* Conservative: I don't think this should happen */
4771 call->flags &= ~RX_CALL_WAIT_PROC;
4772 if (queue_IsOnQueue(call)) {
4775 rx_atomic_dec(&rx_nWaiting);
4778 call->state = RX_STATE_ACTIVE;
4779 call->mode = RX_MODE_RECEIVING;
4780 #ifdef RX_KERNEL_TRACE
4782 int glockOwner = ISAFS_GLOCK();
4785 afs_Trace3(afs_iclSetp, CM_TRACE_WASHERE, ICL_TYPE_STRING,
4786 __FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER,
4792 if (call->flags & RX_CALL_CLEARED) {
4793 /* send an ack now to start the packet flow up again */
4794 call->flags &= ~RX_CALL_CLEARED;
4795 rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
4797 #ifdef RX_ENABLE_LOCKS
4800 service->nRequestsRunning++;
4801 MUTEX_ENTER(&rx_quota_mutex);
4802 if (service->nRequestsRunning <= service->minProcs)
4805 MUTEX_EXIT(&rx_quota_mutex);
4809 MUTEX_EXIT(&rx_serverPool_lock);
4812 /* Delay the sending of an acknowledge event for a short while, while
4813 * a new call is being prepared (in the case of a client) or a reply
4814 * is being prepared (in the case of a server). Rather than sending
4815 * an ack packet, an ACKALL packet is sent. */
4817 rxi_AckAll(struct rxevent *event, struct rx_call *call, char *dummy)
4819 #ifdef RX_ENABLE_LOCKS
4821 MUTEX_ENTER(&call->lock);
4822 call->delayedAckEvent = NULL;
4823 MUTEX_ENTER(&rx_refcnt_mutex);
4824 CALL_RELE(call, RX_CALL_REFCOUNT_ACKALL);
4825 MUTEX_EXIT(&rx_refcnt_mutex);
4827 rxi_SendSpecial(call, call->conn, (struct rx_packet *)0,
4828 RX_PACKET_TYPE_ACKALL, NULL, 0, 0);
4829 call->flags |= RX_CALL_ACKALL_SENT;
4831 MUTEX_EXIT(&call->lock);
4832 #else /* RX_ENABLE_LOCKS */
4834 call->delayedAckEvent = NULL;
4835 rxi_SendSpecial(call, call->conn, (struct rx_packet *)0,
4836 RX_PACKET_TYPE_ACKALL, NULL, 0, 0);
4837 call->flags |= RX_CALL_ACKALL_SENT;
4838 #endif /* RX_ENABLE_LOCKS */
4842 rxi_SendDelayedAck(struct rxevent *event, void *arg1, void *unused)
4844 struct rx_call *call = arg1;
4845 #ifdef RX_ENABLE_LOCKS
4847 MUTEX_ENTER(&call->lock);
4848 if (event == call->delayedAckEvent)
4849 call->delayedAckEvent = NULL;
4850 MUTEX_ENTER(&rx_refcnt_mutex);
4851 CALL_RELE(call, RX_CALL_REFCOUNT_DELAY);
4852 MUTEX_EXIT(&rx_refcnt_mutex);
4854 (void)rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
4856 MUTEX_EXIT(&call->lock);
4857 #else /* RX_ENABLE_LOCKS */
4859 call->delayedAckEvent = NULL;
4860 (void)rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
4861 #endif /* RX_ENABLE_LOCKS */
4865 #ifdef RX_ENABLE_LOCKS
4866 /* Set ack in all packets in transmit queue. rxi_Start will deal with
4867 * clearing them out.
4870 rxi_SetAcksInTransmitQueue(struct rx_call *call)
4872 struct rx_packet *p, *tp;
4875 for (queue_Scan(&call->tq, p, tp, rx_packet)) {
4876 p->flags |= RX_PKTFLAG_ACKED;
4880 call->flags |= RX_CALL_TQ_CLEARME;
4881 call->flags |= RX_CALL_TQ_SOME_ACKED;
4884 rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
4885 call->tfirst = call->tnext;
4886 call->nSoftAcked = 0;
4888 if (call->flags & RX_CALL_FAST_RECOVER) {
4889 call->flags &= ~RX_CALL_FAST_RECOVER;
4890 call->cwind = call->nextCwind;
4891 call->nextCwind = 0;
4894 CV_SIGNAL(&call->cv_twind);
4896 #endif /* RX_ENABLE_LOCKS */
4898 /* Clear out the transmit queue for the current call (all packets have
4899 * been received by peer) */
4901 rxi_ClearTransmitQueue(struct rx_call *call, int force)
4903 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
4904 struct rx_packet *p, *tp;
4906 if (!force && (call->flags & RX_CALL_TQ_BUSY)) {
4908 for (queue_Scan(&call->tq, p, tp, rx_packet)) {
4909 p->flags |= RX_PKTFLAG_ACKED;
4913 call->flags |= RX_CALL_TQ_CLEARME;
4914 call->flags |= RX_CALL_TQ_SOME_ACKED;
4917 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
4918 #ifdef RXDEBUG_PACKET
4920 #endif /* RXDEBUG_PACKET */
4921 rxi_FreePackets(0, &call->tq);
4922 rxi_WakeUpTransmitQueue(call);
4923 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
4924 call->flags &= ~RX_CALL_TQ_CLEARME;
4926 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
4928 rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
4929 call->tfirst = call->tnext; /* implicitly acknowledge all data already sent */
4930 call->nSoftAcked = 0;
4932 if (call->flags & RX_CALL_FAST_RECOVER) {
4933 call->flags &= ~RX_CALL_FAST_RECOVER;
4934 call->cwind = call->nextCwind;
4936 #ifdef RX_ENABLE_LOCKS
4937 CV_SIGNAL(&call->cv_twind);
4939 osi_rxWakeup(&call->twind);
4944 rxi_ClearReceiveQueue(struct rx_call *call)
4946 if (queue_IsNotEmpty(&call->rq)) {
4949 count = rxi_FreePackets(0, &call->rq);
4950 rx_packetReclaims += count;
4951 #ifdef RXDEBUG_PACKET
4953 if ( call->rqc != 0 )
4954 dpf(("rxi_ClearReceiveQueue call %"AFS_PTR_FMT" rqc %u != 0\n", call, call->rqc));
4956 call->flags &= ~(RX_CALL_RECEIVE_DONE | RX_CALL_HAVE_LAST);
4958 if (call->state == RX_STATE_PRECALL) {
4959 call->flags |= RX_CALL_CLEARED;
4963 /* Send an abort packet for the specified call */
4965 rxi_SendCallAbort(struct rx_call *call, struct rx_packet *packet,
4966 int istack, int force)
4969 struct clock when, now;
4974 /* Clients should never delay abort messages */
4975 if (rx_IsClientConn(call->conn))
4978 if (call->abortCode != call->error) {
4979 call->abortCode = call->error;
4980 call->abortCount = 0;
4983 if (force || rxi_callAbortThreshhold == 0
4984 || call->abortCount < rxi_callAbortThreshhold) {
4985 if (call->delayedAbortEvent) {
4986 rxevent_Cancel(call->delayedAbortEvent, call,
4987 RX_CALL_REFCOUNT_ABORT);
4989 error = htonl(call->error);
4992 rxi_SendSpecial(call, call->conn, packet, RX_PACKET_TYPE_ABORT,
4993 (char *)&error, sizeof(error), istack);
4994 } else if (!call->delayedAbortEvent) {
4995 clock_GetTime(&now);
4997 clock_Addmsec(&when, rxi_callAbortDelay);
4998 MUTEX_ENTER(&rx_refcnt_mutex);
4999 CALL_HOLD(call, RX_CALL_REFCOUNT_ABORT);
5000 MUTEX_EXIT(&rx_refcnt_mutex);
5001 call->delayedAbortEvent =
5002 rxevent_PostNow(&when, &now, rxi_SendDelayedCallAbort, call, 0);
5007 /* Send an abort packet for the specified connection. Packet is an
5008 * optional pointer to a packet that can be used to send the abort.
5009 * Once the number of abort messages reaches the threshold, an
5010 * event is scheduled to send the abort. Setting the force flag
5011 * overrides sending delayed abort messages.
5013 * NOTE: Called with conn_data_lock held. conn_data_lock is dropped
5014 * to send the abort packet.
5017 rxi_SendConnectionAbort(struct rx_connection *conn,
5018 struct rx_packet *packet, int istack, int force)
5021 struct clock when, now;
5026 /* Clients should never delay abort messages */
5027 if (rx_IsClientConn(conn))
5030 if (force || rxi_connAbortThreshhold == 0
5031 || conn->abortCount < rxi_connAbortThreshhold) {
5032 if (conn->delayedAbortEvent) {
5033 rxevent_Cancel(conn->delayedAbortEvent, (struct rx_call *)0, 0);
5035 error = htonl(conn->error);
5037 MUTEX_EXIT(&conn->conn_data_lock);
5039 rxi_SendSpecial((struct rx_call *)0, conn, packet,
5040 RX_PACKET_TYPE_ABORT, (char *)&error,
5041 sizeof(error), istack);
5042 MUTEX_ENTER(&conn->conn_data_lock);
5043 } else if (!conn->delayedAbortEvent) {
5044 clock_GetTime(&now);
5046 clock_Addmsec(&when, rxi_connAbortDelay);
5047 conn->delayedAbortEvent =
5048 rxevent_PostNow(&when, &now, rxi_SendDelayedConnAbort, conn, 0);
5053 /* Associate an error with all of the calls owned by a connection. Called
5054 * with error non-zero. This is only for really fatal things, like
5055 * bad authentication responses. The connection itself is set in
5056 * error at this point, so that future packets received will be
5059 rxi_ConnectionError(struct rx_connection *conn,
5065 dpf(("rxi_ConnectionError conn %"AFS_PTR_FMT" error %d\n", conn, error));
5067 MUTEX_ENTER(&conn->conn_data_lock);
5068 if (conn->challengeEvent)
5069 rxevent_Cancel(conn->challengeEvent, (struct rx_call *)0, 0);
5070 if (conn->natKeepAliveEvent)
5071 rxevent_Cancel(conn->natKeepAliveEvent, (struct rx_call *)0, 0);
5072 if (conn->checkReachEvent) {
5073 rxevent_Cancel(conn->checkReachEvent, (struct rx_call *)0, 0);
5074 conn->checkReachEvent = 0;
5075 conn->flags &= ~RX_CONN_ATTACHWAIT;
5076 MUTEX_ENTER(&rx_refcnt_mutex);
5078 MUTEX_EXIT(&rx_refcnt_mutex);
5080 MUTEX_EXIT(&conn->conn_data_lock);
5081 for (i = 0; i < RX_MAXCALLS; i++) {
5082 struct rx_call *call = conn->call[i];
5084 MUTEX_ENTER(&call->lock);
5085 rxi_CallError(call, error);
5086 MUTEX_EXIT(&call->lock);
5089 conn->error = error;
5090 if (rx_stats_active)
5091 rx_atomic_inc(&rx_stats.fatalErrors);
5096 * Interrupt an in-progress call with the specified error and wake up waiters.
5098 * @param[in] call The call to interrupt
5099 * @param[in] error The error code to send to the peer
5102 rx_InterruptCall(struct rx_call *call, afs_int32 error)
5104 MUTEX_ENTER(&call->lock);
5105 rxi_CallError(call, error);
5106 rxi_SendCallAbort(call, NULL, 0, 1);
5107 MUTEX_EXIT(&call->lock);
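/* Usage sketch (editorial; "mycall" is a hypothetical caller-held
 * reference): another thread can abort a hung call with
 *
 *     rx_InterruptCall(mycall, RX_CALL_DEAD);
 *
 * which records the error on the call and pushes an abort to the peer. */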
5111 rxi_CallError(struct rx_call *call, afs_int32 error)
5114 osirx_AssertMine(&call->lock, "rxi_CallError");
5116 dpf(("rxi_CallError call %"AFS_PTR_FMT" error %d call->error %d\n", call, error, call->error));
5118 error = call->error;
5120 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
5121 if (!((call->flags & RX_CALL_TQ_BUSY) || (call->tqWaiters > 0))) {
5122 rxi_ResetCall(call, 0);
5125 rxi_ResetCall(call, 0);
5127 call->error = error;
5130 /* Reset various fields in a call structure, and wakeup waiting
5131 * processes. Some fields aren't changed: state & mode are not
5132 * touched (these must be set by the caller), and bufptr, nLeft, and
5133 * nFree are not reset, since these fields are manipulated by
5134 * unprotected macros, and may only be reset by non-interrupting code.
5137 /* this code requires that call->conn be set properly as a pre-condition. */
5138 #endif /* ADAPT_WINDOW */
5141 rxi_ResetCall(struct rx_call *call, int newcall)
5144 struct rx_peer *peer;
5145 struct rx_packet *packet;
5147 osirx_AssertMine(&call->lock, "rxi_ResetCall");
5149 dpf(("rxi_ResetCall(call %"AFS_PTR_FMT", newcall %d)\n", call, newcall));
5151 /* Notify anyone who is waiting for asynchronous packet arrival */
5152 if (call->arrivalProc) {
5153 (*call->arrivalProc) (call, call->arrivalProcHandle,
5154 call->arrivalProcArg);
5155 call->arrivalProc = (void (*)())0;
5158 if (call->growMTUEvent)
5159 rxevent_Cancel(call->growMTUEvent, call,
5160 RX_CALL_REFCOUNT_ALIVE);
5162 if (call->delayedAbortEvent) {
5163 rxevent_Cancel(call->delayedAbortEvent, call, RX_CALL_REFCOUNT_ABORT);
5164 packet = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL);
5166 rxi_SendCallAbort(call, packet, 0, 1);
5167 rxi_FreePacket(packet);
5172 * Update the peer with the congestion information in this call
5173 * so other calls on this connection can pick up where this call
5174 * left off. If the congestion sequence numbers don't match then
5175 * another call experienced a retransmission.
5177 peer = call->conn->peer;
5178 MUTEX_ENTER(&peer->peer_lock);
5180 if (call->congestSeq == peer->congestSeq) {
5181 peer->cwind = MAX(peer->cwind, call->cwind);
5182 peer->MTU = MAX(peer->MTU, call->MTU);
5183 peer->nDgramPackets =
5184 MAX(peer->nDgramPackets, call->nDgramPackets);
5187 call->abortCode = 0;
5188 call->abortCount = 0;
5190 if (peer->maxDgramPackets > 1) {
5191 call->MTU = RX_HEADER_SIZE + RX_JUMBOBUFFERSIZE;
5193 call->MTU = peer->MTU;
5195 call->cwind = MIN((int)peer->cwind, (int)peer->nDgramPackets);
5196 call->ssthresh = rx_maxSendWindow;
5197 call->nDgramPackets = peer->nDgramPackets;
5198 call->congestSeq = peer->congestSeq;
5199 MUTEX_EXIT(&peer->peer_lock);
5201 flags = call->flags;
5202 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
5203 rxi_WaitforTQBusy(call);
5204 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
5206 rxi_ClearTransmitQueue(call, 1);
5207 if (call->tqWaiters || (flags & RX_CALL_TQ_WAIT)) {
5208 dpf(("rcall %"AFS_PTR_FMT" has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
5212 if ((flags & RX_CALL_PEER_BUSY)) {
5213 /* The call channel is still busy; resetting the call doesn't change that. */
5215 call->flags |= RX_CALL_PEER_BUSY;
5218 rxi_ClearReceiveQueue(call);
5219 /* why init the queue if you just emptied it? queue_Init(&call->rq); */
5223 call->twind = call->conn->twind[call->channel];
5224 call->rwind = call->conn->rwind[call->channel];
5225 call->nSoftAcked = 0;
5226 call->nextCwind = 0;
5229 call->nCwindAcks = 0;
5230 call->nSoftAcks = 0;
5231 call->nHardAcks = 0;
5233 call->tfirst = call->rnext = call->tnext = 1;
5236 call->lastAcked = 0;
5237 call->localStatus = call->remoteStatus = 0;
5239 if (flags & RX_CALL_READER_WAIT) {
5240 #ifdef RX_ENABLE_LOCKS
5241 CV_BROADCAST(&call->cv_rq);
5243 osi_rxWakeup(&call->rq);
5246 if (flags & RX_CALL_WAIT_PACKETS) {
5247 MUTEX_ENTER(&rx_freePktQ_lock);
5248 rxi_PacketsUnWait(); /* XXX */
5249 MUTEX_EXIT(&rx_freePktQ_lock);
5251 #ifdef RX_ENABLE_LOCKS
5252 CV_SIGNAL(&call->cv_twind);
5254 if (flags & RX_CALL_WAIT_WINDOW_ALLOC)
5255 osi_rxWakeup(&call->twind);
5258 #ifdef RX_ENABLE_LOCKS
5259 /* The following ensures that we don't mess with any queue while some
5260 * other thread might also be doing so. The call_queue_lock field is
5261 * only modified under the call lock. If the call is in the process
5262 * of being removed from a queue, the call is not locked until
5263 * the queue lock is dropped and only then is the call_queue_lock field
5264 * zero'd out. So it's safe to lock the queue if call_queue_lock is set.
5265 * Note that any other routine which removes a call from a queue has to
5266 * obtain the queue lock before examining the queue and removing the call.
5268 if (call->call_queue_lock) {
5269 MUTEX_ENTER(call->call_queue_lock);
5270 if (queue_IsOnQueue(call)) {
5272 if (flags & RX_CALL_WAIT_PROC) {
5273 rx_atomic_dec(&rx_nWaiting);
5276 MUTEX_EXIT(call->call_queue_lock);
5277 CLEAR_CALL_QUEUE_LOCK(call);
5279 #else /* RX_ENABLE_LOCKS */
5280 if (queue_IsOnQueue(call)) {
5282 if (flags & RX_CALL_WAIT_PROC)
5283 rx_atomic_dec(&rx_nWaiting);
5285 #endif /* RX_ENABLE_LOCKS */
5287 rxi_KeepAliveOff(call);
5288 rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
5291 /* Send an acknowledge for the indicated packet (seq,serial) of the
5292 * indicated call, for the indicated reason (reason). This
5293 * acknowledge will specifically acknowledge receiving the packet, and
5294 * will also specify which other packets for this call have been
5295 * received. This routine returns the packet that was used to the
5296 * caller. The caller is responsible for freeing it or re-using it.
5297 * This acknowledgement also returns the highest sequence number
5298 * actually read out by the higher level to the sender; the sender
5299 * promises to keep around packets that have not been read by the
5300 * higher level yet (unless, of course, the sender decides to abort
5301 * the call altogether). Any of p, seq, serial, pflags, or reason may
5302 * be set to zero without ill effect. That is, if they are zero, they
5303 * will not convey any information.
5304 * NOW there is a trailer field, after the ack where it will safely be
5305 * ignored by mundanes, which indicates the maximum size packet this
5306 * host can swallow. */
5308 /* struct rx_packet *optionalPacket;  use to send ack (or null)
5309  * int seq;     Sequence number of the packet we are acking
5310  * int serial;  Serial number of the packet
5311  * int pflags;  Flags field from packet header
5312  * int reason;  Reason an acknowledge was prompted */
5316 rxi_SendAck(struct rx_call *call,
5317 struct rx_packet *optionalPacket, int serial, int reason,
5320 struct rx_ackPacket *ap;
5321 struct rx_packet *rqp;
5322 struct rx_packet *nxp; /* For queue_Scan */
5323 struct rx_packet *p;
5326 afs_uint32 padbytes = 0;
5327 #ifdef RX_ENABLE_TSFPQ
5328 struct rx_ts_info_t * rx_ts_info;
5332 * Open the receive window once a thread starts reading packets
5334 if (call->rnext > 1) {
5335 call->conn->rwind[call->channel] = call->rwind = rx_maxReceiveWindow;
5338 /* Don't attempt to grow MTU if this is a critical ping */
5339 if (reason == RX_ACK_MTU) {
5340 /* keep track of per-call attempts, if we're over max, do in small
5341 * otherwise in larger? set a size to increment by, decrease
5344 if (call->conn->peer->maxPacketSize &&
5345 (call->conn->peer->maxPacketSize < OLD_MAX_PACKET_SIZE
5347 padbytes = call->conn->peer->maxPacketSize+16;
5349 padbytes = call->conn->peer->maxMTU + 128;
5351 /* do always try a minimum size ping */
5352 padbytes = MAX(padbytes, RX_MIN_PACKET_SIZE+RX_IPUDP_SIZE+4);
5354 /* subtract the ack payload */
5355 padbytes -= (rx_AckDataSize(call->rwind) + 4 * sizeof(afs_int32));
5356 reason = RX_ACK_PING;
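/* Editorial note with illustrative numbers: the padding turns this ack
 * into an MTU probe -- if the peer's known-good maxPacketSize is 1444,
 * the probe is padded to 1460 bytes, and a matching ack of this serial
 * lets rxi_ReceiveAckPacket raise maxPacketSize accordingly. */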
5359 call->nHardAcks = 0;
5360 call->nSoftAcks = 0;
5361 if (call->rnext > call->lastAcked)
5362 call->lastAcked = call->rnext;
5366 rx_computelen(p, p->length); /* reset length, you never know */
5367 } /* where that's been... */
5368 #ifdef RX_ENABLE_TSFPQ
5370 RX_TS_INFO_GET(rx_ts_info);
5371 if ((p = rx_ts_info->local_special_packet)) {
5372 rx_computelen(p, p->length);
5373 } else if ((p = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL))) {
5374 rx_ts_info->local_special_packet = p;
5375 } else { /* We won't send the ack, but don't panic. */
5376 return optionalPacket;
5380 else if (!(p = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL))) {
5381 /* We won't send the ack, but don't panic. */
5382 return optionalPacket;
5387 rx_AckDataSize(call->rwind) + 4 * sizeof(afs_int32) -
5390 if (rxi_AllocDataBuf(p, templ, RX_PACKET_CLASS_SPECIAL) > 0) {
5391 #ifndef RX_ENABLE_TSFPQ
5392 if (!optionalPacket)
5395 return optionalPacket;
5397 templ = rx_AckDataSize(call->rwind) + 2 * sizeof(afs_int32);
5398 if (rx_Contiguous(p) < templ) {
5399 #ifndef RX_ENABLE_TSFPQ
5400 if (!optionalPacket)
5403 return optionalPacket;
5408 /* MTUXXX failing to send an ack is very serious. We should */
5409 /* try as hard as possible to send even a partial ack; it's */
5410 /* better than nothing. */
5411 ap = (struct rx_ackPacket *)rx_DataOf(p);
5412 ap->bufferSpace = htonl(0); /* Something should go here, sometime */
5413 ap->reason = reason;
5415 /* The skew computation used to be bogus, I think it's better now. */
5416 /* We should start paying attention to skew. XXX */
5417 ap->serial = htonl(serial);
5418 ap->maxSkew = 0; /* used to be peer->inPacketSkew */
5421 * First packet not yet forwarded to reader. When ACKALL has been
5422 * sent the peer has been told that all received packets will be
5423 * delivered to the reader. The value 'rnext' is used internally
5424 * to refer to the next packet in the receive queue that must be
5425 * delivered to the reader. From the perspective of the peer it
5426 * already has so report the last sequence number plus one if there
5427 * are packets in the receive queue awaiting processing.
5429 if ((call->flags & RX_CALL_ACKALL_SENT) &&
5430 !queue_IsEmpty(&call->rq)) {
5431 ap->firstPacket = htonl(queue_Last(&call->rq, rx_packet)->header.seq + 1);
5433 ap->firstPacket = htonl(call->rnext);
5435 ap->previousPacket = htonl(call->rprev); /* Previous packet received */
5437 /* No fear of running out of ack packet here because there can only be at most
5438 * one window full of unacknowledged packets. The window size must be constrained
5439 * to be less than the maximum ack size, of course. Also, an ack should always
5440 * fit into a single packet -- it should not ever be fragmented. */
5441 for (offset = 0, queue_Scan(&call->rq, rqp, nxp, rx_packet)) {
5442 if (!rqp || !call->rq.next
5443 || (rqp->header.seq > (call->rnext + call->rwind))) {
5444 #ifndef RX_ENABLE_TSFPQ
5445 if (!optionalPacket)
5448 rxi_CallError(call, RX_CALL_DEAD);
5449 return optionalPacket;
5452 while (rqp->header.seq > call->rnext + offset)
5453 ap->acks[offset++] = RX_ACK_TYPE_NACK;
5454 ap->acks[offset++] = RX_ACK_TYPE_ACK;
5456 if ((offset > (u_char) rx_maxReceiveWindow) || (offset > call->rwind)) {
5457 #ifndef RX_ENABLE_TSFPQ
5458 if (!optionalPacket)
5461 rxi_CallError(call, RX_CALL_DEAD);
5462 return optionalPacket;
5467 p->length = rx_AckDataSize(offset) + 4 * sizeof(afs_int32);
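/* Example encoding (illustrative): with rnext = 3 and packets
 * {3, 4, 6} still queued, firstPacket = 3 and the acks array reads
 * ACK, ACK, NACK, ACK -- the NACK marks missing packet 5. */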
5469 /* these are new for AFS 3.3 */
5470 templ = rxi_AdjustMaxMTU(call->conn->peer->ifMTU, rx_maxReceiveSize);
5471 templ = htonl(templ);
5472 rx_packetwrite(p, rx_AckDataSize(offset), sizeof(afs_int32), &templ);
5473 templ = htonl(call->conn->peer->ifMTU);
5474 rx_packetwrite(p, rx_AckDataSize(offset) + sizeof(afs_int32),
5475 sizeof(afs_int32), &templ);
5477 /* new for AFS 3.4 */
5478 templ = htonl(call->rwind);
5479 rx_packetwrite(p, rx_AckDataSize(offset) + 2 * sizeof(afs_int32),
5480 sizeof(afs_int32), &templ);
5482 /* new for AFS 3.5 */
5483 templ = htonl(call->conn->peer->ifDgramPackets);
5484 rx_packetwrite(p, rx_AckDataSize(offset) + 3 * sizeof(afs_int32),
5485 sizeof(afs_int32), &templ);
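/* Trailer layout, as written above (one afs_int32 each, network byte
 * order): maximum receive size, interface MTU, receive window, and
 * interface datagram packet count. */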
5487 p->header.serviceId = call->conn->serviceId;
5488 p->header.cid = (call->conn->cid | call->channel);
5489 p->header.callNumber = *call->callNumber;
5491 p->header.securityIndex = call->conn->securityIndex;
5492 p->header.epoch = call->conn->epoch;
5493 p->header.type = RX_PACKET_TYPE_ACK;
5494 p->header.flags = RX_SLOW_START_OK;
5495 if (reason == RX_ACK_PING) {
5496 p->header.flags |= RX_REQUEST_ACK;
5498 clock_GetTime(&call->pingRequestTime);
5501 p->length = padbytes +
5502 rx_AckDataSize(call->rwind) + 4 * sizeof(afs_int32);
5505 /* not fast but we can potentially use this if truncated
5506 * fragments are delivered to figure out the mtu.
5508 rx_packetwrite(p, rx_AckDataSize(offset) + 4 *
5509 sizeof(afs_int32), sizeof(afs_int32),
5513 if (call->conn->type == RX_CLIENT_CONNECTION)
5514 p->header.flags |= RX_CLIENT_INITIATED;
5518 if (rxdebug_active) {
5522 len = _snprintf(msg, sizeof(msg),
5523 "tid[%d] SACK: reason %s serial %u previous %u seq %u first %u acks %u space %u ",
5524 GetCurrentThreadId(), rx_ack_reason(ap->reason),
5525 ntohl(ap->serial), ntohl(ap->previousPacket),
5526 (unsigned int)p->header.seq, ntohl(ap->firstPacket),
5527 ap->nAcks, ntohs(ap->bufferSpace) );
5531 for (offset = 0; offset < ap->nAcks && len < sizeof(msg); offset++)
5532 msg[len++] = (ap->acks[offset] == RX_ACK_TYPE_NACK ? '-' : '*');
5536 OutputDebugString(msg);
5538 #else /* AFS_NT40_ENV */
5540 fprintf(rx_Log, "SACK: reason %x previous %u seq %u first %u ",
5541 ap->reason, ntohl(ap->previousPacket),
5542 (unsigned int)p->header.seq, ntohl(ap->firstPacket));
5544 for (offset = 0; offset < ap->nAcks; offset++)
5545 putc(ap->acks[offset] == RX_ACK_TYPE_NACK ? '-' : '*',
5550 #endif /* AFS_NT40_ENV */
5553 int i, nbytes = p->length;
5555 for (i = 1; i < p->niovecs; i++) { /* vec 0 is ALWAYS header */
5556 if (nbytes <= p->wirevec[i].iov_len) {
5559 savelen = p->wirevec[i].iov_len;
5561 p->wirevec[i].iov_len = nbytes;
5563 rxi_Send(call, p, istack);
5564 p->wirevec[i].iov_len = savelen;
5568 nbytes -= p->wirevec[i].iov_len;
5571 if (rx_stats_active)
5572 rx_atomic_inc(&rx_stats.ackPacketsSent);
5573 #ifndef RX_ENABLE_TSFPQ
5574 if (!optionalPacket)
5577 return optionalPacket; /* Return packet for re-use by caller */
5581 struct rx_packet **list;
5586 /* Send all of the packets in the list in a single datagram */
5588 rxi_SendList(struct rx_call *call, struct xmitlist *xmit,
5589 int istack, int moreFlag)
5594 struct clock now, retryTime;
5595 struct rx_connection *conn = call->conn;
5596 struct rx_peer *peer = conn->peer;
5598 MUTEX_ENTER(&peer->peer_lock);
5599 peer->nSent += xmit->len;
5600 if (xmit->resending)
5601 peer->reSends += xmit->len;
5602 retryTime = peer->timeout;
5603 MUTEX_EXIT(&peer->peer_lock);
5605 if (rx_stats_active) {
5606 if (xmit->resending)
5607 rx_atomic_add(&rx_stats.dataPacketsReSent, xmit->len);
5609 rx_atomic_add(&rx_stats.dataPacketsSent, xmit->len);
5612 clock_GetTime(&now);
5613 clock_Add(&retryTime, &now);
5615 if (xmit->list[xmit->len - 1]->header.flags & RX_LAST_PACKET) {
5619 /* Set the packet flags and schedule the resend events */
5620 /* Only request an ack for the last packet in the list */
5621 for (i = 0; i < xmit->len; i++) {
5622 struct rx_packet *packet = xmit->list[i];
5624 packet->retryTime = retryTime;
5625 if (packet->header.serial) {
5626 /* Exponentially backoff retry times */
5627 if (packet->backoff < MAXBACKOFF) {
5628 /* so it can't stay == 0 */
5629 packet->backoff = (packet->backoff << 1) + 1;
5632 clock_Addmsec(&(packet->retryTime),
5633 ((afs_uint32) packet->backoff) << 8);
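/* Backoff sequence (illustrative): backoff steps 0 -> 1 -> 3 -> 7 -> 15
 * via (b << 1) + 1, so successive retransmits wait an extra ~256ms,
 * ~768ms, ~1.8s, ~3.8s on top of the peer timeout. */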
5636 /* Wait a little extra for the ack on the last packet */
5638 && !(packet->header.flags & RX_CLIENT_INITIATED)) {
5639 clock_Addmsec(&(packet->retryTime), 400);
5642 /* Record the time sent */
5643 packet->timeSent = now;
5645 /* Ask for an ack on retransmitted packets, on every other packet
5646 * if the peer doesn't support slow start. Ask for an ack on every
5647 * packet until the congestion window reaches the ack rate. */
5648 if (packet->header.serial) {
5651 /* improved RTO calculation- not Karn */
5652 packet->firstSent = now;
5653 if (!lastPacket && (call->cwind <= (u_short) (conn->ackRate + 1)
5654 || (!(call->flags & RX_CALL_SLOW_START_OK)
5655 && (packet->header.seq & 1)))) {
5660 /* Tag this packet as not being the last in this group,
5661 * for the receiver's benefit */
5662 if (i < xmit->len - 1 || moreFlag) {
5663 packet->header.flags |= RX_MORE_PACKETS;
5668 xmit->list[xmit->len - 1]->header.flags |= RX_REQUEST_ACK;
5671 /* Since we're about to send a data packet to the peer, it's
5672 * safe to nuke any scheduled end-of-packets ack */
5673 rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
5675 MUTEX_EXIT(&call->lock);
5676 MUTEX_ENTER(&rx_refcnt_mutex);
5677 CALL_HOLD(call, RX_CALL_REFCOUNT_SEND);
5678 MUTEX_EXIT(&rx_refcnt_mutex);
5679 if (xmit->len > 1) {
5680 rxi_SendPacketList(call, conn, xmit->list, xmit->len, istack);
5682 rxi_SendPacket(call, conn, xmit->list[0], istack);
5684 MUTEX_ENTER(&call->lock);
5685 MUTEX_ENTER(&rx_refcnt_mutex);
5686 CALL_RELE(call, RX_CALL_REFCOUNT_SEND);
5687 MUTEX_EXIT(&rx_refcnt_mutex);
5689 /* Update last send time for this call (for keep-alive
5690 * processing), and for the connection (so that we can discover
5691 * idle connections) */
5692 conn->lastSendTime = call->lastSendTime = clock_Sec();
5693 /* Let a set of retransmits trigger an idle timeout */
5694 if (!xmit->resending)
5695 call->lastSendData = call->lastSendTime;
5698 /* When sending packets we need to follow these rules:
5699 * 1. Never send more than maxDgramPackets in a jumbogram.
5700 * 2. Never send a packet with more than two iovecs in a jumbogram.
5701 * 3. Never send a retransmitted packet in a jumbogram.
5702 * 4. Never send more than cwind/4 packets in a jumbogram
5703 * We always keep the last list we should have sent so we
5704 * can set the RX_MORE_PACKETS flags correctly.
5708 rxi_SendXmitList(struct rx_call *call, struct rx_packet **list, int len,
5712 struct xmitlist working;
5713 struct xmitlist last;
5715 struct rx_peer *peer = call->conn->peer;
5716 int morePackets = 0;
5718 memset(&last, 0, sizeof(struct xmitlist));
5719 working.list = &list[0];
5721 working.resending = 0;
5723 for (i = 0; i < len; i++) {
5724 /* Does the current packet force us to flush the current list? */
5726 && (list[i]->header.serial || (list[i]->flags & RX_PKTFLAG_ACKED)
5727 || list[i]->length > RX_JUMBOBUFFERSIZE)) {
5729 /* This sends the 'last' list and then rolls the current working
5730 * set into the 'last' one, and resets the working set */
5733 rxi_SendList(call, &last, istack, 1);
5734 /* If the call enters an error state stop sending, or if
5735 * we entered congestion recovery mode, stop sending */
5736 if (call->error || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
5741 working.resending = 0;
5742 working.list = &list[i];
5744 /* Add the current packet to the list if it hasn't been acked.
5745 * Otherwise adjust the list pointer to skip the current packet. */
5746 if (!(list[i]->flags & RX_PKTFLAG_ACKED)) {
5749 if (list[i]->header.serial)
5750 working.resending = 1;
5752 /* Do we need to flush the list? */
5753 if (working.len >= (int)peer->maxDgramPackets
5754 || working.len >= (int)call->nDgramPackets
5755 || working.len >= (int)call->cwind
5756 || list[i]->header.serial
5757 || list[i]->length != RX_JUMBOBUFFERSIZE) {
5759 rxi_SendList(call, &last, istack, 1);
5760 /* If the call enters an error state stop sending, or if
5761 * we entered congestion recovery mode, stop sending */
5763 || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
5768 working.resending = 0;
5769 working.list = &list[i + 1];
5772 if (working.len != 0) {
5773 osi_Panic("rxi_SendList error");
5775 working.list = &list[i + 1];
5779 /* Send the whole list when the call is in receive mode, when
5780 * the call is in eof mode, when we are in fast recovery mode,
5781 * and when we have the last packet */
5782 if ((list[len - 1]->header.flags & RX_LAST_PACKET)
5783 || call->mode == RX_MODE_RECEIVING || call->mode == RX_MODE_EOF
5784 || (call->flags & RX_CALL_FAST_RECOVER)) {
5785 /* Check for the case where the current list contains
5786 * an acked packet. Since we always send retransmissions
5787 * in a separate packet, we only need to check the first
5788 * packet in the list */
5789 if (working.len > 0 && !(working.list[0]->flags & RX_PKTFLAG_ACKED)) {
5793 rxi_SendList(call, &last, istack, morePackets);
5794 /* If the call enters an error state stop sending, or if
5795 * we entered congestion recovery mode, stop sending */
5796 if (call->error || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
5800 rxi_SendList(call, &working, istack, 0);
5802 } else if (last.len > 0) {
5803 rxi_SendList(call, &last, istack, 0);
5804 /* Packets which are in 'working' are not sent by this call */
5808 #ifdef RX_ENABLE_LOCKS
5809 /* Call rxi_Start, below, but with the call lock held. */
5811 rxi_StartUnlocked(struct rxevent *event,
5812 void *arg0, void *arg1, int istack)
5814 struct rx_call *call = arg0;
5816 MUTEX_ENTER(&call->lock);
5817 rxi_Start(event, call, arg1, istack);
5818 MUTEX_EXIT(&call->lock);
5820 #endif /* RX_ENABLE_LOCKS */
5822 /* This routine is called when new packets are readied for
5823 * transmission and when retransmission may be necessary, or when the
5824 * transmission window or burst count are favourable. This should be
5825 * better optimized for new packets, the usual case, now that we've
5826 * got rid of queues of send packets. XXXXXXXXXXX */
5828 rxi_Start(struct rxevent *event,
5829 void *arg0, void *arg1, int istack)
5831 struct rx_call *call = arg0;
5833 struct rx_packet *p;
5834 struct rx_packet *nxp; /* Next pointer for queue_Scan */
5835 struct clock now, usenow, retryTime;
5840 /* If rxi_Start is being called as a result of a resend event,
5841 * then make sure that the event pointer is removed from the call
5842 * structure, since there is no longer a per-call retransmission
5844 if (event && event == call->resendEvent) {
5845 MUTEX_ENTER(&rx_refcnt_mutex);
5846 CALL_RELE(call, RX_CALL_REFCOUNT_RESEND);
5847 MUTEX_EXIT(&rx_refcnt_mutex);
5848 call->resendEvent = NULL;
5850 if (rxi_busyChannelError && (call->flags & RX_CALL_PEER_BUSY)) {
5851 rxi_CheckBusy(call);
5854 if (queue_IsEmpty(&call->tq)) {
5861 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
5862 if (rx_stats_active)
5863 rx_atomic_inc(&rx_tq_debug.rxi_start_in_error);
5868 if (queue_IsNotEmpty(&call->tq)) { /* If we have anything to send */
5870 clock_GetTime(&now);
5873 /* Send (or resend) any packets that need it, subject to
5874 * window restrictions and congestion burst control
5875 * restrictions. Ask for an ack on the last packet sent in
5876 * this burst. For now, we're relying upon the window being
5877 * considerably bigger than the largest number of packets that
5878 * are typically sent at once by one initial call to
5879 * rxi_Start. This is probably bogus (perhaps we should ask
5880 * for an ack when we're half way through the current
5881 * window?). Also, for non file transfer applications, this
5882 * may end up asking for an ack for every packet. Bogus. XXXX
5885 * But check whether we're here recursively, and let the other guy
5888 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
5889 if (!(call->flags & RX_CALL_TQ_BUSY)) {
5890 call->flags |= RX_CALL_TQ_BUSY;
5892 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
5894 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
5895 call->flags &= ~RX_CALL_NEED_START;
5896 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
5898 maxXmitPackets = MIN(call->twind, call->cwind);
5899 for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
5900 if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
5901 /* We shouldn't be sending packets if a thread is waiting
5902 * to initiate congestion recovery */
5903 dpf(("call %d waiting to initiate fast recovery\n",
5904 *(call->callNumber)));
5908 && (call->flags & RX_CALL_FAST_RECOVER)) {
5909 /* Only send one packet during fast recovery */
5910 dpf(("call %d restricted to one packet per send during fast recovery\n",
5911 *(call->callNumber)));
5912 break;
5913 }
5914 #ifdef RX_TRACK_PACKETS
5915 if ((p->flags & RX_PKTFLAG_FREE)
5916 || (!queue_IsEnd(&call->tq, nxp)
5917 && (nxp->flags & RX_PKTFLAG_FREE))
5918 || (p == (struct rx_packet *)&rx_freePacketQueue)
5919 || (nxp == (struct rx_packet *)&rx_freePacketQueue)) {
5920 osi_Panic("rxi_Start: xmit queue clobbered");
5923 if (p->flags & RX_PKTFLAG_ACKED) {
5924 /* Since we may block, don't trust this */
5925 usenow.sec = usenow.usec = 0;
5926 if (rx_stats_active)
5927 rx_atomic_inc(&rx_stats.ignoreAckedPacket);
5928 continue; /* Ignore this packet if it has been acknowledged */
5931 /* Turn off all flags except these ones, which are the same
5932 * on each transmission */
5933 p->header.flags &= RX_PRESET_FLAGS;
5935 if (p->header.seq >=
5936 call->tfirst + MIN((int)call->twind,
5937 (int)(call->nSoftAcked +
5938 call->cwind))) {
5939 call->flags |= RX_CALL_WAIT_WINDOW_SEND; /* Wait for transmit window */
5940 /* Note: if we're waiting for more window space, we can
5941 * still send retransmits; hence we don't return here, but
5942 * break out to schedule a retransmit event */
5943 dpf(("call %d waiting for window (seq %d, twind %d, nSoftAcked %d, cwind %d)\n",
5944 *(call->callNumber), p->header.seq, call->twind, call->nSoftAcked,
5945 call->cwind));
5946 break;
5947 }
5949 /* Transmit the packet if it needs to be sent. */
5950 if (!clock_Lt(&now, &p->retryTime)) {
5951 if (nXmitPackets == maxXmitPackets) {
5952 rxi_SendXmitList(call, call->xmitList,
5953 nXmitPackets, istack);
5956 dpf(("call %d xmit packet %"AFS_PTR_FMT" now %u.%06u retryTime %u.%06u\n",
5957 *(call->callNumber), p,
5958 now.sec, now.usec,
5959 p->retryTime.sec, p->retryTime.usec));
5960 call->xmitList[nXmitPackets++] = p;
5964 /* xmitList now holds pointers to all of the packets that are
5965 * ready to send. Now we loop to send the packets */
5966 if (nXmitPackets > 0) {
5967 rxi_SendXmitList(call, call->xmitList, nXmitPackets,
5968 istack);
5969 }
5971 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
5972 /*
5973 * TQ references no longer protected by this flag; they must remain
5974 * protected by the global lock.
5976 if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
5977 call->flags &= ~RX_CALL_TQ_BUSY;
5978 rxi_WakeUpTransmitQueue(call);
5982 /* We went into the error state while sending packets. Now is
5983 * the time to reset the call. This will also inform the using
5984 * process that the call is in an error state.
5986 if (rx_stats_active)
5987 rx_atomic_inc(&rx_tq_debug.rxi_start_aborted);
5988 call->flags &= ~RX_CALL_TQ_BUSY;
5989 rxi_WakeUpTransmitQueue(call);
5990 rxi_CallError(call, call->error);
5993 #ifdef RX_ENABLE_LOCKS
5994 if (call->flags & RX_CALL_TQ_SOME_ACKED) {
5996 call->flags &= ~RX_CALL_TQ_SOME_ACKED;
5997 /* Some packets have received acks. If they all have, we can clear
5998 * the transmit queue.
5999 */
6000 for (missing =
6001 0, queue_Scan(&call->tq, p, nxp, rx_packet)) {
6002 if (p->header.seq < call->tfirst
6003 && (p->flags & RX_PKTFLAG_ACKED)) {
6005 #ifdef RX_TRACK_PACKETS
6006 p->flags &= ~RX_PKTFLAG_TQ;
6008 #ifdef RXDEBUG_PACKET
6016 call->flags |= RX_CALL_TQ_CLEARME;
6018 #endif /* RX_ENABLE_LOCKS */
6019 /* Don't bother doing retransmits if the TQ is cleared. */
6020 if (call->flags & RX_CALL_TQ_CLEARME) {
6021 rxi_ClearTransmitQueue(call, 1);
6023 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
6026 /* Always post a resend event, if there is anything in the
6027 * queue, and resend is possible. There should be at least
6028 * one unacknowledged packet in the queue ... otherwise none
6029 * of these packets should be on the queue in the first place.
6031 if (call->resendEvent) {
6032 /* Cancel the existing event and post a new one */
6033 rxevent_Cancel(call->resendEvent, call,
6034 RX_CALL_REFCOUNT_RESEND);
6037 /* The resend event fires at the retry time of the first unacknowledged
6038 * packet inside the current window */
6039 for (haveEvent =
6040 0, queue_Scan(&call->tq, p, nxp, rx_packet)) {
6041 /* Don't set timers for packets outside the window */
6042 if (p->header.seq >= call->tfirst + call->twind) {
6046 if (!(p->flags & RX_PKTFLAG_ACKED)
6047 && !clock_IsZero(&p->retryTime)) {
6048 haveEvent = 1;
6049 retryTime = p->retryTime;
6050 break;
6051 }
6054 /* Post a new event to re-run rxi_Start when retries may be needed */
6055 if (haveEvent && !(call->flags & RX_CALL_NEED_START)) {
6056 #ifdef RX_ENABLE_LOCKS
6057 MUTEX_ENTER(&rx_refcnt_mutex);
6058 CALL_HOLD(call, RX_CALL_REFCOUNT_RESEND);
6059 MUTEX_EXIT(&rx_refcnt_mutex);
6060 call->resendEvent =
6061 rxevent_PostNow2(&retryTime, &usenow,
6062 rxi_StartUnlocked,
6063 (void *)call, 0, istack);
6064 #else /* RX_ENABLE_LOCKS */
6065 call->resendEvent =
6066 rxevent_PostNow2(&retryTime, &usenow, rxi_Start,
6067 (void *)call, 0, istack);
6068 #endif /* RX_ENABLE_LOCKS */
6071 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
6072 } while (call->flags & RX_CALL_NEED_START);
6073 /*
6074 * TQ references no longer protected by this flag; they must remain
6075 * protected by the global lock.
6077 call->flags &= ~RX_CALL_TQ_BUSY;
6078 rxi_WakeUpTransmitQueue(call);
6080 call->flags |= RX_CALL_NEED_START;
6082 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
6084 if (call->resendEvent) {
6085 rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
6090 /* Also adjusts the keep alive parameters for the call, to reflect
6091 * that we have just sent a packet (so keep alives aren't sent
6092 * immediately) */
6094 rxi_Send(struct rx_call *call, struct rx_packet *p,
6097 struct rx_connection *conn = call->conn;
6099 /* Stamp each packet with the user supplied status */
6100 p->header.userStatus = call->localStatus;
6102 /* Allow the security object controlling this call's security to
6103 * make any last-minute changes to the packet */
6104 RXS_SendPacket(conn->securityObject, call, p);
6106 /* Since we're about to send SOME sort of packet to the peer, it's
6107 * safe to nuke any scheduled end-of-packets ack */
6108 rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
6110 /* Actually send the packet, filling in more connection-specific fields */
6111 MUTEX_EXIT(&call->lock);
6112 MUTEX_ENTER(&rx_refcnt_mutex);
6113 CALL_HOLD(call, RX_CALL_REFCOUNT_SEND);
6114 MUTEX_EXIT(&rx_refcnt_mutex);
6115 rxi_SendPacket(call, conn, p, istack);
6116 MUTEX_ENTER(&rx_refcnt_mutex);
6117 CALL_RELE(call, RX_CALL_REFCOUNT_SEND);
6118 MUTEX_EXIT(&rx_refcnt_mutex);
6119 MUTEX_ENTER(&call->lock);
6121 /* Update last send time for this call (for keep-alive
6122 * processing), and for the connection (so that we can discover
6123 * idle connections) */
6124 if ((p->header.type != RX_PACKET_TYPE_ACK) ||
6125 (((struct rx_ackPacket *)rx_DataOf(p))->reason == RX_ACK_PING) ||
6126 (p->length <= (rx_AckDataSize(call->rwind) + 4 * sizeof(afs_int32))))
6128 conn->lastSendTime = call->lastSendTime = clock_Sec();
6129 /* Don't count keepalive ping/acks here, so idleness can be tracked. */
6130 if ((p->header.type != RX_PACKET_TYPE_ACK) ||
6131 ((((struct rx_ackPacket *)rx_DataOf(p))->reason != RX_ACK_PING) &&
6132 (((struct rx_ackPacket *)rx_DataOf(p))->reason !=
6133 RX_ACK_PING_RESPONSE)))
6134 call->lastSendData = call->lastSendTime;
6138 /* Check if a call needs to be destroyed. Called by keep-alive code to ensure
6139 * that things are fine. Also called periodically to guarantee that nothing
6140 * falls through the cracks (e.g. (error + dally) connections have keepalive
6141 * turned off). Returns 0 if conn is well, -1 otherwise. If otherwise, call
6142 * may be freed!
6143 * haveCTLock Set if calling from rxi_ReapConnections
6145 #ifdef RX_ENABLE_LOCKS
6147 rxi_CheckCall(struct rx_call *call, int haveCTLock)
6148 #else /* RX_ENABLE_LOCKS */
6150 rxi_CheckCall(struct rx_call *call)
6151 #endif /* RX_ENABLE_LOCKS */
6153 struct rx_connection *conn = call->conn;
6155 afs_uint32 deadTime, idleDeadTime = 0, hardDeadTime = 0;
6156 afs_uint32 fudgeFactor;
6160 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
6161 if (call->flags & RX_CALL_TQ_BUSY) {
6162 /* Call is active and will be reset by rxi_Start if it's
6163 * in an error state.
6168 /* RTT + 8*MDEV, rounded up to the next second. */
6169 fudgeFactor = (((afs_uint32) conn->peer->rtt >> 3) +
6170 ((afs_uint32) conn->peer->rtt_dev << 1) + 1023) >> 10;
6172 deadTime = conn->secondsUntilDead + fudgeFactor;
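/* Editorial worked example (not from the original source): for a
 * smoothed RTT of 200ms, peer->rtt == 1600 (rtt is stored in 1/8 ms
 * units), and for a mean deviation of 25ms, peer->rtt_dev == 100
 * (stored in 1/4 ms units). The expression above then computes
 * (1600 >> 3) + (100 << 1) + 1023 = 1423, and 1423 >> 10 == 1: that
 * is, RTT + 8*MDEV = 400ms, rounded up to a 1 second fudge factor. */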
6174 /* These are computed to the second (+- 1 second). But that's
6175 * good enough for these values, which should be a significant
6176 * number of seconds. */
6177 if (now > (call->lastReceiveTime + deadTime)) {
6178 if (call->state == RX_STATE_ACTIVE) {
6180 #if defined(KERNEL) && defined(AFS_SUN57_ENV)
6182 #if defined(AFS_SUN510_ENV) && defined(GLOBAL_NETSTACKID)
6183 netstack_t *ns = netstack_find_by_stackid(GLOBAL_NETSTACKID);
6184 ip_stack_t *ipst = ns->netstack_ip;
6186 ire = ire_cache_lookup(conn->peer->host
6187 #if defined(AFS_SUN510_ENV) && defined(ALL_ZONES)
6189 #if defined(AFS_SUN510_ENV) && (defined(ICL_3_ARG) || defined(GLOBAL_NETSTACKID))
6191 #if defined(AFS_SUN510_ENV) && defined(GLOBAL_NETSTACKID)
6198 if (ire && ire->ire_max_frag > 0)
6199 rxi_SetPeerMtu(NULL, conn->peer->host, 0,
6201 #if defined(GLOBAL_NETSTACKID)
6205 #endif /* ADAPT_PMTU */
6206 cerror = RX_CALL_DEAD;
6209 #ifdef RX_ENABLE_LOCKS
6210 /* Cancel pending events */
6211 rxevent_Cancel(call->delayedAckEvent, call,
6212 RX_CALL_REFCOUNT_DELAY);
6213 rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
6214 rxevent_Cancel(call->keepAliveEvent, call,
6215 RX_CALL_REFCOUNT_ALIVE);
6216 if (call->growMTUEvent)
6217 rxevent_Cancel(call->growMTUEvent, call,
6218 RX_CALL_REFCOUNT_ALIVE);
6219 MUTEX_ENTER(&rx_refcnt_mutex);
6220 if (call->refCount == 0) {
6221 rxi_FreeCall(call, haveCTLock);
6222 MUTEX_EXIT(&rx_refcnt_mutex);
6225 MUTEX_EXIT(&rx_refcnt_mutex);
6227 #else /* RX_ENABLE_LOCKS */
6228 rxi_FreeCall(call, 0);
6230 #endif /* RX_ENABLE_LOCKS */
6232 /* Non-active calls are destroyed if they are not responding
6233 * to pings; active calls are simply flagged in error, so the
6234 * attached process can die reasonably gracefully. */
6237 if (conn->idleDeadTime) {
6238 idleDeadTime = conn->idleDeadTime + fudgeFactor;
6241 /* see if we have a non-activity timeout */
6242 if (call->startWait && idleDeadTime
6243 && ((call->startWait + idleDeadTime) < now) &&
6244 (call->flags & RX_CALL_READER_WAIT)) {
6245 if (call->state == RX_STATE_ACTIVE) {
6246 cerror = RX_CALL_TIMEOUT;
6250 if (call->lastSendData && idleDeadTime && (conn->idleDeadErr != 0)
6251 && ((call->lastSendData + idleDeadTime) < now)) {
6252 if (call->state == RX_STATE_ACTIVE) {
6253 cerror = conn->idleDeadErr;
6258 if (conn->hardDeadTime) {
6259 hardDeadTime = conn->hardDeadTime + fudgeFactor;
6262 /* see if we have a hard timeout */
6263 if (hardDeadTime
6264 && (now > (hardDeadTime + call->startTime.sec))) {
6265 if (call->state == RX_STATE_ACTIVE)
6266 rxi_CallError(call, RX_CALL_TIMEOUT);
6271 if (conn->msgsizeRetryErr && cerror != RX_CALL_TIMEOUT
6272 && call->lastReceiveTime) {
6273 int oldMTU = conn->peer->ifMTU;
6275 /* if we thought we could send more, perhaps things got worse */
6276 if (conn->peer->maxPacketSize > conn->lastPacketSize)
6277 /* maxpacketsize will be cleared in rxi_SetPeerMtu */
6278 newmtu = MAX(conn->peer->maxPacketSize-RX_IPUDP_SIZE,
6279 conn->lastPacketSize-(128+RX_IPUDP_SIZE));
6281 newmtu = conn->lastPacketSize-(128+RX_IPUDP_SIZE);
6283 /* minimum capped in SetPeerMtu */
6284 rxi_SetPeerMtu(conn->peer, 0, 0, newmtu);
6287 conn->lastPacketSize = 0;
6289 /* needed so ResetCall doesn't clobber us. */
6290 call->MTU = conn->peer->ifMTU;
6292 /* if we never succeeded, let the error pass out as-is */
6293 if (conn->peer->maxPacketSize && oldMTU != conn->peer->ifMTU)
6294 cerror = conn->msgsizeRetryErr;
6297 rxi_CallError(call, cerror);
6302 rxi_NatKeepAliveEvent(struct rxevent *event, void *arg1, void *dummy)
6304 struct rx_connection *conn = arg1;
6305 struct rx_header theader;
6306 char tbuffer[1 + sizeof(struct rx_header)];
6307 struct sockaddr_in taddr;
6308 char *tp;
6309 char a = 0;
6310 struct iovec tmpiov[2];
6311 osi_socket socket =
6312 (conn->type ==
6313 RX_CLIENT_CONNECTION ? rx_socket : conn->service->socket);
6316 tp = &tbuffer[sizeof(struct rx_header)];
6317 taddr.sin_family = AF_INET;
6318 taddr.sin_port = rx_PortOf(rx_PeerOf(conn));
6319 taddr.sin_addr.s_addr = rx_HostOf(rx_PeerOf(conn));
6320 #ifdef STRUCT_SOCKADDR_HAS_SA_LEN
6321 taddr.sin_len = sizeof(struct sockaddr_in);
6323 memset(&theader, 0, sizeof(theader));
6324 theader.epoch = htonl(999);
6326 theader.callNumber = 0;
6329 theader.type = RX_PACKET_TYPE_VERSION;
6330 theader.flags = RX_LAST_PACKET;
6331 theader.serviceId = 0;
6333 memcpy(tbuffer, &theader, sizeof(theader));
6334 memcpy(tp, &a, sizeof(a));
6335 tmpiov[0].iov_base = tbuffer;
6336 tmpiov[0].iov_len = 1 + sizeof(struct rx_header);
6338 osi_NetSend(socket, &taddr, tmpiov, 1, 1 + sizeof(struct rx_header), 1);
6340 MUTEX_ENTER(&conn->conn_data_lock);
6341 MUTEX_ENTER(&rx_refcnt_mutex);
6342 /* Only reschedule ourselves if the connection would not be destroyed */
6343 if (conn->refCount <= 1) {
6344 conn->natKeepAliveEvent = NULL;
6345 MUTEX_EXIT(&rx_refcnt_mutex);
6346 MUTEX_EXIT(&conn->conn_data_lock);
6347 rx_DestroyConnection(conn); /* drop the reference for this */
6349 conn->refCount--; /* drop the reference for this */
6350 MUTEX_EXIT(&rx_refcnt_mutex);
6351 conn->natKeepAliveEvent = NULL;
6352 rxi_ScheduleNatKeepAliveEvent(conn);
6353 MUTEX_EXIT(&conn->conn_data_lock);
6358 rxi_ScheduleNatKeepAliveEvent(struct rx_connection *conn)
6360 if (!conn->natKeepAliveEvent && conn->secondsUntilNatPing) {
6361 struct clock when, now;
6362 clock_GetTime(&now);
6364 when.sec += conn->secondsUntilNatPing;
6365 MUTEX_ENTER(&rx_refcnt_mutex);
6366 conn->refCount++; /* hold a reference for this */
6367 MUTEX_EXIT(&rx_refcnt_mutex);
6368 conn->natKeepAliveEvent =
6369 rxevent_PostNow(&when, &now, rxi_NatKeepAliveEvent, conn, 0);
6374 rx_SetConnSecondsUntilNatPing(struct rx_connection *conn, afs_int32 seconds)
6376 MUTEX_ENTER(&conn->conn_data_lock);
6377 conn->secondsUntilNatPing = seconds;
6378 if (seconds != 0)
6379 rxi_ScheduleNatKeepAliveEvent(conn);
6380 MUTEX_EXIT(&conn->conn_data_lock);
6384 rxi_NatKeepAliveOn(struct rx_connection *conn)
6386 MUTEX_ENTER(&conn->conn_data_lock);
6387 rxi_ScheduleNatKeepAliveEvent(conn);
6388 MUTEX_EXIT(&conn->conn_data_lock);
6391 /* When a call is in progress, this routine is called occasionally to
6392 * make sure that some traffic has arrived from (or been sent to) the peer.
6393 * If nothing has arrived in a reasonable amount of time, the call is
6394 * declared dead; if nothing has been sent for a while, we send a
6395 * keep-alive packet (if we're actually trying to keep the call alive)
6398 rxi_KeepAliveEvent(struct rxevent *event, void *arg1, void *dummy)
6400 struct rx_call *call = arg1;
6401 struct rx_connection *conn;
6404 MUTEX_ENTER(&rx_refcnt_mutex);
6405 CALL_RELE(call, RX_CALL_REFCOUNT_ALIVE);
6406 MUTEX_EXIT(&rx_refcnt_mutex);
6407 MUTEX_ENTER(&call->lock);
6408 if (event == call->keepAliveEvent)
6409 call->keepAliveEvent = NULL;
6412 #ifdef RX_ENABLE_LOCKS
6413 if (rxi_CheckCall(call, 0)) {
6414 MUTEX_EXIT(&call->lock);
6417 #else /* RX_ENABLE_LOCKS */
6418 if (rxi_CheckCall(call))
6420 #endif /* RX_ENABLE_LOCKS */
6422 /* Don't try to keep alive dallying calls */
6423 if (call->state == RX_STATE_DALLY) {
6424 MUTEX_EXIT(&call->lock);
6429 if ((now - call->lastSendTime) > conn->secondsUntilPing) {
6430 /* Don't try to send keepalives if there is unacknowledged data */
6431 /* the rexmit code should be good enough; this little hack
6432 * doesn't quite work XXX */
6433 (void)rxi_SendAck(call, NULL, 0, RX_ACK_PING, 0);
6435 rxi_ScheduleKeepAliveEvent(call);
6436 MUTEX_EXIT(&call->lock);
6439 /* Does what's on the nameplate. */
6441 rxi_GrowMTUEvent(struct rxevent *event, void *arg1, void *dummy)
6443 struct rx_call *call = arg1;
6444 struct rx_connection *conn;
6446 MUTEX_ENTER(&rx_refcnt_mutex);
6447 CALL_RELE(call, RX_CALL_REFCOUNT_ALIVE);
6448 MUTEX_EXIT(&rx_refcnt_mutex);
6449 MUTEX_ENTER(&call->lock);
6451 if (event == call->growMTUEvent)
6452 call->growMTUEvent = NULL;
6454 #ifdef RX_ENABLE_LOCKS
6455 if (rxi_CheckCall(call, 0)) {
6456 MUTEX_EXIT(&call->lock);
6459 #else /* RX_ENABLE_LOCKS */
6460 if (rxi_CheckCall(call))
6462 #endif /* RX_ENABLE_LOCKS */
6464 /* Don't bother with dallying calls */
6465 if (call->state == RX_STATE_DALLY) {
6466 MUTEX_EXIT(&call->lock);
6473 * keep being scheduled, just don't do anything if we're at peak,
6474 * or we're not set up to be properly handled (idle timeout required)
6476 if ((conn->peer->maxPacketSize != 0) &&
6477 (conn->peer->natMTU < RX_MAX_PACKET_SIZE) &&
6478 (conn->idleDeadErr))
6479 (void)rxi_SendAck(call, NULL, 0, RX_ACK_MTU, 0);
6480 rxi_ScheduleGrowMTUEvent(call, 0);
6481 MUTEX_EXIT(&call->lock);
6485 rxi_ScheduleKeepAliveEvent(struct rx_call *call)
6487 if (!call->keepAliveEvent) {
6488 struct clock when, now;
6489 clock_GetTime(&now);
6491 when.sec += call->conn->secondsUntilPing;
6492 MUTEX_ENTER(&rx_refcnt_mutex);
6493 CALL_HOLD(call, RX_CALL_REFCOUNT_ALIVE);
6494 MUTEX_EXIT(&rx_refcnt_mutex);
6495 call->keepAliveEvent =
6496 rxevent_PostNow(&when, &now, rxi_KeepAliveEvent, call, 0);
6501 rxi_ScheduleGrowMTUEvent(struct rx_call *call, int secs)
6503 if (!call->growMTUEvent) {
6504 struct clock when, now;
6506 clock_GetTime(&now);
6509 if (call->conn->secondsUntilPing)
6510 secs = (6*call->conn->secondsUntilPing)-1;
6512 if (call->conn->secondsUntilDead)
6513 secs = MIN(secs, (call->conn->secondsUntilDead-1));
6517 MUTEX_ENTER(&rx_refcnt_mutex);
6518 CALL_HOLD(call, RX_CALL_REFCOUNT_ALIVE);
6519 MUTEX_EXIT(&rx_refcnt_mutex);
6520 call->growMTUEvent =
6521 rxevent_PostNow(&when, &now, rxi_GrowMTUEvent, call, 0);
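/* Editorial worked example (hypothetical values, not from the original
 * source): with secondsUntilPing == 6 and secondsUntilDead == 12, a
 * zero 'secs' argument becomes (6*6)-1 = 35 above and is then clamped
 * to MIN(35, 12-1) = 11 seconds between MTU probes, so a probe always
 * fires before the dead timer can expire. */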
6525 /* N.B. rxi_KeepAliveOff is defined earlier as a macro */
6527 rxi_KeepAliveOn(struct rx_call *call)
6529 /* Pretend last packet received was received now--i.e. if another
6530 * packet isn't received within the keep alive time, then the call
6531 * will die. Initialize last send time to the current time--even
6532 * if a packet hasn't been sent yet. This will guarantee that a
6533 * keep-alive is sent within the ping time */
6534 call->lastReceiveTime = call->lastSendTime = clock_Sec();
6535 rxi_ScheduleKeepAliveEvent(call);
6539 rxi_GrowMTUOn(struct rx_call *call)
6541 struct rx_connection *conn = call->conn;
6542 MUTEX_ENTER(&conn->conn_data_lock);
6543 conn->lastPingSizeSer = conn->lastPingSize = 0;
6544 MUTEX_EXIT(&conn->conn_data_lock);
6545 rxi_ScheduleGrowMTUEvent(call, 1);
6548 /* This routine is called to send connection abort messages
6549 * that have been delayed to throttle looping clients. */
6551 rxi_SendDelayedConnAbort(struct rxevent *event,
6552 void *arg1, void *unused)
6554 struct rx_connection *conn = arg1;
6557 struct rx_packet *packet;
6559 MUTEX_ENTER(&conn->conn_data_lock);
6560 conn->delayedAbortEvent = NULL;
6561 error = htonl(conn->error);
6563 MUTEX_EXIT(&conn->conn_data_lock);
6564 packet = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL);
6566 if (packet) {
6567 rxi_SendSpecial((struct rx_call *)0, conn, packet,
6568 RX_PACKET_TYPE_ABORT, (char *)&error,
6570 rxi_FreePacket(packet);
6574 /* This routine is called to send call abort messages
6575 * that have been delayed to throttle looping clients. */
6577 rxi_SendDelayedCallAbort(struct rxevent *event,
6578 void *arg1, void *dummy)
6580 struct rx_call *call = arg1;
6583 struct rx_packet *packet;
6585 MUTEX_ENTER(&call->lock);
6586 call->delayedAbortEvent = NULL;
6587 error = htonl(call->error);
6589 packet = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL);
6591 if (packet) {
6592 rxi_SendSpecial(call, call->conn, packet, RX_PACKET_TYPE_ABORT,
6593 (char *)&error, sizeof(error), 0);
6594 rxi_FreePacket(packet);
6596 MUTEX_EXIT(&call->lock);
6597 MUTEX_ENTER(&rx_refcnt_mutex);
6598 CALL_RELE(call, RX_CALL_REFCOUNT_ABORT);
6599 MUTEX_EXIT(&rx_refcnt_mutex);
6602 /* This routine is called periodically (every RX_AUTH_REQUEST_TIMEOUT
6603 * seconds) to ask the client to authenticate itself. The routine
6604 * issues a challenge to the client, which is obtained from the
6605 * security object associated with the connection */
6607 rxi_ChallengeEvent(struct rxevent *event,
6608 void *arg0, void *arg1, int tries)
6610 struct rx_connection *conn = arg0;
6612 conn->challengeEvent = NULL;
6613 if (RXS_CheckAuthentication(conn->securityObject, conn) != 0) {
6614 struct rx_packet *packet;
6615 struct clock when, now;
6618 /* We've failed to authenticate for too long.
6619 * Reset any calls waiting for authentication;
6620 * they are all in RX_STATE_PRECALL.
6624 MUTEX_ENTER(&conn->conn_call_lock);
6625 for (i = 0; i < RX_MAXCALLS; i++) {
6626 struct rx_call *call = conn->call[i];
6628 MUTEX_ENTER(&call->lock);
6629 if (call->state == RX_STATE_PRECALL) {
6630 rxi_CallError(call, RX_CALL_DEAD);
6631 rxi_SendCallAbort(call, NULL, 0, 0);
6633 MUTEX_EXIT(&call->lock);
6636 MUTEX_EXIT(&conn->conn_call_lock);
6640 packet = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL);
6641 if (packet) {
6642 /* If there's no packet available, do this later. */
6643 RXS_GetChallenge(conn->securityObject, conn, packet);
6644 rxi_SendSpecial((struct rx_call *)0, conn, packet,
6645 RX_PACKET_TYPE_CHALLENGE, NULL, -1, 0);
6646 rxi_FreePacket(packet);
6648 clock_GetTime(&now);
6650 when.sec += RX_CHALLENGE_TIMEOUT;
6651 conn->challengeEvent =
6652 rxevent_PostNow2(&when, &now, rxi_ChallengeEvent, conn, 0,
6657 /* Call this routine to start requesting the client to authenticate
6658 * itself. This will continue until authentication is established,
6659 * the call times out, or an invalid response is returned. The
6660 * security object associated with the connection is asked to create
6661 * the challenge at this time. N.B. rxi_ChallengeOff is a macro,
6662 * defined earlier. */
6664 rxi_ChallengeOn(struct rx_connection *conn)
6666 if (!conn->challengeEvent) {
6667 RXS_CreateChallenge(conn->securityObject, conn);
6668 rxi_ChallengeEvent(NULL, conn, 0, RX_CHALLENGE_MAXTRIES);
6673 /* rxi_ComputeRoundTripTime is called with peer locked. */
6674 /* peer may be null */
6676 rxi_ComputeRoundTripTime(struct rx_packet *p,
6677 struct rx_ackPacket *ack,
6678 struct rx_peer *peer,
6681 struct clock thisRtt, *sentp;
6685 /* If the ACK is delayed, then do nothing */
6686 if (ack->reason == RX_ACK_DELAY)
6689 /* On the wire, jumbograms are a single UDP packet. We shouldn't count
6690 * their RTT multiple times, so only include the RTT of the last packet
6691 * in a jumbogram */
6692 if (p->flags & RX_JUMBO_PACKET)
6695 /* Use the serial number to determine which transmission the ACK is for,
6696 * and set the sent time to match this. If we have no serial number, then
6697 * only use the ACK for RTT calculations if the packet has not been
6698 * retransmitted.
6699 */
6701 serial = ntohl(ack->serial);
6703 if (serial == p->header.serial) {
6704 sentp = &p->timeSent;
6705 } else if (serial == p->firstSerial) {
6706 sentp = &p->firstSent;
6707 } else if (clock_Eq(&p->timeSent, &p->firstSent)) {
6708 sentp = &p->firstSent;
6712 if (clock_Eq(&p->timeSent, &p->firstSent)) {
6713 sentp = &p->firstSent;
6720 if (clock_Lt(&thisRtt, sentp))
6721 return; /* somebody set the clock back, don't count this time. */
6723 clock_Sub(&thisRtt, sentp);
6724 dpf(("rxi_ComputeRoundTripTime(call=%d packet=%"AFS_PTR_FMT" rttp=%d.%06d sec)\n",
6725 p->header.callNumber, p, thisRtt.sec, thisRtt.usec));
6727 if (clock_IsZero(&thisRtt)) {
6729 * The actual round trip time is shorter than the
6730 * clock_GetTime resolution. It is most likely 1ms or 100ns.
6731 * Since we can't tell which at the moment, we will assume 1ms.
6733 thisRtt.usec = 1000;
6736 if (rx_stats_active) {
6737 MUTEX_ENTER(&rx_stats_mutex);
6738 if (clock_Lt(&thisRtt, &rx_stats.minRtt))
6739 rx_stats.minRtt = thisRtt;
6740 if (clock_Gt(&thisRtt, &rx_stats.maxRtt)) {
6741 if (thisRtt.sec > 60) {
6742 MUTEX_EXIT(&rx_stats_mutex);
6743 return; /* somebody set the clock ahead */
6745 rx_stats.maxRtt = thisRtt;
6747 clock_Add(&rx_stats.totalRtt, &thisRtt);
6748 rx_atomic_inc(&rx_stats.nRttSamples);
6749 MUTEX_EXIT(&rx_stats_mutex);
6752 /* better rtt calculation courtesy of UMich crew (dave,larry,peter,?) */
6754 /* Apply VanJacobson round-trip estimations */
6759 * srtt (peer->rtt) is in units of one-eighth-milliseconds.
6760 * srtt is stored as fixed point with 3 bits after the binary
6761 * point (i.e., scaled by 8). The following magic is
6762 * equivalent to the smoothing algorithm in rfc793 with an
6763 * alpha of .875 (srtt' = rtt/8 + srtt*7/8 in fixed point).
6764 * srtt'*8 = rtt + srtt*7
6765 * srtt'*8 = srtt*8 + rtt - srtt
6766 * srtt' = srtt + rtt/8 - srtt/8
6767 * srtt' = srtt + (rtt - srtt)/8
6770 delta = _8THMSEC(&thisRtt) - peer->rtt;
6771 peer->rtt += (delta >> 3);
6774 * We accumulate a smoothed rtt variance (actually, a smoothed
6775 * mean difference), then set the retransmit timer to smoothed
6776 * rtt + 4 times the smoothed variance (was 2x in van's original
6777 * paper, but 4x works better for me, and apparently for him as
6779 * rttvar is stored as
6780 * fixed point with 2 bits after the binary point (scaled by
6781 * 4). The following is equivalent to rfc793 smoothing with
6782 * an alpha of .75 (rttvar' = rttvar*3/4 + |delta| / 4).
6783 * rttvar'*4 = rttvar*3 + |delta|
6784 * rttvar'*4 = rttvar*4 + |delta| - rttvar
6785 * rttvar' = rttvar + |delta|/4 - rttvar/4
6786 * rttvar' = rttvar + (|delta| - rttvar)/4
6787 * This replaces rfc793's wired-in beta.
6788 * dev*4 = dev*4 + (|actual - expected| - dev)
6789 */
6790 if (delta < 0)
6791 delta = -delta;
6794 delta -= (peer->rtt_dev << 1);
6795 peer->rtt_dev += (delta >> 3);
6797 /* I don't have a stored RTT so I start with this value. Since I'm
6798 * probably just starting a call, and will be pushing more data down
6799 * this call, I expect congestion to increase rapidly. So I fudge a
6800 * little, and I set deviance to half the rtt. In practice,
6801 * deviance tends to approach something a little less than
6802 * half the smoothed rtt. */
6803 peer->rtt = _8THMSEC(&thisRtt) + 8;
6804 peer->rtt_dev = peer->rtt >> 2; /* rtt/2: they're scaled differently */
6806 /* the smoothed RTT time is RTT + 4*MDEV
6808 * We allow a user specified minimum to be set for this, to allow clamping
6809 * at a minimum value in the same way as TCP. In addition, we have to allow
6810 * for the possibility that this packet is answered by a delayed ACK, so we
6811 * add on a fixed 200ms to account for that timer expiring.
6814 rtt_timeout = MAX(((peer->rtt >> 3) + peer->rtt_dev),
6815 rx_minPeerTimeout) + 200;
6816 clock_Zero(&(peer->timeout));
6817 clock_Addmsec(&(peer->timeout), rtt_timeout);
6819 dpf(("rxi_ComputeRoundTripTime(call=%d packet=%"AFS_PTR_FMT" rtt=%d ms, srtt=%d ms, rtt_dev=%d ms, timeout=%d.%06d sec)\n",
6820 p->header.callNumber, p, MSEC(&thisRtt), peer->rtt >> 3, peer->rtt_dev >> 2, (peer->timeout.sec), (peer->timeout.usec)));
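/* Editorial sketch (not part of the original source): the fixed-point
 * arithmetic above, restated as a tiny standalone routine so the
 * scaling is easier to follow. srtt8 mirrors peer->rtt (1/8 ms units),
 * dev4 mirrors peer->rtt_dev (1/4 ms units), and sample8 is a new RTT
 * sample already converted with _8THMSEC(). */
static void
example_rtt_update(int *srtt8, int *dev4, int sample8)
{
    int delta = sample8 - *srtt8;
    int timeout_ms;

    *srtt8 += (delta >> 3);	/* srtt' = srtt + (rtt - srtt)/8 */
    if (delta < 0)
	delta = -delta;
    delta -= (*dev4 << 1);	/* |delta| - rttvar, on the 1/8 ms scale */
    *dev4 += (delta >> 3);	/* rttvar' = rttvar + (|delta| - rttvar)/4 */

    /* Retransmit timeout as computed above: RTT + 4*MDEV in
     * milliseconds, plus the fixed 200ms delayed-ACK allowance (the
     * rx_minPeerTimeout clamp is omitted here for brevity). */
    timeout_ms = (*srtt8 >> 3) + *dev4 + 200;
    (void)timeout_ms;
}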
6824 /* Find all server connections that have not been active for a long time, and
6825 * toss them */
6827 rxi_ReapConnections(struct rxevent *unused, void *unused1, void *unused2)
6829 struct clock now, when;
6830 clock_GetTime(&now);
6832 /* Find server connection structures that haven't been used for
6833 * greater than rx_idleConnectionTime */
6835 struct rx_connection **conn_ptr, **conn_end;
6836 int i, havecalls = 0;
6837 MUTEX_ENTER(&rx_connHashTable_lock);
6838 for (conn_ptr = &rx_connHashTable[0], conn_end =
6839 &rx_connHashTable[rx_hashTableSize]; conn_ptr < conn_end;
6841 struct rx_connection *conn, *next;
6842 struct rx_call *call;
6846 for (conn = *conn_ptr; conn; conn = next) {
6847 /* XXX -- Shouldn't the connection be locked? */
6850 for (i = 0; i < RX_MAXCALLS; i++) {
6851 call = conn->call[i];
6855 code = MUTEX_TRYENTER(&call->lock);
6858 #ifdef RX_ENABLE_LOCKS
6859 result = rxi_CheckCall(call, 1);
6860 #else /* RX_ENABLE_LOCKS */
6861 result = rxi_CheckCall(call);
6862 #endif /* RX_ENABLE_LOCKS */
6863 MUTEX_EXIT(&call->lock);
6865 /* If CheckCall freed the call, it might
6866 * have destroyed the connection as well,
6867 * which screws up the linked lists.
6873 if (conn->type == RX_SERVER_CONNECTION) {
6874 /* This only actually destroys the connection if
6875 * there are no outstanding calls */
6876 MUTEX_ENTER(&conn->conn_data_lock);
6877 MUTEX_ENTER(&rx_refcnt_mutex);
6878 if (!havecalls && !conn->refCount
6879 && ((conn->lastSendTime + rx_idleConnectionTime) <
6881 conn->refCount++; /* it will be decr in rx_DestroyConn */
6882 MUTEX_EXIT(&rx_refcnt_mutex);
6883 MUTEX_EXIT(&conn->conn_data_lock);
6884 #ifdef RX_ENABLE_LOCKS
6885 rxi_DestroyConnectionNoLock(conn);
6886 #else /* RX_ENABLE_LOCKS */
6887 rxi_DestroyConnection(conn);
6888 #endif /* RX_ENABLE_LOCKS */
6890 #ifdef RX_ENABLE_LOCKS
6892 MUTEX_EXIT(&rx_refcnt_mutex);
6893 MUTEX_EXIT(&conn->conn_data_lock);
6895 #endif /* RX_ENABLE_LOCKS */
6899 #ifdef RX_ENABLE_LOCKS
6900 while (rx_connCleanup_list) {
6901 struct rx_connection *conn;
6902 conn = rx_connCleanup_list;
6903 rx_connCleanup_list = rx_connCleanup_list->next;
6904 MUTEX_EXIT(&rx_connHashTable_lock);
6905 rxi_CleanupConnection(conn);
6906 MUTEX_ENTER(&rx_connHashTable_lock);
6908 MUTEX_EXIT(&rx_connHashTable_lock);
6909 #endif /* RX_ENABLE_LOCKS */
6912 /* Find any peer structures that haven't been used (haven't had an
6913 * associated connection) for greater than rx_idlePeerTime */
6915 struct rx_peer **peer_ptr, **peer_end;
6919 * Why do we need to hold the rx_peerHashTable_lock across
6920 * the incrementing of peer_ptr since the rx_peerHashTable
6921 * array is not changing? We don't.
6923 * By dropping the lock periodically we can permit other
6924 * activities to be performed while a rxi_ReapConnections
6925 * call is in progress. The goal of reap connections
6926 * is to clean up quickly without causing large amounts
6927 * of contention. Therefore, it is important that global
6928 * mutexes not be held for extended periods of time.
6930 for (peer_ptr = &rx_peerHashTable[0], peer_end =
6931 &rx_peerHashTable[rx_hashTableSize]; peer_ptr < peer_end;
6933 struct rx_peer *peer, *next, *prev;
6935 MUTEX_ENTER(&rx_peerHashTable_lock);
6936 for (prev = peer = *peer_ptr; peer; peer = next) {
6938 code = MUTEX_TRYENTER(&peer->peer_lock);
6939 if ((code) && (peer->refCount == 0)
6940 && ((peer->idleWhen + rx_idlePeerTime) < now.sec)) {
6941 rx_interface_stat_p rpc_stat, nrpc_stat;
6945 * now know that this peer object is one to be
6946 * removed from the hash table. Once it is removed
6947 * it can't be referenced by other threads.
6948 * Let's remove it first and decrement the struct
6949 * nPeerStructs count.
6951 if (peer == *peer_ptr) {
6957 if (rx_stats_active)
6958 rx_atomic_dec(&rx_stats.nPeerStructs);
6961 * Now if we hold references on 'prev' and 'next'
6962 * we can safely drop the rx_peerHashTable_lock
6963 * while we destroy this 'peer' object.
6969 MUTEX_EXIT(&rx_peerHashTable_lock);
6971 MUTEX_EXIT(&peer->peer_lock);
6972 MUTEX_DESTROY(&peer->peer_lock);
6974 (&peer->rpcStats, rpc_stat, nrpc_stat,
6975 rx_interface_stat)) {
6976 unsigned int num_funcs;
6979 queue_Remove(&rpc_stat->queue_header);
6980 queue_Remove(&rpc_stat->all_peers);
6981 num_funcs = rpc_stat->stats[0].func_total;
6983 sizeof(rx_interface_stat_t) +
6984 rpc_stat->stats[0].func_total *
6985 sizeof(rx_function_entry_v1_t);
6987 rxi_Free(rpc_stat, space);
6989 MUTEX_ENTER(&rx_rpc_stats);
6990 rxi_rpc_peer_stat_cnt -= num_funcs;
6991 MUTEX_EXIT(&rx_rpc_stats);
6996 * Regain the rx_peerHashTable_lock and
6997 * decrement the reference count on 'prev'
7000 MUTEX_ENTER(&rx_peerHashTable_lock);
7007 MUTEX_EXIT(&peer->peer_lock);
7012 MUTEX_EXIT(&rx_peerHashTable_lock);
7016 /* THIS HACK IS A TEMPORARY HACK. The idea is that the race condition in
7017 * rxi_AllocSendPacket, if it hits, will be handled at the next conn
7018 * GC, just below. Really, we shouldn't have to keep moving packets from
7019 * one place to another, but instead ought to always know if we can
7020 * afford to hold onto a packet in its particular use. */
7021 MUTEX_ENTER(&rx_freePktQ_lock);
7022 if (rx_waitingForPackets) {
7023 rx_waitingForPackets = 0;
7024 #ifdef RX_ENABLE_LOCKS
7025 CV_BROADCAST(&rx_waitingForPackets_cv);
7027 osi_rxWakeup(&rx_waitingForPackets);
7030 MUTEX_EXIT(&rx_freePktQ_lock);
7033 when.sec += RX_REAP_TIME; /* Check every RX_REAP_TIME seconds */
7034 rxevent_Post(&when, rxi_ReapConnections, 0, 0);
7038 /* rxs_Release - This isn't strictly necessary but, since the macro name from
7039 * rx.h is sort of strange, this is better. This is called with a security
7040 * object before it is discarded. Each connection using a security object has
7041 * its own refcount to the object so it won't actually be freed until the last
7042 * connection is destroyed.
7044 * This is the only rxs module call. A hold could also be written but no one
7045 * needs it. */
7048 rxs_Release(struct rx_securityClass *aobj)
7050 return RXS_Close(aobj);
7054 #define RXRATE_PKT_OH (RX_HEADER_SIZE + RX_IPUDP_SIZE)
7055 #define RXRATE_SMALL_PKT (RXRATE_PKT_OH + sizeof(struct rx_ackPacket))
7056 #define RXRATE_AVG_SMALL_PKT (RXRATE_PKT_OH + (sizeof(struct rx_ackPacket)/2))
7057 #define RXRATE_LARGE_PKT (RXRATE_SMALL_PKT + 256)
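/* Editorial note (sizes assumed from the standard rx headers, where
 * RX_HEADER_SIZE and RX_IPUDP_SIZE are both 28 bytes): RXRATE_PKT_OH
 * is then 56 bytes of per-packet overhead, and RXRATE_LARGE_PKT marks
 * the smallest packet the rate code below treats as "full size". */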
7059 /* Adjust our estimate of the transmission rate to this peer, given
7060 * that the packet p was just acked. We can adjust peer->timeout and
7061 * call->twind. Pragmatically, this is called
7062 * only with packets of maximal length.
7063 * Called with peer and call locked.
7067 rxi_ComputeRate(struct rx_peer *peer, struct rx_call *call,
7068 struct rx_packet *p, struct rx_packet *ackp, u_char ackReason)
7070 afs_int32 xferSize, xferMs;
7074 /* Count down packets */
7075 if (peer->rateFlag > 0)
7077 /* Do nothing until we're enabled */
7078 if (peer->rateFlag != 0)
7083 /* Count only when the ack seems legitimate */
7084 switch (ackReason) {
7085 case RX_ACK_REQUESTED:
7086 xferSize =
7087 p->length + RX_HEADER_SIZE + call->conn->securityMaxTrailerSize;
7091 case RX_ACK_PING_RESPONSE:
7092 if (p) /* want the response to ping-request, not data send */
7094 clock_GetTime(&newTO);
7095 if (clock_Gt(&newTO, &call->pingRequestTime)) {
7096 clock_Sub(&newTO, &call->pingRequestTime);
7097 xferMs = (newTO.sec * 1000) + (newTO.usec / 1000);
7101 xferSize = rx_AckDataSize(rx_maxSendWindow) + RX_HEADER_SIZE;
7108 dpf(("CONG peer %lx/%u: sample (%s) size %ld, %ld ms (to %d.%06d, rtt %u, ps %u)\n",
7109 ntohl(peer->host), ntohs(peer->port), (ackReason == RX_ACK_REQUESTED ? "dataack" : "pingack"),
7110 xferSize, xferMs, peer->timeout.sec, peer->timeout.usec, peer->smRtt, peer->ifMTU));
7112 /* Track only packets that are big enough. */
7113 if ((p->length + RX_HEADER_SIZE + call->conn->securityMaxTrailerSize) <
7117 /* absorb RTT data (in milliseconds) for these big packets */
7118 if (peer->smRtt == 0) {
7119 peer->smRtt = xferMs;
7121 peer->smRtt = ((peer->smRtt * 15) + xferMs + 4) >> 4;
7126 if (peer->countDown) {
7130 peer->countDown = 10; /* recalculate only every so often */
7132 /* In practice, we can measure only the RTT for full packets,
7133 * because of the way Rx acks the data that it receives. (If it's
7134 * smaller than a full packet, it often gets implicitly acked
7135 * either by the call response (from a server) or by the next call
7136 * (from a client), and either case confuses transmission times
7137 * with processing times.) Therefore, replace the above
7138 * more-sophisticated processing with a simpler version, where the
7139 * smoothed RTT is kept for full-size packets, and the time to
7140 * transmit a windowful of full-size packets is simply RTT *
7141 * windowSize. Again, we take two steps:
7142 - ensure the timeout is large enough for a single packet's RTT;
7143 - ensure that the window is small enough to fit in the desired timeout.*/
7145 /* First, the timeout check. */
7146 minTime = peer->smRtt;
7147 /* Get a reasonable estimate for a timeout period */
7149 newTO.sec = minTime / 1000;
7150 newTO.usec = (minTime - (newTO.sec * 1000)) * 1000;
7152 /* Increase the timeout period so that we can always do at least
7153 * one packet exchange */
7154 if (clock_Gt(&newTO, &peer->timeout)) {
7156 dpf(("CONG peer %lx/%u: timeout %d.%06d ==> %ld.%06d (rtt %u)\n",
7157 ntohl(peer->host), ntohs(peer->port), peer->timeout.sec, peer->timeout.usec,
7158 newTO.sec, newTO.usec, peer->smRtt));
7160 peer->timeout = newTO;
7163 /* Now, get an estimate for the transmit window size. */
7164 minTime = peer->timeout.sec * 1000 + (peer->timeout.usec / 1000);
7165 /* Now, convert to the number of full packets that could fit in a
7166 * reasonable fraction of that interval */
7167 minTime /= (peer->smRtt << 1);
7168 minTime = MAX(minTime, rx_minPeerTimeout);
7169 xferSize = minTime; /* (make a copy) */
7171 /* Now clamp the size to reasonable bounds. */
7174 else if (minTime > rx_maxSendWindow)
7175 minTime = rx_maxSendWindow;
7176 /* if (minTime != peer->maxWindow) {
7177 dpf(("CONG peer %lx/%u: windowsize %lu ==> %lu (to %lu.%06lu, rtt %u)\n",
7178 ntohl(peer->host), ntohs(peer->port), peer->maxWindow, minTime,
7179 peer->timeout.sec, peer->timeout.usec, peer->smRtt));
7180 peer->maxWindow = minTime;
7181 elide... call->twind = minTime;
7185 /* Cut back on the peer timeout if it had earlier grown unreasonably.
7186 * Discern this by calculating the timeout necessary for rx_Window
7187 * packets. */
7188 if ((xferSize > rx_maxSendWindow) && (peer->timeout.sec >= 3)) {
7189 /* calculate estimate for transmission interval in milliseconds */
7190 minTime = rx_maxSendWindow * peer->smRtt;
7191 if (minTime < 1000) {
7192 dpf(("CONG peer %lx/%u: cut TO %d.%06d by 0.5 (rtt %u)\n",
7193 ntohl(peer->host), ntohs(peer->port), peer->timeout.sec,
7194 peer->timeout.usec, peer->smRtt));
7196 newTO.sec = 0; /* cut back on timeout by half a second */
7197 newTO.usec = 500000;
7198 clock_Sub(&peer->timeout, &newTO);
7203 } /* end of rxi_ComputeRate */
7204 #endif /* ADAPT_WINDOW */
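/* Editorial sketch (not part of the original source): the window
 * estimate from rxi_ComputeRate above in isolation. min_window and
 * max_window stand in for the lower and upper clamps used there (the
 * upper one being rx_maxSendWindow); all names here are local to the
 * example. */
static int
example_window_estimate(int timeout_ms, int smRtt_ms,
			int min_window, int max_window)
{
    int npackets;

    if (smRtt_ms <= 0)
	smRtt_ms = 1;		/* guard for the division below */
    /* packets that fit in half the timeout interval, as above */
    npackets = timeout_ms / (smRtt_ms << 1);
    if (npackets < min_window)
	npackets = min_window;	/* lower clamp */
    if (npackets > max_window)
	npackets = max_window;	/* upper clamp, cf. rx_maxSendWindow */
    return npackets;
}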
7212 #define TRACE_OPTION_RX_DEBUG 16
7220 code = RegOpenKeyEx(HKEY_LOCAL_MACHINE, AFSREG_CLT_SVC_PARAM_SUBKEY,
7221 0, KEY_QUERY_VALUE, &parmKey);
7222 if (code != ERROR_SUCCESS)
7225 dummyLen = sizeof(TraceOption);
7226 code = RegQueryValueEx(parmKey, "TraceOption", NULL, NULL,
7227 (BYTE *) &TraceOption, &dummyLen);
7228 if (code == ERROR_SUCCESS) {
7229 rxdebug_active = (TraceOption & TRACE_OPTION_RX_DEBUG) ? 1 : 0;
7231 RegCloseKey (parmKey);
7232 #endif /* AFS_NT40_ENV */
7237 rx_DebugOnOff(int on)
7241 rxdebug_active = on;
7247 rx_StatsOnOff(int on)
7249 rx_stats_active = on;
7253 /* Don't call this debugging routine directly; use dpf */
7255 rxi_DebugPrint(char *format, ...)
7264 va_start(ap, format);
7266 len = _snprintf(tformat, sizeof(tformat), "tid[%d] %s", GetCurrentThreadId(), format);
7269 len = _vsnprintf(msg, sizeof(msg)-2, tformat, ap);
7271 OutputDebugString(msg);
7277 va_start(ap, format);
7279 clock_GetTime(&now);
7280 fprintf(rx_Log, " %d.%06d:", (unsigned int)now.sec,
7281 (unsigned int)now.usec);
7282 vfprintf(rx_Log, format, ap);
7290 * This function is used to process the rx_stats structure that is local
7291 * to a process as well as an rx_stats structure received from a remote
7292 * process (via rxdebug). Therefore, it needs to do minimal version
7293 * checking.
7294 */
7296 rx_PrintTheseStats(FILE * file, struct rx_statistics *s, int size,
7297 afs_int32 freePackets, char version)
7301 if (size != sizeof(struct rx_statistics)) {
7303 "Unexpected size of stats structure: was %d, expected %" AFS_SIZET_FMT "\n",
7304 size, sizeof(struct rx_statistics));
7307 fprintf(file, "rx stats: free packets %d, allocs %d, ", (int)freePackets,
7310 if (version >= RX_DEBUGI_VERSION_W_NEWPACKETTYPES) {
7311 fprintf(file, "alloc-failures(rcv %u/%u,send %u/%u,ack %u)\n",
7312 s->receivePktAllocFailures, s->receiveCbufPktAllocFailures,
7313 s->sendPktAllocFailures, s->sendCbufPktAllocFailures,
7314 s->specialPktAllocFailures);
7316 fprintf(file, "alloc-failures(rcv %u,send %u,ack %u)\n",
7317 s->receivePktAllocFailures, s->sendPktAllocFailures,
7318 s->specialPktAllocFailures);
7322 " greedy %u, " "bogusReads %u (last from host %x), "
7323 "noPackets %u, " "noBuffers %u, " "selects %u, "
7324 "sendSelects %u\n", s->socketGreedy, s->bogusPacketOnRead,
7325 s->bogusHost, s->noPacketOnRead, s->noPacketBuffersOnRead,
7326 s->selects, s->sendSelects);
7328 fprintf(file, " packets read: ");
7329 for (i = 0; i < RX_N_PACKET_TYPES; i++) {
7330 fprintf(file, "%s %u ", rx_packetTypes[i], s->packetsRead[i]);
7332 fprintf(file, "\n");
7335 " other read counters: data %u, " "ack %u, " "dup %u "
7336 "spurious %u " "dally %u\n", s->dataPacketsRead,
7337 s->ackPacketsRead, s->dupPacketsRead, s->spuriousPacketsRead,
7338 s->ignorePacketDally);
7340 fprintf(file, " packets sent: ");
7341 for (i = 0; i < RX_N_PACKET_TYPES; i++) {
7342 fprintf(file, "%s %u ", rx_packetTypes[i], s->packetsSent[i]);
7344 fprintf(file, "\n");
7347 " other send counters: ack %u, " "data %u (not resends), "
7348 "resends %u, " "pushed %u, " "acked&ignored %u\n",
7349 s->ackPacketsSent, s->dataPacketsSent, s->dataPacketsReSent,
7350 s->dataPacketsPushed, s->ignoreAckedPacket);
7353 " \t(these should be small) sendFailed %u, " "fatalErrors %u\n",
7354 s->netSendFailures, (int)s->fatalErrors);
7356 if (s->nRttSamples) {
7357 fprintf(file, " Average rtt is %0.3f, with %d samples\n",
7358 clock_Float(&s->totalRtt) / s->nRttSamples, s->nRttSamples);
7360 fprintf(file, " Minimum rtt is %0.3f, maximum is %0.3f\n",
7361 clock_Float(&s->minRtt), clock_Float(&s->maxRtt));
7365 " %d server connections, " "%d client connections, "
7366 "%d peer structs, " "%d call structs, " "%d free call structs\n",
7367 s->nServerConns, s->nClientConns, s->nPeerStructs,
7368 s->nCallStructs, s->nFreeCallStructs);
7370 #if !defined(AFS_PTHREAD_ENV) && !defined(AFS_USE_GETTIMEOFDAY)
7371 fprintf(file, " %d clock updates\n", clock_nUpdates);
7375 /* for backward compatibility */
7377 rx_PrintStats(FILE * file)
7379 MUTEX_ENTER(&rx_stats_mutex);
7380 rx_PrintTheseStats(file, (struct rx_statistics *) &rx_stats,
7381 sizeof(rx_stats), rx_nFreePackets,
7383 MUTEX_EXIT(&rx_stats_mutex);
7387 rx_PrintPeerStats(FILE * file, struct rx_peer *peer)
7389 fprintf(file, "Peer %x.%d. " "Burst size %d, " "burst wait %d.%06d.\n",
7390 ntohl(peer->host), (int)ntohs(peer->port), (int)peer->burstSize,
7391 (int)peer->burstWait.sec, (int)peer->burstWait.usec);
7394 " Rtt %d, " "retry time %u.%06d, " "total sent %d, "
7395 "resent %d\n", peer->rtt, (int)peer->timeout.sec,
7396 (int)peer->timeout.usec, peer->nSent, peer->reSends);
7399 " Packet size %d, " "max in packet skew %d, "
7400 "max out packet skew %d\n", peer->ifMTU, (int)peer->inPacketSkew,
7401 (int)peer->outPacketSkew);
7405 #if defined(AFS_PTHREAD_ENV) && defined(RXDEBUG)
7407 * This mutex protects the following static variables:
7408 * counter
7409 */
7411 #define LOCK_RX_DEBUG MUTEX_ENTER(&rx_debug_mutex)
7412 #define UNLOCK_RX_DEBUG MUTEX_EXIT(&rx_debug_mutex)
7414 #define LOCK_RX_DEBUG
7415 #define UNLOCK_RX_DEBUG
7416 #endif /* AFS_PTHREAD_ENV */
7418 #if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
7420 MakeDebugCall(osi_socket socket, afs_uint32 remoteAddr, afs_uint16 remotePort,
7421 u_char type, void *inputData, size_t inputLength,
7422 void *outputData, size_t outputLength)
7424 static afs_int32 counter = 100;
7425 time_t waitTime, waitCount;
7426 struct rx_header theader;
7429 struct timeval tv_now, tv_wake, tv_delta;
7430 struct sockaddr_in taddr, faddr;
7444 tp = &tbuffer[sizeof(struct rx_header)];
7445 taddr.sin_family = AF_INET;
7446 taddr.sin_port = remotePort;
7447 taddr.sin_addr.s_addr = remoteAddr;
7448 #ifdef STRUCT_SOCKADDR_HAS_SA_LEN
7449 taddr.sin_len = sizeof(struct sockaddr_in);
7452 memset(&theader, 0, sizeof(theader));
7453 theader.epoch = htonl(999);
7455 theader.callNumber = htonl(counter);
7458 theader.type = type;
7459 theader.flags = RX_CLIENT_INITIATED | RX_LAST_PACKET;
7460 theader.serviceId = 0;
7462 memcpy(tbuffer, &theader, sizeof(theader));
7463 memcpy(tp, inputData, inputLength);
7465 sendto(socket, tbuffer, inputLength + sizeof(struct rx_header), 0,
7466 (struct sockaddr *)&taddr, sizeof(struct sockaddr_in));
7468 /* see if there's a packet available */
7469 gettimeofday(&tv_wake, NULL);
7470 tv_wake.tv_sec += waitTime;
7473 FD_SET(socket, &imask);
7474 tv_delta.tv_sec = tv_wake.tv_sec;
7475 tv_delta.tv_usec = tv_wake.tv_usec;
7476 gettimeofday(&tv_now, NULL);
7478 if (tv_delta.tv_usec < tv_now.tv_usec) {
7480 tv_delta.tv_usec += 1000000;
7483 tv_delta.tv_usec -= tv_now.tv_usec;
7485 if (tv_delta.tv_sec < tv_now.tv_sec) {
7489 tv_delta.tv_sec -= tv_now.tv_sec;
7492 code = select(0, &imask, 0, 0, &tv_delta);
7493 #else /* AFS_NT40_ENV */
7494 code = select(socket + 1, &imask, 0, 0, &tv_delta);
7495 #endif /* AFS_NT40_ENV */
7496 if (code == 1 && FD_ISSET(socket, &imask)) {
7497 /* now receive a packet */
7498 faddrLen = sizeof(struct sockaddr_in);
7500 recvfrom(socket, tbuffer, sizeof(tbuffer), 0,
7501 (struct sockaddr *)&faddr, &faddrLen);
7504 memcpy(&theader, tbuffer, sizeof(struct rx_header));
7505 if (counter == ntohl(theader.callNumber))
7513 /* see if we've timed out */
7521 code -= sizeof(struct rx_header);
7522 if (code > outputLength)
7523 code = outputLength;
7524 memcpy(outputData, tp, code);
7527 #endif /* RXDEBUG */
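/* Editorial usage sketch (not part of the original source; 'addr' and
 * 'port' are hypothetical caller-supplied values, and the socket is
 * assumed to be an already-bound UDP socket): a version probe is just
 * a MakeDebugCall with the VERSION packet type and a one-byte payload,
 * which is also how rx_GetServerVersion below is built. */
#if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
static void
example_query_version(osi_socket sock, afs_uint32 addr, afs_uint16 port)
{
    char pad[1] = { 0 };
    char version[64];
    afs_int32 nbytes = MakeDebugCall(sock, addr, port,
				     RX_PACKET_TYPE_VERSION,
				     pad, 1, version, sizeof(version));
    if (nbytes >= 0)
	version[sizeof(version) - 1] = '\0';	/* response is the version string */
}
#endif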
7530 rx_GetServerDebug(osi_socket socket, afs_uint32 remoteAddr,
7531 afs_uint16 remotePort, struct rx_debugStats * stat,
7532 afs_uint32 * supportedValues)
7534 #if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
7536 struct rx_debugIn in;
7538 *supportedValues = 0;
7539 in.type = htonl(RX_DEBUGI_GETSTATS);
7542 rc = MakeDebugCall(socket, remoteAddr, remotePort, RX_PACKET_TYPE_DEBUG,
7543 &in, sizeof(in), stat, sizeof(*stat));
7546 * If the call was successful, fix up the version and indicate
7547 * what contents of the stat structure are valid.
7548 * Also do net to host conversion of fields here.
7552 if (stat->version >= RX_DEBUGI_VERSION_W_SECSTATS) {
7553 *supportedValues |= RX_SERVER_DEBUG_SEC_STATS;
7555 if (stat->version >= RX_DEBUGI_VERSION_W_GETALLCONN) {
7556 *supportedValues |= RX_SERVER_DEBUG_ALL_CONN;
7558 if (stat->version >= RX_DEBUGI_VERSION_W_RXSTATS) {
7559 *supportedValues |= RX_SERVER_DEBUG_RX_STATS;
7561 if (stat->version >= RX_DEBUGI_VERSION_W_WAITERS) {
7562 *supportedValues |= RX_SERVER_DEBUG_WAITER_CNT;
7564 if (stat->version >= RX_DEBUGI_VERSION_W_IDLETHREADS) {
7565 *supportedValues |= RX_SERVER_DEBUG_IDLE_THREADS;
7567 if (stat->version >= RX_DEBUGI_VERSION_W_NEWPACKETTYPES) {
7568 *supportedValues |= RX_SERVER_DEBUG_NEW_PACKETS;
7570 if (stat->version >= RX_DEBUGI_VERSION_W_GETPEER) {
7571 *supportedValues |= RX_SERVER_DEBUG_ALL_PEER;
7573 if (stat->version >= RX_DEBUGI_VERSION_W_WAITED) {
7574 *supportedValues |= RX_SERVER_DEBUG_WAITED_CNT;
7576 if (stat->version >= RX_DEBUGI_VERSION_W_PACKETS) {
7577 *supportedValues |= RX_SERVER_DEBUG_PACKETS_CNT;
7579 stat->nFreePackets = ntohl(stat->nFreePackets);
7580 stat->packetReclaims = ntohl(stat->packetReclaims);
7581 stat->callsExecuted = ntohl(stat->callsExecuted);
7582 stat->nWaiting = ntohl(stat->nWaiting);
7583 stat->idleThreads = ntohl(stat->idleThreads);
7584 stat->nWaited = ntohl(stat->nWaited);
7585 stat->nPackets = ntohl(stat->nPackets);
7594 rx_GetServerStats(osi_socket socket, afs_uint32 remoteAddr,
7595 afs_uint16 remotePort, struct rx_statistics * stat,
7596 afs_uint32 * supportedValues)
7598 #if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
7600 struct rx_debugIn in;
7601 afs_int32 *lp = (afs_int32 *) stat;
7605 * supportedValues is currently unused, but added to allow future
7606 * versioning of this function.
7609 *supportedValues = 0;
7610 in.type = htonl(RX_DEBUGI_RXSTATS);
7612 memset(stat, 0, sizeof(*stat));
7614 rc = MakeDebugCall(socket, remoteAddr, remotePort, RX_PACKET_TYPE_DEBUG,
7615 &in, sizeof(in), stat, sizeof(*stat));
7620 * Do net to host conversion here
7623 for (i = 0; i < sizeof(*stat) / sizeof(afs_int32); i++, lp++) {
7624 *lp = ntohl(*lp);
7625 }
7634 rx_GetServerVersion(osi_socket socket, afs_uint32 remoteAddr,
7635 afs_uint16 remotePort, size_t version_length,
7638 #if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
7639 char a[1] = { 0 };
7640 return MakeDebugCall(socket, remoteAddr, remotePort,
7641 RX_PACKET_TYPE_VERSION, a, 1, version,
7642 version_length);
7649 rx_GetServerConnections(osi_socket socket, afs_uint32 remoteAddr,
7650 afs_uint16 remotePort, afs_int32 * nextConnection,
7651 int allConnections, afs_uint32 debugSupportedValues,
7652 struct rx_debugConn * conn,
7653 afs_uint32 * supportedValues)
7655 #if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
7657 struct rx_debugIn in;
7661 * supportedValues is currently unused, but added to allow future
7662 * versioning of this function.
7665 *supportedValues = 0;
7666 if (allConnections) {
7667 in.type = htonl(RX_DEBUGI_GETALLCONN);
7669 in.type = htonl(RX_DEBUGI_GETCONN);
7671 in.index = htonl(*nextConnection);
7672 memset(conn, 0, sizeof(*conn));
7674 rc = MakeDebugCall(socket, remoteAddr, remotePort, RX_PACKET_TYPE_DEBUG,
7675 &in, sizeof(in), conn, sizeof(*conn));
7678 *nextConnection += 1;
7681 * Convert old connection format to new structure.
7684 if (debugSupportedValues & RX_SERVER_DEBUG_OLD_CONN) {
7685 struct rx_debugConn_vL *vL = (struct rx_debugConn_vL *)conn;
7686 #define MOVEvL(a) (conn->a = vL->a)
7688 /* any old or unrecognized version... */
7689 for (i = 0; i < RX_MAXCALLS; i++) {
7690 MOVEvL(callState[i]);
7691 MOVEvL(callMode[i]);
7692 MOVEvL(callFlags[i]);
7693 MOVEvL(callOther[i]);
7695 if (debugSupportedValues & RX_SERVER_DEBUG_SEC_STATS) {
7696 MOVEvL(secStats.type);
7697 MOVEvL(secStats.level);
7698 MOVEvL(secStats.flags);
7699 MOVEvL(secStats.expires);
7700 MOVEvL(secStats.packetsReceived);
7701 MOVEvL(secStats.packetsSent);
7702 MOVEvL(secStats.bytesReceived);
7703 MOVEvL(secStats.bytesSent);
7708 * Do net to host conversion here
7710 * I don't convert host or port since we are most likely
7711 * going to want these in NBO.
7713 conn->cid = ntohl(conn->cid);
7714 conn->serial = ntohl(conn->serial);
7715 for (i = 0; i < RX_MAXCALLS; i++) {
7716 conn->callNumber[i] = ntohl(conn->callNumber[i]);
7718 conn->error = ntohl(conn->error);
7719 conn->secStats.flags = ntohl(conn->secStats.flags);
7720 conn->secStats.expires = ntohl(conn->secStats.expires);
7721 conn->secStats.packetsReceived =
7722 ntohl(conn->secStats.packetsReceived);
7723 conn->secStats.packetsSent = ntohl(conn->secStats.packetsSent);
7724 conn->secStats.bytesReceived = ntohl(conn->secStats.bytesReceived);
7725 conn->secStats.bytesSent = ntohl(conn->secStats.bytesSent);
7726 conn->epoch = ntohl(conn->epoch);
7727 conn->natMTU = ntohl(conn->natMTU);
7736 rx_GetServerPeers(osi_socket socket, afs_uint32 remoteAddr,
7737 afs_uint16 remotePort, afs_int32 * nextPeer,
7738 afs_uint32 debugSupportedValues, struct rx_debugPeer * peer,
7739 afs_uint32 * supportedValues)
7741 #if defined(RXDEBUG) || defined(MAKEDEBUGCALL)
7743 struct rx_debugIn in;
7746 * supportedValues is currently unused, but added to allow future
7747 * versioning of this function.
7750 *supportedValues = 0;
7751 in.type = htonl(RX_DEBUGI_GETPEER);
7752 in.index = htonl(*nextPeer);
7753 memset(peer, 0, sizeof(*peer));
7755 rc = MakeDebugCall(socket, remoteAddr, remotePort, RX_PACKET_TYPE_DEBUG,
7756 &in, sizeof(in), peer, sizeof(*peer));
7762 * Do net to host conversion here
7764 * I don't convert host or port since we are most likely
7765 * going to want these in NBO.
7767 peer->ifMTU = ntohs(peer->ifMTU);
7768 peer->idleWhen = ntohl(peer->idleWhen);
7769 peer->refCount = ntohs(peer->refCount);
7770 peer->burstWait.sec = ntohl(peer->burstWait.sec);
7771 peer->burstWait.usec = ntohl(peer->burstWait.usec);
7772 peer->rtt = ntohl(peer->rtt);
7773 peer->rtt_dev = ntohl(peer->rtt_dev);
7774 peer->timeout.sec = ntohl(peer->timeout.sec);
7775 peer->timeout.usec = ntohl(peer->timeout.usec);
7776 peer->nSent = ntohl(peer->nSent);
7777 peer->reSends = ntohl(peer->reSends);
7778 peer->inPacketSkew = ntohl(peer->inPacketSkew);
7779 peer->outPacketSkew = ntohl(peer->outPacketSkew);
7780 peer->rateFlag = ntohl(peer->rateFlag);
7781 peer->natMTU = ntohs(peer->natMTU);
7782 peer->maxMTU = ntohs(peer->maxMTU);
7783 peer->maxDgramPackets = ntohs(peer->maxDgramPackets);
7784 peer->ifDgramPackets = ntohs(peer->ifDgramPackets);
7785 peer->MTU = ntohs(peer->MTU);
7786 peer->cwind = ntohs(peer->cwind);
7787 peer->nDgramPackets = ntohs(peer->nDgramPackets);
7788 peer->congestSeq = ntohs(peer->congestSeq);
7789 peer->bytesSent.high = ntohl(peer->bytesSent.high);
7790 peer->bytesSent.low = ntohl(peer->bytesSent.low);
7791 peer->bytesReceived.high = ntohl(peer->bytesReceived.high);
7792 peer->bytesReceived.low = ntohl(peer->bytesReceived.low);
7801 rx_GetLocalPeers(afs_uint32 peerHost, afs_uint16 peerPort,
7802 struct rx_debugPeer * peerStats)
7805 afs_int32 error = 1; /* default to "did not succeed" */
7806 afs_uint32 hashValue = PEER_HASH(peerHost, peerPort);
7808 MUTEX_ENTER(&rx_peerHashTable_lock);
7809 for(tp = rx_peerHashTable[hashValue];
7810 tp != NULL; tp = tp->next) {
7811 if (tp->host == peerHost)
7812 break;
7817 MUTEX_EXIT(&rx_peerHashTable_lock);
7821 MUTEX_ENTER(&tp->peer_lock);
7822 peerStats->host = tp->host;
7823 peerStats->port = tp->port;
7824 peerStats->ifMTU = tp->ifMTU;
7825 peerStats->idleWhen = tp->idleWhen;
7826 peerStats->refCount = tp->refCount;
7827 peerStats->burstSize = tp->burstSize;
7828 peerStats->burst = tp->burst;
7829 peerStats->burstWait.sec = tp->burstWait.sec;
7830 peerStats->burstWait.usec = tp->burstWait.usec;
7831 peerStats->rtt = tp->rtt;
7832 peerStats->rtt_dev = tp->rtt_dev;
7833 peerStats->timeout.sec = tp->timeout.sec;
7834 peerStats->timeout.usec = tp->timeout.usec;
7835 peerStats->nSent = tp->nSent;
7836 peerStats->reSends = tp->reSends;
7837 peerStats->inPacketSkew = tp->inPacketSkew;
7838 peerStats->outPacketSkew = tp->outPacketSkew;
7839 peerStats->rateFlag = tp->rateFlag;
7840 peerStats->natMTU = tp->natMTU;
7841 peerStats->maxMTU = tp->maxMTU;
7842 peerStats->maxDgramPackets = tp->maxDgramPackets;
7843 peerStats->ifDgramPackets = tp->ifDgramPackets;
7844 peerStats->MTU = tp->MTU;
7845 peerStats->cwind = tp->cwind;
7846 peerStats->nDgramPackets = tp->nDgramPackets;
7847 peerStats->congestSeq = tp->congestSeq;
7848 peerStats->bytesSent.high = tp->bytesSent.high;
7849 peerStats->bytesSent.low = tp->bytesSent.low;
7850 peerStats->bytesReceived.high = tp->bytesReceived.high;
7851 peerStats->bytesReceived.low = tp->bytesReceived.low;
7852 MUTEX_EXIT(&tp->peer_lock);
7854 MUTEX_ENTER(&rx_peerHashTable_lock);
7857 MUTEX_EXIT(&rx_peerHashTable_lock);
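/* Editorial usage sketch (not part of the original source; the host
 * and port arguments are hypothetical and are passed in the same byte
 * order rx stores them): fetch a snapshot of the stats rx keeps for
 * one peer. rx_GetLocalPeers returns 0 on success and nonzero when no
 * matching peer exists. */
static void
example_dump_peer(afs_uint32 host_nbo, afs_uint16 port_nbo)
{
    struct rx_debugPeer stats;
    if (rx_GetLocalPeers(host_nbo, port_nbo, &stats) == 0) {
	/* fields are already in host order; print a couple of them */
	printf("rtt=%u resends=%u\n", stats.rtt, stats.reSends);
    }
}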
7865 struct rx_serverQueueEntry *np;
7868 struct rx_call *call;
7869 struct rx_serverQueueEntry *sq;
7873 if (rxinit_status == 1) {
7875 return; /* Already shutdown. */
7879 #ifndef AFS_PTHREAD_ENV
7880 FD_ZERO(&rx_selectMask);
7881 #endif /* AFS_PTHREAD_ENV */
7882 rxi_dataQuota = RX_MAX_QUOTA;
7883 #ifndef AFS_PTHREAD_ENV
7885 #endif /* AFS_PTHREAD_ENV */
7888 #ifndef AFS_PTHREAD_ENV
7889 #ifndef AFS_USE_GETTIMEOFDAY
7891 #endif /* AFS_USE_GETTIMEOFDAY */
7892 #endif /* AFS_PTHREAD_ENV */
7894 while (!queue_IsEmpty(&rx_freeCallQueue)) {
7895 call = queue_First(&rx_freeCallQueue, rx_call);
7897 rxi_Free(call, sizeof(struct rx_call));
7900 while (!queue_IsEmpty(&rx_idleServerQueue)) {
7901 sq = queue_First(&rx_idleServerQueue, rx_serverQueueEntry);
7907 struct rx_peer **peer_ptr, **peer_end;
7908 for (peer_ptr = &rx_peerHashTable[0], peer_end =
7909 &rx_peerHashTable[rx_hashTableSize]; peer_ptr < peer_end;
7911 struct rx_peer *peer, *next;
7913 MUTEX_ENTER(&rx_peerHashTable_lock);
7914 for (peer = *peer_ptr; peer; peer = next) {
7915 rx_interface_stat_p rpc_stat, nrpc_stat;
7918 MUTEX_ENTER(&rx_rpc_stats);
7919 MUTEX_ENTER(&peer->peer_lock);
7921 (&peer->rpcStats, rpc_stat, nrpc_stat,
7922 rx_interface_stat)) {
7923 unsigned int num_funcs;
7926 queue_Remove(&rpc_stat->queue_header);
7927 queue_Remove(&rpc_stat->all_peers);
7928 num_funcs = rpc_stat->stats[0].func_total;
7930 sizeof(rx_interface_stat_t) +
7931 rpc_stat->stats[0].func_total *
7932 sizeof(rx_function_entry_v1_t);
7934 rxi_Free(rpc_stat, space);
7936 /* rx_rpc_stats must be held */
7937 rxi_rpc_peer_stat_cnt -= num_funcs;
7939 MUTEX_EXIT(&peer->peer_lock);
7940 MUTEX_EXIT(&rx_rpc_stats);
7944 if (rx_stats_active)
7945 rx_atomic_dec(&rx_stats.nPeerStructs);
7947 MUTEX_EXIT(&rx_peerHashTable_lock);
7950 for (i = 0; i < RX_MAX_SERVICES; i++) {
7951 if (rx_services[i])
7952 rxi_Free(rx_services[i], sizeof(*rx_services[i]));
7954 for (i = 0; i < rx_hashTableSize; i++) {
7955 struct rx_connection *tc, *ntc;
7956 MUTEX_ENTER(&rx_connHashTable_lock);
7957 for (tc = rx_connHashTable[i]; tc; tc = ntc) {
7958 ntc = tc->next;
7959 for (j = 0; j < RX_MAXCALLS; j++) {
7960 if (tc->call[j]) {
7961 rxi_Free(tc->call[j], sizeof(*tc->call[j]));
7962 }
7963 }
7964 rxi_Free(tc, sizeof(*tc));
7966 MUTEX_EXIT(&rx_connHashTable_lock);
7969 MUTEX_ENTER(&freeSQEList_lock);
7971 while ((np = rx_FreeSQEList)) {
7972 rx_FreeSQEList = *(struct rx_serverQueueEntry **)np;
7973 MUTEX_DESTROY(&np->lock);
7974 rxi_Free(np, sizeof(*np));
7977 MUTEX_EXIT(&freeSQEList_lock);
7978 MUTEX_DESTROY(&freeSQEList_lock);
7979 MUTEX_DESTROY(&rx_freeCallQueue_lock);
7980 MUTEX_DESTROY(&rx_connHashTable_lock);
7981 MUTEX_DESTROY(&rx_peerHashTable_lock);
7982 MUTEX_DESTROY(&rx_serverPool_lock);
7984 osi_Free(rx_connHashTable,
7985 rx_hashTableSize * sizeof(struct rx_connection *));
7986 osi_Free(rx_peerHashTable, rx_hashTableSize * sizeof(struct rx_peer *));
7988 UNPIN(rx_connHashTable,
7989 rx_hashTableSize * sizeof(struct rx_connection *));
7990 UNPIN(rx_peerHashTable, rx_hashTableSize * sizeof(struct rx_peer *));
7992 rxi_FreeAllPackets();
7994 MUTEX_ENTER(&rx_quota_mutex);
7995 rxi_dataQuota = RX_MAX_QUOTA;
7996 rxi_availProcs = rxi_totalMin = rxi_minDeficit = 0;
7997 MUTEX_EXIT(&rx_quota_mutex);
#ifdef RX_ENABLE_LOCKS
void
osirx_AssertMine(afs_kmutex_t * lockaddr, char *msg)
{
    if (!MUTEX_ISMINE(lockaddr))
	osi_Panic("Lock not held: %s", msg);
}
#endif /* RX_ENABLE_LOCKS */
#ifndef KERNEL

/*
 * Routines to implement connection specific data.
 */

int
rx_KeyCreate(rx_destructor_t rtn)
{
    int key;
    MUTEX_ENTER(&rxi_keyCreate_lock);
    key = rxi_keyCreate_counter++;
    rxi_keyCreate_destructor = (rx_destructor_t *)
	realloc((void *)rxi_keyCreate_destructor,
		(key + 1) * sizeof(rx_destructor_t));
    rxi_keyCreate_destructor[key] = rtn;
    MUTEX_EXIT(&rxi_keyCreate_lock);
    return key;
}
void
rx_SetSpecific(struct rx_connection *conn, int key, void *ptr)
{
    int i;
    MUTEX_ENTER(&conn->conn_data_lock);
    if (!conn->specific) {
	conn->specific = (void **)malloc((key + 1) * sizeof(void *));
	for (i = 0; i < key; i++)
	    conn->specific[i] = NULL;
	conn->nSpecific = key + 1;
	conn->specific[key] = ptr;
    } else if (key >= conn->nSpecific) {
	conn->specific = (void **)
	    realloc(conn->specific, (key + 1) * sizeof(void *));
	for (i = conn->nSpecific; i < key; i++)
	    conn->specific[i] = NULL;
	conn->nSpecific = key + 1;
	conn->specific[key] = ptr;
    } else {
	if (conn->specific[key] && rxi_keyCreate_destructor[key])
	    (*rxi_keyCreate_destructor[key]) (conn->specific[key]);
	conn->specific[key] = ptr;
    }
    MUTEX_EXIT(&conn->conn_data_lock);
}
void
rx_SetServiceSpecific(struct rx_service *svc, int key, void *ptr)
{
    int i;
    MUTEX_ENTER(&svc->svc_data_lock);
    if (!svc->specific) {
	svc->specific = (void **)malloc((key + 1) * sizeof(void *));
	for (i = 0; i < key; i++)
	    svc->specific[i] = NULL;
	svc->nSpecific = key + 1;
	svc->specific[key] = ptr;
    } else if (key >= svc->nSpecific) {
	svc->specific = (void **)
	    realloc(svc->specific, (key + 1) * sizeof(void *));
	for (i = svc->nSpecific; i < key; i++)
	    svc->specific[i] = NULL;
	svc->nSpecific = key + 1;
	svc->specific[key] = ptr;
    } else {
	if (svc->specific[key] && rxi_keyCreate_destructor[key])
	    (*rxi_keyCreate_destructor[key]) (svc->specific[key]);
	svc->specific[key] = ptr;
    }
    MUTEX_EXIT(&svc->svc_data_lock);
}
void *
rx_GetSpecific(struct rx_connection *conn, int key)
{
    void *ptr;
    MUTEX_ENTER(&conn->conn_data_lock);
    if (key >= conn->nSpecific)
	ptr = NULL;
    else
	ptr = conn->specific[key];
    MUTEX_EXIT(&conn->conn_data_lock);
    return ptr;
}
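
/*
 * Illustrative sketch of how the connection specific data hooks above
 * are typically used: the key is created once, values are attached per
 * connection, and the destructor registered with rx_KeyCreate() is
 * invoked whenever a non-NULL value is overwritten.  The name my_key and
 * the use of free()/strdup() are examples only, not part of the rx API:
 *
 *     static int my_key;
 *     ...
 *     my_key = rx_KeyCreate(free);
 *     rx_SetSpecific(conn, my_key, strdup("per-connection state"));
 *     state = (char *)rx_GetSpecific(conn, my_key);
 */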
void *
rx_GetServiceSpecific(struct rx_service *svc, int key)
{
    void *ptr;
    MUTEX_ENTER(&svc->svc_data_lock);
    if (key >= svc->nSpecific)
	ptr = NULL;
    else
	ptr = svc->specific[key];
    MUTEX_EXIT(&svc->svc_data_lock);
    return ptr;
}

#endif /* !KERNEL */
/*
 * processStats is a queue used to store the statistics for the local
 * process.  Its contents are similar to the contents of the rpcStats
 * queue on a rx_peer structure, but the actual data stored within
 * this queue contains totals across the lifetime of the process (assuming
 * the stats have not been reset) - unlike the per peer structures
 * which can come and go based upon the peer lifetime.
 */

static struct rx_queue processStats = { &processStats, &processStats };
/*
 * peerStats is a queue used to store the statistics for all peer structs.
 * Its contents are the union of all the peer rpcStats queues.
 */

static struct rx_queue peerStats = { &peerStats, &peerStats };
/*
 * rxi_monitor_processStats is used to turn process wide stat collection
 * on and off
 */

static int rxi_monitor_processStats = 0;

/*
 * rxi_monitor_peerStats is used to turn per peer stat collection on and off
 */

static int rxi_monitor_peerStats = 0;
/*
 * rxi_AddRpcStat - given all of the information for a particular rpc
 * call, create (if needed) and update the stat totals for the rpc.
 *
 * Arguments:
 *
 * IN stats - the queue of stats that will be updated with the new value
 *
 * IN rxInterface - a unique number that identifies the rpc interface
 *
 * IN currentFunc - the index of the function being invoked
 *
 * IN totalFunc - the total number of functions in this interface
 *
 * IN queueTime - the amount of time this function waited for a thread
 *
 * IN execTime - the amount of time this function invocation took to execute
 *
 * IN bytesSent - the number of bytes sent by this invocation
 *
 * IN bytesRcvd - the number of bytes received by this invocation
 *
 * IN isServer - if true, this invocation was made to a server
 *
 * IN remoteHost - the ip address of the remote host
 *
 * IN remotePort - the port of the remote host
 *
 * IN addToPeerList - if != 0, add newly created stat to the global peer list
 *
 * INOUT counter - if a new stats structure is allocated, the counter will
 * be updated with the new number of allocated stat structures
 *
 * Returns: 0 on success, non-zero if the stats structure could not be
 * allocated.
 */

static int
rxi_AddRpcStat(struct rx_queue *stats, afs_uint32 rxInterface,
	       afs_uint32 currentFunc, afs_uint32 totalFunc,
	       struct clock *queueTime, struct clock *execTime,
	       afs_hyper_t * bytesSent, afs_hyper_t * bytesRcvd, int isServer,
	       afs_uint32 remoteHost, afs_uint32 remotePort,
	       int addToPeerList, unsigned int *counter)
{
    int rc = 0;
    rx_interface_stat_p rpc_stat, nrpc_stat;

    /*
     * See if there's already a structure for this interface
     */

    for (queue_Scan(stats, rpc_stat, nrpc_stat, rx_interface_stat)) {
	if ((rpc_stat->stats[0].interfaceId == rxInterface)
	    && (rpc_stat->stats[0].remote_is_server == isServer))
	    break;
    }

    /*
     * Didn't find a match so allocate a new structure and add it to the
     * queue.
     */

    if (queue_IsEnd(stats, rpc_stat) || (rpc_stat == NULL)
	|| (rpc_stat->stats[0].interfaceId != rxInterface)
	|| (rpc_stat->stats[0].remote_is_server != isServer)) {
	afs_uint32 i;
	size_t space;

	space =
	    sizeof(rx_interface_stat_t) +
	    totalFunc * sizeof(rx_function_entry_v1_t);

	rpc_stat = rxi_Alloc(space);
	if (rpc_stat == NULL) {
	    rc = 1;
	    goto fail;
	}
	*counter += totalFunc;
	for (i = 0; i < totalFunc; i++) {
	    rpc_stat->stats[i].remote_peer = remoteHost;
	    rpc_stat->stats[i].remote_port = remotePort;
	    rpc_stat->stats[i].remote_is_server = isServer;
	    rpc_stat->stats[i].interfaceId = rxInterface;
	    rpc_stat->stats[i].func_total = totalFunc;
	    rpc_stat->stats[i].func_index = i;
	    hzero(rpc_stat->stats[i].invocations);
	    hzero(rpc_stat->stats[i].bytes_sent);
	    hzero(rpc_stat->stats[i].bytes_rcvd);
	    rpc_stat->stats[i].queue_time_sum.sec = 0;
	    rpc_stat->stats[i].queue_time_sum.usec = 0;
	    rpc_stat->stats[i].queue_time_sum_sqr.sec = 0;
	    rpc_stat->stats[i].queue_time_sum_sqr.usec = 0;
	    rpc_stat->stats[i].queue_time_min.sec = 9999999;
	    rpc_stat->stats[i].queue_time_min.usec = 9999999;
	    rpc_stat->stats[i].queue_time_max.sec = 0;
	    rpc_stat->stats[i].queue_time_max.usec = 0;
	    rpc_stat->stats[i].execution_time_sum.sec = 0;
	    rpc_stat->stats[i].execution_time_sum.usec = 0;
	    rpc_stat->stats[i].execution_time_sum_sqr.sec = 0;
	    rpc_stat->stats[i].execution_time_sum_sqr.usec = 0;
	    rpc_stat->stats[i].execution_time_min.sec = 9999999;
	    rpc_stat->stats[i].execution_time_min.usec = 9999999;
	    rpc_stat->stats[i].execution_time_max.sec = 0;
	    rpc_stat->stats[i].execution_time_max.usec = 0;
	}
	queue_Prepend(stats, rpc_stat);
	if (addToPeerList) {
	    queue_Prepend(&peerStats, &rpc_stat->all_peers);
	}
    }
    /*
     * Increment the stats for this function
     */

    hadd32(rpc_stat->stats[currentFunc].invocations, 1);
    hadd(rpc_stat->stats[currentFunc].bytes_sent, *bytesSent);
    hadd(rpc_stat->stats[currentFunc].bytes_rcvd, *bytesRcvd);
    clock_Add(&rpc_stat->stats[currentFunc].queue_time_sum, queueTime);
    clock_AddSq(&rpc_stat->stats[currentFunc].queue_time_sum_sqr, queueTime);
    if (clock_Lt(queueTime, &rpc_stat->stats[currentFunc].queue_time_min)) {
	rpc_stat->stats[currentFunc].queue_time_min = *queueTime;
    }
    if (clock_Gt(queueTime, &rpc_stat->stats[currentFunc].queue_time_max)) {
	rpc_stat->stats[currentFunc].queue_time_max = *queueTime;
    }
    clock_Add(&rpc_stat->stats[currentFunc].execution_time_sum, execTime);
    clock_AddSq(&rpc_stat->stats[currentFunc].execution_time_sum_sqr,
		execTime);
    if (clock_Lt(execTime, &rpc_stat->stats[currentFunc].execution_time_min)) {
	rpc_stat->stats[currentFunc].execution_time_min = *execTime;
    }
    if (clock_Gt(execTime, &rpc_stat->stats[currentFunc].execution_time_max)) {
	rpc_stat->stats[currentFunc].execution_time_max = *execTime;
    }

  fail:
    return rc;
}
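
/*
 * Because the totals above keep both a running sum and a running sum of
 * squares, a consumer can derive the mean and variance of the queue or
 * execution time for a function from n = invocations samples:
 *
 *     mean     = queue_time_sum / n
 *     variance = queue_time_sum_sqr / n - mean * mean
 *
 * which is why clock_AddSq() accumulates the square of each sample
 * instead of the samples themselves.
 */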
/*
 * rx_IncrementTimeAndCount - increment the times and count for a particular
 * rpc function.
 *
 * Arguments:
 *
 * IN peer - the peer who invoked the rpc
 *
 * IN rxInterface - a unique number that identifies the rpc interface
 *
 * IN currentFunc - the index of the function being invoked
 *
 * IN totalFunc - the total number of functions in this interface
 *
 * IN queueTime - the amount of time this function waited for a thread
 *
 * IN execTime - the amount of time this function invocation took to execute
 *
 * IN bytesSent - the number of bytes sent by this invocation
 *
 * IN bytesRcvd - the number of bytes received by this invocation
 *
 * IN isServer - if true, this invocation was made to a server
 *
 * Returns: Nothing
 */
void
rx_IncrementTimeAndCount(struct rx_peer *peer, afs_uint32 rxInterface,
			 afs_uint32 currentFunc, afs_uint32 totalFunc,
			 struct clock *queueTime, struct clock *execTime,
			 afs_hyper_t * bytesSent, afs_hyper_t * bytesRcvd,
			 int isServer)
{
    if (!(rxi_monitor_peerStats || rxi_monitor_processStats))
	return;

    MUTEX_ENTER(&rx_rpc_stats);

    if (rxi_monitor_peerStats) {
	MUTEX_ENTER(&peer->peer_lock);
	rxi_AddRpcStat(&peer->rpcStats, rxInterface, currentFunc, totalFunc,
		       queueTime, execTime, bytesSent, bytesRcvd, isServer,
		       peer->host, peer->port, 1, &rxi_rpc_peer_stat_cnt);
	MUTEX_EXIT(&peer->peer_lock);
    }

    if (rxi_monitor_processStats) {
	rxi_AddRpcStat(&processStats, rxInterface, currentFunc, totalFunc,
		       queueTime, execTime, bytesSent, bytesRcvd, isServer,
		       0xffffffff, 0xffffffff, 0, &rxi_rpc_process_stat_cnt);
    }

    MUTEX_EXIT(&rx_rpc_stats);
}
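
/*
 * Sketch of a call site: after an rpc completes, a caller can record it
 * against the connection's peer.  The timing and byte-count variables
 * below are hypothetical; only the rx accessors are real:
 *
 *     struct clock queueTime, execTime;
 *     afs_hyper_t bytesSent, bytesRcvd;
 *     ... fill in the measurements for the completed call ...
 *     rx_IncrementTimeAndCount(rx_PeerOf(rx_ConnectionOf(call)),
 *                              interfaceId, opIndex, opCount,
 *                              &queueTime, &execTime,
 *                              &bytesSent, &bytesRcvd, 1);
 */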
/*
 * rx_MarshallProcessRPCStats - marshall an array of rpc statistics
 *
 * Arguments:
 *
 * IN callerVersion - the rpc stat version of the caller.
 *
 * IN count - the number of entries to marshall.
 *
 * IN stats - pointer to stats to be marshalled.
 *
 * OUT ptr - Where to store the marshalled data.
 *
 * Returns: Nothing
 */

void
rx_MarshallProcessRPCStats(afs_uint32 callerVersion, int count,
			   rx_function_entry_v1_t * stats, afs_uint32 ** ptrP)
{
    int i;
    afs_uint32 *ptr;

    /*
     * We only support the first version
     */

    for (ptr = *ptrP, i = 0; i < count; i++, stats++) {
	*(ptr++) = stats->remote_peer;
	*(ptr++) = stats->remote_port;
	*(ptr++) = stats->remote_is_server;
	*(ptr++) = stats->interfaceId;
	*(ptr++) = stats->func_total;
	*(ptr++) = stats->func_index;
	*(ptr++) = hgethi(stats->invocations);
	*(ptr++) = hgetlo(stats->invocations);
	*(ptr++) = hgethi(stats->bytes_sent);
	*(ptr++) = hgetlo(stats->bytes_sent);
	*(ptr++) = hgethi(stats->bytes_rcvd);
	*(ptr++) = hgetlo(stats->bytes_rcvd);
	*(ptr++) = stats->queue_time_sum.sec;
	*(ptr++) = stats->queue_time_sum.usec;
	*(ptr++) = stats->queue_time_sum_sqr.sec;
	*(ptr++) = stats->queue_time_sum_sqr.usec;
	*(ptr++) = stats->queue_time_min.sec;
	*(ptr++) = stats->queue_time_min.usec;
	*(ptr++) = stats->queue_time_max.sec;
	*(ptr++) = stats->queue_time_max.usec;
	*(ptr++) = stats->execution_time_sum.sec;
	*(ptr++) = stats->execution_time_sum.usec;
	*(ptr++) = stats->execution_time_sum_sqr.sec;
	*(ptr++) = stats->execution_time_sum_sqr.usec;
	*(ptr++) = stats->execution_time_min.sec;
	*(ptr++) = stats->execution_time_min.usec;
	*(ptr++) = stats->execution_time_max.sec;
	*(ptr++) = stats->execution_time_max.usec;
    }
    *ptrP = ptr;
}
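
/*
 * For reference, the loop above flattens each rx_function_entry_v1_t
 * into 28 consecutive afs_uint32 words: six identification words, three
 * 64-bit counters split into high/low halves, and eight clock values
 * stored as sec/usec pairs.  Unmarshalling code must consume the words
 * in exactly this order.
 */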
/*
 * rx_RetrieveProcessRPCStats - retrieve all of the rpc statistics for
 * this process
 *
 * Arguments:
 *
 * IN callerVersion - the rpc stat version of the caller
 *
 * OUT myVersion - the rpc stat version of this function
 *
 * OUT clock_sec - local time seconds
 *
 * OUT clock_usec - local time microseconds
 *
 * OUT allocSize - the number of bytes allocated to contain stats
 *
 * OUT statCount - the number of stats retrieved from this process.
 *
 * OUT stats - the actual stats retrieved from this process.
 *
 * Returns void.  If successful, stats will be != NULL.
 */

void
rx_RetrieveProcessRPCStats(afs_uint32 callerVersion, afs_uint32 * myVersion,
			   afs_uint32 * clock_sec, afs_uint32 * clock_usec,
			   size_t * allocSize, afs_uint32 * statCount,
			   afs_uint32 ** stats)
{
    size_t space = 0;
    afs_uint32 *ptr;
    struct clock now;

    *stats = 0;
    *allocSize = 0;
    *statCount = 0;
    *myVersion = RX_STATS_RETRIEVAL_VERSION;

    /*
     * Check to see if stats are enabled
     */

    MUTEX_ENTER(&rx_rpc_stats);
    if (!rxi_monitor_processStats) {
	MUTEX_EXIT(&rx_rpc_stats);
	return;
    }

    clock_GetTime(&now);
    *clock_sec = now.sec;
    *clock_usec = now.usec;

    /*
     * Allocate the space based upon the caller version
     *
     * If the client is at an older version than we are,
     * we return the statistic data in the older data format, but
     * we still return our version number so the client knows we
     * are maintaining more data than it can retrieve.
     */

    if (callerVersion >= RX_STATS_RETRIEVAL_FIRST_EDITION) {
	space = rxi_rpc_process_stat_cnt * sizeof(rx_function_entry_v1_t);
	*statCount = rxi_rpc_process_stat_cnt;
    } else {
	/*
	 * This can't happen yet, but in the future version changes
	 * can be handled by adding additional code here
	 */
    }

    if (space > (size_t) 0) {
	*allocSize = space;
	ptr = *stats = rxi_Alloc(space);

	if (ptr != NULL) {
	    rx_interface_stat_p rpc_stat, nrpc_stat;

	    for (queue_Scan
		 (&processStats, rpc_stat, nrpc_stat, rx_interface_stat)) {
		/*
		 * Copy the data based upon the caller version
		 */
		rx_MarshallProcessRPCStats(callerVersion,
					   rpc_stat->stats[0].func_total,
					   rpc_stat->stats, &ptr);
	    }
	}
    }

    MUTEX_EXIT(&rx_rpc_stats);
}
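
/*
 * A minimal sketch of the expected calling sequence (variable names are
 * illustrative only):
 *
 *     afs_uint32 myVersion, sec, usec, statCount, *stats;
 *     size_t allocSize;
 *
 *     rx_RetrieveProcessRPCStats(RX_STATS_RETRIEVAL_VERSION, &myVersion,
 *                                &sec, &usec, &allocSize, &statCount,
 *                                &stats);
 *     if (stats != NULL) {
 *         ... unmarshall statCount entries from stats ...
 *         rx_FreeRPCStats(stats, allocSize);
 *     }
 */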
/*
 * rx_RetrievePeerRPCStats - retrieve all of the rpc statistics for the peers
 *
 * Arguments:
 *
 * IN callerVersion - the rpc stat version of the caller
 *
 * OUT myVersion - the rpc stat version of this function
 *
 * OUT clock_sec - local time seconds
 *
 * OUT clock_usec - local time microseconds
 *
 * OUT allocSize - the number of bytes allocated to contain stats
 *
 * OUT statCount - the number of stats retrieved from the individual
 * peer structures.
 *
 * OUT stats - the actual stats retrieved from the individual peer structures.
 *
 * Returns void.  If successful, stats will be != NULL.
 */

void
rx_RetrievePeerRPCStats(afs_uint32 callerVersion, afs_uint32 * myVersion,
			afs_uint32 * clock_sec, afs_uint32 * clock_usec,
			size_t * allocSize, afs_uint32 * statCount,
			afs_uint32 ** stats)
{
    size_t space = 0;
    afs_uint32 *ptr;
    struct clock now;

    *stats = 0;
    *allocSize = 0;
    *statCount = 0;
    *myVersion = RX_STATS_RETRIEVAL_VERSION;

    /*
     * Check to see if stats are enabled
     */

    MUTEX_ENTER(&rx_rpc_stats);
    if (!rxi_monitor_peerStats) {
	MUTEX_EXIT(&rx_rpc_stats);
	return;
    }

    clock_GetTime(&now);
    *clock_sec = now.sec;
    *clock_usec = now.usec;

    /*
     * Allocate the space based upon the caller version
     *
     * If the client is at an older version than we are,
     * we return the statistic data in the older data format, but
     * we still return our version number so the client knows we
     * are maintaining more data than it can retrieve.
     */

    if (callerVersion >= RX_STATS_RETRIEVAL_FIRST_EDITION) {
	space = rxi_rpc_peer_stat_cnt * sizeof(rx_function_entry_v1_t);
	*statCount = rxi_rpc_peer_stat_cnt;
    } else {
	/*
	 * This can't happen yet, but in the future version changes
	 * can be handled by adding additional code here
	 */
    }

    if (space > (size_t) 0) {
	*allocSize = space;
	ptr = *stats = rxi_Alloc(space);

	if (ptr != NULL) {
	    rx_interface_stat_p rpc_stat, nrpc_stat;
	    char *fix_offset;

	    for (queue_Scan
		 (&peerStats, rpc_stat, nrpc_stat, rx_interface_stat)) {
		/*
		 * We have to fix the offset of rpc_stat since we are
		 * keeping this structure on two rx_queues.  The rx_queue
		 * package assumes that the rx_queue member is the first
		 * member of the structure.  That is, rx_queue assumes that
		 * any one item is only on one queue at a time.  We are
		 * breaking that assumption and so we have to do a little
		 * math to fix our pointers.
		 */

		fix_offset = (char *)rpc_stat;
		fix_offset -= offsetof(rx_interface_stat_t, all_peers);
		rpc_stat = (rx_interface_stat_p) fix_offset;

		/*
		 * Copy the data based upon the caller version
		 */
		rx_MarshallProcessRPCStats(callerVersion,
					   rpc_stat->stats[0].func_total,
					   rpc_stat->stats, &ptr);
	    }
	}
    }

    MUTEX_EXIT(&rx_rpc_stats);
}
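
/*
 * The pointer fix-up above is the classic "container of" idiom: given
 * the address of the all_peers member embedded in an
 * rx_interface_stat_t, subtracting offsetof(rx_interface_stat_t,
 * all_peers) recovers the address of the enclosing structure.  A generic
 * sketch of the same idiom (the macro name is illustrative, not part of
 * rx):
 *
 *     #define CONTAINER_OF(ptr, type, member) \
 *         ((type *)((char *)(ptr) - offsetof(type, member)))
 *
 *     rpc_stat = CONTAINER_OF(entry, rx_interface_stat_t, all_peers);
 */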
/*
 * rx_FreeRPCStats - free memory allocated by
 * rx_RetrieveProcessRPCStats and rx_RetrievePeerRPCStats
 *
 * Arguments:
 *
 * IN stats - stats previously returned by rx_RetrieveProcessRPCStats or
 * rx_RetrievePeerRPCStats
 *
 * IN allocSize - the number of bytes in stats.
 *
 * Returns: Nothing
 */

void
rx_FreeRPCStats(afs_uint32 * stats, size_t allocSize)
{
    rxi_Free(stats, allocSize);
}
/*
 * rx_queryProcessRPCStats - see if process rpc stat collection is
 * currently enabled.
 *
 * Arguments: None
 *
 * Returns: 0 if stats are not enabled, != 0 otherwise
 */

int
rx_queryProcessRPCStats(void)
{
    int rc;
    MUTEX_ENTER(&rx_rpc_stats);
    rc = rxi_monitor_processStats;
    MUTEX_EXIT(&rx_rpc_stats);
    return rc;
}
/*
 * rx_queryPeerRPCStats - see if peer stat collection is currently enabled.
 *
 * Arguments: None
 *
 * Returns: 0 if stats are not enabled, != 0 otherwise
 */

int
rx_queryPeerRPCStats(void)
{
    int rc;
    MUTEX_ENTER(&rx_rpc_stats);
    rc = rxi_monitor_peerStats;
    MUTEX_EXIT(&rx_rpc_stats);
    return rc;
}
/*
 * rx_enableProcessRPCStats - begin rpc stat collection for entire process
 *
 * Arguments: None
 *
 * Returns: Nothing
 */

void
rx_enableProcessRPCStats(void)
{
    MUTEX_ENTER(&rx_rpc_stats);
    rx_enable_stats = 1;
    rxi_monitor_processStats = 1;
    MUTEX_EXIT(&rx_rpc_stats);
}
/*
 * rx_enablePeerRPCStats - begin rpc stat collection per peer structure
 *
 * Arguments: None
 *
 * Returns: Nothing
 */

void
rx_enablePeerRPCStats(void)
{
    MUTEX_ENTER(&rx_rpc_stats);
    rx_enable_stats = 1;
    rxi_monitor_peerStats = 1;
    MUTEX_EXIT(&rx_rpc_stats);
}
/*
 * rx_disableProcessRPCStats - stop rpc stat collection for entire process
 *
 * Arguments: None
 *
 * Returns: Nothing
 */

void
rx_disableProcessRPCStats(void)
{
    rx_interface_stat_p rpc_stat, nrpc_stat;
    size_t space;

    MUTEX_ENTER(&rx_rpc_stats);

    /*
     * Turn off process statistics and if peer stats is also off, turn
     * off everything
     */

    rxi_monitor_processStats = 0;
    if (rxi_monitor_peerStats == 0) {
	rx_enable_stats = 0;
    }

    for (queue_Scan(&processStats, rpc_stat, nrpc_stat, rx_interface_stat)) {
	unsigned int num_funcs = 0;
	if (!rpc_stat)
	    break;

	queue_Remove(rpc_stat);
	num_funcs = rpc_stat->stats[0].func_total;
	space =
	    sizeof(rx_interface_stat_t) +
	    rpc_stat->stats[0].func_total * sizeof(rx_function_entry_v1_t);

	rxi_Free(rpc_stat, space);
	rxi_rpc_process_stat_cnt -= num_funcs;
    }
    MUTEX_EXIT(&rx_rpc_stats);
}
/*
 * rx_disablePeerRPCStats - stop rpc stat collection for peers
 *
 * Arguments: None
 *
 * Returns: Nothing
 */

void
rx_disablePeerRPCStats(void)
{
    struct rx_peer **peer_ptr, **peer_end;
    int code;

    /*
     * Turn off peer statistics and if process stats is also off, turn
     * off everything
     */

    rxi_monitor_peerStats = 0;
    if (rxi_monitor_processStats == 0) {
	rx_enable_stats = 0;
    }

    for (peer_ptr = &rx_peerHashTable[0], peer_end =
	 &rx_peerHashTable[rx_hashTableSize]; peer_ptr < peer_end;
	 peer_ptr++) {
	struct rx_peer *peer, *next, *prev;

	MUTEX_ENTER(&rx_peerHashTable_lock);
	MUTEX_ENTER(&rx_rpc_stats);
	for (prev = peer = *peer_ptr; peer; peer = next) {
	    next = peer->next;
	    code = MUTEX_TRYENTER(&peer->peer_lock);
	    if (code) {
		rx_interface_stat_p rpc_stat, nrpc_stat;
		size_t space;

		if (prev == *peer_ptr) {
		    *peer_ptr = next;
		    prev = next;
		} else {
		    prev->next = next;
		}
		if (next)
		    next->refCount++;
		if (prev)
		    prev->refCount++;
		peer->refCount++;
		MUTEX_EXIT(&rx_peerHashTable_lock);

		for (queue_Scan
		     (&peer->rpcStats, rpc_stat, nrpc_stat,
		      rx_interface_stat)) {
		    unsigned int num_funcs = 0;
		    if (!rpc_stat)
			break;

		    queue_Remove(&rpc_stat->queue_header);
		    queue_Remove(&rpc_stat->all_peers);
		    num_funcs = rpc_stat->stats[0].func_total;
		    space =
			sizeof(rx_interface_stat_t) +
			rpc_stat->stats[0].func_total *
			sizeof(rx_function_entry_v1_t);

		    rxi_Free(rpc_stat, space);
		    rxi_rpc_peer_stat_cnt -= num_funcs;
		}
		MUTEX_EXIT(&peer->peer_lock);

		MUTEX_ENTER(&rx_peerHashTable_lock);
		if (next)
		    next->refCount--;
		if (prev)
		    prev->refCount--;
		peer->refCount--;
	    } else {
		prev = peer;
	    }
	}
	MUTEX_EXIT(&rx_rpc_stats);
	MUTEX_EXIT(&rx_peerHashTable_lock);
    }
}
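
/*
 * Design note: the loop above uses MUTEX_TRYENTER on each peer_lock
 * rather than MUTEX_ENTER so that it never blocks on a peer while
 * holding both rx_peerHashTable_lock and rx_rpc_stats, which could
 * deadlock against a thread that acquires the locks in the opposite
 * order.  Peers whose lock is busy are simply skipped on this pass
 * (prev = peer) and their stats remain allocated.
 */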
/*
 * rx_clearProcessRPCStats - clear the contents of the rpc stats according
 * to the flag
 *
 * Arguments:
 *
 * IN clearFlag - flag indicating which stats to clear
 *
 * Returns: Nothing
 */

void
rx_clearProcessRPCStats(afs_uint32 clearFlag)
{
    rx_interface_stat_p rpc_stat, nrpc_stat;

    MUTEX_ENTER(&rx_rpc_stats);

    for (queue_Scan(&processStats, rpc_stat, nrpc_stat, rx_interface_stat)) {
	unsigned int num_funcs = 0, i;

	num_funcs = rpc_stat->stats[0].func_total;
	for (i = 0; i < num_funcs; i++) {
	    if (clearFlag & AFS_RX_STATS_CLEAR_INVOCATIONS) {
		hzero(rpc_stat->stats[i].invocations);
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_BYTES_SENT) {
		hzero(rpc_stat->stats[i].bytes_sent);
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_BYTES_RCVD) {
		hzero(rpc_stat->stats[i].bytes_rcvd);
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_QUEUE_TIME_SUM) {
		rpc_stat->stats[i].queue_time_sum.sec = 0;
		rpc_stat->stats[i].queue_time_sum.usec = 0;
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_QUEUE_TIME_SQUARE) {
		rpc_stat->stats[i].queue_time_sum_sqr.sec = 0;
		rpc_stat->stats[i].queue_time_sum_sqr.usec = 0;
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_QUEUE_TIME_MIN) {
		rpc_stat->stats[i].queue_time_min.sec = 9999999;
		rpc_stat->stats[i].queue_time_min.usec = 9999999;
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_QUEUE_TIME_MAX) {
		rpc_stat->stats[i].queue_time_max.sec = 0;
		rpc_stat->stats[i].queue_time_max.usec = 0;
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_EXEC_TIME_SUM) {
		rpc_stat->stats[i].execution_time_sum.sec = 0;
		rpc_stat->stats[i].execution_time_sum.usec = 0;
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_EXEC_TIME_SQUARE) {
		rpc_stat->stats[i].execution_time_sum_sqr.sec = 0;
		rpc_stat->stats[i].execution_time_sum_sqr.usec = 0;
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_EXEC_TIME_MIN) {
		rpc_stat->stats[i].execution_time_min.sec = 9999999;
		rpc_stat->stats[i].execution_time_min.usec = 9999999;
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_EXEC_TIME_MAX) {
		rpc_stat->stats[i].execution_time_max.sec = 0;
		rpc_stat->stats[i].execution_time_max.usec = 0;
	    }
	}
    }

    MUTEX_EXIT(&rx_rpc_stats);
}
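
/*
 * The AFS_RX_STATS_CLEAR_* values are independent bits, so callers can
 * combine them.  For example, to reset only the byte counters of every
 * process-wide entry (an illustrative call, not made anywhere in this
 * file):
 *
 *     rx_clearProcessRPCStats(AFS_RX_STATS_CLEAR_BYTES_SENT |
 *                             AFS_RX_STATS_CLEAR_BYTES_RCVD);
 */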
/*
 * rx_clearPeerRPCStats - clear the contents of the rpc stats according
 * to the flag
 *
 * Arguments:
 *
 * IN clearFlag - flag indicating which stats to clear
 *
 * Returns: Nothing
 */

void
rx_clearPeerRPCStats(afs_uint32 clearFlag)
{
    rx_interface_stat_p rpc_stat, nrpc_stat;

    MUTEX_ENTER(&rx_rpc_stats);

    for (queue_Scan(&peerStats, rpc_stat, nrpc_stat, rx_interface_stat)) {
	unsigned int num_funcs = 0, i;
	char *fix_offset;

	/*
	 * We have to fix the offset of rpc_stat since we are
	 * keeping this structure on two rx_queues.  The rx_queue
	 * package assumes that the rx_queue member is the first
	 * member of the structure.  That is, rx_queue assumes that
	 * any one item is only on one queue at a time.  We are
	 * breaking that assumption and so we have to do a little
	 * math to fix our pointers.
	 */

	fix_offset = (char *)rpc_stat;
	fix_offset -= offsetof(rx_interface_stat_t, all_peers);
	rpc_stat = (rx_interface_stat_p) fix_offset;

	num_funcs = rpc_stat->stats[0].func_total;
	for (i = 0; i < num_funcs; i++) {
	    if (clearFlag & AFS_RX_STATS_CLEAR_INVOCATIONS) {
		hzero(rpc_stat->stats[i].invocations);
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_BYTES_SENT) {
		hzero(rpc_stat->stats[i].bytes_sent);
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_BYTES_RCVD) {
		hzero(rpc_stat->stats[i].bytes_rcvd);
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_QUEUE_TIME_SUM) {
		rpc_stat->stats[i].queue_time_sum.sec = 0;
		rpc_stat->stats[i].queue_time_sum.usec = 0;
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_QUEUE_TIME_SQUARE) {
		rpc_stat->stats[i].queue_time_sum_sqr.sec = 0;
		rpc_stat->stats[i].queue_time_sum_sqr.usec = 0;
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_QUEUE_TIME_MIN) {
		rpc_stat->stats[i].queue_time_min.sec = 9999999;
		rpc_stat->stats[i].queue_time_min.usec = 9999999;
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_QUEUE_TIME_MAX) {
		rpc_stat->stats[i].queue_time_max.sec = 0;
		rpc_stat->stats[i].queue_time_max.usec = 0;
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_EXEC_TIME_SUM) {
		rpc_stat->stats[i].execution_time_sum.sec = 0;
		rpc_stat->stats[i].execution_time_sum.usec = 0;
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_EXEC_TIME_SQUARE) {
		rpc_stat->stats[i].execution_time_sum_sqr.sec = 0;
		rpc_stat->stats[i].execution_time_sum_sqr.usec = 0;
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_EXEC_TIME_MIN) {
		rpc_stat->stats[i].execution_time_min.sec = 9999999;
		rpc_stat->stats[i].execution_time_min.usec = 9999999;
	    }
	    if (clearFlag & AFS_RX_STATS_CLEAR_EXEC_TIME_MAX) {
		rpc_stat->stats[i].execution_time_max.sec = 0;
		rpc_stat->stats[i].execution_time_max.usec = 0;
	    }
	}
    }

    MUTEX_EXIT(&rx_rpc_stats);
}
/*
 * rxi_rxstat_userok points to a routine that returns 1 if the caller
 * is authorized to enable/disable/clear RX statistics.
 */
static int (*rxi_rxstat_userok) (struct rx_call * call) = NULL;

void
rx_SetRxStatUserOk(int (*proc) (struct rx_call * call))
{
    rxi_rxstat_userok = proc;
}

int
rx_RxStatUserOk(struct rx_call *call)
{
    if (!rxi_rxstat_userok)
	return 0;
    return rxi_rxstat_userok(call);
}
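
/*
 * Sketch of registering an authorization hook.  The checker below is
 * hypothetical (as is MY_ADMIN_SVC); a real one would examine the
 * call's connection and its security credentials before allowing the
 * statistics to be changed:
 *
 *     static int
 *     my_stat_userok(struct rx_call *call)
 *     {
 *         return rx_ServiceIdOf(rx_ConnectionOf(call)) == MY_ADMIN_SVC;
 *     }
 *     ...
 *     rx_SetRxStatUserOk(my_stat_userok);
 */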
#ifdef AFS_NT40_ENV
/*
 * DllMain() -- Entry-point function called by the DllMainCRTStartup()
 *     function in the MSVC runtime DLL (msvcrt.dll).
 *
 * Note: the system serializes calls to this function.
 */
BOOL WINAPI
DllMain(HINSTANCE dllInstHandle,	/* instance handle for this DLL module */
	DWORD reason,			/* reason function is being called */
	LPVOID reserved)		/* reserved for future use */
{
    switch (reason) {
    case DLL_PROCESS_ATTACH:
	/* library is being attached to a process */
	return TRUE;

    case DLL_PROCESS_DETACH:
	return TRUE;

    default:
	return FALSE;
    }
}
#endif /* AFS_NT40_ENV */
int rx_DumpCalls(FILE *outputFile, char *cookie)
{
#ifdef RXDEBUG_PACKET
#ifdef KDUMP_RX_LOCK
    struct rx_call_rx_lock *c;
#else
    struct rx_call *c;
#endif
#ifdef AFS_NT40_ENV
    int zilch;
    char output[2048];
#define RXDPRINTF sprintf
#define RXDPRINTOUT output
#else
#define RXDPRINTF fprintf
#define RXDPRINTOUT outputFile
#endif

    RXDPRINTF(RXDPRINTOUT, "%s - Start dumping all Rx Calls - count=%u\r\n", cookie, rx_stats.nCallStructs);
#ifdef AFS_NT40_ENV
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
#endif

    for (c = rx_allCallsp; c; c = c->allNextp) {
        u_short rqc, tqc, iovqc;
        struct rx_packet *p, *np;

        MUTEX_ENTER(&c->lock);
        queue_Count(&c->rq, p, np, rx_packet, rqc);
        queue_Count(&c->tq, p, np, rx_packet, tqc);
        queue_Count(&c->iovq, p, np, rx_packet, iovqc);

        RXDPRINTF(RXDPRINTOUT, "%s - call=0x%p, id=%u, state=%u, mode=%u, conn=%p, epoch=%u, cid=%u, callNum=%u, connFlags=0x%x, flags=0x%x, "
                "rqc=%u,%u, tqc=%u,%u, iovqc=%u,%u, "
                "lstatus=%u, rstatus=%u, error=%d, timeout=%u, "
                "resendEvent=%d, timeoutEvt=%d, keepAliveEvt=%d, delayedAckEvt=%d, delayedAbortEvt=%d, abortCode=%d, abortCount=%d, "
                "lastSendTime=%u, lastRecvTime=%u, lastSendData=%u"
#ifdef RX_ENABLE_LOCKS
                ", refCount=%u"
#endif
#ifdef RX_REFCOUNT_CHECK
                ", refCountBegin=%u, refCountResend=%u, refCountDelay=%u, "
                "refCountAlive=%u, refCountPacket=%u, refCountSend=%u, refCountAckAll=%u, refCountAbort=%u"
#endif
                "\r\n",
                cookie, c, c->call_id, (afs_uint32)c->state, (afs_uint32)c->mode, c->conn, c->conn?c->conn->epoch:0, c->conn?c->conn->cid:0,
                c->callNumber?*c->callNumber:0, c->conn?c->conn->flags:0, c->flags,
                (afs_uint32)c->rqc, (afs_uint32)rqc, (afs_uint32)c->tqc, (afs_uint32)tqc, (afs_uint32)c->iovqc, (afs_uint32)iovqc,
                (afs_uint32)c->localStatus, (afs_uint32)c->remoteStatus, c->error, c->timeout,
                c->resendEvent?1:0, c->timeoutEvent?1:0, c->keepAliveEvent?1:0, c->delayedAckEvent?1:0, c->delayedAbortEvent?1:0,
                c->abortCode, c->abortCount, c->lastSendTime, c->lastReceiveTime, c->lastSendData
#ifdef RX_ENABLE_LOCKS
                , (afs_uint32)c->refCount
#endif
#ifdef RX_REFCOUNT_CHECK
                , c->refCDebug[0],c->refCDebug[1],c->refCDebug[2],c->refCDebug[3],c->refCDebug[4],c->refCDebug[5],c->refCDebug[6],c->refCDebug[7]
#endif
                );
        MUTEX_EXIT(&c->lock);

#ifdef AFS_NT40_ENV
        WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
#endif
    }
    RXDPRINTF(RXDPRINTOUT, "%s - End dumping all Rx Calls\r\n", cookie);
#ifdef AFS_NT40_ENV
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
#endif
#endif /* RXDEBUG_PACKET */
    return 0;
}