/*
* Copyright 2000, International Business Machines Corporation and others.
* All Rights Reserved.
- *
+ *
* This software has been released under the terms of the IBM Public
* License. For details, see the LICENSE file in the top-level source
* directory or online at http://www.openafs.org/dl/license10.html
# include "rx.h"
#endif /* KERNEL */
-#ifndef INIT
-#define INIT(x)
-#if defined(AFS_NT40_ENV) && defined(AFS_PTHREAD_ENV)
+#ifndef GLOBALSINIT
+#define GLOBALSINIT(x)
+#if defined(AFS_NT40_ENV)
+#define RX_STATS_INTERLOCKED 1
+#if defined(AFS_PTHREAD_ENV)
#define EXT __declspec(dllimport) extern
-#else
+#else /* AFS_PTHREAD_ENV */
#define EXT extern
-#endif
-#endif /* !INIT */
+#endif /* AFS_PTHREAD_ENV */
+#else /* AFS_NT40_ENV */
+#define EXT extern
+#endif /* AFS_NT40_ENV */
+#endif /* !GLOBALSINIT */
/* Basic socket for client requests; other sockets (for receiving server requests) are in the service structures */
EXT osi_socket rx_socket;
/* Variable to allow introduction of network unreliability */
#ifdef RXDEBUG
-EXT int rx_intentionallyDroppedPacketsPer100 INIT(0); /* Dropped on Send */
+EXT int rx_intentionallyDroppedPacketsPer100 GLOBALSINIT(0); /* Dropped on Send */
+EXT int rx_intentionallyDroppedOnReadPer100 GLOBALSINIT(0); /* Dropped on Read */
#endif
/* extra packets to add to the quota */
-EXT int rx_extraQuota INIT(0);
-/* extra packets to alloc (2 windows by deflt) */
-EXT int rx_extraPackets INIT(32);
+EXT int rx_extraQuota GLOBALSINIT(0);
+/* extra packets to alloc (2 * maxWindowSize by default) */
+EXT int rx_extraPackets GLOBALSINIT(256);
-EXT int rx_stackSize INIT(RX_DEFAULT_STACK_SIZE);
+EXT int rx_stackSize GLOBALSINIT(RX_DEFAULT_STACK_SIZE);
/* Time until an unresponsive connection is declared dead */
-EXT int rx_connDeadTime INIT(12);
+EXT int rx_connDeadTime GLOBALSINIT(12);
+
/* Set rx default connection dead time; set on both services and connections at creation time */
+#ifdef AFS_NT40_ENV
+void rx_SetRxDeadTime(int seconds);
+#else
#define rx_SetRxDeadTime(seconds) (rx_connDeadTime = (seconds))
+#endif
/* Time until we toss an idle connection */
-EXT int rx_idleConnectionTime INIT(700);
+EXT int rx_idleConnectionTime GLOBALSINIT(700);
/* Time until we toss a peer structure, after all connections using are gone */
-EXT int rx_idlePeerTime INIT(60);
+EXT int rx_idlePeerTime GLOBALSINIT(60);
/* The file server is temporarily salvaging */
-EXT int rx_tranquil INIT(0);
+EXT int rx_tranquil GLOBALSINIT(0);
/* UDP rcv buffer size */
-EXT int rx_UdpBufSize INIT(64 * 1024);
+EXT int rx_UdpBufSize GLOBALSINIT(64 * 1024);
+#ifdef AFS_NT40_ENV
+int rx_GetMinUdpBufSize(void);
+void rx_SetUdpBufSize(int x);
+#else
#define rx_GetMinUdpBufSize() (64*1024)
#define rx_SetUdpBufSize(x) (((x)>rx_GetMinUdpBufSize()) ? (rx_UdpBufSize = (x)):0)
-
+#endif
/*
* Variables to control RX overload management. When the number of calls
* waiting for a thread exceed the threshold, new calls are aborted
- * with the busy error.
+ * with the busy error.
*/
-EXT int rx_BusyThreshold INIT(-1); /* default is disabled */
-EXT int rx_BusyError INIT(-1);
+EXT int rx_BusyThreshold GLOBALSINIT(-1); /* default is disabled */
+EXT int rx_BusyError GLOBALSINIT(-1);
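+/*
+ * Illustrative sketch (values are assumptions, not defaults introduced by
+ * this change): a server that wants overload protection could set these
+ * globals before calling rx_Init(), e.g.
+ *
+ *     rx_BusyThreshold = 100;          // abort new calls once 100 calls wait for a thread
+ *     rx_BusyError = MY_SERVICE_BUSY;  // MY_SERVICE_BUSY is a hypothetical application abort code
+ */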
/* These definitions should be in one place */
#ifdef AFS_SUN5_ENV
#define RX_REAP_TIME 60 /* Check for tossable connections every 60 seconds */
#endif
-#define RX_FAST_ACK_RATE 1 /* as of 3.4, ask for an ack every
+#define RX_FAST_ACK_RATE 1 /* as of 3.4, ask for an ack every
* other packet. */
-EXT int rx_minWindow INIT(1);
-EXT int rx_initReceiveWindow INIT(16); /* how much to accept */
-EXT int rx_maxReceiveWindow INIT(32); /* how much to accept */
-EXT int rx_initSendWindow INIT(8);
-EXT int rx_maxSendWindow INIT(32);
-EXT int rx_nackThreshold INIT(3); /* Number NACKS to trigger congestion recovery */
-EXT int rx_nDgramThreshold INIT(4); /* Number of packets before increasing
- * packets per datagram */
+EXT int rx_minPeerTimeout GLOBALSINIT(350); /* in milliseconds */
+EXT int rx_minWindow GLOBALSINIT(1);
+EXT int rx_maxWindow GLOBALSINIT(65535); /* twind is u_short */
+EXT int rx_initReceiveWindow GLOBALSINIT(16); /* how much to accept */
+EXT int rx_maxReceiveWindow GLOBALSINIT(128); /* how much to accept */
+EXT int rx_initSendWindow GLOBALSINIT(16);
+EXT int rx_maxSendWindow GLOBALSINIT(128);
+EXT int rx_nackThreshold GLOBALSINIT(3); /* Number NACKS to trigger congestion recovery */
+EXT int rx_nDgramThreshold GLOBALSINIT(4); /* Number of packets before increasing
+ * packets per datagram */
#define RX_MAX_FRAGS 4
-EXT int rxi_nSendFrags INIT(RX_MAX_FRAGS); /* max fragments in a datagram */
-EXT int rxi_nRecvFrags INIT(RX_MAX_FRAGS);
-EXT int rxi_OrphanFragSize INIT(512);
+EXT int rxi_nSendFrags GLOBALSINIT(RX_MAX_FRAGS); /* max fragments in a datagram */
+EXT int rxi_nRecvFrags GLOBALSINIT(RX_MAX_FRAGS);
+EXT int rxi_OrphanFragSize GLOBALSINIT(512);
#define RX_MAX_DGRAM_PACKETS 6 /* max packets per jumbogram */
-EXT int rxi_nDgramPackets INIT(RX_MAX_DGRAM_PACKETS);
+EXT int rxi_nDgramPackets GLOBALSINIT(RX_MAX_DGRAM_PACKETS);
/* allow n packets between soft acks - must be power of 2 -1, else change
* macro below */
-EXT int rxi_SoftAckRate INIT(RX_FAST_ACK_RATE);
+EXT int rxi_SoftAckRate GLOBALSINIT(RX_FAST_ACK_RATE);
/* consume n packets before sending hard ack, should be larger than above,
but not absolutely necessary. If it's smaller, than fast receivers will
send a soft ack, immediately followed by a hard ack. */
-EXT int rxi_HardAckRate INIT(RX_FAST_ACK_RATE + 1);
-
-/* EXT int rx_maxWindow INIT(15); Temporary HACK: transmit/receive window */
+EXT int rxi_HardAckRate GLOBALSINIT(RX_FAST_ACK_RATE + 1);
/* If window sizes become very variable (in terms of #packets), be
* sure that the sender can get back a hard acks without having to wait for
#define ACKHACK(p,r) { if (((p)->header.seq & (rxi_SoftAckRate))==0) (p)->header.flags |= RX_REQUEST_ACK; }
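+/*
+ * Worked example of the mask arithmetic above: with rxi_SoftAckRate == 1
+ * (binary 01), (seq & 1) == 0 holds for every second packet; with 3
+ * (binary 011) it holds for every fourth.  A value that is not 2^n - 1
+ * would not pick out evenly spaced sequence numbers, which is why the
+ * comment above requires a power of 2 minus 1.
+ */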
-EXT int rx_nPackets INIT(100); /* obsolete; use rx_extraPackets now */
+EXT int rx_nPackets GLOBALSINIT(0); /* preallocate packets with rx_extraPackets */
/*
* pthreads thread-specific rx info support
struct rx_queue queue;
int len; /* local queue length */
int delta; /* number of new packets alloc'd locally since last sync w/ global queue */
-
+
/* FPQ stats */
int checkin_ops;
int checkin_xfer;
int gtol_xfer;
int ltog_ops;
int ltog_xfer;
- int alloc_ops;
- int alloc_xfer;
+ int lalloc_ops;
+ int lalloc_xfer;
+ int galloc_ops;
+ int galloc_xfer;
} _FPQ;
struct rx_packet * local_special_packet;
} rx_ts_info_t;
-EXT struct rx_ts_info_t * rx_ts_info_init(); /* init function for thread-specific data struct */
+EXT struct rx_ts_info_t * rx_ts_info_init(void); /* init function for thread-specific data struct */
#define RX_TS_INFO_GET(ts_info_p) \
do { \
ts_info_p = (struct rx_ts_info_t*)pthread_getspecific(rx_ts_info_key); \
* in which the first tier is thread-specific, and the second tier is
* a global free packet queue */
EXT struct rx_queue rx_freePacketQueue;
+#ifdef RX_TRACK_PACKETS
#define RX_FPQ_MARK_FREE(p) \
do { \
if ((p)->flags & RX_PKTFLAG_FREE) \
osi_Panic("rx packet already free\n"); \
(p)->flags |= RX_PKTFLAG_FREE; \
+ (p)->flags &= ~(RX_PKTFLAG_TQ|RX_PKTFLAG_IOVQ|RX_PKTFLAG_RQ|RX_PKTFLAG_CP); \
+ (p)->length = 0; \
+ (p)->niovecs = 0; \
} while(0)
#define RX_FPQ_MARK_USED(p) \
do { \
(p)->flags = 0; /* clear RX_PKTFLAG_FREE, initialize the rest */ \
(p)->header.flags = 0; \
} while(0)
+#else
+#define RX_FPQ_MARK_FREE(p) \
+ do { \
+ (p)->length = 0; \
+ (p)->niovecs = 0; \
+ } while(0)
+#define RX_FPQ_MARK_USED(p) \
+ do { \
+ (p)->flags = 0; /* clear RX_PKTFLAG_FREE, initialize the rest */ \
+ (p)->header.flags = 0; \
+ } while(0)
+#endif
#define RX_PACKET_IOV_INIT(p) \
do { \
(p)->wirevec[0].iov_base = (char *)((p)->wirehead); \
#if defined(AFS_PTHREAD_ENV)
#define RX_ENABLE_TSFPQ
-EXT int rx_TSFPQGlobSize INIT(3); /* number of packets to transfer between global and local queues in one op */
-EXT int rx_TSFPQLocalMax INIT(15); /* max number of packets on local FPQ before returning a glob to the global pool */
-EXT int rx_TSFPQMaxProcs INIT(0); /* max number of threads expected */
+EXT int rx_TSFPQGlobSize GLOBALSINIT(3); /* number of packets to transfer between global and local queues in one op */
+EXT int rx_TSFPQLocalMax GLOBALSINIT(15); /* max number of packets on local FPQ before returning a glob to the global pool */
+EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
EXT void rxi_MorePacketsTSFPQ(int apackets, int flush_global, int num_keep_local); /* more flexible packet alloc function */
EXT void rxi_AdjustLocalPacketsTSFPQ(int num_keep_local, int allow_overcommit); /* adjust thread-local queue length, for places where we know how many packets we will need a priori */
EXT void rxi_FlushLocalPacketsTSFPQ(void); /* flush all thread-local packets to global queue */
#define RX_TS_FPQ_FLUSH_GLOBAL 1
#define RX_TS_FPQ_PULL_GLOBAL 1
#define RX_TS_FPQ_ALLOW_OVERCOMMIT 1
-/* compute the localmax and globsize values from rx_TSFPQMaxProcs and rx_nPackets.
- arbitarily set local max so that all threads consume 90% of packets, if all local queues are full.
- arbitarily set transfer glob size to 20% of max local packet queue length.
- also set minimum values of 15 and 3. */
+/*
+ * Compute the localmax and globsize values from rx_TSFPQMaxProcs and rx_nPackets.
+ * Arbitrarily set local max so that all threads consume 90% of packets, if all local queues are full.
+ * Arbitrarily set transfer glob size to 20% of max local packet queue length.
+ * Also set minimum values of 15 and 3.  Given the algorithms, the number of buffers allocated
+ * by each call to AllocPacketBufs() would increase indefinitely without a cap on the transfer
+ * glob size.  A cap of 64 is selected because that produces an allocation of more than
+ * three times that amount, which is greater than half of ncalls * maxReceiveWindow.
+ * Must be called under rx_packets_mutex.
+ */
#define RX_TS_FPQ_COMPUTE_LIMITS \
do { \
- register int newmax, newglob; \
+ int newmax, newglob; \
newmax = (rx_nPackets * 9) / (10 * rx_TSFPQMaxProcs); \
newmax = (newmax >= 15) ? newmax : 15; \
newglob = newmax / 5; \
- newglob = (newglob >= 3) ? newglob : 3; \
+ newglob = (newglob >= 3) ? (newglob < 64 ? newglob : 64) : 3; \
rx_TSFPQLocalMax = newmax; \
rx_TSFPQGlobSize = newglob; \
} while(0)
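+/*
+ * Worked example of the computation above: with rx_nPackets == 1000 and
+ * rx_TSFPQMaxProcs == 10, newmax = (1000 * 9) / (10 * 10) = 90 and
+ * newglob = 90 / 5 = 18.  Both clear the floors of 15 and 3, and the glob
+ * stays under the cap of 64, so rx_TSFPQLocalMax becomes 90 and
+ * rx_TSFPQGlobSize becomes 18.
+ */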
+/* record the number of packets allocated by this thread
+ * and stored in the thread local queue */
+#define RX_TS_FPQ_LOCAL_ALLOC(rx_ts_info_p,num_alloc) \
+ do { \
+ (rx_ts_info_p)->_FPQ.lalloc_ops++; \
+ (rx_ts_info_p)->_FPQ.lalloc_xfer += num_alloc; \
+ } while (0)
+/* record the number of packets allocated by this thread
+ * and stored in the global queue */
+#define RX_TS_FPQ_GLOBAL_ALLOC(rx_ts_info_p,num_alloc) \
+ do { \
+ (rx_ts_info_p)->_FPQ.galloc_ops++; \
+ (rx_ts_info_p)->_FPQ.galloc_xfer += num_alloc; \
+ } while (0)
/* move packets from local (thread-specific) to global free packet queue.
- rx_freePktQ_lock must be held. default is to move the difference between the current lenght, and the
- allowed max plus one extra glob. */
+ rx_freePktQ_lock must be held.  Default is to reduce the queue size to 40% of max. */
#define RX_TS_FPQ_LTOG(rx_ts_info_p) \
do { \
- register int i; \
- register struct rx_packet * p; \
- register int tsize = (rx_ts_info_p)->_FPQ.len - rx_TSFPQLocalMax + rx_TSFPQGlobSize; \
+ int i; \
+ struct rx_packet * p; \
+ int tsize = MIN((rx_ts_info_p)->_FPQ.len, (rx_ts_info_p)->_FPQ.len - rx_TSFPQLocalMax + 3 * rx_TSFPQGlobSize); \
+ if (tsize <= 0) break; \
for (i=0,p=queue_Last(&((rx_ts_info_p)->_FPQ), rx_packet); \
i < tsize; i++,p=queue_Prev(p, rx_packet)); \
queue_SplitAfterPrepend(&((rx_ts_info_p)->_FPQ),&rx_freePacketQueue,p); \
(rx_ts_info_p)->_FPQ.ltog_ops++; \
(rx_ts_info_p)->_FPQ.ltog_xfer += tsize; \
if ((rx_ts_info_p)->_FPQ.delta) { \
- (rx_ts_info_p)->_FPQ.alloc_ops++; \
- (rx_ts_info_p)->_FPQ.alloc_xfer += (rx_ts_info_p)->_FPQ.delta; \
- MUTEX_ENTER(&rx_stats_mutex); \
- rx_nPackets += (rx_ts_info_p)->_FPQ.delta; \
+ MUTEX_ENTER(&rx_packets_mutex); \
RX_TS_FPQ_COMPUTE_LIMITS; \
- MUTEX_EXIT(&rx_stats_mutex); \
+ MUTEX_EXIT(&rx_packets_mutex); \
(rx_ts_info_p)->_FPQ.delta = 0; \
} \
} while(0)
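+/*
+ * Worked example of the 40% target: with the minimum limits
+ * rx_TSFPQLocalMax == 15 and rx_TSFPQGlobSize == 3, a full local queue
+ * (len == 15) yields tsize = MIN(15, 15 - 15 + 3 * 3) = 9, so 9 packets
+ * move to the global queue and 6 (40% of the local max) stay local.  The
+ * same fraction holds for larger limits as long as the glob size is
+ * below its cap of 64.
+ */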
/* same as above, except user has direct control over number to transfer */
#define RX_TS_FPQ_LTOG2(rx_ts_info_p,num_transfer) \
do { \
- register int i; \
- register struct rx_packet * p; \
+ int i; \
+ struct rx_packet * p; \
+ if (num_transfer <= 0) break; \
for (i=0,p=queue_Last(&((rx_ts_info_p)->_FPQ), rx_packet); \
i < (num_transfer); i++,p=queue_Prev(p, rx_packet)); \
queue_SplitAfterPrepend(&((rx_ts_info_p)->_FPQ),&rx_freePacketQueue,p); \
(rx_ts_info_p)->_FPQ.ltog_ops++; \
(rx_ts_info_p)->_FPQ.ltog_xfer += (num_transfer); \
if ((rx_ts_info_p)->_FPQ.delta) { \
- (rx_ts_info_p)->_FPQ.alloc_ops++; \
- (rx_ts_info_p)->_FPQ.alloc_xfer += (rx_ts_info_p)->_FPQ.delta; \
- MUTEX_ENTER(&rx_stats_mutex); \
- rx_nPackets += (rx_ts_info_p)->_FPQ.delta; \
+ MUTEX_ENTER(&rx_packets_mutex); \
RX_TS_FPQ_COMPUTE_LIMITS; \
- MUTEX_EXIT(&rx_stats_mutex); \
+ MUTEX_EXIT(&rx_packets_mutex); \
(rx_ts_info_p)->_FPQ.delta = 0; \
} \
} while(0)
rx_freePktQ_lock must be held. */
#define RX_TS_FPQ_GTOL(rx_ts_info_p) \
do { \
- register int i, tsize; \
- register struct rx_packet * p; \
+ int i, tsize; \
+ struct rx_packet * p; \
tsize = (rx_TSFPQGlobSize <= rx_nFreePackets) ? \
rx_TSFPQGlobSize : rx_nFreePackets; \
for (i=0,p=queue_First(&rx_freePacketQueue, rx_packet); \
/* same as above, except user has direct control over number to transfer */
#define RX_TS_FPQ_GTOL2(rx_ts_info_p,num_transfer) \
do { \
- register int i; \
- register struct rx_packet * p; \
+ int i, tsize; \
+ struct rx_packet * p; \
+ tsize = (num_transfer); \
+ if (tsize > rx_nFreePackets) tsize = rx_nFreePackets; \
for (i=0,p=queue_First(&rx_freePacketQueue, rx_packet); \
- i < (num_transfer); i++,p=queue_Next(p, rx_packet)); \
+ i < tsize; i++,p=queue_Next(p, rx_packet)); \
queue_SplitBeforeAppend(&rx_freePacketQueue,&((rx_ts_info_p)->_FPQ),p); \
(rx_ts_info_p)->_FPQ.len += i; \
rx_nFreePackets -= i; \
(rx_ts_info_p)->_FPQ.checkout_ops++; \
(rx_ts_info_p)->_FPQ.checkout_xfer++; \
} while(0)
-/* checkout multiple packets from the thread-specific free packet queue */
-#define RX_TS_FPQ_CHECKOUT2(rx_ts_info_p,num_transfer,q) \
+/* checkout multiple packets from the thread-specific free packet queue.
+ * num_transfer must be a modifiable lvalue: the macro clamps it to the
+ * local queue length, so the caller sees how many packets were actually
+ * checked out.
+ */
+#define RX_TS_FPQ_QCHECKOUT(rx_ts_info_p,num_transfer,q) \
do { \
- register int i; \
- register struct rx_packet *p; \
+ int i; \
+ struct rx_packet *p; \
+ if (num_transfer > (rx_ts_info_p)->_FPQ.len) num_transfer = (rx_ts_info_p)->_FPQ.len; \
for (i=0, p=queue_First(&((rx_ts_info_p)->_FPQ), rx_packet); \
- i < (num_transfer); \
+ i < num_transfer; \
i++, p=queue_Next(p, rx_packet)) { \
RX_FPQ_MARK_USED(p); \
} \
queue_SplitBeforeAppend(&((rx_ts_info_p)->_FPQ),(q),p); \
- (rx_ts_info_p)->_FPQ.len -= (num_transfer); \
+ (rx_ts_info_p)->_FPQ.len -= num_transfer; \
(rx_ts_info_p)->_FPQ.checkout_ops++; \
- (rx_ts_info_p)->_FPQ.checkout_xfer += (num_transfer); \
+ (rx_ts_info_p)->_FPQ.checkout_xfer += num_transfer; \
} while(0)
/* check a packet into the thread-specific free packet queue */
#define RX_TS_FPQ_CHECKIN(rx_ts_info_p,p) \
(rx_ts_info_p)->_FPQ.checkin_xfer++; \
} while(0)
/* check multiple packets into the thread-specific free packet queue */
-/* num_transfer must equal length of (q); it is not a means of checking
- * in part of (q). passing num_transfer just saves us instructions
+/* num_transfer must equal length of (q); it is not a means of checking
+ * in part of (q). passing num_transfer just saves us instructions
* since caller already knows length of (q) for other reasons */
-#define RX_TS_FPQ_CHECKIN2(rx_ts_info_p,num_transfer,q) \
+#define RX_TS_FPQ_QCHECKIN(rx_ts_info_p,num_transfer,q) \
do { \
- register struct rx_packet *p, *np; \
+ struct rx_packet *p, *np; \
for (queue_Scan((q), p, np, rx_packet)) { \
RX_FPQ_MARK_FREE(p); \
} \
#endif /* AFS_PTHREAD_ENV */
/* Number of free packets */
-EXT int rx_nFreePackets INIT(0);
-EXT int rxi_NeedMorePackets INIT(0);
-EXT int rx_nWaiting INIT(0);
-EXT int rx_nWaited INIT(0);
-EXT int rx_packetReclaims INIT(0);
+EXT int rx_nFreePackets GLOBALSINIT(0);
+EXT int rxi_NeedMorePackets GLOBALSINIT(0);
+EXT int rx_nWaiting GLOBALSINIT(0);
+EXT int rx_nWaited GLOBALSINIT(0);
+EXT int rx_packetReclaims GLOBALSINIT(0);
/* largest packet which we can safely receive, initialized to AFS 3.2 value
* This is provided for backward compatibility with peers which may be unable
* to swallow anything larger. THIS MUST NEVER DECREASE WHILE AN APPLICATION
* IS RUNNING! */
-EXT afs_uint32 rx_maxReceiveSize INIT(OLD_MAX_PACKET_SIZE * RX_MAX_FRAGS +
+EXT afs_uint32 rx_maxReceiveSize GLOBALSINIT(_OLD_MAX_PACKET_SIZE * RX_MAX_FRAGS +
UDP_HDR_SIZE * (RX_MAX_FRAGS - 1));
/* this is the maximum packet size that the user wants us to receive */
/* this is set by rxTune if required */
-EXT afs_uint32 rx_maxReceiveSizeUser INIT(0xffffffff);
+EXT afs_uint32 rx_maxReceiveSizeUser GLOBALSINIT(0xffffffff);
/* rx_MyMaxSendSize is the size of the largest packet we will send,
* including the RX header. Just as rx_maxReceiveSize is the
* max we will receive, including the rx header.
*/
-EXT afs_uint32 rx_MyMaxSendSize INIT(8588);
+EXT afs_uint32 rx_MyMaxSendSize GLOBALSINIT(8588);
/* Maximum size of a jumbo datagram we can receive */
-EXT afs_uint32 rx_maxJumboRecvSize INIT(RX_MAX_PACKET_SIZE);
+EXT afs_uint32 rx_maxJumboRecvSize GLOBALSINIT(RX_MAX_PACKET_SIZE);
/* need this to permit progs to run on AIX systems */
-EXT int (*rxi_syscallp) (afs_uint32 a3, afs_uint32 a4, void *a5)INIT(0);
+EXT int (*rxi_syscallp) (afs_uint32 a3, afs_uint32 a4, void *a5)GLOBALSINIT(0);
/* List of free queue entries */
-EXT struct rx_serverQueueEntry *rx_FreeSQEList INIT(0);
+EXT struct rx_serverQueueEntry *rx_FreeSQEList GLOBALSINIT(0);
#ifdef RX_ENABLE_LOCKS
EXT afs_kmutex_t freeSQEList_lock;
#endif
#ifdef RX_ENABLE_LOCKS
EXT afs_kmutex_t rx_freeCallQueue_lock;
#endif
-EXT afs_int32 rxi_nCalls INIT(0);
+EXT afs_int32 rxi_nCalls GLOBALSINIT(0);
/* Port requested at rx_Init. If this is zero, the actual port used will be different--but it will only be used for client operations. If non-zero, server provided services may use the same port. */
EXT u_short rx_port;
#if !defined(KERNEL) && !defined(AFS_PTHREAD_ENV)
/* 32-bit select Mask for rx_Listener. */
EXT fd_set rx_selectMask;
-EXT int rx_maxSocketNumber; /* Maximum socket number in the select mask. */
+EXT osi_socket rx_maxSocketNumber; /* Maximum socket number in the select mask. */
/* Minumum socket number in the select mask. */
-EXT int rx_minSocketNumber INIT(0x7fffffff);
+EXT osi_socket rx_minSocketNumber GLOBALSINIT(0x7fffffff);
#endif
/* This is actually the minimum number of packets that must remain free,
/* value large enough to guarantee that no allocation fails due to RX_PACKET_QUOTAS.
Make it a little bigger, just for fun */
#define RX_MAX_QUOTA 15 /* part of min packet computation */
-EXT int rx_packetQuota[RX_N_PACKET_CLASSES] INIT(RX_PACKET_QUOTAS);
-EXT int meltdown_1pkt INIT(1); /* prefer to schedule single-packet calls */
-EXT int rxi_doreclaim INIT(1); /* if discard one packet, discard all */
-EXT int rxi_md2cnt INIT(0); /* counter of skipped calls */
-EXT int rxi_2dchoice INIT(1); /* keep track of another call to schedule */
+EXT int rx_packetQuota[RX_N_PACKET_CLASSES] GLOBALSINIT(RX_PACKET_QUOTAS);
+EXT int meltdown_1pkt GLOBALSINIT(1); /* prefer to schedule single-packet calls */
+EXT int rxi_doreclaim GLOBALSINIT(1); /* if discard one packet, discard all */
+EXT int rxi_md2cnt GLOBALSINIT(0); /* counter of skipped calls */
+EXT int rxi_2dchoice GLOBALSINIT(1); /* keep track of another call to schedule */
/* quota system: each attached server process must be able to make
progress to avoid system deadlock, so we ensure that we can always
quota to send any packets) */
/* # to reserve so that thread with input can still make calls (send packets)
without blocking */
-EXT int rxi_dataQuota INIT(RX_MAX_QUOTA); /* packets to reserve for active threads */
+EXT int rxi_dataQuota GLOBALSINIT(RX_MAX_QUOTA); /* packets to reserve for active threads */
-EXT afs_int32 rxi_availProcs INIT(0); /* number of threads in the pool */
-EXT afs_int32 rxi_totalMin INIT(0); /* Sum(minProcs) forall services */
-EXT afs_int32 rxi_minDeficit INIT(0); /* number of procs needed to handle all minProcs */
+EXT afs_int32 rxi_availProcs GLOBALSINIT(0); /* number of threads in the pool */
+EXT afs_int32 rxi_totalMin GLOBALSINIT(0); /* Sum(minProcs) forall services */
+EXT afs_int32 rxi_minDeficit GLOBALSINIT(0); /* number of procs needed to handle all minProcs */
EXT int rx_nextCid; /* Next connection call id */
EXT int rx_epoch; /* Initialization time of rx */
#endif
EXT char rx_waitingForPackets; /* Processes set and wait on this variable when waiting for packet buffers */
-EXT struct rx_stats rx_stats;
+EXT struct rx_statistics rx_stats;
EXT struct rx_peer **rx_peerHashTable;
EXT struct rx_connection **rx_connHashTable;
-EXT struct rx_connection *rx_connCleanup_list INIT(0);
-EXT afs_uint32 rx_hashTableSize INIT(257); /* Prime number */
+EXT struct rx_connection *rx_connCleanup_list GLOBALSINIT(0);
+EXT afs_uint32 rx_hashTableSize GLOBALSINIT(257); /* Prime number */
#ifdef RX_ENABLE_LOCKS
EXT afs_kmutex_t rx_peerHashTable_lock;
EXT afs_kmutex_t rx_connHashTable_lock;
/* Forward definitions of internal procedures */
#define rxi_ChallengeOff(conn) rxevent_Cancel((conn)->challengeEvent, (struct rx_call*)0, 0);
#define rxi_KeepAliveOff(call) rxevent_Cancel((call)->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE)
+#define rxi_NatKeepAliveOff(conn) rxevent_Cancel((conn)->natKeepAliveEvent, (struct rx_call*)0, 0)
#define rxi_AllocSecurityObject() (struct rx_securityClass *) rxi_Alloc(sizeof(struct rx_securityClass))
#define rxi_FreeSecurityObject(obj) rxi_Free(obj, sizeof(struct rx_securityClass))
#define rxi_AllocService() (struct rx_service *) rxi_Alloc(sizeof(struct rx_service))
-#define rxi_FreeService(obj) rxi_Free(obj, sizeof(struct rx_service))
+#define rxi_FreeService(obj) \
+do { \
+ MUTEX_DESTROY(&(obj)->svc_data_lock); \
+ rxi_Free((obj), sizeof(struct rx_service)); \
+} while (0)
#define rxi_AllocPeer() (struct rx_peer *) rxi_Alloc(sizeof(struct rx_peer))
#define rxi_FreePeer(peer) rxi_Free(peer, sizeof(struct rx_peer))
#define rxi_AllocConnection() (struct rx_connection *) rxi_Alloc(sizeof(struct rx_connection))
#define rxi_FreeConnection(conn) (rxi_Free(conn, sizeof(struct rx_connection)))
-#ifdef RXDEBUG
+EXT afs_int32 rx_stats_active GLOBALSINIT(1); /* boolean - rx statistics gathering */
+
+#ifndef KERNEL
/* Some debugging stuff */
EXT FILE *rx_debugFile; /* Set by the user to a stdio file for debugging output */
EXT FILE *rxevent_debugFile; /* Set to an stdio descriptor for event logging to that file */
+#endif
+#ifdef RXDEBUG
#define rx_Log rx_debugFile
#ifdef AFS_NT40_ENV
-#define dpf(args) rxi_DebugPrint args;
+EXT int rxdebug_active;
+#define dpf(args) do { if (rxdebug_active) rxi_DebugPrint args; } while (0)
+#else
+#ifdef DPF_FSLOG
+#include <afs/afsutil.h>
+#define dpf(args) FSLog args
#else
-#define dpf(args) if (rx_debugFile) rxi_DebugPrint args; else
+#define dpf(args) do { if (rx_debugFile) rxi_DebugPrint args; } while (0)
+#endif
#endif
#define rx_Log_event rxevent_debugFile
-EXT char *rx_packetTypes[RX_N_PACKET_TYPES] INIT(RX_PACKET_TYPES); /* Strings defined in rx.h */
+EXT char *rx_packetTypes[RX_N_PACKET_TYPES] GLOBALSINIT(RX_PACKET_TYPES); /* Strings defined in rx.h */
+#else
+#define dpf(args)
+#endif /* RXDEBUG */
#ifndef KERNEL
/*
* Counter used to implement connection specific data
*/
-EXT int rxi_keyCreate_counter INIT(0);
+EXT int rxi_keyCreate_counter GLOBALSINIT(0);
/*
* Array of function pointers used to destory connection specific data
*/
-EXT rx_destructor_t *rxi_keyCreate_destructor INIT(NULL);
+EXT rx_destructor_t *rxi_keyCreate_destructor GLOBALSINIT(NULL);
#ifdef RX_ENABLE_LOCKS
EXT afs_kmutex_t rxi_keyCreate_lock;
#endif /* RX_ENABLE_LOCKS */
#endif /* !KERNEL */
-#else
-#define dpf(args)
-#endif /* RXDEBUG */
-
/*
* SERVER ONLY: Threshholds used to throttle error replies to looping
* clients. When consecutive calls are aborting with the same error, the
* server throttles the client by waiting before sending error messages.
* Disabled if abort thresholds are zero.
*/
-EXT int rxi_connAbortThreshhold INIT(0);
-EXT int rxi_connAbortDelay INIT(3000);
-EXT int rxi_callAbortThreshhold INIT(0);
-EXT int rxi_callAbortDelay INIT(3000);
+EXT int rxi_connAbortThreshhold GLOBALSINIT(0);
+EXT int rxi_connAbortDelay GLOBALSINIT(3000);
+EXT int rxi_callAbortThreshhold GLOBALSINIT(0);
+EXT int rxi_callAbortDelay GLOBALSINIT(3000);
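+/*
+ * Illustrative sketch (threshold values are assumptions): a server could
+ * enable the throttling described above by making the thresholds nonzero
+ * before rx_Init(), e.g.
+ *
+ *     rxi_connAbortThreshhold = 5;   // throttle after 5 consecutive identical connection aborts
+ *     rxi_callAbortThreshhold = 5;   // likewise for call aborts
+ *
+ * and leaving rxi_connAbortDelay and rxi_callAbortDelay at their defaults.
+ */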
/*
* Thread specific thread ID used to implement LWP_Index().
*/
#if defined(AFS_PTHREAD_ENV)
-EXT int rxi_fcfs_thread_num INIT(0);
+EXT int rxi_fcfs_thread_num GLOBALSINIT(0);
EXT pthread_key_t rx_thread_id_key;
-/* keep track of pthread numbers - protected by rx_stats_mutex,
- except in rx_Init() before mutex exists! */
-EXT int rxi_pthread_hinum INIT(0);
+/* keep track of pthread numbers - protected by rx_stats_mutex,
+ * except in rx_Init() before mutex exists! */
+EXT int rxi_pthread_hinum GLOBALSINIT(0);
#else
#define rxi_fcfs_thread_num (0)
#endif
#if defined(RX_ENABLE_LOCKS)
-EXT afs_kmutex_t rx_stats_mutex; /* used to activate stats gathering */
+EXT afs_kmutex_t rx_stats_mutex; /* used to protect stats gathering */
+EXT afs_kmutex_t rx_waiting_mutex; /* used to protect waiting counters */
+EXT afs_kmutex_t rx_quota_mutex; /* used to protect quota counters */
+EXT afs_kmutex_t rx_pthread_mutex; /* used to protect pthread counters */
+EXT afs_kmutex_t rx_packets_mutex; /* used to protect packet counters */
#endif
-EXT int rx_enable_stats INIT(0);
+EXT int rx_enable_stats GLOBALSINIT(0);
/*
* Set this flag to enable the listener thread to trade places with an idle
* worker thread to move the context switch from listener to worker out of
* the request path.
*/
-EXT int rx_enable_hot_thread INIT(0);
+EXT int rx_enable_hot_thread GLOBALSINIT(0);
+EXT int RX_IPUDP_SIZE GLOBALSINIT(_RX_IPUDP_SIZE);
#endif /* AFS_RX_GLOBALS_H */