/* Time until an unresponsive connection is declared dead */
EXT int rx_connDeadTime GLOBALSINIT(12);
+
/* Set rx default connection dead time; set on both services and connections at creation time */
+#ifdef AFS_NT40_ENV
+/* On Windows this is a real function rather than a macro --
+ * NOTE(review): presumably so it can be exported across the DLL boundary
+ * instead of touching rx_connDeadTime directly; confirm against the rx
+ * export list. */
+void rx_SetRxDeadTime(int seconds);
+#else
#define rx_SetRxDeadTime(seconds) (rx_connDeadTime = (seconds))
+#endif
/* Time until we toss an idle connection */
EXT int rx_idleConnectionTime GLOBALSINIT(700);
/* UDP rcv buffer size */
EXT int rx_UdpBufSize GLOBALSINIT(64 * 1024);
+#ifdef AFS_NT40_ENV
+/* Function forms on Windows (NOTE(review): presumably for DLL export --
+ * confirm); macro forms elsewhere. */
+int rx_GetMinUdpBufSize(void);
+void rx_SetUdpBufSize(int x);
+#else
#define rx_GetMinUdpBufSize() (64*1024)
+/* Only grows the buffer: a request at or below the minimum is ignored
+ * (the expression evaluates to 0). */
#define rx_SetUdpBufSize(x) (((x)>rx_GetMinUdpBufSize()) ? (rx_UdpBufSize = (x)):0)
-
+#endif
/*
* Variables to control RX overload management. When the number of calls
* waiting for a thread exceed the threshold, new calls are aborted
int gtol_xfer;
int ltog_ops;
int ltog_xfer;
- int alloc_ops;
- int alloc_xfer;
+ int lalloc_ops;
+ int lalloc_xfer;
+ int galloc_ops;
+ int galloc_xfer;
} _FPQ;
struct rx_packet * local_special_packet;
} rx_ts_info_t;
-EXT struct rx_ts_info_t * rx_ts_info_init(); /* init function for thread-specific data struct */
+struct rx_ts_info_t * rx_ts_info_init(void); /* init function for thread-specific data struct */
#define RX_TS_INFO_GET(ts_info_p) \
do { \
ts_info_p = (struct rx_ts_info_t*)pthread_getspecific(rx_ts_info_key); \
if ((p)->flags & RX_PKTFLAG_FREE) \
osi_Panic("rx packet already free\n"); \
(p)->flags |= RX_PKTFLAG_FREE; \
+ (p)->flags &= ~(RX_PKTFLAG_TQ|RX_PKTFLAG_IOVQ|RX_PKTFLAG_RQ|RX_PKTFLAG_CP); \
(p)->length = 0; \
(p)->niovecs = 0; \
} while(0)
EXT int rx_TSFPQGlobSize GLOBALSINIT(3); /* number of packets to transfer between global and local queues in one op */
EXT int rx_TSFPQLocalMax GLOBALSINIT(15); /* max number of packets on local FPQ before returning a glob to the global pool */
EXT int rx_TSFPQMaxProcs GLOBALSINIT(0); /* max number of threads expected */
-EXT void rxi_MorePacketsTSFPQ(int apackets, int flush_global, int num_keep_local); /* more flexible packet alloc function */
-EXT void rxi_AdjustLocalPacketsTSFPQ(int num_keep_local, int allow_overcommit); /* adjust thread-local queue length, for places where we know how many packets we will need a priori */
-EXT void rxi_FlushLocalPacketsTSFPQ(void); /* flush all thread-local packets to global queue */
+void rxi_MorePacketsTSFPQ(int apackets, int flush_global, int num_keep_local); /* more flexible packet alloc function */
+void rxi_AdjustLocalPacketsTSFPQ(int num_keep_local, int allow_overcommit); /* adjust thread-local queue length, for places where we know how many packets we will need a priori */
+void rxi_FlushLocalPacketsTSFPQ(void); /* flush all thread-local packets to global queue */
#define RX_TS_FPQ_FLUSH_GLOBAL 1
#define RX_TS_FPQ_PULL_GLOBAL 1
#define RX_TS_FPQ_ALLOW_OVERCOMMIT 1
rx_TSFPQLocalMax = newmax; \
rx_TSFPQGlobSize = newglob; \
} while(0)
+/* Record packets allocated by this thread and kept on its thread-local
+ * free packet queue (_FPQ): bump the op count and the running packet
+ * transfer total.  Multi-statement body wrapped in do/while(0); the
+ * num_alloc argument is parenthesized in the expansion for macro hygiene. */
+#define RX_TS_FPQ_LOCAL_ALLOC(rx_ts_info_p,num_alloc) \
+    do { \
+        (rx_ts_info_p)->_FPQ.lalloc_ops++; \
+        (rx_ts_info_p)->_FPQ.lalloc_xfer += (num_alloc); \
+    } while (0)
+/* Record packets allocated by this thread but handed to the global free
+ * packet queue: bump the op count and the running packet transfer total.
+ * Multi-statement body wrapped in do/while(0); the num_alloc argument is
+ * parenthesized in the expansion for macro hygiene. */
+#define RX_TS_FPQ_GLOBAL_ALLOC(rx_ts_info_p,num_alloc) \
+    do { \
+        (rx_ts_info_p)->_FPQ.galloc_ops++; \
+        (rx_ts_info_p)->_FPQ.galloc_xfer += (num_alloc); \
+    } while (0)
/* move packets from local (thread-specific) to global free packet queue.
- rx_freePktQ_lock must be held. default is to move the difference between the current lenght, and the
- allowed max plus one extra glob. */
+   rx_freePktQ_lock must be held. The default is to reduce the queue size to 40% of max. */
#define RX_TS_FPQ_LTOG(rx_ts_info_p) \
do { \
register int i; \
register struct rx_packet * p; \
- register int tsize = (rx_ts_info_p)->_FPQ.len - rx_TSFPQLocalMax + rx_TSFPQGlobSize; \
+ register int tsize = (rx_ts_info_p)->_FPQ.len - rx_TSFPQLocalMax + 3 * rx_TSFPQGlobSize; \
+ if (tsize <= 0) break; \
for (i=0,p=queue_Last(&((rx_ts_info_p)->_FPQ), rx_packet); \
i < tsize; i++,p=queue_Prev(p, rx_packet)); \
queue_SplitAfterPrepend(&((rx_ts_info_p)->_FPQ),&rx_freePacketQueue,p); \
(rx_ts_info_p)->_FPQ.ltog_ops++; \
(rx_ts_info_p)->_FPQ.ltog_xfer += tsize; \
if ((rx_ts_info_p)->_FPQ.delta) { \
- (rx_ts_info_p)->_FPQ.alloc_ops++; \
- (rx_ts_info_p)->_FPQ.alloc_xfer += (rx_ts_info_p)->_FPQ.delta; \
MUTEX_ENTER(&rx_stats_mutex); \
- rx_nPackets += (rx_ts_info_p)->_FPQ.delta; \
RX_TS_FPQ_COMPUTE_LIMITS; \
MUTEX_EXIT(&rx_stats_mutex); \
(rx_ts_info_p)->_FPQ.delta = 0; \
do { \
register int i; \
register struct rx_packet * p; \
+ if (num_transfer <= 0) break; \
for (i=0,p=queue_Last(&((rx_ts_info_p)->_FPQ), rx_packet); \
i < (num_transfer); i++,p=queue_Prev(p, rx_packet)); \
queue_SplitAfterPrepend(&((rx_ts_info_p)->_FPQ),&rx_freePacketQueue,p); \
(rx_ts_info_p)->_FPQ.ltog_ops++; \
(rx_ts_info_p)->_FPQ.ltog_xfer += (num_transfer); \
if ((rx_ts_info_p)->_FPQ.delta) { \
- (rx_ts_info_p)->_FPQ.alloc_ops++; \
- (rx_ts_info_p)->_FPQ.alloc_xfer += (rx_ts_info_p)->_FPQ.delta; \
MUTEX_ENTER(&rx_stats_mutex); \
- rx_nPackets += (rx_ts_info_p)->_FPQ.delta; \
RX_TS_FPQ_COMPUTE_LIMITS; \
MUTEX_EXIT(&rx_stats_mutex); \
(rx_ts_info_p)->_FPQ.delta = 0; \
#endif
EXT char rx_waitingForPackets; /* Processes set and wait on this variable when waiting for packet buffers */
-EXT struct rx_stats rx_stats;
+EXT struct rx_statistics rx_stats;
EXT struct rx_peer **rx_peerHashTable;
EXT struct rx_connection **rx_connHashTable;
#define rx_Log rx_debugFile
#ifdef AFS_NT40_ENV
EXT int rxdebug_active;
-#if !defined(_WIN64)
-#define dpf(args) if (rxdebug_active) rxi_DebugPrint args;
-#else
-#define dpf(args)
-#endif
+/* do { } while (0) makes dpf(args) a single statement, so it is safe in an
+ * unbraced if/else; the old bare-if form could capture a following else.
+ * NOTE(review): this also re-enables dpf on _WIN64 builds, where it was
+ * previously compiled out entirely -- confirm that is intended. */
+#define dpf(args) do { if (rxdebug_active) rxi_DebugPrint args; } while (0)
#else
#ifdef DPF_FSLOG
#define dpf(args) FSLog args
#else
+/* Same single-statement hardening for the non-Windows debug path; also
+ * drops the old trailing "else" hack used to swallow a semicolon. */
-#define dpf(args) if (rx_debugFile) rxi_DebugPrint args; else
+#define dpf(args) do { if (rx_debugFile) rxi_DebugPrint args; } while (0)
#endif
#endif
#define rx_Log_event rxevent_debugFile