#ifndef GLOBALSINIT
#define GLOBALSINIT(x)
#if defined(AFS_NT40_ENV)
+#define RX_STATS_INTERLOCKED 1
#if defined(AFS_PTHREAD_ENV)
#define EXT __declspec(dllimport) extern
#else
EXT int rx_minWindow GLOBALSINIT(1);
EXT int rx_initReceiveWindow GLOBALSINIT(16); /* how much to accept */
-EXT int rx_maxReceiveWindow GLOBALSINIT(32); /* how much to accept */
-EXT int rx_initSendWindow GLOBALSINIT(8);
-EXT int rx_maxSendWindow GLOBALSINIT(32);
+EXT int rx_maxReceiveWindow GLOBALSINIT(64); /* how much to accept */
+EXT int rx_initSendWindow GLOBALSINIT(16);
+EXT int rx_maxSendWindow GLOBALSINIT(64);
EXT int rx_nackThreshold GLOBALSINIT(3); /* Number NACKS to trigger congestion recovery */
EXT int rx_nDgramThreshold GLOBALSINIT(4); /* Number of packets before increasing
* packets per datagram */
} _FPQ;
struct rx_packet * local_special_packet;
} rx_ts_info_t;
-EXT struct rx_ts_info_t * rx_ts_info_init(); /* init function for thread-specific data struct */
+EXT struct rx_ts_info_t * rx_ts_info_init(void); /* init function for thread-specific data struct */
#define RX_TS_INFO_GET(ts_info_p) \
do { \
ts_info_p = (struct rx_ts_info_t*)pthread_getspecific(rx_ts_info_key); \
if ((p)->flags & RX_PKTFLAG_FREE) \
osi_Panic("rx packet already free\n"); \
(p)->flags |= RX_PKTFLAG_FREE; \
+ (p)->flags &= ~(RX_PKTFLAG_TQ|RX_PKTFLAG_IOVQ|RX_PKTFLAG_RQ|RX_PKTFLAG_CP); \
(p)->length = 0; \
(p)->niovecs = 0; \
} while(0)
#define RX_TS_FPQ_FLUSH_GLOBAL 1
#define RX_TS_FPQ_PULL_GLOBAL 1
#define RX_TS_FPQ_ALLOW_OVERCOMMIT 1
-/* compute the localmax and globsize values from rx_TSFPQMaxProcs and rx_nPackets.
- arbitarily set local max so that all threads consume 90% of packets, if all local queues are full.
- arbitarily set transfer glob size to 20% of max local packet queue length.
- also set minimum values of 15 and 3. */
+/*
+ * compute the localmax and globsize values from rx_TSFPQMaxProcs and rx_nPackets.
+ * arbitrarily set local max so that all threads consume 90% of packets, if all local queues are full.
+ * arbitrarily set transfer glob size to 20% of max local packet queue length.
+ * also set minimum values of 15 and 3. Given the algorithms, the number of buffers allocated
+ * by each call to AllocPacketBufs() will increase indefinitely without a cap on the transfer
+ * glob size. A cap of 64 is selected because that will produce an allocation of greater than
+ * three times that amount which is greater than half of ncalls * maxReceiveWindow.
+ */
#define RX_TS_FPQ_COMPUTE_LIMITS \
do { \
register int newmax, newglob; \
newmax = (rx_nPackets * 9) / (10 * rx_TSFPQMaxProcs); \
newmax = (newmax >= 15) ? newmax : 15; \
newglob = newmax / 5; \
- newglob = (newglob >= 3) ? newglob : 3; \
+ newglob = (newglob >= 3) ? (newglob < 64 ? newglob : 64) : 3; \
rx_TSFPQLocalMax = newmax; \
rx_TSFPQGlobSize = newglob; \
} while(0)
/* same as above, except user has direct control over number to transfer */
#define RX_TS_FPQ_GTOL2(rx_ts_info_p,num_transfer) \
do { \
- register int i; \
+ register int i, tsize; \
register struct rx_packet * p; \
+ tsize = (num_transfer); \
+ if (tsize > rx_nFreePackets) tsize = rx_nFreePackets; \
for (i=0,p=queue_First(&rx_freePacketQueue, rx_packet); \
- i < (num_transfer); i++,p=queue_Next(p, rx_packet)); \
+ i < tsize; i++,p=queue_Next(p, rx_packet)); \
queue_SplitBeforeAppend(&rx_freePacketQueue,&((rx_ts_info_p)->_FPQ),p); \
(rx_ts_info_p)->_FPQ.len += i; \
rx_nFreePackets -= i; \
(rx_ts_info_p)->_FPQ.checkout_ops++; \
(rx_ts_info_p)->_FPQ.checkout_xfer++; \
} while(0)
-/* checkout multiple packets from the thread-specific free packet queue */
+/* checkout multiple packets from the thread-specific free packet queue.
+ * num_transfer must be a variable.
+ */
#define RX_TS_FPQ_QCHECKOUT(rx_ts_info_p,num_transfer,q) \
do { \
register int i; \
register struct rx_packet *p; \
+ if (num_transfer > (rx_ts_info_p)->_FPQ.len) num_transfer = (rx_ts_info_p)->_FPQ.len; \
for (i=0, p=queue_First(&((rx_ts_info_p)->_FPQ), rx_packet); \
- i < (num_transfer); \
+ i < num_transfer; \
i++, p=queue_Next(p, rx_packet)) { \
RX_FPQ_MARK_USED(p); \
} \
queue_SplitBeforeAppend(&((rx_ts_info_p)->_FPQ),(q),p); \
- (rx_ts_info_p)->_FPQ.len -= (num_transfer); \
+ (rx_ts_info_p)->_FPQ.len -= num_transfer; \
(rx_ts_info_p)->_FPQ.checkout_ops++; \
- (rx_ts_info_p)->_FPQ.checkout_xfer += (num_transfer); \
+ (rx_ts_info_p)->_FPQ.checkout_xfer += num_transfer; \
} while(0)
/* check a packet into the thread-specific free packet queue */
#define RX_TS_FPQ_CHECKIN(rx_ts_info_p,p) \
* This is provided for backward compatibility with peers which may be unable
* to swallow anything larger. THIS MUST NEVER DECREASE WHILE AN APPLICATION
* IS RUNNING! */
-EXT afs_uint32 rx_maxReceiveSize GLOBALSINIT(OLD_MAX_PACKET_SIZE * RX_MAX_FRAGS +
+EXT afs_uint32 rx_maxReceiveSize GLOBALSINIT(_OLD_MAX_PACKET_SIZE * RX_MAX_FRAGS +
UDP_HDR_SIZE * (RX_MAX_FRAGS - 1));
/* this is the maximum packet size that the user wants us to receive */
#endif
EXT char rx_waitingForPackets; /* Processes set and wait on this variable when waiting for packet buffers */
-EXT struct rx_stats rx_stats;
+EXT struct rx_statistics rx_stats;
EXT struct rx_peer **rx_peerHashTable;
EXT struct rx_connection **rx_connHashTable;
#ifdef DPF_FSLOG
#define dpf(args) FSLog args
#else
-#define dpf(args) if (rx_debugFile) rxi_DebugPrint args; else
+#define dpf(args) do { if (rx_debugFile) rxi_DebugPrint args; } while (0)
#endif
#endif
#define rx_Log_event rxevent_debugFile
*/
EXT int rx_enable_hot_thread GLOBALSINIT(0);
+/*
+ * Set rx_max_clones_per_connection to a value > 0 to enable the
+ * connection-clone workaround for the RX_MAXCALLS limit.
+ */
+
+#define RX_HARD_MAX_CLONES 10
+
+EXT int rx_max_clones_per_connection GLOBALSINIT(2);
+
+EXT int RX_IPUDP_SIZE GLOBALSINIT(_RX_IPUDP_SIZE);
#endif /* AFS_RX_GLOBALS_H */