int gtol_xfer;
int ltog_ops;
int ltog_xfer;
- int alloc_ops;
- int alloc_xfer;
+ int lalloc_ops;
+ int lalloc_xfer;
+ int galloc_ops;
+ int galloc_xfer;
} _FPQ;
struct rx_packet * local_special_packet;
} rx_ts_info_t;
rx_TSFPQLocalMax = newmax; \
rx_TSFPQGlobSize = newglob; \
} while(0)
+/* Record a packet allocation performed by this thread where the new
+ * packets were stored on the thread-local free packet queue: bump the
+ * op counter and add num_alloc to the running transfer total. */
+#define RX_TS_FPQ_LOCAL_ALLOC(rx_ts_info_p,num_alloc) \
+    do { \
+        (rx_ts_info_p)->_FPQ.lalloc_ops++; \
+        (rx_ts_info_p)->_FPQ.lalloc_xfer += (num_alloc); \
+    } while (0)
+/* Record a packet allocation performed by this thread where the new
+ * packets were stored on the global free packet queue: bump the
+ * op counter and add num_alloc to the running transfer total. */
+#define RX_TS_FPQ_GLOBAL_ALLOC(rx_ts_info_p,num_alloc) \
+    do { \
+        (rx_ts_info_p)->_FPQ.galloc_ops++; \
+        (rx_ts_info_p)->_FPQ.galloc_xfer += (num_alloc); \
+    } while (0)
/* move packets from local (thread-specific) to global free packet queue.
- rx_freePktQ_lock must be held. default is to move the difference between the current lenght, and the
- allowed max plus one extra glob. */
+   rx_freePktQ_lock must be held.  default is to reduce the queue size to 40% of max */
#define RX_TS_FPQ_LTOG(rx_ts_info_p) \
do { \
register int i; \
register struct rx_packet * p; \
- register int tsize = (rx_ts_info_p)->_FPQ.len - rx_TSFPQLocalMax + rx_TSFPQGlobSize; \
+ register int tsize = (rx_ts_info_p)->_FPQ.len - rx_TSFPQLocalMax + 3 * rx_TSFPQGlobSize; \
+ if (tsize <= 0) break; \
for (i=0,p=queue_Last(&((rx_ts_info_p)->_FPQ), rx_packet); \
i < tsize; i++,p=queue_Prev(p, rx_packet)); \
queue_SplitAfterPrepend(&((rx_ts_info_p)->_FPQ),&rx_freePacketQueue,p); \
(rx_ts_info_p)->_FPQ.ltog_ops++; \
(rx_ts_info_p)->_FPQ.ltog_xfer += tsize; \
if ((rx_ts_info_p)->_FPQ.delta) { \
- (rx_ts_info_p)->_FPQ.alloc_ops++; \
- (rx_ts_info_p)->_FPQ.alloc_xfer += (rx_ts_info_p)->_FPQ.delta; \
MUTEX_ENTER(&rx_stats_mutex); \
- rx_nPackets += (rx_ts_info_p)->_FPQ.delta; \
RX_TS_FPQ_COMPUTE_LIMITS; \
MUTEX_EXIT(&rx_stats_mutex); \
(rx_ts_info_p)->_FPQ.delta = 0; \
do { \
register int i; \
register struct rx_packet * p; \
+ if (num_transfer <= 0) break; \
for (i=0,p=queue_Last(&((rx_ts_info_p)->_FPQ), rx_packet); \
i < (num_transfer); i++,p=queue_Prev(p, rx_packet)); \
queue_SplitAfterPrepend(&((rx_ts_info_p)->_FPQ),&rx_freePacketQueue,p); \
(rx_ts_info_p)->_FPQ.ltog_ops++; \
(rx_ts_info_p)->_FPQ.ltog_xfer += (num_transfer); \
if ((rx_ts_info_p)->_FPQ.delta) { \
- (rx_ts_info_p)->_FPQ.alloc_ops++; \
- (rx_ts_info_p)->_FPQ.alloc_xfer += (rx_ts_info_p)->_FPQ.delta; \
MUTEX_ENTER(&rx_stats_mutex); \
- rx_nPackets += (rx_ts_info_p)->_FPQ.delta; \
RX_TS_FPQ_COMPUTE_LIMITS; \
MUTEX_EXIT(&rx_stats_mutex); \
(rx_ts_info_p)->_FPQ.delta = 0; \
AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
{
register struct rx_ts_info_t * rx_ts_info;
- int transfer, alloc;
+ int transfer;
SPLVAR;
RX_TS_INFO_GET(rx_ts_info);
if (transfer > 0) {
NETPRI;
MUTEX_ENTER(&rx_freePktQ_lock);
-
- if ((transfer + rx_TSFPQGlobSize) <= rx_nFreePackets) {
- transfer += rx_TSFPQGlobSize;
- } else if (transfer <= rx_nFreePackets) {
- transfer = rx_nFreePackets;
- } else {
+ transfer = MAX(transfer, rx_TSFPQGlobSize);
+ if (transfer > rx_nFreePackets) {
/* alloc enough for us, plus a few globs for other threads */
- alloc = transfer + (3 * rx_TSFPQGlobSize) - rx_nFreePackets;
- rxi_MorePacketsNoLock(MAX(alloc, rx_initSendWindow));
- transfer = rx_TSFPQGlobSize;
+ rxi_MorePacketsNoLock(transfer + 4 * rx_initSendWindow);
}
RX_TS_FPQ_GTOL2(rx_ts_info, transfer);
}
#else /* KERNEL */
if (rx_nFreePackets < num_pkts) {
- rxi_MorePacketsNoLock(MAX((num_pkts-rx_nFreePackets), rx_initSendWindow));
+ rxi_MorePacketsNoLock(MAX((num_pkts-rx_nFreePackets), 4 * rx_initSendWindow));
}
#endif /* KERNEL */
SPLVAR;
getme = apackets * sizeof(struct rx_packet);
- p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
+ p = (struct rx_packet *)osi_Alloc(getme);
osi_Assert(p);
PIN(p, getme); /* XXXXX */
memset((char *)p, 0, getme);
RX_TS_INFO_GET(rx_ts_info);
+ RX_TS_FPQ_LOCAL_ALLOC(rx_ts_info,apackets);
+ /* TSFPQ patch also needs to keep track of total packets */
+ MUTEX_ENTER(&rx_stats_mutex);
+ rx_nPackets += apackets;
+ RX_TS_FPQ_COMPUTE_LIMITS;
+ MUTEX_EXIT(&rx_stats_mutex);
+
for (e = p + apackets; p < e; p++) {
RX_PACKET_IOV_INIT(p);
p->niovecs = 2;
RX_TS_FPQ_CHECKIN(rx_ts_info,p);
+
+ NETPRI;
+ MUTEX_ENTER(&rx_freePktQ_lock);
+ rx_mallocedP = p;
+ MUTEX_EXIT(&rx_freePktQ_lock);
+ USERPRI;
}
rx_ts_info->_FPQ.delta += apackets;
SPLVAR;
getme = apackets * sizeof(struct rx_packet);
- p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
+ p = (struct rx_packet *)osi_Alloc(getme);
osi_Assert(p);
PIN(p, getme); /* XXXXX */
p->niovecs = 2;
queue_Append(&rx_freePacketQueue, p);
+ rx_mallocedP = p;
}
+
rx_nFreePackets += apackets;
rxi_NeedMorePackets = FALSE;
rxi_PacketsUnWait();
SPLVAR;
getme = apackets * sizeof(struct rx_packet);
- p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
+ p = (struct rx_packet *)osi_Alloc(getme);
PIN(p, getme); /* XXXXX */
memset((char *)p, 0, getme);
RX_TS_INFO_GET(rx_ts_info);
+ RX_TS_FPQ_LOCAL_ALLOC(rx_ts_info,apackets);
+ /* TSFPQ patch also needs to keep track of total packets */
+ MUTEX_ENTER(&rx_stats_mutex);
+ rx_nPackets += apackets;
+ RX_TS_FPQ_COMPUTE_LIMITS;
+ MUTEX_EXIT(&rx_stats_mutex);
+
for (e = p + apackets; p < e; p++) {
RX_PACKET_IOV_INIT(p);
p->niovecs = 2;
-
RX_TS_FPQ_CHECKIN(rx_ts_info,p);
+
+ NETPRI;
+ MUTEX_ENTER(&rx_freePktQ_lock);
+ rx_mallocedP = p;
+ MUTEX_EXIT(&rx_freePktQ_lock);
+ USERPRI;
}
rx_ts_info->_FPQ.delta += apackets;
void
rxi_MorePacketsNoLock(int apackets)
{
+#ifdef RX_ENABLE_TSFPQ
+ register struct rx_ts_info_t * rx_ts_info;
+#endif /* RX_ENABLE_TSFPQ */
struct rx_packet *p, *e;
int getme;
* ((rx_maxJumboRecvSize - RX_FIRSTBUFFERSIZE) / RX_CBUFFERSIZE);
do {
getme = apackets * sizeof(struct rx_packet);
- p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
+ p = (struct rx_packet *)osi_Alloc(getme);
if (p == NULL) {
apackets -= apackets / 4;
osi_Assert(apackets > 0);
} while(p == NULL);
memset((char *)p, 0, getme);
+#ifdef RX_ENABLE_TSFPQ
+ RX_TS_INFO_GET(rx_ts_info);
+ RX_TS_FPQ_GLOBAL_ALLOC(rx_ts_info,apackets);
+#endif /* RX_ENABLE_TSFPQ */
+
for (e = p + apackets; p < e; p++) {
RX_PACKET_IOV_INIT(p);
p->flags |= RX_PKTFLAG_FREE;
p->niovecs = 2;
queue_Append(&rx_freePacketQueue, p);
+ rx_mallocedP = p;
}
rx_nFreePackets += apackets;
if ((num_keep_local > rx_TSFPQLocalMax) && !allow_overcommit)
xfer = rx_TSFPQLocalMax - rx_ts_info->_FPQ.len;
if (rx_nFreePackets < xfer) {
- rxi_MorePacketsNoLock(xfer - rx_nFreePackets);
+ rxi_MorePacketsNoLock(MAX(xfer - rx_nFreePackets, 4 * rx_initSendWindow));
}
RX_TS_FPQ_GTOL2(rx_ts_info, xfer);
}
osi_Panic("rxi_AllocPacket error");
#else /* KERNEL */
if (queue_IsEmpty(&rx_freePacketQueue))
- rxi_MorePacketsNoLock(rx_initSendWindow);
+ rxi_MorePacketsNoLock(4 * rx_initSendWindow);
#endif /* KERNEL */
osi_Panic("rxi_AllocPacket error");
#else /* KERNEL */
if (queue_IsEmpty(&rx_freePacketQueue))
- rxi_MorePacketsNoLock(rx_initSendWindow);
+ rxi_MorePacketsNoLock(4 * rx_initSendWindow);
#endif /* KERNEL */
rx_nFreePackets--;
MUTEX_ENTER(&rx_freePktQ_lock);
if (queue_IsEmpty(&rx_freePacketQueue))
- rxi_MorePacketsNoLock(rx_initSendWindow);
+ rxi_MorePacketsNoLock(4 * rx_initSendWindow);
RX_TS_FPQ_GTOL(rx_ts_info);
#endif
MUTEX_ENTER(&rx_serverPool_lock);
tstat.nFreePackets = htonl(rx_nFreePackets);
+ tstat.nPackets = htonl(rx_nPackets);
tstat.callsExecuted = htonl(rxi_nCalls);
tstat.packetReclaims = htonl(rx_packetReclaims);
tstat.usedFDs = CountFDs(64);