/*
* Copyright 2000, International Business Machines Corporation and others.
* All Rights Reserved.
- *
+ *
* This software has been released under the terms of the IBM Public
* License. For details, see the LICENSE file in the top-level source
* directory or online at http://www.openafs.org/dl/license10.html
#include <afs/param.h>
#endif
-RCSID
- ("$Header$");
#ifdef KERNEL
#if defined(UKERNEL)
#include "sys/types.h"
#include <sys/stat.h>
#include <errno.h>
-#if defined(AFS_NT40_ENV)
-#ifdef AFS_NT40_ENV
+#if defined(AFS_NT40_ENV)
#include <winsock2.h>
#ifndef EWOULDBLOCK
#define EWOULDBLOCK WSAEWOULDBLOCK
#endif
-#else
-#include <sys/socket.h>
-#include <netinet/in.h>
-#endif /* AFS_NT40_ENV */
#include "rx_user.h"
#include "rx_xmit_nt.h"
#include <stdlib.h>
/* rxdb_fileID is used to identify the lock location, along with line#. */
static int rxdb_fileID = RXDB_FILE_RX_PACKET;
#endif /* RX_LOCKS_DB */
-struct rx_packet *rx_mallocedP = 0;
+static struct rx_packet *rx_mallocedP = 0;
+#ifdef RXDEBUG_PACKET
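+/* Under RXDEBUG_PACKET, each packet is stamped with a unique packetId and
+ * chained through allNextp off rx_mallocedP, so rx_DumpPackets() below can
+ * walk every packet ever allocated when hunting leaks. */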
+static afs_uint32 rx_packet_id = 0;
+#endif
extern char cml_version_number[];
-extern int (*rx_almostSent) ();
static int AllocPacketBufs(int class, int num_pkts, struct rx_queue *q);
static void rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
- afs_int32 ahost, short aport,
+ afs_uint32 ahost, short aport,
afs_int32 istack);
-static int rxi_FreeDataBufsToQueue(struct rx_packet *p,
- afs_uint32 first,
- struct rx_queue * q);
+#ifdef RX_ENABLE_TSFPQ
static int
rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first, int flush_global);
-
+#else
+static int rxi_FreeDataBufsToQueue(struct rx_packet *p,
+ afs_uint32 first,
+ struct rx_queue * q);
+#endif
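+/* With TSFPQ enabled, continuation buffers are recycled through the
+ * thread-specific free packet queues; rxi_FreeDataBufsToQueue exists only
+ * in the non-TSFPQ build (see its #ifndef RX_ENABLE_TSFPQ definition below). */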
/* some rules about packets:
* 1. When a packet is allocated, the final iov_buf contains room for
*/
/* Preconditions:
- * all packet buffers (iov_base) are integral multiples of
+ * all packet buffers (iov_base) are integral multiples of
* the word size.
* offset is an integral multiple of the word size.
*/
* offset only applies to the first iovec.
*/
r = resid;
- while ((resid > 0) && (i < packet->niovecs)) {
- j = MIN(resid, packet->wirevec[i].iov_len - (offset - l));
+ while ((r > 0) && (i < packet->niovecs)) {
+ j = MIN(r, packet->wirevec[i].iov_len - (offset - l));
memcpy(out, (char *)(packet->wirevec[i].iov_base) + (offset - l), j);
- resid -= j;
+ r -= j;
out += j;
l += packet->wirevec[i].iov_len;
offset = l;
i++;
}
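+    /* return the number of bytes actually copied out */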
- return (resid ? (r - resid) : r);
+ return (r ? (resid - r) : resid);
}
afs_int32
rx_SlowWritePacket(struct rx_packet * packet, int offset, int resid, char *in)
{
- int i, j, l, r;
+ unsigned int i, j, l, o, r;
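+    /* o carries offset as unsigned, presumably so the comparison in the
+     * search loop below does not mix signed and unsigned operands */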
char *b;
- for (l = 0, i = 1; i < packet->niovecs; i++) {
- if (l + packet->wirevec[i].iov_len > offset) {
+ for (l = 0, i = 1, o = offset; i < packet->niovecs; i++) {
+ if (l + packet->wirevec[i].iov_len > o) {
break;
}
l += packet->wirevec[i].iov_len;
* offset only applies to the first iovec.
*/
r = resid;
- while ((resid > 0) && (i < RX_MAXWVECS)) {
+ while ((r > 0) && (i <= RX_MAXWVECS)) {
if (i >= packet->niovecs)
- if (rxi_AllocDataBuf(packet, resid, RX_PACKET_CLASS_SEND_CBUF) > 0) /* ++niovecs as a side-effect */
+ if (rxi_AllocDataBuf(packet, r, RX_PACKET_CLASS_SEND_CBUF) > 0) /* ++niovecs as a side-effect */
break;
b = (char *)(packet->wirevec[i].iov_base) + (offset - l);
- j = MIN(resid, packet->wirevec[i].iov_len - (offset - l));
+ j = MIN(r, packet->wirevec[i].iov_len - (offset - l));
memcpy(b, in, j);
- resid -= j;
+ r -= j;
in += j;
l += packet->wirevec[i].iov_len;
offset = l;
i++;
}
- return (resid ? (r - resid) : r);
+ return (r ? (resid - r) : resid);
}
int
rxi_AllocPackets(int class, int num_pkts, struct rx_queue * q)
{
- register struct rx_packet *p, *np;
+ struct rx_packet *p, *np;
num_pkts = AllocPacketBufs(class, num_pkts, q);
static int
AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
{
- register struct rx_ts_info_t * rx_ts_info;
- int transfer, alloc;
+ struct rx_ts_info_t * rx_ts_info;
+ int transfer;
SPLVAR;
RX_TS_INFO_GET(rx_ts_info);
if (transfer > 0) {
NETPRI;
MUTEX_ENTER(&rx_freePktQ_lock);
-
- if ((transfer + rx_TSFPQGlobSize) <= rx_nFreePackets) {
- transfer += rx_TSFPQGlobSize;
- } else if (transfer <= rx_nFreePackets) {
- transfer = rx_nFreePackets;
- } else {
+ transfer = MAX(transfer, rx_TSFPQGlobSize);
+ if (transfer > rx_nFreePackets) {
/* alloc enough for us, plus a few globs for other threads */
- alloc = transfer + (3 * rx_TSFPQGlobSize) - rx_nFreePackets;
- rxi_MorePacketsNoLock(MAX(alloc, rx_initSendWindow));
- transfer = rx_TSFPQGlobSize;
+ rxi_MorePacketsNoLock(transfer + 4 * rx_initSendWindow);
}
RX_TS_FPQ_GTOL2(rx_ts_info, transfer);
AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
{
struct rx_packet *c;
- int i, overq = 0;
+ int i;
+#ifdef KERNEL
+ int overq = 0;
+#endif
SPLVAR;
NETPRI;
MUTEX_ENTER(&rx_freePktQ_lock);
#ifdef KERNEL
- for (; (num_pkts > 0) && (rxi_OverQuota2(class,num_pkts));
+ for (; (num_pkts > 0) && (rxi_OverQuota2(class,num_pkts));
num_pkts--, overq++);
if (overq) {
rxi_NeedMorePackets = TRUE;
- switch (class) {
- case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
- break;
+ if (rx_stats_active) {
+ switch (class) {
+ case RX_PACKET_CLASS_RECEIVE:
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND:
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SPECIAL:
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_RECV_CBUF:
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND_CBUF:
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ }
}
}
}
#else /* KERNEL */
if (rx_nFreePackets < num_pkts) {
- rxi_MorePacketsNoLock(MAX((num_pkts-rx_nFreePackets), rx_initSendWindow));
+ rxi_MorePacketsNoLock(MAX((num_pkts-rx_nFreePackets), 4 * rx_initSendWindow));
}
#endif /* KERNEL */
for (i=0, c=queue_First(&rx_freePacketQueue, rx_packet);
- i < num_pkts;
+ i < num_pkts;
i++, c=queue_Next(c, rx_packet)) {
RX_FPQ_MARK_USED(c);
}
int
rxi_FreePackets(int num_pkts, struct rx_queue * q)
{
- register struct rx_ts_info_t * rx_ts_info;
- register struct rx_packet *c, *nc;
+ struct rx_ts_info_t * rx_ts_info;
+ struct rx_packet *c, *nc;
SPLVAR;
osi_Assert(num_pkts >= 0);
rxi_FreePackets(int num_pkts, struct rx_queue *q)
{
struct rx_queue cbs;
- register struct rx_packet *p, *np;
+ struct rx_packet *p, *np;
int qlen = 0;
SPLVAR;
#endif /* RX_ENABLE_TSFPQ */
/* this one is kind of awful.
- * In rxkad, the packet has been all shortened, and everything, ready for
+ * In rxkad, the packet has been all shortened, and everything, ready for
* sending. All of a sudden, we discover we need some of that space back.
* This isn't terribly general, because it knows that the packets are only
* rounded up to the EBS (userdata + security header).
{
int i, nv;
struct rx_queue q;
- register struct rx_packet *cb, *ncb;
+ struct rx_packet *cb, *ncb;
/* compute the number of cbuf's we need */
nv = nb / RX_CBUFFERSIZE;
rxi_MorePackets(int apackets)
{
struct rx_packet *p, *e;
- register struct rx_ts_info_t * rx_ts_info;
+ struct rx_ts_info_t * rx_ts_info;
int getme;
SPLVAR;
getme = apackets * sizeof(struct rx_packet);
- p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
+ p = (struct rx_packet *)osi_Alloc(getme);
osi_Assert(p);
PIN(p, getme); /* XXXXX */
- memset((char *)p, 0, getme);
+ memset(p, 0, getme);
RX_TS_INFO_GET(rx_ts_info);
+ RX_TS_FPQ_LOCAL_ALLOC(rx_ts_info,apackets);
+ /* TSFPQ patch also needs to keep track of total packets */
+
+ MUTEX_ENTER(&rx_packets_mutex);
+ rx_nPackets += apackets;
+ RX_TS_FPQ_COMPUTE_LIMITS;
+ MUTEX_EXIT(&rx_packets_mutex);
+
for (e = p + apackets; p < e; p++) {
RX_PACKET_IOV_INIT(p);
p->niovecs = 2;
RX_TS_FPQ_CHECKIN(rx_ts_info,p);
+
+ NETPRI;
+ MUTEX_ENTER(&rx_freePktQ_lock);
+#ifdef RXDEBUG_PACKET
+ p->packetId = rx_packet_id++;
+ p->allNextp = rx_mallocedP;
+#endif /* RXDEBUG_PACKET */
+ rx_mallocedP = p;
+ MUTEX_EXIT(&rx_freePktQ_lock);
+ USERPRI;
}
rx_ts_info->_FPQ.delta += apackets;
SPLVAR;
getme = apackets * sizeof(struct rx_packet);
- p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
+ p = (struct rx_packet *)osi_Alloc(getme);
osi_Assert(p);
PIN(p, getme); /* XXXXX */
- memset((char *)p, 0, getme);
+ memset(p, 0, getme);
NETPRI;
MUTEX_ENTER(&rx_freePktQ_lock);
for (e = p + apackets; p < e; p++) {
RX_PACKET_IOV_INIT(p);
+#ifdef RX_TRACK_PACKETS
p->flags |= RX_PKTFLAG_FREE;
+#endif
p->niovecs = 2;
queue_Append(&rx_freePacketQueue, p);
+#ifdef RXDEBUG_PACKET
+ p->packetId = rx_packet_id++;
+ p->allNextp = rx_mallocedP;
+#endif /* RXDEBUG_PACKET */
+ rx_mallocedP = p;
}
+
+ rx_nPackets += apackets;
rx_nFreePackets += apackets;
rxi_NeedMorePackets = FALSE;
rxi_PacketsUnWait();
rxi_MorePacketsTSFPQ(int apackets, int flush_global, int num_keep_local)
{
struct rx_packet *p, *e;
- register struct rx_ts_info_t * rx_ts_info;
+ struct rx_ts_info_t * rx_ts_info;
int getme;
SPLVAR;
getme = apackets * sizeof(struct rx_packet);
- p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
+ p = (struct rx_packet *)osi_Alloc(getme);
PIN(p, getme); /* XXXXX */
- memset((char *)p, 0, getme);
+ memset(p, 0, getme);
RX_TS_INFO_GET(rx_ts_info);
+ RX_TS_FPQ_LOCAL_ALLOC(rx_ts_info,apackets);
+ /* TSFPQ patch also needs to keep track of total packets */
+ MUTEX_ENTER(&rx_packets_mutex);
+ rx_nPackets += apackets;
+ RX_TS_FPQ_COMPUTE_LIMITS;
+ MUTEX_EXIT(&rx_packets_mutex);
+
for (e = p + apackets; p < e; p++) {
RX_PACKET_IOV_INIT(p);
p->niovecs = 2;
-
RX_TS_FPQ_CHECKIN(rx_ts_info,p);
+
+ NETPRI;
+ MUTEX_ENTER(&rx_freePktQ_lock);
+#ifdef RXDEBUG_PACKET
+ p->packetId = rx_packet_id++;
+ p->allNextp = rx_mallocedP;
+#endif /* RXDEBUG_PACKET */
+ rx_mallocedP = p;
+ MUTEX_EXIT(&rx_freePktQ_lock);
+ USERPRI;
}
rx_ts_info->_FPQ.delta += apackets;
- if (flush_global &&
+ if (flush_global &&
(num_keep_local < apackets)) {
NETPRI;
MUTEX_ENTER(&rx_freePktQ_lock);
void
rxi_MorePacketsNoLock(int apackets)
{
+#ifdef RX_ENABLE_TSFPQ
+ struct rx_ts_info_t * rx_ts_info;
+#endif /* RX_ENABLE_TSFPQ */
struct rx_packet *p, *e;
int getme;
* ((rx_maxJumboRecvSize - RX_FIRSTBUFFERSIZE) / RX_CBUFFERSIZE);
do {
getme = apackets * sizeof(struct rx_packet);
- p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
+ p = (struct rx_packet *)osi_Alloc(getme);
if (p == NULL) {
apackets -= apackets / 4;
osi_Assert(apackets > 0);
}
} while(p == NULL);
- memset((char *)p, 0, getme);
+ memset(p, 0, getme);
+
+#ifdef RX_ENABLE_TSFPQ
+ RX_TS_INFO_GET(rx_ts_info);
+ RX_TS_FPQ_GLOBAL_ALLOC(rx_ts_info,apackets);
+#endif /* RX_ENABLE_TSFPQ */
for (e = p + apackets; p < e; p++) {
RX_PACKET_IOV_INIT(p);
+#ifdef RX_TRACK_PACKETS
p->flags |= RX_PKTFLAG_FREE;
+#endif
p->niovecs = 2;
queue_Append(&rx_freePacketQueue, p);
+#ifdef RXDEBUG_PACKET
+ p->packetId = rx_packet_id++;
+ p->allNextp = rx_mallocedP;
+#endif /* RXDEBUG_PACKET */
+ rx_mallocedP = p;
}
rx_nFreePackets += apackets;
-#ifdef RX_ENABLE_TSFPQ
- /* TSFPQ patch also needs to keep track of total packets */
- MUTEX_ENTER(&rx_stats_mutex);
+ MUTEX_ENTER(&rx_packets_mutex);
rx_nPackets += apackets;
+#ifdef RX_ENABLE_TSFPQ
RX_TS_FPQ_COMPUTE_LIMITS;
- MUTEX_EXIT(&rx_stats_mutex);
#endif /* RX_ENABLE_TSFPQ */
+ MUTEX_EXIT(&rx_packets_mutex);
rxi_NeedMorePackets = FALSE;
rxi_PacketsUnWait();
}
void
rxi_AdjustLocalPacketsTSFPQ(int num_keep_local, int allow_overcommit)
{
- register struct rx_ts_info_t * rx_ts_info;
- register int xfer;
+ struct rx_ts_info_t * rx_ts_info;
+ int xfer;
SPLVAR;
RX_TS_INFO_GET(rx_ts_info);
if ((num_keep_local > rx_TSFPQLocalMax) && !allow_overcommit)
xfer = rx_TSFPQLocalMax - rx_ts_info->_FPQ.len;
if (rx_nFreePackets < xfer) {
- rxi_MorePacketsNoLock(xfer - rx_nFreePackets);
+ rxi_MorePacketsNoLock(MAX(xfer - rx_nFreePackets, 4 * rx_initSendWindow));
}
RX_TS_FPQ_GTOL2(rx_ts_info, xfer);
}
rx_CheckPackets(void)
{
if (rxi_NeedMorePackets) {
- rxi_MorePackets(rx_initSendWindow);
+ rxi_MorePackets(rx_maxSendWindow);
}
}
In any event, we assume the former, and append the packets to the end
of the free list. */
/* This explanation is bogus. The free list doesn't remain in any kind of
- useful order for afs_int32: the packets in use get pretty much randomly scattered
+ useful order for afs_int32: the packets in use get pretty much randomly scattered
across all the pages. In order to permit unused {packets,bufs} to page out, they
- must be stored so that packets which are adjacent in memory are adjacent in the
+ must be stored so that packets which are adjacent in memory are adjacent in the
free list. An array springs rapidly to mind.
*/
void
rxi_FreePacketNoLock(struct rx_packet *p)
{
- register struct rx_ts_info_t * rx_ts_info;
- dpf(("Free %lx\n", (unsigned long)p));
+ struct rx_ts_info_t * rx_ts_info;
+ dpf(("Free %"AFS_PTR_FMT"\n", p));
RX_TS_INFO_GET(rx_ts_info);
RX_TS_FPQ_CHECKIN(rx_ts_info,p);
void
rxi_FreePacketNoLock(struct rx_packet *p)
{
- dpf(("Free %lx\n", (unsigned long)p));
+ dpf(("Free %"AFS_PTR_FMT"\n", p));
RX_FPQ_MARK_FREE(p);
rx_nFreePackets++;
void
rxi_FreePacketTSFPQ(struct rx_packet *p, int flush_global)
{
- register struct rx_ts_info_t * rx_ts_info;
- dpf(("Free %lx\n", (unsigned long)p));
+ struct rx_ts_info_t * rx_ts_info;
+ dpf(("Free %"AFS_PTR_FMT"\n", p));
RX_TS_INFO_GET(rx_ts_info);
RX_TS_FPQ_CHECKIN(rx_ts_info,p);
}
#endif /* RX_ENABLE_TSFPQ */
-/*
+/*
* free continuation buffers off a packet into a queue
*
* [IN] p -- packet from which continuation buffers will be freed
* returns:
* number of continuation buffers freed
*/
+#ifndef RX_ENABLE_TSFPQ
static int
rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first, struct rx_queue * q)
{
return count;
}
+#endif
/*
* free packet continuation buffers into the global free packet pool
*
* [IN] p -- packet from which continuation buffers will be freed
* [IN] first -- iovec offset of first continuation buffer to free
+ *              any value less than 2 (the minimum number of iovecs)
+ *              is treated as 2.
* [IN] flush_global -- if nonzero, we will flush overquota packets to the
* global free pool before returning
*
rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first, int flush_global)
{
struct iovec *iov;
- register struct rx_ts_info_t * rx_ts_info;
+ struct rx_ts_info_t * rx_ts_info;
RX_TS_INFO_GET(rx_ts_info);
int rxi_nBadIovecs = 0;
-/* rxi_RestoreDataBufs
+/* rxi_RestoreDataBufs
*
* Restore the correct sizes to the iovecs. Called when reusing a packet
* for reading off the wire.
void
rxi_RestoreDataBufs(struct rx_packet *p)
{
- int i;
+ unsigned int i;
struct iovec *iov = &p->wirevec[2];
RX_PACKET_IOV_INIT(p);
{
int length;
struct iovec *iov, *end;
- register struct rx_ts_info_t * rx_ts_info;
+ struct rx_ts_info_t * rx_ts_info;
SPLVAR;
if (first != 1)
}
#endif /* RX_ENABLE_TSFPQ */
-/* rxi_AllocPacket sets up p->length so it reflects the number of
+/* rxi_AllocPacket sets up p->length so it reflects the number of
* bytes in the packet at this point, **not including** the header.
* The header is absolutely necessary, besides, this is the way the
* length field is usually used */
struct rx_packet *
rxi_AllocPacketNoLock(int class)
{
- register struct rx_packet *p;
- register struct rx_ts_info_t * rx_ts_info;
+ struct rx_packet *p;
+ struct rx_ts_info_t * rx_ts_info;
RX_TS_INFO_GET(rx_ts_info);
#ifdef KERNEL
if (rxi_OverQuota(class)) {
rxi_NeedMorePackets = TRUE;
- switch (class) {
- case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
- break;
+ if (rx_stats_active) {
+ switch (class) {
+ case RX_PACKET_CLASS_RECEIVE:
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND:
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SPECIAL:
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_RECV_CBUF:
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND_CBUF:
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ }
}
return (struct rx_packet *)0;
}
#endif /* KERNEL */
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
#ifdef KERNEL
osi_Panic("rxi_AllocPacket error");
#else /* KERNEL */
if (queue_IsEmpty(&rx_freePacketQueue))
- rxi_MorePacketsNoLock(rx_initSendWindow);
+ rxi_MorePacketsNoLock(rx_maxSendWindow);
#endif /* KERNEL */
RX_TS_FPQ_CHECKOUT(rx_ts_info,p);
- dpf(("Alloc %lx, class %d\n", (unsigned long)p, class));
+ dpf(("Alloc %"AFS_PTR_FMT", class %d\n", p, class));
/* have to do this here because rx_FlushWrite fiddles with the iovs in
- * order to truncate outbound packets. In the near future, may need
+ * order to truncate outbound packets. In the near future, may need
* to allocate bufs from a static pool here, and/or in AllocSendPacket
*/
RX_PACKET_IOV_FULLINIT(p);
struct rx_packet *
rxi_AllocPacketNoLock(int class)
{
- register struct rx_packet *p;
+ struct rx_packet *p;
#ifdef KERNEL
if (rxi_OverQuota(class)) {
rxi_NeedMorePackets = TRUE;
- switch (class) {
- case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
- break;
- case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
- break;
- }
+ if (rx_stats_active) {
+ switch (class) {
+ case RX_PACKET_CLASS_RECEIVE:
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND:
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SPECIAL:
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_RECV_CBUF:
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ case RX_PACKET_CLASS_SEND_CBUF:
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ break;
+ }
+ }
return (struct rx_packet *)0;
}
#endif /* KERNEL */
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
#ifdef KERNEL
if (queue_IsEmpty(&rx_freePacketQueue))
osi_Panic("rxi_AllocPacket error");
#else /* KERNEL */
if (queue_IsEmpty(&rx_freePacketQueue))
- rxi_MorePacketsNoLock(rx_initSendWindow);
+ rxi_MorePacketsNoLock(rx_maxSendWindow);
#endif /* KERNEL */
rx_nFreePackets--;
queue_Remove(p);
RX_FPQ_MARK_USED(p);
- dpf(("Alloc %lx, class %d\n", (unsigned long)p, class));
+ dpf(("Alloc %"AFS_PTR_FMT", class %d\n", p, class));
/* have to do this here because rx_FlushWrite fiddles with the iovs in
- * order to truncate outbound packets. In the near future, may need
+ * order to truncate outbound packets. In the near future, may need
* to allocate bufs from a static pool here, and/or in AllocSendPacket
*/
RX_PACKET_IOV_FULLINIT(p);
struct rx_packet *
rxi_AllocPacketTSFPQ(int class, int pull_global)
{
- register struct rx_packet *p;
- register struct rx_ts_info_t * rx_ts_info;
+ struct rx_packet *p;
+ struct rx_ts_info_t * rx_ts_info;
RX_TS_INFO_GET(rx_ts_info);
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
MUTEX_ENTER(&rx_freePktQ_lock);
if (queue_IsEmpty(&rx_freePacketQueue))
- rxi_MorePacketsNoLock(rx_initSendWindow);
+ rxi_MorePacketsNoLock(rx_maxSendWindow);
RX_TS_FPQ_GTOL(rx_ts_info);
RX_TS_FPQ_CHECKOUT(rx_ts_info,p);
- dpf(("Alloc %lx, class %d\n", (unsigned long)p, class));
+ dpf(("Alloc %"AFS_PTR_FMT", class %d\n", p, class));
/* have to do this here because rx_FlushWrite fiddles with the iovs in
- * order to truncate outbound packets. In the near future, may need
+ * order to truncate outbound packets. In the near future, may need
* to allocate bufs from a static pool here, and/or in AllocSendPacket
*/
RX_PACKET_IOV_FULLINIT(p);
struct rx_packet *
rxi_AllocPacket(int class)
{
- register struct rx_packet *p;
+ struct rx_packet *p;
p = rxi_AllocPacketTSFPQ(class, RX_TS_FPQ_PULL_GLOBAL);
return p;
struct rx_packet *
rxi_AllocPacket(int class)
{
- register struct rx_packet *p;
+ struct rx_packet *p;
MUTEX_ENTER(&rx_freePktQ_lock);
p = rxi_AllocPacketNoLock(class);
* Called with call locked.
*/
struct rx_packet *
-rxi_AllocSendPacket(register struct rx_call *call, int want)
+rxi_AllocSendPacket(struct rx_call *call, int want)
{
- register struct rx_packet *p = (struct rx_packet *)0;
- register int mud;
- register unsigned delta;
+ struct rx_packet *p = (struct rx_packet *)0;
+ int mud;
+ unsigned delta;
SPLVAR;
mud = call->MTU - RX_HEADER_SIZE;
(void)rxi_AllocDataBuf(p, (want - p->length),
RX_PACKET_CLASS_SEND_CBUF);
- if ((unsigned)p->length > mud)
+ if (p->length > mud)
p->length = mud;
if (delta >= p->length) {
(void)rxi_AllocDataBuf(p, (want - p->length),
RX_PACKET_CLASS_SEND_CBUF);
- if ((unsigned)p->length > mud)
+ if (p->length > mud)
p->length = mud;
if (delta >= p->length) {
}
#ifndef KERNEL
-#ifdef AFS_NT40_ENV
+#ifdef AFS_NT40_ENV
/* Windows does not use file descriptors. */
#define CountFDs(amax) 0
#else
/* count the number of used FDs */
static int
-CountFDs(register int amax)
+CountFDs(int amax)
{
struct stat tstat;
- register int i, code;
- register int count;
+ int i, code;
+ int count;
count = 0;
for (i = 0; i < amax; i++) {
* the data length of the packet is stored in the packet structure.
* The header is decoded. */
int
-rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * host,
+rxi_ReadPacket(osi_socket socket, struct rx_packet *p, afs_uint32 * host,
u_short * port)
{
struct sockaddr_in from;
    int nbytes;
afs_int32 rlen;
- register afs_int32 tlen, savelen;
+ afs_uint32 tlen, savelen;
struct msghdr msg;
rx_computelen(p, tlen);
rx_SetDataSize(p, tlen); /* this is the size of the user data area */
} else
tlen = rlen;
- /* Extend the last iovec for padding, it's just to make sure that the
+ /* Extend the last iovec for padding, it's just to make sure that the
* read doesn't return more data than we expect, and is done to get around
* our problems caused by the lack of a length field in the rx header.
* Use the extra buffer that follows the localdata in each packet
savelen = p->wirevec[p->niovecs - 1].iov_len;
p->wirevec[p->niovecs - 1].iov_len += RX_EXTRABUFFERSIZE;
- memset((char *)&msg, 0, sizeof(msg));
+ memset(&msg, 0, sizeof(msg));
msg.msg_name = (char *)&from;
msg.msg_namelen = sizeof(struct sockaddr_in);
msg.msg_iov = p->wirevec;
/* restore the vec to its correct state */
p->wirevec[p->niovecs - 1].iov_len = savelen;
- p->length = (nbytes - RX_HEADER_SIZE);
+ p->length = (u_short)(nbytes - RX_HEADER_SIZE);
if ((nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */
if (nbytes < 0 && errno == EWOULDBLOCK) {
- rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
} else if (nbytes <= 0) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.bogusPacketOnRead++;
- rx_stats.bogusHost = from.sin_addr.s_addr;
- MUTEX_EXIT(&rx_stats_mutex);
+ if (rx_stats_active) {
+ MUTEX_ENTER(&rx_stats_mutex);
+ rx_stats.bogusPacketOnRead++;
+ rx_stats.bogusHost = from.sin_addr.s_addr;
+ MUTEX_EXIT(&rx_stats_mutex);
+ }
dpf(("B: bogus packet from [%x,%d] nb=%d", ntohl(from.sin_addr.s_addr),
ntohs(from.sin_port), nbytes));
}
return 0;
- }
+ }
#ifdef RXDEBUG
else if ((rx_intentionallyDroppedOnReadPer100 > 0)
&& (random() % 100 < rx_intentionallyDroppedOnReadPer100)) {
*port = from.sin_port;
dpf(("Dropped %d %s: %x.%u.%u.%u.%u.%u.%u flags %d len %d",
- p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(*host), ntohs(*port), p->header.serial,
- p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags,
+ p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(*host), ntohs(*port), p->header.serial,
+ p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags,
p->length));
+#ifdef RX_TRIMDATABUFS
rxi_TrimDataBufs(p, 1);
+#endif
return 0;
- }
+ }
#endif
else {
/* Extract packet header. */
*port = from.sin_port;
if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
struct rx_peer *peer;
- rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
/*
* Try to look up this peer structure. If it doesn't exist,
- * don't create a new one -
+ * don't create a new one -
* we don't keep count of the bytes sent/received if a peer
* structure doesn't already exist.
*
}
}
+#ifdef RX_TRIMDATABUFS
/* Free any empty packet buffers at the end of this packet */
rxi_TrimDataBufs(p, 1);
-
+#endif
return 1;
}
}
* last two pad bytes. */
struct rx_packet *
-rxi_SplitJumboPacket(register struct rx_packet *p, afs_int32 host, short port,
+rxi_SplitJumboPacket(struct rx_packet *p, afs_uint32 host, short port,
int first)
{
struct rx_packet *np;
* The message is NOT changed.
*/
static int
-cpytoc(mblk_t * mp, register int off, register int len, register char *cp)
+cpytoc(mblk_t * mp, int off, int len, char *cp)
{
- register int n;
+ int n;
for (; mp && len > 0; mp = mp->b_cont) {
if (mp->b_datap->db_type != M_DATA) {
}
/* MTUXXX Supposed to skip <off> bytes and copy <len> bytes,
- * but it doesn't really.
- * This sucks, anyway, do it like m_cpy.... below
+ * but it doesn't really.
+ * This sucks, anyway, do it like m_cpy.... below
*/
static int
-cpytoiovec(mblk_t * mp, int off, int len, register struct iovec *iovs,
+cpytoiovec(mblk_t * mp, int off, int len, struct iovec *iovs,
int niovs)
{
- register int m, n, o, t, i;
+ int m, n, o, t, i;
for (i = -1, t = 0; i < niovs && mp && len > 0; mp = mp->b_cont) {
if (mp->b_datap->db_type != M_DATA) {
struct rx_packet *phandle;
int hdr_len, data_len;
{
- register int code;
+ int code;
code =
m_cpytoiovec(amb, hdr_len, data_len, phandle->wirevec,
/* send a response to a debug packet */
struct rx_packet *
-rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
- afs_int32 ahost, short aport, int istack)
+rxi_ReceiveDebugPacket(struct rx_packet *ap, osi_socket asocket,
+ afs_uint32 ahost, short aport, int istack)
{
struct rx_debugIn tin;
afs_int32 tl;
}
rx_packetread(ap, 0, sizeof(struct rx_debugIn), (char *)&tin);
- /* all done with packet, now set length to the truth, so we can
+ /* all done with packet, now set length to the truth, so we can
* reuse this packet */
rx_computelen(ap, ap->length);
struct rx_debugStats tstat;
/* get basic stats */
- memset((char *)&tstat, 0, sizeof(tstat)); /* make sure spares are zero */
+ memset(&tstat, 0, sizeof(tstat)); /* make sure spares are zero */
tstat.version = RX_DEBUGI_VERSION;
#ifndef RX_ENABLE_LOCKS
tstat.waitingForPackets = rx_waitingForPackets;
#endif
MUTEX_ENTER(&rx_serverPool_lock);
tstat.nFreePackets = htonl(rx_nFreePackets);
+ tstat.nPackets = htonl(rx_nPackets);
tstat.callsExecuted = htonl(rxi_nCalls);
tstat.packetReclaims = htonl(rx_packetReclaims);
tstat.usedFDs = CountFDs(64);
case RX_DEBUGI_GETALLCONN:
case RX_DEBUGI_GETCONN:{
- int i, j;
- register struct rx_connection *tc;
+ unsigned int i, j;
+ struct rx_connection *tc;
struct rx_call *tcall;
struct rx_debugConn tconn;
int all = (tin.type == RX_DEBUGI_GETALLCONN);
if (tl > 0)
return ap;
- memset((char *)&tconn, 0, sizeof(tconn)); /* make sure spares are zero */
+ memset(&tconn, 0, sizeof(tconn)); /* make sure spares are zero */
/* get N'th (maybe) "interesting" connection info */
for (i = 0; i < rx_hashTableSize; i++) {
#if !defined(KERNEL)
#endif
#endif
MUTEX_ENTER(&rx_connHashTable_lock);
- /* We might be slightly out of step since we are not
+ /* We might be slightly out of step since we are not
* locking each call, but this is only debugging output.
*/
for (tc = rx_connHashTable[i]; tc; tc = tc->next) {
*/
case RX_DEBUGI_GETPEER:{
- int i;
- register struct rx_peer *tp;
+ unsigned int i;
+ struct rx_peer *tp;
struct rx_debugPeer tpeer;
if (tl > 0)
return ap;
- memset((char *)&tpeer, 0, sizeof(tpeer));
+ memset(&tpeer, 0, sizeof(tpeer));
for (i = 0; i < rx_hashTableSize; i++) {
#if !defined(KERNEL)
/* the time complexity of the algorithm used here
MUTEX_ENTER(&rx_peerHashTable_lock);
for (tp = rx_peerHashTable[i]; tp; tp = tp->next) {
if (tin.index-- <= 0) {
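+		    /* hold a reference so the peer cannot be freed while
+		     * the hash-table lock is dropped below */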
+ tp->refCount++;
+ MUTEX_EXIT(&rx_peerHashTable_lock);
+
+ MUTEX_ENTER(&tp->peer_lock);
tpeer.host = tp->host;
tpeer.port = tp->port;
tpeer.ifMTU = htons(tp->ifMTU);
htonl(tp->bytesReceived.high);
tpeer.bytesReceived.low =
htonl(tp->bytesReceived.low);
+ MUTEX_EXIT(&tp->peer_lock);
+ MUTEX_ENTER(&rx_peerHashTable_lock);
+ tp->refCount--;
MUTEX_EXIT(&rx_peerHashTable_lock);
+
rx_packetwrite(ap, 0, sizeof(struct rx_debugPeer),
(char *)&tpeer);
tl = ap->length;
return ap;
	/* Since it's all int32s, convert to network order with a loop. */
+ if (rx_stats_active)
MUTEX_ENTER(&rx_stats_mutex);
s = (afs_int32 *) & rx_stats;
for (i = 0; i < sizeof(rx_stats) / sizeof(afs_int32); i++, s++)
tl = ap->length;
ap->length = sizeof(rx_stats);
+ if (rx_stats_active)
MUTEX_EXIT(&rx_stats_mutex);
rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
ap->length = tl;
}
struct rx_packet *
-rxi_ReceiveVersionPacket(register struct rx_packet *ap, osi_socket asocket,
- afs_int32 ahost, short aport, int istack)
+rxi_ReceiveVersionPacket(struct rx_packet *ap, osi_socket asocket,
+ afs_uint32 ahost, short aport, int istack)
{
afs_int32 tl;
/* send a debug packet back to the sender */
static void
rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
- afs_int32 ahost, short aport, afs_int32 istack)
+ afs_uint32 ahost, short aport, afs_int32 istack)
{
struct sockaddr_in taddr;
- int i;
- int nbytes;
+ unsigned int i, nbytes, savelen = 0;
int saven = 0;
- size_t savelen = 0;
#ifdef KERNEL
int waslocked = ISAFS_GLOCK();
#endif
#endif
int code;
struct sockaddr_in addr;
- register struct rx_peer *peer = conn->peer;
+ struct rx_peer *peer = conn->peer;
osi_socket socket;
#ifdef RXDEBUG
char deliveryType = 'S';
* serial number means the packet was never sent. */
MUTEX_ENTER(&conn->conn_data_lock);
p->header.serial = ++conn->serial;
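+    /* remember the largest ping and largest sequenced packet pushed past
+     * the peer's known maxPacketSize; presumably consumed by rx's
+     * path-MTU probing logic elsewhere */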
+ if (p->length > conn->peer->maxPacketSize) {
+ if ((p->header.type == RX_PACKET_TYPE_ACK) &&
+ (p->header.flags & RX_REQUEST_ACK)) {
+ conn->lastPingSize = p->length;
+ conn->lastPingSizeSer = p->header.serial;
+ } else if (p->header.seq != 0) {
+ conn->lastPacketSize = p->length;
+ conn->lastPacketSizeSeq = p->header.seq;
+ }
+ }
MUTEX_EXIT(&conn->conn_data_lock);
- /* This is so we can adjust retransmit time-outs better in the face of
+ /* This is so we can adjust retransmit time-outs better in the face of
* rapidly changing round-trip times. RTO estimation is not a la Karn.
*/
if (p->firstSerial == 0) {
#endif
/* Get network byte order header */
- rxi_EncodePacketHeader(p); /* XXX in the event of rexmit, etc, don't need to
+ rxi_EncodePacketHeader(p); /* XXX in the event of rexmit, etc, don't need to
* touch ALL the fields */
/* Send the packet out on the same socket that related packets are being
osi_NetSend(socket, &addr, p->wirevec, p->niovecs,
p->length + RX_HEADER_SIZE, istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
- rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
p->retryTime = p->timeSent; /* resend it very soon */
clock_Addmsec(&(p->retryTime),
10 + (((afs_uint32) p->backoff) << 8));
/* Some systems are nice and tell us right away that we cannot
- * reach this recipient by returning an error code.
+ * reach this recipient by returning an error code.
* So, when this happens let's "down" the host NOW so
* we don't sit around waiting for this host to timeout later.
*/
- if (call &&
+ if (call &&
#ifdef AFS_NT40_ENV
- code == -1 && WSAGetLastError() == WSAEHOSTUNREACH
-#elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
+	    ((code == -1 && WSAGetLastError() == WSAEHOSTUNREACH) || (code == -WSAEHOSTUNREACH))
+#elif defined(AFS_LINUX20_ENV)
code == -ENETUNREACH
-#elif defined(AFS_DARWIN_ENV) && defined(KERNEL)
+#elif defined(AFS_DARWIN_ENV)
code == EHOSTUNREACH
#else
0
#endif
#ifdef RXDEBUG
}
- dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+ dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%.3d len %d",
+ deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host),
+ ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber,
+ p->header.seq, p->header.flags, p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
- rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
MUTEX_ENTER(&peer->peer_lock);
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);
int waslocked;
#endif
struct sockaddr_in addr;
- register struct rx_peer *peer = conn->peer;
+ struct rx_peer *peer = conn->peer;
osi_socket socket;
struct rx_packet *p = NULL;
struct iovec wirevec[RX_MAXIOVECS];
MUTEX_ENTER(&conn->conn_data_lock);
serial = conn->serial;
conn->serial += len;
+ for (i = 0; i < len; i++) {
+ p = list[i];
+	if (p->length > conn->peer->maxPacketSize) {
+	    /* a ping *or* a sequenced packet can count */
+	    if (((p->header.type == RX_PACKET_TYPE_ACK) &&
+		 (p->header.flags & RX_REQUEST_ACK)) &&
+		((i == 0) || (p->length >= conn->lastPingSize))) {
+		conn->lastPingSize = p->length;
+		conn->lastPingSizeSer = serial + i;
+	    } else if ((p->header.seq != 0) &&
+		       ((i == 0) || (p->length >= conn->lastPacketSize))) {
+		conn->lastPacketSize = p->length;
+		conn->lastPacketSizeSeq = p->header.seq;
+	    }
+	}
+ }
MUTEX_EXIT(&conn->conn_data_lock);
/* Pre-increment, to guarantee no zero serial number; a zero
* serial number means the packet was never sent. */
p->header.serial = ++serial;
- /* This is so we can adjust retransmit time-outs better in the face of
+ /* This is so we can adjust retransmit time-outs better in the face of
* rapidly changing round-trip times. RTO estimation is not a la Karn.
*/
if (p->firstSerial == 0) {
#endif
/* Get network byte order header */
- rxi_EncodePacketHeader(p); /* XXX in the event of rexmit, etc, don't need to
+ rxi_EncodePacketHeader(p); /* XXX in the event of rexmit, etc, don't need to
* touch ALL the fields */
}
osi_NetSend(socket, &addr, &wirevec[0], len + 1, length,
istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
- rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
for (i = 0; i < len; i++) {
p = list[i];
p->retryTime = p->timeSent; /* resend it very soon */
10 + (((afs_uint32) p->backoff) << 8));
}
/* Some systems are nice and tell us right away that we cannot
- * reach this recipient by returning an error code.
+ * reach this recipient by returning an error code.
* So, when this happens let's "down" the host NOW so
* we don't sit around waiting for this host to timeout later.
*/
- if (call &&
+ if (call &&
#ifdef AFS_NT40_ENV
- code == -1 && WSAGetLastError() == WSAEHOSTUNREACH
-#elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
+	    ((code == -1 && WSAGetLastError() == WSAEHOSTUNREACH) || (code == -WSAEHOSTUNREACH))
+#elif defined(AFS_LINUX20_ENV)
code == -ENETUNREACH
-#elif defined(AFS_DARWIN_ENV) && defined(KERNEL)
+#elif defined(AFS_DARWIN_ENV)
code == EHOSTUNREACH
#else
0
assert(p != NULL);
- dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+ dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%.3d len %d",
+ deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host),
+ ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber,
+ p->header.seq, p->header.flags, p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
- rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
+ if (rx_stats_active)
+ rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
MUTEX_ENTER(&peer->peer_lock);
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);
* in rx.h. Bug: there's a lot of duplication between this and other
* routines. This needs to be cleaned up. */
struct rx_packet *
-rxi_SendSpecial(register struct rx_call *call,
- register struct rx_connection *conn,
+rxi_SendSpecial(struct rx_call *call,
+ struct rx_connection *conn,
struct rx_packet *optionalPacket, int type, char *data,
int nbytes, int istack)
{
/* Some of the following stuff should be common code for all
* packet sends (it's repeated elsewhere) */
- register struct rx_packet *p;
+ struct rx_packet *p;
unsigned int i = 0;
int savelen = 0, saven = 0;
int channel, callNumber;
* the net byte order representation in the wire representation of the
* packet, which is what is actually sent out on the wire) */
void
-rxi_EncodePacketHeader(register struct rx_packet *p)
+rxi_EncodePacketHeader(struct rx_packet *p)
{
- register afs_uint32 *buf = (afs_uint32 *) (p->wirevec[0].iov_base); /* MTUXXX */
+ afs_uint32 *buf = (afs_uint32 *) (p->wirevec[0].iov_base); /* MTUXXX */
- memset((char *)buf, 0, RX_HEADER_SIZE);
+ memset(buf, 0, RX_HEADER_SIZE);
*buf++ = htonl(p->header.epoch);
*buf++ = htonl(p->header.cid);
*buf++ = htonl(p->header.callNumber);
/* Decode the packet's header (from net byte order to a struct header) */
void
-rxi_DecodePacketHeader(register struct rx_packet *p)
+rxi_DecodePacketHeader(struct rx_packet *p)
{
- register afs_uint32 *buf = (afs_uint32 *) (p->wirevec[0].iov_base); /* MTUXXX */
+ afs_uint32 *buf = (afs_uint32 *) (p->wirevec[0].iov_base); /* MTUXXX */
afs_uint32 temp;
p->header.epoch = ntohl(*buf);
}
void
-rxi_PrepareSendPacket(register struct rx_call *call,
- register struct rx_packet *p, register int last)
+rxi_PrepareSendPacket(struct rx_call *call,
+ struct rx_packet *p, int last)
{
- register struct rx_connection *conn = call->conn;
- int i;
- ssize_t len; /* len must be a signed type; it can go negative */
+ struct rx_connection *conn = call->conn;
+ unsigned int i;
+ afs_int32 len; /* len must be a signed type; it can go negative */
p->flags &= ~RX_PKTFLAG_ACKED;
p->header.cid = (conn->cid | call->channel);
MUTEX_EXIT(&rx_freePktQ_lock);
#endif /* !RX_ENABLE_TSFPQ */
- p->niovecs = i;
+ p->niovecs = i;
}
- p->wirevec[i - 1].iov_len += len;
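+    /* len may be negative here when truncating (see its declaration);
+     * only adjust the final iovec when there is a nonzero correction */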
+ if (len)
+ p->wirevec[i - 1].iov_len += len;
RXS_PreparePacket(conn->securityObject, call, p);
}
}
return (2 + (maxMTU / (RX_JUMBOBUFFERSIZE + RX_JUMBOHEADERSIZE)));
}
+
+#ifndef KERNEL
+/*
+ * This function can be used by the Windows Cache Manager
+ * to dump the list of all rx packets so that we can determine
+ * where the packet leakage is.
+ */
+int rx_DumpPackets(FILE *outputFile, char *cookie)
+{
+#ifdef RXDEBUG_PACKET
+ struct rx_packet *p;
+#ifdef AFS_NT40_ENV
+ int zilch;
+ char output[2048];
+#define RXDPRINTF sprintf
+#define RXDPRINTOUT output
+#else
+#define RXDPRINTF fprintf
+#define RXDPRINTOUT outputFile
+#endif
+
+ NETPRI;
+ MUTEX_ENTER(&rx_freePktQ_lock);
+ RXDPRINTF(RXDPRINTOUT, "%s - Start dumping all Rx Packets - count=%u\r\n", cookie, rx_packet_id);
+#ifdef AFS_NT40_ENV
+ WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
+
+ for (p = rx_mallocedP; p; p = p->allNextp) {
+ RXDPRINTF(RXDPRINTOUT, "%s - packet=0x%p, id=%u, firstSent=%u.%08u, timeSent=%u.%08u, retryTime=%u.%08u, firstSerial=%u, niovecs=%u, flags=0x%x, backoff=%u, length=%u header: epoch=%u, cid=%u, callNum=%u, seq=%u, serial=%u, type=%u, flags=0x%x, userStatus=%u, securityIndex=%u, serviceId=%u\r\n",
+ cookie, p, p->packetId, p->firstSent.sec, p->firstSent.usec, p->timeSent.sec, p->timeSent.usec, p->retryTime.sec, p->retryTime.usec,
+ p->firstSerial, p->niovecs, (afs_uint32)p->flags, (afs_uint32)p->backoff, (afs_uint32)p->length,
+ p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.serial,
+ (afs_uint32)p->header.type, (afs_uint32)p->header.flags, (afs_uint32)p->header.userStatus,
+ (afs_uint32)p->header.securityIndex, (afs_uint32)p->header.serviceId);
+#ifdef AFS_NT40_ENV
+ WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
+ }
+
+ RXDPRINTF(RXDPRINTOUT, "%s - End dumping all Rx Packets\r\n", cookie);
+#ifdef AFS_NT40_ENV
+ WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
+
+ MUTEX_EXIT(&rx_freePktQ_lock);
+ USERPRI;
+#endif /* RXDEBUG_PACKET */
+ return 0;
+}
+#endif
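+
+/* A minimal, hypothetical sketch of invoking the dumper from user-space
+ * debugging code (note that on AFS_NT40_ENV the outputFile argument is
+ * handed to WriteFile(), so a Win32 HANDLE is expected there, not a FILE *):
+ *
+ *     FILE *fp = fopen("rx_packets.log", "w");
+ *     if (fp != NULL) {
+ *         rx_DumpPackets(fp, "leak-check");
+ *         fclose(fp);
+ *     }
+ */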