#include "sys/types.h"
#include <sys/stat.h>
#include <errno.h>
-#if defined(AFS_NT40_ENV) || defined(AFS_DJGPP_ENV)
+#if defined(AFS_NT40_ENV)
#ifdef AFS_NT40_ENV
#include <winsock2.h>
#ifndef EWOULDBLOCK
#define EWOULDBLOCK WSAEWOULDBLOCK
#endif /* EWOULDBLOCK */
+#include "rx_user.h"
+#include "rx_xmit_nt.h"
+#include <stdlib.h>
#else
#include <sys/socket.h>
#include <netinet/in.h>
#include "rx_globals.h"
#include <lwp.h>
#include <assert.h>
-#ifdef HAVE_STRING_H
#include <string.h>
-#else
-#ifdef HAVE_STRINGS_H
-#include <strings.h>
-#endif
-#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
afs_int32 ahost, short aport,
afs_int32 istack);
+static int rxi_FreeDataBufsToQueue(struct rx_packet *p,
+ afs_uint32 first,
+ struct rx_queue * q);
+static int
+rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first, int flush_global);
+
+
/* some rules about packets:
* 1. When a packet is allocated, the final iov_buf contains room for
* a security trailer, but iov_len masks that fact. If the security
static int
AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
{
- register struct rx_packet *c;
register struct rx_ts_info_t * rx_ts_info;
int transfer, alloc;
SPLVAR;
/* alloc enough for us, plus a few globs for other threads */
alloc = transfer + (3 * rx_TSFPQGlobSize) - rx_nFreePackets;
rxi_MorePacketsNoLock(MAX(alloc, rx_initSendWindow));
- transfer += rx_TSFPQGlobSize;
+ transfer = rx_TSFPQGlobSize;
}
RX_TS_FPQ_GTOL2(rx_ts_info, transfer);
USERPRI;
}
- RX_TS_FPQ_CHECKOUT2(rx_ts_info, num_pkts, q);
+ RX_TS_FPQ_QCHECKOUT(rx_ts_info, num_pkts, q);
return num_pkts;
}
if (overq) {
rxi_NeedMorePackets = TRUE;
- MUTEX_ENTER(&rx_stats_mutex);
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_stats.receivePktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND:
- rx_stats.sendPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_stats.specialPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_stats.receiveCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_stats.sendCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
break;
}
- MUTEX_EXIT(&rx_stats_mutex);
}
if (rx_nFreePackets < num_pkts)
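The rx_MutexIncrement() calls introduced above replace open-coded MUTEX_ENTER/increment/MUTEX_EXIT triples. As a minimal sketch of the assumed macro, for readers following the patch (the authoritative definition lives in the rx headers and may differ in detail):

/* Assumed shape of rx_MutexIncrement(); illustrative only. */
#define rx_MutexIncrement(object, mutex) \
    do { \
        MUTEX_ENTER(&(mutex)); \
        (object)++; \
        MUTEX_EXIT(&(mutex)); \
    } while (0)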
register struct rx_packet *c, *nc;
SPLVAR;
+ osi_Assert(num_pkts >= 0);
+ RX_TS_INFO_GET(rx_ts_info);
+
if (!num_pkts) {
- queue_Count(q, c, nc, rx_packet, num_pkts);
- if (!num_pkts)
- return 0;
+ for (queue_Scan(q, c, nc, rx_packet), num_pkts++) {
+ rxi_FreeDataBufsTSFPQ(c, 2, 0);
+ }
+ } else {
+ for (queue_Scan(q, c, nc, rx_packet)) {
+ rxi_FreeDataBufsTSFPQ(c, 2, 0);
+ }
}
- RX_TS_INFO_GET(rx_ts_info);
- RX_TS_FPQ_CHECKIN2(rx_ts_info, num_pkts, q);
+ if (num_pkts) {
+ RX_TS_FPQ_QCHECKIN(rx_ts_info, num_pkts, q);
+ }
if (rx_ts_info->_FPQ.len > rx_TSFPQLocalMax) {
NETPRI;
int
rxi_FreePackets(int num_pkts, struct rx_queue *q)
{
+ struct rx_queue cbs;
register struct rx_packet *p, *np;
+ int qlen = 0;
SPLVAR;
+ osi_Assert(num_pkts >= 0);
+ queue_Init(&cbs);
+
if (!num_pkts) {
for (queue_Scan(q, p, np, rx_packet), num_pkts++) {
+ if (p->niovecs > 2) {
+ qlen += rxi_FreeDataBufsToQueue(p, 2, &cbs);
+ }
RX_FPQ_MARK_FREE(p);
}
if (!num_pkts)
return 0;
} else {
for (queue_Scan(q, p, np, rx_packet)) {
+ if (p->niovecs > 2) {
+ qlen += rxi_FreeDataBufsToQueue(p, 2, &cbs);
+ }
RX_FPQ_MARK_FREE(p);
}
}
+ if (qlen) {
+ queue_SpliceAppend(q, &cbs);
+ qlen += num_pkts;
+ } else
+ qlen = num_pkts;
+
NETPRI;
MUTEX_ENTER(&rx_freePktQ_lock);
queue_SpliceAppend(&rx_freePacketQueue, q);
- rx_nFreePackets += num_pkts;
+ rx_nFreePackets += qlen;
/* Wakeup anyone waiting for packets */
rxi_PacketsUnWait();
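rxi_FreePackets() now strips continuation buffers into the local queue cbs and splices the whole batch onto rx_freePacketQueue, so the global free-queue lock is taken once per call rather than once per buffer. A hypothetical caller (the wrapper name is invented for illustration):

/* Hypothetical caller: free an entire chain of packets in one batch.
 * Passing num_pkts == 0 asks rxi_FreePackets() to count the queue
 * itself before checking the packets in. */
static void
example_FreeChain(struct rx_queue *pkts)
{
    (void) rxi_FreePackets(0, pkts);
}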
getme = apackets * sizeof(struct rx_packet);
p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
+ osi_Assert(p);
PIN(p, getme); /* XXXXX */
memset((char *)p, 0, getme);
getme = apackets * sizeof(struct rx_packet);
p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
+ osi_Assert(p);
PIN(p, getme); /* XXXXX */
memset((char *)p, 0, getme);
* to hold maximal amounts of data */
apackets += (apackets / 4)
* ((rx_maxJumboRecvSize - RX_FIRSTBUFFERSIZE) / RX_CBUFFERSIZE);
- getme = apackets * sizeof(struct rx_packet);
- p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
-
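+ /* If the allocation fails, retry with a request 25% smaller rather
+ * than failing outright; assert only if repeated shrinking would
+ * exhaust the request entirely. */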
+ do {
+ getme = apackets * sizeof(struct rx_packet);
+ p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
+ if (p == NULL) {
+ apackets -= apackets / 4;
+ osi_Assert(apackets > 0);
+ }
+ } while(p == NULL);
memset((char *)p, 0, getme);
for (e = p + apackets; p < e; p++) {
}
#endif /* RX_ENABLE_TSFPQ */
+/*
+ * free continuation buffers off a packet into a queue
+ *
+ * [IN] p -- packet from which continuation buffers will be freed
+ * [IN] first -- iovec offset of first continuation buffer to free
+ * [IN] q -- queue into which continuation buffers will be chained
+ *
+ * returns:
+ * number of continuation buffers freed
+ */
+static int
+rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first, struct rx_queue * q)
+{
+ struct iovec *iov;
+ struct rx_packet * cb;
+ int count = 0;
+
+ for (first = MAX(2, first); first < p->niovecs; first++, count++) {
+ iov = &p->wirevec[first];
+ if (!iov->iov_base)
+ osi_Panic("rxi_FreeDataBufsToQueue: unexpected NULL iov");
+ cb = RX_CBUF_TO_PACKET(iov->iov_base, p);
+ RX_FPQ_MARK_FREE(cb);
+ queue_Append(q, cb);
+ }
+ p->length = 0;
+ p->niovecs = 0;
+
+ return count;
+}
+
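RX_CBUF_TO_PACKET() above maps a continuation buffer address back to the rx_packet that owns it. A sketch of the assumed definition, using p only as a layout template (the authoritative macro is in rx_packet.h):

/* Assumed shape of RX_CBUF_TO_PACKET(): a continuation buffer is the
 * localdata area of some other rx_packet, so step back by localdata's
 * offset within the template packet p to reach the owning packet. */
#define RX_CBUF_TO_PACKET(b, p) \
    ((struct rx_packet *) \
     ((char *)(b) - ((char *)&(p)->localdata[0] - (char *)(p))))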
+/*
+ * free packet continuation buffers into the global free packet pool
+ *
+ * [IN] p -- packet from which to free continuation buffers
+ * [IN] first -- iovec offset of first continuation buffer to free
+ *
+ * returns:
+ * zero always
+ */
int
-rxi_FreeDataBufsNoLock(struct rx_packet *p, int first)
+rxi_FreeDataBufsNoLock(struct rx_packet *p, afs_uint32 first)
{
- struct iovec *iov, *end;
+ struct iovec *iov;
- if (first != 1) /* MTUXXX */
- osi_Panic("FreeDataBufs 1: first must be 1");
- iov = &p->wirevec[1];
- end = iov + (p->niovecs - 1);
- if (iov->iov_base != (caddr_t) p->localdata) /* MTUXXX */
- osi_Panic("FreeDataBufs 2: vec 1 must be localdata");
- for (iov++; iov < end; iov++) {
+ for (first = MAX(2, first); first < p->niovecs; first++) {
+ iov = &p->wirevec[first];
if (!iov->iov_base)
- osi_Panic("FreeDataBufs 3: vecs 2-niovecs must not be NULL");
+ osi_Panic("rxi_FreeDataBufsNoLock: unexpected NULL iov");
rxi_FreePacketNoLock(RX_CBUF_TO_PACKET(iov->iov_base, p));
}
p->length = 0;
}
#ifdef RX_ENABLE_TSFPQ
-int
-rxi_FreeDataBufsTSFPQ(struct rx_packet *p, int first, int flush_global)
+/*
+ * free packet continuation buffers into the thread-local free pool
+ *
+ * [IN] p -- packet from which continuation buffers will be freed
+ * [IN] first -- iovec offset of first continuation buffer to free
+ * [IN] flush_global -- if nonzero, we will flush overquota packets to the
+ * global free pool before returning
+ *
+ * returns:
+ * zero always
+ */
+static int
+rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first, int flush_global)
{
- struct iovec *iov, *end;
+ struct iovec *iov;
register struct rx_ts_info_t * rx_ts_info;
RX_TS_INFO_GET(rx_ts_info);
- if (first != 1) /* MTUXXX */
- osi_Panic("FreeDataBufs 1: first must be 1");
- iov = &p->wirevec[1];
- end = iov + (p->niovecs - 1);
- if (iov->iov_base != (caddr_t) p->localdata) /* MTUXXX */
- osi_Panic("FreeDataBufs 2: vec 1 must be localdata");
- for (iov++; iov < end; iov++) {
+ for (first = MAX(2, first); first < p->niovecs; first++) {
+ iov = &p->wirevec[first];
if (!iov->iov_base)
- osi_Panic("FreeDataBufs 3: vecs 2-niovecs must not be NULL");
+ osi_Panic("rxi_FreeDataBufsTSFPQ: unexpected NULL iov");
RX_TS_FPQ_CHECKIN(rx_ts_info,RX_CBUF_TO_PACKET(iov->iov_base, p));
}
p->length = 0;
void
rxi_FreePacket(struct rx_packet *p)
{
- rxi_FreeDataBufsTSFPQ(p, 1, 0);
+ rxi_FreeDataBufsTSFPQ(p, 2, 0);
rxi_FreePacketTSFPQ(p, RX_TS_FPQ_FLUSH_GLOBAL);
}
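The first argument passed to the rxi_FreeDataBufs* routines changes from 1 to 2 throughout this patch because only wirevec slots 2 and up hold detachable continuation buffers; slot 1 (localdata) is part of the packet itself and must never be checked back in. Assumed layout, for orientation:

/* Assumed wirevec layout of an rx_packet (see rx_packet.h):
 *   wirevec[0] -> Rx packet header (RX_HEADER_SIZE bytes)
 *   wirevec[1] -> p->localdata (first RX_FIRSTBUFFERSIZE data bytes)
 *   wirevec[2..niovecs-1] -> continuation buffers borrowed from other
 *                            rx_packet structures
 */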
#else /* RX_ENABLE_TSFPQ */
NETPRI;
MUTEX_ENTER(&rx_freePktQ_lock);
- rxi_FreeDataBufsNoLock(p, 1);
+ rxi_FreeDataBufsNoLock(p, 2);
rxi_FreePacketNoLock(p);
/* Wakeup anyone waiting for packets */
rxi_PacketsUnWait();
#ifdef KERNEL
if (rxi_OverQuota(class)) {
rxi_NeedMorePackets = TRUE;
- MUTEX_ENTER(&rx_stats_mutex);
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_stats.receivePktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND:
- rx_stats.sendPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_stats.specialPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_stats.receiveCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_stats.sendCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
break;
}
- MUTEX_EXIT(&rx_stats_mutex);
- return (struct rx_packet *)0;
+ return (struct rx_packet *)0;
}
#endif /* KERNEL */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetRequests++;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
#ifdef KERNEL
#ifdef KERNEL
if (rxi_OverQuota(class)) {
rxi_NeedMorePackets = TRUE;
- MUTEX_ENTER(&rx_stats_mutex);
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_stats.receivePktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND:
- rx_stats.sendPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_stats.specialPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_stats.receiveCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_stats.sendCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
break;
}
- MUTEX_EXIT(&rx_stats_mutex);
return (struct rx_packet *)0;
}
#endif /* KERNEL */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetRequests++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
#ifdef KERNEL
if (queue_IsEmpty(&rx_freePacketQueue))
RX_TS_INFO_GET(rx_ts_info);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetRequests++;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
MUTEX_ENTER(&rx_freePktQ_lock);
}
#ifndef KERNEL
-
+#ifdef AFS_NT40_ENV
+/* Windows does not use Unix-style file descriptors; nothing to count. */
+#define CountFDs(amax) 0
+#else
/* count the number of used FDs */
static int
CountFDs(register int amax)
}
return count;
}
-
+#endif /* AFS_NT40_ENV */
#else /* KERNEL */
#define CountFDs(amax) amax
* the data length of the packet is stored in the packet structure.
* The header is decoded. */
int
-rxi_ReadPacket(int socket, register struct rx_packet *p, afs_uint32 * host,
+rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * host,
u_short * port)
{
struct sockaddr_in from;
p->length = (nbytes - RX_HEADER_SIZE);
if ((nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */
- if (nbytes > 0)
- rxi_MorePackets(rx_initSendWindow);
- else if (nbytes < 0 && errno == EWOULDBLOCK) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.noPacketOnRead++;
- MUTEX_EXIT(&rx_stats_mutex);
- } else {
+ if (nbytes < 0 && errno == EWOULDBLOCK) {
+ rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
+ } else if (nbytes <= 0) {
MUTEX_ENTER(&rx_stats_mutex);
rx_stats.bogusPacketOnRead++;
rx_stats.bogusHost = from.sin_addr.s_addr;
MUTEX_EXIT(&rx_stats_mutex);
- dpf(("B: bogus packet from [%x,%d] nb=%d", from.sin_addr.s_addr,
- from.sin_port, nbytes));
+ dpf(("B: bogus packet from [%x,%d] nb=%d", ntohl(from.sin_addr.s_addr),
+ ntohs(from.sin_port), nbytes));
}
return 0;
- } else {
+ }
+#ifdef RXDEBUG
+ else if ((rx_intentionallyDroppedOnReadPer100 > 0)
+ && (random() % 100 < rx_intentionallyDroppedOnReadPer100)) {
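+ /* Test hook: when rx_intentionallyDroppedOnReadPer100 is nonzero,
+ * drop roughly that percentage of otherwise-valid incoming packets
+ * to simulate network loss. */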
+ rxi_DecodePacketHeader(p);
+
+ *host = from.sin_addr.s_addr;
+ *port = from.sin_port;
+
+ dpf(("Dropped %d %s: %x.%u.%u.%u.%u.%u.%u flags %d len %d",
+ p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(*host), ntohs(*port), p->header.serial,
+ p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags,
+ p->length));
+ rxi_TrimDataBufs(p, 1);
+ return 0;
+ }
+#endif
+ else {
/* Extract packet header. */
rxi_DecodePacketHeader(p);
*port = from.sin_port;
if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
struct rx_peer *peer;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetsRead[p->header.type - 1]++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
/*
* Try to look up this peer structure. If it doesn't exist,
* don't create a new one -
#define m_cpytoc(a, b, c, d) cpytoc(a, b, c, d)
#define m_cpytoiovec(a, b, c, d, e) cpytoiovec(a, b, c, d, e)
#else
-#if !defined(AFS_LINUX20_ENV)
+#if !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN80_ENV)
static int
m_cpytoiovec(struct mbuf *m, int off, int len, struct iovec iovs[], int niovs)
{
#endif /* LINUX */
#endif /* AFS_SUN5_ENV */
-#if !defined(AFS_LINUX20_ENV)
+#if !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN80_ENV)
int
rx_mb_to_packet(amb, free, hdr_len, data_len, phandle)
#if defined(AFS_SUN5_ENV) || defined(AFS_HPUX110_ENV)
osi_NetSend(socket, &addr, p->wirevec, p->niovecs,
p->length + RX_HEADER_SIZE, istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.netSendFailures++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
p->retryTime = p->timeSent; /* resend it very soon */
clock_Addmsec(&(p->retryTime),
10 + (((afs_uint32) p->backoff) << 8));
-
-#ifdef AFS_NT40_ENV
- /* Windows is nice -- it can tell us right away that we cannot
- * reach this recipient by returning an WSAEHOSTUNREACH error
- * code. So, when this happens let's "down" the host NOW so
+ /* Some systems are nice and tell us right away that we cannot
+ * reach this recipient by returning an error code.
+ * So, when this happens let's "down" the host NOW so
* we don't sit around waiting for this host to timeout later.
*/
- if (call && code == -1 && errno == WSAEHOSTUNREACH)
- call->lastReceiveTime = 0;
+ if (call &&
+#ifdef AFS_NT40_ENV
+ code == -1 && WSAGetLastError() == WSAEHOSTUNREACH
+#elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
+ code == -ENETUNREACH
+#elif defined(AFS_DARWIN_ENV) && defined(KERNEL)
+ code == EHOSTUNREACH
+#else
+ 0
#endif
-#if defined(KERNEL) && defined(AFS_LINUX20_ENV)
- /* Linux is nice -- it can tell us right away that we cannot
- * reach this recipient by returning an ENETUNREACH error
- * code. So, when this happens let's "down" the host NOW so
- * we don't sit around waiting for this host to timeout later.
- */
- if (call && code == -ENETUNREACH)
+ )
call->lastReceiveTime = 0;
-#endif
}
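The conditional above folds the former per-platform blocks into a single expression. Written as a hypothetical helper predicate (not part of this patch, name invented), the logic reads:

/* Hypothetical helper equivalent to the #ifdef chain above: does this
 * send result mean the destination host is known to be unreachable? */
static int
example_SendResultUnreachable(int code)
{
#ifdef AFS_NT40_ENV
    return (code == -1 && WSAGetLastError() == WSAEHOSTUNREACH);
#elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
    return (code == -ENETUNREACH);
#elif defined(AFS_DARWIN_ENV) && defined(KERNEL)
    return (code == EHOSTUNREACH);
#else
    return 0;
#endif
}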
#ifdef KERNEL
#ifdef RX_KERNEL_TRACE
#endif
#ifdef RXDEBUG
}
- dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], peer->host, peer->port, p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+ dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetsSent[p->header.type - 1]++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
MUTEX_ENTER(&peer->peer_lock);
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);
osi_NetSend(socket, &addr, &wirevec[0], len + 1, length,
istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.netSendFailures++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
for (i = 0; i < len; i++) {
p = list[i];
p->retryTime = p->timeSent; /* resend it very soon */
clock_Addmsec(&(p->retryTime),
10 + (((afs_uint32) p->backoff) << 8));
}
-#if defined(KERNEL) && defined(AFS_LINUX20_ENV)
- /* Linux is nice -- it can tell us right away that we cannot
- * reach this recipient by returning an ENETUNREACH error
- * code. So, when this happens let's "down" the host NOW so
+ /* Some systems are nice and tell us right away that we cannot
+ * reach this recipient by returning an error code.
+ * So, when this happens let's "down" the host NOW so
* we don't sit around waiting for this host to timeout later.
*/
- if (call && code == -ENETUNREACH)
- call->lastReceiveTime = 0;
+ if (call &&
+#ifdef AFS_NT40_ENV
+ code == -1 && WSAGetLastError() == WSAEHOSTUNREACH
+#elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
+ code == -ENETUNREACH
+#elif defined(AFS_DARWIN_ENV) && defined(KERNEL)
+ code == EHOSTUNREACH
+#else
+ 0
#endif
+ )
+ call->lastReceiveTime = 0;
}
#if defined(AFS_SUN5_ENV) && defined(KERNEL)
if (!istack && waslocked)
assert(p != NULL);
- dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], peer->host, peer->port, p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+ dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetsSent[p->header.type - 1]++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
MUTEX_ENTER(&peer->peer_lock);
-
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);
}
register struct rx_packet *p, register int last)
{
register struct rx_connection *conn = call->conn;
- int i, j;
+ int i;
ssize_t len; /* len must be a signed type; it can go negative */
p->flags &= ~RX_PKTFLAG_ACKED;
}
if (len > 0) {
osi_Panic("PrepareSendPacket 1\n"); /* MTUXXX */
- } else {
- struct rx_queue q;
- int nb;
-
- queue_Init(&q);
-
+ } else if (i < p->niovecs) {
/* Free any extra elements in the wirevec */
- for (j = MAX(2, i), nb = j - p->niovecs; j < p->niovecs; j++) {
- queue_Append(&q,RX_CBUF_TO_PACKET(p->wirevec[j].iov_base, p));
- }
- if (nb)
- rxi_FreePackets(nb, &q);
+#if defined(RX_ENABLE_TSFPQ)
+ rxi_FreeDataBufsTSFPQ(p, i, 1 /* allow global pool flush if overquota */);
+#else /* !RX_ENABLE_TSFPQ */
+ MUTEX_ENTER(&rx_freePktQ_lock);
+ rxi_FreeDataBufsNoLock(p, i);
+ MUTEX_EXIT(&rx_freePktQ_lock);
+#endif /* !RX_ENABLE_TSFPQ */
p->niovecs = i;
- p->wirevec[i - 1].iov_len += len;
}
+ p->wirevec[i - 1].iov_len += len;
RXS_PreparePacket(conn->securityObject, call, p);
}
int adjMTU;
int frags;
+ if (rxi_nRecvFrags == 1 && rxi_nSendFrags == 1)
+ return mtu;
adjMTU = RX_HEADER_SIZE + RX_JUMBOBUFFERSIZE + RX_JUMBOHEADERSIZE;
if (mtu <= adjMTU) {
return mtu;
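For a concrete sense of the threshold, assuming the stock Rx constants (RX_HEADER_SIZE 28, RX_JUMBOBUFFERSIZE 1412, RX_JUMBOHEADERSIZE 4):

/* Worked example with the assumed stock constants:
 *   adjMTU = 28 + 1412 + 4 = 1444
 * so a typical Ethernet MTU of 1500 exceeds adjMTU and is subject to
 * adjustment, while any MTU of 1444 or less is returned unchanged. */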