#include "rx_globals.h"
#include <lwp.h>
#include <assert.h>
-#ifdef HAVE_STRING_H
#include <string.h>
-#else
-#ifdef HAVE_STRINGS_H
-#include <strings.h>
-#endif
-#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
static int AllocPacketBufs(int class, int num_pkts, struct rx_queue *q);
static void rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
- struct sockaddr_storage *saddr, int slen,
+ afs_int32 ahost, short aport,
afs_int32 istack);
-static int rxi_FreeDataBufsToQueue(struct rx_packet *p, int first,
+static int rxi_FreeDataBufsToQueue(struct rx_packet *p,
+ afs_uint32 first,
struct rx_queue * q);
+static int
+rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first, int flush_global);
+
/* some rules about packets:
* 1. When a packet is allocated, the final iov_buf contains room for
static int
AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
{
- register struct rx_packet *c;
register struct rx_ts_info_t * rx_ts_info;
int transfer, alloc;
SPLVAR;
USERPRI;
}
- RX_TS_FPQ_CHECKOUT2(rx_ts_info, num_pkts, q);
+ RX_TS_FPQ_QCHECKOUT(rx_ts_info, num_pkts, q);
return num_pkts;
}
if (overq) {
rxi_NeedMorePackets = TRUE;
- MUTEX_ENTER(&rx_stats_mutex);
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_stats.receivePktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND:
- rx_stats.sendPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_stats.specialPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_stats.receiveCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_stats.sendCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
break;
}
- MUTEX_EXIT(&rx_stats_mutex);
}
if (rx_nFreePackets < num_pkts)
if (!num_pkts) {
for (queue_Scan(q, c, nc, rx_packet), num_pkts++) {
- rxi_FreeDataBufsTSFPQ(c, 1, 0);
+ rxi_FreeDataBufsTSFPQ(c, 2, 0);
}
} else {
for (queue_Scan(q, c, nc, rx_packet)) {
- rxi_FreeDataBufsTSFPQ(c, 1, 0);
+ rxi_FreeDataBufsTSFPQ(c, 2, 0);
}
}
if (num_pkts) {
- RX_TS_FPQ_CHECKIN2(rx_ts_info, num_pkts, q);
+ RX_TS_FPQ_QCHECKIN(rx_ts_info, num_pkts, q);
}
if (rx_ts_info->_FPQ.len > rx_TSFPQLocalMax) {
getme = apackets * sizeof(struct rx_packet);
p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
+ osi_Assert(p);
PIN(p, getme); /* XXXXX */
memset((char *)p, 0, getme);
getme = apackets * sizeof(struct rx_packet);
p = rx_mallocedP = (struct rx_packet *)osi_Alloc(getme);
+ osi_Assert(p);
PIN(p, getme); /* XXXXX */
memset((char *)p, 0, getme);
}
#endif /* RX_ENABLE_TSFPQ */
-/* free continuation buffers off a packet into a queue of buffers */
+/*
+ * free continuation buffers off a packet into a queue
+ *
+ * [IN] p -- packet from which continuation buffers will be freed
+ * [IN] first -- iovec offset of first continuation buffer to free
+ * [IN] q -- queue into which continuation buffers will be chained
+ *
+ * returns:
+ * number of continuation buffers freed
+ */
static int
-rxi_FreeDataBufsToQueue(struct rx_packet *p, int first, struct rx_queue * q)
+rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first, struct rx_queue * q)
{
struct iovec *iov;
struct rx_packet * cb;
int count = 0;
- if (first < 2)
- first = 2;
- for (; first < p->niovecs; first++, count++) {
+ for (first = MAX(2, first); first < p->niovecs; first++, count++) {
iov = &p->wirevec[first];
if (!iov->iov_base)
- osi_Panic("rxi_PacketIOVToQueue: unexpected NULL iov");
+ osi_Panic("rxi_FreeDataBufsToQueue: unexpected NULL iov");
cb = RX_CBUF_TO_PACKET(iov->iov_base, p);
RX_FPQ_MARK_FREE(cb);
queue_Append(q, cb);
return count;
}
+/*
+ * free packet continuation buffers into the global free packet pool
+ *
+ * [IN] p -- packet from which to free continuation buffers
+ * [IN] first -- iovec offset of first continuation buffer to free
+ *
+ * returns:
+ * zero always
+ */
int
-rxi_FreeDataBufsNoLock(struct rx_packet *p, int first)
+rxi_FreeDataBufsNoLock(struct rx_packet *p, afs_uint32 first)
{
- struct iovec *iov, *end;
+ struct iovec *iov;
- if (first != 1) /* MTUXXX */
- osi_Panic("FreeDataBufs 1: first must be 1");
- iov = &p->wirevec[1];
- end = iov + (p->niovecs - 1);
- if (iov->iov_base != (caddr_t) p->localdata) /* MTUXXX */
- osi_Panic("FreeDataBufs 2: vec 1 must be localdata");
- for (iov++; iov < end; iov++) {
+ for (first = MAX(2, first); first < p->niovecs; first++) {
+ iov = &p->wirevec[first];
if (!iov->iov_base)
- osi_Panic("FreeDataBufs 3: vecs 2-niovecs must not be NULL");
+ osi_Panic("rxi_FreeDataBufsNoLock: unexpected NULL iov");
rxi_FreePacketNoLock(RX_CBUF_TO_PACKET(iov->iov_base, p));
}
p->length = 0;
}
#ifdef RX_ENABLE_TSFPQ
-int
-rxi_FreeDataBufsTSFPQ(struct rx_packet *p, int first, int flush_global)
+/*
+ * free packet continuation buffers into the thread-local free pool
+ *
+ * [IN] p -- packet from which continuation buffers will be freed
+ * [IN] first -- iovec offset of first continuation buffer to free
+ * [IN] flush_global -- if nonzero, we will flush overquota packets to the
+ * global free pool before returning
+ *
+ * returns:
+ * zero always
+ */
+static int
+rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first, int flush_global)
{
- struct iovec *iov, *end;
+ struct iovec *iov;
register struct rx_ts_info_t * rx_ts_info;
RX_TS_INFO_GET(rx_ts_info);
- if (first != 1) /* MTUXXX */
- osi_Panic("FreeDataBufs 1: first must be 1");
- iov = &p->wirevec[1];
- end = iov + (p->niovecs - 1);
- if (iov->iov_base != (caddr_t) p->localdata) /* MTUXXX */
- osi_Panic("FreeDataBufs 2: vec 1 must be localdata");
- for (iov++; iov < end; iov++) {
+ for (first = MAX(2, first); first < p->niovecs; first++) {
+ iov = &p->wirevec[first];
if (!iov->iov_base)
- osi_Panic("FreeDataBufs 3: vecs 2-niovecs must not be NULL");
+ osi_Panic("rxi_FreeDataBufsTSFPQ: unexpected NULL iov");
RX_TS_FPQ_CHECKIN(rx_ts_info,RX_CBUF_TO_PACKET(iov->iov_base, p));
}
p->length = 0;
/*
 * Free a packet: release its continuation buffers (starting at iovec
 * index 2) into the thread-local free pool, then free the packet header
 * via rxi_FreePacketTSFPQ with RX_TS_FPQ_FLUSH_GLOBAL.
 */
void
rxi_FreePacket(struct rx_packet *p)
{
    rxi_FreeDataBufsTSFPQ(p, 2, 0);
    rxi_FreePacketTSFPQ(p, RX_TS_FPQ_FLUSH_GLOBAL);
}
#else /* RX_ENABLE_TSFPQ */
NETPRI;
MUTEX_ENTER(&rx_freePktQ_lock);
- rxi_FreeDataBufsNoLock(p, 1);
+ rxi_FreeDataBufsNoLock(p, 2);
rxi_FreePacketNoLock(p);
/* Wakeup anyone waiting for packets */
rxi_PacketsUnWait();
#ifdef KERNEL
if (rxi_OverQuota(class)) {
rxi_NeedMorePackets = TRUE;
- MUTEX_ENTER(&rx_stats_mutex);
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_stats.receivePktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND:
- rx_stats.sendPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_stats.specialPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_stats.receiveCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_stats.sendCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
break;
}
- MUTEX_EXIT(&rx_stats_mutex);
- return (struct rx_packet *)0;
+ return (struct rx_packet *)0;
}
#endif /* KERNEL */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetRequests++;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
#ifdef KERNEL
#ifdef KERNEL
if (rxi_OverQuota(class)) {
rxi_NeedMorePackets = TRUE;
- MUTEX_ENTER(&rx_stats_mutex);
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_stats.receivePktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND:
- rx_stats.sendPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_stats.specialPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_stats.receiveCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_stats.sendCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
break;
}
- MUTEX_EXIT(&rx_stats_mutex);
return (struct rx_packet *)0;
}
#endif /* KERNEL */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetRequests++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
#ifdef KERNEL
if (queue_IsEmpty(&rx_freePacketQueue))
RX_TS_INFO_GET(rx_ts_info);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetRequests++;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
MUTEX_ENTER(&rx_freePktQ_lock);
* the data length of the packet is stored in the packet structure.
* The header is decoded. */
int
-rxi_ReadPacket(osi_socket socket, register struct rx_packet *p,
- struct sockaddr_storage *saddr, int *slen)
+rxi_ReadPacket(osi_socket socket, register struct rx_packet *p, afs_uint32 * host,
+ u_short * port)
{
+ struct sockaddr_in from;
int nbytes;
afs_int32 rlen;
register afs_int32 tlen, savelen;
p->wirevec[p->niovecs - 1].iov_len += RX_EXTRABUFFERSIZE;
memset((char *)&msg, 0, sizeof(msg));
- msg.msg_name = (char *)saddr;
- msg.msg_namelen = *slen;
+ msg.msg_name = (char *)&from;
+ msg.msg_namelen = sizeof(struct sockaddr_in);
msg.msg_iov = p->wirevec;
msg.msg_iovlen = p->niovecs;
nbytes = rxi_Recvmsg(socket, &msg, 0);
- *slen = msg.msg_namelen;
/* restore the vec to its correct state */
p->wirevec[p->niovecs - 1].iov_len = savelen;
p->length = (nbytes - RX_HEADER_SIZE);
if ((nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */
if (nbytes < 0 && errno == EWOULDBLOCK) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.noPacketOnRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
} else if (nbytes <= 0) {
MUTEX_ENTER(&rx_stats_mutex);
rx_stats.bogusPacketOnRead++;
- switch (rx_ssfamily(saddr)) {
- case AF_INET:
- rx_stats.bogusHost = rx_ss2sin(saddr)->sin_addr.s_addr;
- break;
- default:
-#ifdef AF_INET6
- case AF_INET6:
-#endif /* AF_INET6 */
- rx_stats.bogusHost = 0xffffffff;
- break;
- }
+ rx_stats.bogusHost = from.sin_addr.s_addr;
MUTEX_EXIT(&rx_stats_mutex);
- dpf(("B: bogus packet from [%x,%d] nb=%d",
- ntohl(rx_ss2v4addr(saddr)), ntohs(rx_ss2pn(saddr)), nbytes));
+ dpf(("B: bogus packet from [%x,%d] nb=%d", ntohl(from.sin_addr.s_addr),
+ ntohs(from.sin_port), nbytes));
}
return 0;
}
&& (random() % 100 < rx_intentionallyDroppedOnReadPer100)) {
rxi_DecodePacketHeader(p);
+ *host = from.sin_addr.s_addr;
+ *port = from.sin_port;
+
dpf(("Dropped %d %s: %x.%u.%u.%u.%u.%u.%u flags %d len %d",
- p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(rx_ss2v4addr(saddr)), ntohs(rx_ss2pn(saddr)), p->header.serial,
+ p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(*host), ntohs(*port), p->header.serial,
p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags,
p->length));
rxi_TrimDataBufs(p, 1);
/* Extract packet header. */
rxi_DecodePacketHeader(p);
+ *host = from.sin_addr.s_addr;
+ *port = from.sin_port;
if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
struct rx_peer *peer;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetsRead[p->header.type - 1]++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
/*
* Try to look up this peer structure. If it doesn't exist,
* don't create a new one -
* and this packet was an rxdebug packet, the peer structure would
* never be cleaned up.
*/
- peer = rxi_FindPeer(saddr, *slen, SOCK_DGRAM, 0, 0);
+ peer = rxi_FindPeer(*host, *port, 0, 0);
/* Since this may not be associated with a connection,
* it may have no refCount, meaning we could race with
* ReapConnections
* last two pad bytes. */
struct rx_packet *
-rxi_SplitJumboPacket(register struct rx_packet *p,
- struct sockaddr_storage *saddr, int slen, int first)
+rxi_SplitJumboPacket(register struct rx_packet *p, afs_int32 host, short port,
+ int first)
{
struct rx_packet *np;
struct rx_jumboHeader *jp;
#ifndef KERNEL
/* Send a udp datagram */
int
-osi_NetSend(osi_socket socket, void *addr, int addrlen, struct iovec *dvec,
- int nvecs, int length, int istack)
+osi_NetSend(osi_socket socket, void *addr, struct iovec *dvec, int nvecs,
+ int length, int istack)
{
struct msghdr msg;
int ret;
msg.msg_iov = dvec;
msg.msg_iovlen = nvecs;
msg.msg_name = addr;
- msg.msg_namelen = addrlen;
+ msg.msg_namelen = sizeof(struct sockaddr_in);
ret = rxi_Sendmsg(socket, &msg, 0);
struct rx_packet *
rxi_ReceiveDebugPacket(register struct rx_packet *ap, osi_socket asocket,
- struct sockaddr_storage *saddr, int slen, int istack)
+ afs_int32 ahost, short aport, int istack)
{
struct rx_debugIn tin;
afs_int32 tl;
rx_packetwrite(ap, 0, sizeof(struct rx_debugStats),
(char *)&tstat);
ap->length = sizeof(struct rx_debugStats);
- rxi_SendDebugPacket(ap, asocket, saddr, slen, istack);
+ rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
rx_computelen(ap, ap->length);
}
break;
for (tc = rx_connHashTable[i]; tc; tc = tc->next) {
if ((all || rxi_IsConnInteresting(tc))
&& tin.index-- <= 0) {
- switch (rx_ssfamily(&tc->peer->saddr)) {
- case AF_INET:
- tconn.host = rx_ss2sin(&tc->peer->saddr)->sin_addr.s_addr;
- break;
- default:
-#ifdef AF_INET6
- case AF_INET6:
-#endif /* AF_INET6 */
- tconn.host = 0xffffffff;
- break;
- }
- tconn.port = rx_ss2pn(&tc->peer->saddr);
+ tconn.host = tc->peer->host;
+ tconn.port = tc->peer->port;
tconn.cid = htonl(tc->cid);
tconn.epoch = htonl(tc->epoch);
tconn.serial = htonl(tc->serial);
(char *)&tconn);
tl = ap->length;
ap->length = sizeof(struct rx_debugConn);
- rxi_SendDebugPacket(ap, asocket, saddr, slen,
+ rxi_SendDebugPacket(ap, asocket, ahost, aport,
istack);
ap->length = tl;
return ap;
(char *)&tconn);
tl = ap->length;
ap->length = sizeof(struct rx_debugConn);
- rxi_SendDebugPacket(ap, asocket, saddr, slen, istack);
+ rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
ap->length = tl;
break;
}
MUTEX_ENTER(&rx_peerHashTable_lock);
for (tp = rx_peerHashTable[i]; tp; tp = tp->next) {
if (tin.index-- <= 0) {
- switch (rx_ssfamily(&tp->saddr)) {
- case AF_INET:
- tpeer.host = rx_ss2sin(&tp->saddr)->sin_addr.s_addr;
- break;
- default:
-#ifdef AF_INET6
- case AF_INET6:
-#endif /* AF_INET6 */
- tpeer.host = 0xffffffff;
- break;
- }
- tpeer.port = rx_ss2pn(&tp->saddr);
+ tpeer.host = tp->host;
+ tpeer.port = tp->port;
tpeer.ifMTU = htons(tp->ifMTU);
tpeer.idleWhen = htonl(tp->idleWhen);
tpeer.refCount = htons(tp->refCount);
(char *)&tpeer);
tl = ap->length;
ap->length = sizeof(struct rx_debugPeer);
- rxi_SendDebugPacket(ap, asocket, saddr, slen,
+ rxi_SendDebugPacket(ap, asocket, ahost, aport,
istack);
ap->length = tl;
return ap;
(char *)&tpeer);
tl = ap->length;
ap->length = sizeof(struct rx_debugPeer);
- rxi_SendDebugPacket(ap, asocket, saddr, slen, istack);
+ rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
ap->length = tl;
break;
}
tl = ap->length;
ap->length = sizeof(rx_stats);
MUTEX_EXIT(&rx_stats_mutex);
- rxi_SendDebugPacket(ap, asocket, saddr, slen, istack);
+ rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
ap->length = tl;
break;
}
rx_packetwrite(ap, 0, sizeof(struct rx_debugIn), (char *)&tin);
tl = ap->length;
ap->length = sizeof(struct rx_debugIn);
- rxi_SendDebugPacket(ap, asocket, saddr, slen, istack);
+ rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
ap->length = tl;
break;
}
struct rx_packet *
rxi_ReceiveVersionPacket(register struct rx_packet *ap, osi_socket asocket,
- struct sockaddr_storage *saddr, int slen, int istack)
+ afs_int32 ahost, short aport, int istack)
{
afs_int32 tl;
rx_packetwrite(ap, 0, 65, buf);
tl = ap->length;
ap->length = 65;
- rxi_SendDebugPacket(ap, asocket, saddr, slen, istack);
+ rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
ap->length = tl;
}
/* send a debug packet back to the sender */
static void
rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
- struct sockaddr_storage *saddr, int slen, afs_int32 istack)
+ afs_int32 ahost, short aport, afs_int32 istack)
{
+ struct sockaddr_in taddr;
int i;
int nbytes;
int saven = 0;
int waslocked = ISAFS_GLOCK();
#endif
+ taddr.sin_family = AF_INET;
+ taddr.sin_port = aport;
+ taddr.sin_addr.s_addr = ahost;
+#ifdef STRUCT_SOCKADDR_HAS_SA_LEN
+ taddr.sin_len = sizeof(struct sockaddr_in);
+#endif
+
/* We need to trim the niovecs. */
nbytes = apacket->length;
for (i = 1; i < apacket->niovecs; i++) {
#endif
#endif
/* debug packets are not reliably delivered, hence the cast below. */
- (void)osi_NetSend(asocket, saddr, slen, apacket->wirevec, apacket->niovecs,
+ (void)osi_NetSend(asocket, &taddr, apacket->wirevec, apacket->niovecs,
apacket->length + RX_HEADER_SIZE, istack);
#ifdef KERNEL
#ifdef RX_KERNEL_TRACE
int waslocked;
#endif
int code;
+ struct sockaddr_in addr;
register struct rx_peer *peer = conn->peer;
osi_socket socket;
#ifdef RXDEBUG
char deliveryType = 'S';
#endif
+ /* The address we're sending the packet to */
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = AF_INET;
+ addr.sin_port = peer->port;
+ addr.sin_addr.s_addr = peer->host;
+
/* This stuff should be revamped, I think, so that most, if not
* all, of the header stuff is always added here. We could
* probably do away with the encode/decode routines. XXXXX */
/* If an output tracer function is defined, call it with the packet and
* network address. Note this function may modify its arguments. */
if (rx_almostSent) {
- int drop = (*rx_almostSent) (p, &peer->saddr);
+ int drop = (*rx_almostSent) (p, &addr);
/* drop packet if return value is non-zero? */
if (drop)
deliveryType = 'D'; /* Drop the packet */
#endif
#endif
if ((code =
- osi_NetSend(socket, &peer->saddr, peer->saddrlen, p->wirevec,
- p->niovecs, p->length + RX_HEADER_SIZE,
- istack)) != 0) {
+ osi_NetSend(socket, &addr, p->wirevec, p->niovecs,
+ p->length + RX_HEADER_SIZE, istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.netSendFailures++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
p->retryTime = p->timeSent; /* resend it very soon */
clock_Addmsec(&(p->retryTime),
10 + (((afs_uint32) p->backoff) << 8));
-
-#ifdef AFS_NT40_ENV
- /* Windows is nice -- it can tell us right away that we cannot
- * reach this recipient by returning an WSAEHOSTUNREACH error
- * code. So, when this happens let's "down" the host NOW so
+ /* Some systems are nice and tell us right away that we cannot
+ * reach this recipient by returning an error code.
+ * So, when this happens let's "down" the host NOW so
* we don't sit around waiting for this host to timeout later.
*/
- if (call && code == -1 && errno == WSAEHOSTUNREACH)
- call->lastReceiveTime = 0;
+ if (call &&
+#ifdef AFS_NT40_ENV
+ code == -1 && WSAGetLastError() == WSAEHOSTUNREACH
+#elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
+ code == -ENETUNREACH
+#elif defined(AFS_DARWIN_ENV) && defined(KERNEL)
+ code == EHOSTUNREACH
+#else
+ 0
#endif
-#if defined(KERNEL) && defined(AFS_LINUX20_ENV)
- /* Linux is nice -- it can tell us right away that we cannot
- * reach this recipient by returning an ENETUNREACH error
- * code. So, when this happens let's "down" the host NOW so
- * we don't sit around waiting for this host to timeout later.
- */
- if (call && code == -ENETUNREACH)
+ )
call->lastReceiveTime = 0;
-#endif
}
#ifdef KERNEL
#ifdef RX_KERNEL_TRACE
#endif
#ifdef RXDEBUG
}
- dpf(("%c %d %s: %s.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], rx_AddrStringOf(peer), ntohs(rx_PortOf(peer)), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+ dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetsSent[p->header.type - 1]++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
MUTEX_ENTER(&peer->peer_lock);
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);
#if defined(AFS_SUN5_ENV) && defined(KERNEL)
int waslocked;
#endif
+ struct sockaddr_in addr;
register struct rx_peer *peer = conn->peer;
osi_socket socket;
struct rx_packet *p = NULL;
#ifdef RXDEBUG
char deliveryType = 'S';
#endif
+ /* The address we're sending the packet to */
+ addr.sin_family = AF_INET;
+ addr.sin_port = peer->port;
+ addr.sin_addr.s_addr = peer->host;
if (len + 1 > RX_MAXIOVECS) {
osi_Panic("rxi_SendPacketList, len > RX_MAXIOVECS\n");
/* If an output tracer function is defined, call it with the packet and
* network address. Note this function may modify its arguments. */
if (rx_almostSent) {
- int drop = (*rx_almostSent) (p, &peer->saddr);
+ int drop = (*rx_almostSent) (p, &addr);
/* drop packet if return value is non-zero? */
if (drop)
deliveryType = 'D'; /* Drop the packet */
AFS_GUNLOCK();
#endif
if ((code =
- osi_NetSend(socket, &peer->saddr, peer->saddrlen, &wirevec[0],
- len + 1, length, istack)) != 0) {
+ osi_NetSend(socket, &addr, &wirevec[0], len + 1, length,
+ istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.netSendFailures++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
for (i = 0; i < len; i++) {
p = list[i];
p->retryTime = p->timeSent; /* resend it very soon */
clock_Addmsec(&(p->retryTime),
10 + (((afs_uint32) p->backoff) << 8));
}
-#ifdef AFS_NT40_ENV
- /* Windows is nice -- it can tell us right away that we cannot
- * reach this recipient by returning an WSAEHOSTUNREACH error
- * code. So, when this happens let's "down" the host NOW so
+ /* Some systems are nice and tell us right away that we cannot
+ * reach this recipient by returning an error code.
+ * So, when this happens let's "down" the host NOW so
* we don't sit around waiting for this host to timeout later.
*/
- if (call && code == -1 && errno == WSAEHOSTUNREACH)
- call->lastReceiveTime = 0;
+ if (call &&
+#ifdef AFS_NT40_ENV
+ code == -1 && WSAGetLastError() == WSAEHOSTUNREACH
+#elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
+ code == -ENETUNREACH
+#elif defined(AFS_DARWIN_ENV) && defined(KERNEL)
+ code == EHOSTUNREACH
+#else
+ 0
#endif
-#if defined(KERNEL) && defined(AFS_LINUX20_ENV)
- /* Linux is nice -- it can tell us right away that we cannot
- * reach this recipient by returning an ENETUNREACH error
- * code. So, when this happens let's "down" the host NOW so
- * we don't sit around waiting for this host to timeout later.
- */
- if (call && code == -ENETUNREACH)
+ )
call->lastReceiveTime = 0;
-#endif
}
#if defined(AFS_SUN5_ENV) && defined(KERNEL)
if (!istack && waslocked)
assert(p != NULL);
- dpf(("%c %d %s: %s.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], rx_AddrStringOf(peer), ntohs(rx_PortOf(peer)), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+ dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetsSent[p->header.type - 1]++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
MUTEX_ENTER(&peer->peer_lock);
-
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);
}
register struct rx_packet *p, register int last)
{
register struct rx_connection *conn = call->conn;
- int i, j;
+ int i;
ssize_t len; /* len must be a signed type; it can go negative */
p->flags &= ~RX_PKTFLAG_ACKED;
}
if (len > 0) {
osi_Panic("PrepareSendPacket 1\n"); /* MTUXXX */
- } else {
- struct rx_queue q;
- int nb;
-
- queue_Init(&q);
-
+ } else if (i < p->niovecs) {
/* Free any extra elements in the wirevec */
- for (j = MAX(2, i), nb = p->niovecs - j; j < p->niovecs; j++) {
- queue_Append(&q,RX_CBUF_TO_PACKET(p->wirevec[j].iov_base, p));
- }
- if (nb)
- rxi_FreePackets(nb, &q);
+#if defined(RX_ENABLE_TSFPQ)
+ rxi_FreeDataBufsTSFPQ(p, i, 1 /* allow global pool flush if overquota */);
+#else /* !RX_ENABLE_TSFPQ */
+ MUTEX_ENTER(&rx_freePktQ_lock);
+ rxi_FreeDataBufsNoLock(p, i);
+ MUTEX_EXIT(&rx_freePktQ_lock);
+#endif /* !RX_ENABLE_TSFPQ */
- p->niovecs = MAX(2, i);
- p->wirevec[MAX(2, i) - 1].iov_len += len;
+ p->niovecs = i;
}
+ p->wirevec[i - 1].iov_len += len;
RXS_PreparePacket(conn->securityObject, call, p);
}
int adjMTU;
int frags;
+ if (rxi_nRecvFrags == 1 && rxi_nSendFrags == 1)
+ return mtu;
adjMTU = RX_HEADER_SIZE + RX_JUMBOBUFFERSIZE + RX_JUMBOHEADERSIZE;
if (mtu <= adjMTU) {
return mtu;