*/
#include <afsconfig.h>
-#ifdef KERNEL
-#include "afs/param.h"
-#else
#include <afs/param.h>
-#endif
-
#ifdef KERNEL
-#if defined(UKERNEL)
-#include "afs/sysincludes.h"
-#include "afsincludes.h"
-#include "rx/rx_kcommon.h"
-#include "rx/rx_clock.h"
-#include "rx/rx_queue.h"
-#include "rx/rx_packet.h"
-#else /* defined(UKERNEL) */
-#ifdef RX_KERNEL_TRACE
-#include "../rx/rx_kcommon.h"
-#endif
-#include "h/types.h"
-#ifndef AFS_LINUX20_ENV
-#include "h/systm.h"
-#endif
-#if defined(AFS_SGI_ENV) || defined(AFS_HPUX110_ENV)
-#include "afs/sysincludes.h"
-#endif
-#if defined(AFS_OBSD_ENV)
-#include "h/proc.h"
-#endif
-#include "h/socket.h"
-#if !defined(AFS_SUN5_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_HPUX110_ENV)
-#if !defined(AFS_OSF_ENV) && !defined(AFS_AIX41_ENV)
-#include "sys/mount.h" /* it gets pulled in by something later anyway */
-#endif
-#include "h/mbuf.h"
-#endif
-#include "netinet/in.h"
-#include "afs/afs_osi.h"
-#include "rx_kmutex.h"
-#include "rx/rx_clock.h"
-#include "rx/rx_queue.h"
-#ifdef AFS_SUN5_ENV
-#include <sys/sysmacros.h>
-#endif
-#include "rx/rx_packet.h"
-#endif /* defined(UKERNEL) */
-#include "rx/rx_globals.h"
+# if defined(UKERNEL)
+# include "afs/sysincludes.h"
+# include "afsincludes.h"
+# include "rx_kcommon.h"
+# else /* defined(UKERNEL) */
+# ifdef RX_KERNEL_TRACE
+# include "rx_kcommon.h"
+# endif
+# include "h/types.h"
+# ifndef AFS_LINUX20_ENV
+# include "h/systm.h"
+# endif
+# if defined(AFS_SGI_ENV) || defined(AFS_HPUX110_ENV) || defined(AFS_NBSD50_ENV)
+# include "afs/sysincludes.h"
+# endif
+# if defined(AFS_OBSD_ENV)
+# include "h/proc.h"
+# endif
+# include "h/socket.h"
+# if !defined(AFS_SUN5_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_HPUX110_ENV)
+# if !defined(AFS_OSF_ENV) && !defined(AFS_AIX41_ENV)
+# include "sys/mount.h" /* it gets pulled in by something later anyway */
+# endif
+# include "h/mbuf.h"
+# endif
+# include "netinet/in.h"
+# include "afs/afs_osi.h"
+# include "rx_kmutex.h"
+# endif /* defined(UKERNEL) */
#else /* KERNEL */
-#include "sys/types.h"
-#include <sys/stat.h>
-#include <errno.h>
-#if defined(AFS_NT40_ENV)
-#include <winsock2.h>
-#ifndef EWOULDBLOCK
-#define EWOULDBLOCK WSAEWOULDBLOCK
-#endif
-#include "rx_user.h"
-#include "rx_xmit_nt.h"
-#include <stdlib.h>
-#else
-#include <sys/socket.h>
-#include <netinet/in.h>
+# include <roken.h>
+# include <assert.h>
+# if defined(AFS_NT40_ENV)
+# ifndef EWOULDBLOCK
+# define EWOULDBLOCK WSAEWOULDBLOCK
+# endif
+# include "rx_user.h"
+# include "rx_xmit_nt.h"
+# endif
+# include <lwp.h>
+#endif /* KERNEL */
+
+#ifdef AFS_SUN5_ENV
+# include <sys/sysmacros.h>
#endif
-#include "rx_clock.h"
+
#include "rx.h"
+#include "rx_clock.h"
#include "rx_queue.h"
-#ifdef AFS_SUN5_ENV
-#include <sys/sysmacros.h>
-#endif
#include "rx_packet.h"
+#include "rx_atomic.h"
#include "rx_globals.h"
-#include <lwp.h>
-#include <assert.h>
-#include <string.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#endif /* KERNEL */
+#include "rx_internal.h"
+#include "rx_stats.h"
+
+#include "rx_conn.h"
+#include "rx_call.h"
#ifdef RX_LOCKS_DB
/* rxdb_fileID is used to identify the lock location, along with line#. */
if (rx_stats_active) {
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receivePktAllocFailures);
break;
case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendPktAllocFailures);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.specialPktAllocFailures);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receiveCbufPktAllocFailures);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendCbufPktAllocFailures);
break;
}
}
for (e = p + apackets; p < e; p++) {
RX_PACKET_IOV_INIT(p);
+#ifdef RX_TRACK_PACKETS
p->flags |= RX_PKTFLAG_FREE;
+#endif
p->niovecs = 2;
queue_Append(&rx_freePacketQueue, p);
for (e = p + apackets; p < e; p++) {
RX_PACKET_IOV_INIT(p);
+#ifdef RX_TRACK_PACKETS
p->flags |= RX_PKTFLAG_FREE;
+#endif
p->niovecs = 2;
queue_Append(&rx_freePacketQueue, p);
if (rx_stats_active) {
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+	    rx_atomic_inc(&rx_stats.receivePktAllocFailures);
break;
case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendPktAllocFailures);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.specialPktAllocFailures);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receiveCbufPktAllocFailures);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendCbufPktAllocFailures);
break;
}
}
#endif /* KERNEL */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.packetRequests);
if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
#ifdef KERNEL
if (rx_stats_active) {
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receivePktAllocFailures);
break;
case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendPktAllocFailures);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.specialPktAllocFailures);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receiveCbufPktAllocFailures);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendCbufPktAllocFailures);
break;
}
}
#endif /* KERNEL */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.packetRequests);
#ifdef KERNEL
if (queue_IsEmpty(&rx_freePacketQueue))
RX_TS_INFO_GET(rx_ts_info);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.packetRequests);
if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
MUTEX_ENTER(&rx_freePktQ_lock);
u_short * port)
{
struct sockaddr_in from;
- unsigned int nbytes;
+ int nbytes;
afs_int32 rlen;
afs_uint32 tlen, savelen;
struct msghdr msg;
p->wirevec[p->niovecs - 1].iov_len = savelen;
p->length = (u_short)(nbytes - RX_HEADER_SIZE);
- if ((nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */
+ if (nbytes < 0 || (nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */
if (nbytes < 0 && errno == EWOULDBLOCK) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.noPacketOnRead);
} else if (nbytes <= 0) {
if (rx_stats_active) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.bogusPacketOnRead++;
+ rx_atomic_inc(&rx_stats.bogusPacketOnRead);
rx_stats.bogusHost = from.sin_addr.s_addr;
- MUTEX_EXIT(&rx_stats_mutex);
}
- dpf(("B: bogus packet from [%x,%d] nb=%d", ntohl(from.sin_addr.s_addr),
+ dpf(("B: bogus packet from [%x,%d] nb=%d\n", ntohl(from.sin_addr.s_addr),
ntohs(from.sin_port), nbytes));
}
return 0;
*host = from.sin_addr.s_addr;
*port = from.sin_port;
- dpf(("Dropped %d %s: %x.%u.%u.%u.%u.%u.%u flags %d len %d",
+ dpf(("Dropped %d %s: %x.%u.%u.%u.%u.%u.%u flags %d len %d\n",
p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(*host), ntohs(*port), p->header.serial,
p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags,
p->length));
*host = from.sin_addr.s_addr;
*port = from.sin_port;
if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
- struct rx_peer *peer;
- if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
- /*
- * Try to look up this peer structure. If it doesn't exist,
- * don't create a new one -
- * we don't keep count of the bytes sent/received if a peer
- * structure doesn't already exist.
- *
- * The peer/connection cleanup code assumes that there is 1 peer
- * per connection. If we actually created a peer structure here
- * and this packet was an rxdebug packet, the peer structure would
- * never be cleaned up.
- */
- peer = rxi_FindPeer(*host, *port, 0, 0);
- /* Since this may not be associated with a connection,
- * it may have no refCount, meaning we could race with
- * ReapConnections
- */
- if (peer && (peer->refCount > 0)) {
- MUTEX_ENTER(&peer->peer_lock);
- hadd32(peer->bytesReceived, p->length);
- MUTEX_EXIT(&peer->peer_lock);
- }
+ if (rx_stats_active) {
+ struct rx_peer *peer;
+ rx_atomic_inc(&rx_stats.packetsRead[p->header.type - 1]);
+ /*
+ * Try to look up this peer structure. If it doesn't exist,
+ * don't create a new one -
+ * we don't keep count of the bytes sent/received if a peer
+ * structure doesn't already exist.
+ *
+ * The peer/connection cleanup code assumes that there is 1 peer
+ * per connection. If we actually created a peer structure here
+ * and this packet was an rxdebug packet, the peer structure would
+ * never be cleaned up.
+ */
+ peer = rxi_FindPeer(*host, *port, 0, 0);
+ /* Since this may not be associated with a connection,
+ * it may have no refCount, meaning we could race with
+ * ReapConnections
+ */
+ if (peer && (peer->refCount > 0)) {
+ MUTEX_ENTER(&peer->peer_lock);
+ hadd32(peer->bytesReceived, p->length);
+ MUTEX_EXIT(&peer->peer_lock);
+ }
+ }
}
#ifdef RX_TRIMDATABUFS
#endif /* AFS_SUN5_ENV */
#if !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN80_ENV)
+#if defined(AFS_NBSD_ENV)
+int
+rx_mb_to_packet(struct mbuf *amb, void (*free) (struct mbuf *), int hdr_len, int data_len, struct rx_packet *phandle)
+#else
int
rx_mb_to_packet(amb, free, hdr_len, data_len, phandle)
#if defined(AFS_SUN5_ENV) || defined(AFS_HPUX110_ENV)
void (*free) ();
struct rx_packet *phandle;
int hdr_len, data_len;
+#endif /* AFS_NBSD_ENV */
{
int code;
tstat.callsExecuted = htonl(rxi_nCalls);
tstat.packetReclaims = htonl(rx_packetReclaims);
tstat.usedFDs = CountFDs(64);
- tstat.nWaiting = htonl(rx_nWaiting);
- tstat.nWaited = htonl(rx_nWaited);
+ tstat.nWaiting = htonl(rx_atomic_read(&rx_nWaiting));
+ tstat.nWaited = htonl(rx_atomic_read(&rx_nWaited));
queue_Count(&rx_idleServerQueue, np, nqe, rx_serverQueueEntry,
tstat.idleThreads);
MUTEX_EXIT(&rx_serverPool_lock);
tpeer.burstWait.usec = htonl(tp->burstWait.usec);
tpeer.rtt = htonl(tp->rtt);
tpeer.rtt_dev = htonl(tp->rtt_dev);
- tpeer.timeout.sec = htonl(tp->timeout.sec);
- tpeer.timeout.usec = htonl(tp->timeout.usec);
tpeer.nSent = htonl(tp->nSent);
tpeer.reSends = htonl(tp->reSends);
tpeer.inPacketSkew = htonl(tp->inPacketSkew);
tpeer.outPacketSkew = htonl(tp->outPacketSkew);
- tpeer.rateFlag = htonl(tp->rateFlag);
tpeer.natMTU = htons(tp->natMTU);
tpeer.maxMTU = htons(tp->maxMTU);
tpeer.maxDgramPackets = htons(tp->maxDgramPackets);
p->length + RX_HEADER_SIZE, istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
- p->retryTime = p->timeSent; /* resend it very soon */
- clock_Addmsec(&(p->retryTime),
- 10 + (((afs_uint32) p->backoff) << 8));
+ rx_atomic_inc(&rx_stats.netSendFailures);
+ p->flags &= ~RX_PKTFLAG_SENT; /* resend it very soon */
+
/* Some systems are nice and tell us right away that we cannot
* reach this recipient by returning an error code.
* So, when this happens let's "down" the host NOW so
#endif
#ifdef RXDEBUG
}
- dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%.3d len %d",
+ dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" len %d\n",
deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host),
ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber,
- p->header.seq, p->header.flags, p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+ p->header.seq, p->header.flags, p, p->length));
#endif
- if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
- MUTEX_ENTER(&peer->peer_lock);
- hadd32(peer->bytesSent, p->length);
- MUTEX_EXIT(&peer->peer_lock);
+ if (rx_stats_active) {
+ rx_atomic_inc(&rx_stats.packetsSent[p->header.type - 1]);
+ MUTEX_ENTER(&peer->peer_lock);
+ hadd32(peer->bytesSent, p->length);
+ MUTEX_EXIT(&peer->peer_lock);
+ }
}
/* Send a list of packets to appropriate destination for the specified
istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.netSendFailures);
for (i = 0; i < len; i++) {
p = list[i];
- p->retryTime = p->timeSent; /* resend it very soon */
- clock_Addmsec(&(p->retryTime),
- 10 + (((afs_uint32) p->backoff) << 8));
+ p->flags &= ~RX_PKTFLAG_SENT; /* resend it very soon */
}
/* Some systems are nice and tell us right away that we cannot
* reach this recipient by returning an error code.
#ifdef RXDEBUG
}
- assert(p != NULL);
+ osi_Assert(p != NULL);
- dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%.3d len %d",
+ dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" len %d\n",
deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host),
ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber,
- p->header.seq, p->header.flags, p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+ p->header.seq, p->header.flags, p, p->length));
#endif
- if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
- MUTEX_ENTER(&peer->peer_lock);
- hadd32(peer->bytesSent, p->length);
- MUTEX_EXIT(&peer->peer_lock);
+ if (rx_stats_active) {
+ rx_atomic_inc(&rx_stats.packetsSent[p->header.type - 1]);
+ MUTEX_ENTER(&peer->peer_lock);
+ hadd32(peer->bytesSent, p->length);
+ MUTEX_EXIT(&peer->peer_lock);
+ }
}
/* Note: top 16 bits of this last word are the security checksum */
}
+/*
+ * LOCKS HELD: called with call->lock held.
+ *
+ * PrepareSendPacket is the only place in the code that
+ * can increment call->tnext. This could become an atomic
+ * in the future. Beyond that there is nothing in this
+ * function that requires the call being locked. This
+ * function can only be called by the application thread.
+ */
void
rxi_PrepareSendPacket(struct rx_call *call,
struct rx_packet *p, int last)
{
struct rx_connection *conn = call->conn;
+ afs_uint32 seq = call->tnext++;
unsigned int i;
afs_int32 len; /* len must be a signed type; it can go negative */
- p->flags &= ~RX_PKTFLAG_ACKED;
- p->header.cid = (conn->cid | call->channel);
- p->header.serviceId = conn->serviceId;
- p->header.securityIndex = conn->securityIndex;
-
/* No data packets on call 0. Where do these come from? */
if (*call->callNumber == 0)
*call->callNumber = 1;
+ MUTEX_EXIT(&call->lock);
+ p->flags &= ~(RX_PKTFLAG_ACKED | RX_PKTFLAG_SENT);
+
+ p->header.cid = (conn->cid | call->channel);
+ p->header.serviceId = conn->serviceId;
+ p->header.securityIndex = conn->securityIndex;
+
p->header.callNumber = *call->callNumber;
- p->header.seq = call->tnext++;
+ p->header.seq = seq;
p->header.epoch = conn->epoch;
p->header.type = RX_PACKET_TYPE_DATA;
p->header.flags = 0;
if (last)
p->header.flags |= RX_LAST_PACKET;
- clock_Zero(&p->retryTime); /* Never yet transmitted */
clock_Zero(&p->firstSent); /* Never yet transmitted */
p->header.serial = 0; /* Another way of saying never transmitted... */
- p->backoff = 0;
/* Now that we're sure this is the last data on the call, make sure
* that the "length" and the sum of the iov_lens matches. */
}
if (len)
p->wirevec[i - 1].iov_len += len;
+ MUTEX_ENTER(&call->lock);
RXS_PreparePacket(conn->securityObject, call, p);
}
#endif
for (p = rx_mallocedP; p; p = p->allNextp) {
- RXDPRINTF(RXDPRINTOUT, "%s - packet=0x%p, id=%u, firstSent=%u.%08u, timeSent=%u.%08u, retryTime=%u.%08u, firstSerial=%u, niovecs=%u, flags=0x%x, backoff=%u, length=%u header: epoch=%u, cid=%u, callNum=%u, seq=%u, serial=%u, type=%u, flags=0x%x, userStatus=%u, securityIndex=%u, serviceId=%u\r\n",
- cookie, p, p->packetId, p->firstSent.sec, p->firstSent.usec, p->timeSent.sec, p->timeSent.usec, p->retryTime.sec, p->retryTime.usec,
- p->firstSerial, p->niovecs, (afs_uint32)p->flags, (afs_uint32)p->backoff, (afs_uint32)p->length,
+ RXDPRINTF(RXDPRINTOUT, "%s - packet=0x%p, id=%u, firstSent=%u.%08u, timeSent=%u.%08u, firstSerial=%u, niovecs=%u, flags=0x%x, length=%u header: epoch=%u, cid=%u, callNum=%u, seq=%u, serial=%u, type=%u, flags=0x%x, userStatus=%u, securityIndex=%u, serviceId=%u\r\n",
+ cookie, p, p->packetId, p->firstSent.sec, p->firstSent.usec, p->timeSent.sec, p->timeSent.usec,
+ p->firstSerial, p->niovecs, (afs_uint32)p->flags, (afs_uint32)p->length,
p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.serial,
(afs_uint32)p->header.type, (afs_uint32)p->header.flags, (afs_uint32)p->header.userStatus,
(afs_uint32)p->header.securityIndex, (afs_uint32)p->header.serviceId);