*/
#include <afsconfig.h>
-#ifdef KERNEL
-#include "afs/param.h"
-#else
#include <afs/param.h>
-#endif
-
#ifdef KERNEL
-#if defined(UKERNEL)
-#include "afs/sysincludes.h"
-#include "afsincludes.h"
-#include "rx/rx_kcommon.h"
-#include "rx/rx_clock.h"
-#include "rx/rx_queue.h"
-#include "rx/rx_packet.h"
-#else /* defined(UKERNEL) */
-#ifdef RX_KERNEL_TRACE
-#include "../rx/rx_kcommon.h"
-#endif
-#include "h/types.h"
-#ifndef AFS_LINUX20_ENV
-#include "h/systm.h"
-#endif
-#if defined(AFS_SGI_ENV) || defined(AFS_HPUX110_ENV)
-#include "afs/sysincludes.h"
-#endif
-#if defined(AFS_OBSD_ENV)
-#include "h/proc.h"
-#endif
-#include "h/socket.h"
-#if !defined(AFS_SUN5_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_HPUX110_ENV)
-#if !defined(AFS_OSF_ENV) && !defined(AFS_AIX41_ENV)
-#include "sys/mount.h" /* it gets pulled in by something later anyway */
-#endif
-#include "h/mbuf.h"
-#endif
-#include "netinet/in.h"
-#include "afs/afs_osi.h"
-#include "rx_kmutex.h"
-#include "rx/rx_clock.h"
-#include "rx/rx_queue.h"
-#ifdef AFS_SUN5_ENV
-#include <sys/sysmacros.h>
-#endif
-#include "rx/rx_packet.h"
-#endif /* defined(UKERNEL) */
-#include "rx/rx_globals.h"
+# if defined(UKERNEL)
+# include "afs/sysincludes.h"
+# include "afsincludes.h"
+# include "rx_kcommon.h"
+# else /* defined(UKERNEL) */
+# ifdef RX_KERNEL_TRACE
+# include "rx_kcommon.h"
+# endif
+# include "h/types.h"
+# ifndef AFS_LINUX20_ENV
+# include "h/systm.h"
+# endif
+# if defined(AFS_SGI_ENV) || defined(AFS_HPUX110_ENV) || defined(AFS_NBSD50_ENV)
+# include "afs/sysincludes.h"
+# endif
+# if defined(AFS_OBSD_ENV)
+# include "h/proc.h"
+# endif
+# include "h/socket.h"
+# if !defined(AFS_SUN5_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_HPUX110_ENV)
+# if !defined(AFS_OSF_ENV) && !defined(AFS_AIX41_ENV)
+# include "sys/mount.h" /* it gets pulled in by something later anyway */
+# endif
+# include "h/mbuf.h"
+# endif
+# include "netinet/in.h"
+# include "afs/afs_osi.h"
+# include "rx_kmutex.h"
+# endif /* defined(UKERNEL) */
#else /* KERNEL */
-#include "sys/types.h"
-#include <sys/stat.h>
-#include <errno.h>
-#if defined(AFS_NT40_ENV)
-#include <winsock2.h>
-#ifndef EWOULDBLOCK
-#define EWOULDBLOCK WSAEWOULDBLOCK
-#endif
-#include "rx_user.h"
-#include "rx_xmit_nt.h"
-#include <stdlib.h>
-#else
-#include <sys/socket.h>
-#include <netinet/in.h>
+# include <roken.h>
+# include <assert.h>
+# if defined(AFS_NT40_ENV)
+# ifndef EWOULDBLOCK
+# define EWOULDBLOCK WSAEWOULDBLOCK
+# endif
+# include "rx_user.h"
+# include "rx_xmit_nt.h"
+# endif
+# include <lwp.h>
+#endif /* KERNEL */
+
+#ifdef AFS_SUN5_ENV
+# include <sys/sysmacros.h>
#endif
-#include "rx_clock.h"
+
#include "rx.h"
+#include "rx_clock.h"
#include "rx_queue.h"
-#ifdef AFS_SUN5_ENV
-#include <sys/sysmacros.h>
-#endif
#include "rx_packet.h"
+#include "rx_atomic.h"
#include "rx_globals.h"
-#include <lwp.h>
-#include <assert.h>
-#include <string.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#endif /* KERNEL */
+#include "rx_internal.h"
+#include "rx_stats.h"
+
+#include "rx_peer.h"
+#include "rx_conn.h"
+#include "rx_call.h"
#ifdef RX_LOCKS_DB
/* rxdb_fileID is used to identify the lock location, along with line#. */
static void rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
afs_uint32 ahost, short aport,
afs_int32 istack);
+static struct rx_packet *rxi_AllocPacketNoLock(int class);
+
+#ifndef KERNEL
+static void rxi_MorePacketsNoLock(int apackets);
+#endif
#ifdef RX_ENABLE_TSFPQ
-static int
-rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first, int flush_global);
+static int rxi_FreeDataBufsTSFPQ(struct rx_packet *p, afs_uint32 first,
+ int flush_global);
+static void rxi_AdjustLocalPacketsTSFPQ(int num_keep_local,
+ int allow_overcommit);
#else
-static int rxi_FreeDataBufsToQueue(struct rx_packet *p,
- afs_uint32 first,
+static void rxi_FreePacketNoLock(struct rx_packet *p);
+static int rxi_FreeDataBufsNoLock(struct rx_packet *p, afs_uint32 first);
+static int rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first,
struct rx_queue * q);
#endif
if (rx_stats_active) {
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receivePktAllocFailures);
break;
case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendPktAllocFailures);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.specialPktAllocFailures);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receiveCbufPktAllocFailures);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendCbufPktAllocFailures);
break;
}
}
#ifndef KERNEL
/* Add more packet buffers */
-void
+static void
rxi_MorePacketsNoLock(int apackets)
{
#ifdef RX_ENABLE_TSFPQ
}
#ifdef RX_ENABLE_TSFPQ
-void
+static void
rxi_AdjustLocalPacketsTSFPQ(int num_keep_local, int allow_overcommit)
{
struct rx_ts_info_t * rx_ts_info;
*/
/* Actually free the packet p. */
-#ifdef RX_ENABLE_TSFPQ
-void
-rxi_FreePacketNoLock(struct rx_packet *p)
-{
- struct rx_ts_info_t * rx_ts_info;
- dpf(("Free %"AFS_PTR_FMT"\n", p));
-
- RX_TS_INFO_GET(rx_ts_info);
- RX_TS_FPQ_CHECKIN(rx_ts_info,p);
- if (rx_ts_info->_FPQ.len > rx_TSFPQLocalMax) {
- RX_TS_FPQ_LTOG(rx_ts_info);
- }
-}
-#else /* RX_ENABLE_TSFPQ */
-void
+#ifndef RX_ENABLE_TSFPQ
+static void
rxi_FreePacketNoLock(struct rx_packet *p)
{
dpf(("Free %"AFS_PTR_FMT"\n", p));
#endif /* RX_ENABLE_TSFPQ */
#ifdef RX_ENABLE_TSFPQ
-void
+static void
rxi_FreePacketTSFPQ(struct rx_packet *p, int flush_global)
{
struct rx_ts_info_t * rx_ts_info;
return count;
}
-#endif
/*
* free packet continuation buffers into the global free packet pool
* returns:
* zero always
*/
-int
+static int
rxi_FreeDataBufsNoLock(struct rx_packet *p, afs_uint32 first)
{
struct iovec *iov;
return 0;
}
-#ifdef RX_ENABLE_TSFPQ
+#else
+
/*
* free packet continuation buffers into the thread-local free pool
*
* The header is absolutely necessary, besides, this is the way the
* length field is usually used */
#ifdef RX_ENABLE_TSFPQ
-struct rx_packet *
+static struct rx_packet *
rxi_AllocPacketNoLock(int class)
{
struct rx_packet *p;
if (rx_stats_active) {
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receivePktAllocFailures);
break;
case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendPktAllocFailures);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.specialPktAllocFailures);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receiveCbufPktAllocFailures);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendCbufPktAllocFailures);
break;
}
}
#endif /* KERNEL */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.packetRequests);
if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
#ifdef KERNEL
return p;
}
#else /* RX_ENABLE_TSFPQ */
-struct rx_packet *
+static struct rx_packet *
rxi_AllocPacketNoLock(int class)
{
struct rx_packet *p;
if (rx_stats_active) {
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receivePktAllocFailures);
break;
case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendPktAllocFailures);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.specialPktAllocFailures);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receiveCbufPktAllocFailures);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendCbufPktAllocFailures);
break;
}
}
#endif /* KERNEL */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.packetRequests);
#ifdef KERNEL
if (queue_IsEmpty(&rx_freePacketQueue))
#endif /* RX_ENABLE_TSFPQ */
#ifdef RX_ENABLE_TSFPQ
-struct rx_packet *
+static struct rx_packet *
rxi_AllocPacketTSFPQ(int class, int pull_global)
{
struct rx_packet *p;
RX_TS_INFO_GET(rx_ts_info);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.packetRequests);
if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
MUTEX_ENTER(&rx_freePktQ_lock);
u_short * port)
{
struct sockaddr_in from;
- unsigned int nbytes;
+ int nbytes;
afs_int32 rlen;
afs_uint32 tlen, savelen;
struct msghdr msg;
p->wirevec[p->niovecs - 1].iov_len = savelen;
p->length = (u_short)(nbytes - RX_HEADER_SIZE);
- if ((nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */
+ if (nbytes < 0 || (nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */
if (nbytes < 0 && errno == EWOULDBLOCK) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.noPacketOnRead);
} else if (nbytes <= 0) {
if (rx_stats_active) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.bogusPacketOnRead++;
+ rx_atomic_inc(&rx_stats.bogusPacketOnRead);
rx_stats.bogusHost = from.sin_addr.s_addr;
- MUTEX_EXIT(&rx_stats_mutex);
}
- dpf(("B: bogus packet from [%x,%d] nb=%d", ntohl(from.sin_addr.s_addr),
+ dpf(("B: bogus packet from [%x,%d] nb=%d\n", ntohl(from.sin_addr.s_addr),
ntohs(from.sin_port), nbytes));
}
return 0;
*host = from.sin_addr.s_addr;
*port = from.sin_port;
- dpf(("Dropped %d %s: %x.%u.%u.%u.%u.%u.%u flags %d len %d",
+ dpf(("Dropped %d %s: %x.%u.%u.%u.%u.%u.%u flags %d len %d\n",
p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(*host), ntohs(*port), p->header.serial,
p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags,
p->length));
*host = from.sin_addr.s_addr;
*port = from.sin_port;
- if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
- struct rx_peer *peer;
- if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
- /*
- * Try to look up this peer structure. If it doesn't exist,
- * don't create a new one -
- * we don't keep count of the bytes sent/received if a peer
- * structure doesn't already exist.
- *
- * The peer/connection cleanup code assumes that there is 1 peer
- * per connection. If we actually created a peer structure here
- * and this packet was an rxdebug packet, the peer structure would
- * never be cleaned up.
- */
- peer = rxi_FindPeer(*host, *port, 0, 0);
- /* Since this may not be associated with a connection,
- * it may have no refCount, meaning we could race with
- * ReapConnections
- */
- if (peer && (peer->refCount > 0)) {
- MUTEX_ENTER(&peer->peer_lock);
- hadd32(peer->bytesReceived, p->length);
- MUTEX_EXIT(&peer->peer_lock);
- }
+ if (rx_stats_active
+ && p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
+
+ rx_atomic_inc(&rx_stats.packetsRead[p->header.type - 1]);
}
#ifdef RX_TRIMDATABUFS
#endif /* AFS_SUN5_ENV */
#if !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN80_ENV)
+#if defined(AFS_NBSD_ENV)
+int
+rx_mb_to_packet(struct mbuf *amb, void (*free) (struct mbuf *), int hdr_len, int data_len, struct rx_packet *phandle)
+#else
int
rx_mb_to_packet(amb, free, hdr_len, data_len, phandle)
#if defined(AFS_SUN5_ENV) || defined(AFS_HPUX110_ENV)
void (*free) ();
struct rx_packet *phandle;
int hdr_len, data_len;
+#endif /* AFS_NBSD_ENV */
{
int code;
tstat.callsExecuted = htonl(rxi_nCalls);
tstat.packetReclaims = htonl(rx_packetReclaims);
tstat.usedFDs = CountFDs(64);
- tstat.nWaiting = htonl(rx_nWaiting);
- tstat.nWaited = htonl(rx_nWaited);
+ tstat.nWaiting = htonl(rx_atomic_read(&rx_nWaiting));
+ tstat.nWaited = htonl(rx_atomic_read(&rx_nWaited));
queue_Count(&rx_idleServerQueue, np, nqe, rx_serverQueueEntry,
tstat.idleThreads);
MUTEX_EXIT(&rx_serverPool_lock);
tpeer.ifMTU = htons(tp->ifMTU);
tpeer.idleWhen = htonl(tp->idleWhen);
tpeer.refCount = htons(tp->refCount);
- tpeer.burstSize = tp->burstSize;
- tpeer.burst = tp->burst;
- tpeer.burstWait.sec = htonl(tp->burstWait.sec);
- tpeer.burstWait.usec = htonl(tp->burstWait.usec);
+ tpeer.burstSize = 0;
+ tpeer.burst = 0;
+ tpeer.burstWait.sec = 0;
+ tpeer.burstWait.usec = 0;
tpeer.rtt = htonl(tp->rtt);
tpeer.rtt_dev = htonl(tp->rtt_dev);
- tpeer.timeout.sec = htonl(tp->timeout.sec);
- tpeer.timeout.usec = htonl(tp->timeout.usec);
tpeer.nSent = htonl(tp->nSent);
tpeer.reSends = htonl(tp->reSends);
- tpeer.inPacketSkew = htonl(tp->inPacketSkew);
- tpeer.outPacketSkew = htonl(tp->outPacketSkew);
- tpeer.rateFlag = htonl(tp->rateFlag);
tpeer.natMTU = htons(tp->natMTU);
tpeer.maxMTU = htons(tp->maxMTU);
tpeer.maxDgramPackets = htons(tp->maxDgramPackets);
afs_Trace1(afs_iclSetp, CM_TRACE_TIMESTAMP, ICL_TYPE_STRING,
"before osi_NetSend()");
AFS_GUNLOCK();
- } else
+ }
#else
if (waslocked)
AFS_GUNLOCK();
"after osi_NetSend()");
if (!waslocked)
AFS_GUNLOCK();
- } else
+ }
#else
if (waslocked)
AFS_GLOCK();
afs_Trace1(afs_iclSetp, CM_TRACE_TIMESTAMP, ICL_TYPE_STRING,
"before osi_NetSend()");
AFS_GUNLOCK();
- } else
+ }
#else
if (waslocked)
AFS_GUNLOCK();
p->length + RX_HEADER_SIZE, istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
- p->retryTime = p->timeSent; /* resend it very soon */
- clock_Addmsec(&(p->retryTime),
- 10 + (((afs_uint32) p->backoff) << 8));
+ rx_atomic_inc(&rx_stats.netSendFailures);
+ p->flags &= ~RX_PKTFLAG_SENT; /* mark not-sent so it is eligible for resend */
+
/* Some systems are nice and tell us right away that we cannot
* reach this recipient by returning an error code.
* So, when this happens let's "down" the host NOW so
"after osi_NetSend()");
if (!waslocked)
AFS_GUNLOCK();
- } else
+ }
#else
if (waslocked)
AFS_GLOCK();
#endif
#ifdef RXDEBUG
}
- dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%.3d len %d",
+ dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" len %d\n",
deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host),
ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber,
- p->header.seq, p->header.flags, p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+ p->header.seq, p->header.flags, p, p->length));
#endif
- if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
- MUTEX_ENTER(&peer->peer_lock);
- hadd32(peer->bytesSent, p->length);
- MUTEX_EXIT(&peer->peer_lock);
+ if (rx_stats_active) {
+ rx_atomic_inc(&rx_stats.packetsSent[p->header.type - 1]);
+ MUTEX_ENTER(&peer->peer_lock);
+ hadd32(peer->bytesSent, p->length);
+ MUTEX_EXIT(&peer->peer_lock);
+ }
}
/* Send a list of packets to appropriate destination for the specified
istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.netSendFailures);
for (i = 0; i < len; i++) {
p = list[i];
- p->retryTime = p->timeSent; /* resend it very soon */
- clock_Addmsec(&(p->retryTime),
- 10 + (((afs_uint32) p->backoff) << 8));
+ p->flags &= ~RX_PKTFLAG_SENT; /* mark not-sent so it is eligible for resend */
}
/* Some systems are nice and tell us right away that we cannot
* reach this recipient by returning an error code.
#ifdef RXDEBUG
}
- assert(p != NULL);
+ osi_Assert(p != NULL);
- dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%.3d len %d",
+ dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" len %d\n",
deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host),
ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber,
- p->header.seq, p->header.flags, p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+ p->header.seq, p->header.flags, p, p->length));
#endif
- if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
- MUTEX_ENTER(&peer->peer_lock);
- hadd32(peer->bytesSent, p->length);
- MUTEX_EXIT(&peer->peer_lock);
+ if (rx_stats_active) {
+ rx_atomic_inc(&rx_stats.packetsSent[p->header.type - 1]);
+ MUTEX_ENTER(&peer->peer_lock);
+ hadd32(peer->bytesSent, p->length);
+ MUTEX_EXIT(&peer->peer_lock);
+ }
}
/* Note: top 16 bits of this last word are the security checksum */
}
+/*
+ * LOCKS HELD: called with call->lock held.
+ *
+ * PrepareSendPacket is the only place in the code that
+ * can increment call->tnext. This could become an atomic
+ * in the future. Beyond that there is nothing in this
+ * function that requires the call being locked. This
+ * function can only be called by the application thread.
+ */
void
rxi_PrepareSendPacket(struct rx_call *call,
struct rx_packet *p, int last)
{
struct rx_connection *conn = call->conn;
+ afs_uint32 seq = call->tnext++;
unsigned int i;
afs_int32 len; /* len must be a signed type; it can go negative */
- p->flags &= ~RX_PKTFLAG_ACKED;
- p->header.cid = (conn->cid | call->channel);
- p->header.serviceId = conn->serviceId;
- p->header.securityIndex = conn->securityIndex;
-
/* No data packets on call 0. Where do these come from? */
if (*call->callNumber == 0)
*call->callNumber = 1;
+ MUTEX_EXIT(&call->lock);
+ p->flags &= ~(RX_PKTFLAG_ACKED | RX_PKTFLAG_SENT);
+
+ p->header.cid = (conn->cid | call->channel);
+ p->header.serviceId = conn->serviceId;
+ p->header.securityIndex = conn->securityIndex;
+
p->header.callNumber = *call->callNumber;
- p->header.seq = call->tnext++;
+ p->header.seq = seq;
p->header.epoch = conn->epoch;
p->header.type = RX_PACKET_TYPE_DATA;
p->header.flags = 0;
if (last)
p->header.flags |= RX_LAST_PACKET;
- clock_Zero(&p->retryTime); /* Never yet transmitted */
clock_Zero(&p->firstSent); /* Never yet transmitted */
p->header.serial = 0; /* Another way of saying never transmitted... */
- p->backoff = 0;
/* Now that we're sure this is the last data on the call, make sure
* that the "length" and the sum of the iov_lens matches. */
}
if (len)
p->wirevec[i - 1].iov_len += len;
+ MUTEX_ENTER(&call->lock);
RXS_PreparePacket(conn->securityObject, call, p);
}
#endif
for (p = rx_mallocedP; p; p = p->allNextp) {
- RXDPRINTF(RXDPRINTOUT, "%s - packet=0x%p, id=%u, firstSent=%u.%08u, timeSent=%u.%08u, retryTime=%u.%08u, firstSerial=%u, niovecs=%u, flags=0x%x, backoff=%u, length=%u header: epoch=%u, cid=%u, callNum=%u, seq=%u, serial=%u, type=%u, flags=0x%x, userStatus=%u, securityIndex=%u, serviceId=%u\r\n",
- cookie, p, p->packetId, p->firstSent.sec, p->firstSent.usec, p->timeSent.sec, p->timeSent.usec, p->retryTime.sec, p->retryTime.usec,
- p->firstSerial, p->niovecs, (afs_uint32)p->flags, (afs_uint32)p->backoff, (afs_uint32)p->length,
+ RXDPRINTF(RXDPRINTOUT, "%s - packet=0x%p, id=%u, firstSent=%u.%08u, timeSent=%u.%08u, firstSerial=%u, niovecs=%u, flags=0x%x, length=%u header: epoch=%u, cid=%u, callNum=%u, seq=%u, serial=%u, type=%u, flags=0x%x, userStatus=%u, securityIndex=%u, serviceId=%u\r\n",
+ cookie, p, p->packetId, p->firstSent.sec, p->firstSent.usec, p->timeSent.sec, p->timeSent.usec,
+ p->firstSerial, p->niovecs, (afs_uint32)p->flags, (afs_uint32)p->length,
p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.serial,
(afs_uint32)p->header.type, (afs_uint32)p->header.flags, (afs_uint32)p->header.userStatus,
(afs_uint32)p->header.securityIndex, (afs_uint32)p->header.serviceId);