/*
* Copyright 2000, International Business Machines Corporation and others.
* All Rights Reserved.
- *
+ *
* This software has been released under the terms of the IBM Public
* License. For details, see the LICENSE file in the top-level source
* directory or online at http://www.openafs.org/dl/license10.html
#include <afs/param.h>
#endif
-
#ifdef KERNEL
#if defined(UKERNEL)
#include "afs/sysincludes.h"
#include "rx/rx_clock.h"
#include "rx/rx_queue.h"
#include "rx/rx_packet.h"
+#include "rx/rx_atomic.h"
+#include "rx/rx_internal.h"
+#include "rx/rx_stats.h"
#else /* defined(UKERNEL) */
#ifdef RX_KERNEL_TRACE
#include "../rx/rx_kcommon.h"
#ifndef AFS_LINUX20_ENV
#include "h/systm.h"
#endif
-#if defined(AFS_SGI_ENV) || defined(AFS_HPUX110_ENV)
+#if defined(AFS_SGI_ENV) || defined(AFS_HPUX110_ENV) || defined(AFS_NBSD50_ENV)
#include "afs/sysincludes.h"
#endif
#if defined(AFS_OBSD_ENV)
#include "rx_kmutex.h"
#include "rx/rx_clock.h"
#include "rx/rx_queue.h"
+#include "rx_atomic.h"
#ifdef AFS_SUN5_ENV
#include <sys/sysmacros.h>
#endif
#include "rx/rx_packet.h"
+#include "rx_internal.h"
+#include "rx_stats.h"
#endif /* defined(UKERNEL) */
#include "rx/rx_globals.h"
#else /* KERNEL */
#include <sys/sysmacros.h>
#endif
#include "rx_packet.h"
+#include "rx_atomic.h"
#include "rx_globals.h"
+#include "rx_internal.h"
+#include "rx_stats.h"
#include <lwp.h>
#include <assert.h>
#include <string.h>
static int AllocPacketBufs(int class, int num_pkts, struct rx_queue *q);
static void rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
- afs_int32 ahost, short aport,
+ afs_uint32 ahost, short aport,
afs_int32 istack);
#ifdef RX_ENABLE_TSFPQ
*/
/* Preconditions:
- * all packet buffers (iov_base) are integral multiples of
+ * all packet buffers (iov_base) are integral multiples of
* the word size.
* offset is an integral multiple of the word size.
*/
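/* A minimal sketch, not part of the patch, of checking the precondition
 * above; "word size" is taken to mean sizeof(afs_int32) and the helper
 * name is invented for illustration. */
static int
rxi_WordAlignedOk(struct rx_packet *p, size_t offset)
{
    unsigned int i;

    if (offset % sizeof(afs_int32) != 0)
        return 0;               /* offset must be word-aligned */
    for (i = 0; i < p->niovecs; i++) {
        if ((size_t)(p->wirevec[i].iov_base) % sizeof(afs_int32) != 0)
            return 0;           /* every buffer must be word-aligned */
    }
    return 1;
}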
MUTEX_ENTER(&rx_freePktQ_lock);
#ifdef KERNEL
- for (; (num_pkts > 0) && (rxi_OverQuota2(class,num_pkts));
+ for (; (num_pkts > 0) && (rxi_OverQuota2(class,num_pkts));
num_pkts--, overq++);
if (overq) {
if (rx_stats_active) {
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receivePktAllocFailures);
break;
case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendPktAllocFailures);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.specialPktAllocFailures);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receiveCbufPktAllocFailures);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendCbufPktAllocFailures);
break;
}
}
#endif /* KERNEL */
for (i=0, c=queue_First(&rx_freePacketQueue, rx_packet);
- i < num_pkts;
+ i < num_pkts;
i++, c=queue_Next(c, rx_packet)) {
RX_FPQ_MARK_USED(c);
}
#endif /* RX_ENABLE_TSFPQ */
/* this one is kind of awful.
- * In rxkad, the packet has been all shortened, and everything, ready for
+ * In rxkad, the packet has been all shortened, and everything, ready for
* sending. All of a sudden, we discover we need some of that space back.
* This isn't terribly general, because it knows that the packets are only
* rounded up to the EBS (userdata + security header).
for (e = p + apackets; p < e; p++) {
RX_PACKET_IOV_INIT(p);
+#ifdef RX_TRACK_PACKETS
p->flags |= RX_PKTFLAG_FREE;
+#endif
p->niovecs = 2;
queue_Append(&rx_freePacketQueue, p);
RX_PACKET_IOV_INIT(p);
p->niovecs = 2;
RX_TS_FPQ_CHECKIN(rx_ts_info,p);
-
+
NETPRI;
MUTEX_ENTER(&rx_freePktQ_lock);
#ifdef RXDEBUG_PACKET
}
rx_ts_info->_FPQ.delta += apackets;
- if (flush_global &&
+ if (flush_global &&
(num_keep_local < apackets)) {
NETPRI;
MUTEX_ENTER(&rx_freePktQ_lock);
#ifdef RX_ENABLE_TSFPQ
RX_TS_INFO_GET(rx_ts_info);
RX_TS_FPQ_GLOBAL_ALLOC(rx_ts_info,apackets);
-#endif /* RX_ENABLE_TSFPQ */
+#endif /* RX_ENABLE_TSFPQ */
for (e = p + apackets; p < e; p++) {
RX_PACKET_IOV_INIT(p);
+#ifdef RX_TRACK_PACKETS
p->flags |= RX_PKTFLAG_FREE;
+#endif
p->niovecs = 2;
queue_Append(&rx_freePacketQueue, p);
In any event, we assume the former, and append the packets to the end
of the free list. */
/* This explanation is bogus. The free list doesn't remain in any kind of
- useful order for afs_int32: the packets in use get pretty much randomly scattered
+ useful order for afs_int32: the packets in use get pretty much randomly scattered
across all the pages. In order to permit unused {packets,bufs} to page out, they
- must be stored so that packets which are adjacent in memory are adjacent in the
+ must be stored so that packets which are adjacent in memory are adjacent in the
free list. An array springs rapidly to mind.
*/
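/* A minimal sketch of the array idea mooted above, not part of the
 * patch and with invented names: packets carved from one contiguous
 * block stay adjacent in memory, so an index-ordered free array keeps
 * neighbours together and lets untouched pages swap out.
 *
 *     struct rx_packet *pool;
 *     pool = osi_Alloc(npkts * sizeof(struct rx_packet));
 *     for (i = 0; i < npkts; i++)
 *         freeIndex[i] = i;
 */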
}
#endif /* RX_ENABLE_TSFPQ */
-/*
+/*
* free continuation buffers off a packet into a queue
*
* [IN] p -- packet from which continuation buffers will be freed
int rxi_nBadIovecs = 0;
-/* rxi_RestoreDataBufs
+/* rxi_RestoreDataBufs
*
* Restore the correct sizes to the iovecs. Called when reusing a packet
* for reading off the wire.
}
#endif /* RX_ENABLE_TSFPQ */
-/* rxi_AllocPacket sets up p->length so it reflects the number of
+/* rxi_AllocPacket sets up p->length so it reflects the number of
* bytes in the packet at this point, **not including** the header.
* The header is absolutely necessary, besides, this is the way the
* length field is usually used */
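/* A minimal sketch, not part of the patch, of the convention just
 * described; the helper name is invented for illustration. */
static size_t
rxi_WireBytes(struct rx_packet *p)
{
    /* p->length counts data bytes only; the encoded rx header is extra */
    return (size_t)p->length + RX_HEADER_SIZE;
}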
if (rx_stats_active) {
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receivePktAllocFailures);
break;
case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendPktAllocFailures);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.specialPktAllocFailures);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receiveCbufPktAllocFailures);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendCbufPktAllocFailures);
break;
}
}
#endif /* KERNEL */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.packetRequests);
if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
#ifdef KERNEL
/* have to do this here because rx_FlushWrite fiddles with the iovs in
- * order to truncate outbound packets. In the near future, may need
+ * order to truncate outbound packets. In the near future, may need
* to allocate bufs from a static pool here, and/or in AllocSendPacket
*/
RX_PACKET_IOV_FULLINIT(p);
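/* Assumption, not from the patch: RX_PACKET_IOV_FULLINIT presumably
 * re-points wirevec[0] at the packet header and wirevec[1] at localdata
 * with their full default lengths, undoing the truncation described
 * above. */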
if (rx_stats_active) {
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receivePktAllocFailures);
break;
case RX_PACKET_CLASS_SEND:
- rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendPktAllocFailures);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.specialPktAllocFailures);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.receiveCbufPktAllocFailures);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.sendCbufPktAllocFailures);
break;
}
}
#endif /* KERNEL */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.packetRequests);
#ifdef KERNEL
if (queue_IsEmpty(&rx_freePacketQueue))
/* have to do this here because rx_FlushWrite fiddles with the iovs in
- * order to truncate outbound packets. In the near future, may need
+ * order to truncate outbound packets. In the near future, may need
* to allocate bufs from a static pool here, and/or in AllocSendPacket
*/
RX_PACKET_IOV_FULLINIT(p);
RX_TS_INFO_GET(rx_ts_info);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.packetRequests);
if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
MUTEX_ENTER(&rx_freePktQ_lock);
dpf(("Alloc %"AFS_PTR_FMT", class %d\n", p, class));
/* have to do this here because rx_FlushWrite fiddles with the iovs in
- * order to truncate outbound packets. In the near future, may need
+ * order to truncate outbound packets. In the near future, may need
* to allocate bufs from a static pool here, and/or in AllocSendPacket
*/
RX_PACKET_IOV_FULLINIT(p);
}
#ifndef KERNEL
-#ifdef AFS_NT40_ENV
+#ifdef AFS_NT40_ENV
/* Windows does not use file descriptors. */
#define CountFDs(amax) 0
#else
} else
tlen = rlen;
- /* Extend the last iovec for padding, it's just to make sure that the
+ /* Extend the last iovec for padding, it's just to make sure that the
* read doesn't return more data than we expect, and is done to get around
* our problems caused by the lack of a length field in the rx header.
* Use the extra buffer that follows the localdata in each packet
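/* A minimal sketch of the pattern described above, not part of the
 * patch; RX_EXTRABUFFERSIZE is assumed to name the spare room after
 * localdata:
 *
 *     savelen = p->wirevec[p->niovecs - 1].iov_len;
 *     p->wirevec[p->niovecs - 1].iov_len += RX_EXTRABUFFERSIZE;
 *     ... recvmsg-style read into p->wirevec ...
 *     p->wirevec[p->niovecs - 1].iov_len = savelen;
 */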
if ((nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */
if (nbytes < 0 && errno == EWOULDBLOCK) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.noPacketOnRead);
} else if (nbytes <= 0) {
if (rx_stats_active) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.bogusPacketOnRead++;
+ rx_atomic_inc(&rx_stats.bogusPacketOnRead);
rx_stats.bogusHost = from.sin_addr.s_addr;
- MUTEX_EXIT(&rx_stats_mutex);
}
dpf(("B: bogus packet from [%x,%d] nb=%d", ntohl(from.sin_addr.s_addr),
ntohs(from.sin_port), nbytes));
}
return 0;
- }
+ }
#ifdef RXDEBUG
else if ((rx_intentionallyDroppedOnReadPer100 > 0)
&& (random() % 100 < rx_intentionallyDroppedOnReadPer100)) {
*port = from.sin_port;
dpf(("Dropped %d %s: %x.%u.%u.%u.%u.%u.%u flags %d len %d",
- p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(*host), ntohs(*port), p->header.serial,
- p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags,
+ p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(*host), ntohs(*port), p->header.serial,
+ p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags,
p->length));
#ifdef RX_TRIMDATABUFS
rxi_TrimDataBufs(p, 1);
#endif
return 0;
- }
+ }
#endif
else {
/* Extract packet header. */
*host = from.sin_addr.s_addr;
*port = from.sin_port;
if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
- struct rx_peer *peer;
- if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
- /*
- * Try to look up this peer structure. If it doesn't exist,
- * don't create a new one -
- * we don't keep count of the bytes sent/received if a peer
- * structure doesn't already exist.
- *
- * The peer/connection cleanup code assumes that there is 1 peer
- * per connection. If we actually created a peer structure here
- * and this packet was an rxdebug packet, the peer structure would
- * never be cleaned up.
- */
- peer = rxi_FindPeer(*host, *port, 0, 0);
- /* Since this may not be associated with a connection,
- * it may have no refCount, meaning we could race with
- * ReapConnections
- */
- if (peer && (peer->refCount > 0)) {
- MUTEX_ENTER(&peer->peer_lock);
- hadd32(peer->bytesReceived, p->length);
- MUTEX_EXIT(&peer->peer_lock);
- }
+ if (rx_stats_active) {
+ struct rx_peer *peer;
+ rx_atomic_inc(&rx_stats.packetsRead[p->header.type - 1]);
+ /*
+ * Try to look up this peer structure. If it doesn't exist,
+ * don't create a new one -
+ * we don't keep count of the bytes sent/received if a peer
+ * structure doesn't already exist.
+ *
+ * The peer/connection cleanup code assumes that there is 1 peer
+ * per connection. If we actually created a peer structure here
+ * and this packet was an rxdebug packet, the peer structure would
+ * never be cleaned up.
+ */
+ peer = rxi_FindPeer(*host, *port, 0, 0);
+ /* Since this may not be associated with a connection,
+ * it may have no refCount, meaning we could race with
+ * ReapConnections
+ */
+ if (peer && (peer->refCount > 0)) {
+ MUTEX_ENTER(&peer->peer_lock);
+ hadd32(peer->bytesReceived, p->length);
+ MUTEX_EXIT(&peer->peer_lock);
+ }
+ }
}
#ifdef RX_TRIMDATABUFS
/* Free any empty packet buffers at the end of this packet */
rxi_TrimDataBufs(p, 1);
-#endif
+#endif
return 1;
}
}
* last two pad bytes. */
struct rx_packet *
-rxi_SplitJumboPacket(struct rx_packet *p, afs_int32 host, short port,
+rxi_SplitJumboPacket(struct rx_packet *p, afs_uint32 host, short port,
int first)
{
struct rx_packet *np;
}
/* MTUXXX Supposed to skip <off> bytes and copy <len> bytes,
- * but it doesn't really.
- * This sucks, anyway, do it like m_cpy.... below
+ * but it doesn't really.
+ * This sucks, anyway, do it like m_cpy.... below
*/
static int
cpytoiovec(mblk_t * mp, int off, int len, struct iovec *iovs,
struct rx_packet *
rxi_ReceiveDebugPacket(struct rx_packet *ap, osi_socket asocket,
- afs_int32 ahost, short aport, int istack)
+ afs_uint32 ahost, short aport, int istack)
{
struct rx_debugIn tin;
afs_int32 tl;
}
rx_packetread(ap, 0, sizeof(struct rx_debugIn), (char *)&tin);
- /* all done with packet, now set length to the truth, so we can
+ /* all done with packet, now set length to the truth, so we can
* reuse this packet */
rx_computelen(ap, ap->length);
tstat.callsExecuted = htonl(rxi_nCalls);
tstat.packetReclaims = htonl(rx_packetReclaims);
tstat.usedFDs = CountFDs(64);
- tstat.nWaiting = htonl(rx_nWaiting);
- tstat.nWaited = htonl(rx_nWaited);
+ tstat.nWaiting = htonl(rx_atomic_read(&rx_nWaiting));
+ tstat.nWaited = htonl(rx_atomic_read(&rx_nWaited));
queue_Count(&rx_idleServerQueue, np, nqe, rx_serverQueueEntry,
tstat.idleThreads);
MUTEX_EXIT(&rx_serverPool_lock);
#endif
#endif
MUTEX_ENTER(&rx_connHashTable_lock);
- /* We might be slightly out of step since we are not
+ /* We might be slightly out of step since we are not
* locking each call, but this is only debugging output.
*/
for (tc = rx_connHashTable[i]; tc; tc = tc->next) {
struct rx_packet *
rxi_ReceiveVersionPacket(struct rx_packet *ap, osi_socket asocket,
- afs_int32 ahost, short aport, int istack)
+ afs_uint32 ahost, short aport, int istack)
{
afs_int32 tl;
/* send a debug packet back to the sender */
static void
rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
- afs_int32 ahost, short aport, afs_int32 istack)
+ afs_uint32 ahost, short aport, afs_int32 istack)
{
struct sockaddr_in taddr;
unsigned int i, nbytes, savelen = 0;
MUTEX_ENTER(&conn->conn_data_lock);
p->header.serial = ++conn->serial;
if (p->length > conn->peer->maxPacketSize) {
- if (p->header.seq != 0) {
+ if ((p->header.type == RX_PACKET_TYPE_ACK) &&
+ (p->header.flags & RX_REQUEST_ACK)) {
+ conn->lastPingSize = p->length;
+ conn->lastPingSizeSer = p->header.serial;
+ } else if (p->header.seq != 0) {
conn->lastPacketSize = p->length;
conn->lastPacketSizeSeq = p->header.seq;
}
}
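/* A hedged note, not part of the patch: recording lastPingSize and
 * lastPingSizeSer (or lastPacketSize and lastPacketSizeSeq for sequenced
 * packets) presumably lets the ack-processing path confirm that a packet
 * of that size actually crossed the network, so peer->maxPacketSize can
 * be grown accordingly. */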
MUTEX_EXIT(&conn->conn_data_lock);
- /* This is so we can adjust retransmit time-outs better in the face of
+ /* This is so we can adjust retransmit time-outs better in the face of
* rapidly changing round-trip times. RTO estimation is not a la Karn.
*/
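/* A sketch of the idea, not part of the patch: every (re)transmission is
 * stamped with a fresh serial, so an ack identifies the exact
 * transmission it answers and the RTT sample stays valid even after a
 * retransmit. Karn's algorithm, lacking such disambiguation, must
 * discard RTT samples from retransmitted packets. */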
if (p->firstSerial == 0) {
#endif
/* Get network byte order header */
- rxi_EncodePacketHeader(p); /* XXX in the event of rexmit, etc, don't need to
+ rxi_EncodePacketHeader(p); /* XXX in the event of rexmit, etc, don't need to
* touch ALL the fields */
/* Send the packet out on the same socket that related packets are being
p->length + RX_HEADER_SIZE, istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.netSendFailures);
p->retryTime = p->timeSent; /* resend it very soon */
clock_Addmsec(&(p->retryTime),
10 + (((afs_uint32) p->backoff) << 8));
/* Some systems are nice and tell us right away that we cannot
- * reach this recipient by returning an error code.
+ * reach this recipient by returning an error code.
* So, when this happens let's "down" the host NOW so
* we don't sit around waiting for this host to timeout later.
*/
- if (call &&
+ if (call &&
#ifdef AFS_NT40_ENV
(code == -1 && WSAGetLastError() == WSAEHOSTUNREACH) || (code == -WSAEHOSTUNREACH)
#elif defined(AFS_LINUX20_ENV)
ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber,
p->header.seq, p->header.flags, p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
- if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
- MUTEX_ENTER(&peer->peer_lock);
- hadd32(peer->bytesSent, p->length);
- MUTEX_EXIT(&peer->peer_lock);
+ if (rx_stats_active) {
+ rx_atomic_inc(&rx_stats.packetsSent[p->header.type - 1]);
+ MUTEX_ENTER(&peer->peer_lock);
+ hadd32(peer->bytesSent, p->length);
+ MUTEX_EXIT(&peer->peer_lock);
+ }
}
/* Send a list of packets to appropriate destination for the specified
for (i = 0; i < len; i++) {
p = list[i];
if (p->length > conn->peer->maxPacketSize) {
- if ((p->header.seq != 0) &&
- ((i == 0) || (p->length >= conn->lastPacketSize))) {
- conn->lastPacketSize = p->length;
- conn->lastPacketSizeSeq = p->header.seq;
+ /* a ping *or* a sequenced packet can count */
+ if (((p->header.type == RX_PACKET_TYPE_ACK) &&
+ (p->header.flags & RX_REQUEST_ACK)) &&
+ ((i == 0) || (p->length >= conn->lastPingSize))) {
+ conn->lastPingSize = p->length;
+ conn->lastPingSizeSer = serial + i;
+ } else if ((p->header.seq != 0) &&
+ ((i == 0) || (p->length >= conn->lastPacketSize))) {
+ conn->lastPacketSize = p->length;
+ conn->lastPacketSizeSeq = p->header.seq;
}
}
}
/* Pre-increment, to guarantee no zero serial number; a zero
* serial number means the packet was never sent. */
p->header.serial = ++serial;
- /* This is so we can adjust retransmit time-outs better in the face of
+ /* This is so we can adjust retransmit time-outs better in the face of
* rapidly changing round-trip times. RTO estimation is not a la Karn.
*/
if (p->firstSerial == 0) {
#endif
/* Get network byte order header */
- rxi_EncodePacketHeader(p); /* XXX in the event of rexmit, etc, don't need to
+ rxi_EncodePacketHeader(p); /* XXX in the event of rexmit, etc, don't need to
* touch ALL the fields */
}
istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.netSendFailures);
for (i = 0; i < len; i++) {
p = list[i];
p->retryTime = p->timeSent; /* resend it very soon */
10 + (((afs_uint32) p->backoff) << 8));
}
/* Some systems are nice and tell us right away that we cannot
- * reach this recipient by returning an error code.
+ * reach this recipient by returning an error code.
* So, when this happens let's "down" the host NOW so
* we don't sit around waiting for this host to timeout later.
*/
- if (call &&
+ if (call &&
#ifdef AFS_NT40_ENV
(code == -1 && WSAGetLastError() == WSAEHOSTUNREACH) || (code == -WSAEHOSTUNREACH)
#elif defined(AFS_LINUX20_ENV)
p->header.seq, p->header.flags, p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
- if (rx_stats_active)
- rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
- MUTEX_ENTER(&peer->peer_lock);
- hadd32(peer->bytesSent, p->length);
- MUTEX_EXIT(&peer->peer_lock);
+ if (rx_stats_active) {
+ rx_atomic_inc(&rx_stats.packetsSent[p->header.type - 1]);
+ MUTEX_ENTER(&peer->peer_lock);
+ hadd32(peer->bytesSent, p->length);
+ MUTEX_EXIT(&peer->peer_lock);
+ }
}
}
#ifndef KERNEL
-/*
+/*
* This function can be used by the Windows Cache Manager
* to dump the list of all rx packets so that we can determine
* where the packet leakage is.
for (p = rx_mallocedP; p; p = p->allNextp) {
RXDPRINTF(RXDPRINTOUT, "%s - packet=0x%p, id=%u, firstSent=%u.%08u, timeSent=%u.%08u, retryTime=%u.%08u, firstSerial=%u, niovecs=%u, flags=0x%x, backoff=%u, length=%u header: epoch=%u, cid=%u, callNum=%u, seq=%u, serial=%u, type=%u, flags=0x%x, userStatus=%u, securityIndex=%u, serviceId=%u\r\n",
- cookie, p, p->packetId, p->firstSent.sec, p->firstSent.usec, p->timeSent.sec, p->timeSent.usec, p->retryTime.sec, p->retryTime.usec,
+ cookie, p, p->packetId, p->firstSent.sec, p->firstSent.usec, p->timeSent.sec, p->timeSent.usec, p->retryTime.sec, p->retryTime.usec,
p->firstSerial, p->niovecs, (afs_uint32)p->flags, (afs_uint32)p->backoff, (afs_uint32)p->length,
p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.serial,
- (afs_uint32)p->header.type, (afs_uint32)p->header.flags, (afs_uint32)p->header.userStatus,
+ (afs_uint32)p->header.type, (afs_uint32)p->header.flags, (afs_uint32)p->header.userStatus,
(afs_uint32)p->header.securityIndex, (afs_uint32)p->header.serviceId);
#ifdef AFS_NT40_ENV
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);