#include "rx_globals.h"
#include <lwp.h>
#include <assert.h>
-#ifdef HAVE_STRING_H
#include <string.h>
-#else
-#ifdef HAVE_STRINGS_H
-#include <strings.h>
-#endif
-#endif
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
static int
AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
{
- register struct rx_packet *c;
register struct rx_ts_info_t * rx_ts_info;
int transfer, alloc;
SPLVAR;
if (overq) {
rxi_NeedMorePackets = TRUE;
- MUTEX_ENTER(&rx_stats_mutex);
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_stats.receivePktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND:
- rx_stats.sendPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_stats.specialPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_stats.receiveCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_stats.sendCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
break;
}
- MUTEX_EXIT(&rx_stats_mutex);
}
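/*
 * The hunks above and below collapse each MUTEX_ENTER / counter++ / MUTEX_EXIT
 * triplet into a single rx_MutexIncrement(counter, mutex) call.  A minimal
 * sketch of such a macro, assuming it simply wraps the same
 * lock-increment-unlock sequence (the real definition lives in rx_globals.h
 * and may differ):
 *
 * #define rx_MutexIncrement(object, mutex) \
 *     do { \
 *         MUTEX_ENTER(&(mutex)); \
 *         (object)++; \
 *         MUTEX_EXIT(&(mutex)); \
 *     } while (0)
 */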
if (rx_nFreePackets < num_pkts)
#ifdef KERNEL
if (rxi_OverQuota(class)) {
rxi_NeedMorePackets = TRUE;
- MUTEX_ENTER(&rx_stats_mutex);
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_stats.receivePktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND:
- rx_stats.sendPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_stats.specialPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_stats.receiveCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_stats.sendCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
break;
}
- MUTEX_EXIT(&rx_stats_mutex);
- return (struct rx_packet *)0;
+ return (struct rx_packet *)0;
}
#endif /* KERNEL */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetRequests++;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
#ifdef KERNEL
if (rxi_OverQuota(class)) {
rxi_NeedMorePackets = TRUE;
- MUTEX_ENTER(&rx_stats_mutex);
switch (class) {
case RX_PACKET_CLASS_RECEIVE:
- rx_stats.receivePktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receivePktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND:
- rx_stats.sendPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SPECIAL:
- rx_stats.specialPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.specialPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_RECV_CBUF:
- rx_stats.receiveCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.receiveCbufPktAllocFailures, rx_stats_mutex);
break;
case RX_PACKET_CLASS_SEND_CBUF:
- rx_stats.sendCbufPktAllocFailures++;
+ rx_MutexIncrement(rx_stats.sendCbufPktAllocFailures, rx_stats_mutex);
break;
}
- MUTEX_EXIT(&rx_stats_mutex);
return (struct rx_packet *)0;
}
#endif /* KERNEL */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetRequests++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
#ifdef KERNEL
if (queue_IsEmpty(&rx_freePacketQueue))
RX_TS_INFO_GET(rx_ts_info);
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetRequests++;
- MUTEX_EXIT(&rx_stats_mutex);
-
+ rx_MutexIncrement(rx_stats.packetRequests, rx_stats_mutex);
if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
MUTEX_ENTER(&rx_freePktQ_lock);
p->length = (nbytes - RX_HEADER_SIZE);
if ((nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */
if (nbytes < 0 && errno == EWOULDBLOCK) {
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.noPacketOnRead++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.noPacketOnRead, rx_stats_mutex);
} else if (nbytes <= 0) {
MUTEX_ENTER(&rx_stats_mutex);
rx_stats.bogusPacketOnRead++;
*port = from.sin_port;
if (p->header.type > 0 && p->header.type < RX_N_PACKET_TYPES) {
struct rx_peer *peer;
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetsRead[p->header.type - 1]++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.packetsRead[p->header.type - 1], rx_stats_mutex);
/*
* Try to look up this peer structure. If it doesn't exist,
* don't create a new one -
osi_NetSend(socket, &addr, p->wirevec, p->niovecs,
p->length + RX_HEADER_SIZE, istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.netSendFailures++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
p->retryTime = p->timeSent; /* resend it very soon */
clock_Addmsec(&(p->retryTime),
10 + (((afs_uint32) p->backoff) << 8));
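/* i.e. the retry is scheduled 10 + (backoff << 8) == 10 + backoff*256 msec
 * after timeSent; clock_Addmsec adds milliseconds to the clock value */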
#ifdef AFS_NT40_ENV
code == -1 && WSAGetLastError() == WSAEHOSTUNREACH
#elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
- code == -ENETUNRECH
+ code == -ENETUNREACH
#elif defined(AFS_DARWIN_ENV) && defined(KERNEL)
code == EHOSTUNREACH
#else
}
dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetsSent[p->header.type - 1]++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
MUTEX_ENTER(&peer->peer_lock);
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);
osi_NetSend(socket, &addr, &wirevec[0], len + 1, length,
istack)) != 0) {
/* send failed, so let's hurry up the resend, eh? */
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.netSendFailures++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.netSendFailures, rx_stats_mutex);
for (i = 0; i < len; i++) {
p = list[i];
p->retryTime = p->timeSent; /* resend it very soon */
#ifdef AFS_NT40_ENV
code == -1 && WSAGetLastError() == WSAEHOSTUNREACH
#elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
- code == -ENETUNRECH
+ code == -ENETUNREACH
#elif defined(AFS_DARWIN_ENV) && defined(KERNEL)
code == EHOSTUNREACH
#else
dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
- MUTEX_ENTER(&rx_stats_mutex);
- rx_stats.packetsSent[p->header.type - 1]++;
- MUTEX_EXIT(&rx_stats_mutex);
+ rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
MUTEX_ENTER(&peer->peer_lock);
-
hadd32(peer->bytesSent, p->length);
MUTEX_EXIT(&peer->peer_lock);
}
register struct rx_packet *p, register int last)
{
register struct rx_connection *conn = call->conn;
- int i, j;
+ int i;
ssize_t len; /* len must be a signed type; it can go negative */
p->flags &= ~RX_PKTFLAG_ACKED;