/* RX: Extended Remote Procedure Call */
+#include <afsconfig.h>
#ifdef KERNEL
#include "../afs/param.h"
+#else
+#include <afs/param.h>
+#endif
+
+RCSID("$Header$");
+
+#ifdef KERNEL
#include "../afs/sysincludes.h"
#include "../afs/afsincludes.h"
#ifndef UKERNEL
#endif /* AFS_AIX41_ENV */
# include "../afsint/rxgen_consts.h"
#else /* KERNEL */
-# include <afs/param.h>
# include <sys/types.h>
# include <errno.h>
#ifdef AFS_NT40_ENV
# include <netinet/in.h>
# include <sys/time.h>
#endif
+#ifdef HAVE_STRING_H
+#include <string.h>
+#else
+#ifdef HAVE_STRINGS_H
+#include <strings.h>
+#endif
+#endif
# include "rx.h"
# include "rx_user.h"
# include "rx_clock.h"
# include <afs/rxgen_consts.h>
#endif /* KERNEL */
-#ifdef RXDEBUG
-extern afs_uint32 LWP_ThreadId();
-#endif /* RXDEBUG */
-
int (*registerProgram)() = 0;
int (*swapNameProgram)() = 0;
#define INIT_PTHREAD_LOCKS
#endif
-extern void rxi_DeleteCachedConnections(void);
-
/* Variables for handling the minProcs implementation. availProcs gives the
 * number of threads available in the pool at this moment (not counting dudes
 * executing requests or manipulating the queue).
*/
-extern void rxi_Delay(int);
-
-static int rxi_ServerThreadSelectingCall;
-
#ifdef RX_ENABLE_LOCKS
+static int rxi_ServerThreadSelectingCall;
static afs_kmutex_t rx_rpc_stats;
void rxi_StartUnlocked();
#endif
#define CLEAR_CALL_QUEUE_LOCK(C)
#endif /* RX_ENABLE_LOCKS */
static void rxi_DestroyConnectionNoLock();
-void rxi_DestroyConnection();
-void rxi_CleanupConnection();
struct rx_serverQueueEntry *rx_waitForPacket = 0;
/* ------------Exported Interfaces------------- */
char *htable, *ptable;
int tmp_status;
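+ /* In DJGPP builds, quiet the socket library's console messages unless
+  * we are debugging. */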
+#if defined(AFS_DJGPP_ENV) && !defined(DEBUG)
+ __djgpp_set_quiet_socket(1);
+#endif
+
SPLVAR;
INIT_PTHREAD_LOCKS
rxi_nCalls = 0;
rx_connDeadTime = 12;
rx_tranquil = 0; /* reset flag */
- bzero((char *)&rx_stats, sizeof(struct rx_stats));
+ memset((char *)&rx_stats, 0, sizeof(struct rx_stats));
htable = (char *)
osi_Alloc(rx_hashTableSize*sizeof(struct rx_connection *));
PIN(htable, rx_hashTableSize*sizeof(struct rx_connection *)); /* XXXXX */
- bzero(htable, rx_hashTableSize*sizeof(struct rx_connection *));
+ memset(htable, 0, rx_hashTableSize*sizeof(struct rx_connection *));
ptable = (char *) osi_Alloc(rx_hashTableSize*sizeof(struct rx_peer *));
PIN(ptable, rx_hashTableSize*sizeof(struct rx_peer *)); /* XXXXX */
- bzero(ptable, rx_hashTableSize*sizeof(struct rx_peer *));
+ memset(ptable, 0, rx_hashTableSize*sizeof(struct rx_peer *));
/* Malloc up a bunch of packets & buffers */
rx_nFreePackets = 0;
}
#else /* RX_ENABLE_LOCKS */
-static QuotaOK(aservice)
+static int QuotaOK(aservice)
register struct rx_service *aservice; {
int rc=0;
/* under min quota, we're OK */
void rx_StartServer(donateMe)
{
register struct rx_service *service;
- register int i, nProcs;
+ register int i, nProcs=0;
SPLVAR;
clock_NewTime();
if (donateMe) {
#ifndef AFS_NT40_ENV
#ifndef KERNEL
- int code;
char name[32];
#ifdef AFS_PTHREAD_ENV
pid_t pid;
- pid = pthread_self();
+ pid = (pid_t) pthread_self();
#else /* AFS_PTHREAD_ENV */
PROCESS pid;
- code = LWP_CurrentProcess(&pid);
+ LWP_CurrentProcess(&pid);
#endif /* AFS_PTHREAD_ENV */
sprintf(name,"srv_%d", ++nProcs);
MUTEX_EXIT(&rx_stats_mutex);
}
- if (conn->refCount > 0) {
+ if ((conn->refCount > 0) || (conn->flags & RX_CONN_BUSY)) {
	/* Busy; wait till the last guy is done before proceeding */
MUTEX_EXIT(&conn->conn_data_lock);
USERPRI;
* last reply packets */
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
- rxi_AckAll((struct rxevent *)0, call, 0);
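+ /* If the call is still in precall or active state, push out the pending
+  * delayed ack now; otherwise acknowledge everything outstanding. */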
+ if (call->state == RX_STATE_PRECALL ||
+ call->state == RX_STATE_ACTIVE) {
+ rxi_SendDelayedAck(call->delayedAckEvent, call, 0);
+ } else {
+ rxi_AckAll((struct rxevent *)0, call, 0);
+ }
}
MUTEX_EXIT(&call->lock);
}
clock_GetTime(&queueTime);
AFS_RXGLOCK();
MUTEX_ENTER(&conn->conn_call_lock);
+
+ /*
+ * Check if there are others waiting for a new call.
+ * If so, let them go first to avoid starving them.
+ * This is a fairly simple scheme, and might not be
+ * a complete solution for large numbers of waiters.
+ */
+ if (conn->makeCallWaiters) {
+#ifdef RX_ENABLE_LOCKS
+ CV_WAIT(&conn->conn_call_cv, &conn->conn_call_lock);
+#else
+ osi_rxSleep(conn);
+#endif
+ }
+
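+ /* Scan the connection's channels for a free slot, creating a new call
+  * on the first empty channel we find. */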
for (;;) {
for (i=0; i<RX_MAXCALLS; i++) {
call = conn->call[i];
}
else {
call = rxi_NewCall(conn, i);
- MUTEX_ENTER(&call->lock);
break;
}
}
MUTEX_ENTER(&conn->conn_data_lock);
conn->flags |= RX_CONN_MAKECALL_WAITING;
MUTEX_EXIT(&conn->conn_data_lock);
+
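+ /* Count ourselves as a waiter so that later callers will yield to us
+  * (see the makeCallWaiters check above). */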
+ conn->makeCallWaiters++;
#ifdef RX_ENABLE_LOCKS
CV_WAIT(&conn->conn_call_cv, &conn->conn_call_lock);
#else
osi_rxSleep(conn);
#endif
+ conn->makeCallWaiters--;
}
+ /*
+ * Wake up anyone else who might be giving us a chance to
+ * run (see code above that avoids resource starvation).
+ */
+#ifdef RX_ENABLE_LOCKS
+ CV_BROADCAST(&conn->conn_call_cv);
+#else
+ osi_rxWakeup(conn);
+#endif
CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
return call;
}
+int
rxi_HasActiveCalls(aconn)
register struct rx_connection *aconn; {
register int i;
NETPRI;
for(i=0; i<RX_MAXCALLS; i++) {
- if (tcall = aconn->call[i]) {
+ if ((tcall = aconn->call[i])) {
if ((tcall->state == RX_STATE_ACTIVE)
|| (tcall->state == RX_STATE_PRECALL)) {
USERPRI;
return 0;
}
+int
rxi_GetCallNumberVector(aconn, aint32s)
register struct rx_connection *aconn;
register afs_int32 *aint32s; {
return 0;
}
+int
rxi_SetCallNumberVector(aconn, aint32s)
register struct rx_connection *aconn;
register afs_int32 *aint32s; {
{
struct rx_serverQueueEntry *sq;
register struct rx_call *call = (struct rx_call *) 0, *choice2;
- struct rx_service *service;
+ struct rx_service *service = NULL;
SPLVAR;
MUTEX_ENTER(&freeSQEList_lock);
- if (sq = rx_FreeSQEList) {
+ if ((sq = rx_FreeSQEList)) {
rx_FreeSQEList = *(struct rx_serverQueueEntry **)sq;
MUTEX_EXIT(&freeSQEList_lock);
} else { /* otherwise allocate a new one and return that */
{
struct rx_serverQueueEntry *sq;
register struct rx_call *call = (struct rx_call *) 0, *choice2;
- struct rx_service *service;
+ struct rx_service *service = NULL;
SPLVAR;
NETPRI;
AFS_RXGLOCK();
MUTEX_ENTER(&freeSQEList_lock);
- if (sq = rx_FreeSQEList) {
+ if ((sq = rx_FreeSQEList)) {
rx_FreeSQEList = *(struct rx_serverQueueEntry **)sq;
MUTEX_EXIT(&freeSQEList_lock);
} else { /* otherwise allocate a new one and return that */
MUTEX_ENTER(&conn->conn_call_lock);
MUTEX_ENTER(&call->lock);
MUTEX_ENTER(&conn->conn_data_lock);
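+ /* Flag the connection busy so the destroy path, which now also checks
+  * RX_CONN_BUSY, will not tear it down while the call is being set up. */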
+ conn->flags |= RX_CONN_BUSY;
if (conn->flags & RX_CONN_MAKECALL_WAITING) {
conn->flags &= (~RX_CONN_MAKECALL_WAITING);
MUTEX_EXIT(&conn->conn_data_lock);
CALL_RELE(call, RX_CALL_REFCOUNT_BEGIN);
MUTEX_EXIT(&call->lock);
- if (conn->type == RX_CLIENT_CONNECTION)
+ if (conn->type == RX_CLIENT_CONNECTION) {
MUTEX_EXIT(&conn->conn_call_lock);
+ conn->flags &= ~RX_CONN_BUSY;
+ }
AFS_RXGUNLOCK();
USERPRI;
/*
/* Allocate a call structure, for the indicated channel of the
* supplied connection. The mode and state of the call must be set by
- * the caller. */
+ * the caller. Returns the call with mutex locked. */
struct rx_call *rxi_NewCall(conn, channel)
register struct rx_connection *conn;
register int channel;
the call number is valid from the last time this channel was used */
if (*call->callNumber == 0) *call->callNumber = 1;
- MUTEX_EXIT(&call->lock);
return call;
}
p = (char *) osi_Alloc(size);
#endif
if (!p) osi_Panic("rxi_Alloc error");
- bzero(p, size);
+ memset(p, 0, size);
return p;
}
}
if (!call) {
call = rxi_NewCall(conn, channel);
- MUTEX_ENTER(&call->lock);
*call->callNumber = np->header.callNumber;
call->state = RX_STATE_PRECALL;
clock_GetTime(&call->queueTime);
afs_uint32 serial;
/* ignore the advertised skew, because some cache managers are bogus and send weird values for this */
afs_uint32 skew = 0;
- int needRxStart = 0;
int nbytes;
int missing;
int acked;
if (rx_Log) {
fprintf( rx_Log,
"RACK: reason %x previous %u seq %u serial %u skew %d first %u",
- ap->reason, ntohl(ap->previousPacket), np->header.seq, serial,
- skew, ntohl(ap->firstPacket));
+ ap->reason, ntohl(ap->previousPacket),
+ (unsigned int) np->header.seq, (unsigned int) serial,
+ (unsigned int) skew, ntohl(ap->firstPacket));
if (nAcks) {
int offset;
for (offset = 0; offset < nAcks; offset++)
* set the ack bits in the packets and have rxi_Start remove the packets
* when it's done transmitting.
*/
- if (!tp->acked) {
+ if (!(tp->flags & RX_PKTFLAG_ACKED)) {
newAckCount++;
}
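+ /* Another thread owns the transmit queue: just flag the packet acked
+  * and let rxi_Start remove it once transmission is complete. */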
if (call->flags & RX_CALL_TQ_BUSY) {
#ifdef RX_ENABLE_LOCKS
- tp->acked = 1;
+ tp->flags |= RX_PKTFLAG_ACKED;
call->flags |= RX_CALL_TQ_SOME_ACKED;
#else /* RX_ENABLE_LOCKS */
break;
* out of sequence. */
if (tp->header.seq < first) {
/* Implicit ack information */
- if (!tp->acked) {
+ if (!(tp->flags & RX_PKTFLAG_ACKED)) {
newAckCount++;
}
- tp->acked = 1;
+ tp->flags |= RX_PKTFLAG_ACKED;
}
else if (tp->header.seq < first + nAcks) {
/* Explicit ack information: set it in the packet appropriately */
if (ap->acks[tp->header.seq - first] == RX_ACK_TYPE_ACK) {
- if (!tp->acked) {
+ if (!(tp->flags & RX_PKTFLAG_ACKED)) {
newAckCount++;
- tp->acked = 1;
+ tp->flags |= RX_PKTFLAG_ACKED;
}
if (missing) {
nNacked++;
call->nSoftAcked++;
}
} else {
- tp->acked = 0;
+ tp->flags &= ~RX_PKTFLAG_ACKED;
missing = 1;
}
}
else {
- tp->acked = 0;
+ tp->flags &= ~RX_PKTFLAG_ACKED;
missing = 1;
}
 * i.e., this should readjust the retransmit timer for all outstanding
 * packets... so we don't just retransmit when we should know better */
- if (!tp->acked && !clock_IsZero(&tp->retryTime)) {
+ if (!(tp->flags & RX_PKTFLAG_ACKED) && !clock_IsZero(&tp->retryTime)) {
tp->retryTime = tp->timeSent;
clock_Add(&tp->retryTime, &peer->timeout);
/* shift by eight because one quarter-sec ~ 256 milliseconds */
/* if the ack packet has a receivelen field hanging off it,
* update our state */
- if ( np->length >= rx_AckDataSize(ap->nAcks) +sizeof(afs_int32)) {
+ if ( np->length >= rx_AckDataSize(ap->nAcks) + 2*sizeof(afs_int32)) {
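+ /* require room for both trailing afs_int32 words before reading them */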
afs_uint32 tSize;
/* If the ack packet has a "recommended" size that is less than
* so we will retransmit as soon as the window permits*/
for(acked = 0, queue_ScanBackwards(&call->tq, tp, nxp, rx_packet)) {
if (acked) {
- if (!tp->acked) {
+ if (!(tp->flags & RX_PKTFLAG_ACKED)) {
clock_Zero(&tp->retryTime);
}
- } else if (tp->acked) {
+ } else if (tp->flags & RX_PKTFLAG_ACKED) {
acked = 1;
}
}
for (queue_Scan(&call->tq, p, tp, rx_packet)) {
if (!p)
break;
- p->acked = 1;
+ p->flags |= RX_PKTFLAG_ACKED;
someAcked = 1;
}
if (someAcked) {
for (queue_Scan(&call->tq, p, tp, rx_packet)) {
if (!p)
break;
- p->acked = 1;
+ p->flags |= RX_PKTFLAG_ACKED;
someAcked = 1;
}
if (someAcked) {
#ifdef RXDEBUG
if (rx_Log) {
fprintf(rx_Log, "SACK: reason %x previous %u seq %u first %u",
- ap->reason, ntohl(ap->previousPacket), p->header.seq,
- ntohl(ap->firstPacket));
+ ap->reason, ntohl(ap->previousPacket),
+ (unsigned int) p->header.seq, ntohl(ap->firstPacket));
if (ap->nAcks) {
for (offset = 0; offset < ap->nAcks; offset++)
putc(ap->acks[offset] == RX_ACK_TYPE_NACK? '-' : '*', rx_Log);
}
/* Send all of the packets in the list in single datagram */
-static void rxi_SendList(call, list, len, istack, moreFlag, now, retryTime)
+static void rxi_SendList(call, list, len, istack, moreFlag, now, retryTime, resending)
struct rx_call *call;
struct rx_packet **list;
int len;
int moreFlag;
struct clock *now;
struct clock *retryTime;
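+ /* resending is non-zero when the packets in this list are retransmissions */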
+ int resending;
{
int i;
int requestAck = 0;
MUTEX_ENTER(&peer->peer_lock);
peer->nSent += len;
+ if (resending) peer->reSends += len;
MUTEX_ENTER(&rx_stats_mutex);
rx_stats.dataPacketsSent += len;
MUTEX_EXIT(&rx_stats_mutex);
MUTEX_ENTER(&peer->peer_lock);
peer->nSent++;
+ if (resending) peer->reSends++;
MUTEX_ENTER(&rx_stats_mutex);
rx_stats.dataPacketsSent++;
MUTEX_EXIT(&rx_stats_mutex);
* We always keep the last list we should have sent so we
* can set the RX_MORE_PACKETS flags correctly.
*/
-static void rxi_SendXmitList(call, list, len, istack, now, retryTime)
+static void rxi_SendXmitList(call, list, len, istack, now, retryTime, resending)
struct rx_call *call;
struct rx_packet **list;
int len;
int istack;
struct clock *now;
struct clock *retryTime;
+ int resending;
{
int i, cnt, lastCnt = 0;
struct rx_packet **listP, **lastP = 0;
/* Does the current packet force us to flush the current list? */
if (cnt > 0
&& (list[i]->header.serial
- || list[i]->acked
+ || (list[i]->flags & RX_PKTFLAG_ACKED)
|| list[i]->length > RX_JUMBOBUFFERSIZE)) {
if (lastCnt > 0) {
- rxi_SendList(call, lastP, lastCnt, istack, 1, now, retryTime);
+ rxi_SendList(call, lastP, lastCnt, istack, 1, now, retryTime, resending);
/* If the call enters an error state stop sending, or if
* we entered congestion recovery mode, stop sending */
if (call->error || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
}
/* Add the current packet to the list if it hasn't been acked.
* Otherwise adjust the list pointer to skip the current packet. */
- if (!list[i]->acked) {
+ if (!(list[i]->flags & RX_PKTFLAG_ACKED)) {
cnt++;
/* Do we need to flush the list? */
if (cnt >= (int)peer->maxDgramPackets
|| list[i]->length != RX_JUMBOBUFFERSIZE) {
if (lastCnt > 0) {
rxi_SendList(call, lastP, lastCnt, istack, 1,
- now, retryTime);
+ now, retryTime, resending);
/* If the call enters an error state stop sending, or if
* we entered congestion recovery mode, stop sending */
if (call->error || (call->flags&RX_CALL_FAST_RECOVER_WAIT))
* an acked packet. Since we always send retransmissions
* in a separate packet, we only need to check the first
* packet in the list */
- if (cnt > 0 && !listP[0]->acked) {
+ if (cnt > 0 && !(listP[0]->flags & RX_PKTFLAG_ACKED)) {
morePackets = 1;
}
if (lastCnt > 0) {
rxi_SendList(call, lastP, lastCnt, istack, morePackets,
- now, retryTime);
+ now, retryTime, resending);
/* If the call enters an error state stop sending, or if
* we entered congestion recovery mode, stop sending */
if (call->error || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
return;
}
if (morePackets) {
- rxi_SendList(call, listP, cnt, istack, 0, now, retryTime);
+ rxi_SendList(call, listP, cnt, istack, 0, now, retryTime, resending);
}
} else if (lastCnt > 0) {
- rxi_SendList(call, lastP, lastCnt, istack, 0, now, retryTime);
+ rxi_SendList(call, lastP, lastCnt, istack, 0, now, retryTime, resending);
}
}
int nXmitPackets;
int maxXmitPackets;
struct rx_packet **xmitList;
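+ /* set when rxi_Start is driven by a resend event, and passed through to
+  * the send routines so that peer->reSends gets updated */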
+ int resending = 0;
/* If rxi_Start is being called as a result of a resend event,
* then make sure that the event pointer is removed from the call
if (event && event == call->resendEvent) {
CALL_RELE(call, RX_CALL_REFCOUNT_RESEND);
call->resendEvent = NULL;
+ resending = 1;
if (queue_IsEmpty(&call->tq)) {
/* Nothing to do */
return;
* than recovery rates.
*/
for(queue_Scan(&call->tq, p, nxp, rx_packet)) {
- if (!p->acked) {
+ if (!(p->flags & RX_PKTFLAG_ACKED)) {
clock_Zero(&p->retryTime);
}
}
/* Only send one packet during fast recovery */
break;
}
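+ /* sanity check that the transmit queue has not been clobbered by
+  * packets on the free list */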
- if ((p->header.flags == RX_FREE_PACKET) ||
+ if ((p->flags & RX_PKTFLAG_FREE) ||
(!queue_IsEnd(&call->tq, nxp)
- && (nxp->header.flags == RX_FREE_PACKET)) ||
+ && (nxp->flags & RX_PKTFLAG_FREE)) ||
(p == (struct rx_packet *)&rx_freePacketQueue) ||
(nxp == (struct rx_packet *)&rx_freePacketQueue)) {
osi_Panic("rxi_Start: xmit queue clobbered");
}
- if (p->acked) {
+ if (p->flags & RX_PKTFLAG_ACKED) {
MUTEX_ENTER(&rx_stats_mutex);
rx_stats.ignoreAckedPacket++;
MUTEX_EXIT(&rx_stats_mutex);
* ready to send. Now we loop to send the packets */
if (nXmitPackets > 0) {
rxi_SendXmitList(call, xmitList, nXmitPackets, istack,
- &now, &retryTime);
+ &now, &retryTime, resending);
}
osi_Free(xmitList, maxXmitPackets * sizeof(struct rx_packet *));
* the transmit queue.
*/
for (missing = 0, queue_Scan(&call->tq, p, nxp, rx_packet)) {
- if (p->header.seq < call->tfirst && p->acked) {
+ if (p->header.seq < call->tfirst && (p->flags & RX_PKTFLAG_ACKED)) {
queue_Remove(p);
rxi_FreePacket(p);
}
break;
}
- if (!p->acked && !clock_IsZero(&p->retryTime)) {
+ if (!(p->flags & RX_PKTFLAG_ACKED) && !clock_IsZero(&p->retryTime)) {
haveEvent = 1;
retryTime = p->retryTime;
break;
struct timeval temptime;
#endif
register int rtt_timeout;
- static char id[]="@(#)adaptive RTO";
#if defined(AFS_ALPHA_LINUX20_ENV) && defined(AFS_PTHREAD_ENV) && !defined(KERNEL)
/* yet again. This was the worst Heisenbug of the port - stroucki */
{
struct clock now;
clock_GetTime(&now);
- fprintf(rx_Log, " %u.%.3u:", now.sec, now.usec/1000);
+ fprintf(rx_Log, " %u.%.3u:", (unsigned int) now.sec, (unsigned int) now.usec/1000);
fprintf(rx_Log, format, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15);
putc('\n', rx_Log);
}
}
fprintf(file,
- "rx stats: free packets %d, "
- "allocs %d, ",
- freePackets,
+ "rx stats: free packets %d, allocs %d, ",
+ (int) freePackets,
s->packetRequests);
if (version >= RX_DEBUGI_VERSION_W_NEWPACKETTYPES) {
" \t(these should be small) sendFailed %d, "
"fatalErrors %d\n",
s->netSendFailures,
- s->fatalErrors);
+ (int) s->fatalErrors);
if (s->nRttSamples) {
fprintf(file,
"Burst size %d, "
"burst wait %u.%d.\n",
ntohl(peer->host),
- peer->port,
- peer->burstSize,
- peer->burstWait.sec,
- peer->burstWait.usec);
+ (int) peer->port,
+ (int) peer->burstSize,
+ (int) peer->burstWait.sec,
+ (int) peer->burstWait.usec);
fprintf(file,
" Rtt %d, "
"total sent %d, "
"resent %d\n",
peer->rtt,
- peer->timeout.sec,
- peer->timeout.usec,
+ (int) peer->timeout.sec,
+ (int) peer->timeout.usec,
peer->nSent,
peer->reSends);
"max in packet skew %d, "
"max out packet skew %d\n",
peer->ifMTU,
- peer->inPacketSkew,
- peer->outPacketSkew);
+ (int) peer->inPacketSkew,
+ (int) peer->outPacketSkew);
}
#ifdef AFS_PTHREAD_ENV
theader.flags = RX_CLIENT_INITIATED | RX_LAST_PACKET;
theader.serviceId = 0;
- bcopy(&theader, tbuffer, sizeof(theader));
- bcopy(inputData, tp, inputLength);
+ memcpy(tbuffer, &theader, sizeof(theader));
+ memcpy(tp, inputData, inputLength);
code = sendto(socket, tbuffer, inputLength+sizeof(struct rx_header), 0,
(struct sockaddr *) &taddr, sizeof(struct sockaddr_in));
code = recvfrom(socket, tbuffer, sizeof(tbuffer), 0,
(struct sockaddr *) &faddr, &faddrLen);
- bcopy(tbuffer, &theader, sizeof(struct rx_header));
+ memcpy(&theader, tbuffer, sizeof(struct rx_header));
if (counter == ntohl(theader.callNumber)) break;
}
}
code -= sizeof(struct rx_header);
if (code > outputLength) code = outputLength;
- bcopy(tp, outputData, code);
+ memcpy(outputData, tp, code);
return code;
}
{
struct rx_debugIn in;
afs_int32 rc = 0;
- int i;
/*
* supportedValues is currently unused, but added to allow future
MUTEX_ENTER(&freeSQEList_lock);
- while (np = rx_FreeSQEList) {
+ while ((np = rx_FreeSQEList)) {
rx_FreeSQEList = *(struct rx_serverQueueEntry **)np;
MUTEX_DESTROY(&np->lock);
rxi_Free(np, sizeof(*np));
* queue.
*/
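+ /* a scan that runs off the end leaves rpc_stat at the queue sentinel
+  * rather than a real entry, so treat that as a miss as well */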
- if ((rpc_stat == NULL) ||
+ if (queue_IsEnd(stats, rpc_stat) ||
+ (rpc_stat == NULL) ||
(rpc_stat->stats[0].interfaceId != rxInterface) ||
(rpc_stat->stats[0].remote_is_server != isServer)) {
int i;
ptr = *stats = (afs_uint32 *) rxi_Alloc(space);
if (ptr != NULL) {
- register struct rx_peer *pp;
- int i;
- int num_copied = 0;
rx_interface_stat_p rpc_stat, nrpc_stat;
ptr = *stats = (afs_uint32 *) rxi_Alloc(space);
if (ptr != NULL) {
- int i;
- int num_copied = 0;
rx_interface_stat_p rpc_stat, nrpc_stat;
char *fix_offset;