#include "rx_stats.h"
#include "rx_event.h"
+#include "rx_conn.h"
+#include "rx_call.h"
+#include "rx_packet.h"
+
#include <afs/rxgen_consts.h>
#ifndef KERNEL
static unsigned int rxi_rpc_process_stat_cnt;
/*
- * rxi_busyChannelError is the error to return to the application when a call
- * channel appears busy (inferred from the receipt of RX_PACKET_TYPE_BUSY
- * packets on the channel), and there are other call channels in the
- * connection that are not busy. If 0, we do not return errors upon receiving
- * busy packets; we just keep trying on the same call channel until we hit a
- * timeout.
+ * rxi_busyChannelError is a boolean. It indicates whether or not RX_CALL_BUSY
+ * errors should be reported to the application when a call channel appears busy
+ * (inferred from the receipt of RX_PACKET_TYPE_BUSY packets on the channel),
+ * and there are other call channels in the connection that are not busy.
+ * If 0, we do not return errors upon receiving busy packets; we just keep
+ * trying on the same call channel until we hit a timeout.
*/
static afs_int32 rxi_busyChannelError = 0;
/* Forward prototypes */
static struct rx_call * rxi_NewCall(struct rx_connection *, int);
+/*
+ * Release a reference on a connection.
+ *
+ * Decrements conn->refCount under rx_refcnt_mutex. Note that the
+ * connection is NOT destroyed here even if the count reaches zero;
+ * presumably final teardown happens elsewhere (e.g. the connection
+ * reaper) — confirm against rxi_ReapConnections.
+ *
+ * @param[in] conn  the connection whose reference count is dropped
+ */
+static_inline void
+putConnection (struct rx_connection *conn) {
+    MUTEX_ENTER(&rx_refcnt_mutex);
+    conn->refCount--;
+    MUTEX_EXIT(&rx_refcnt_mutex);
+}
+
#ifdef AFS_PTHREAD_ENV
/*
if (lastPacket && call->conn->type == RX_CLIENT_CONNECTION)
clock_Addmsec(&retryTime, 400);
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_RESEND);
- MUTEX_EXIT(&rx_refcnt_mutex);
call->resendEvent = rxevent_Post(&retryTime, &now, rxi_Resend,
call, NULL, istack);
}
}
/**
- * Sets the error generated when a busy call channel is detected.
+ * Enables or disables the busy call channel error (RX_CALL_BUSY).
*
- * @param[in] error The error to return for a call on a busy channel.
+ * @param[in] onoff Non-zero to enable busy call channel errors.
*
* @pre Neither rx_Init nor rx_InitHost have been called yet
*/
void
-rx_SetBusyChannelError(afs_int32 error)
+rx_SetBusyChannelError(afs_int32 onoff)
{
+    /* Per the @pre above: only legal before rx_Init/rx_InitHost have run,
+     * while rxinit_status is still nonzero. */
    osi_Assert(rxinit_status != 0);
-    rxi_busyChannelError = error;
+    /* Normalize to a strict boolean; the error code itself is now fixed
+     * (RX_CALL_BUSY) rather than caller-supplied. */
+    rxi_busyChannelError = onoff ? 1 : 0;
}
/**
rxevent_Cancel(&call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
- MUTEX_EXIT(&rx_refcnt_mutex);
call->delayedAckEvent = rxevent_Post(&when, &now,
rxi_SendDelayedAck,
rx_SetConnIdleDeadTime(struct rx_connection *conn, int seconds)
{
conn->idleDeadTime = seconds;
+ conn->idleDeadDetection = (seconds ? 1 : 0);
rxi_CheckConnTimeouts(conn);
}
* effect on overall system performance.
*/
call->state = RX_STATE_RESET;
+ CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
+ (*call->callNumber)++;
MUTEX_EXIT(&conn->conn_call_lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
- MUTEX_EXIT(&rx_refcnt_mutex);
rxi_ResetCall(call, 0);
- (*call->callNumber)++;
if (MUTEX_TRYENTER(&conn->conn_call_lock))
break;
* Instead, cycle through one more time to see if
* we can find a call that can call our own.
*/
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_BEGIN);
- MUTEX_EXIT(&rx_refcnt_mutex);
wait = 0;
}
MUTEX_EXIT(&call->lock);
/* rxi_NewCall returns with mutex locked */
call = rxi_NewCall(conn, i);
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
- MUTEX_EXIT(&rx_refcnt_mutex);
break;
}
}
if (i < RX_MAXCALLS) {
conn->lastBusy[i] = 0;
+ call->flags &= ~RX_CALL_PEER_BUSY;
break;
}
if (!wait)
SPLVAR;
NETPRI;
+ MUTEX_ENTER(&aconn->conn_call_lock);
for (i = 0; i < RX_MAXCALLS; i++) {
if ((tcall = aconn->call[i]) && (tcall->state == RX_STATE_DALLY))
aint32s[i] = aconn->callNumber[i] + 1;
else
aint32s[i] = aconn->callNumber[i];
}
+ MUTEX_EXIT(&aconn->conn_call_lock);
USERPRI;
return 0;
}
SPLVAR;
NETPRI;
+ MUTEX_ENTER(&aconn->conn_call_lock);
for (i = 0; i < RX_MAXCALLS; i++) {
if ((tcall = aconn->call[i]) && (tcall->state == RX_STATE_DALLY))
aconn->callNumber[i] = aint32s[i] - 1;
else
aconn->callNumber[i] = aint32s[i];
}
+ MUTEX_EXIT(&aconn->conn_call_lock);
USERPRI;
return 0;
}
}
MUTEX_ENTER(&rx_pthread_mutex);
if (tno == rxi_fcfs_thread_num
- || !tcall->queue_item_header.next) {
+ || queue_IsLast(&rx_incomingCallQueue, tcall)) {
MUTEX_EXIT(&rx_pthread_mutex);
                /* If we're the fcfs thread, then we'll just use
* this call. If we haven't been able to find an optimal
call));
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
- MUTEX_EXIT(&rx_refcnt_mutex);
} else {
dpf(("rx_GetCall(socketp=%p, *socketp=0x%x)\n", socketp, *socketp));
}
rxi_FreePackets(0, &call->iovq);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_BEGIN);
- MUTEX_EXIT(&rx_refcnt_mutex);
if (conn->type == RX_CLIENT_CONNECTION) {
MUTEX_ENTER(&conn->conn_data_lock);
conn->flags &= ~RX_CONN_BUSY;
*
 * call->lock and rx_refcnt_mutex are held upon entry.
* haveCTLock is set when called from rxi_ReapConnections.
+ *
+ * return 1 if the call is freed, 0 if not.
*/
-static void
+static int
rxi_FreeCall(struct rx_call *call, int haveCTLock)
{
int channel = call->channel;
struct rx_connection *conn = call->conn;
+ u_char state = call->state;
-
- if (call->state == RX_STATE_DALLY || call->state == RX_STATE_HOLD)
- (*call->callNumber)++;
/*
* We are setting the state to RX_STATE_RESET to
* ensure that no one else will attempt to use this
MUTEX_EXIT(&rx_refcnt_mutex);
rxi_ResetCall(call, 0);
- MUTEX_ENTER(&conn->conn_call_lock);
- if (call->conn->call[channel] == call)
- call->conn->call[channel] = 0;
- MUTEX_EXIT(&conn->conn_call_lock);
+ if (MUTEX_TRYENTER(&conn->conn_call_lock))
+ {
+ if (state == RX_STATE_DALLY || state == RX_STATE_HOLD)
+ (*call->callNumber)++;
+
+ if (call->conn->call[channel] == call)
+ call->conn->call[channel] = 0;
+ MUTEX_EXIT(&conn->conn_call_lock);
+ } else {
+ /*
+ * We couldn't obtain the conn_call_lock so we can't
+ * disconnect the call from the connection. Set the
+ * call state to dally so that the call can be reused.
+ */
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ call->state = RX_STATE_DALLY;
+ return 0;
+ }
MUTEX_ENTER(&rx_freeCallQueue_lock);
SET_CALL_QUEUE_LOCK(call, &rx_freeCallQueue_lock);
MUTEX_EXIT(&conn->conn_data_lock);
}
MUTEX_ENTER(&rx_refcnt_mutex);
+ return 1;
}
rx_atomic_t rxi_Allocsize = RX_ATOMIC_INIT(0);
conn->nSpecific = 0;
conn->specific = NULL;
rx_SetConnDeadTime(conn, service->connDeadTime);
- rx_SetConnIdleDeadTime(conn, service->idleDeadTime);
- rx_SetServerConnIdleDeadErr(conn, service->idleDeadErr);
+ conn->idleDeadTime = service->idleDeadTime;
+ conn->idleDeadDetection = service->idleDeadErr ? 1 : 0;
for (i = 0; i < RX_MAXCALLS; i++) {
conn->twind[i] = rx_initSendWindow;
conn->rwind[i] = rx_initReceiveWindow;
int channel = call->channel;
int freechannel = 0;
int i;
- afs_uint32 callNumber = *call->callNumber;
+ afs_uint32 callNumber;
MUTEX_EXIT(&call->lock);
MUTEX_ENTER(&conn->conn_call_lock);
+ callNumber = *call->callNumber;
/* Are there any other call slots on this conn that we should try? Look for
* slots that are empty and are either non-busy, or were marked as busy
}
}
- MUTEX_EXIT(&conn->conn_call_lock);
-
MUTEX_ENTER(&call->lock);
/* Since the call->lock and conn->conn_call_lock have been released it is
* rxi_busyChannelError so the application can retry the request,
* presumably on a less-busy call channel. */
- rxi_CallError(call, rxi_busyChannelError);
+ rxi_CallError(call, RX_CALL_BUSY);
}
+ MUTEX_EXIT(&conn->conn_call_lock);
}
/* There are two packet tracing routines available for testing and monitoring
MUTEX_ENTER(&conn->conn_data_lock);
if (np->header.type != RX_PACKET_TYPE_ABORT)
np = rxi_SendConnectionAbort(conn, np, 1, 0);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
MUTEX_EXIT(&conn->conn_data_lock);
return np;
}
afs_int32 errcode = ntohl(rx_GetInt32(np, 0));
dpf(("rxi_ReceivePacket ABORT rx_GetInt32 = %d\n", errcode));
rxi_ConnectionError(conn, errcode);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
return np;
}
case RX_PACKET_TYPE_CHALLENGE:
tnp = rxi_ReceiveChallengePacket(conn, np, 1);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
return tnp;
case RX_PACKET_TYPE_RESPONSE:
tnp = rxi_ReceiveResponsePacket(conn, np, 1);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
return tnp;
case RX_PACKET_TYPE_PARAMS:
case RX_PACKET_TYPE_PARAMS + 1:
case RX_PACKET_TYPE_PARAMS + 2:
/* ignore these packet types for now */
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
return np;
-
default:
/* Should not reach here, unless the peer is broken: send an
* abort packet */
rxi_ConnectionError(conn, RX_PROTOCOL_ERROR);
MUTEX_ENTER(&conn->conn_data_lock);
tnp = rxi_SendConnectionAbort(conn, np, 1, 0);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
MUTEX_EXIT(&conn->conn_data_lock);
return tnp;
}
}
channel = np->header.cid & RX_CHANNELMASK;
+ MUTEX_ENTER(&conn->conn_call_lock);
call = conn->call[channel];
if (call) {
MUTEX_ENTER(&call->lock);
currentCallNumber = conn->callNumber[channel];
+ MUTEX_EXIT(&conn->conn_call_lock);
} else if (type == RX_SERVER_CONNECTION) { /* No call allocated */
- MUTEX_ENTER(&conn->conn_call_lock);
call = conn->call[channel];
if (call) {
MUTEX_ENTER(&call->lock);
- MUTEX_EXIT(&conn->conn_call_lock);
currentCallNumber = conn->callNumber[channel];
+ MUTEX_EXIT(&conn->conn_call_lock);
} else {
call = rxi_NewCall(conn, channel); /* returns locked call */
- MUTEX_EXIT(&conn->conn_call_lock);
*call->callNumber = currentCallNumber = np->header.callNumber;
+ MUTEX_EXIT(&conn->conn_call_lock);
#ifdef RXDEBUG
if (np->header.callNumber == 0)
dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" len %d\n",
rxi_CallError(call, rx_BusyError);
tp = rxi_SendCallAbort(call, np, 1, 0);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
if (rx_stats_active)
rx_atomic_inc(&rx_stats.nBusies);
return tp;
*/
if (rx_stats_active)
rx_atomic_inc(&rx_stats.spuriousPacketsRead);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
return np;
}
MUTEX_EXIT(&call->lock);
if (rx_stats_active)
rx_atomic_inc(&rx_stats.spuriousPacketsRead);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
return np;
} else if (np->header.callNumber != currentCallNumber) {
/* Wait until the transmit queue is idle before deciding
if (call->error) {
rxi_CallError(call, call->error);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
return np;
}
}
tp = rxi_SendSpecial(call, conn, np, RX_PACKET_TYPE_BUSY,
NULL, 0, 1);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
return tp;
}
rxi_ResetCall(call, 0);
+ /*
+ * The conn_call_lock is not held but no one else should be
+ * using this call channel while we are processing this incoming
+ * packet. This assignment should be safe.
+ */
*call->callNumber = np->header.callNumber;
#ifdef RXDEBUG
if (np->header.callNumber == 0)
rxi_CallError(call, rx_BusyError);
tp = rxi_SendCallAbort(call, np, 1, 0);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
if (rx_stats_active)
rx_atomic_inc(&rx_stats.nBusies);
return tp;
if (rx_stats_active)
rx_atomic_inc(&rx_stats.ignorePacketDally);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
return np;
}
if (rx_stats_active)
rx_atomic_inc(&rx_stats.spuriousPacketsRead);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
return np;
}
/* If the service security object index stamped in the packet does not
* match the connection's security index, ignore the packet */
if (np->header.securityIndex != conn->securityIndex) {
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
return np;
}
#ifdef RX_ENABLE_LOCKS
rxi_SetAcksInTransmitQueue(call);
#else
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
return np; /* xmitting; drop packet */
#endif
} else {
if (rx_stats_active)
rx_atomic_inc(&rx_stats.spuriousPacketsRead);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
return np;
}
}
dpf(("rxi_ReceivePacket ABORT rx_DataOf = %d\n", errdata));
rxi_CallError(call, errdata);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
return np; /* xmitting; drop packet */
}
case RX_PACKET_TYPE_BUSY: {
MUTEX_EXIT(&call->lock);
MUTEX_EXIT(&conn->conn_call_lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
return np;
}
break;
#else /* RX_ENABLE_LOCKS */
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
return np; /* xmitting; drop packet */
#endif /* RX_ENABLE_LOCKS */
}
/* we've received a legit packet, so the channel is not busy */
call->flags &= ~RX_CALL_PEER_BUSY;
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
return np;
}
waiting = conn->flags & RX_CONN_ATTACHWAIT;
if (event) {
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
}
MUTEX_EXIT(&conn->conn_data_lock);
*tnop = sq->tno;
*sq->socketp = socket;
clock_GetTime(&call->startTime);
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
- MUTEX_EXIT(&rx_refcnt_mutex);
} else {
sq->newcall = call;
}
MUTEX_ENTER(&call->lock);
rxevent_Put(call->delayedAckEvent);
call->delayedAckEvent = NULL;
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_ACKALL);
- MUTEX_EXIT(&rx_refcnt_mutex);
}
rxi_SendSpecial(call, call->conn, (struct rx_packet *)0,
RX_PACKET_TYPE_ACKALL, NULL, 0, 0);
rxevent_Put(call->delayedAckEvent);
call->delayedAckEvent = NULL;
}
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_DELAY);
- MUTEX_EXIT(&rx_refcnt_mutex);
}
(void)rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
if (event)
rxi_SendCallAbort(struct rx_call *call, struct rx_packet *packet,
int istack, int force)
{
- afs_int32 error;
+ afs_int32 error, cerror;
struct clock when, now;
if (!call->error)
return packet;
+ switch (call->error) {
+ case RX_CALL_IDLE:
+ case RX_CALL_BUSY:
+ cerror = RX_CALL_TIMEOUT;
+ break;
+ default:
+ cerror = call->error;
+ }
+
/* Clients should never delay abort messages */
if (rx_IsClientConn(call->conn))
force = 1;
- if (call->abortCode != call->error) {
- call->abortCode = call->error;
+ if (call->abortCode != cerror) {
+ call->abortCode = cerror;
call->abortCount = 0;
}
rxevent_Cancel(&call->delayedAbortEvent, call,
RX_CALL_REFCOUNT_ABORT);
}
- error = htonl(call->error);
+ error = htonl(cerror);
call->abortCount++;
packet =
rxi_SendSpecial(call, call->conn, packet, RX_PACKET_TYPE_ABORT,
clock_GetTime(&now);
when = now;
clock_Addmsec(&when, rxi_callAbortDelay);
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_ABORT);
- MUTEX_EXIT(&rx_refcnt_mutex);
call->delayedAbortEvent =
rxevent_Post(&when, &now, rxi_SendDelayedCallAbort, call, 0, 0);
}
if (conn->checkReachEvent) {
rxevent_Cancel(&conn->checkReachEvent, NULL, 0);
conn->flags &= ~(RX_CONN_ATTACHWAIT|RX_CONN_NAT_PING);
- MUTEX_ENTER(&rx_refcnt_mutex);
- conn->refCount--;
- MUTEX_EXIT(&rx_refcnt_mutex);
+ putConnection(conn);
}
MUTEX_EXIT(&conn->conn_data_lock);
for (i = 0; i < RX_MAXCALLS; i++) {
}
- rxevent_Cancel(&call->growMTUEvent, call, RX_CALL_REFCOUNT_ALIVE);
+ rxevent_Cancel(&call->growMTUEvent, call, RX_CALL_REFCOUNT_MTU);
if (call->delayedAbortEvent) {
rxevent_Cancel(&call->delayedAbortEvent, call, RX_CALL_REFCOUNT_ABORT);
}
call->flags = 0;
- if ((flags & RX_CALL_PEER_BUSY)) {
+ if (!newcall && (flags & RX_CALL_PEER_BUSY)) {
/* The call channel is still busy; resetting the call doesn't change
- * that */
+ * that. However, if 'newcall' is set, we are processing a call
+ * structure that has either been recycled from the free list, or has
+ * been newly allocated. So, RX_CALL_PEER_BUSY is not relevant if
+ * 'newcall' is set, since it describes a completely different call
+ * channel which we do not care about. */
call->flags |= RX_CALL_PEER_BUSY;
}
rxevent_Cancel(&call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_SEND);
- MUTEX_EXIT(&rx_refcnt_mutex);
if (xmit->len > 1) {
rxi_SendPacketList(call, conn, xmit->list, xmit->len, istack);
} else {
rxi_SendPacket(call, conn, xmit->list[0], istack);
}
MUTEX_ENTER(&call->lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_SEND);
- MUTEX_EXIT(&rx_refcnt_mutex);
/* Tell the RTO calculation engine that we have sent a packet, and
* if it was the last one */
* structure, since there is no longer a per-call retransmission
* event pending. */
if (event == call->resendEvent) {
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_RESEND);
- MUTEX_EXIT(&rx_refcnt_mutex);
rxevent_Put(call->resendEvent);
call->resendEvent = NULL;
}
/* Actually send the packet, filling in more connection-specific fields */
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_SEND);
- MUTEX_EXIT(&rx_refcnt_mutex);
rxi_SendPacket(call, conn, p, istack);
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_SEND);
- MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_ENTER(&call->lock);
/* Update last send time for this call (for keep-alive
afs_uint32 fudgeFactor;
int cerror = 0;
int newmtu = 0;
+ int idle_timeout = 0;
+ afs_int32 clock_diff = 0;
+
+ now = clock_Sec();
+
+ /* Large swings in the clock can have a significant impact on
+ * the performance of RX call processing. Forward clock shifts
+ * will result in premature event triggering or timeouts.
+ * Backward shifts can result in calls not completing until
+ * the clock catches up with the original start clock value.
+ *
+ * If a backward clock shift of more than five minutes is noticed,
+ * just fail the call.
+ */
+ if (now < call->lastSendTime)
+ clock_diff = call->lastSendTime - now;
+ if (now < call->startWait)
+ clock_diff = MAX(clock_diff, call->startWait - now);
+ if (now < call->lastReceiveTime)
+ clock_diff = MAX(clock_diff, call->lastReceiveTime - now);
+ if (clock_diff > 5 * 60)
+ {
+ if (call->state == RX_STATE_ACTIVE)
+ rxi_CallError(call, RX_CALL_TIMEOUT);
+ return -1;
+ }
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
if (call->flags & RX_CALL_TQ_BUSY) {
((afs_uint32) call->rtt_dev << 1) + 1023) >> 10;
deadTime = conn->secondsUntilDead + fudgeFactor;
- now = clock_Sec();
/* These are computed to the second (+- 1 second). But that's
* good enough for these values, which should be a significant
* number of seconds. */
rxevent_Cancel(&call->keepAliveEvent, call,
RX_CALL_REFCOUNT_ALIVE);
rxevent_Cancel(&call->growMTUEvent, call,
- RX_CALL_REFCOUNT_ALIVE);
+ RX_CALL_REFCOUNT_MTU);
MUTEX_ENTER(&rx_refcnt_mutex);
- if (call->refCount == 0) {
- rxi_FreeCall(call, haveCTLock);
+ /* if rxi_FreeCall returns 1 it has freed the call */
+ if (call->refCount == 0 &&
+ rxi_FreeCall(call, haveCTLock))
+ {
MUTEX_EXIT(&rx_refcnt_mutex);
- return -2;
+ return -2;
}
MUTEX_EXIT(&rx_refcnt_mutex);
return -1;
* attached process can die reasonably gracefully. */
}
- if (conn->idleDeadTime) {
- idleDeadTime = conn->idleDeadTime + fudgeFactor;
- }
+ if (conn->idleDeadDetection) {
+ if (conn->idleDeadTime) {
+ idleDeadTime = conn->idleDeadTime + fudgeFactor;
+ }
- /* see if we have a non-activity timeout */
- if (call->startWait && idleDeadTime
- && ((call->startWait + idleDeadTime) < now) &&
- (call->flags & RX_CALL_READER_WAIT)) {
- if (call->state == RX_STATE_ACTIVE) {
- cerror = RX_CALL_TIMEOUT;
- goto mtuout;
- }
- }
- if (call->lastSendData && idleDeadTime && (conn->idleDeadErr != 0)
- && ((call->lastSendData + idleDeadTime) < now)) {
- if (call->state == RX_STATE_ACTIVE) {
- cerror = conn->idleDeadErr;
- goto mtuout;
- }
+ if (idleDeadTime) {
+ /* see if we have a non-activity timeout */
+ if (call->startWait && ((call->startWait + idleDeadTime) < now) &&
+ (call->flags & RX_CALL_READER_WAIT)) {
+ if (call->state == RX_STATE_ACTIVE) {
+ cerror = RX_CALL_TIMEOUT;
+ goto mtuout;
+ }
+ }
+
+ if (call->lastSendData && ((call->lastSendData + idleDeadTime) < now)) {
+ if (call->state == RX_STATE_ACTIVE) {
+ cerror = conn->service ? conn->service->idleDeadErr : RX_CALL_IDLE;
+ idle_timeout = 1;
+ goto mtuout;
+ }
+ }
+ }
}
if (conn->hardDeadTime) {
}
return 0;
mtuout:
- if (conn->msgsizeRetryErr && cerror != RX_CALL_TIMEOUT
- && call->lastReceiveTime) {
+ if (conn->msgsizeRetryErr && cerror != RX_CALL_TIMEOUT && !idle_timeout &&
+ call->lastReceiveTime) {
int oldMTU = conn->peer->ifMTU;
/* if we thought we could send more, perhaps things got worse */
struct rx_connection *conn;
afs_uint32 now;
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_ALIVE);
- MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_ENTER(&call->lock);
if (event == call->keepAliveEvent) {
struct rx_call *call = arg1;
struct rx_connection *conn;
- MUTEX_ENTER(&rx_refcnt_mutex);
- CALL_RELE(call, RX_CALL_REFCOUNT_ALIVE);
- MUTEX_EXIT(&rx_refcnt_mutex);
+ CALL_RELE(call, RX_CALL_REFCOUNT_MTU);
MUTEX_ENTER(&call->lock);
if (event == call->growMTUEvent) {
*/
if ((conn->peer->maxPacketSize != 0) &&
(conn->peer->natMTU < RX_MAX_PACKET_SIZE) &&
- (conn->idleDeadErr))
+ conn->idleDeadDetection)
(void)rxi_SendAck(call, NULL, 0, RX_ACK_MTU, 0);
rxi_ScheduleGrowMTUEvent(call, 0);
MUTEX_EXIT(&call->lock);
clock_GetTime(&now);
when = now;
when.sec += call->conn->secondsUntilPing;
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_ALIVE);
- MUTEX_EXIT(&rx_refcnt_mutex);
call->keepAliveEvent =
rxevent_Post(&when, &now, rxi_KeepAliveEvent, call, NULL, 0);
}
}
when.sec += secs;
- MUTEX_ENTER(&rx_refcnt_mutex);
- CALL_HOLD(call, RX_CALL_REFCOUNT_ALIVE);
- MUTEX_EXIT(&rx_refcnt_mutex);
+ CALL_HOLD(call, RX_CALL_REFCOUNT_MTU);
call->growMTUEvent =
rxevent_Post(&when, &now, rxi_GrowMTUEvent, call, NULL, 0);
}
rxi_ScheduleKeepAliveEvent(call);
}
+/*
+ * Public wrapper for rxi_KeepAliveOff, provided solely so that callers
+ * can disable keep-alive processing on a call without having to
+ * include rx_call.h.
+ *
+ * @param[in] call  the call on which keep-alives are turned off
+ */
+void
+rx_KeepAliveOff(struct rx_call *call)
+{
+    rxi_KeepAliveOff(call);
+}
+/*
+ * Public wrapper for rxi_KeepAliveOn; see rx_KeepAliveOff for rationale
+ * (callers need not include rx_call.h).
+ *
+ * @param[in] call  the call on which keep-alives are turned on
+ */
+void
+rx_KeepAliveOn(struct rx_call *call)
+{
+    rxi_KeepAliveOn(call);
+}
+
void
rxi_GrowMTUOn(struct rx_call *call)
{
rxi_FreePacket(packet);
}
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_ABORT);
- MUTEX_EXIT(&rx_refcnt_mutex);
}
/* This routine is called periodically (every RX_AUTH_REQUEST_TIMEOUT