/* Local static routines */
static void rxi_DestroyConnectionNoLock(struct rx_connection *conn);
+static void rxi_ComputeRoundTripTime(struct rx_packet *, struct clock *,
+ struct rx_peer *, struct clock *);
+
#ifdef RX_ENABLE_LOCKS
static void rxi_SetAcksInTransmitQueue(struct rx_call *call);
#endif
return conn;
}
+/**
+ * Ensure a connection's timeout values are valid.
+ *
+ * @param[in] conn The connection to check
+ *
+ * @post conn->secondsUntilDead <= conn->idleDeadTime <= conn->hardDeadTime,
+ * unless idleDeadTime and/or hardDeadTime are not set
+ * @internal
+ */
+static void
+rxi_CheckConnTimeouts(struct rx_connection *conn)
+{
+ /* a connection's timeouts must have the relationship
+ * deadTime <= idleDeadTime <= hardDeadTime. Otherwise, for example, a
+ * total loss of network to a peer may cause an idle timeout instead of a
+ * dead timeout, simply because the idle timeout gets hit first. Also set
+ * a minimum deadTime of 6, just to ensure it doesn't get set too low. */
+ /* this logic is slightly complicated by the fact that
+ * idleDeadTime/hardDeadTime may not be set at all, but it's not too bad.
+ */
+ conn->secondsUntilDead = MAX(conn->secondsUntilDead, 6);
+ if (conn->idleDeadTime) {
+ conn->idleDeadTime = MAX(conn->idleDeadTime, conn->secondsUntilDead);
+ }
+ if (conn->hardDeadTime) {
+ if (conn->idleDeadTime) {
+ /* all three are set: chain the clamps so the ordering holds */
+ conn->hardDeadTime = MAX(conn->idleDeadTime, conn->hardDeadTime);
+ } else {
+ /* no idle timeout: clamp hardDeadTime against deadTime directly */
+ conn->hardDeadTime = MAX(conn->secondsUntilDead, conn->hardDeadTime);
+ }
+ }
+}
+
void
rx_SetConnDeadTime(struct rx_connection *conn, int seconds)
{
    /* The idea is to set the dead time to a value that allows several
     * keepalives to be dropped without timing out the connection. */
-    conn->secondsUntilDead = MAX(seconds, 6);
+    conn->secondsUntilDead = seconds;
+    /* the minimum-of-6 clamp now lives in rxi_CheckConnTimeouts, which
+     * also keeps idleDeadTime/hardDeadTime consistent with the new value */
+    rxi_CheckConnTimeouts(conn);
    conn->secondsUntilPing = conn->secondsUntilDead / 6;
}
+/**
+ * Set a connection's hard dead time.
+ *
+ * @param[in] conn The connection to modify
+ * @param[in] seconds The hard timeout, in seconds, measured from a call's
+ *  start time (0 leaves the hard timeout unset); the related timeouts are
+ *  re-validated via rxi_CheckConnTimeouts
+ */
+void
+rx_SetConnHardDeadTime(struct rx_connection *conn, int seconds)
+{
+ conn->hardDeadTime = seconds;
+ rxi_CheckConnTimeouts(conn);
+}
+
+/**
+ * Set a connection's idle (non-activity) dead time.
+ *
+ * @param[in] conn The connection to modify
+ * @param[in] seconds The idle timeout, in seconds (0 leaves the idle
+ *  timeout unset); the related timeouts are re-validated via
+ *  rxi_CheckConnTimeouts
+ */
+void
+rx_SetConnIdleDeadTime(struct rx_connection *conn, int seconds)
+{
+ conn->idleDeadTime = seconds;
+ rxi_CheckConnTimeouts(conn);
+}
+
int rxi_lowPeerRefCount = 0;
int rxi_lowConnRefCount = 0;
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
/* Wait for the transmit queue to no longer be busy.
* requires the call->lock to be held */
-static void rxi_WaitforTQBusy(struct rx_call *call) {
- while (call->flags & RX_CALL_TQ_BUSY) {
+void rxi_WaitforTQBusy(struct rx_call *call) {
+ while (!call->error && (call->flags & RX_CALL_TQ_BUSY)) {
call->flags |= RX_CALL_TQ_WAIT;
call->tqWaiters++;
#ifdef RX_ENABLE_LOCKS
* flag is cleared.
*/
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- while ((call->state == RX_STATE_ACTIVE)
- && (call->flags & RX_CALL_TQ_BUSY)) {
- call->flags |= RX_CALL_TQ_WAIT;
- call->tqWaiters++;
-#ifdef RX_ENABLE_LOCKS
- osirx_AssertMine(&call->lock, "rxi_Start lock3");
- CV_WAIT(&call->cv_tq, &call->lock);
-#else /* RX_ENABLE_LOCKS */
- osi_rxSleep(&call->tq);
-#endif /* RX_ENABLE_LOCKS */
- call->tqWaiters--;
- if (call->tqWaiters == 0)
- call->flags &= ~RX_CALL_TQ_WAIT;
- }
+ if (call->state == RX_STATE_ACTIVE) {
+ rxi_WaitforTQBusy(call);
+	    /*
+	     * If we entered error state while waiting,
+	     * must call rxi_CallError to permit rxi_ResetCall
+	     * to proceed once the tqWaiter count hits zero.
+	     */
+ if (call->error) {
+ rxi_CallError(call, call->error);
+ MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ return np;
+ }
+ }
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
/* If the new call cannot be taken right now send a busy and set
* the error condition in this call, so that it terminates as
*/
static void
rxi_ComputePeerNetStats(struct rx_call *call, struct rx_packet *p,
- struct rx_ackPacket *ap, struct rx_packet *np)
+ struct rx_ackPacket *ap, struct rx_packet *np,
+ struct clock *now)
{
struct rx_peer *peer = call->conn->peer;
if (!(p->flags & RX_PKTFLAG_ACKED) &&
ap->reason != RX_ACK_DELAY &&
clock_Eq(&p->timeSent, &p->firstSent))
- rxi_ComputeRoundTripTime(p, &p->timeSent, peer);
+ rxi_ComputeRoundTripTime(p, &p->timeSent, peer, now);
#ifdef ADAPT_WINDOW
rxi_ComputeRate(peer, call, p, np, ap->reason);
#endif
struct rx_packet *nxp; /* Next packet pointer for queue_Scan */
struct rx_connection *conn = call->conn;
struct rx_peer *peer = conn->peer;
+ struct clock now; /* Current time, for RTT calculations */
afs_uint32 first;
afs_uint32 serial;
/* because there are CM's that are bogus, sending weird values for this. */
* acknowledged as having been sent to the peer's upper level.
* All other packets must be retained. So only packets with
* sequence numbers < ap->firstPacket are candidates. */
+
+ clock_GetTime(&now);
+
for (queue_Scan(&call->tq, tp, nxp, rx_packet)) {
if (tp->header.seq >= first)
break;
call->tfirst = tp->header.seq + 1;
- rxi_ComputePeerNetStats(call, tp, ap, np);
+ rxi_ComputePeerNetStats(call, tp, ap, np, &now);
if (!(tp->flags & RX_PKTFLAG_ACKED)) {
newAckCount++;
}
if (tp->header.seq >= first)
#endif /* RX_ENABLE_LOCKS */
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- rxi_ComputePeerNetStats(call, tp, ap, np);
+ rxi_ComputePeerNetStats(call, tp, ap, np, &now);
/* Set the acknowledge flag per packet based on the
* information in the ack packet. An acknowlegded packet can
missing = 1;
}
} else {
- tp->flags &= ~RX_PKTFLAG_ACKED;
- missing = 1;
+ if (tp->flags & RX_PKTFLAG_ACKED) {
+ tp->flags &= ~RX_PKTFLAG_ACKED;
+ missing = 1;
+ }
}
/*
int haveEvent;
int nXmitPackets;
int maxXmitPackets;
- struct rx_packet **xmitList;
int resending = 0;
/* If rxi_Start is being called as a result of a resend event,
rxi_WaitforTQBusy(call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
call->flags &= ~RX_CALL_FAST_RECOVER_WAIT;
- call->flags |= RX_CALL_FAST_RECOVER;
- if (peer->maxDgramPackets > 1) {
- call->MTU = RX_JUMBOBUFFERSIZE + RX_HEADER_SIZE;
- } else {
- call->MTU = MIN(peer->natMTU, peer->maxMTU);
- }
- call->ssthresh = MAX(4, MIN((int)call->cwind, (int)call->twind)) >> 1;
- call->nDgramPackets = 1;
- call->cwind = 1;
- call->nextCwind = 1;
- call->nAcks = 0;
- call->nNacks = 0;
- MUTEX_ENTER(&peer->peer_lock);
- peer->MTU = call->MTU;
- peer->cwind = call->cwind;
- peer->nDgramPackets = 1;
- peer->congestSeq++;
- call->congestSeq = peer->congestSeq;
- MUTEX_EXIT(&peer->peer_lock);
- /* Clear retry times on packets. Otherwise, it's possible for
- * some packets in the queue to force resends at rates faster
- * than recovery rates.
- */
- for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
- if (!(p->flags & RX_PKTFLAG_ACKED)) {
- clock_Zero(&p->retryTime);
- }
- }
+#ifdef AFS_GLOBAL_RXLOCK_KERNEL
+ if (call->error) {
+ if (rx_stats_active)
+ rx_atomic_inc(&rx_tq_debug.rxi_start_in_error);
+ return;
+ }
+#endif
+ call->flags |= RX_CALL_FAST_RECOVER;
+
+ if (peer->maxDgramPackets > 1) {
+ call->MTU = RX_JUMBOBUFFERSIZE + RX_HEADER_SIZE;
+ } else {
+ call->MTU = MIN(peer->natMTU, peer->maxMTU);
+ }
+ call->ssthresh = MAX(4, MIN((int)call->cwind, (int)call->twind)) >> 1;
+ call->nDgramPackets = 1;
+ call->cwind = 1;
+ call->nextCwind = 1;
+ call->nAcks = 0;
+ call->nNacks = 0;
+ MUTEX_ENTER(&peer->peer_lock);
+ peer->MTU = call->MTU;
+ peer->cwind = call->cwind;
+ peer->nDgramPackets = 1;
+ peer->congestSeq++;
+ call->congestSeq = peer->congestSeq;
+ MUTEX_EXIT(&peer->peer_lock);
+ /* Clear retry times on packets. Otherwise, it's possible for
+ * some packets in the queue to force resends at rates faster
+ * than recovery rates.
+ */
+ for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
+ if (!(p->flags & RX_PKTFLAG_ACKED)) {
+ clock_Zero(&p->retryTime);
+ }
+ }
}
if (call->error) {
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
nXmitPackets = 0;
maxXmitPackets = MIN(call->twind, call->cwind);
- xmitList = (struct rx_packet **)
-#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
- /* XXXX else we must drop any mtx we hold */
- afs_osi_Alloc_NoSleep(maxXmitPackets * sizeof(struct rx_packet *));
-#else
- osi_Alloc(maxXmitPackets * sizeof(struct rx_packet *));
-#endif
- if (xmitList == NULL)
- osi_Panic("rxi_Start, failed to allocate xmit list");
for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
/* We shouldn't be sending packets if a thread is waiting
/* Transmit the packet if it needs to be sent. */
if (!clock_Lt(&now, &p->retryTime)) {
if (nXmitPackets == maxXmitPackets) {
- rxi_SendXmitList(call, xmitList, nXmitPackets,
- istack, &now, &retryTime,
- resending);
- osi_Free(xmitList, maxXmitPackets *
- sizeof(struct rx_packet *));
+ rxi_SendXmitList(call, call->xmitList,
+ nXmitPackets, istack, &now,
+ &retryTime, resending);
goto restart;
}
dpf(("call %d xmit packet %"AFS_PTR_FMT" now %u.%06u retryTime %u.%06u nextRetry %u.%06u\n",
now.sec, now.usec,
p->retryTime.sec, p->retryTime.usec,
retryTime.sec, retryTime.usec));
- xmitList[nXmitPackets++] = p;
+ call->xmitList[nXmitPackets++] = p;
}
}
/* xmitList now hold pointers to all of the packets that are
* ready to send. Now we loop to send the packets */
if (nXmitPackets > 0) {
- rxi_SendXmitList(call, xmitList, nXmitPackets, istack,
- &now, &retryTime, resending);
+ rxi_SendXmitList(call, call->xmitList, nXmitPackets,
+ istack, &now, &retryTime, resending);
}
- osi_Free(xmitList,
- maxXmitPackets * sizeof(struct rx_packet *));
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
/*
{
struct rx_connection *conn = call->conn;
afs_uint32 now;
- afs_uint32 deadTime;
+ afs_uint32 deadTime, idleDeadTime = 0, hardDeadTime = 0;
+ afs_uint32 fudgeFactor;
int cerror = 0;
int newmtu = 0;
return 0;
}
#endif
- /* dead time + RTT + 8*MDEV, rounded up to next second. */
- deadTime =
- (((afs_uint32) conn->secondsUntilDead << 10) +
- ((afs_uint32) conn->peer->rtt >> 3) +
- ((afs_uint32) conn->peer->rtt_dev << 1) + 1023) >> 10;
+ /* RTT + 8*MDEV, rounded up to the next second. */
+ fudgeFactor = (((afs_uint32) conn->peer->rtt >> 3) +
+ ((afs_uint32) conn->peer->rtt_dev << 1) + 1023) >> 10;
+
+ deadTime = conn->secondsUntilDead + fudgeFactor;
now = clock_Sec();
/* These are computed to the second (+- 1 second). But that's
* good enough for these values, which should be a significant
* to pings; active calls are simply flagged in error, so the
* attached process can die reasonably gracefully. */
}
+
+ if (conn->idleDeadTime) {
+ idleDeadTime = conn->idleDeadTime + fudgeFactor;
+ }
+
/* see if we have a non-activity timeout */
- if (call->startWait && conn->idleDeadTime
- && ((call->startWait + conn->idleDeadTime) < now) &&
+ if (call->startWait && idleDeadTime
+ && ((call->startWait + idleDeadTime) < now) &&
(call->flags & RX_CALL_READER_WAIT)) {
if (call->state == RX_STATE_ACTIVE) {
cerror = RX_CALL_TIMEOUT;
goto mtuout;
}
}
- if (call->lastSendData && conn->idleDeadTime && (conn->idleDeadErr != 0)
- && ((call->lastSendData + conn->idleDeadTime) < now)) {
+ if (call->lastSendData && idleDeadTime && (conn->idleDeadErr != 0)
+ && ((call->lastSendData + idleDeadTime) < now)) {
if (call->state == RX_STATE_ACTIVE) {
cerror = conn->idleDeadErr;
goto mtuout;
}
}
+
+    /* BUG FIX: test the connection's configured hardDeadTime, not the local
+     * accumulator -- the local was initialized to 0 and has not been
+     * assigned yet, so the original condition was always false and the
+     * hard timeout below could never trigger. */
+    if (conn->hardDeadTime) {
+	hardDeadTime = conn->hardDeadTime + fudgeFactor;
+    }
+
/* see if we have a hard timeout */
- if (conn->hardDeadTime
- && (now > (conn->hardDeadTime + call->startTime.sec))) {
+ if (hardDeadTime
+ && (now > (hardDeadTime + call->startTime.sec))) {
if (call->state == RX_STATE_ACTIVE)
rxi_CallError(call, RX_CALL_TIMEOUT);
return -1;
/* rxi_ComputeRoundTripTime is called with peer locked. */
/* sentp and/or peer may be null */
-void
+static void
rxi_ComputeRoundTripTime(struct rx_packet *p,
struct clock *sentp,
- struct rx_peer *peer)
+ struct rx_peer *peer,
+ struct clock *now)
{
struct clock thisRtt, *rttp = &thisRtt;
-
int rtt_timeout;
- clock_GetTime(rttp);
+ thisRtt = *now;
- if (clock_Lt(rttp, sentp)) {
- clock_Zero(rttp);
+ if (clock_Lt(rttp, sentp))
return; /* somebody set the clock back, don't count this time. */
- }
+
clock_Sub(rttp, sentp);
dpf(("rxi_ComputeRoundTripTime(call=%d packet=%"AFS_PTR_FMT" rttp=%d.%06d sec)\n",
p->header.callNumber, p, rttp->sec, rttp->usec));