rx: Don't let timeouts force fast recovery
author: Simon Wilkinson <sxw@your-file-system.com>
Mon, 25 Oct 2010 09:14:12 +0000 (10:14 +0100)
committer: Derrick Brashear <shadow@dementia.org>
Tue, 26 Oct 2010 15:11:51 +0000 (08:11 -0700)
The current RX implementation goes into fast recovery whenever a
timeout occurs. This is incredibly wasteful, particularly on fast
connections. So, remove this in favour of TCP style behaviour.

Change-Id: I7afc08b69e7e1df80a38ac731af57ce91072a184
Reviewed-on: http://gerrit.openafs.org/3138
Reviewed-by: Jeffrey Altman <jaltman@openafs.org>
Tested-by: Derrick Brashear <shadow@dementia.org>
Reviewed-by: Derrick Brashear <shadow@dementia.org>

src/rx/rx.c

index a7d1c00..773c755 100644 (file)
@@ -5638,7 +5638,6 @@ rxi_Start(struct rxevent *event,
 
     struct rx_packet *p;
     struct rx_packet *nxp;     /* Next pointer for queue_Scan */
-    struct rx_peer *peer = call->conn->peer;
     struct clock now, usenow, retryTime;
     int haveEvent;
     int nXmitPackets;
@@ -5657,53 +5656,8 @@ rxi_Start(struct rxevent *event,
            /* Nothing to do */
            return;
        }
-       /* Timeouts trigger congestion recovery */
-#ifdef  AFS_GLOBAL_RXLOCK_KERNEL
-       if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
-           /* someone else is waiting to start recovery */
-           return;
-       }
-       call->flags |= RX_CALL_FAST_RECOVER_WAIT;
-       rxi_WaitforTQBusy(call);
-#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
-       call->flags &= ~RX_CALL_FAST_RECOVER_WAIT;
-#ifdef AFS_GLOBAL_RXLOCK_KERNEL
-        if (call->error) {
-            if (rx_stats_active)
-                rx_atomic_inc(&rx_tq_debug.rxi_start_in_error);
-            return;
-        }
-#endif
-        call->flags |= RX_CALL_FAST_RECOVER;
-
-        if (peer->maxDgramPackets > 1) {
-            call->MTU = RX_JUMBOBUFFERSIZE + RX_HEADER_SIZE;
-        } else {
-            call->MTU = MIN(peer->natMTU, peer->maxMTU);
-        }
-        call->ssthresh = MAX(4, MIN((int)call->cwind, (int)call->twind)) >> 1;
-        call->nDgramPackets = 1;
-        call->cwind = 1;
-        call->nextCwind = 1;
-        call->nAcks = 0;
-        call->nNacks = 0;
-        MUTEX_ENTER(&peer->peer_lock);
-        peer->MTU = call->MTU;
-        peer->cwind = call->cwind;
-        peer->nDgramPackets = 1;
-        peer->congestSeq++;
-        call->congestSeq = peer->congestSeq;
-        MUTEX_EXIT(&peer->peer_lock);
-        /* Clear retry times on packets. Otherwise, it's possible for
-         * some packets in the queue to force resends at rates faster
-         * than recovery rates.
-         */
-        for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
-            if (!(p->flags & RX_PKTFLAG_ACKED)) {
-                clock_Zero(&p->retryTime);
-            }
-        }
     }
+
     if (call->error) {
 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
         if (rx_stats_active)