# include "h/socket.h"
# endif
# include "netinet/in.h"
-# ifdef AFS_SUN58_ENV
+# ifdef AFS_SUN5_ENV
# include "netinet/ip6.h"
-# endif
-# ifdef AFS_SUN57_ENV
# include "inet/common.h"
# include "inet/ip.h"
# include "inet/ip_ire.h"
static void rxi_ComputeRoundTripTime(struct rx_packet *, struct rx_ackPacket *,
struct rx_call *, struct rx_peer *,
struct clock *);
+static void rxi_Resend(struct rxevent *event, void *arg0, void *arg1,
+ int istack);
#ifdef RX_ENABLE_LOCKS
static void rxi_SetAcksInTransmitQueue(struct rx_call *call);
#if defined(RX_ENABLE_LOCKS)
static afs_kmutex_t rx_rpc_stats;
-static void rxi_StartUnlocked(struct rxevent *event, void *call,
- void *arg1, int istack);
#endif
/* We keep a "last conn pointer" in rxi_FindConnection. The odds are
if (lastPacket && call->conn->type == RX_CLIENT_CONNECTION)
clock_Addmsec(&retryTime, 400);
-#ifdef RX_ENABLE_LOCKS
MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_RESEND);
MUTEX_EXIT(&rx_refcnt_mutex);
- call->resendEvent = rxevent_PostNow2(&retryTime, &now, rxi_StartUnlocked,
+ call->resendEvent = rxevent_PostNow2(&retryTime, &now, rxi_Resend,
call, 0, istack);
-#else /* RX_ENABLE_LOCKS */
- call->resendEvent = rxevent_PostNow2(&retryTime, &now, rxi_Start,
- call, 0, istack);
-#endif /* RX_ENABLE_LOCKS */
}
/*!
* Send an ack when requested by the peer, or once every
* rxi_SoftAckRate packets until the last packet has been
* received. Always send a soft ack for the last packet in
- * the server's reply.
- *
- * If there was more than one packet received for the call
- * and we have received all of them, immediately send an
- * RX_PACKET_TYPE_ACKALL packet so that the peer can empty
- * its packet transmit queue and cancel all resend events.
- *
- * When there is only one packet in the call there is a
- * chance that we can race with Ping ACKs sent as part of
- * connection establishment if the udp packets are delivered
- * out of order. When the race occurs, a two second delay
- * will occur while waiting for a new Ping ACK to be sent.
- */
- if (!isFirst && (call->flags & RX_CALL_RECEIVE_DONE)) {
- rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
- rxi_AckAll(NULL, call, 0);
- } else if (ackNeeded) {
+ * the server's reply. */
+ if (ackNeeded) {
rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, ackNeeded, istack);
} else if (call->nSoftAcks > (u_short) rxi_SoftAckRate) {
call->delayedAckEvent =
rxevent_PostNow(&when, &now, rxi_SendDelayedAck, call, 0);
}
+ } else if (call->flags & RX_CALL_RECEIVE_DONE) {
+ rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
}
return np;
rxi_rto_packet_acked(call, istack);
if (call->flags & RX_CALL_FAST_RECOVER) {
- if (nNacked) {
+ if (newAckCount == 0) {
call->cwind = MIN((int)(call->cwind + 1), rx_maxSendWindow);
} else {
call->flags &= ~RX_CALL_FAST_RECOVER;
call->nCwindAcks = 0;
} else if (nNacked && call->nNacks >= (u_short) rx_nackThreshold) {
/* Three negative acks in a row trigger congestion recovery */
-#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- MUTEX_EXIT(&peer->peer_lock);
- if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
- /* someone else is waiting to start recovery */
- return np;
- }
- call->flags |= RX_CALL_FAST_RECOVER_WAIT;
- rxi_WaitforTQBusy(call);
- MUTEX_ENTER(&peer->peer_lock);
-#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- call->flags &= ~RX_CALL_FAST_RECOVER_WAIT;
call->flags |= RX_CALL_FAST_RECOVER;
call->ssthresh = MAX(4, MIN((int)call->cwind, (int)call->twind)) >> 1;
call->cwind =
rxi_ClearTransmitQueue(call, 0);
rxevent_Cancel(call->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE);
} else if (!queue_IsEmpty(&call->tq)) {
- rxi_Start(0, call, 0, istack);
+ rxi_Start(call, istack);
}
return np;
}
int istack)
{
int i;
+ int recovery;
struct xmitlist working;
struct xmitlist last;
working.len = 0;
working.resending = 0;
+ recovery = call->flags & RX_CALL_FAST_RECOVER;
+
for (i = 0; i < len; i++) {
/* Does the current packet force us to flush the current list? */
if (working.len > 0
rxi_SendList(call, &last, istack, 1);
/* If the call enters an error state stop sending, or if
* we entered congestion recovery mode, stop sending */
- if (call->error || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
+ if (call->error
+ || (!recovery && (call->flags & RX_CALL_FAST_RECOVER)))
return;
}
last = working;
/* If the call enters an error state stop sending, or if
* we entered congestion recovery mode, stop sending */
if (call->error
- || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
+ || (!recovery && (call->flags & RX_CALL_FAST_RECOVER)))
return;
}
last = working;
rxi_SendList(call, &last, istack, morePackets);
/* If the call enters an error state stop sending, or if
* we entered congestion recovery mode, stop sending */
- if (call->error || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
+ if (call->error
+ || (!recovery && (call->flags & RX_CALL_FAST_RECOVER)))
return;
}
if (morePackets) {
}
}
-#ifdef RX_ENABLE_LOCKS
-/* Call rxi_Start, below, but with the call lock held. */
-void
-rxi_StartUnlocked(struct rxevent *event,
-                  void *arg0, void *arg1, int istack)
+/* Per-call retransmission event handler.  Fired when the call's RTO
+ * expires: marks every unacknowledged packet in the transmit queue as
+ * lost, enters loss recovery (backs off the RTO, collapses the
+ * congestion window), and then kicks rxi_Start to retransmit.
+ * Runs with the call lock taken here; arg1 is unused. */
+static void
+rxi_Resend(struct rxevent *event, void *arg0, void *arg1, int istack)
 {
     struct rx_call *call = arg0;
+    struct rx_peer *peer;
+    struct rx_packet *p, *nxp;
+    struct clock maxTimeout = { 60, 0 };
     MUTEX_ENTER(&call->lock);
-    rxi_Start(event, call, arg1, istack);
+
+    peer = call->conn->peer;
+
+    /* Make sure that the event pointer is removed from the call
+     * structure, since there is no longer a per-call retransmission
+     * event pending. */
+    if (event == call->resendEvent) {
+        MUTEX_ENTER(&rx_refcnt_mutex);
+        CALL_RELE(call, RX_CALL_REFCOUNT_RESEND);
+        MUTEX_EXIT(&rx_refcnt_mutex);
+        call->resendEvent = NULL;
+    }
+
+    if (rxi_busyChannelError && (call->flags & RX_CALL_PEER_BUSY)) {
+        rxi_CheckBusy(call);
+    }
+
+    if (queue_IsEmpty(&call->tq)) {
+        /* Nothing to do. This means that we've been raced, and that an
+         * ACK has come in between when we were triggered, and when we
+         * actually got to run. */
+        goto out;
+    }
+
+    /* We're in loss recovery */
+    call->flags |= RX_CALL_FAST_RECOVER;
+
+    /* Mark all of the pending packets in the queue as being lost */
+    for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
+        if (!(p->flags & RX_PKTFLAG_ACKED))
+            p->flags &= ~RX_PKTFLAG_SENT;
+    }
+
+    /* We're resending, so we double the timeout of the call. This will be
+     * dropped back down by the first successful ACK that we receive.
+     *
+     * We apply a maximum value here of 60 seconds
+     */
+    clock_Add(&call->rto, &call->rto);
+    if (clock_Gt(&call->rto, &maxTimeout))
+        call->rto = maxTimeout;
+
+    /* Packet loss is most likely due to congestion, so drop our window size
+     * and start again from the beginning.
+     *
+     * NOTE(review): the original hunk assigned call->MTU twice in a row
+     * here; the first store (RX_JUMBOBUFFERSIZE + RX_HEADER_SIZE) was dead,
+     * unconditionally clobbered by the MIN(natMTU, maxMTU) store.  The dead
+     * store is dropped, which preserves the patch's actual behavior.  If an
+     * if/else was intended (jumbogram MTU when maxDgramPackets > 1, clamped
+     * MTU otherwise), confirm against the pre-refactor rxi_Start code. */
+    if (peer->maxDgramPackets > 1) {
+        call->MTU = MIN(peer->natMTU, peer->maxMTU);
+    }
+    call->ssthresh = MAX(4, MIN((int)call->cwind, (int)call->twind)) >> 1;
+    call->nDgramPackets = 1;
+    call->cwind = 1;
+    call->nextCwind = 1;
+    call->nAcks = 0;
+    call->nNacks = 0;
+    MUTEX_ENTER(&peer->peer_lock);
+    peer->MTU = call->MTU;
+    peer->cwind = call->cwind;
+    peer->nDgramPackets = 1;
+    peer->congestSeq++;
+    call->congestSeq = peer->congestSeq;
+    MUTEX_EXIT(&peer->peer_lock);
+
+    rxi_Start(call, istack);
+
+out:
     MUTEX_EXIT(&call->lock);
 }
-#endif /* RX_ENABLE_LOCKS */
/* This routine is called when new packets are readied for
* transmission and when retransmission may be necessary, or when the
* better optimized for new packets, the usual case, now that we've
* got rid of queues of send packets. XXXXXXXXXXX */
void
-rxi_Start(struct rxevent *event,
- void *arg0, void *arg1, int istack)
+rxi_Start(struct rx_call *call, int istack)
{
- struct rx_call *call = arg0;
struct rx_packet *p;
struct rx_packet *nxp; /* Next pointer for queue_Scan */
int nXmitPackets;
int maxXmitPackets;
- /* If rxi_Start is being called as a result of a resend event,
- * then make sure that the event pointer is removed from the call
- * structure, since there is no longer a per-call retransmission
- * event pending. */
- if (event && event == call->resendEvent) {
- MUTEX_ENTER(&rx_refcnt_mutex);
- CALL_RELE(call, RX_CALL_REFCOUNT_RESEND);
- MUTEX_EXIT(&rx_refcnt_mutex);
- call->resendEvent = NULL;
-
- if (rxi_busyChannelError && (call->flags & RX_CALL_PEER_BUSY)) {
- rxi_CheckBusy(call);
- }
-
- if (queue_IsEmpty(&call->tq)) {
- /* Nothing to do. This means that we've been raced, and that an
- * ACK has come in between when we were triggered, and when we
- * actually got to run. */
- return;
- }
-
- /* Mark all of the pending packets in the queue as being lost */
- for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
- if (!(p->flags & RX_PKTFLAG_ACKED))
- p->flags &= ~RX_PKTFLAG_SENT;
- }
- }
-
if (call->error) {
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
if (rx_stats_active)
nXmitPackets = 0;
maxXmitPackets = MIN(call->twind, call->cwind);
for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
- if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
- /* We shouldn't be sending packets if a thread is waiting
- * to initiate congestion recovery */
- dpf(("call %d waiting to initiate fast recovery\n",
- *(call->callNumber)));
- break;
- }
- if ((nXmitPackets)
- && (call->flags & RX_CALL_FAST_RECOVER)) {
- /* Only send one packet during fast recovery */
- dpf(("call %d restricted to one packet per send during fast recovery\n",
- *(call->callNumber)));
- break;
- }
#ifdef RX_TRACK_PACKETS
if ((p->flags & RX_PKTFLAG_FREE)
|| (!queue_IsEnd(&call->tq, nxp)
}
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- /*
- * TQ references no longer protected by this flag; they must remain
- * protected by the global lock.
- */
- if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
- call->flags &= ~RX_CALL_TQ_BUSY;
- rxi_WakeUpTransmitQueue(call);
- return;
- }
if (call->error) {
/* We went into the error state while sending packets. Now is
* the time to reset the call. This will also inform the using
if (now > (call->lastReceiveTime + deadTime)) {
if (call->state == RX_STATE_ACTIVE) {
#ifdef ADAPT_PMTU
-#if defined(KERNEL) && defined(AFS_SUN57_ENV)
+#if defined(KERNEL) && defined(AFS_SUN5_ENV)
ire_t *ire;
#if defined(AFS_SUN510_ENV) && defined(GLOBAL_NETSTACKID)
netstack_t *ns = netstack_find_by_stackid(GLOBAL_NETSTACKID);