/* RX: Extended Remote Procedure Call */
#include <afsconfig.h>
-#ifdef KERNEL
#include "afs/param.h"
-#else
-#include <afs/param.h>
-#endif
-
#ifdef KERNEL
-#include "afs/sysincludes.h"
-#include "afsincludes.h"
-#ifndef UKERNEL
-#include "h/types.h"
-#include "h/time.h"
-#include "h/stat.h"
-#ifdef AFS_OSF_ENV
-#include <net/net_globals.h>
-#endif /* AFS_OSF_ENV */
-#ifdef AFS_LINUX20_ENV
-#include "h/socket.h"
-#endif
-#include "netinet/in.h"
-#ifdef AFS_SUN57_ENV
-#include "inet/common.h"
-#include "inet/ip.h"
-#include "inet/ip_ire.h"
-#endif
-#include "afs/afs_args.h"
-#include "afs/afs_osi.h"
-#ifdef RX_KERNEL_TRACE
-#include "rx_kcommon.h"
-#endif
-#if (defined(AFS_AUX_ENV) || defined(AFS_AIX_ENV))
-#include "h/systm.h"
-#endif
-#ifdef RXDEBUG
-#undef RXDEBUG /* turn off debugging */
-#endif /* RXDEBUG */
-#if defined(AFS_SGI_ENV)
-#include "sys/debug.h"
-#endif
-#include "afsint.h"
-#ifdef AFS_OSF_ENV
-#undef kmem_alloc
-#undef kmem_free
-#undef mem_alloc
-#undef mem_free
-#endif /* AFS_OSF_ENV */
-#else /* !UKERNEL */
-#include "afs/sysincludes.h"
-#include "afsincludes.h"
-#endif /* !UKERNEL */
-#include "afs/lock.h"
-#include "rx_kmutex.h"
-#include "rx_kernel.h"
-#include "rx_clock.h"
-#include "rx_queue.h"
-#include "rx.h"
-#include "rx_globals.h"
-#include "rx_trace.h"
-#include "rx_atomic.h"
-#include "rx_internal.h"
-#include "rx_stats.h"
-#define AFSOP_STOP_RXCALLBACK 210 /* Stop CALLBACK process */
-#define AFSOP_STOP_AFS 211 /* Stop AFS process */
-#define AFSOP_STOP_BKG 212 /* Stop BKG process */
-#include "afsint.h"
+# include "afs/sysincludes.h"
+# include "afsincludes.h"
+# ifndef UKERNEL
+# include "h/types.h"
+# include "h/time.h"
+# include "h/stat.h"
+# ifdef AFS_LINUX20_ENV
+# include "h/socket.h"
+# endif
+# include "netinet/in.h"
+# ifdef AFS_SUN58_ENV
+# include "netinet/ip6.h"
+# endif
+# ifdef AFS_SUN57_ENV
+# include "inet/common.h"
+# include "inet/ip.h"
+# include "inet/ip_ire.h"
+# endif
+# include "afs/afs_args.h"
+# include "afs/afs_osi.h"
+# ifdef RX_KERNEL_TRACE
+# include "rx_kcommon.h"
+# endif
+# if defined(AFS_AIX_ENV)
+# include "h/systm.h"
+# endif
+# ifdef RXDEBUG
+# undef RXDEBUG /* turn off debugging */
+# endif /* RXDEBUG */
+# if defined(AFS_SGI_ENV)
+# include "sys/debug.h"
+# endif
+# else /* !UKERNEL */
+# include "afs/sysincludes.h"
+# include "afsincludes.h"
+# endif /* !UKERNEL */
+# include "afs/lock.h"
+# include "rx_kmutex.h"
+# include "rx_kernel.h"
+# define AFSOP_STOP_RXCALLBACK 210 /* Stop CALLBACK process */
+# define AFSOP_STOP_AFS 211 /* Stop AFS process */
+# define AFSOP_STOP_BKG 212 /* Stop BKG process */
extern afs_int32 afs_termState;
-#ifdef AFS_AIX41_ENV
-#include "sys/lockl.h"
-#include "sys/lock_def.h"
-#endif /* AFS_AIX41_ENV */
+# ifdef AFS_AIX41_ENV
+# include "sys/lockl.h"
+# include "sys/lock_def.h"
+# endif /* AFS_AIX41_ENV */
# include "afs/rxgen_consts.h"
#else /* KERNEL */
+# include <roken.h>
# include <sys/types.h>
# include <string.h>
# include <stdarg.h>
# ifdef HAVE_STDINT_H
# include <stdint.h>
# endif
-#ifdef AFS_NT40_ENV
-# include <stdlib.h>
-# include <fcntl.h>
-# include <afs/afsutil.h>
-# include <WINNT\afsreg.h>
-#else
-# include <sys/socket.h>
-# include <sys/file.h>
-# include <netdb.h>
-# include <sys/stat.h>
-# include <netinet/in.h>
-# include <sys/time.h>
-#endif
-# include "rx.h"
+# ifdef AFS_NT40_ENV
+# include <stdlib.h>
+# include <fcntl.h>
+# include <afs/afsutil.h>
+# include <WINNT\afsreg.h>
+# else
+# include <sys/socket.h>
+# include <sys/file.h>
+# include <netdb.h>
+# include <sys/stat.h>
+# include <netinet/in.h>
+# include <sys/time.h>
+# endif
# include "rx_user.h"
-# include "rx_clock.h"
-# include "rx_queue.h"
-# include "rx_atomic.h"
-# include "rx_globals.h"
-# include "rx_trace.h"
-# include "rx_internal.h"
-# include "rx_stats.h"
-# include <afs/rxgen_consts.h>
#endif /* KERNEL */
+#include "rx.h"
+#include "rx_clock.h"
+#include "rx_queue.h"
+#include "rx_atomic.h"
+#include "rx_globals.h"
+#include "rx_trace.h"
+#include "rx_internal.h"
+#include "rx_stats.h"
+
+#include <afs/rxgen_consts.h>
+
#ifndef KERNEL
#ifdef AFS_PTHREAD_ENV
#ifndef AFS_NT40_ENV
/* Local static routines */
static void rxi_DestroyConnectionNoLock(struct rx_connection *conn);
-static void rxi_ComputeRoundTripTime(struct rx_packet *, struct clock *,
+static void rxi_ComputeRoundTripTime(struct rx_packet *, struct rx_ackPacket *,
struct rx_peer *, struct clock *);
#ifdef RX_ENABLE_LOCKS
MUTEX_INIT(&epoch_mutex, "epoch", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_init_mutex, "init", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_event_mutex, "event", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&des_init_mutex, "des", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&des_random_mutex, "random", MUTEX_DEFAULT, 0);
MUTEX_INIT(&osi_malloc_mutex, "malloc", MUTEX_DEFAULT, 0);
MUTEX_INIT(&event_handler_mutex, "event handler", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rxi_connCacheMutex, "conn cache", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rxkad_random_mutex, "rxkad random", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_debug_mutex, "debug", MUTEX_DEFAULT, 0);
- osi_Assert(pthread_cond_init
- (&rx_event_handler_cond, (const pthread_condattr_t *)0) == 0);
- osi_Assert(pthread_cond_init(&rx_listener_cond, (const pthread_condattr_t *)0)
- == 0);
+ CV_INIT(&rx_event_handler_cond, "evhand", CV_DEFAULT, 0);
+ CV_INIT(&rx_listener_cond, "rxlisten", CV_DEFAULT, 0);
+
osi_Assert(pthread_key_create(&rx_thread_id_key, NULL) == 0);
osi_Assert(pthread_key_create(&rx_ts_info_key, NULL) == 0);
for (i = 0; i < RX_MAXCALLS; i++) {
conn->twind[i] = rx_initSendWindow;
conn->rwind[i] = rx_initReceiveWindow;
+ conn->lastBusy[i] = 0;
}
RXS_NewConnection(securityObject, conn);
}
#endif
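+/* Wake up anyone blocked in rxi_WaitforTQBusy waiting for this call's
+ * transmit queue to be flushed. Called with the call lock held. */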
+static void
+rxi_WakeUpTransmitQueue(struct rx_call *call)
+{
+ if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
+ dpf(("call %"AFS_PTR_FMT" has %d waiters and flags %d\n",
+ call, call->tqWaiters, call->flags));
+#ifdef RX_ENABLE_LOCKS
+ osirx_AssertMine(&call->lock, "rxi_Start start");
+ CV_BROADCAST(&call->cv_tq);
+#else /* RX_ENABLE_LOCKS */
+ osi_rxWakeup(&call->tq);
+#endif /* RX_ENABLE_LOCKS */
+ }
+}
+
/* Start a new rx remote procedure call, on the specified connection.
* If wait is set to 1, wait for a free call channel; otherwise return
* 0. Maxtime gives the maximum number of seconds this call may take,
struct rx_call *
rx_NewCall(struct rx_connection *conn)
{
- int i, wait;
+ int i, wait, ignoreBusy = 1;
struct rx_call *call;
struct clock queueTime;
+ afs_uint32 leastBusy = 0;
SPLVAR;
clock_NewTime();
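+    /* Scan the call channels in up to two passes: the first pass skips any
+     * channel the peer has recently reported busy; if every otherwise-free
+     * channel is busy, a second pass (ignoreBusy = 0) settles for the
+     * least-recently-busy one. */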
for (i = 0; i < RX_MAXCALLS; i++) {
call = conn->call[i];
if (call) {
+ if (!ignoreBusy && conn->lastBusy[i] != leastBusy) {
+ /* we're not ignoring busy call slots; only look at the
+ * call slot that is the "least" busy */
+ continue;
+ }
+
if (call->state == RX_STATE_DALLY) {
MUTEX_ENTER(&call->lock);
if (call->state == RX_STATE_DALLY) {
+ if (ignoreBusy && conn->lastBusy[i]) {
+ /* if we're ignoring busy call slots, skip any ones that
+ * have lastBusy set */
+ if (leastBusy == 0 || conn->lastBusy[i] < leastBusy) {
+ leastBusy = conn->lastBusy[i];
+ }
+ MUTEX_EXIT(&call->lock);
+ continue;
+ }
+
/*
* We are setting the state to RX_STATE_RESET to
* ensure that no one else will attempt to use this
MUTEX_EXIT(&call->lock);
}
} else {
+ if (ignoreBusy && conn->lastBusy[i]) {
+ /* if we're ignoring busy call slots, skip any ones that
+ * have lastBusy set */
+ if (leastBusy == 0 || conn->lastBusy[i] < leastBusy) {
+ leastBusy = conn->lastBusy[i];
+ }
+ continue;
+ }
+
/* rxi_NewCall returns with mutex locked */
call = rxi_NewCall(conn, i);
MUTEX_ENTER(&rx_refcnt_mutex);
}
}
if (i < RX_MAXCALLS) {
+ conn->lastBusy[i] = 0;
break;
}
if (!wait)
continue;
+ if (leastBusy && ignoreBusy) {
+	/* we didn't find a usable call slot, but we did see at least one
+	 * 'busy' slot; look again and only use a slot with the 'least
+	 * busy' time */
+ ignoreBusy = 0;
+ continue;
+ }
MUTEX_ENTER(&conn->conn_data_lock);
conn->flags |= RX_CONN_MAKECALL_WAITING;
MUTEX_EXIT(&call->lock);
MUTEX_ENTER(&conn->conn_call_lock);
MUTEX_ENTER(&call->lock);
+
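+    /* The peer is not currently flagging this call as busy, so clear any
+     * stale busy timestamp recorded for its channel. */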
+ if (!(call->flags & RX_CALL_PEER_BUSY)) {
+ conn->lastBusy[call->channel] = 0;
+ }
+
MUTEX_ENTER(&conn->conn_data_lock);
conn->flags |= RX_CONN_BUSY;
if (conn->flags & RX_CONN_MAKECALL_WAITING) {
if (call->state == RX_STATE_DALLY || call->state == RX_STATE_HOLD)
(*call->callNumber)++;
+ /*
+ * We are setting the state to RX_STATE_RESET to
+ * ensure that no one else will attempt to use this
+ * call once we drop the refcnt lock. We must drop
+ * the refcnt lock before calling rxi_ResetCall
+ * because it cannot be held across acquiring the
+ * freepktQ lock. NewCall does the same.
+ */
+ call->state = RX_STATE_RESET;
+ MUTEX_EXIT(&rx_refcnt_mutex);
rxi_ResetCall(call, 0);
call->conn->call[channel] = (struct rx_call *)0;
- MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_ENTER(&rx_freeCallQueue_lock);
SET_CALL_QUEUE_LOCK(call, &rx_freeCallQueue_lock);
return conn;
}
+/**
+ * Timeout a call on a busy call channel if appropriate.
+ *
+ * @param[in] call The busy call.
+ *
+ * @pre 'call' is marked as busy (namely,
+ * call->conn->lastBusy[call->channel] != 0)
+ *
+ * @pre call->lock is held
+ *
+ * @note call->lock is dropped and reacquired
+ */
+static void
+rxi_CheckBusy(struct rx_call *call)
+{
+ struct rx_connection *conn = call->conn;
+ int channel = call->channel;
+ int freechannel = 0;
+ int i;
+ afs_uint32 callNumber = *call->callNumber;
+
+ MUTEX_EXIT(&call->lock);
+
+ MUTEX_ENTER(&conn->conn_call_lock);
+
+    /* Are there any other call slots on this conn that we should try? Look
+     * for slots that are empty or dallying, and are either non-busy, or were
+     * marked as busy longer than conn->secondsUntilDead seconds before this
+     * call started. */
+
+ for (i = 0; i < RX_MAXCALLS && !freechannel; i++) {
+ if (i == channel) {
+ /* only look at channels that aren't us */
+ continue;
+ }
+
+ if (conn->lastBusy[i]) {
+ /* if this channel looked busy too recently, don't look at it */
+ if (conn->lastBusy[i] >= call->startTime.sec) {
+ continue;
+ }
+ if (call->startTime.sec - conn->lastBusy[i] < conn->secondsUntilDead) {
+ continue;
+ }
+ }
+
+ if (conn->call[i]) {
+ struct rx_call *tcall = conn->call[i];
+ MUTEX_ENTER(&tcall->lock);
+ if (tcall->state == RX_STATE_DALLY) {
+ freechannel = 1;
+ }
+ MUTEX_EXIT(&tcall->lock);
+ } else {
+ freechannel = 1;
+ }
+ }
+
+ MUTEX_EXIT(&conn->conn_call_lock);
+
+ MUTEX_ENTER(&call->lock);
+
+    /* Since the call->lock and conn->conn_call_lock have been released it is
+     * possible that (1) the call may no longer be busy and/or (2) the call may
+     * have been reused by another waiting thread. Therefore, we must confirm
+     * that the call state has not changed before deciding whether to make
+     * this application thread retry by raising a timeout error. */
+
+ if (freechannel && *call->callNumber == callNumber &&
+ (call->flags & RX_CALL_PEER_BUSY)) {
+ /* Since 'freechannel' is set, there exists another channel in this
+ * rx_conn that the application thread might be able to use. We know
+ * that we have the correct call since callNumber is unchanged, and we
+ * know that the call is still busy. So, set the call error state to
+ * RX_CALL_TIMEOUT so the application can retry the request, presumably
+ * on a less-busy call channel. */
+
+ rxi_CallError(call, RX_CALL_TIMEOUT);
+ }
+}
+
/* There are two packet tracing routines available for testing and monitoring
* Rx. One is called just after every packet is received and the other is
 * called just before every packet is sent. Received packets have had their
MUTEX_EXIT(&rx_refcnt_mutex);
return np; /* xmitting; drop packet */
}
- case RX_PACKET_TYPE_BUSY:
- /* XXXX */
- break;
+ case RX_PACKET_TYPE_BUSY: {
+ struct clock busyTime;
+ clock_NewTime();
+ clock_GetTime(&busyTime);
+
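+	    /* conn_call_lock must be taken before the call lock, so drop and
+	     * reacquire the call lock around the update of lastBusy[]. */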
+ MUTEX_EXIT(&call->lock);
+
+ MUTEX_ENTER(&conn->conn_call_lock);
+ MUTEX_ENTER(&call->lock);
+ conn->lastBusy[call->channel] = busyTime.sec;
+ call->flags |= RX_CALL_PEER_BUSY;
+ MUTEX_EXIT(&call->lock);
+ MUTEX_EXIT(&conn->conn_call_lock);
+
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ return np;
+ }
+
case RX_PACKET_TYPE_ACKALL:
/* All packets acknowledged, so we can drop all packets previously
* readied for sending */
* the packet will be delivered to the user before any get time is required
* (if not, then the time won't actually be re-evaluated here). */
call->lastReceiveTime = clock_Sec();
+ /* we've received a legit packet, so the channel is not busy */
+ call->flags &= ~RX_CALL_PEER_BUSY;
MUTEX_EXIT(&call->lock);
MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
if (!(tp->flags & RX_PKTFLAG_ACKED)) {
newAckCount++;
- if (ap->reason != RX_ACK_DELAY &&
- clock_Eq(&tp->timeSent, &tp->firstSent)) {
- rxi_ComputeRoundTripTime(tp, &tp->timeSent, call->conn->peer,
- &now);
- }
+
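+		/* rxi_ComputeRoundTripTime decides for itself whether this
+		 * ACK can be used as an RTT sample; it rejects delayed ACKs,
+		 * ambiguous retransmissions, and jumbogram bodies. */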
+ rxi_ComputeRoundTripTime(tp, ap, call->conn->peer, &now);
}
#ifdef ADAPT_WINDOW
newAckCount++;
tp->flags |= RX_PKTFLAG_ACKED;
- if (ap->reason != RX_ACK_DELAY &&
- clock_Eq(&tp->timeSent, &tp->firstSent)) {
- rxi_ComputeRoundTripTime(tp, &tp->timeSent,
- call->conn->peer, &now);
- }
+ rxi_ComputeRoundTripTime(tp, ap, call->conn->peer, &now);
#ifdef ADAPT_WINDOW
rxi_ComputeRate(call->conn->peer, call, tp, np,
ap->reason);
call->tqc -=
#endif /* RXDEBUG_PACKET */
rxi_FreePackets(0, &call->tq);
- if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
-#ifdef RX_ENABLE_LOCKS
- CV_BROADCAST(&call->cv_tq);
-#else /* RX_ENABLE_LOCKS */
- osi_rxWakeup(&call->tq);
-#endif /* RX_ENABLE_LOCKS */
- }
+ rxi_WakeUpTransmitQueue(call);
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
call->flags &= ~RX_CALL_TQ_CLEARME;
}
}
call->flags = 0;
+ if ((flags & RX_CALL_PEER_BUSY)) {
+ /* The call channel is still busy; resetting the call doesn't change
+ * that */
+ call->flags |= RX_CALL_PEER_BUSY;
+ }
+
rxi_ClearReceiveQueue(call);
/* why init the queue if you just emptied it? queue_Init(&call->rq); */
return optionalPacket; /* Return packet for re-use by caller */
}
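+/* A group of packets to be transmitted together as a single datagram, plus
+ * a note of whether any packet in the group is a retransmission. */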
+struct xmitlist {
+ struct rx_packet **list;
+ int len;
+ int resending;
+};
+
/* Send all of the packets in the list in single datagram */
static void
-rxi_SendList(struct rx_call *call, struct rx_packet **list, int len,
- int istack, int moreFlag, struct clock *now,
- struct clock *retryTime, int resending)
+rxi_SendList(struct rx_call *call, struct xmitlist *xmit,
+ int istack, int moreFlag)
{
int i;
int requestAck = 0;
int lastPacket = 0;
+ struct clock now, retryTime;
struct rx_connection *conn = call->conn;
struct rx_peer *peer = conn->peer;
MUTEX_ENTER(&peer->peer_lock);
- peer->nSent += len;
- if (resending)
- peer->reSends += len;
+ peer->nSent += xmit->len;
+ if (xmit->resending)
+ peer->reSends += xmit->len;
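+    /* Sample the peer's current retransmit timeout while the peer lock is
+     * held; each packet's retryTime becomes now + this value, plus any
+     * per-packet backoff applied below. */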
+ retryTime = peer->timeout;
MUTEX_EXIT(&peer->peer_lock);
if (rx_stats_active) {
- if (resending)
- rx_atomic_add(&rx_stats.dataPacketsReSent, len);
+ if (xmit->resending)
+ rx_atomic_add(&rx_stats.dataPacketsReSent, xmit->len);
else
- rx_atomic_add(&rx_stats.dataPacketsSent, len);
+ rx_atomic_add(&rx_stats.dataPacketsSent, xmit->len);
}
- if (list[len - 1]->header.flags & RX_LAST_PACKET) {
+ clock_GetTime(&now);
+ clock_Add(&retryTime, &now);
+
+ if (xmit->list[xmit->len - 1]->header.flags & RX_LAST_PACKET) {
lastPacket = 1;
}
/* Set the packet flags and schedule the resend events */
/* Only request an ack for the last packet in the list */
- for (i = 0; i < len; i++) {
- list[i]->retryTime = *retryTime;
- if (list[i]->header.serial) {
+ for (i = 0; i < xmit->len; i++) {
+ struct rx_packet *packet = xmit->list[i];
+
+ packet->retryTime = retryTime;
+ if (packet->header.serial) {
/* Exponentially backoff retry times */
- if (list[i]->backoff < MAXBACKOFF) {
+ if (packet->backoff < MAXBACKOFF) {
/* so it can't stay == 0 */
- list[i]->backoff = (list[i]->backoff << 1) + 1;
+ packet->backoff = (packet->backoff << 1) + 1;
} else
- list[i]->backoff++;
- clock_Addmsec(&(list[i]->retryTime),
- ((afs_uint32) list[i]->backoff) << 8);
+ packet->backoff++;
+ clock_Addmsec(&(packet->retryTime),
+ ((afs_uint32) packet->backoff) << 8);
}
/* Wait a little extra for the ack on the last packet */
- if (lastPacket && !(list[i]->header.flags & RX_CLIENT_INITIATED)) {
- clock_Addmsec(&(list[i]->retryTime), 400);
+ if (lastPacket
+ && !(packet->header.flags & RX_CLIENT_INITIATED)) {
+ clock_Addmsec(&(packet->retryTime), 400);
}
/* Record the time sent */
- list[i]->timeSent = *now;
+ packet->timeSent = now;
/* Ask for an ack on retransmitted packets, on every other packet
* if the peer doesn't support slow start. Ask for an ack on every
* packet until the congestion window reaches the ack rate. */
- if (list[i]->header.serial) {
+ if (packet->header.serial) {
requestAck = 1;
} else {
/* improved RTO calculation- not Karn */
- list[i]->firstSent = *now;
+ packet->firstSent = now;
if (!lastPacket && (call->cwind <= (u_short) (conn->ackRate + 1)
|| (!(call->flags & RX_CALL_SLOW_START_OK)
- && (list[i]->header.seq & 1)))) {
+ && (packet->header.seq & 1)))) {
requestAck = 1;
}
}
/* Tag this packet as not being the last in this group,
* for the receiver's benefit */
- if (i < len - 1 || moreFlag) {
- list[i]->header.flags |= RX_MORE_PACKETS;
+ if (i < xmit->len - 1 || moreFlag) {
+ packet->header.flags |= RX_MORE_PACKETS;
}
-
- /* Install the new retransmit time for the packet, and
- * record the time sent */
- list[i]->timeSent = *now;
}
if (requestAck) {
- list[len - 1]->header.flags |= RX_REQUEST_ACK;
+ xmit->list[xmit->len - 1]->header.flags |= RX_REQUEST_ACK;
}
/* Since we're about to send a data packet to the peer, it's
MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_SEND);
MUTEX_EXIT(&rx_refcnt_mutex);
- if (len > 1) {
- rxi_SendPacketList(call, conn, list, len, istack);
+ if (xmit->len > 1) {
+ rxi_SendPacketList(call, conn, xmit->list, xmit->len, istack);
} else {
- rxi_SendPacket(call, conn, list[0], istack);
+ rxi_SendPacket(call, conn, xmit->list[0], istack);
}
MUTEX_ENTER(&call->lock);
MUTEX_ENTER(&rx_refcnt_mutex);
* idle connections) */
conn->lastSendTime = call->lastSendTime = clock_Sec();
/* Let a set of retransmits trigger an idle timeout */
- if (!resending)
+ if (!xmit->resending)
call->lastSendData = call->lastSendTime;
}
* We always keep the last list we should have sent so we
* can set the RX_MORE_PACKETS flags correctly.
*/
+
static void
rxi_SendXmitList(struct rx_call *call, struct rx_packet **list, int len,
- int istack, struct clock *now, struct clock *retryTime,
- int resending)
+ int istack)
{
- int i, cnt, lastCnt = 0;
- struct rx_packet **listP, **lastP = 0;
+ int i;
+ struct xmitlist working;
+ struct xmitlist last;
+
struct rx_peer *peer = call->conn->peer;
int morePackets = 0;
- for (cnt = 0, listP = &list[0], i = 0; i < len; i++) {
+ memset(&last, 0, sizeof(struct xmitlist));
+ working.list = &list[0];
+ working.len = 0;
+ working.resending = 0;
+
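+    /* 'working' accumulates the datagram currently being built; 'last' lags
+     * one datagram behind so that its RX_MORE_PACKETS flag can be set
+     * correctly before it is sent. */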
+ for (i = 0; i < len; i++) {
/* Does the current packet force us to flush the current list? */
- if (cnt > 0
+ if (working.len > 0
&& (list[i]->header.serial || (list[i]->flags & RX_PKTFLAG_ACKED)
|| list[i]->length > RX_JUMBOBUFFERSIZE)) {
- if (lastCnt > 0) {
- rxi_SendList(call, lastP, lastCnt, istack, 1, now, retryTime,
- resending);
+
+ /* This sends the 'last' list and then rolls the current working
+ * set into the 'last' one, and resets the working set */
+
+ if (last.len > 0) {
+ rxi_SendList(call, &last, istack, 1);
/* If the call enters an error state stop sending, or if
* we entered congestion recovery mode, stop sending */
if (call->error || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
return;
}
- lastP = listP;
- lastCnt = cnt;
- listP = &list[i];
- cnt = 0;
+ last = working;
+ working.len = 0;
+ working.resending = 0;
+ working.list = &list[i];
}
/* Add the current packet to the list if it hasn't been acked.
* Otherwise adjust the list pointer to skip the current packet. */
if (!(list[i]->flags & RX_PKTFLAG_ACKED)) {
- cnt++;
+ working.len++;
+
+ if (list[i]->header.serial)
+ working.resending = 1;
+
/* Do we need to flush the list? */
- if (cnt >= (int)peer->maxDgramPackets
- || cnt >= (int)call->nDgramPackets || cnt >= (int)call->cwind
+ if (working.len >= (int)peer->maxDgramPackets
+ || working.len >= (int)call->nDgramPackets
+ || working.len >= (int)call->cwind
|| list[i]->header.serial
|| list[i]->length != RX_JUMBOBUFFERSIZE) {
- if (lastCnt > 0) {
- rxi_SendList(call, lastP, lastCnt, istack, 1, now,
- retryTime, resending);
+ if (last.len > 0) {
+ rxi_SendList(call, &last, istack, 1);
/* If the call enters an error state stop sending, or if
* we entered congestion recovery mode, stop sending */
if (call->error
|| (call->flags & RX_CALL_FAST_RECOVER_WAIT))
return;
}
- lastP = listP;
- lastCnt = cnt;
- listP = &list[i + 1];
- cnt = 0;
+ last = working;
+ working.len = 0;
+ working.resending = 0;
+ working.list = &list[i + 1];
}
} else {
- if (cnt != 0) {
+ if (working.len != 0) {
osi_Panic("rxi_SendList error");
}
- listP = &list[i + 1];
+ working.list = &list[i + 1];
}
}
* an acked packet. Since we always send retransmissions
* in a separate packet, we only need to check the first
* packet in the list */
- if (cnt > 0 && !(listP[0]->flags & RX_PKTFLAG_ACKED)) {
+ if (working.len > 0 && !(working.list[0]->flags & RX_PKTFLAG_ACKED)) {
morePackets = 1;
}
- if (lastCnt > 0) {
- rxi_SendList(call, lastP, lastCnt, istack, morePackets, now,
- retryTime, resending);
+ if (last.len > 0) {
+ rxi_SendList(call, &last, istack, morePackets);
/* If the call enters an error state stop sending, or if
* we entered congestion recovery mode, stop sending */
if (call->error || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
return;
}
if (morePackets) {
- rxi_SendList(call, listP, cnt, istack, 0, now, retryTime,
- resending);
+ rxi_SendList(call, &working, istack, 0);
}
- } else if (lastCnt > 0) {
- rxi_SendList(call, lastP, lastCnt, istack, 0, now, retryTime,
- resending);
+ } else if (last.len > 0) {
+ rxi_SendList(call, &last, istack, 0);
+	/* Packets which remain in 'working' are not sent by this invocation */
}
}
struct rx_packet *p;
struct rx_packet *nxp; /* Next pointer for queue_Scan */
- struct rx_peer *peer = call->conn->peer;
struct clock now, usenow, retryTime;
int haveEvent;
int nXmitPackets;
int maxXmitPackets;
- int resending = 0;
/* If rxi_Start is being called as a result of a resend event,
* then make sure that the event pointer is removed from the call
CALL_RELE(call, RX_CALL_REFCOUNT_RESEND);
MUTEX_EXIT(&rx_refcnt_mutex);
call->resendEvent = NULL;
- resending = 1;
+
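+	/* If the peer reported this call's channel busy, see whether another
+	 * channel on the connection is now free; if so, rxi_CheckBusy times
+	 * this call out so the application can retry on the free channel. */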
+ if ((call->flags & RX_CALL_PEER_BUSY)) {
+ rxi_CheckBusy(call);
+ }
+
if (queue_IsEmpty(&call->tq)) {
/* Nothing to do */
return;
}
- /* Timeouts trigger congestion recovery */
-#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
- /* someone else is waiting to start recovery */
- return;
- }
- call->flags |= RX_CALL_FAST_RECOVER_WAIT;
- rxi_WaitforTQBusy(call);
-#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- call->flags &= ~RX_CALL_FAST_RECOVER_WAIT;
-#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- if (call->error) {
- if (rx_stats_active)
- rx_atomic_inc(&rx_tq_debug.rxi_start_in_error);
- return;
- }
-#endif
- call->flags |= RX_CALL_FAST_RECOVER;
-
- if (peer->maxDgramPackets > 1) {
- call->MTU = RX_JUMBOBUFFERSIZE + RX_HEADER_SIZE;
- } else {
- call->MTU = MIN(peer->natMTU, peer->maxMTU);
- }
- call->ssthresh = MAX(4, MIN((int)call->cwind, (int)call->twind)) >> 1;
- call->nDgramPackets = 1;
- call->cwind = 1;
- call->nextCwind = 1;
- call->nAcks = 0;
- call->nNacks = 0;
- MUTEX_ENTER(&peer->peer_lock);
- peer->MTU = call->MTU;
- peer->cwind = call->cwind;
- peer->nDgramPackets = 1;
- peer->congestSeq++;
- call->congestSeq = peer->congestSeq;
- MUTEX_EXIT(&peer->peer_lock);
- /* Clear retry times on packets. Otherwise, it's possible for
- * some packets in the queue to force resends at rates faster
- * than recovery rates.
- */
- for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
- if (!(p->flags & RX_PKTFLAG_ACKED)) {
- clock_Zero(&p->retryTime);
- }
- }
}
+
if (call->error) {
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
if (rx_stats_active)
}
if (queue_IsNotEmpty(&call->tq)) { /* If we have anything to send */
- /* Get clock to compute the re-transmit time for any packets
- * in this burst. Note, if we back off, it's reasonable to
- * back off all of the packets in the same manner, even if
- * some of them have been retransmitted more times than more
- * recent additions.
- * Do a dance to avoid blocking after setting now. */
- MUTEX_ENTER(&peer->peer_lock);
- retryTime = peer->timeout;
- MUTEX_EXIT(&peer->peer_lock);
- clock_GetTime(&now);
- clock_Add(&retryTime, &now);
+ clock_GetTime(&now);
usenow = now;
+
/* Send (or resend) any packets that need it, subject to
* window restrictions and congestion burst control
* restrictions. Ask for an ack on the last packet sent in
if (!clock_Lt(&now, &p->retryTime)) {
if (nXmitPackets == maxXmitPackets) {
rxi_SendXmitList(call, call->xmitList,
- nXmitPackets, istack, &now,
- &retryTime, resending);
+ nXmitPackets, istack);
goto restart;
}
- dpf(("call %d xmit packet %"AFS_PTR_FMT" now %u.%06u retryTime %u.%06u nextRetry %u.%06u\n",
+ dpf(("call %d xmit packet %"AFS_PTR_FMT" now %u.%06u retryTime %u.%06u\n",
*(call->callNumber), p,
now.sec, now.usec,
- p->retryTime.sec, p->retryTime.usec,
- retryTime.sec, retryTime.usec));
+ p->retryTime.sec, p->retryTime.usec));
call->xmitList[nXmitPackets++] = p;
}
}
* ready to send. Now we loop to send the packets */
if (nXmitPackets > 0) {
rxi_SendXmitList(call, call->xmitList, nXmitPackets,
- istack, &now, &retryTime, resending);
+ istack);
}
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
*/
if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
call->flags &= ~RX_CALL_TQ_BUSY;
- if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
- dpf(("call %"AFS_PTR_FMT" has %d waiters and flags %d\n",
- call, call->tqWaiters, call->flags));
-#ifdef RX_ENABLE_LOCKS
- osirx_AssertMine(&call->lock, "rxi_Start start");
- CV_BROADCAST(&call->cv_tq);
-#else /* RX_ENABLE_LOCKS */
- osi_rxWakeup(&call->tq);
-#endif /* RX_ENABLE_LOCKS */
- }
+ rxi_WakeUpTransmitQueue(call);
return;
}
if (call->error) {
if (rx_stats_active)
rx_atomic_inc(&rx_tq_debug.rxi_start_aborted);
call->flags &= ~RX_CALL_TQ_BUSY;
- if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
- dpf(("call error %d while xmit %p has %d waiters and flags %d\n",
- call->error, call, call->tqWaiters, call->flags));
-#ifdef RX_ENABLE_LOCKS
- osirx_AssertMine(&call->lock, "rxi_Start middle");
- CV_BROADCAST(&call->cv_tq);
-#else /* RX_ENABLE_LOCKS */
- osi_rxWakeup(&call->tq);
-#endif /* RX_ENABLE_LOCKS */
- }
+ rxi_WakeUpTransmitQueue(call);
rxi_CallError(call, call->error);
return;
}
* protected by the global lock.
*/
call->flags &= ~RX_CALL_TQ_BUSY;
- if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
- dpf(("call %"AFS_PTR_FMT" has %d waiters and flags %d\n",
- call, call->tqWaiters, call->flags));
-#ifdef RX_ENABLE_LOCKS
- osirx_AssertMine(&call->lock, "rxi_Start end");
- CV_BROADCAST(&call->cv_tq);
-#else /* RX_ENABLE_LOCKS */
- osi_rxWakeup(&call->tq);
-#endif /* RX_ENABLE_LOCKS */
- }
+ rxi_WakeUpTransmitQueue(call);
} else {
call->flags |= RX_CALL_NEED_START;
}
}
}
- if (hardDeadTime) {
+ if (conn->hardDeadTime) {
hardDeadTime = conn->hardDeadTime + fudgeFactor;
}
{
struct rx_connection *conn = arg1;
struct rx_header theader;
- char tbuffer[1500];
+ char tbuffer[1 + sizeof(struct rx_header)];
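+    /* just large enough for an Rx header plus the one-byte payload this
+     * routine sends */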
struct sockaddr_in taddr;
char *tp;
char a[1] = { 0 };
}
-/* Compute round trip time of the packet provided, in *rttp.
- */
-
/* rxi_ComputeRoundTripTime is called with peer locked. */
-/* sentp and/or peer may be null */
static void
rxi_ComputeRoundTripTime(struct rx_packet *p,
- struct clock *sentp,
+ struct rx_ackPacket *ack,
struct rx_peer *peer,
struct clock *now)
{
- struct clock thisRtt, *rttp = &thisRtt;
+ struct clock thisRtt, *sentp;
int rtt_timeout;
+ int serial;
+
+ /* If the ACK is delayed, then do nothing */
+ if (ack->reason == RX_ACK_DELAY)
+ return;
+
+ /* On the wire, jumbograms are a single UDP packet. We shouldn't count
+ * their RTT multiple times, so only include the RTT of the last packet
+     * in a jumbogram. */
+ if (p->flags & RX_JUMBO_PACKET)
+ return;
+
+ /* Use the serial number to determine which transmission the ACK is for,
+ * and set the sent time to match this. If we have no serial number, then
+ * only use the ACK for RTT calculations if the packet has not been
+     * retransmitted.
+ */
+
+ serial = ntohl(ack->serial);
+ if (serial) {
+ if (serial == p->header.serial) {
+ sentp = &p->timeSent;
+ } else if (serial == p->firstSerial) {
+ sentp = &p->firstSent;
+ } else if (clock_Eq(&p->timeSent, &p->firstSent)) {
+ sentp = &p->firstSent;
+ } else
+ return;
+ } else {
+ if (clock_Eq(&p->timeSent, &p->firstSent)) {
+ sentp = &p->firstSent;
+ } else
+ return;
+ }
thisRtt = *now;
- if (clock_Lt(rttp, sentp))
+ if (clock_Lt(&thisRtt, sentp))
return; /* somebody set the clock back, don't count this time. */
- clock_Sub(rttp, sentp);
+ clock_Sub(&thisRtt, sentp);
dpf(("rxi_ComputeRoundTripTime(call=%d packet=%"AFS_PTR_FMT" rttp=%d.%06d sec)\n",
- p->header.callNumber, p, rttp->sec, rttp->usec));
+ p->header.callNumber, p, thisRtt.sec, thisRtt.usec));
- if (rttp->sec == 0 && rttp->usec == 0) {
+ if (clock_IsZero(&thisRtt)) {
/*
* The actual round trip time is shorter than the
* clock_GetTime resolution. It is most likely 1ms or 100ns.
* Since we can't tell which at the moment we will assume 1ms.
*/
- rttp->usec = 1000;
+ thisRtt.usec = 1000;
}
if (rx_stats_active) {
MUTEX_ENTER(&rx_stats_mutex);
- if (clock_Lt(rttp, &rx_stats.minRtt))
- rx_stats.minRtt = *rttp;
- if (clock_Gt(rttp, &rx_stats.maxRtt)) {
- if (rttp->sec > 60) {
+ if (clock_Lt(&thisRtt, &rx_stats.minRtt))
+ rx_stats.minRtt = thisRtt;
+ if (clock_Gt(&thisRtt, &rx_stats.maxRtt)) {
+ if (thisRtt.sec > 60) {
MUTEX_EXIT(&rx_stats_mutex);
return; /* somebody set the clock ahead */
}
- rx_stats.maxRtt = *rttp;
+ rx_stats.maxRtt = thisRtt;
}
- clock_Add(&rx_stats.totalRtt, rttp);
+ clock_Add(&rx_stats.totalRtt, &thisRtt);
rx_atomic_inc(&rx_stats.nRttSamples);
MUTEX_EXIT(&rx_stats_mutex);
}
* srtt' = srtt + (rtt - srtt)/8
*/
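+	/* peer->rtt is stored scaled by 8 (eighths of a millisecond), and
+	 * peer->rtt_dev scaled by 4; the shift by 3 below applies the 1/8
+	 * smoothing gain. */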
- delta = _8THMSEC(rttp) - peer->rtt;
+ delta = _8THMSEC(&thisRtt) - peer->rtt;
peer->rtt += (delta >> 3);
/*
* little, and I set deviance to half the rtt. In practice,
* deviance tends to approach something a little less than
* half the smoothed rtt. */
- peer->rtt = _8THMSEC(rttp) + 8;
+ peer->rtt = _8THMSEC(&thisRtt) + 8;
peer->rtt_dev = peer->rtt >> 2; /* rtt/2: they're scaled differently */
}
- /* the timeout is RTT + 4*MDEV but no less than rx_minPeerTimeout msec.
+ /* the timeout is RTT + 4*MDEV + rx_minPeerTimeout msec.
* This is because one end or the other of these connections is usually
* in a user process, and can be switched and/or swapped out. So on fast,
* reliable networks, the timeout would otherwise be too short. */
- rtt_timeout = MAX(((peer->rtt >> 3) + peer->rtt_dev), rx_minPeerTimeout);
+ rtt_timeout = ((peer->rtt >> 3) + peer->rtt_dev) + rx_minPeerTimeout;
clock_Zero(&(peer->timeout));
clock_Addmsec(&(peer->timeout), rtt_timeout);
peer->backedOff = 0;
dpf(("rxi_ComputeRoundTripTime(call=%d packet=%"AFS_PTR_FMT" rtt=%d ms, srtt=%d ms, rtt_dev=%d ms, timeout=%d.%06d sec)\n",
- p->header.callNumber, p, MSEC(rttp), peer->rtt >> 3, peer->rtt_dev >> 2, (peer->timeout.sec), (peer->timeout.usec)));
+ p->header.callNumber, p, MSEC(&thisRtt), peer->rtt >> 3, peer->rtt_dev >> 2, (peer->timeout.sec), (peer->timeout.usec)));
}