/* RX: Extended Remote Procedure Call */
#include <afsconfig.h>
-#ifdef KERNEL
-#include "afs/param.h"
-#else
#include <afs/param.h>
-#endif
-
#ifdef KERNEL
-#include "afs/sysincludes.h"
-#include "afsincludes.h"
-#ifndef UKERNEL
-#include "h/types.h"
-#include "h/time.h"
-#include "h/stat.h"
-#ifdef AFS_OSF_ENV
-#include <net/net_globals.h>
-#endif /* AFS_OSF_ENV */
-#ifdef AFS_LINUX20_ENV
-#include "h/socket.h"
-#endif
-#include "netinet/in.h"
-#ifdef AFS_SUN57_ENV
-#include "inet/common.h"
-#include "inet/ip.h"
-#include "inet/ip_ire.h"
-#endif
-#include "afs/afs_args.h"
-#include "afs/afs_osi.h"
-#ifdef RX_KERNEL_TRACE
-#include "rx_kcommon.h"
-#endif
-#if (defined(AFS_AUX_ENV) || defined(AFS_AIX_ENV))
-#include "h/systm.h"
-#endif
-#ifdef RXDEBUG
-#undef RXDEBUG /* turn off debugging */
-#endif /* RXDEBUG */
-#if defined(AFS_SGI_ENV)
-#include "sys/debug.h"
-#endif
-#include "afsint.h"
-#ifdef AFS_OSF_ENV
-#undef kmem_alloc
-#undef kmem_free
-#undef mem_alloc
-#undef mem_free
-#endif /* AFS_OSF_ENV */
-#else /* !UKERNEL */
-#include "afs/sysincludes.h"
-#include "afsincludes.h"
-#endif /* !UKERNEL */
-#include "afs/lock.h"
-#include "rx_kmutex.h"
-#include "rx_kernel.h"
-#include "rx_clock.h"
-#include "rx_queue.h"
-#include "rx.h"
-#include "rx_globals.h"
-#include "rx_trace.h"
-#include "rx_atomic.h"
-#include "rx_internal.h"
-#define AFSOP_STOP_RXCALLBACK 210 /* Stop CALLBACK process */
-#define AFSOP_STOP_AFS 211 /* Stop AFS process */
-#define AFSOP_STOP_BKG 212 /* Stop BKG process */
-#include "afsint.h"
+# include "afs/sysincludes.h"
+# include "afsincludes.h"
+# ifndef UKERNEL
+# include "h/types.h"
+# include "h/time.h"
+# include "h/stat.h"
+# ifdef AFS_LINUX20_ENV
+# include "h/socket.h"
+# endif
+# include "netinet/in.h"
+# ifdef AFS_SUN5_ENV
+# include "netinet/ip6.h"
+# include "inet/common.h"
+# include "inet/ip.h"
+# include "inet/ip_ire.h"
+# endif
+# include "afs/afs_args.h"
+# include "afs/afs_osi.h"
+# ifdef RX_KERNEL_TRACE
+# include "rx_kcommon.h"
+# endif
+# if defined(AFS_AIX_ENV)
+# include "h/systm.h"
+# endif
+# ifdef RXDEBUG
+# undef RXDEBUG /* turn off debugging */
+# endif /* RXDEBUG */
+# if defined(AFS_SGI_ENV)
+# include "sys/debug.h"
+# endif
+# else /* !UKERNEL */
+# include "afs/sysincludes.h"
+# include "afsincludes.h"
+# endif /* !UKERNEL */
+# include "afs/lock.h"
+# include "rx_kmutex.h"
+# include "rx_kernel.h"
+# define AFSOP_STOP_RXCALLBACK 210 /* Stop CALLBACK process */
+# define AFSOP_STOP_AFS 211 /* Stop AFS process */
+# define AFSOP_STOP_BKG 212 /* Stop BKG process */
extern afs_int32 afs_termState;
-#ifdef AFS_AIX41_ENV
-#include "sys/lockl.h"
-#include "sys/lock_def.h"
-#endif /* AFS_AIX41_ENV */
+# ifdef AFS_AIX41_ENV
+# include "sys/lockl.h"
+# include "sys/lock_def.h"
+# endif /* AFS_AIX41_ENV */
# include "afs/rxgen_consts.h"
#else /* KERNEL */
-# include <sys/types.h>
-# include <string.h>
-# include <stdarg.h>
-# include <errno.h>
-# ifdef HAVE_STDINT_H
-# include <stdint.h>
+# include <roken.h>
+
+# ifdef AFS_NT40_ENV
+# include <afs/afsutil.h>
+# include <WINNT\afsreg.h>
# endif
-#ifdef AFS_NT40_ENV
-# include <stdlib.h>
-# include <fcntl.h>
-# include <afs/afsutil.h>
-# include <WINNT\afsreg.h>
-#else
-# include <sys/socket.h>
-# include <sys/file.h>
-# include <netdb.h>
-# include <sys/stat.h>
-# include <netinet/in.h>
-# include <sys/time.h>
-#endif
-# include "rx.h"
+
# include "rx_user.h"
-# include "rx_clock.h"
-# include "rx_queue.h"
-# include "rx_atomic.h"
-# include "rx_globals.h"
-# include "rx_trace.h"
-# include "rx_internal.h"
-# include <afs/rxgen_consts.h>
#endif /* KERNEL */
+#include "rx.h"
+#include "rx_clock.h"
+#include "rx_queue.h"
+#include "rx_atomic.h"
+#include "rx_globals.h"
+#include "rx_trace.h"
+#include "rx_internal.h"
+#include "rx_stats.h"
+
+#include <afs/rxgen_consts.h>
+
#ifndef KERNEL
#ifdef AFS_PTHREAD_ENV
#ifndef AFS_NT40_ENV
/* Local static routines */
static void rxi_DestroyConnectionNoLock(struct rx_connection *conn);
+static void rxi_ComputeRoundTripTime(struct rx_packet *, struct rx_ackPacket *,
+ struct rx_call *, struct rx_peer *,
+ struct clock *);
+static void rxi_Resend(struct rxevent *event, void *arg0, void *arg1,
+ int istack);
+
#ifdef RX_ENABLE_LOCKS
static void rxi_SetAcksInTransmitQueue(struct rx_call *call);
#endif
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
struct rx_tq_debug {
- afs_int32 rxi_start_aborted; /* rxi_start awoke after rxi_Send in error. */
- afs_int32 rxi_start_in_error;
+ rx_atomic_t rxi_start_aborted; /* rxi_start awoke after rxi_Send in error.*/
+ rx_atomic_t rxi_start_in_error;
} rx_tq_debug;
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
+/* Constant delay time before sending an acknowledge of the last packet
+ * received. This is to avoid sending an extra acknowledge when the
+ * client is about to make another call, anyway, or the server is
+ * about to respond.
+ *
+ * The lastAckDelay may not exceed 400ms without causing peers to
+ * time out unnecessarily.
+ */
+struct clock rx_lastAckDelay = {0, 400000};
+
+/* Constant delay time before sending a soft ack when none was requested.
+ * This is to make sure we send soft acks before the sender times out.
+ * Normally we wait and send a hard ack when the receiver consumes the packet.
+ *
+ * This value has been 100ms in all shipping versions of OpenAFS. Changing it
+ * will require changes to the peer's RTT calculations.
+ */
+struct clock rx_softAckDelay = {0, 100000};
+
/*
* rxi_rpc_peer_stat_cnt counts the total number of peer stat structures
* currently allocated within rx. This number is used to allocate the
static unsigned int rxi_rpc_process_stat_cnt;
+/*
+ * rxi_busyChannelError is the error to return to the application when a call
+ * channel appears busy (inferred from the receipt of RX_PACKET_TYPE_BUSY
+ * packets on the channel), and there are other call channels in the
+ * connection that are not busy. If 0, we do not return errors upon receiving
+ * busy packets; we just keep trying on the same call channel until we hit a
+ * timeout.
+ */
+static afs_int32 rxi_busyChannelError = 0;
+
rx_atomic_t rx_nWaiting = RX_ATOMIC_INIT(0);
rx_atomic_t rx_nWaited = RX_ATOMIC_INIT(0);
afs_kmutex_t rx_atomic_mutex;
#endif
+/* Forward prototypes */
+static struct rx_call * rxi_NewCall(struct rx_connection *, int);
+
#ifdef AFS_PTHREAD_ENV
-#include <assert.h>
/*
* Use procedural initialization of mutexes/condition variables
* to ease NT porting
*/
-extern afs_kmutex_t rx_stats_mutex;
extern afs_kmutex_t rx_quota_mutex;
extern afs_kmutex_t rx_pthread_mutex;
extern afs_kmutex_t rx_packets_mutex;
+extern afs_kmutex_t rx_refcnt_mutex;
extern afs_kmutex_t des_init_mutex;
extern afs_kmutex_t des_random_mutex;
extern afs_kmutex_t rx_clock_mutex;
extern afs_kmutex_t rxi_connCacheMutex;
extern afs_kmutex_t rx_event_mutex;
-extern afs_kmutex_t osi_malloc_mutex;
extern afs_kmutex_t event_handler_mutex;
extern afs_kmutex_t listener_mutex;
extern afs_kmutex_t rx_if_init_mutex;
extern afs_kmutex_t rx_if_mutex;
-extern afs_kmutex_t rxkad_client_uid_mutex;
-extern afs_kmutex_t rxkad_random_mutex;
extern afs_kcondvar_t rx_event_handler_cond;
extern afs_kcondvar_t rx_listener_cond;
MUTEX_INIT(&rx_quota_mutex, "quota", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_pthread_mutex, "pthread", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_packets_mutex, "packets", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_refcnt_mutex, "refcnts", MUTEX_DEFAULT, 0);
MUTEX_INIT(&epoch_mutex, "epoch", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_init_mutex, "init", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_event_mutex, "event", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&des_init_mutex, "des", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&des_random_mutex, "random", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&osi_malloc_mutex, "malloc", MUTEX_DEFAULT, 0);
MUTEX_INIT(&event_handler_mutex, "event handler", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rxi_connCacheMutex, "conn cache", MUTEX_DEFAULT, 0);
MUTEX_INIT(&listener_mutex, "listener", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_if_init_mutex, "if init", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_if_mutex, "if", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&rxkad_client_uid_mutex, "uid", MUTEX_DEFAULT, 0);
- MUTEX_INIT(&rxkad_random_mutex, "rxkad random", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_debug_mutex, "debug", MUTEX_DEFAULT, 0);
- assert(pthread_cond_init
- (&rx_event_handler_cond, (const pthread_condattr_t *)0) == 0);
- assert(pthread_cond_init(&rx_listener_cond, (const pthread_condattr_t *)0)
- == 0);
- assert(pthread_key_create(&rx_thread_id_key, NULL) == 0);
- assert(pthread_key_create(&rx_ts_info_key, NULL) == 0);
+ CV_INIT(&rx_event_handler_cond, "evhand", CV_DEFAULT, 0);
+ CV_INIT(&rx_listener_cond, "rxlisten", CV_DEFAULT, 0);
- rxkad_global_stats_init();
+ osi_Assert(pthread_key_create(&rx_thread_id_key, NULL) == 0);
+ osi_Assert(pthread_key_create(&rx_ts_info_key, NULL) == 0);
MUTEX_INIT(&rx_rpc_stats, "rx_rpc_stats", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_freePktQ_lock, "rx_freePktQ_lock", MUTEX_DEFAULT, 0);
}
pthread_once_t rx_once_init = PTHREAD_ONCE_INIT;
-#define INIT_PTHREAD_LOCKS \
-assert(pthread_once(&rx_once_init, rxi_InitPthread)==0)
+#define INIT_PTHREAD_LOCKS osi_Assert(pthread_once(&rx_once_init, rxi_InitPthread)==0)
/*
* The rx_stats_mutex mutex protects the following global variables:
* rxi_lowConnRefCount
* to manipulate the queue.
*/
-#if defined(RX_ENABLE_LOCKS) && defined(KERNEL)
+#if defined(RX_ENABLE_LOCKS)
static afs_kmutex_t rx_rpc_stats;
-void rxi_StartUnlocked(struct rxevent *event, void *call,
- void *arg1, int istack);
#endif
/* We keep a "last conn pointer" in rxi_FindConnection. The odds are
* lowest level:
* multi_handle->lock
* rxevent_lock
+ * rx_packets_mutex
* rx_stats_mutex
+ * rx_refcnt_mutex
* rx_atomic_mutex
*
* Do we need a lock to protect the peer field in the conn structure?
MUTEX_INIT(&rx_quota_mutex, "rx_quota_mutex", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_pthread_mutex, "rx_pthread_mutex", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_packets_mutex, "rx_packets_mutex", MUTEX_DEFAULT, 0);
+ MUTEX_INIT(&rx_refcnt_mutex, "rx_refcnt_mutex", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_rpc_stats, "rx_rpc_stats", MUTEX_DEFAULT, 0);
MUTEX_INIT(&rx_freePktQ_lock, "rx_freePktQ_lock", MUTEX_DEFAULT, 0);
MUTEX_INIT(&freeSQEList_lock, "freeSQEList lock", MUTEX_DEFAULT, 0);
rxi_nCalls = 0;
rx_connDeadTime = 12;
rx_tranquil = 0; /* reset flag */
- memset(&rx_stats, 0, sizeof(struct rx_statistics));
+ rxi_ResetStatistics();
htable = (char *)
osi_Alloc(rx_hashTableSize * sizeof(struct rx_connection *));
PIN(htable, rx_hashTableSize * sizeof(struct rx_connection *)); /* XXXXX */
rx_connHashTable = (struct rx_connection **)htable;
rx_peerHashTable = (struct rx_peer **)ptable;
- rx_lastAckDelay.sec = 0;
- rx_lastAckDelay.usec = 400000; /* 400 milliseconds */
rx_hardAckDelay.sec = 0;
rx_hardAckDelay.usec = 100000; /* 100 milliseconds */
- rx_softAckDelay.sec = 0;
- rx_softAckDelay.usec = 100000; /* 100 milliseconds */
rxevent_Init(20, rxi_ReScheduleEvents);
rx_GetIFInfo();
#endif
+#if defined(RXK_LISTENER_ENV) || !defined(KERNEL)
/* Start listener process (exact function is dependent on the
* implementation environment--kernel or user space) */
rxi_StartListener();
+#endif
USERPRI;
tmp_status = rxinit_status = 0;
return rx_InitHost(htonl(INADDR_ANY), port);
}
+/* RTT Timer
+ * ---------
+ *
+ * The rxi_rto functions implement a TCP (RFC2988) style algorithm for
+ * maintaining the round trip timer.
+ *
+ */
+
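+/* As a point of reference, the RFC2988 update that produces the 'rto' used
+ * below looks roughly like this sketch (alpha = 1/8 and beta = 1/4 are the
+ * RFC constants; the clamping bounds are implementation choices, not values
+ * taken from this code):
+ *
+ *     On the first RTT measurement R:
+ *         srtt   = R;
+ *         rttvar = R / 2;
+ *     On each later measurement R:
+ *         rttvar = (3 * rttvar + abs(srtt - R)) / 4;
+ *         srtt   = (7 * srtt + R) / 8;
+ *     Then:
+ *         rto = clamp(srtt + 4 * rttvar, min_rto, max_rto);
+ */
+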
+/*!
+ * Start a new RTT timer for a given call and packet.
+ *
+ * There must be no resendEvent already listed for this call; otherwise
+ * events will be leaked. Intended for internal use within the RTO code only.
+ *
+ * @param[in] call
+ * the RX call to start the timer for
+ * @param[in] lastPacket
+ * a flag indicating whether the last packet has been sent or not
+ *
+ * @pre call must be locked before calling this function
+ *
+ */
+static_inline void
+rxi_rto_startTimer(struct rx_call *call, int lastPacket, int istack)
+{
+ struct clock now, retryTime;
+
+ clock_GetTime(&now);
+ retryTime = now;
+
+ clock_Add(&retryTime, &call->rto);
+
+ /* If we're sending the last packet, and we're the client, then the server
+ * may wait for an additional 400ms before returning the ACK, wait for it
+ * rather than hitting a timeout */
+ if (lastPacket && call->conn->type == RX_CLIENT_CONNECTION)
+ clock_Addmsec(&retryTime, 400);
+
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ CALL_HOLD(call, RX_CALL_REFCOUNT_RESEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ call->resendEvent = rxevent_PostNow2(&retryTime, &now, rxi_Resend,
+ call, 0, istack);
+}
+
+/*!
+ * Cancel an RTT timer for a given call.
+ *
+ * @param[in] call
+ * the RX call to cancel the timer for
+ *
+ * @pre call must be locked before calling this function
+ *
+ */
+
+static_inline void
+rxi_rto_cancel(struct rx_call *call)
+{
+ if (!call->resendEvent)
+ return;
+
+ rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
+}
+
+/*!
+ * Tell the RTO timer that we have sent a packet.
+ *
+ * If the timer isn't already running, then start it. If the timer is running,
+ * then do nothing.
+ *
+ * @param[in] call
+ * the RX call that the packet has been sent on
+ * @param[in] lastPacket
+ * A flag which is true if this is the last packet for the call
+ *
+ * @pre The call must be locked before calling this function
+ *
+ */
+
+static_inline void
+rxi_rto_packet_sent(struct rx_call *call, int lastPacket, int istack)
+{
+ if (call->resendEvent)
+ return;
+
+ rxi_rto_startTimer(call, lastPacket, istack);
+}
+
+/*!
+ * Tell the RTO timer that we have received a new ACK message
+ *
+ * This function should be called whenever a call receives an ACK that
+ * acknowledges new packets. Whatever happens, we stop the current timer.
+ * If there are unacked packets in the queue which have been sent, then
+ * we restart the timer from now. Otherwise, we leave it stopped.
+ *
+ * @param[in] call
+ * the RX call that the ACK has been received on
+ */
+
+static_inline void
+rxi_rto_packet_acked(struct rx_call *call, int istack)
+{
+ struct rx_packet *p, *nxp;
+
+ rxi_rto_cancel(call);
+
+ if (queue_IsEmpty(&call->tq))
+ return;
+
+ for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
+ if (p->header.seq > call->tfirst + call->twind)
+ return;
+
+ if (!(p->flags & RX_PKTFLAG_ACKED) && p->flags & RX_PKTFLAG_SENT) {
+ rxi_rto_startTimer(call, p->header.flags & RX_LAST_PACKET, istack);
+ return;
+ }
+ }
+}
+
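+/* The net effect of the above is that the RTO timer always covers the
+ * oldest packet that has been sent but not yet acknowledged, and is
+ * restarted from 'now' whenever an ACK acknowledges new data, as
+ * RFC2988 recommends. */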
+
+/**
+ * Set an initial round trip timeout for a peer connection
+ *
+ * @param[in] secs The timeout to set in seconds
+ */
+
+void
+rx_rto_setPeerTimeoutSecs(struct rx_peer *peer, int secs) {
+ peer->rtt = secs * 8000;
+}
+
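+/* The scaling above assumes peer->rtt is kept in units of 1/8th of a
+ * millisecond, so one second corresponds to 8000 units. A hypothetical
+ * caller wanting an initial RTO of roughly two seconds would write:
+ *
+ *     rx_rto_setPeerTimeoutSecs(peer, 2);
+ */
+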
+/**
+ * Sets the error generated when a busy call channel is detected.
+ *
+ * @param[in] error The error to return for a call on a busy channel.
+ *
+ * @pre Neither rx_Init nor rx_InitHost have been called yet
+ */
+void
+rx_SetBusyChannelError(afs_int32 error)
+{
+ osi_Assert(rxinit_status != 0);
+ rxi_busyChannelError = error;
+}
+
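+/* For example, a client that prefers busy call channels to surface as
+ * timeouts might do the following before initializing Rx. RX_CALL_TIMEOUT
+ * is just one plausible choice of error here; any nonzero value works:
+ *
+ *     rx_SetBusyChannelError(RX_CALL_TIMEOUT);
+ *     rx_Init(0);
+ */
+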
/* called with unincremented nRequestsRunning to see if it is OK to start
* a new thread in this service. Could be "no" for two reasons: over the
* max quota, or would prevent others from reaching their min quota.
/* Called by rx_StartServer to start up lwp's to service calls.
NExistingProcs gives the number of procs already existing, and which
therefore needn't be created. */
-void
+static void
rxi_StartServerProcs(int nExistingProcs)
{
struct rx_service *service;
for (i = 0; i < RX_MAXCALLS; i++) {
conn->twind[i] = rx_initSendWindow;
conn->rwind[i] = rx_initReceiveWindow;
+ conn->lastBusy[i] = 0;
}
RXS_NewConnection(securityObject, conn);
conn->next = rx_connHashTable[hashindex];
rx_connHashTable[hashindex] = conn;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nClientConns, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nClientConns);
MUTEX_EXIT(&rx_connHashTable_lock);
USERPRI;
return conn;
}
+/**
+ * Ensure a connection's timeout values are valid.
+ *
+ * @param[in] conn The connection to check
+ *
+ * @post conn->secondsUntilDead <= conn->idleDeadTime <= conn->hardDeadTime,
+ * unless idleDeadTime and/or hardDeadTime are not set
+ * @internal
+ */
+static void
+rxi_CheckConnTimeouts(struct rx_connection *conn)
+{
+ /* a connection's timeouts must have the relationship
+ * deadTime <= idleDeadTime <= hardDeadTime. Otherwise, for example, a
+ * total loss of network to a peer may cause an idle timeout instead of a
+ * dead timeout, simply because the idle timeout gets hit first. Also set
+ * a minimum deadTime of 6, just to ensure it doesn't get set too low. */
+ /* this logic is slightly complicated by the fact that
+ * idleDeadTime/hardDeadTime may not be set at all, but it's not too bad.
+ */
+ conn->secondsUntilDead = MAX(conn->secondsUntilDead, 6);
+ if (conn->idleDeadTime) {
+ conn->idleDeadTime = MAX(conn->idleDeadTime, conn->secondsUntilDead);
+ }
+ if (conn->hardDeadTime) {
+ if (conn->idleDeadTime) {
+ conn->hardDeadTime = MAX(conn->idleDeadTime, conn->hardDeadTime);
+ } else {
+ conn->hardDeadTime = MAX(conn->secondsUntilDead, conn->hardDeadTime);
+ }
+ }
+}
+
void
rx_SetConnDeadTime(struct rx_connection *conn, int seconds)
{
/* The idea is to set the dead time to a value that allows several
* keepalives to be dropped without timing out the connection. */
- conn->secondsUntilDead = MAX(seconds, 6);
+ conn->secondsUntilDead = seconds;
+ rxi_CheckConnTimeouts(conn);
conn->secondsUntilPing = conn->secondsUntilDead / 6;
}
+void
+rx_SetConnHardDeadTime(struct rx_connection *conn, int seconds)
+{
+ conn->hardDeadTime = seconds;
+ rxi_CheckConnTimeouts(conn);
+}
+
+void
+rx_SetConnIdleDeadTime(struct rx_connection *conn, int seconds)
+{
+ conn->idleDeadTime = seconds;
+ rxi_CheckConnTimeouts(conn);
+}
+
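+/* These setters may be combined; rxi_CheckConnTimeouts then enforces
+ * secondsUntilDead <= idleDeadTime <= hardDeadTime. A sketch, with
+ * illustrative values only:
+ *
+ *     rx_SetConnDeadTime(conn, 50);
+ *     rx_SetConnIdleDeadTime(conn, 60);
+ *     rx_SetConnHardDeadTime(conn, 120);
+ */
+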
int rxi_lowPeerRefCount = 0;
int rxi_lowConnRefCount = 0;
* Cleanup a connection that was destroyed in rxi_DestroyConnectioNoLock.
* NOTE: must not be called with rx_connHashTable_lock held.
*/
-void
+static void
rxi_CleanupConnection(struct rx_connection *conn)
{
/* Notify the service exporter, if requested, that this connection
if (rx_stats_active)
{
if (conn->type == RX_SERVER_CONNECTION)
- rx_MutexDecrement(rx_stats.nServerConns, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nServerConns);
else
- rx_MutexDecrement(rx_stats.nClientConns, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nClientConns);
}
#ifndef KERNEL
if (conn->specific) {
NETPRI;
MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
if (conn->refCount > 0)
conn->refCount--;
else {
if ((conn->refCount > 0) || (conn->flags & RX_CONN_BUSY)) {
/* Busy; wait till the last guy before proceeding */
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
USERPRI;
return;
USERPRI;
return;
}
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
/* Check for extant references to this connection */
+ MUTEX_ENTER(&conn->conn_call_lock);
for (i = 0; i < RX_MAXCALLS; i++) {
struct rx_call *call = conn->call[i];
if (call) {
}
}
}
+ MUTEX_EXIT(&conn->conn_call_lock);
+
#ifdef RX_ENABLE_LOCKS
if (!havecalls) {
if (MUTEX_TRYENTER(&conn->conn_data_lock)) {
SPLVAR;
NETPRI;
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
USERPRI;
}
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
/* Wait for the transmit queue to no longer be busy.
* requires the call->lock to be held */
-static void rxi_WaitforTQBusy(struct rx_call *call) {
- while (call->flags & RX_CALL_TQ_BUSY) {
+void
+rxi_WaitforTQBusy(struct rx_call *call) {
+ while (!call->error && (call->flags & RX_CALL_TQ_BUSY)) {
call->flags |= RX_CALL_TQ_WAIT;
call->tqWaiters++;
#ifdef RX_ENABLE_LOCKS
}
#endif
+static void
+rxi_WakeUpTransmitQueue(struct rx_call *call)
+{
+ if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
+ dpf(("call %"AFS_PTR_FMT" has %d waiters and flags %d\n",
+ call, call->tqWaiters, call->flags));
+#ifdef RX_ENABLE_LOCKS
+ osirx_AssertMine(&call->lock, "rxi_Start start");
+ CV_BROADCAST(&call->cv_tq);
+#else /* RX_ENABLE_LOCKS */
+ osi_rxWakeup(&call->tq);
+#endif /* RX_ENABLE_LOCKS */
+ }
+}
+
/* Start a new rx remote procedure call, on the specified connection.
* If wait is set to 1, wait for a free call channel; otherwise return
* 0. Maxtime gives the maximum number of seconds this call may take,
struct rx_call *
rx_NewCall(struct rx_connection *conn)
{
- int i, wait;
+ int i, wait, ignoreBusy = 1;
struct rx_call *call;
struct clock queueTime;
+ afs_uint32 leastBusy = 0;
SPLVAR;
clock_NewTime();
for (i = 0; i < RX_MAXCALLS; i++) {
call = conn->call[i];
if (call) {
+ if (!ignoreBusy && conn->lastBusy[i] != leastBusy) {
+ /* we're not ignoring busy call slots; only look at the
+ * call slot that is the "least" busy */
+ continue;
+ }
+
if (call->state == RX_STATE_DALLY) {
MUTEX_ENTER(&call->lock);
if (call->state == RX_STATE_DALLY) {
+ if (ignoreBusy && conn->lastBusy[i]) {
+ /* if we're ignoring busy call slots, skip any ones that
+ * have lastBusy set */
+ if (leastBusy == 0 || conn->lastBusy[i] < leastBusy) {
+ leastBusy = conn->lastBusy[i];
+ }
+ MUTEX_EXIT(&call->lock);
+ continue;
+ }
+
/*
* We are setting the state to RX_STATE_RESET to
* ensure that no one else will attempt to use this
* effect on overall system performance.
*/
call->state = RX_STATE_RESET;
- CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
MUTEX_EXIT(&conn->conn_call_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
+ MUTEX_EXIT(&rx_refcnt_mutex);
rxi_ResetCall(call, 0);
(*call->callNumber)++;
if (MUTEX_TRYENTER(&conn->conn_call_lock))
* Instead, cycle through one more time to see if
* we can find a call that can call our own.
*/
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_BEGIN);
+ MUTEX_EXIT(&rx_refcnt_mutex);
wait = 0;
}
MUTEX_EXIT(&call->lock);
}
} else {
+ if (ignoreBusy && conn->lastBusy[i]) {
+ /* if we're ignoring busy call slots, skip any ones that
+ * have lastBusy set */
+ if (leastBusy == 0 || conn->lastBusy[i] < leastBusy) {
+ leastBusy = conn->lastBusy[i];
+ }
+ continue;
+ }
+
/* rxi_NewCall returns with mutex locked */
call = rxi_NewCall(conn, i);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
+ MUTEX_EXIT(&rx_refcnt_mutex);
break;
}
}
if (i < RX_MAXCALLS) {
+ conn->lastBusy[i] = 0;
break;
}
if (!wait)
continue;
+ if (leastBusy && ignoreBusy) {
+	    /* we didn't find a usable call slot, but we did see at least one
+	     * 'busy' slot; look again and only use the slot with the least
+	     * 'busy' time */
+ ignoreBusy = 0;
+ continue;
+ }
MUTEX_ENTER(&conn->conn_data_lock);
conn->flags |= RX_CONN_MAKECALL_WAITING;
return call;
}
-int
+static int
rxi_HasActiveCalls(struct rx_connection *aconn)
{
int i;
}
}
+#ifdef KERNEL
+ if (afs_termState == AFSOP_STOP_RXCALLBACK) {
+#ifdef RX_ENABLE_LOCKS
+ AFS_GLOCK();
+#endif /* RX_ENABLE_LOCKS */
+ afs_termState = AFSOP_STOP_AFS;
+ afs_osi_Wakeup(&afs_termState);
+#ifdef RX_ENABLE_LOCKS
+ AFS_GUNLOCK();
+#endif /* RX_ENABLE_LOCKS */
+ return;
+ }
+#endif
+
/* if server is restarting( typically smooth shutdown) then do not
* allow any new calls.
*/
MUTEX_EXIT(&call->lock);
USERPRI;
+ continue;
}
-#ifdef KERNEL
- if (afs_termState == AFSOP_STOP_RXCALLBACK) {
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
- afs_termState = AFSOP_STOP_AFS;
- afs_osi_Wakeup(&afs_termState);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
- return;
- }
-#endif
tservice = call->conn->service;
(*tservice->afterProc) (call, code);
rx_EndCall(call, code);
+
+ if (tservice->postProc)
+ (*tservice->postProc) (code);
+
if (rx_stats_active) {
MUTEX_ENTER(&rx_stats_mutex);
rxi_nCalls++;
struct rx_serverQueueEntry *sq;
struct rx_call *call = (struct rx_call *)0;
struct rx_service *service = NULL;
- SPLVAR;
MUTEX_ENTER(&freeSQEList_lock);
call->conn->service->servicePort, call->conn->service->serviceId,
call));
- CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
+ MUTEX_EXIT(&rx_refcnt_mutex);
} else {
dpf(("rx_GetCall(socketp=%p, *socketp=0x%x)\n", socketp, *socketp));
}
call->arrivalProc = (void (*)())0;
if (rc && call->error == 0) {
rxi_CallError(call, rc);
+ call->mode = RX_MODE_ERROR;
/* Send an abort message to the peer if this error code has
* only just been set. If it was set previously, assume the
* peer has already been sent the error code or will request it
if (conn->type == RX_SERVER_CONNECTION) {
/* Make sure reply or at least dummy reply is sent */
if (call->mode == RX_MODE_RECEIVING) {
+ MUTEX_EXIT(&call->lock);
rxi_WriteProc(call, 0, 0);
+ MUTEX_ENTER(&call->lock);
}
if (call->mode == RX_MODE_SENDING) {
+ MUTEX_EXIT(&call->lock);
rxi_FlushWrite(call);
+ MUTEX_ENTER(&call->lock);
}
rxi_calltrace(RX_CALL_END, call);
/* Call goes to hold state until reply packets are acknowledged */
} else {
call->state = RX_STATE_DALLY;
rxi_ClearTransmitQueue(call, 0);
- rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
+ rxi_rto_cancel(call);
rxevent_Cancel(call->keepAliveEvent, call,
RX_CALL_REFCOUNT_ALIVE);
}
* no reply arguments are expected */
if ((call->mode == RX_MODE_SENDING)
|| (call->mode == RX_MODE_RECEIVING && call->rnext == 1)) {
+ MUTEX_EXIT(&call->lock);
(void)rxi_ReadProc(call, &dummy, 1);
+ MUTEX_ENTER(&call->lock);
}
/* If we had an outstanding delayed ack, be nice to the server
MUTEX_EXIT(&call->lock);
MUTEX_ENTER(&conn->conn_call_lock);
MUTEX_ENTER(&call->lock);
+
+ if (!(call->flags & RX_CALL_PEER_BUSY)) {
+ conn->lastBusy[call->channel] = 0;
+ }
+
MUTEX_ENTER(&conn->conn_data_lock);
conn->flags |= RX_CONN_BUSY;
if (conn->flags & RX_CONN_MAKECALL_WAITING) {
call->iovqc -=
#endif /* RXDEBUG_PACKET */
rxi_FreePackets(0, &call->iovq);
+ MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_BEGIN);
- MUTEX_EXIT(&call->lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
if (conn->type == RX_CLIENT_CONNECTION) {
MUTEX_ENTER(&conn->conn_data_lock);
conn->flags &= ~RX_CONN_BUSY;
for (conn = *conn_ptr; conn; conn = next) {
next = conn->next;
if (conn->type == RX_CLIENT_CONNECTION) {
- /* MUTEX_ENTER(&conn->conn_data_lock); when used in kernel */
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++;
- /* MUTEX_EXIT(&conn->conn_data_lock); when used in kernel */
+ MUTEX_EXIT(&rx_refcnt_mutex);
#ifdef RX_ENABLE_LOCKS
rxi_DestroyConnectionNoLock(conn);
#else /* RX_ENABLE_LOCKS */
/* Return this process's service structure for the
* specified socket and service */
-struct rx_service *
+static struct rx_service *
rxi_FindService(osi_socket socket, u_short serviceId)
{
struct rx_service **sp;
/* Allocate a call structure, for the indicated channel of the
* supplied connection. The mode and state of the call must be set by
* the caller. Returns the call with mutex locked. */
-struct rx_call *
+static struct rx_call *
rxi_NewCall(struct rx_connection *conn, int channel)
{
struct rx_call *call;
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
queue_Remove(call);
if (rx_stats_active)
- rx_MutexDecrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nFreeCallStructs);
MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_ENTER(&call->lock);
CLEAR_CALL_QUEUE_LOCK(call);
call->allNextp = rx_allCallsp;
rx_allCallsp = call;
call->call_id =
+ rx_atomic_inc_and_read(&rx_stats.nCallStructs);
+#else /* RXDEBUG_PACKET */
+ rx_atomic_inc(&rx_stats.nCallStructs);
#endif /* RXDEBUG_PACKET */
- rx_MutexIncrement(rx_stats.nCallStructs, rx_stats_mutex);
MUTEX_EXIT(&rx_freeCallQueue_lock);
MUTEX_INIT(&call->lock, "call lock", MUTEX_DEFAULT, NULL);
/* A call has been inactive long enough that so we can throw away
* state, including the call structure, which is placed on the call
* free list.
- * Call is locked upon entry.
- * haveCTLock set if called from rxi_ReapConnections
+ *
+ * call->lock and rx_refcnt_mutex are held upon entry.
+ * haveCTLock is set when called from rxi_ReapConnections.
*/
-#ifdef RX_ENABLE_LOCKS
-void
+static void
rxi_FreeCall(struct rx_call *call, int haveCTLock)
-#else /* RX_ENABLE_LOCKS */
-void
-rxi_FreeCall(struct rx_call *call)
-#endif /* RX_ENABLE_LOCKS */
{
int channel = call->channel;
struct rx_connection *conn = call->conn;
if (call->state == RX_STATE_DALLY || call->state == RX_STATE_HOLD)
(*call->callNumber)++;
+ /*
+ * We are setting the state to RX_STATE_RESET to
+ * ensure that no one else will attempt to use this
+ * call once we drop the refcnt lock. We must drop
+ * the refcnt lock before calling rxi_ResetCall
+ * because it cannot be held across acquiring the
+ * freepktQ lock. NewCall does the same.
+ */
+ call->state = RX_STATE_RESET;
+ MUTEX_EXIT(&rx_refcnt_mutex);
rxi_ResetCall(call, 0);
- call->conn->call[channel] = (struct rx_call *)0;
+
+ MUTEX_ENTER(&conn->conn_call_lock);
+ if (call->conn->call[channel] == call)
+ call->conn->call[channel] = 0;
+ MUTEX_EXIT(&conn->conn_call_lock);
MUTEX_ENTER(&rx_freeCallQueue_lock);
SET_CALL_QUEUE_LOCK(call, &rx_freeCallQueue_lock);
queue_Append(&rx_freeCallQueue, call);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nFreeCallStructs, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nFreeCallStructs);
MUTEX_EXIT(&rx_freeCallQueue_lock);
/* Destroy the connection if it was previously slated for
*/
MUTEX_ENTER(&conn->conn_data_lock);
if (conn->flags & RX_CONN_DESTROY_ME && !(conn->flags & RX_CONN_MAKECALL_WAITING)) {
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++;
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
#ifdef RX_ENABLE_LOCKS
if (haveCTLock)
} else {
MUTEX_EXIT(&conn->conn_data_lock);
}
+ MUTEX_ENTER(&rx_refcnt_mutex);
}
-afs_int32 rxi_Alloccnt = 0, rxi_Allocsize = 0;
+rx_atomic_t rxi_Allocsize = RX_ATOMIC_INIT(0);
+rx_atomic_t rxi_Alloccnt = RX_ATOMIC_INIT(0);
+
void *
rxi_Alloc(size_t size)
{
char *p;
- if (rx_stats_active)
- rx_MutexAdd1Increment2(rxi_Allocsize, (afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
+ if (rx_stats_active) {
+ rx_atomic_add(&rxi_Allocsize, (int) size);
+ rx_atomic_inc(&rxi_Alloccnt);
+ }
p = (char *)
#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
void
rxi_Free(void *addr, size_t size)
{
- if (rx_stats_active)
- rx_MutexAdd1Decrement2(rxi_Allocsize, -(afs_int32)size, rxi_Alloccnt, rx_stats_mutex);
+ if (rx_stats_active) {
+ rx_atomic_sub(&rxi_Allocsize, (int) size);
+ rx_atomic_dec(&rxi_Alloccnt);
+ }
osi_Free(addr, size);
}
rx_peerHashTable[hashIndex] = pp;
rxi_InitPeerParams(pp);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nPeerStructs);
}
}
if (pp && create) {
if (service->newConnProc)
(*service->newConnProc) (conn);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nServerConns, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nServerConns);
}
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
rxLastConn = conn; /* store this connection as the last conn used */
MUTEX_EXIT(&rx_connHashTable_lock);
return conn;
}
+/**
+ * Timeout a call on a busy call channel if appropriate.
+ *
+ * @param[in] call The busy call.
+ *
+ * @pre 'call' is marked as busy (namely,
+ * call->conn->lastBusy[call->channel] != 0)
+ *
+ * @pre call->lock is held
+ * @pre rxi_busyChannelError is nonzero
+ *
+ * @note call->lock is dropped and reacquired
+ */
+static void
+rxi_CheckBusy(struct rx_call *call)
+{
+ struct rx_connection *conn = call->conn;
+ int channel = call->channel;
+ int freechannel = 0;
+ int i;
+ afs_uint32 callNumber = *call->callNumber;
+
+ MUTEX_EXIT(&call->lock);
+
+ MUTEX_ENTER(&conn->conn_call_lock);
+
+    /* Are there any other call slots on this conn that we should try? Look
+     * for slots that are empty or dallying, and that are either non-busy or
+     * were marked as busy longer than conn->secondsUntilDead seconds before
+     * this call started. */
+
+ for (i = 0; i < RX_MAXCALLS && !freechannel; i++) {
+ if (i == channel) {
+ /* only look at channels that aren't us */
+ continue;
+ }
+
+ if (conn->lastBusy[i]) {
+ /* if this channel looked busy too recently, don't look at it */
+ if (conn->lastBusy[i] >= call->startTime.sec) {
+ continue;
+ }
+ if (call->startTime.sec - conn->lastBusy[i] < conn->secondsUntilDead) {
+ continue;
+ }
+ }
+
+ if (conn->call[i]) {
+ struct rx_call *tcall = conn->call[i];
+ MUTEX_ENTER(&tcall->lock);
+ if (tcall->state == RX_STATE_DALLY) {
+ freechannel = 1;
+ }
+ MUTEX_EXIT(&tcall->lock);
+ } else {
+ freechannel = 1;
+ }
+ }
+
+ MUTEX_EXIT(&conn->conn_call_lock);
+
+ MUTEX_ENTER(&call->lock);
+
+ /* Since the call->lock and conn->conn_call_lock have been released it is
+ * possible that (1) the call may no longer be busy and/or (2) the call may
+ * have been reused by another waiting thread. Therefore, we must confirm
+ * that the call state has not changed when deciding whether or not to
+ * force this application thread to retry by forcing a Timeout error. */
+
+ if (freechannel && *call->callNumber == callNumber &&
+ (call->flags & RX_CALL_PEER_BUSY)) {
+ /* Since 'freechannel' is set, there exists another channel in this
+ * rx_conn that the application thread might be able to use. We know
+ * that we have the correct call since callNumber is unchanged, and we
+ * know that the call is still busy. So, set the call error state to
+ * rxi_busyChannelError so the application can retry the request,
+ * presumably on a less-busy call channel. */
+
+ rxi_CallError(call, rxi_busyChannelError);
+ }
+}
+
/* There are two packet tracing routines available for testing and monitoring
* Rx. One is called just after every packet is received and the other is
* called just before every packet is sent. Received packets, have had their
* this is the first time the packet has been seen */
packetType = (np->header.type > 0 && np->header.type < RX_N_PACKET_TYPES)
? rx_packetTypes[np->header.type - 1] : "*UNKNOWN*";
- dpf(("R %d %s: %x.%d.%d.%d.%d.%d.%d flags %d, packet %"AFS_PTR_FMT,
+ dpf(("R %d %s: %x.%d.%d.%d.%d.%d.%d flags %d, packet %"AFS_PTR_FMT"\n",
np->header.serial, packetType, ntohl(host), ntohs(port), np->header.serviceId,
np->header.epoch, np->header.cid, np->header.callNumber,
np->header.seq, np->header.flags, np));
return np;
}
- MUTEX_ENTER(&conn->conn_data_lock);
- if (conn->maxSerial < np->header.serial)
- conn->maxSerial = np->header.serial;
- MUTEX_EXIT(&conn->conn_data_lock);
-
/* If the connection is in an error state, send an abort packet and ignore
* the incoming packet */
if (conn->error) {
MUTEX_ENTER(&conn->conn_data_lock);
if (np->header.type != RX_PACKET_TYPE_ABORT)
np = rxi_SendConnectionAbort(conn, np, 1, 0);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
return np;
}
case RX_PACKET_TYPE_ABORT: {
/* What if the supplied error is zero? */
afs_int32 errcode = ntohl(rx_GetInt32(np, 0));
- dpf(("rxi_ReceivePacket ABORT rx_GetInt32 = %d", errcode));
+ dpf(("rxi_ReceivePacket ABORT rx_GetInt32 = %d\n", errcode));
rxi_ConnectionError(conn, errcode);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
case RX_PACKET_TYPE_CHALLENGE:
tnp = rxi_ReceiveChallengePacket(conn, np, 1);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return tnp;
case RX_PACKET_TYPE_RESPONSE:
tnp = rxi_ReceiveResponsePacket(conn, np, 1);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return tnp;
case RX_PACKET_TYPE_PARAMS:
case RX_PACKET_TYPE_PARAMS + 1:
case RX_PACKET_TYPE_PARAMS + 2:
/* ignore these packet types for now */
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
rxi_ConnectionError(conn, RX_PROTOCOL_ERROR);
MUTEX_ENTER(&conn->conn_data_lock);
tnp = rxi_SendConnectionAbort(conn, np, 1, 0);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
return tnp;
}
channel = np->header.cid & RX_CHANNELMASK;
call = conn->call[channel];
-#ifdef RX_ENABLE_LOCKS
- if (call)
+
+ if (call) {
MUTEX_ENTER(&call->lock);
- /* Test to see if call struct is still attached to conn. */
- if (call != conn->call[channel]) {
- if (call)
- MUTEX_EXIT(&call->lock);
- if (type == RX_SERVER_CONNECTION) {
- call = conn->call[channel];
- /* If we started with no call attached and there is one now,
- * another thread is also running this routine and has gotten
- * the connection channel. We should drop this packet in the tests
- * below. If there was a call on this connection and it's now
- * gone, then we'll be making a new call below.
- * If there was previously a call and it's now different then
- * the old call was freed and another thread running this routine
- * has created a call on this channel. One of these two threads
- * has a packet for the old call and the code below handles those
- * cases.
- */
- if (call)
- MUTEX_ENTER(&call->lock);
- } else {
- /* This packet can't be for this call. If the new call address is
- * 0 then no call is running on this channel. If there is a call
- * then, since this is a client connection we're getting data for
- * it must be for the previous call.
- */
- if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
- MUTEX_ENTER(&conn->conn_data_lock);
- conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
- return np;
- }
- }
+ currentCallNumber = conn->callNumber[channel];
+ } else if (type == RX_SERVER_CONNECTION) { /* No call allocated */
+ MUTEX_ENTER(&conn->conn_call_lock);
+ call = conn->call[channel];
+ if (call) {
+ MUTEX_ENTER(&call->lock);
+ MUTEX_EXIT(&conn->conn_call_lock);
+ currentCallNumber = conn->callNumber[channel];
+ } else {
+ call = rxi_NewCall(conn, channel); /* returns locked call */
+ MUTEX_EXIT(&conn->conn_call_lock);
+ *call->callNumber = currentCallNumber = np->header.callNumber;
+#ifdef RXDEBUG
+ if (np->header.callNumber == 0)
+ dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" len %d\n",
+ np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port),
+ np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq,
+ np->header.flags, np, np->length));
#endif
- currentCallNumber = conn->callNumber[channel];
+ call->state = RX_STATE_PRECALL;
+ clock_GetTime(&call->queueTime);
+ hzero(call->bytesSent);
+ hzero(call->bytesRcvd);
+ /*
+ * If the number of queued calls exceeds the overload
+ * threshold then abort this call.
+ */
+ if ((rx_BusyThreshold > 0) &&
+ (rx_atomic_read(&rx_nWaiting) > rx_BusyThreshold)) {
+ struct rx_packet *tp;
+
+ rxi_CallError(call, rx_BusyError);
+ tp = rxi_SendCallAbort(call, np, 1, 0);
+ MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ if (rx_stats_active)
+ rx_atomic_inc(&rx_stats.nBusies);
+ return tp;
+ }
+ rxi_KeepAliveOn(call);
+ }
+ } else { /* RX_CLIENT_CONNECTION and No call allocated */
+ /* This packet can't be for this call. If the new call address is
+ * 0 then no call is running on this channel. If there is a call
+ * then, since this is a client connection we're getting data for
+ * it must be for the previous call.
+ */
+ if (rx_stats_active)
+ rx_atomic_inc(&rx_stats.spuriousPacketsRead);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ return np;
+ }
+ /* There is a non-NULL locked call at this point */
if (type == RX_SERVER_CONNECTION) { /* We're the server */
- if (np->header.callNumber < currentCallNumber) {
+ if (np->header.callNumber < currentCallNumber) {
+ MUTEX_EXIT(&call->lock);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
-#ifdef RX_ENABLE_LOCKS
- if (call)
- MUTEX_EXIT(&call->lock);
-#endif
- MUTEX_ENTER(&conn->conn_data_lock);
- conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
- return np;
- }
- if (!call) {
- MUTEX_ENTER(&conn->conn_call_lock);
- call = rxi_NewCall(conn, channel);
- MUTEX_EXIT(&conn->conn_call_lock);
- *call->callNumber = np->header.callNumber;
-#ifdef RXDEBUG
- if (np->header.callNumber == 0)
- dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%.06d len %d",
- np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port),
- np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq,
- np->header.flags, np, np->retryTime.sec, np->retryTime.usec / 1000, np->length));
-#endif
- call->state = RX_STATE_PRECALL;
- clock_GetTime(&call->queueTime);
- hzero(call->bytesSent);
- hzero(call->bytesRcvd);
- /*
- * If the number of queued calls exceeds the overload
- * threshold then abort this call.
- */
- if ((rx_BusyThreshold > 0) &&
- (rx_atomic_read(&rx_nWaiting) > rx_BusyThreshold)) {
- struct rx_packet *tp;
-
- rxi_CallError(call, rx_BusyError);
- tp = rxi_SendCallAbort(call, np, 1, 0);
- MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
- conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
- if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
- return tp;
- }
- rxi_KeepAliveOn(call);
- } else if (np->header.callNumber != currentCallNumber) {
+ rx_atomic_inc(&rx_stats.spuriousPacketsRead);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ return np;
+ } else if (np->header.callNumber != currentCallNumber) {
/* Wait until the transmit queue is idle before deciding
* whether to reset the current call. Chances are that the
* call will be in either DALLY or HOLD state once the TQ_BUSY
* flag is cleared.
*/
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- while ((call->state == RX_STATE_ACTIVE)
- && (call->flags & RX_CALL_TQ_BUSY)) {
- call->flags |= RX_CALL_TQ_WAIT;
- call->tqWaiters++;
-#ifdef RX_ENABLE_LOCKS
- osirx_AssertMine(&call->lock, "rxi_Start lock3");
- CV_WAIT(&call->cv_tq, &call->lock);
-#else /* RX_ENABLE_LOCKS */
- osi_rxSleep(&call->tq);
-#endif /* RX_ENABLE_LOCKS */
- call->tqWaiters--;
- if (call->tqWaiters == 0)
- call->flags &= ~RX_CALL_TQ_WAIT;
- }
+ if (call->state == RX_STATE_ACTIVE) {
+ rxi_WaitforTQBusy(call);
+ /*
+ * If we entered error state while waiting,
+ * must call rxi_CallError to permit rxi_ResetCall
+	     * to proceed when the tqWaiters count hits zero.
+ */
+ if (call->error) {
+ rxi_CallError(call, call->error);
+ MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ return np;
+ }
+ }
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
/* If the new call cannot be taken right now send a busy and set
* the error condition in this call, so that it terminates as
tp = rxi_SendSpecial(call, conn, np, RX_PACKET_TYPE_BUSY,
NULL, 0, 1);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return tp;
}
rxi_ResetCall(call, 0);
*call->callNumber = np->header.callNumber;
#ifdef RXDEBUG
if (np->header.callNumber == 0)
- dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%06d len %d",
+ dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" len %d\n",
np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port),
np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq,
- np->header.flags, np, np->retryTime.sec, np->retryTime.usec, np->length));
+ np->header.flags, np, np->length));
#endif
call->state = RX_STATE_PRECALL;
clock_GetTime(&call->queueTime);
rxi_CallError(call, rx_BusyError);
tp = rxi_SendCallAbort(call, np, 1, 0);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.nBusies, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.nBusies);
return tp;
}
rxi_KeepAliveOn(call);
}
} else { /* we're the client */
/* Ignore all incoming acknowledgements for calls in DALLY state */
- if (call && (call->state == RX_STATE_DALLY)
+ if ((call->state == RX_STATE_DALLY)
&& (np->header.type == RX_PACKET_TYPE_ACK)) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ignorePacketDally, rx_stats_mutex);
-#ifdef RX_ENABLE_LOCKS
- if (call) {
- MUTEX_EXIT(&call->lock);
- }
-#endif
- MUTEX_ENTER(&conn->conn_data_lock);
+ rx_atomic_inc(&rx_stats.ignorePacketDally);
+ MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
/* Ignore anything that's not relevant to the current call. If there
* isn't a current call, then no packet is relevant. */
- if (!call || (np->header.callNumber != currentCallNumber)) {
+ if (np->header.callNumber != currentCallNumber) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
-#ifdef RX_ENABLE_LOCKS
- if (call) {
- MUTEX_EXIT(&call->lock);
- }
-#endif
- MUTEX_ENTER(&conn->conn_data_lock);
+ rx_atomic_inc(&rx_stats.spuriousPacketsRead);
+ MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
/* If the service security object index stamped in the packet does not
* match the connection's security index, ignore the packet */
if (np->header.securityIndex != conn->securityIndex) {
-#ifdef RX_ENABLE_LOCKS
MUTEX_EXIT(&call->lock);
-#endif
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
#ifdef RX_ENABLE_LOCKS
rxi_SetAcksInTransmitQueue(call);
#else
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np; /* xmitting; drop packet */
#endif
} else {
* XXX code in receiveackpacket. */
if (ntohl(rx_GetInt32(np, FIRSTACKOFFSET)) < call->tfirst) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.spuriousPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.spuriousPacketsRead);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
}
/* What if error is zero? */
/* What if the error is -1? the application will treat it as a timeout. */
afs_int32 errdata = ntohl(*(afs_int32 *) rx_DataOf(np));
- dpf(("rxi_ReceivePacket ABORT rx_DataOf = %d", errdata));
+ dpf(("rxi_ReceivePacket ABORT rx_DataOf = %d\n", errdata));
rxi_CallError(call, errdata);
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np; /* xmitting; drop packet */
}
- case RX_PACKET_TYPE_BUSY:
- /* XXXX */
- break;
+ case RX_PACKET_TYPE_BUSY: {
+ struct clock busyTime;
+ clock_NewTime();
+ clock_GetTime(&busyTime);
+
+ MUTEX_EXIT(&call->lock);
+
+ MUTEX_ENTER(&conn->conn_call_lock);
+ MUTEX_ENTER(&call->lock);
+ conn->lastBusy[call->channel] = busyTime.sec;
+ call->flags |= RX_CALL_PEER_BUSY;
+ MUTEX_EXIT(&call->lock);
+ MUTEX_EXIT(&conn->conn_call_lock);
+
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ return np;
+ }
+
case RX_PACKET_TYPE_ACKALL:
/* All packets acknowledged, so we can drop all packets previously
* readied for sending */
break;
#else /* RX_ENABLE_LOCKS */
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np; /* xmitting; drop packet */
#endif /* RX_ENABLE_LOCKS */
}
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
rxi_ClearTransmitQueue(call, 0);
- rxevent_Cancel(call->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE);
break;
default:
/* Should not reach here, unless the peer is broken: send an abort
* the packet will be delivered to the user before any get time is required
* (if not, then the time won't actually be re-evaluated here). */
call->lastReceiveTime = clock_Sec();
+ /* we've received a legit packet, so the channel is not busy */
+ call->flags &= ~RX_CALL_PEER_BUSY;
MUTEX_EXIT(&call->lock);
- MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
- MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return np;
}
}
#endif /* KERNEL */
+/*!
+ * Clear the attach wait flag on a connection and proceed.
+ *
+ * Any processing waiting for a connection to be attached should be
+ * unblocked. We clear the flag and do any other needed tasks.
+ *
+ * @param[in] conn
+ * the conn to unmark waiting for attach
+ *
+ * @pre conn's conn_data_lock must be locked before calling this function
+ *
+ */
+static void
+rxi_ConnClearAttachWait(struct rx_connection *conn)
+{
+ /* Indicate that rxi_CheckReachEvent is no longer running by
+ * clearing the flag. Must be atomic under conn_data_lock to
+ * avoid a new call slipping by: rxi_CheckConnReach holds
+ * conn_data_lock while checking RX_CONN_ATTACHWAIT.
+ */
+ conn->flags &= ~RX_CONN_ATTACHWAIT;
+ if (conn->flags & RX_CONN_NAT_PING) {
+ conn->flags &= ~RX_CONN_NAT_PING;
+ rxi_ScheduleNatKeepAliveEvent(conn);
+ }
+}
+
static void
rxi_CheckReachEvent(struct rxevent *event, void *arg1, void *arg2)
{
MUTEX_ENTER(&conn->conn_data_lock);
conn->checkReachEvent = NULL;
waiting = conn->flags & RX_CONN_ATTACHWAIT;
- if (event)
+ if (event) {
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ }
MUTEX_EXIT(&conn->conn_data_lock);
if (waiting) {
}
}
if (!call)
- /* Indicate that rxi_CheckReachEvent is no longer running by
- * clearing the flag. Must be atomic under conn_data_lock to
- * avoid a new call slipping by: rxi_CheckConnReach holds
- * conn_data_lock while checking RX_CONN_ATTACHWAIT.
- */
- conn->flags &= ~RX_CONN_ATTACHWAIT;
+ rxi_ConnClearAttachWait(conn);
MUTEX_EXIT(&conn->conn_data_lock);
MUTEX_EXIT(&conn->conn_call_lock);
}
when.sec += RX_CHECKREACH_TIMEOUT;
MUTEX_ENTER(&conn->conn_data_lock);
if (!conn->checkReachEvent) {
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++;
+ MUTEX_EXIT(&rx_refcnt_mutex);
conn->checkReachEvent =
rxevent_PostNow(&when, &now, rxi_CheckReachEvent, conn,
NULL);
struct rx_packet *tnp;
struct clock when, now;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dataPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.dataPacketsRead);
#ifdef KERNEL
/* If there are no packet buffers, drop this new packet, unless we can find
rxi_NeedMorePackets = TRUE;
MUTEX_EXIT(&rx_freePktQ_lock);
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.noPacketBuffersOnRead);
call->rprev = np->header.serial;
rxi_calltrace(RX_TRACE_DROP, call);
- dpf(("packet %"AFS_PTR_FMT" dropped on receipt - quota problems", np));
+ dpf(("packet %"AFS_PTR_FMT" dropped on receipt - quota problems\n", np));
if (rxi_doreclaim)
rxi_ClearReceiveQueue(call);
clock_GetTime(&now);
|| clock_Gt(&call->delayedAckEvent->eventTime, &when)) {
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
+ MUTEX_EXIT(&rx_refcnt_mutex);
+
call->delayedAckEvent =
rxevent_PostNow(&when, &now, rxi_SendDelayedAck, call, 0);
}
if (queue_IsNotEmpty(&call->rq)
&& queue_First(&call->rq, rx_packet)->header.seq == seq) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
- dpf(("packet %"AFS_PTR_FMT" dropped on receipt - duplicate", np));
+ rx_atomic_inc(&rx_stats.dupPacketsRead);
+ dpf(("packet %"AFS_PTR_FMT" dropped on receipt - duplicate\n", np));
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
* application already, then this is a duplicate */
if (seq < call->rnext) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.dupPacketsRead);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
/*Check for duplicate packet */
if (seq == tp->header.seq) {
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.dupPacketsRead);
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE,
|| clock_Gt(&call->delayedAckEvent->eventTime, &when)) {
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
+ MUTEX_EXIT(&rx_refcnt_mutex);
call->delayedAckEvent =
rxevent_PostNow(&when, &now, rxi_SendDelayedAck, call, 0);
}
if (conn->flags & RX_CONN_ATTACHWAIT) {
int i;
- conn->flags &= ~RX_CONN_ATTACHWAIT;
+ rxi_ConnClearAttachWait(conn);
MUTEX_EXIT(&conn->conn_data_lock);
for (i = 0; i < RX_MAXCALLS; i++) {
}
#endif
-
-/* rxi_ComputePeerNetStats
- *
- * Called exclusively by rxi_ReceiveAckPacket to compute network link
- * estimates (like RTT and throughput) based on ack packets. Caller
- * must ensure that the packet in question is the right one (i.e.
- * serial number matches).
- */
-static void
-rxi_ComputePeerNetStats(struct rx_call *call, struct rx_packet *p,
- struct rx_ackPacket *ap, struct rx_packet *np)
-{
- struct rx_peer *peer = call->conn->peer;
-
- /* Use RTT if not delayed by client and
- * ignore packets that were retransmitted. */
- if (!(p->flags & RX_PKTFLAG_ACKED) &&
- ap->reason != RX_ACK_DELAY &&
- clock_Eq(&p->timeSent, &p->firstSent))
- rxi_ComputeRoundTripTime(p, &p->timeSent, peer);
-#ifdef ADAPT_WINDOW
- rxi_ComputeRate(peer, call, p, np, ap->reason);
-#endif
-}
-
+
/* The real smarts of the whole thing. */
struct rx_packet *
rxi_ReceiveAckPacket(struct rx_call *call, struct rx_packet *np,
struct rx_packet *nxp; /* Next packet pointer for queue_Scan */
struct rx_connection *conn = call->conn;
struct rx_peer *peer = conn->peer;
+ struct clock now; /* Current time, for RTT calculations */
afs_uint32 first;
+ afs_uint32 prev;
afs_uint32 serial;
/* because there are CM's that are bogus, sending weird values for this. */
afs_uint32 skew = 0;
int conn_data_locked = 0;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ackPacketsRead, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.ackPacketsRead);
ap = (struct rx_ackPacket *)rx_DataOf(np);
nbytes = rx_Contiguous(np) - (int)((ap->acks) - (u_char *) ap);
if (nbytes < 0)
/* depends on ack packet struct */
nAcks = MIN((unsigned)nbytes, (unsigned)ap->nAcks);
first = ntohl(ap->firstPacket);
+ prev = ntohl(ap->previousPacket);
serial = ntohl(ap->serial);
/* temporarily disabled -- needs to degrade over time
* skew = ntohs(ap->maxSkew); */
/* Ignore ack packets received out of order */
- if (first < call->tfirst) {
+ if (first < call->tfirst ||
+ (first == call->tfirst && prev < call->tprev)) {
return np;
}
+ call->tprev = prev;
+
if (np->header.flags & RX_SLOW_START_OK) {
call->flags |= RX_CALL_SLOW_START_OK;
}
* much */
peer->outPacketSkew = skew;
- /* Check for packets that no longer need to be transmitted, and
- * discard them. This only applies to packets positively
- * acknowledged as having been sent to the peer's upper level.
- * All other packets must be retained. So only packets with
- * sequence numbers < ap->firstPacket are candidates. */
- for (queue_Scan(&call->tq, tp, nxp, rx_packet)) {
- if (tp->header.seq >= first)
- break;
+
+ clock_GetTime(&now);
+
+ /* The transmit queue splits into 4 sections.
+ *
+ * The first section is packets which have now been acknowledged
+ * by a window size change in the ack. These have reached the
+ * application layer, and may be discarded. These are packets
+ * with sequence numbers < ap->firstPacket.
+ *
+ * The second section is packets which have sequence numbers in
+ * the range ap->firstPacket to ap->firstPacket + ap->nAcks. The
+ * contents of the packet's ack array determines whether these
+ * packets are acknowledged or not.
+ *
+ * The third section is packets which fall above the range
+ * addressed in the ack packet. These have not yet been received
+ * by the peer.
+ *
+     * The fourth section is packets which have not yet been transmitted.
+ * These packets will have a header.serial of 0.
+ */
+
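+    /* Illustratively, with first = 5 and nAcks = 3 (sequence numbers are
+     * examples only):
+     *
+     *   tq:   3  4 |  5  6  7 |  8  9 | 10 11 ...
+     *        <----> <-------->  <--->   <------>
+     *     implicitly  covered by  sent,   not yet sent
+     *     acked       ap->acks[]  unacked (header.serial == 0)
+     */
+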
+ /* First section - implicitly acknowledged packets that can be
+ * disposed of
+ */
+
+ tp = queue_First(&call->tq, rx_packet);
+ while(!queue_IsEnd(&call->tq, tp) && tp->header.seq < first) {
+ struct rx_packet *next;
+
+ next = queue_Next(tp, rx_packet);
call->tfirst = tp->header.seq + 1;
- rxi_ComputePeerNetStats(call, tp, ap, np);
+
if (!(tp->flags & RX_PKTFLAG_ACKED)) {
newAckCount++;
+ rxi_ComputeRoundTripTime(tp, ap, call, peer, &now);
}
+
+#ifdef ADAPT_WINDOW
+	rxi_ComputeRate(call->conn->peer, call, tp, np, ap->reason);
+#endif
+
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
/* XXX Hack. Because we have to release the global rx lock when sending
* packets (osi_NetSend) we drop all acks while we're traversing the tq
#endif /* RXDEBUG_PACKET */
rxi_FreePacket(tp); /* rxi_FreePacket mustn't wake up anyone, preemptively. */
}
+ tp = next;
}
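
As an aside, the classification that this walk begins can be restated as a tiny standalone sketch (illustrative only; the real code operates on struct rx_packet entries in the call's transmit queue):

    #include <stdio.h>
    #include <stdint.h>

    /* Classify a transmit-queue packet by its sequence number, the
     * ACK's firstPacket/nAcks fields, and whether it was ever sent. */
    static const char *
    tq_section(uint32_t seq, uint32_t first, uint32_t nAcks, uint32_t serial)
    {
        if (seq < first)
            return "1: hard-acked, safe to free";
        if (seq < first + nAcks)
            return "2: judged by the soft-ACK array";
        if (serial != 0)
            return "3: sent, beyond the acked range";
        return "4: not yet transmitted";
    }

    int
    main(void)
    {
        /* ACK reports firstPacket = 5, nAcks = 3. */
        printf("seq 3 -> %s\n", tq_section(3, 5, 3, 17));  /* section 1 */
        printf("seq 6 -> %s\n", tq_section(6, 5, 3, 18));  /* section 2 */
        printf("seq 9 -> %s\n", tq_section(9, 5, 3, 19));  /* section 3 */
        printf("seq 10 -> %s\n", tq_section(10, 5, 3, 0)); /* section 4 */
        return 0;
    }
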
#ifdef ADAPT_WINDOW
/* N.B. we don't turn off any timers here. They'll go away by themselves, anyway */
- /* Now go through explicit acks/nacks and record the results in
+ /* Second section of the queue - packets for which we are receiving
+ * soft ACKs
+ *
+ * Go through the explicit acks/nacks and record the results in
* the waiting packets. These are packets that can't be released
* yet, even with a positive acknowledge. This positive
* acknowledge only means the packet has been received by the
* because this packet was out of sequence) */
call->nSoftAcked = 0;
- for (missing = 0, queue_Scan(&call->tq, tp, nxp, rx_packet)) {
- /* Update round trip time if the ack was stimulated on receipt
- * of this packet */
-#ifdef AFS_GLOBAL_RXLOCK_KERNEL
-#ifdef RX_ENABLE_LOCKS
- if (tp->header.seq >= first)
-#endif /* RX_ENABLE_LOCKS */
-#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- rxi_ComputePeerNetStats(call, tp, ap, np);
-
+ missing = 0;
+ while (!queue_IsEnd(&call->tq, tp) && tp->header.seq < first + nAcks) {
/* Set the acknowledge flag per packet based on the
     * information in the ack packet. An acknowledged packet can
* be downgraded when the server has discarded a packet it
* soacked previously, or when an ack packet is received
* out of sequence. */
- if (tp->header.seq < first) {
- /* Implicit ack information */
+ if (ap->acks[tp->header.seq - first] == RX_ACK_TYPE_ACK) {
if (!(tp->flags & RX_PKTFLAG_ACKED)) {
newAckCount++;
+ tp->flags |= RX_PKTFLAG_ACKED;
+ rxi_ComputeRoundTripTime(tp, ap, call, peer, &now);
+#ifdef ADAPT_WINDOW
+ rxi_ComputeRate(call->conn->peer, call, tp, np, ap->reason);
+#endif
}
- tp->flags |= RX_PKTFLAG_ACKED;
- } else if (tp->header.seq < first + nAcks) {
- /* Explicit ack information: set it in the packet appropriately */
- if (ap->acks[tp->header.seq - first] == RX_ACK_TYPE_ACK) {
- if (!(tp->flags & RX_PKTFLAG_ACKED)) {
- newAckCount++;
- tp->flags |= RX_PKTFLAG_ACKED;
- }
- if (missing) {
- nNacked++;
- } else {
- call->nSoftAcked++;
- }
- } else /* RX_ACK_TYPE_NACK */ {
- tp->flags &= ~RX_PKTFLAG_ACKED;
- missing = 1;
+ if (missing) {
+ nNacked++;
+ } else {
+ call->nSoftAcked++;
}
- } else {
+ } else /* RX_ACK_TYPE_NACK */ {
tp->flags &= ~RX_PKTFLAG_ACKED;
missing = 1;
}
- /*
- * Following the suggestion of Phil Kern, we back off the peer's
- * timeout value for future packets until a successful response
- * is received for an initial transmission.
- */
- if (missing && !peer->backedOff) {
- struct clock c = peer->timeout;
- struct clock max_to = {3, 0};
-
- clock_Add(&peer->timeout, &c);
- if (clock_Gt(&peer->timeout, &max_to))
- peer->timeout = max_to;
- peer->backedOff = 1;
- }
-
- /* If packet isn't yet acked, and it has been transmitted at least
- * once, reset retransmit time using latest timeout
- * ie, this should readjust the retransmit timer for all outstanding
- * packets... So we don't just retransmit when we should know better*/
-
- if (!(tp->flags & RX_PKTFLAG_ACKED) && !clock_IsZero(&tp->retryTime)) {
- tp->retryTime = tp->timeSent;
- clock_Add(&tp->retryTime, &peer->timeout);
- /* shift by eight because one quarter-sec ~ 256 milliseconds */
- clock_Addmsec(&(tp->retryTime), ((afs_uint32) tp->backoff) << 8);
- }
+ tp = queue_Next(tp, rx_packet);
}
+    /* We don't need to take any action with the 3rd or 4th sections of the
+     * queue - they're not addressed by the contents of this ACK packet.
+ */
+
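
The soft-ACK bookkeeping in the loop above amounts to indexing the ACK's byte array by (seq - firstPacket) and tracking whether a hole has been seen. A self-contained sketch of that decoding (the ACK_TYPE_* constants are stand-ins for the RX_ACK_TYPE_* values in the rx headers):

    #include <stdio.h>
    #include <stdint.h>

    #define ACK_TYPE_NACK 0   /* stand-in for RX_ACK_TYPE_NACK */
    #define ACK_TYPE_ACK  1   /* stand-in for RX_ACK_TYPE_ACK */

    int
    main(void)
    {
        /* firstPacket = 5; acks[] covers sequence numbers 5..9 */
        uint8_t acks[] = { 1, 1, 0, 1, 1 };
        uint32_t first = 5, seq;
        int missing = 0, nSoftAcked = 0, nNacked = 0;

        for (seq = first; seq < first + sizeof(acks); seq++) {
            if (acks[seq - first] == ACK_TYPE_ACK) {
                /* acks that follow a hole count towards nNacked,
                 * just as in the loop above */
                if (missing)
                    nNacked++;
                else
                    nSoftAcked++;
            } else {
                missing = 1;
            }
        }
        printf("soft-acked %d, acked-after-hole %d\n", nSoftAcked, nNacked);
        return 0;
    }
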
/* If the window has been extended by this acknowledge packet,
* then wakeup a sender waiting in alloc for window space, or try
* sending packets now, if he's been sitting on packets due to
maxDgramPackets = MIN(maxDgramPackets, rxi_nDgramPackets);
maxDgramPackets =
MIN(maxDgramPackets, (int)(peer->ifDgramPackets));
- maxDgramPackets = MIN(maxDgramPackets, tSize);
if (maxDgramPackets > 1) {
peer->maxDgramPackets = maxDgramPackets;
call->MTU = RX_JUMBOBUFFERSIZE + RX_HEADER_SIZE;
call->nNacks = 0;
}
+ /* If the packet contained new acknowledgements, rather than just
+ * being a duplicate of one we have previously seen, then we can restart
+ * the RTT timer
+ */
+ if (newAckCount > 0)
+ rxi_rto_packet_acked(call, istack);
+
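
rxi_rto_packet_acked is the hook that re-arms the retransmission timer once an ACK has advanced the window. Its body is not part of this hunk; as a hypothetical model, the rule such a hook typically implements (cf. RFC 6298, section 5.3) looks like this, with timer_restart/timer_stop as placeholders rather than rx functions:

    #include <stdio.h>

    static void timer_restart(void) { printf("re-arm RTO\n"); }
    static void timer_stop(void)    { printf("cancel RTO\n"); }

    /* On an ACK that acknowledges new data (newAckCount > 0), restart
     * the retransmission timer for whatever is still outstanding; if
     * nothing remains unacknowledged, stop it. */
    static void
    on_new_ack(int packets_still_outstanding)
    {
        if (packets_still_outstanding)
            timer_restart();
        else
            timer_stop();
    }

    int
    main(void)
    {
        on_new_ack(1);
        on_new_ack(0);
        return 0;
    }
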
if (call->flags & RX_CALL_FAST_RECOVER) {
- if (nNacked) {
+ if (newAckCount == 0) {
call->cwind = MIN((int)(call->cwind + 1), rx_maxSendWindow);
} else {
call->flags &= ~RX_CALL_FAST_RECOVER;
call->nCwindAcks = 0;
} else if (nNacked && call->nNacks >= (u_short) rx_nackThreshold) {
/* Three negative acks in a row trigger congestion recovery */
-#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- MUTEX_EXIT(&peer->peer_lock);
- if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
- /* someone else is waiting to start recovery */
- return np;
- }
- call->flags |= RX_CALL_FAST_RECOVER_WAIT;
- rxi_WaitforTQBusy(call);
- MUTEX_ENTER(&peer->peer_lock);
-#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- call->flags &= ~RX_CALL_FAST_RECOVER_WAIT;
call->flags |= RX_CALL_FAST_RECOVER;
call->ssthresh = MAX(4, MIN((int)call->cwind, (int)call->twind)) >> 1;
call->cwind =
peer->nDgramPackets = call->nDgramPackets;
peer->congestSeq++;
call->congestSeq = peer->congestSeq;
+
/* Reset the resend times on the packets that were nacked
- * so we will retransmit as soon as the window permits*/
+ * so we will retransmit as soon as the window permits
+ */
+
for (acked = 0, queue_ScanBackwards(&call->tq, tp, nxp, rx_packet)) {
if (acked) {
if (!(tp->flags & RX_PKTFLAG_ACKED)) {
- clock_Zero(&tp->retryTime);
+ tp->flags &= ~RX_PKTFLAG_SENT;
}
} else if (tp->flags & RX_PKTFLAG_ACKED) {
acked = 1;
rxi_ClearTransmitQueue(call, 0);
rxevent_Cancel(call->keepAliveEvent, call, RX_CALL_REFCOUNT_ALIVE);
} else if (!queue_IsEmpty(&call->tq)) {
- rxi_Start(0, call, 0, istack);
+ rxi_Start(call, istack);
}
return np;
}
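
For a feel of the recovery arithmetic above (ssthresh = MAX(4, MIN(cwind, twind)) >> 1), a small worked sketch; the window values are made up:

    #include <stdio.h>

    #define MAX(a,b) ((a) > (b) ? (a) : (b))
    #define MIN(a,b) ((a) < (b) ? (a) : (b))

    int
    main(void)
    {
        int cwind = 32, twind = 24;
        /* Same arithmetic as the recovery path above: halve the
         * effective window, never dropping ssthresh below 2
         * (MAX(4, ...) >> 1 is at least 2). */
        int ssthresh = MAX(4, MIN(cwind, twind)) >> 1;
        printf("cwind %d, twind %d -> ssthresh %d\n", cwind, twind, ssthresh);
        return 0;
    }
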
queue_Append(&rx_incomingCallQueue, call);
}
} else {
- sq = queue_First(&rx_idleServerQueue, rx_serverQueueEntry);
+ sq = queue_Last(&rx_idleServerQueue, rx_serverQueueEntry);
/* If hot threads are enabled, and both newcallp and sq->socketp
* are non-null, then this thread will process the call, and the
*tnop = sq->tno;
*sq->socketp = socket;
clock_GetTime(&call->startTime);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
+ MUTEX_EXIT(&rx_refcnt_mutex);
} else {
sq->newcall = call;
}
if (event) {
MUTEX_ENTER(&call->lock);
call->delayedAckEvent = NULL;
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_ACKALL);
+ MUTEX_EXIT(&rx_refcnt_mutex);
}
rxi_SendSpecial(call, call->conn, (struct rx_packet *)0,
RX_PACKET_TYPE_ACKALL, NULL, 0, 0);
+ call->flags |= RX_CALL_ACKALL_SENT;
if (event)
MUTEX_EXIT(&call->lock);
#else /* RX_ENABLE_LOCKS */
call->delayedAckEvent = NULL;
rxi_SendSpecial(call, call->conn, (struct rx_packet *)0,
RX_PACKET_TYPE_ACKALL, NULL, 0, 0);
+ call->flags |= RX_CALL_ACKALL_SENT;
#endif /* RX_ENABLE_LOCKS */
}
MUTEX_ENTER(&call->lock);
if (event == call->delayedAckEvent)
call->delayedAckEvent = NULL;
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_DELAY);
+ MUTEX_EXIT(&rx_refcnt_mutex);
}
(void)rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
if (event)
call->flags |= RX_CALL_TQ_SOME_ACKED;
}
- rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
+ rxi_rto_cancel(call);
+
call->tfirst = call->tnext;
call->nSoftAcked = 0;
call->tqc -=
#endif /* RXDEBUG_PACKET */
rxi_FreePackets(0, &call->tq);
- if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
-#ifdef RX_ENABLE_LOCKS
- CV_BROADCAST(&call->cv_tq);
-#else /* RX_ENABLE_LOCKS */
- osi_rxWakeup(&call->tq);
-#endif /* RX_ENABLE_LOCKS */
- }
+ rxi_WakeUpTransmitQueue(call);
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
call->flags &= ~RX_CALL_TQ_CLEARME;
}
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
+ rxi_rto_cancel(call);
call->tfirst = call->tnext; /* implicitly acknowledge all data already sent */
call->nSoftAcked = 0;
#ifdef RXDEBUG_PACKET
call->rqc -= count;
if ( call->rqc != 0 )
- dpf(("rxi_ClearReceiveQueue call %"AFS_PTR_FMT" rqc %u != 0", call, call->rqc));
+ dpf(("rxi_ClearReceiveQueue call %"AFS_PTR_FMT" rqc %u != 0\n", call, call->rqc));
#endif
call->flags &= ~(RX_CALL_RECEIVE_DONE | RX_CALL_HAVE_LAST);
}
clock_GetTime(&now);
when = now;
clock_Addmsec(&when, rxi_callAbortDelay);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_ABORT);
+ MUTEX_EXIT(&rx_refcnt_mutex);
call->delayedAbortEvent =
rxevent_PostNow(&when, &now, rxi_SendDelayedCallAbort, call, 0);
}
if (error) {
int i;
- dpf(("rxi_ConnectionError conn %"AFS_PTR_FMT" error %d", conn, error));
+ dpf(("rxi_ConnectionError conn %"AFS_PTR_FMT" error %d\n", conn, error));
MUTEX_ENTER(&conn->conn_data_lock);
if (conn->challengeEvent)
if (conn->checkReachEvent) {
rxevent_Cancel(conn->checkReachEvent, (struct rx_call *)0, 0);
conn->checkReachEvent = 0;
- conn->flags &= ~RX_CONN_ATTACHWAIT;
+ conn->flags &= ~(RX_CONN_ATTACHWAIT|RX_CONN_NAT_PING);
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount--;
+ MUTEX_EXIT(&rx_refcnt_mutex);
}
MUTEX_EXIT(&conn->conn_data_lock);
for (i = 0; i < RX_MAXCALLS; i++) {
}
conn->error = error;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.fatalErrors, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.fatalErrors);
}
}
+/**
+ * Interrupt an in-progress call with the specified error and wakeup waiters.
+ *
+ * @param[in] call The call to interrupt
+ * @param[in] error The error code to send to the peer
+ */
+void
+rx_InterruptCall(struct rx_call *call, afs_int32 error)
+{
+ MUTEX_ENTER(&call->lock);
+ rxi_CallError(call, error);
+ rxi_SendCallAbort(call, NULL, 0, 1);
+ MUTEX_EXIT(&call->lock);
+}
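
A plausible use of this new entry point (hypothetical caller; RX_CALL_TIMEOUT is one of the existing rx abort codes) is a watchdog thread tearing down a stalled call. Note that rx_InterruptCall takes the call lock itself, so the caller must not hold it:

    /* Hypothetical watchdog helper, assuming the rx headers are in scope. */
    static void
    watchdog_abort(struct rx_call *stuck_call)
    {
        rx_InterruptCall(stuck_call, RX_CALL_TIMEOUT);
    }
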
+
void
rxi_CallError(struct rx_call *call, afs_int32 error)
{
#ifdef DEBUG
osirx_AssertMine(&call->lock, "rxi_CallError");
#endif
- dpf(("rxi_CallError call %"AFS_PTR_FMT" error %d call->error %d", call, error, call->error));
+ dpf(("rxi_CallError call %"AFS_PTR_FMT" error %d call->error %d\n", call, error, call->error));
if (call->error)
error = call->error;
rxi_ResetCall(call, 0);
#endif
call->error = error;
- call->mode = RX_MODE_ERROR;
}
/* Reset various fields in a call structure, and wakeup waiting
call->arrivalProc = (void (*)())0;
}
+ if (call->growMTUEvent)
+ rxevent_Cancel(call->growMTUEvent, call,
+ RX_CALL_REFCOUNT_ALIVE);
+
if (call->delayedAbortEvent) {
rxevent_Cancel(call->delayedAbortEvent, call, RX_CALL_REFCOUNT_ABORT);
packet = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL);
call->ssthresh = rx_maxSendWindow;
call->nDgramPackets = peer->nDgramPackets;
call->congestSeq = peer->congestSeq;
+ call->rtt = peer->rtt;
+ call->rtt_dev = peer->rtt_dev;
+ clock_Zero(&call->rto);
+ clock_Addmsec(&call->rto,
+ MAX(((call->rtt >> 3) + call->rtt_dev), rx_minPeerTimeout) + 200);
MUTEX_EXIT(&peer->peer_lock);
flags = call->flags;
}
call->flags = 0;
+ if ((flags & RX_CALL_PEER_BUSY)) {
+ /* The call channel is still busy; resetting the call doesn't change
+ * that */
+ call->flags |= RX_CALL_PEER_BUSY;
+ }
+
rxi_ClearReceiveQueue(call);
/* why init the queue if you just emptied it? queue_Init(&call->rq); */
call->nHardAcks = 0;
call->tfirst = call->rnext = call->tnext = 1;
+ call->tprev = 0;
call->rprev = 0;
call->lastAcked = 0;
call->localStatus = call->remoteStatus = 0;
ap->serial = htonl(serial);
ap->maxSkew = 0; /* used to be peer->inPacketSkew */
- ap->firstPacket = htonl(call->rnext); /* First packet not yet forwarded to reader */
+ /*
+ * First packet not yet forwarded to reader. When ACKALL has been
+ * sent the peer has been told that all received packets will be
+ * delivered to the reader. The value 'rnext' is used internally
+ * to refer to the next packet in the receive queue that must be
+     * delivered to the reader. From the peer's perspective those
+     * packets have already been delivered, so report the last sequence
+     * number plus one if there are packets in the receive queue
+     * awaiting processing.
+ */
+ if ((call->flags & RX_CALL_ACKALL_SENT) &&
+ !queue_IsEmpty(&call->rq)) {
+ ap->firstPacket = htonl(queue_Last(&call->rq, rx_packet)->header.seq + 1);
+ } else
+ ap->firstPacket = htonl(call->rnext);
+
ap->previousPacket = htonl(call->rprev); /* Previous packet received */
/* No fear of running out of ack packet here because there can only be at most
}
}
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ackPacketsSent, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.ackPacketsSent);
#ifndef RX_ENABLE_TSFPQ
if (!optionalPacket)
rxi_FreePacket(p);
return optionalPacket; /* Return packet for re-use by caller */
}
+struct xmitlist {
+ struct rx_packet **list;
+ int len;
+ int resending;
+};
+
/* Send all of the packets in the list in single datagram */
static void
-rxi_SendList(struct rx_call *call, struct rx_packet **list, int len,
- int istack, int moreFlag, struct clock *now,
- struct clock *retryTime, int resending)
+rxi_SendList(struct rx_call *call, struct xmitlist *xmit,
+ int istack, int moreFlag)
{
int i;
int requestAck = 0;
int lastPacket = 0;
+ struct clock now;
struct rx_connection *conn = call->conn;
struct rx_peer *peer = conn->peer;
MUTEX_ENTER(&peer->peer_lock);
- peer->nSent += len;
- if (resending)
- peer->reSends += len;
+ peer->nSent += xmit->len;
+ if (xmit->resending)
+ peer->reSends += xmit->len;
MUTEX_EXIT(&peer->peer_lock);
if (rx_stats_active) {
- if (resending)
- rx_MutexAdd(rx_stats.dataPacketsReSent, len, rx_stats_mutex);
+ if (xmit->resending)
+ rx_atomic_add(&rx_stats.dataPacketsReSent, xmit->len);
else
- rx_MutexAdd(rx_stats.dataPacketsSent, len, rx_stats_mutex);
+ rx_atomic_add(&rx_stats.dataPacketsSent, xmit->len);
}
- if (list[len - 1]->header.flags & RX_LAST_PACKET) {
+ clock_GetTime(&now);
+
+ if (xmit->list[xmit->len - 1]->header.flags & RX_LAST_PACKET) {
lastPacket = 1;
}
/* Set the packet flags and schedule the resend events */
/* Only request an ack for the last packet in the list */
- for (i = 0; i < len; i++) {
- list[i]->retryTime = *retryTime;
- if (list[i]->header.serial) {
- /* Exponentially backoff retry times */
- if (list[i]->backoff < MAXBACKOFF) {
- /* so it can't stay == 0 */
- list[i]->backoff = (list[i]->backoff << 1) + 1;
- } else
- list[i]->backoff++;
- clock_Addmsec(&(list[i]->retryTime),
- ((afs_uint32) list[i]->backoff) << 8);
- }
-
- /* Wait a little extra for the ack on the last packet */
- if (lastPacket && !(list[i]->header.flags & RX_CLIENT_INITIATED)) {
- clock_Addmsec(&(list[i]->retryTime), 400);
- }
+ for (i = 0; i < xmit->len; i++) {
+ struct rx_packet *packet = xmit->list[i];
/* Record the time sent */
- list[i]->timeSent = *now;
+ packet->timeSent = now;
+ packet->flags |= RX_PKTFLAG_SENT;
/* Ask for an ack on retransmitted packets, on every other packet
* if the peer doesn't support slow start. Ask for an ack on every
* packet until the congestion window reaches the ack rate. */
- if (list[i]->header.serial) {
+ if (packet->header.serial) {
requestAck = 1;
} else {
- /* improved RTO calculation- not Karn */
- list[i]->firstSent = *now;
+ packet->firstSent = now;
if (!lastPacket && (call->cwind <= (u_short) (conn->ackRate + 1)
|| (!(call->flags & RX_CALL_SLOW_START_OK)
- && (list[i]->header.seq & 1)))) {
+ && (packet->header.seq & 1)))) {
requestAck = 1;
}
}
/* Tag this packet as not being the last in this group,
* for the receiver's benefit */
- if (i < len - 1 || moreFlag) {
- list[i]->header.flags |= RX_MORE_PACKETS;
+ if (i < xmit->len - 1 || moreFlag) {
+ packet->header.flags |= RX_MORE_PACKETS;
}
-
- /* Install the new retransmit time for the packet, and
- * record the time sent */
- list[i]->timeSent = *now;
}
if (requestAck) {
- list[len - 1]->header.flags |= RX_REQUEST_ACK;
+ xmit->list[xmit->len - 1]->header.flags |= RX_REQUEST_ACK;
}
/* Since we're about to send a data packet to the peer, it's
* safe to nuke any scheduled end-of-packets ack */
rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
- CALL_HOLD(call, RX_CALL_REFCOUNT_SEND);
MUTEX_EXIT(&call->lock);
- if (len > 1) {
- rxi_SendPacketList(call, conn, list, len, istack);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ CALL_HOLD(call, RX_CALL_REFCOUNT_SEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ if (xmit->len > 1) {
+ rxi_SendPacketList(call, conn, xmit->list, xmit->len, istack);
} else {
- rxi_SendPacket(call, conn, list[0], istack);
+ rxi_SendPacket(call, conn, xmit->list[0], istack);
}
MUTEX_ENTER(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_SEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
+
+ /* Tell the RTO calculation engine that we have sent a packet, and
+ * if it was the last one */
+ rxi_rto_packet_sent(call, lastPacket, istack);
/* Update last send time for this call (for keep-alive
* processing), and for the connection (so that we can discover
* idle connections) */
conn->lastSendTime = call->lastSendTime = clock_Sec();
/* Let a set of retransmits trigger an idle timeout */
- if (!resending)
+ if (!xmit->resending)
call->lastSendData = call->lastSendTime;
}
* We always keep the last list we should have sent so we
 * can set the RX_MORE_PACKETS flag correctly.
*/
+
static void
rxi_SendXmitList(struct rx_call *call, struct rx_packet **list, int len,
- int istack, struct clock *now, struct clock *retryTime,
- int resending)
+ int istack)
{
- int i, cnt, lastCnt = 0;
- struct rx_packet **listP, **lastP = 0;
+ int i;
+ int recovery;
+ struct xmitlist working;
+ struct xmitlist last;
+
struct rx_peer *peer = call->conn->peer;
int morePackets = 0;
- for (cnt = 0, listP = &list[0], i = 0; i < len; i++) {
+ memset(&last, 0, sizeof(struct xmitlist));
+ working.list = &list[0];
+ working.len = 0;
+ working.resending = 0;
+
+ recovery = call->flags & RX_CALL_FAST_RECOVER;
+
+ for (i = 0; i < len; i++) {
/* Does the current packet force us to flush the current list? */
- if (cnt > 0
+ if (working.len > 0
&& (list[i]->header.serial || (list[i]->flags & RX_PKTFLAG_ACKED)
|| list[i]->length > RX_JUMBOBUFFERSIZE)) {
- if (lastCnt > 0) {
- rxi_SendList(call, lastP, lastCnt, istack, 1, now, retryTime,
- resending);
+
+ /* This sends the 'last' list and then rolls the current working
+ * set into the 'last' one, and resets the working set */
+
+ if (last.len > 0) {
+ rxi_SendList(call, &last, istack, 1);
/* If the call enters an error state stop sending, or if
* we entered congestion recovery mode, stop sending */
- if (call->error || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
+ if (call->error
+ || (!recovery && (call->flags & RX_CALL_FAST_RECOVER)))
return;
}
- lastP = listP;
- lastCnt = cnt;
- listP = &list[i];
- cnt = 0;
+ last = working;
+ working.len = 0;
+ working.resending = 0;
+ working.list = &list[i];
}
/* Add the current packet to the list if it hasn't been acked.
* Otherwise adjust the list pointer to skip the current packet. */
if (!(list[i]->flags & RX_PKTFLAG_ACKED)) {
- cnt++;
+ working.len++;
+
+ if (list[i]->header.serial)
+ working.resending = 1;
+
/* Do we need to flush the list? */
- if (cnt >= (int)peer->maxDgramPackets
- || cnt >= (int)call->nDgramPackets || cnt >= (int)call->cwind
+ if (working.len >= (int)peer->maxDgramPackets
+ || working.len >= (int)call->nDgramPackets
+ || working.len >= (int)call->cwind
|| list[i]->header.serial
|| list[i]->length != RX_JUMBOBUFFERSIZE) {
- if (lastCnt > 0) {
- rxi_SendList(call, lastP, lastCnt, istack, 1, now,
- retryTime, resending);
+ if (last.len > 0) {
+ rxi_SendList(call, &last, istack, 1);
/* If the call enters an error state stop sending, or if
* we entered congestion recovery mode, stop sending */
if (call->error
- || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
+ || (!recovery && (call->flags & RX_CALL_FAST_RECOVER)))
return;
}
- lastP = listP;
- lastCnt = cnt;
- listP = &list[i + 1];
- cnt = 0;
+ last = working;
+ working.len = 0;
+ working.resending = 0;
+ working.list = &list[i + 1];
}
} else {
- if (cnt != 0) {
+ if (working.len != 0) {
osi_Panic("rxi_SendList error");
}
- listP = &list[i + 1];
+ working.list = &list[i + 1];
}
}
* an acked packet. Since we always send retransmissions
* in a separate packet, we only need to check the first
* packet in the list */
- if (cnt > 0 && !(listP[0]->flags & RX_PKTFLAG_ACKED)) {
+ if (working.len > 0 && !(working.list[0]->flags & RX_PKTFLAG_ACKED)) {
morePackets = 1;
}
- if (lastCnt > 0) {
- rxi_SendList(call, lastP, lastCnt, istack, morePackets, now,
- retryTime, resending);
+ if (last.len > 0) {
+ rxi_SendList(call, &last, istack, morePackets);
/* If the call enters an error state stop sending, or if
* we entered congestion recovery mode, stop sending */
- if (call->error || (call->flags & RX_CALL_FAST_RECOVER_WAIT))
+ if (call->error
+ || (!recovery && (call->flags & RX_CALL_FAST_RECOVER)))
return;
}
if (morePackets) {
- rxi_SendList(call, listP, cnt, istack, 0, now, retryTime,
- resending);
+ rxi_SendList(call, &working, istack, 0);
}
- } else if (lastCnt > 0) {
- rxi_SendList(call, lastP, lastCnt, istack, 0, now, retryTime,
- resending);
+ } else if (last.len > 0) {
+ rxi_SendList(call, &last, istack, 0);
+ /* Packets which are in 'working' are not sent by this call */
}
}
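
The working/last pair above exists so that every datagram except the final one can honestly be flagged as having more data behind it. The same buffering pattern, reduced to a standalone sketch with flush() standing in for rxi_SendList (batch size and packet count are arbitrary):

    #include <stdio.h>

    struct batch { int start, len; };

    static void
    flush(struct batch *b, int morePackets)
    {
        printf("send packets %d..%d more=%d\n",
               b->start, b->start + b->len - 1, morePackets);
    }

    int
    main(void)
    {
        /* 7 packets; a datagram holds at most 3. Hold each completed
         * batch in 'last' until the next one exists, so every flush
         * except the final one can set the more-follows flag. */
        struct batch working = { 0, 0 }, last = { 0, 0 };
        int i;

        for (i = 0; i < 7; i++) {
            working.len++;
            if (working.len == 3) {
                if (last.len > 0)
                    flush(&last, 1);
                last = working;
                working.start = i + 1;
                working.len = 0;
            }
        }
        if (last.len > 0)
            flush(&last, working.len > 0);
        if (working.len > 0)
            flush(&working, 0);
        return 0;
    }
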
-#ifdef RX_ENABLE_LOCKS
-/* Call rxi_Start, below, but with the call lock held. */
-void
-rxi_StartUnlocked(struct rxevent *event,
- void *arg0, void *arg1, int istack)
+static void
+rxi_Resend(struct rxevent *event, void *arg0, void *arg1, int istack)
{
struct rx_call *call = arg0;
+ struct rx_peer *peer;
+ struct rx_packet *p, *nxp;
+ struct clock maxTimeout = { 60, 0 };
MUTEX_ENTER(&call->lock);
- rxi_Start(event, call, arg1, istack);
+
+ peer = call->conn->peer;
+
+ /* Make sure that the event pointer is removed from the call
+ * structure, since there is no longer a per-call retransmission
+ * event pending. */
+ if (event == call->resendEvent) {
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ CALL_RELE(call, RX_CALL_REFCOUNT_RESEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ call->resendEvent = NULL;
+ }
+
+ if (rxi_busyChannelError && (call->flags & RX_CALL_PEER_BUSY)) {
+ rxi_CheckBusy(call);
+ }
+
+ if (queue_IsEmpty(&call->tq)) {
+	/* Nothing to do. This means that we've been raced, and that an
+	 * ACK has come in between when we were triggered and when we
+	 * actually got to run. */
+ goto out;
+ }
+
+ /* We're in loss recovery */
+ call->flags |= RX_CALL_FAST_RECOVER;
+
+ /* Mark all of the pending packets in the queue as being lost */
+ for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
+ if (!(p->flags & RX_PKTFLAG_ACKED))
+ p->flags &= ~RX_PKTFLAG_SENT;
+ }
+
+ /* We're resending, so we double the timeout of the call. This will be
+ * dropped back down by the first successful ACK that we receive.
+ *
+ * We apply a maximum value here of 60 seconds
+ */
+ clock_Add(&call->rto, &call->rto);
+ if (clock_Gt(&call->rto, &maxTimeout))
+ call->rto = maxTimeout;
+
+ /* Packet loss is most likely due to congestion, so drop our window size
+ * and start again from the beginning */
+    if (peer->maxDgramPackets > 1) {
+	call->MTU = RX_JUMBOBUFFERSIZE + RX_HEADER_SIZE;
+    } else {
+	call->MTU = MIN(peer->natMTU, peer->maxMTU);
+    }
+ call->ssthresh = MAX(4, MIN((int)call->cwind, (int)call->twind)) >> 1;
+ call->nDgramPackets = 1;
+ call->cwind = 1;
+ call->nextCwind = 1;
+ call->nAcks = 0;
+ call->nNacks = 0;
+ MUTEX_ENTER(&peer->peer_lock);
+ peer->MTU = call->MTU;
+ peer->cwind = call->cwind;
+ peer->nDgramPackets = 1;
+ peer->congestSeq++;
+ call->congestSeq = peer->congestSeq;
+ MUTEX_EXIT(&peer->peer_lock);
+
+ rxi_Start(call, istack);
+
+out:
MUTEX_EXIT(&call->lock);
}
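
The doubling-with-cap applied to call->rto above is plain exponential backoff. A standalone sketch of the schedule it produces, in milliseconds (the starting value is arbitrary; the 60-second ceiling mirrors maxTimeout):

    #include <stdio.h>

    int
    main(void)
    {
        long rto_ms = 1500;             /* e.g. the current call->rto */
        const long max_ms = 60 * 1000;  /* cap, as in maxTimeout above */
        int attempt;

        for (attempt = 1; attempt <= 8; attempt++) {
            rto_ms *= 2;
            if (rto_ms > max_ms)
                rto_ms = max_ms;
            printf("retransmit %d: rto = %ld ms\n", attempt, rto_ms);
        }
        /* 3000, 6000, 12000, 24000, 48000, 60000, 60000, 60000 */
        return 0;
    }
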
-#endif /* RX_ENABLE_LOCKS */
/* This routine is called when new packets are readied for
* transmission and when retransmission may be necessary, or when the
* better optimized for new packets, the usual case, now that we've
* got rid of queues of send packets. XXXXXXXXXXX */
void
-rxi_Start(struct rxevent *event,
- void *arg0, void *arg1, int istack)
+rxi_Start(struct rx_call *call, int istack)
{
- struct rx_call *call = arg0;
struct rx_packet *p;
struct rx_packet *nxp; /* Next pointer for queue_Scan */
- struct rx_peer *peer = call->conn->peer;
- struct clock now, usenow, retryTime;
- int haveEvent;
int nXmitPackets;
int maxXmitPackets;
- struct rx_packet **xmitList;
- int resending = 0;
- /* If rxi_Start is being called as a result of a resend event,
- * then make sure that the event pointer is removed from the call
- * structure, since there is no longer a per-call retransmission
- * event pending. */
- if (event && event == call->resendEvent) {
- CALL_RELE(call, RX_CALL_REFCOUNT_RESEND);
- call->resendEvent = NULL;
- resending = 1;
- if (queue_IsEmpty(&call->tq)) {
- /* Nothing to do */
- return;
- }
- /* Timeouts trigger congestion recovery */
-#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
- /* someone else is waiting to start recovery */
- return;
- }
- call->flags |= RX_CALL_FAST_RECOVER_WAIT;
- rxi_WaitforTQBusy(call);
-#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- call->flags &= ~RX_CALL_FAST_RECOVER_WAIT;
- call->flags |= RX_CALL_FAST_RECOVER;
- if (peer->maxDgramPackets > 1) {
- call->MTU = RX_JUMBOBUFFERSIZE + RX_HEADER_SIZE;
- } else {
- call->MTU = MIN(peer->natMTU, peer->maxMTU);
- }
- call->ssthresh = MAX(4, MIN((int)call->cwind, (int)call->twind)) >> 1;
- call->nDgramPackets = 1;
- call->cwind = 1;
- call->nextCwind = 1;
- call->nAcks = 0;
- call->nNacks = 0;
- MUTEX_ENTER(&peer->peer_lock);
- peer->MTU = call->MTU;
- peer->cwind = call->cwind;
- peer->nDgramPackets = 1;
- peer->congestSeq++;
- call->congestSeq = peer->congestSeq;
- MUTEX_EXIT(&peer->peer_lock);
- /* Clear retry times on packets. Otherwise, it's possible for
- * some packets in the queue to force resends at rates faster
- * than recovery rates.
- */
- for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
- if (!(p->flags & RX_PKTFLAG_ACKED)) {
- clock_Zero(&p->retryTime);
- }
- }
- }
if (call->error) {
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
if (rx_stats_active)
- rx_MutexIncrement(rx_tq_debug.rxi_start_in_error, rx_stats_mutex);
+ rx_atomic_inc(&rx_tq_debug.rxi_start_in_error);
#endif
return;
}
if (queue_IsNotEmpty(&call->tq)) { /* If we have anything to send */
- /* Get clock to compute the re-transmit time for any packets
- * in this burst. Note, if we back off, it's reasonable to
- * back off all of the packets in the same manner, even if
- * some of them have been retransmitted more times than more
- * recent additions.
- * Do a dance to avoid blocking after setting now. */
- MUTEX_ENTER(&peer->peer_lock);
- retryTime = peer->timeout;
- MUTEX_EXIT(&peer->peer_lock);
- clock_GetTime(&now);
- clock_Add(&retryTime, &now);
- usenow = now;
/* Send (or resend) any packets that need it, subject to
* window restrictions and congestion burst control
* restrictions. Ask for an ack on the last packet sent in
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
nXmitPackets = 0;
maxXmitPackets = MIN(call->twind, call->cwind);
- xmitList = (struct rx_packet **)
-#if defined(KERNEL) && !defined(UKERNEL) && defined(AFS_FBSD80_ENV)
- /* XXXX else we must drop any mtx we hold */
- afs_osi_Alloc_NoSleep(maxXmitPackets * sizeof(struct rx_packet *));
-#else
- osi_Alloc(maxXmitPackets * sizeof(struct rx_packet *));
-#endif
- if (xmitList == NULL)
- osi_Panic("rxi_Start, failed to allocate xmit list");
for (queue_Scan(&call->tq, p, nxp, rx_packet)) {
- if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
- /* We shouldn't be sending packets if a thread is waiting
- * to initiate congestion recovery */
- dpf(("call %d waiting to initiate fast recovery\n",
- *(call->callNumber)));
- break;
- }
- if ((nXmitPackets)
- && (call->flags & RX_CALL_FAST_RECOVER)) {
- /* Only send one packet during fast recovery */
- dpf(("call %d restricted to one packet per send during fast recovery\n",
- *(call->callNumber)));
- break;
- }
#ifdef RX_TRACK_PACKETS
if ((p->flags & RX_PKTFLAG_FREE)
|| (!queue_IsEnd(&call->tq, nxp)
#endif
if (p->flags & RX_PKTFLAG_ACKED) {
/* Since we may block, don't trust this */
- usenow.sec = usenow.usec = 0;
if (rx_stats_active)
- rx_MutexIncrement(rx_stats.ignoreAckedPacket, rx_stats_mutex);
+ rx_atomic_inc(&rx_stats.ignoreAckedPacket);
continue; /* Ignore this packet if it has been acknowledged */
}
}
/* Transmit the packet if it needs to be sent. */
- if (!clock_Lt(&now, &p->retryTime)) {
+ if (!(p->flags & RX_PKTFLAG_SENT)) {
if (nXmitPackets == maxXmitPackets) {
- rxi_SendXmitList(call, xmitList, nXmitPackets,
- istack, &now, &retryTime,
- resending);
- osi_Free(xmitList, maxXmitPackets *
- sizeof(struct rx_packet *));
+ rxi_SendXmitList(call, call->xmitList,
+ nXmitPackets, istack);
goto restart;
}
- dpf(("call %d xmit packet %"AFS_PTR_FMT" now %u.%06u retryTime %u.%06u nextRetry %u.%06u\n",
- *(call->callNumber), p,
- now.sec, now.usec,
- p->retryTime.sec, p->retryTime.usec,
- retryTime.sec, retryTime.usec));
- xmitList[nXmitPackets++] = p;
+ dpf(("call %d xmit packet %"AFS_PTR_FMT"\n",
+ *(call->callNumber), p));
+ call->xmitList[nXmitPackets++] = p;
}
}
	/* xmitList now holds pointers to all of the packets that are
* ready to send. Now we loop to send the packets */
if (nXmitPackets > 0) {
- rxi_SendXmitList(call, xmitList, nXmitPackets, istack,
- &now, &retryTime, resending);
+ rxi_SendXmitList(call, call->xmitList, nXmitPackets,
+ istack);
}
- osi_Free(xmitList,
- maxXmitPackets * sizeof(struct rx_packet *));
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
- /*
- * TQ references no longer protected by this flag; they must remain
- * protected by the global lock.
- */
- if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
- call->flags &= ~RX_CALL_TQ_BUSY;
- if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
- dpf(("call %"AFS_PTR_FMT" has %d waiters and flags %d\n",
- call, call->tqWaiters, call->flags));
-#ifdef RX_ENABLE_LOCKS
- osirx_AssertMine(&call->lock, "rxi_Start start");
- CV_BROADCAST(&call->cv_tq);
-#else /* RX_ENABLE_LOCKS */
- osi_rxWakeup(&call->tq);
-#endif /* RX_ENABLE_LOCKS */
- }
- return;
- }
if (call->error) {
/* We went into the error state while sending packets. Now is
* the time to reset the call. This will also inform the using
* process that the call is in an error state.
*/
if (rx_stats_active)
- rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
+ rx_atomic_inc(&rx_tq_debug.rxi_start_aborted);
call->flags &= ~RX_CALL_TQ_BUSY;
- if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
- dpf(("call error %d while xmit %p has %d waiters and flags %d\n",
- call->error, call, call->tqWaiters, call->flags));
-#ifdef RX_ENABLE_LOCKS
- osirx_AssertMine(&call->lock, "rxi_Start middle");
- CV_BROADCAST(&call->cv_tq);
-#else /* RX_ENABLE_LOCKS */
- osi_rxWakeup(&call->tq);
-#endif /* RX_ENABLE_LOCKS */
- }
+ rxi_WakeUpTransmitQueue(call);
rxi_CallError(call, call->error);
return;
}
call->flags |= RX_CALL_TQ_CLEARME;
}
#endif /* RX_ENABLE_LOCKS */
- /* Don't bother doing retransmits if the TQ is cleared. */
- if (call->flags & RX_CALL_TQ_CLEARME) {
+ if (call->flags & RX_CALL_TQ_CLEARME)
rxi_ClearTransmitQueue(call, 1);
- } else
-#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- {
-
- /* Always post a resend event, if there is anything in the
- * queue, and resend is possible. There should be at least
- * one unacknowledged packet in the queue ... otherwise none
- * of these packets should be on the queue in the first place.
- */
- if (call->resendEvent) {
- /* Cancel the existing event and post a new one */
- rxevent_Cancel(call->resendEvent, call,
- RX_CALL_REFCOUNT_RESEND);
- }
-
- /* The retry time is the retry time on the first unacknowledged
- * packet inside the current window */
- for (haveEvent =
- 0, queue_Scan(&call->tq, p, nxp, rx_packet)) {
- /* Don't set timers for packets outside the window */
- if (p->header.seq >= call->tfirst + call->twind) {
- break;
- }
-
- if (!(p->flags & RX_PKTFLAG_ACKED)
- && !clock_IsZero(&p->retryTime)) {
- haveEvent = 1;
- retryTime = p->retryTime;
- break;
- }
- }
-
- /* Post a new event to re-run rxi_Start when retries may be needed */
- if (haveEvent && !(call->flags & RX_CALL_NEED_START)) {
-#ifdef RX_ENABLE_LOCKS
- CALL_HOLD(call, RX_CALL_REFCOUNT_RESEND);
- call->resendEvent =
- rxevent_PostNow2(&retryTime, &usenow,
- rxi_StartUnlocked,
- (void *)call, 0, istack);
-#else /* RX_ENABLE_LOCKS */
- call->resendEvent =
- rxevent_PostNow2(&retryTime, &usenow, rxi_Start,
- (void *)call, 0, istack);
-#endif /* RX_ENABLE_LOCKS */
- }
- }
-#ifdef AFS_GLOBAL_RXLOCK_KERNEL
} while (call->flags & RX_CALL_NEED_START);
/*
* TQ references no longer protected by this flag; they must remain
* protected by the global lock.
*/
call->flags &= ~RX_CALL_TQ_BUSY;
- if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
- dpf(("call %"AFS_PTR_FMT" has %d waiters and flags %d\n",
- call, call->tqWaiters, call->flags));
-#ifdef RX_ENABLE_LOCKS
- osirx_AssertMine(&call->lock, "rxi_Start end");
- CV_BROADCAST(&call->cv_tq);
-#else /* RX_ENABLE_LOCKS */
- osi_rxWakeup(&call->tq);
-#endif /* RX_ENABLE_LOCKS */
- }
+ rxi_WakeUpTransmitQueue(call);
} else {
call->flags |= RX_CALL_NEED_START;
}
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
} else {
- if (call->resendEvent) {
- rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
- }
+ rxi_rto_cancel(call);
}
}
rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
/* Actually send the packet, filling in more connection-specific fields */
- CALL_HOLD(call, RX_CALL_REFCOUNT_SEND);
MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ CALL_HOLD(call, RX_CALL_REFCOUNT_SEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
rxi_SendPacket(call, conn, p, istack);
- MUTEX_ENTER(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_SEND);
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ MUTEX_ENTER(&call->lock);
/* Update last send time for this call (for keep-alive
* processing), and for the connection (so that we can discover
{
struct rx_connection *conn = call->conn;
afs_uint32 now;
- afs_uint32 deadTime;
+ afs_uint32 deadTime, idleDeadTime = 0, hardDeadTime = 0;
+ afs_uint32 fudgeFactor;
int cerror = 0;
int newmtu = 0;
return 0;
}
#endif
- /* dead time + RTT + 8*MDEV, rounded up to next second. */
- deadTime =
- (((afs_uint32) conn->secondsUntilDead << 10) +
- ((afs_uint32) conn->peer->rtt >> 3) +
- ((afs_uint32) conn->peer->rtt_dev << 1) + 1023) >> 10;
+ /* RTT + 8*MDEV, rounded up to the next second. */
+ fudgeFactor = (((afs_uint32) call->rtt >> 3) +
+ ((afs_uint32) call->rtt_dev << 1) + 1023) >> 10;
+
+ deadTime = conn->secondsUntilDead + fudgeFactor;
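
A worked example of the fixed-point arithmetic behind fudgeFactor (all values illustrative): with rtt = 800 (100 ms, scaled by 8) and rtt_dev = 100 (scaled by 4), the expression yields one extra second of grace:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
        uint32_t rtt = 800;              /* 100 ms, stored scaled by 8 */
        uint32_t rtt_dev = 100;          /* deviation, stored scaled by 4 */
        uint32_t secondsUntilDead = 12;  /* illustrative dead time */

        /* Same expression as above: RTT + 8*MDEV in milliseconds,
         * rounded up to whole seconds (+1023 before >> 10, i.e. /1024). */
        uint32_t fudge = ((rtt >> 3) + (rtt_dev << 1) + 1023) >> 10;
        printf("fudge %us, deadTime %us\n", fudge, secondsUntilDead + fudge);
        return 0;
    }
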
now = clock_Sec();
/* These are computed to the second (+- 1 second). But that's
* good enough for these values, which should be a significant
if (now > (call->lastReceiveTime + deadTime)) {
if (call->state == RX_STATE_ACTIVE) {
#ifdef ADAPT_PMTU
-#if defined(KERNEL) && defined(AFS_SUN57_ENV)
+#if defined(KERNEL) && defined(AFS_SUN5_ENV)
ire_t *ire;
#if defined(AFS_SUN510_ENV) && defined(GLOBAL_NETSTACKID)
netstack_t *ns = netstack_find_by_stackid(GLOBAL_NETSTACKID);
/* Cancel pending events */
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
- rxevent_Cancel(call->resendEvent, call, RX_CALL_REFCOUNT_RESEND);
+ rxi_rto_cancel(call);
rxevent_Cancel(call->keepAliveEvent, call,
RX_CALL_REFCOUNT_ALIVE);
+ if (call->growMTUEvent)
+ rxevent_Cancel(call->growMTUEvent, call,
+ RX_CALL_REFCOUNT_ALIVE);
+ MUTEX_ENTER(&rx_refcnt_mutex);
if (call->refCount == 0) {
rxi_FreeCall(call, haveCTLock);
+ MUTEX_EXIT(&rx_refcnt_mutex);
return -2;
}
+ MUTEX_EXIT(&rx_refcnt_mutex);
return -1;
#else /* RX_ENABLE_LOCKS */
- rxi_FreeCall(call);
+ rxi_FreeCall(call, 0);
return -2;
#endif /* RX_ENABLE_LOCKS */
}
* to pings; active calls are simply flagged in error, so the
* attached process can die reasonably gracefully. */
}
+
+ if (conn->idleDeadTime) {
+ idleDeadTime = conn->idleDeadTime + fudgeFactor;
+ }
+
/* see if we have a non-activity timeout */
- if (call->startWait && conn->idleDeadTime
- && ((call->startWait + conn->idleDeadTime) < now) &&
+ if (call->startWait && idleDeadTime
+ && ((call->startWait + idleDeadTime) < now) &&
(call->flags & RX_CALL_READER_WAIT)) {
if (call->state == RX_STATE_ACTIVE) {
cerror = RX_CALL_TIMEOUT;
goto mtuout;
}
}
- if (call->lastSendData && conn->idleDeadTime && (conn->idleDeadErr != 0)
- && ((call->lastSendData + conn->idleDeadTime) < now)) {
+ if (call->lastSendData && idleDeadTime && (conn->idleDeadErr != 0)
+ && ((call->lastSendData + idleDeadTime) < now)) {
if (call->state == RX_STATE_ACTIVE) {
cerror = conn->idleDeadErr;
goto mtuout;
}
}
+
+ if (conn->hardDeadTime) {
+ hardDeadTime = conn->hardDeadTime + fudgeFactor;
+ }
+
/* see if we have a hard timeout */
- if (conn->hardDeadTime
- && (now > (conn->hardDeadTime + call->startTime.sec))) {
+ if (hardDeadTime
+ && (now > (hardDeadTime + call->startTime.sec))) {
if (call->state == RX_STATE_ACTIVE)
rxi_CallError(call, RX_CALL_TIMEOUT);
return -1;
}
return 0;
mtuout:
- if (conn->msgsizeRetryErr && cerror != RX_CALL_TIMEOUT) {
+ if (conn->msgsizeRetryErr && cerror != RX_CALL_TIMEOUT
+ && call->lastReceiveTime) {
int oldMTU = conn->peer->ifMTU;
/* if we thought we could send more, perhaps things got worse */
- if (call->conn->peer->maxPacketSize > conn->lastPacketSize)
+ if (conn->peer->maxPacketSize > conn->lastPacketSize)
/* maxpacketsize will be cleared in rxi_SetPeerMtu */
newmtu = MAX(conn->peer->maxPacketSize-RX_IPUDP_SIZE,
conn->lastPacketSize-(128+RX_IPUDP_SIZE));
{
struct rx_connection *conn = arg1;
struct rx_header theader;
- char tbuffer[1500];
+ char tbuffer[1 + sizeof(struct rx_header)];
struct sockaddr_in taddr;
char *tp;
char a[1] = { 0 };
osi_NetSend(socket, &taddr, tmpiov, 1, 1 + sizeof(struct rx_header), 1);
MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
/* Only reschedule ourselves if the connection would not be destroyed */
if (conn->refCount <= 1) {
conn->natKeepAliveEvent = NULL;
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
rx_DestroyConnection(conn); /* drop the reference for this */
} else {
- conn->natKeepAliveEvent = NULL;
conn->refCount--; /* drop the reference for this */
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ conn->natKeepAliveEvent = NULL;
rxi_ScheduleNatKeepAliveEvent(conn);
MUTEX_EXIT(&conn->conn_data_lock);
}
clock_GetTime(&now);
when = now;
when.sec += conn->secondsUntilNatPing;
+ MUTEX_ENTER(&rx_refcnt_mutex);
conn->refCount++; /* hold a reference for this */
+ MUTEX_EXIT(&rx_refcnt_mutex);
conn->natKeepAliveEvent =
rxevent_PostNow(&when, &now, rxi_NatKeepAliveEvent, conn, 0);
}
{
MUTEX_ENTER(&conn->conn_data_lock);
conn->secondsUntilNatPing = seconds;
- if (seconds != 0)
- rxi_ScheduleNatKeepAliveEvent(conn);
+ if (seconds != 0) {
+ if (!(conn->flags & RX_CONN_ATTACHWAIT))
+ rxi_ScheduleNatKeepAliveEvent(conn);
+ else
+ conn->flags |= RX_CONN_NAT_PING;
+ }
MUTEX_EXIT(&conn->conn_data_lock);
}
rxi_NatKeepAliveOn(struct rx_connection *conn)
{
MUTEX_ENTER(&conn->conn_data_lock);
- rxi_ScheduleNatKeepAliveEvent(conn);
+ /* if it's already attached */
+ if (!(conn->flags & RX_CONN_ATTACHWAIT))
+ rxi_ScheduleNatKeepAliveEvent(conn);
+ else
+ conn->flags |= RX_CONN_NAT_PING;
MUTEX_EXIT(&conn->conn_data_lock);
}
struct rx_connection *conn;
afs_uint32 now;
- MUTEX_ENTER(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_ALIVE);
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ MUTEX_ENTER(&call->lock);
if (event == call->keepAliveEvent)
call->keepAliveEvent = NULL;
now = clock_Sec();
struct rx_call *call = arg1;
struct rx_connection *conn;
- MUTEX_ENTER(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_RELE(call, RX_CALL_REFCOUNT_ALIVE);
+ MUTEX_EXIT(&rx_refcnt_mutex);
+ MUTEX_ENTER(&call->lock);
+
if (event == call->growMTUEvent)
call->growMTUEvent = NULL;
clock_GetTime(&now);
when = now;
when.sec += call->conn->secondsUntilPing;
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_ALIVE);
+ MUTEX_EXIT(&rx_refcnt_mutex);
call->keepAliveEvent =
rxevent_PostNow(&when, &now, rxi_KeepAliveEvent, call, 0);
}
}
when.sec += secs;
+ MUTEX_ENTER(&rx_refcnt_mutex);
CALL_HOLD(call, RX_CALL_REFCOUNT_ALIVE);
+ MUTEX_EXIT(&rx_refcnt_mutex);
call->growMTUEvent =
rxevent_PostNow(&when, &now, rxi_GrowMTUEvent, call, 0);
}
(char *)&error, sizeof(error), 0);
rxi_FreePacket(packet);
}
- CALL_RELE(call, RX_CALL_REFCOUNT_ABORT);
MUTEX_EXIT(&call->lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
+ CALL_RELE(call, RX_CALL_REFCOUNT_ABORT);
+ MUTEX_EXIT(&rx_refcnt_mutex);
}
/* This routine is called periodically (every RX_AUTH_REQUEST_TIMEOUT
}
-/* Compute round trip time of the packet provided, in *rttp.
- */
-
/* rxi_ComputeRoundTripTime is called with peer locked. */
-/* sentp and/or peer may be null */
-void
+/* peer must not be null */
+static void
rxi_ComputeRoundTripTime(struct rx_packet *p,
- struct clock *sentp,
- struct rx_peer *peer)
+ struct rx_ackPacket *ack,
+ struct rx_call *call,
+ struct rx_peer *peer,
+ struct clock *now)
{
- struct clock thisRtt, *rttp = &thisRtt;
-
+ struct clock thisRtt, *sentp;
int rtt_timeout;
+ int serial;
- clock_GetTime(rttp);
+ /* If the ACK is delayed, then do nothing */
+ if (ack->reason == RX_ACK_DELAY)
+ return;
- if (clock_Lt(rttp, sentp)) {
- clock_Zero(rttp);
- return; /* somebody set the clock back, don't count this time. */
+ /* On the wire, jumbograms are a single UDP packet. We shouldn't count
+ * their RTT multiple times, so only include the RTT of the last packet
+ * in a jumbogram */
+ if (p->flags & RX_JUMBO_PACKET)
+ return;
+
+ /* Use the serial number to determine which transmission the ACK is for,
+ * and set the sent time to match this. If we have no serial number, then
+ * only use the ACK for RTT calculations if the packet has not been
+ * retransmitted
+ */
+
+ serial = ntohl(ack->serial);
+ if (serial) {
+ if (serial == p->header.serial) {
+ sentp = &p->timeSent;
+ } else if (serial == p->firstSerial) {
+ sentp = &p->firstSent;
+ } else if (clock_Eq(&p->timeSent, &p->firstSent)) {
+ sentp = &p->firstSent;
+ } else
+ return;
+ } else {
+ if (clock_Eq(&p->timeSent, &p->firstSent)) {
+ sentp = &p->firstSent;
+ } else
+ return;
}
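
This selection implements the spirit of Karn's algorithm: only time an ACK against a transmission it can be unambiguously matched to. A standalone sketch of the decision table (the parameter names mirror the fields above, but the harness is illustrative):

    #include <stdio.h>
    #include <stdint.h>

    /* Return which timestamp to measure against, or NULL to skip the
     * sample. 'retransmitted' stands for timeSent != firstSent. */
    static const char *
    rtt_sample_source(uint32_t ack_serial, uint32_t last_serial,
                      uint32_t first_serial, int retransmitted)
    {
        if (ack_serial) {
            if (ack_serial == last_serial)
                return "timeSent (ACK matches last transmission)";
            if (ack_serial == first_serial)
                return "firstSent (ACK matches first transmission)";
        }
        if (!retransmitted)
            return "firstSent (never retransmitted, so unambiguous)";
        return NULL; /* ambiguous: retransmitted, no serial match */
    }

    int
    main(void)
    {
        const char *s;

        printf("%s\n", rtt_sample_source(42, 42, 17, 1));
        printf("%s\n", rtt_sample_source(17, 42, 17, 1));
        printf("%s\n", rtt_sample_source(0, 42, 17, 0));
        s = rtt_sample_source(0, 42, 17, 1);
        printf("%s\n", s ? s : "skip sample (Karn: ambiguous)");
        return 0;
    }
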
- clock_Sub(rttp, sentp);
+
+ thisRtt = *now;
+
+ if (clock_Lt(&thisRtt, sentp))
+ return; /* somebody set the clock back, don't count this time. */
+
+ clock_Sub(&thisRtt, sentp);
dpf(("rxi_ComputeRoundTripTime(call=%d packet=%"AFS_PTR_FMT" rttp=%d.%06d sec)\n",
- p->header.callNumber, p, rttp->sec, rttp->usec));
+ p->header.callNumber, p, thisRtt.sec, thisRtt.usec));
- if (rttp->sec == 0 && rttp->usec == 0) {
+ if (clock_IsZero(&thisRtt)) {
/*
* The actual round trip time is shorter than the
* clock_GetTime resolution. It is most likely 1ms or 100ns.
* Since we can't tell which at the moment we will assume 1ms.
*/
- rttp->usec = 1000;
+ thisRtt.usec = 1000;
}
if (rx_stats_active) {
MUTEX_ENTER(&rx_stats_mutex);
- if (clock_Lt(rttp, &rx_stats.minRtt))
- rx_stats.minRtt = *rttp;
- if (clock_Gt(rttp, &rx_stats.maxRtt)) {
- if (rttp->sec > 60) {
+ if (clock_Lt(&thisRtt, &rx_stats.minRtt))
+ rx_stats.minRtt = thisRtt;
+ if (clock_Gt(&thisRtt, &rx_stats.maxRtt)) {
+ if (thisRtt.sec > 60) {
MUTEX_EXIT(&rx_stats_mutex);
return; /* somebody set the clock ahead */
}
- rx_stats.maxRtt = *rttp;
+ rx_stats.maxRtt = thisRtt;
}
- clock_Add(&rx_stats.totalRtt, rttp);
- rx_stats.nRttSamples++;
+ clock_Add(&rx_stats.totalRtt, &thisRtt);
+ rx_atomic_inc(&rx_stats.nRttSamples);
MUTEX_EXIT(&rx_stats_mutex);
}
/* better rtt calculation courtesy of UMich crew (dave,larry,peter,?) */
/* Apply VanJacobson round-trip estimations */
- if (peer->rtt) {
+ if (call->rtt) {
int delta;
/*
- * srtt (peer->rtt) is in units of one-eighth-milliseconds.
+ * srtt (call->rtt) is in units of one-eighth-milliseconds.
* srtt is stored as fixed point with 3 bits after the binary
* point (i.e., scaled by 8). The following magic is
* equivalent to the smoothing algorithm in rfc793 with an
* srtt' = srtt + (rtt - srtt)/8
*/
- delta = _8THMSEC(rttp) - peer->rtt;
- peer->rtt += (delta >> 3);
+ delta = _8THMSEC(&thisRtt) - call->rtt;
+ call->rtt += (delta >> 3);
/*
* We accumulate a smoothed rtt variance (actually, a smoothed
if (delta < 0)
delta = -delta;
- delta -= (peer->rtt_dev << 1);
- peer->rtt_dev += (delta >> 3);
+ delta -= (call->rtt_dev << 1);
+ call->rtt_dev += (delta >> 3);
} else {
/* I don't have a stored RTT so I start with this value. Since I'm
* probably just starting a call, and will be pushing more data down
* little, and I set deviance to half the rtt. In practice,
* deviance tends to approach something a little less than
* half the smoothed rtt. */
- peer->rtt = _8THMSEC(rttp) + 8;
- peer->rtt_dev = peer->rtt >> 2; /* rtt/2: they're scaled differently */
+ call->rtt = _8THMSEC(&thisRtt) + 8;
+ call->rtt_dev = call->rtt >> 2; /* rtt/2: they're scaled differently */
}
- /* the timeout is RTT + 4*MDEV but no less than rx_minPeerTimeout msec.
- * This is because one end or the other of these connections is usually
- * in a user process, and can be switched and/or swapped out. So on fast,
- * reliable networks, the timeout would otherwise be too short. */
- rtt_timeout = MAX(((peer->rtt >> 3) + peer->rtt_dev), rx_minPeerTimeout);
- clock_Zero(&(peer->timeout));
- clock_Addmsec(&(peer->timeout), rtt_timeout);
+    /* the timeout is the smoothed RTT plus 4*MDEV
+     *
+     * We allow a user-specified minimum to be set for this, to allow clamping
+     * at a minimum value in the same way as TCP. In addition, we have to allow
+ * for the possibility that this packet is answered by a delayed ACK, so we
+ * add on a fixed 200ms to account for that timer expiring.
+ */
+
+ rtt_timeout = MAX(((call->rtt >> 3) + call->rtt_dev),
+ rx_minPeerTimeout) + 200;
+ clock_Zero(&call->rto);
+ clock_Addmsec(&call->rto, rtt_timeout);
- /* Reset the backedOff flag since we just computed a new timeout value */
- peer->backedOff = 0;
+ /* Update the peer, so any new calls start with our values */
+ peer->rtt_dev = call->rtt_dev;
+ peer->rtt = call->rtt;
dpf(("rxi_ComputeRoundTripTime(call=%d packet=%"AFS_PTR_FMT" rtt=%d ms, srtt=%d ms, rtt_dev=%d ms, timeout=%d.%06d sec)\n",
- p->header.callNumber, p, MSEC(rttp), peer->rtt >> 3, peer->rtt_dev >> 2, (peer->timeout.sec), (peer->timeout.usec)));
+ p->header.callNumber, p, MSEC(&thisRtt), call->rtt >> 3, call->rtt_dev >> 2, (call->rto.sec), (call->rto.usec)));
}
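
Taken out of rx, the estimator above is Van Jacobson's fixed-point filter. A self-contained sketch of the same update rules (srtt kept scaled by 8, the deviation scaled by 4, as the comments above describe; rx_minPeerTimeout's default is not shown in this hunk, so the 350 ms below is an assumed stand-in):

    #include <stdio.h>

    struct rtt_state { int rtt, rtt_dev; };

    static void
    rtt_update(struct rtt_state *s, int sample_ms)
    {
        int sample8 = sample_ms << 3;   /* _8THMSEC(): ms -> eighth-ms */

        if (s->rtt) {
            int delta = sample8 - s->rtt;
            s->rtt += delta >> 3;       /* srtt' = srtt + (rtt - srtt)/8 */
            if (delta < 0)
                delta = -delta;
            delta -= s->rtt_dev << 1;
            s->rtt_dev += delta >> 3;   /* smoothed mean deviation */
        } else {
            s->rtt = sample8 + 8;       /* first sample seeds the filter */
            s->rtt_dev = s->rtt >> 2;   /* rtt/2: scaled differently */
        }
    }

    int
    main(void)
    {
        struct rtt_state s = { 0, 0 };
        int min_timeout_ms = 350;       /* assumed rx_minPeerTimeout */
        int samples[] = { 40, 45, 120, 38 };
        int i, rto;

        for (i = 0; i < 4; i++) {
            rtt_update(&s, samples[i]);
            /* timeout = max(srtt + 4*mdev, floor) + 200ms delayed-ACK
             * allowance, matching the rtt_timeout computation above */
            rto = (s.rtt >> 3) + s.rtt_dev;
            if (rto < min_timeout_ms)
                rto = min_timeout_ms;
            rto += 200;
            printf("sample %3d ms -> srtt %d/8 ms, rto %d ms\n",
                   samples[i], s.rtt, rto);
        }
        return 0;
    }
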
/* This only actually destroys the connection if
* there are no outstanding calls */
MUTEX_ENTER(&conn->conn_data_lock);
+ MUTEX_ENTER(&rx_refcnt_mutex);
if (!havecalls && !conn->refCount
&& ((conn->lastSendTime + rx_idleConnectionTime) <
now.sec)) {
conn->refCount++; /* it will be decr in rx_DestroyConn */
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
#ifdef RX_ENABLE_LOCKS
rxi_DestroyConnectionNoLock(conn);
}
#ifdef RX_ENABLE_LOCKS
else {
+ MUTEX_EXIT(&rx_refcnt_mutex);
MUTEX_EXIT(&conn->conn_data_lock);
}
#endif /* RX_ENABLE_LOCKS */
prev->next = next;
if (rx_stats_active)
- rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nPeerStructs);
/*
* Now if we hold references on 'prev' and 'next'
case RX_ACK_REQUESTED:
xferSize =
p->length + RX_HEADER_SIZE + call->conn->securityMaxTrailerSize;
- xferMs = peer->rtt;
+ xferMs = call->rtt;
break;
case RX_ACK_PING_RESPONSE:
return;
}
- dpf(("CONG peer %lx/%u: sample (%s) size %ld, %ld ms (to %d.%06d, rtt %u, ps %u)",
+ dpf(("CONG peer %lx/%u: sample (%s) size %ld, %ld ms (to %d.%06d, rtt %u, ps %u)\n",
ntohl(peer->host), ntohs(peer->port), (ackReason == RX_ACK_REQUESTED ? "dataack" : "pingack"),
xferSize, xferMs, peer->timeout.sec, peer->timeout.usec, peer->smRtt, peer->ifMTU));
* one packet exchange */
if (clock_Gt(&newTO, &peer->timeout)) {
- dpf(("CONG peer %lx/%u: timeout %d.%06d ==> %ld.%06d (rtt %u)",
+ dpf(("CONG peer %lx/%u: timeout %d.%06d ==> %ld.%06d (rtt %u)\n",
ntohl(peer->host), ntohs(peer->port), peer->timeout.sec, peer->timeout.usec,
newTO.sec, newTO.usec, peer->smRtt));
else if (minTime > rx_maxSendWindow)
minTime = rx_maxSendWindow;
/* if (minTime != peer->maxWindow) {
- dpf(("CONG peer %lx/%u: windowsize %lu ==> %lu (to %lu.%06lu, rtt %u)",
+ dpf(("CONG peer %lx/%u: windowsize %lu ==> %lu (to %lu.%06lu, rtt %u)\n",
ntohl(peer->host), ntohs(peer->port), peer->maxWindow, minTime,
peer->timeout.sec, peer->timeout.usec, peer->smRtt));
peer->maxWindow = minTime;
/* calculate estimate for transmission interval in milliseconds */
minTime = rx_maxSendWindow * peer->smRtt;
if (minTime < 1000) {
- dpf(("CONG peer %lx/%u: cut TO %d.%06d by 0.5 (rtt %u)",
+ dpf(("CONG peer %lx/%u: cut TO %d.%06d by 0.5 (rtt %u)\n",
ntohl(peer->host), ntohs(peer->port), peer->timeout.sec,
peer->timeout.usec, peer->smRtt));
void
rx_StatsOnOff(int on)
{
-#ifdef RXDEBUG
rx_stats_active = on;
-#endif
}
if (len > 0) {
len = _vsnprintf(msg, sizeof(msg)-2, tformat, ap);
- if (len > 0) {
- if (msg[len-1] != '\n') {
- msg[len] = '\n';
- msg[len+1] = '\0';
- }
+ if (len > 0)
OutputDebugString(msg);
- }
}
va_end(ap);
#else
fprintf(rx_Log, " %d.%06d:", (unsigned int)now.sec,
(unsigned int)now.usec);
vfprintf(rx_Log, format, ap);
- putc('\n', rx_Log);
va_end(ap);
#endif
#endif
rx_PrintStats(FILE * file)
{
MUTEX_ENTER(&rx_stats_mutex);
- rx_PrintTheseStats(file, &rx_stats, sizeof(rx_stats), rx_nFreePackets,
+ rx_PrintTheseStats(file, (struct rx_statistics *) &rx_stats,
+ sizeof(rx_stats), rx_nFreePackets,
RX_DEBUGI_VERSION);
MUTEX_EXIT(&rx_stats_mutex);
}
(int)peer->burstWait.sec, (int)peer->burstWait.usec);
fprintf(file,
- " Rtt %d, " "retry time %u.%06d, " "total sent %d, "
- "resent %d\n", peer->rtt, (int)peer->timeout.sec,
- (int)peer->timeout.usec, peer->nSent, peer->reSends);
+ " Rtt %d, " "total sent %d, " "resent %d\n",
+ peer->rtt, peer->nSent, peer->reSends);
fprintf(file,
" Packet size %d, " "max in packet skew %d, "
(struct sockaddr *)&taddr, sizeof(struct sockaddr_in));
/* see if there's a packet available */
- gettimeofday(&tv_wake,0);
+ gettimeofday(&tv_wake, NULL);
tv_wake.tv_sec += waitTime;
for (;;) {
FD_ZERO(&imask);
FD_SET(socket, &imask);
tv_delta.tv_sec = tv_wake.tv_sec;
tv_delta.tv_usec = tv_wake.tv_usec;
- gettimeofday(&tv_now, 0);
+ gettimeofday(&tv_now, NULL);
if (tv_delta.tv_usec < tv_now.tv_usec) {
/* borrow */
peer->burstWait.usec = ntohl(peer->burstWait.usec);
peer->rtt = ntohl(peer->rtt);
peer->rtt_dev = ntohl(peer->rtt_dev);
- peer->timeout.sec = ntohl(peer->timeout.sec);
- peer->timeout.usec = ntohl(peer->timeout.usec);
+ peer->timeout.sec = 0;
+ peer->timeout.usec = 0;
peer->nSent = ntohl(peer->nSent);
peer->reSends = ntohl(peer->reSends);
peer->inPacketSkew = ntohl(peer->inPacketSkew);
peerStats->burstWait.usec = tp->burstWait.usec;
peerStats->rtt = tp->rtt;
peerStats->rtt_dev = tp->rtt_dev;
- peerStats->timeout.sec = tp->timeout.sec;
- peerStats->timeout.usec = tp->timeout.usec;
+ peerStats->timeout.sec = 0;
+ peerStats->timeout.usec = 0;
peerStats->nSent = tp->nSent;
peerStats->reSends = tp->reSends;
peerStats->inPacketSkew = tp->inPacketSkew;
next = peer->next;
rxi_FreePeer(peer);
if (rx_stats_active)
- rx_MutexDecrement(rx_stats.nPeerStructs, rx_stats_mutex);
+ rx_atomic_dec(&rx_stats.nPeerStructs);
}
MUTEX_EXIT(&rx_peerHashTable_lock);
}