#endif /* KERNEL */
#include <opr/queue.h>
+#include <hcrypto/rand.h>
#include "rx.h"
#include "rx_clock.h"
static void rxi_CancelKeepAliveEvent(struct rx_call *call);
static void rxi_CancelDelayedAbortEvent(struct rx_call *call);
static void rxi_CancelGrowMTUEvent(struct rx_call *call);
+static void update_nextCid(void);
#ifdef RX_ENABLE_LOCKS
struct rx_tq_debug {
extern afs_kmutex_t rx_refcnt_mutex;
extern afs_kmutex_t des_init_mutex;
extern afs_kmutex_t des_random_mutex;
+#ifndef KERNEL
extern afs_kmutex_t rx_clock_mutex;
extern afs_kmutex_t rxi_connCacheMutex;
extern afs_kmutex_t event_handler_mutex;
extern afs_kcondvar_t rx_event_handler_cond;
extern afs_kcondvar_t rx_listener_cond;
+#endif /* !KERNEL */
static afs_kmutex_t epoch_mutex;
static afs_kmutex_t rx_init_mutex;
/*
 * One-time pthread setup: initialise the global Rx mutexes, condition
 * variables and thread-local keys.  Locks and condvars that only exist
 * in user space are guarded by !KERNEL.
 */
static void
rxi_InitPthread(void)
{
-    MUTEX_INIT(&rx_clock_mutex, "clock", MUTEX_DEFAULT, 0);
-    MUTEX_INIT(&rx_stats_mutex, "stats", MUTEX_DEFAULT, 0);
-    MUTEX_INIT(&rx_atomic_mutex, "atomic", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_quota_mutex, "quota", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_pthread_mutex, "pthread", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_packets_mutex, "packets", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_refcnt_mutex, "refcnts", MUTEX_DEFAULT, 0);
+#ifndef KERNEL
+    MUTEX_INIT(&rx_clock_mutex, "clock", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rxi_connCacheMutex, "conn cache", MUTEX_DEFAULT, 0);
+    MUTEX_INIT(&event_handler_mutex, "event handler", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&listener_mutex, "listener", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_if_init_mutex, "if init", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_if_mutex, "if", MUTEX_DEFAULT, 0);
+#endif
+    MUTEX_INIT(&rx_stats_mutex, "stats", MUTEX_DEFAULT, 0);
+    MUTEX_INIT(&rx_atomic_mutex, "atomic", MUTEX_DEFAULT, 0);
+    MUTEX_INIT(&epoch_mutex, "epoch", MUTEX_DEFAULT, 0);
+    MUTEX_INIT(&rx_init_mutex, "init", MUTEX_DEFAULT, 0);
    MUTEX_INIT(&rx_debug_mutex, "debug", MUTEX_DEFAULT, 0);
+#ifndef KERNEL
    CV_INIT(&rx_event_handler_cond, "evhand", CV_DEFAULT, 0);
    CV_INIT(&rx_listener_cond, "rxlisten", CV_DEFAULT, 0);
+#endif
    osi_Assert(pthread_key_create(&rx_thread_id_key, NULL) == 0);
    osi_Assert(pthread_key_create(&rx_ts_info_key, NULL) == 0);
    MUTEX_INIT(&rx_connHashTable_lock, "rx_connHashTable_lock", MUTEX_DEFAULT,
	       0);
    MUTEX_INIT(&rx_serverPool_lock, "rx_serverPool_lock", MUTEX_DEFAULT, 0);
+#ifndef KERNEL
    MUTEX_INIT(&rxi_keyCreate_lock, "rxi_keyCreate_lock", MUTEX_DEFAULT, 0);
+#endif
#endif /* RX_ENABLE_LOCKS */
}
* tiers:
*
* rx_connHashTable_lock - synchronizes conn creation, rx_connHashTable access
+ * also protects updates to rx_nextCid
 * conn_call_lock - used to synchronize rx_EndCall and rx_NewCall
* call->lock - locks call data fields.
* These are independent of each other:
#define CLEAR_CALL_QUEUE_LOCK(C)
#endif /* RX_ENABLE_LOCKS */
struct rx_serverQueueEntry *rx_waitForPacket = 0;
-struct rx_serverQueueEntry *rx_waitingForPacket = 0;
/* ------------Exported Interfaces------------- */
-/* This function allows rxkad to set the epoch to a suitably random number
- * which rx_NewConnection will use in the future. The principle purpose is to
- * get rxnull connections to use the same epoch as the rxkad connections do, at
- * least once the first rxkad connection is established. This is important now
- * that the host/port addresses aren't used in FindConnection: the uniqueness
- * of epoch/cid matters and the start time won't do. */
-
-#ifdef AFS_PTHREAD_ENV
-/*
- * This mutex protects the following global variables:
- * rx_epoch
- */
-
-#define LOCK_EPOCH MUTEX_ENTER(&epoch_mutex)
-#define UNLOCK_EPOCH MUTEX_EXIT(&epoch_mutex)
-#else
-#define LOCK_EPOCH
-#define UNLOCK_EPOCH
-#endif /* AFS_PTHREAD_ENV */
-
-void
-rx_SetEpoch(afs_uint32 epoch)
-{
- LOCK_EPOCH;
- rx_epoch = epoch;
- UNLOCK_EPOCH;
-}
-
/* Initialize rx. A port number may be mentioned, in which case this
* becomes the default port number for any service installed later.
* If 0 is provided for the port number, a random port will be chosen
#endif
}
rx_stats.minRtt.sec = 9999999;
-#ifdef KERNEL
- rx_SetEpoch(tv.tv_sec | 0x80000000);
-#else
- rx_SetEpoch(tv.tv_sec); /* Start time of this package, rxkad
- * will provide a randomer value. */
-#endif
+ if (RAND_bytes(&rx_epoch, sizeof(rx_epoch)) != 1)
+ return -1;
+ rx_epoch = (rx_epoch & ~0x40000000) | 0x80000000;
+ if (RAND_bytes(&rx_nextCid, sizeof(rx_nextCid)) != 1)
+ return -1;
+ rx_nextCid &= RX_CIDMASK;
MUTEX_ENTER(&rx_quota_mutex);
rxi_dataQuota += rx_extraQuota; /* + extra pkts caller asked to rsrv */
MUTEX_EXIT(&rx_quota_mutex);
/*
 * Cancel any pending RTO resend event for this call.  The
 * RX_CALL_REFCOUNT_RESEND reference is dropped only when an event was
 * actually pending, keeping the call's refcount balanced.
 */
static_inline void
rxi_rto_cancel(struct rx_call *call)
{
-    rxevent_Cancel(&call->resendEvent);
-    CALL_RELE(call, RX_CALL_REFCOUNT_RESEND);
+    if (call->resendEvent != NULL) { /* nothing to cancel otherwise */
+	rxevent_Cancel(&call->resendEvent);
+	CALL_RELE(call, RX_CALL_REFCOUNT_RESEND);
+    }
}
/*!
int serviceSecurityIndex)
{
int hashindex, i;
- afs_int32 cid;
struct rx_connection *conn;
SPLVAR;
#endif
NETPRI;
MUTEX_ENTER(&rx_connHashTable_lock);
- cid = (rx_nextCid += RX_MAXCALLS);
conn->type = RX_CLIENT_CONNECTION;
- conn->cid = cid;
conn->epoch = rx_epoch;
+ conn->cid = rx_nextCid;
+ update_nextCid();
conn->peer = rxi_FindPeer(shost, sport, 1);
conn->serviceId = sservice;
conn->securityObject = securityObject;
/* Set the connection's idle-dead timeout (in seconds; elsewhere in this
 * file a zero idleDeadTime disables the idle-dead checks) and let
 * rxi_CheckConnTimeouts() react to the new value.
 * NOTE(review): the return-type line of this definition lies outside this
 * hunk. */
rx_SetConnIdleDeadTime(struct rx_connection *conn, int seconds)
{
    conn->idleDeadTime = seconds;
-    conn->idleDeadDetection = (seconds ? 1 : 0);
    rxi_CheckConnTimeouts(conn);
}
service->minProcs = 0;
service->maxProcs = 1;
service->idleDeadTime = 60;
- service->idleDeadErr = 0;
service->connDeadTime = rx_connDeadTime;
service->executeRequestProc = serviceProc;
service->checkReach = 0;
opr_queue_Append(&rx_idleServerQueue, &sq->entry);
#ifndef AFS_AIX41_ENV
rx_waitForPacket = sq;
-#else
- rx_waitingForPacket = sq;
#endif /* AFS_AIX41_ENV */
do {
CV_WAIT(&sq->cv, &rx_serverPool_lock);
* Map errors to the local host's errno.h format.
*/
error = ntoh_syserr_conv(error);
+
+ /* If the caller said the call failed with some error, we had better
+ * return an error code. */
+ osi_Assert(!rc || error);
return error;
}
if (peer->ifMTU < OLD_MAX_PACKET_SIZE)
peer->maxDgramPackets = 1;
/* We no longer have valid peer packet information */
- if (peer->maxPacketSize-RX_IPUDP_SIZE > peer->ifMTU)
+ if (peer->maxPacketSize + RX_HEADER_SIZE > peer->ifMTU)
peer->maxPacketSize = 0;
MUTEX_EXIT(&peer->peer_lock);
conn->nSpecific = 0;
conn->specific = NULL;
rx_SetConnDeadTime(conn, service->connDeadTime);
- conn->idleDeadTime = service->idleDeadTime;
- conn->idleDeadDetection = service->idleDeadErr ? 1 : 0;
+ rx_SetConnIdleDeadTime(conn, service->idleDeadTime);
for (i = 0; i < RX_MAXCALLS; i++) {
conn->twind[i] = rx_initSendWindow;
conn->rwind[i] = rx_initReceiveWindow;
addr.sin_family = AF_INET;
addr.sin_port = port;
addr.sin_addr.s_addr = host;
+ memset(&addr.sin_zero, 0, sizeof(addr.sin_zero));
#ifdef STRUCT_SOCKADDR_HAS_SA_LEN
addr.sin_len = sizeof(addr);
#endif /* AFS_OSF_ENV */
* but we are clearly receiving.
*/
if (!peer->maxPacketSize)
- peer->maxPacketSize = RX_MIN_PACKET_SIZE+RX_IPUDP_SIZE;
+ peer->maxPacketSize = RX_MIN_PACKET_SIZE - RX_HEADER_SIZE;
if (pktsize > peer->maxPacketSize) {
peer->maxPacketSize = pktsize;
- if ((pktsize-RX_IPUDP_SIZE > peer->ifMTU)) {
- peer->ifMTU=pktsize-RX_IPUDP_SIZE;
+ if ((pktsize + RX_HEADER_SIZE > peer->ifMTU)) {
+ peer->ifMTU = pktsize + RX_HEADER_SIZE;
peer->natMTU = rxi_AdjustIfMTU(peer->ifMTU);
rxi_ScheduleGrowMTUEvent(call, 1);
}
if (RXS_CheckAuthentication(conn->securityObject, conn) == 0)
return np;
+ if (!conn->securityChallengeSent) {
+ /* We've never sent out a challenge for this connection, so this
+ * response cannot possibly be correct; ignore it. This can happen
+ * if we sent a challenge to the client, then we were restarted, and
+ * then the client sent us a response. If we ignore the response, the
+ * client will eventually resend a data packet, causing us to send a
+ * new challenge and the client to send a new response. */
+ return np;
+ }
+
/* Otherwise, have the security object evaluate the response packet */
error = RXS_CheckResponse(conn->securityObject, conn, np);
if (error) {
*/
if (call->conn->peer->maxPacketSize &&
(call->conn->peer->maxPacketSize < OLD_MAX_PACKET_SIZE
- +RX_IPUDP_SIZE))
+ - RX_HEADER_SIZE))
padbytes = call->conn->peer->maxPacketSize+16;
else
padbytes = call->conn->peer->maxMTU + 128;
* processing), and for the connection (so that we can discover
* idle connections) */
conn->lastSendTime = call->lastSendTime = clock_Sec();
- /* Let a set of retransmits trigger an idle timeout */
- if (!xmit->resending)
- call->lastSendData = call->lastSendTime;
}
/* When sending packets we need to follow these rules:
(p->length <= (rx_AckDataSize(call->rwind) + 4 * sizeof(afs_int32))))
{
conn->lastSendTime = call->lastSendTime = clock_Sec();
- /* Don't count keepalive ping/acks here, so idleness can be tracked. */
- if ((p->header.type != RX_PACKET_TYPE_ACK) ||
- ((((struct rx_ackPacket *)rx_DataOf(p))->reason != RX_ACK_PING) &&
- (((struct rx_ackPacket *)rx_DataOf(p))->reason !=
- RX_ACK_PING_RESPONSE)))
- call->lastSendData = call->lastSendTime;
}
}
* number of seconds. */
if (now > (call->lastReceiveTime + deadTime)) {
if (call->state == RX_STATE_ACTIVE) {
-#ifdef AFS_ADAPT_PMTU
-# if defined(KERNEL) && defined(AFS_SUN5_ENV)
- ire_t *ire;
-# if defined(AFS_SUN510_ENV) && defined(GLOBAL_NETSTACKID)
- netstack_t *ns = netstack_find_by_stackid(GLOBAL_NETSTACKID);
- ip_stack_t *ipst = ns->netstack_ip;
-# endif
- ire = ire_cache_lookup(conn->peer->host
-# if defined(AFS_SUN510_ENV) && defined(ALL_ZONES)
- , ALL_ZONES
-# if defined(ICL_3_ARG) || defined(GLOBAL_NETSTACKID)
- , NULL
-# if defined(GLOBAL_NETSTACKID)
- , ipst
-# endif
-# endif
-# endif
- );
-
- if (ire && ire->ire_max_frag > 0)
- rxi_SetPeerMtu(NULL, conn->peer->host, 0,
- ire->ire_max_frag);
-# if defined(GLOBAL_NETSTACKID)
- netstack_rele(ns);
-# endif
-# endif
-#endif /* AFS_ADAPT_PMTU */
cerror = RX_CALL_DEAD;
goto mtuout;
} else {
* attached process can die reasonably gracefully. */
}
- if (conn->idleDeadDetection) {
- if (conn->idleDeadTime) {
- idleDeadTime = conn->idleDeadTime + fudgeFactor;
- }
-
- if (idleDeadTime) {
- /* see if we have a non-activity timeout */
- if (call->startWait && ((call->startWait + idleDeadTime) < now) &&
- (call->flags & RX_CALL_READER_WAIT)) {
- if (call->state == RX_STATE_ACTIVE) {
- cerror = RX_CALL_TIMEOUT;
- goto mtuout;
- }
- }
+ if (conn->idleDeadTime) {
+ idleDeadTime = conn->idleDeadTime + fudgeFactor;
+ }
- if (call->lastSendData && ((call->lastSendData + idleDeadTime) < now)) {
- if (call->state == RX_STATE_ACTIVE) {
- cerror = conn->service ? conn->service->idleDeadErr : RX_CALL_IDLE;
- idle_timeout = 1;
- goto mtuout;
- }
- }
- }
+ if (idleDeadTime) {
+ /* see if we have a non-activity timeout */
+ if (call->startWait && ((call->startWait + idleDeadTime) < now)) {
+ if (call->state == RX_STATE_ACTIVE) {
+ cerror = RX_CALL_TIMEOUT;
+ goto mtuout;
+ }
+ }
}
if (conn->hardDeadTime) {
call->lastReceiveTime) {
int oldMTU = conn->peer->ifMTU;
- /* if we thought we could send more, perhaps things got worse */
- if (conn->peer->maxPacketSize > conn->lastPacketSize)
- /* maxpacketsize will be cleared in rxi_SetPeerMtu */
- newmtu = MAX(conn->peer->maxPacketSize-RX_IPUDP_SIZE,
- conn->lastPacketSize-(128+RX_IPUDP_SIZE));
+ /* If we thought we could send more, perhaps things got worse.
+ * Shrink by 128 bytes and try again. */
+ if (conn->peer->maxPacketSize < conn->lastPacketSize)
+ /* maxPacketSize will be cleared in rxi_SetPeerMtu */
+ newmtu = MAX(conn->peer->maxPacketSize + RX_HEADER_SIZE,
+ conn->lastPacketSize - 128 + RX_HEADER_SIZE);
else
- newmtu = conn->lastPacketSize-(128+RX_IPUDP_SIZE);
+ newmtu = conn->lastPacketSize - 128 + RX_HEADER_SIZE;
/* minimum capped in SetPeerMtu */
rxi_SetPeerMtu(conn->peer, 0, 0, newmtu);
taddr.sin_family = AF_INET;
taddr.sin_port = rx_PortOf(rx_PeerOf(conn));
taddr.sin_addr.s_addr = rx_HostOf(rx_PeerOf(conn));
+ memset(&taddr.sin_zero, 0, sizeof(taddr.sin_zero));
#ifdef STRUCT_SOCKADDR_HAS_SA_LEN
taddr.sin_len = sizeof(struct sockaddr_in);
#endif
*/
if ((conn->peer->maxPacketSize != 0) &&
(conn->peer->natMTU < RX_MAX_PACKET_SIZE) &&
- conn->idleDeadDetection)
+ conn->idleDeadTime)
(void)rxi_SendAck(call, NULL, 0, RX_ACK_MTU, 0);
rxi_ScheduleGrowMTUEvent(call, 0);
MUTEX_EXIT(&call->lock);
}
}
+/*
+ * Advance rx_nextCid to the connection ID for the next new client
+ * connection.
+ *
+ * Caller must hold rx_connHashTable_lock, which protects rx_nextCid.
+ * Signed overflow is undefined behavior, so wrap explicitly before the
+ * increment would overflow.  The wrap target must remain a multiple of
+ * the increment (1 << RX_CIDSHIFT) so that the low RX_CIDSHIFT
+ * call-channel bits of every generated cid stay zero; round
+ * MAX_AFS_INT32 down with shifts by RX_CIDSHIFT (not division by the
+ * shift amount itself) to guarantee that.
+ */
+static void
+update_nextCid(void)
+{
+    if (rx_nextCid > MAX_AFS_INT32 - (1 << RX_CIDSHIFT))
+	rx_nextCid = -1 * ((MAX_AFS_INT32 >> RX_CIDSHIFT) << RX_CIDSHIFT);
+    else
+	rx_nextCid += 1 << RX_CIDSHIFT;
+}
+
static void
rxi_KeepAliveOn(struct rx_call *call)
{
rxi_ScheduleKeepAliveEvent(call);
}
-void
-rx_KeepAliveOff(struct rx_call *call)
-{
- MUTEX_ENTER(&call->lock);
- rxi_CancelKeepAliveEvent(call);
- MUTEX_EXIT(&call->lock);
-}
-
-void
-rx_KeepAliveOn(struct rx_call *call)
-{
- MUTEX_ENTER(&call->lock);
- rxi_KeepAliveOn(call);
- MUTEX_EXIT(&call->lock);
-}
-
static void
rxi_GrowMTUOn(struct rx_call *call)
{
rxi_SendSpecial((struct rx_call *)0, conn, packet,
RX_PACKET_TYPE_CHALLENGE, NULL, -1, 0);
rxi_FreePacket(packet);
+ conn->securityChallengeSent = 1;
}
clock_GetTime(&now);
when = now;
taddr.sin_family = AF_INET;
taddr.sin_port = remotePort;
taddr.sin_addr.s_addr = remoteAddr;
+ memset(&taddr.sin_zero, 0, sizeof(taddr.sin_zero));
#ifdef STRUCT_SOCKADDR_HAS_SA_LEN
taddr.sin_len = sizeof(struct sockaddr_in);
#endif
rxi_StopListener();
#endif /* AFS_PTHREAD_ENV */
shutdown_rxevent();
- rx_SetEpoch(0);
+ rx_epoch = 0;
#ifndef AFS_PTHREAD_ENV
#ifndef AFS_USE_GETTIMEOFDAY
clock_UnInit();
"rqc=%u,%u, tqc=%u,%u, iovqc=%u,%u, "
"lstatus=%u, rstatus=%u, error=%d, timeout=%u, "
"resendEvent=%d, keepAliveEvt=%d, delayedAckEvt=%d, delayedAbortEvt=%d, abortCode=%d, abortCount=%d, "
- "lastSendTime=%u, lastRecvTime=%u, lastSendData=%u"
+ "lastSendTime=%u, lastRecvTime=%u"
#ifdef RX_ENABLE_LOCKS
", refCount=%u"
#endif
(afs_uint32)c->rqc, (afs_uint32)rqc, (afs_uint32)c->tqc, (afs_uint32)tqc, (afs_uint32)c->iovqc, (afs_uint32)iovqc,
(afs_uint32)c->localStatus, (afs_uint32)c->remoteStatus, c->error, c->timeout,
c->resendEvent?1:0, c->keepAliveEvent?1:0, c->delayedAckEvent?1:0, c->delayedAbortEvent?1:0,
- c->abortCode, c->abortCount, c->lastSendTime, c->lastReceiveTime, c->lastSendData
+ c->abortCode, c->abortCount, c->lastSendTime, c->lastReceiveTime
#ifdef RX_ENABLE_LOCKS
, (afs_uint32)c->refCount
#endif