# include <string.h>
# include <stdarg.h>
# include <errno.h>
+# ifdef HAVE_STDINT_H
+# include <stdint.h>
+# endif
#ifdef AFS_NT40_ENV
# include <stdlib.h>
# include <fcntl.h>
rxi_nCalls = 0;
rx_connDeadTime = 12;
rx_tranquil = 0; /* reset flag */
- memset((char *)&rx_stats, 0, sizeof(struct rx_statistics));
+ memset(&rx_stats, 0, sizeof(struct rx_statistics));
htable = (char *)
osi_Alloc(rx_hashTableSize * sizeof(struct rx_connection *));
PIN(htable, rx_hashTableSize * sizeof(struct rx_connection *)); /* XXXXX */
static int nProcs;
#ifdef AFS_PTHREAD_ENV
pid_t pid;
- pid = (pid_t) pthread_self();
+ pid = afs_pointer_to_int(pthread_self());
#else /* AFS_PTHREAD_ENV */
PROCESS pid;
LWP_CurrentProcess(&pid);
SPLVAR;
clock_NewTime();
- dpf(("rx_NewConnection(host %x, port %u, service %u, securityObject %x, serviceSecurityIndex %d)\n", ntohl(shost), ntohs(sport), sservice, securityObject, serviceSecurityIndex));
+ dpf(("rx_NewConnection(host %x, port %u, service %u, securityObject %p, "
+ "serviceSecurityIndex %d)\n",
+ ntohl(shost), ntohs(sport), sservice, securityObject,
+ serviceSecurityIndex));
/* Vasilsi said: "NETPRI protects Cid and Alloc", but can this be true in
* the case of kmem_alloc? */
conn->securityData = (void *) 0;
conn->securityIndex = serviceSecurityIndex;
rx_SetConnDeadTime(conn, rx_connDeadTime);
+ rx_SetConnSecondsUntilNatPing(conn, 0);
conn->ackRate = RX_FAST_ACK_RATE;
conn->nSpecific = 0;
conn->specific = NULL;
return;
}
+ if (conn->natKeepAliveEvent) {
+ rxi_NatKeepAliveOff(conn);
+ }
+
if (conn->delayedAbortEvent) {
rxevent_Cancel(conn->delayedAbortEvent, (struct rx_call *)0, 0);
packet = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL);
rxevent_Cancel(conn->challengeEvent, (struct rx_call *)0, 0);
if (conn->checkReachEvent)
rxevent_Cancel(conn->checkReachEvent, (struct rx_call *)0, 0);
+ if (conn->natKeepAliveEvent)
+ rxevent_Cancel(conn->natKeepAliveEvent, (struct rx_call *)0, 0);
/* Add the connection to the list of destroyed connections that
* need to be cleaned up. This is necessary to avoid deadlocks
SPLVAR;
clock_NewTime();
- dpf(("rx_NewCall(conn %x)\n", conn));
+ dpf(("rx_NewCall(conn %"AFS_PTR_FMT")\n", conn));
NETPRI;
clock_GetTime(&queueTime);
MUTEX_EXIT(&call->lock);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- dpf(("rx_NewCall(call %x)\n", call));
+ dpf(("rx_NewCall(call %"AFS_PTR_FMT")\n", call));
return call;
}
#endif
rxi_calltrace(RX_CALL_START, call);
- dpf(("rx_GetCall(port=%d, service=%d) ==> call %x\n",
+ dpf(("rx_GetCall(port=%d, service=%d) ==> call %"AFS_PTR_FMT"\n",
call->conn->service->servicePort, call->conn->service->serviceId,
call));
CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
MUTEX_EXIT(&call->lock);
} else {
- dpf(("rx_GetCall(socketp=0x%x, *socketp=0x%x)\n", socketp, *socketp));
+ dpf(("rx_GetCall(socketp=%p, *socketp=0x%x)\n", socketp, *socketp));
}
return call;
#endif
rxi_calltrace(RX_CALL_START, call);
- dpf(("rx_GetCall(port=%d, service=%d) ==> call %x\n",
+ dpf(("rx_GetCall(port=%d, service=%d) ==> call %p\n",
call->conn->service->servicePort, call->conn->service->serviceId,
call));
} else {
- dpf(("rx_GetCall(socketp=0x%x, *socketp=0x%x)\n", socketp, *socketp));
+ dpf(("rx_GetCall(socketp=%p, *socketp=0x%x)\n", socketp, *socketp));
}
USERPRI;
- dpf(("rx_EndCall(call %x rc %d error %d abortCode %d)\n", call, rc, call->error, call->abortCode));
+ dpf(("rx_EndCall(call %"AFS_PTR_FMT" rc %d error %d abortCode %d)\n",
+ call, rc, call->error, call->abortCode));
NETPRI;
MUTEX_ENTER(&call->lock);
struct rx_call *nxp; /* Next call pointer, for queue_Scan */
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- dpf(("rxi_NewCall(conn %x, channel %d)\n", conn, channel));
+ dpf(("rxi_NewCall(conn %"AFS_PTR_FMT", channel %d)\n", conn, channel));
/* Grab an existing call structure, or allocate a new one.
* Existing call structures are assumed to have been left reset by
CLEAR_CALL_QUEUE_LOCK(call);
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
/* Now, if TQ wasn't cleared earlier, do it now. */
+ rxi_WaitforTQBusy(call);
if (call->flags & RX_CALL_TQ_CLEARME) {
rxi_ClearTransmitQueue(call, 1);
/*queue_Init(&call->tq);*/
* this is the first time the packet has been seen */
packetType = (np->header.type > 0 && np->header.type < RX_N_PACKET_TYPES)
? rx_packetTypes[np->header.type - 1] : "*UNKNOWN*";
- dpf(("R %d %s: %x.%d.%d.%d.%d.%d.%d flags %d, packet %x",
+ dpf(("R %d %s: %x.%d.%d.%d.%d.%d.%d flags %d, packet %"AFS_PTR_FMT,
np->header.serial, packetType, ntohl(host), ntohs(port), np->header.serviceId,
np->header.epoch, np->header.cid, np->header.callNumber,
np->header.seq, np->header.flags, np));
*call->callNumber = np->header.callNumber;
#ifdef RXDEBUG
if (np->header.callNumber == 0)
- dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port), np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq, np->header.flags, (unsigned long)np, np->retryTime.sec, np->retryTime.usec / 1000, np->length));
+	    dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%06d len %d",
+                np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port),
+                np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq,
+                np->header.flags, np, np->retryTime.sec, np->retryTime.usec, np->length));
#endif
call->state = RX_STATE_PRECALL;
clock_GetTime(&call->queueTime);
*call->callNumber = np->header.callNumber;
#ifdef RXDEBUG
if (np->header.callNumber == 0)
- dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%06d len %d", np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port), np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq, np->header.flags, (unsigned long)np, np->retryTime.sec, np->retryTime.usec, np->length));
+ dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%06d len %d",
+ np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port),
+ np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq,
+ np->header.flags, np, np->retryTime.sec, np->retryTime.usec, np->length));
#endif
call->state = RX_STATE_PRECALL;
clock_GetTime(&call->queueTime);
struct rx_peer *peer;
peer = conn->peer;
if (skew > peer->inPacketSkew) {
- dpf(("*** In skew changed from %d to %d\n", peer->inPacketSkew,
- skew));
+ dpf(("*** In skew changed from %d to %d\n",
+ peer->inPacketSkew, skew));
peer->inPacketSkew = skew;
}
}
rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
call->rprev = np->header.serial;
rxi_calltrace(RX_TRACE_DROP, call);
- dpf(("packet %x dropped on receipt - quota problems", np));
+ dpf(("packet %"AFS_PTR_FMT" dropped on receipt - quota problems", np));
if (rxi_doreclaim)
rxi_ClearReceiveQueue(call);
clock_GetTime(&now);
&& queue_First(&call->rq, rx_packet)->header.seq == seq) {
if (rx_stats_active)
rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
- dpf(("packet %x dropped on receipt - duplicate", np));
+ dpf(("packet %"AFS_PTR_FMT" dropped on receipt - duplicate", np));
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
/* If the ack packet has a "recommended" size that is less than
* what I am using now, reduce my size to match */
- rx_packetread(np, rx_AckDataSize(ap->nAcks) + sizeof(afs_int32),
+ rx_packetread(np, rx_AckDataSize(ap->nAcks) + (int)sizeof(afs_int32),
(int)sizeof(afs_int32), &tSize);
tSize = (afs_uint32) ntohl(tSize);
peer->natMTU = rxi_AdjustIfMTU(MIN(tSize, peer->ifMTU));
if (np->length == rx_AckDataSize(ap->nAcks) + 3 * sizeof(afs_int32)) {
/* AFS 3.4a */
rx_packetread(np,
- rx_AckDataSize(ap->nAcks) + 2 * sizeof(afs_int32),
+ rx_AckDataSize(ap->nAcks) + 2 * (int)sizeof(afs_int32),
(int)sizeof(afs_int32), &tSize);
tSize = (afs_uint32) ntohl(tSize); /* peer's receive window, if it's */
if (tSize < call->twind) { /* smaller than our send */
rx_AckDataSize(ap->nAcks) + 4 * sizeof(afs_int32)) {
/* AFS 3.5 */
rx_packetread(np,
- rx_AckDataSize(ap->nAcks) + 2 * sizeof(afs_int32),
+ rx_AckDataSize(ap->nAcks) + 2 * (int)sizeof(afs_int32),
sizeof(afs_int32), &tSize);
tSize = (afs_uint32) ntohl(tSize);
/*
* larger than the natural MTU.
*/
rx_packetread(np,
- rx_AckDataSize(ap->nAcks) + 3 * sizeof(afs_int32),
- sizeof(afs_int32), &tSize);
+ rx_AckDataSize(ap->nAcks) + 3 * (int)sizeof(afs_int32),
+ (int)sizeof(afs_int32), &tSize);
maxDgramPackets = (afs_uint32) ntohl(tSize);
maxDgramPackets = MIN(maxDgramPackets, rxi_nDgramPackets);
maxDgramPackets =
call->tqc -=
#endif /* RXDEBUG_PACKET */
rxi_FreePackets(0, &call->tq);
+ if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
+#ifdef RX_ENABLE_LOCKS
+ CV_BROADCAST(&call->cv_tq);
+#else /* RX_ENABLE_LOCKS */
+ osi_rxWakeup(&call->tq);
+#endif /* RX_ENABLE_LOCKS */
+ }
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
call->flags &= ~RX_CALL_TQ_CLEARME;
}
#ifdef RXDEBUG_PACKET
call->rqc -= count;
if ( call->rqc != 0 )
- dpf(("rxi_ClearReceiveQueue call %x rqc %u != 0", call, call->rqc));
+ dpf(("rxi_ClearReceiveQueue call %"AFS_PTR_FMT" rqc %u != 0", call, call->rqc));
#endif
call->flags &= ~(RX_CALL_RECEIVE_DONE | RX_CALL_HAVE_LAST);
}
if (error) {
int i;
- dpf(("rxi_ConnectionError conn %x error %d", conn, error));
+ dpf(("rxi_ConnectionError conn %"AFS_PTR_FMT" error %d", conn, error));
MUTEX_ENTER(&conn->conn_data_lock);
if (conn->challengeEvent)
rxevent_Cancel(conn->challengeEvent, (struct rx_call *)0, 0);
+ if (conn->natKeepAliveEvent)
+ rxevent_Cancel(conn->natKeepAliveEvent, (struct rx_call *)0, 0);
if (conn->checkReachEvent) {
rxevent_Cancel(conn->checkReachEvent, (struct rx_call *)0, 0);
conn->checkReachEvent = 0;
#ifdef DEBUG
osirx_AssertMine(&call->lock, "rxi_CallError");
#endif
- dpf(("rxi_CallError call %x error %d call->error %d", call, error, call->error));
+ dpf(("rxi_CallError call %"AFS_PTR_FMT" error %d call->error %d", call, error, call->error));
if (call->error)
error = call->error;
#ifdef DEBUG
osirx_AssertMine(&call->lock, "rxi_ResetCall");
#endif
- dpf(("rxi_ResetCall(call %x, newcall %d)\n", call, newcall));
+ dpf(("rxi_ResetCall(call %"AFS_PTR_FMT", newcall %d)\n", call, newcall));
/* Notify anyone who is waiting for asynchronous packet arrival */
if (call->arrivalProc) {
if (flags & RX_CALL_TQ_BUSY) {
call->flags = RX_CALL_TQ_CLEARME | RX_CALL_TQ_BUSY;
call->flags |= (flags & RX_CALL_TQ_WAIT);
+#ifdef RX_ENABLE_LOCKS
+ CV_WAIT(&call->cv_tq, &call->lock);
+#else /* RX_ENABLE_LOCKS */
+ osi_rxSleep(&call->tq);
+#endif /* RX_ENABLE_LOCKS */
} else
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
{
rxi_ClearTransmitQueue(call, 1);
/* why init the queue if you just emptied it? queue_Init(&call->tq); */
if (call->tqWaiters || (flags & RX_CALL_TQ_WAIT)) {
- dpf(("rcall %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
+ dpf(("rcall %"AFS_PTR_FMT" has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
}
call->flags = 0;
- while (call->tqWaiters) {
-#ifdef RX_ENABLE_LOCKS
- CV_BROADCAST(&call->cv_tq);
-#else /* RX_ENABLE_LOCKS */
- osi_rxWakeup(&call->tq);
-#endif /* RX_ENABLE_LOCKS */
- call->tqWaiters--;
- }
}
rxi_ClearReceiveQueue(call);
* some of them have been retransmitted more times than more
* recent additions.
* Do a dance to avoid blocking after setting now. */
- clock_Zero(&retryTime);
MUTEX_ENTER(&peer->peer_lock);
- clock_Add(&retryTime, &peer->timeout);
+ retryTime = peer->timeout;
MUTEX_EXIT(&peer->peer_lock);
clock_GetTime(&now);
clock_Add(&retryTime, &now);
if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
/* We shouldn't be sending packets if a thread is waiting
* to initiate congestion recovery */
+ dpf(("call %d waiting to initiate fast recovery\n",
+ *(call->callNumber)));
break;
}
if ((nXmitPackets)
&& (call->flags & RX_CALL_FAST_RECOVER)) {
/* Only send one packet during fast recovery */
+ dpf(("call %d restricted to one packet per send during fast recovery\n",
+ *(call->callNumber)));
break;
}
if ((p->flags & RX_PKTFLAG_FREE)
/* Note: if we're waiting for more window space, we can
* still send retransmits; hence we don't return here, but
* break out to schedule a retransmit event */
- dpf(("call %d waiting for window",
- *(call->callNumber)));
+ dpf(("call %d waiting for window (seq %d, twind %d, nSoftAcked %d, cwind %d)\n",
+ *(call->callNumber), p->header.seq, call->twind, call->nSoftAcked,
+ call->cwind));
break;
}
sizeof(struct rx_packet *));
goto restart;
}
+ dpf(("call %d xmit packet %"AFS_PTR_FMT" now %u.%06u retryTime %u.%06u nextRetry %u.%06u\n",
+ *(call->callNumber), p,
+ now.sec, now.usec,
+ p->retryTime.sec, p->retryTime.usec,
+ retryTime.sec, retryTime.usec));
xmitList[nXmitPackets++] = p;
}
}
if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
call->flags &= ~RX_CALL_TQ_BUSY;
if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
- dpf(("call %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
+ dpf(("call %"AFS_PTR_FMT" has %d waiters and flags %d\n",
+ call, call->tqWaiters, call->flags));
#ifdef RX_ENABLE_LOCKS
osirx_AssertMine(&call->lock, "rxi_Start start");
CV_BROADCAST(&call->cv_tq);
rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
call->flags &= ~RX_CALL_TQ_BUSY;
if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
- dpf(("call %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
+ dpf(("call error %d while xmit %p has %d waiters and flags %d\n",
+ call->error, call, call->tqWaiters, call->flags));
#ifdef RX_ENABLE_LOCKS
osirx_AssertMine(&call->lock, "rxi_Start middle");
CV_BROADCAST(&call->cv_tq);
*/
call->flags &= ~RX_CALL_TQ_BUSY;
if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
- dpf(("call %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
+ dpf(("call %"AFS_PTR_FMT" has %d waiters and flags %d\n",
+ call, call->tqWaiters, call->flags));
#ifdef RX_ENABLE_LOCKS
osirx_AssertMine(&call->lock, "rxi_Start end");
CV_BROADCAST(&call->cv_tq);
}
/* see if we have a non-activity timeout */
if (call->startWait && conn->idleDeadTime
- && ((call->startWait + conn->idleDeadTime) < now)) {
+ && ((call->startWait + conn->idleDeadTime) < now) &&
+ (call->flags & RX_CALL_READER_WAIT)) {
if (call->state == RX_STATE_ACTIVE) {
rxi_CallError(call, RX_CALL_TIMEOUT);
return -1;
return 0;
}
+void
+rxi_NatKeepAliveEvent(struct rxevent *event, void *arg1, void *dummy)
+{
+ struct rx_connection *conn = arg1;
+ struct rx_header theader;
+ char tbuffer[1500];
+ struct sockaddr_in taddr;
+ char *tp;
+ char a[1] = { 0 };
+ struct iovec tmpiov[2];
+ osi_socket socket =
+ (conn->type ==
+ RX_CLIENT_CONNECTION ? rx_socket : conn->service->socket);
+
+
+ tp = &tbuffer[sizeof(struct rx_header)];
+ taddr.sin_family = AF_INET;
+ taddr.sin_port = rx_PortOf(rx_PeerOf(conn));
+ taddr.sin_addr.s_addr = rx_HostOf(rx_PeerOf(conn));
+#ifdef STRUCT_SOCKADDR_HAS_SA_LEN
+ taddr.sin_len = sizeof(struct sockaddr_in);
+#endif
+ memset(&theader, 0, sizeof(theader));
+ theader.epoch = htonl(999);
+ theader.cid = 0;
+ theader.callNumber = 0;
+ theader.seq = 0;
+ theader.serial = 0;
+ theader.type = RX_PACKET_TYPE_VERSION;
+ theader.flags = RX_LAST_PACKET;
+ theader.serviceId = 0;
+
+ memcpy(tbuffer, &theader, sizeof(theader));
+ memcpy(tp, &a, sizeof(a));
+ tmpiov[0].iov_base = tbuffer;
+ tmpiov[0].iov_len = 1 + sizeof(struct rx_header);
+
+ osi_NetSend(socket, &taddr, tmpiov, 1, 1 + sizeof(struct rx_header), 1);
+
+ MUTEX_ENTER(&conn->conn_data_lock);
+ /* Only reschedule ourselves if the connection would not be destroyed */
+ if (conn->refCount <= 1) {
+ conn->natKeepAliveEvent = NULL;
+ MUTEX_EXIT(&conn->conn_data_lock);
+ rx_DestroyConnection(conn); /* drop the reference for this */
+ } else {
+ conn->natKeepAliveEvent = NULL;
+ conn->refCount--; /* drop the reference for this */
+ rxi_ScheduleNatKeepAliveEvent(conn);
+ MUTEX_EXIT(&conn->conn_data_lock);
+ }
+}
+
+void
+rxi_ScheduleNatKeepAliveEvent(struct rx_connection *conn)
+{
+ if (!conn->natKeepAliveEvent && conn->secondsUntilNatPing) {
+ struct clock when, now;
+ clock_GetTime(&now);
+ when = now;
+ when.sec += conn->secondsUntilNatPing;
+ conn->refCount++; /* hold a reference for this */
+ conn->natKeepAliveEvent =
+ rxevent_PostNow(&when, &now, rxi_NatKeepAliveEvent, conn, 0);
+ }
+}
+
+void
+rx_SetConnSecondsUntilNatPing(struct rx_connection *conn, afs_int32 seconds)
+{
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->secondsUntilNatPing = seconds;
+ if (seconds != 0)
+ rxi_ScheduleNatKeepAliveEvent(conn);
+ MUTEX_EXIT(&conn->conn_data_lock);
+}
+
+void
+rxi_NatKeepAliveOn(struct rx_connection *conn)
+{
+ MUTEX_ENTER(&conn->conn_data_lock);
+ rxi_ScheduleNatKeepAliveEvent(conn);
+ MUTEX_EXIT(&conn->conn_data_lock);
+}
/* When a call is in progress, this routine is called occasionally to
* make sure that some traffic has arrived (or been sent to) the peer.
return; /* somebody set the clock back, don't count this time. */
}
clock_Sub(rttp, sentp);
+ dpf(("rxi_ComputeRoundTripTime(call=%d packet=%"AFS_PTR_FMT" rttp=%d.%06d sec)\n",
+ p->header.callNumber, p, rttp->sec, rttp->usec));
+
+ if (rttp->sec == 0 && rttp->usec == 0) {
+ /*
+ * The actual round trip time is shorter than the
+ * clock_GetTime resolution. It is most likely 1ms or 100ns.
+ * Since we can't tell which at the moment we will assume 1ms.
+ */
+ rttp->usec = 1000;
+ }
+
if (rx_stats_active) {
MUTEX_ENTER(&rx_stats_mutex);
if (clock_Lt(rttp, &rx_stats.minRtt))
peer->rtt = _8THMSEC(rttp) + 8;
peer->rtt_dev = peer->rtt >> 2; /* rtt/2: they're scaled differently */
}
- /* the timeout is RTT + 4*MDEV but no less than 350 msec This is because one end or
- * the other of these connections is usually in a user process, and can
- * be switched and/or swapped out. So on fast, reliable networks, the
- * timeout would otherwise be too short.
- */
- rtt_timeout = MIN((peer->rtt >> 3) + peer->rtt_dev, 350);
+ /* the timeout is RTT + 4*MDEV but no less than rx_minPeerTimeout msec.
+ * This is because one end or the other of these connections is usually
+ * in a user process, and can be switched and/or swapped out. So on fast,
+ * reliable networks, the timeout would otherwise be too short. */
+ rtt_timeout = MAX(((peer->rtt >> 3) + peer->rtt_dev), rx_minPeerTimeout);
clock_Zero(&(peer->timeout));
clock_Addmsec(&(peer->timeout), rtt_timeout);
- dpf(("rxi_ComputeRoundTripTime(rtt=%d ms, srtt=%d ms, rtt_dev=%d ms, timeout=%d.%06d sec)\n", MSEC(rttp), peer->rtt >> 3, peer->rtt_dev >> 2, (peer->timeout.sec), (peer->timeout.usec)));
+ dpf(("rxi_ComputeRoundTripTime(call=%d packet=%"AFS_PTR_FMT" rtt=%d ms, srtt=%d ms, rtt_dev=%d ms, timeout=%d.%06d sec)\n",
+ p->header.callNumber, p, MSEC(rttp), peer->rtt >> 3, peer->rtt_dev >> 2, (peer->timeout.sec), (peer->timeout.usec)));
}
return;
}
- dpf(("CONG peer %lx/%u: sample (%s) size %ld, %ld ms (to %d.%06d, rtt %u, ps %u)", ntohl(peer->host), ntohs(peer->port), (ackReason == RX_ACK_REQUESTED ? "dataack" : "pingack"), xferSize, xferMs, peer->timeout.sec, peer->timeout.usec, peer->smRtt, peer->ifMTU));
+ dpf(("CONG peer %lx/%u: sample (%s) size %ld, %ld ms (to %d.%06d, rtt %u, ps %u)",
+ ntohl(peer->host), ntohs(peer->port), (ackReason == RX_ACK_REQUESTED ? "dataack" : "pingack"),
+ xferSize, xferMs, peer->timeout.sec, peer->timeout.usec, peer->smRtt, peer->ifMTU));
/* Track only packets that are big enough. */
if ((p->length + RX_HEADER_SIZE + call->conn->securityMaxTrailerSize) <
* one packet exchange */
if (clock_Gt(&newTO, &peer->timeout)) {
- dpf(("CONG peer %lx/%u: timeout %d.%06d ==> %ld.%06d (rtt %u, ps %u)", ntohl(peer->host), ntohs(peer->port), peer->timeout.sec, peer->timeout.usec, newTO.sec, newTO.usec, peer->smRtt, peer->packetSize));
+ dpf(("CONG peer %lx/%u: timeout %d.%06d ==> %ld.%06d (rtt %u, ps %u)",
+ ntohl(peer->host), ntohs(peer->port), peer->timeout.sec, peer->timeout.usec,
+ newTO.sec, newTO.usec, peer->smRtt, peer->packetSize));
peer->timeout = newTO;
}
}
tv_delta.tv_sec -= tv_now.tv_sec;
+#ifdef AFS_NT40_ENV
+ code = select(0, &imask, 0, 0, &tv_delta);
+#else /* AFS_NT40_ENV */
code = select(socket + 1, &imask, 0, 0, &tv_delta);
+#endif /* AFS_NT40_ENV */
if (code == 1 && FD_ISSET(socket, &imask)) {
/* now receive a packet */
faddrLen = sizeof(struct sockaddr_in);
return FALSE;
}
}
+#endif /* AFS_NT40_ENV */
+#ifndef KERNEL
int rx_DumpCalls(FILE *outputFile, char *cookie)
{
#ifdef RXDEBUG_PACKET
- int zilch;
#ifdef KDUMP_RX_LOCK
struct rx_call_rx_lock *c;
#else
struct rx_call *c;
#endif
+#ifdef AFS_NT40_ENV
+ int zilch;
char output[2048];
+#define RXDPRINTF sprintf
+#define RXDPRINTOUT output
+#else
+#define RXDPRINTF fprintf
+#define RXDPRINTOUT outputFile
+#endif
- sprintf(output, "%s - Start dumping all Rx Calls - count=%u\r\n", cookie, rx_stats.nCallStructs);
+ RXDPRINTF(RXDPRINTOUT, "%s - Start dumping all Rx Calls - count=%u\r\n", cookie, rx_stats.nCallStructs);
+#ifdef AFS_NT40_ENV
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
for (c = rx_allCallsp; c; c = c->allNextp) {
u_short rqc, tqc, iovqc;
queue_Count(&c->tq, p, np, rx_packet, tqc);
queue_Count(&c->iovq, p, np, rx_packet, iovqc);
- sprintf(output, "%s - call=0x%p, id=%u, state=%u, mode=%u, conn=%p, epoch=%u, cid=%u, callNum=%u, connFlags=0x%x, flags=0x%x, "
+ RXDPRINTF(RXDPRINTOUT, "%s - call=0x%p, id=%u, state=%u, mode=%u, conn=%p, epoch=%u, cid=%u, callNum=%u, connFlags=0x%x, flags=0x%x, "
"rqc=%u,%u, tqc=%u,%u, iovqc=%u,%u, "
"lstatus=%u, rstatus=%u, error=%d, timeout=%u, "
"resendEvent=%d, timeoutEvt=%d, keepAliveEvt=%d, delayedAckEvt=%d, delayedAbortEvt=%d, abortCode=%d, abortCount=%d, "
);
MUTEX_EXIT(&c->lock);
+#ifdef AFS_NT40_ENV
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
}
- sprintf(output, "%s - End dumping all Rx Calls\r\n", cookie);
+ RXDPRINTF(RXDPRINTOUT, "%s - End dumping all Rx Calls\r\n", cookie);
+#ifdef AFS_NT40_ENV
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
#endif /* RXDEBUG_PACKET */
return 0;
}
-#endif /* AFS_NT40_ENV */
-
+#endif