# include <string.h>
# include <stdarg.h>
# include <errno.h>
+# ifdef HAVE_STDINT_H
+# include <stdint.h>
+# endif
#ifdef AFS_NT40_ENV
# include <stdlib.h>
# include <fcntl.h>
static int nProcs;
#ifdef AFS_PTHREAD_ENV
pid_t pid;
- pid = (pid_t) pthread_self();
+ pid = afs_pointer_to_int(pthread_self());
#else /* AFS_PTHREAD_ENV */
PROCESS pid;
LWP_CurrentProcess(&pid);
SPLVAR;
clock_NewTime();
- dpf(("rx_NewConnection(host %x, port %u, service %u, securityObject %x, serviceSecurityIndex %d)\n",
- ntohl(shost), ntohs(sport), sservice, securityObject, serviceSecurityIndex));
+ dpf(("rx_NewConnection(host %x, port %u, service %u, securityObject %p, "
+ "serviceSecurityIndex %d)\n",
+ ntohl(shost), ntohs(sport), sservice, securityObject,
+ serviceSecurityIndex));
/* Vasilsi said: "NETPRI protects Cid and Alloc", but can this be true in
* the case of kmem_alloc? */
conn->securityData = (void *) 0;
conn->securityIndex = serviceSecurityIndex;
rx_SetConnDeadTime(conn, rx_connDeadTime);
+ rx_SetConnSecondsUntilNatPing(conn, 0);
conn->ackRate = RX_FAST_ACK_RATE;
conn->nSpecific = 0;
conn->specific = NULL;
return;
}
+ if (conn->natKeepAliveEvent) {
+ rxi_NatKeepAliveOff(conn);
+ }
+
if (conn->delayedAbortEvent) {
rxevent_Cancel(conn->delayedAbortEvent, (struct rx_call *)0, 0);
packet = rxi_AllocPacket(RX_PACKET_CLASS_SPECIAL);
rxevent_Cancel(conn->challengeEvent, (struct rx_call *)0, 0);
if (conn->checkReachEvent)
rxevent_Cancel(conn->checkReachEvent, (struct rx_call *)0, 0);
+ if (conn->natKeepAliveEvent)
+ rxevent_Cancel(conn->natKeepAliveEvent, (struct rx_call *)0, 0);
/* Add the connection to the list of destroyed connections that
* need to be cleaned up. This is necessary to avoid deadlocks
CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
MUTEX_EXIT(&call->lock);
} else {
- dpf(("rx_GetCall(socketp=0x%"AFS_PTR_FMT", *socketp=0x%"AFS_PTR_FMT")\n", socketp, *socketp));
+ dpf(("rx_GetCall(socketp=%p, *socketp=0x%x)\n", socketp, *socketp));
}
return call;
#endif
rxi_calltrace(RX_CALL_START, call);
- dpf(("rx_GetCall(port=%d, service=%d) ==> call %x\n",
+ dpf(("rx_GetCall(port=%d, service=%d) ==> call %p\n",
call->conn->service->servicePort, call->conn->service->serviceId,
call));
} else {
- dpf(("rx_GetCall(socketp=0x%"AFS_PTR_FMT", *socketp=0x%"AFS_PTR_FMT")\n", socketp, *socketp));
+ dpf(("rx_GetCall(socketp=%p, *socketp=0x%x)\n", socketp, *socketp));
}
USERPRI;
CLEAR_CALL_QUEUE_LOCK(call);
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
/* Now, if TQ wasn't cleared earlier, do it now. */
+ rxi_WaitforTQBusy(call);
if (call->flags & RX_CALL_TQ_CLEARME) {
rxi_ClearTransmitQueue(call, 1);
/*queue_Init(&call->tq);*/
*call->callNumber = np->header.callNumber;
#ifdef RXDEBUG
if (np->header.callNumber == 0)
- dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%0.06d len %d",
+ dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%.06d len %d",
np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port),
np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq,
np->header.flags, np, np->retryTime.sec, np->retryTime.usec / 1000, np->length));
MUTEX_ENTER(&conn->conn_data_lock);
if (conn->challengeEvent)
rxevent_Cancel(conn->challengeEvent, (struct rx_call *)0, 0);
+ if (conn->natKeepAliveEvent)
+ rxevent_Cancel(conn->natKeepAliveEvent, (struct rx_call *)0, 0);
if (conn->checkReachEvent) {
rxevent_Cancel(conn->checkReachEvent, (struct rx_call *)0, 0);
conn->checkReachEvent = 0;
if (flags & RX_CALL_TQ_BUSY) {
call->flags = RX_CALL_TQ_CLEARME | RX_CALL_TQ_BUSY;
call->flags |= (flags & RX_CALL_TQ_WAIT);
+#ifdef RX_ENABLE_LOCKS
+ CV_WAIT(&call->cv_tq, &call->lock);
+#else /* RX_ENABLE_LOCKS */
+ osi_rxSleep(&call->tq);
+#endif /* RX_ENABLE_LOCKS */
} else
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
{
rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
call->flags &= ~RX_CALL_TQ_BUSY;
if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
- dpf(("call error %d while xmit %x has %d waiters and flags %d\n",
- call, call->error, call->tqWaiters, call->flags));
+ dpf(("call error %d while xmit %p has %d waiters and flags %d\n",
+ call->error, call, call->tqWaiters, call->flags));
#ifdef RX_ENABLE_LOCKS
osirx_AssertMine(&call->lock, "rxi_Start middle");
CV_BROADCAST(&call->cv_tq);
}
/* see if we have a non-activity timeout */
if (call->startWait && conn->idleDeadTime
- && ((call->startWait + conn->idleDeadTime) < now)) {
+ && ((call->startWait + conn->idleDeadTime) < now) &&
+ (call->flags & RX_CALL_READER_WAIT)) {
if (call->state == RX_STATE_ACTIVE) {
rxi_CallError(call, RX_CALL_TIMEOUT);
return -1;
return 0;
}
+/*
+ * Event handler for the NAT keep-alive timer.  Sends a minimal
+ * one-byte "ping" datagram (an Rx VERSION-type packet with epoch 999)
+ * to the connection's peer so that intervening NAT/firewall mappings
+ * stay open, then drops the connection reference taken when the event
+ * was scheduled and, if the connection is still referenced elsewhere,
+ * re-arms the timer.
+ *
+ * event: the rxevent that fired (unused here beyond the callback contract)
+ * arg1:  the struct rx_connection * being kept alive
+ * dummy: unused
+ */
+void
+rxi_NatKeepAliveEvent(struct rxevent *event, void *arg1, void *dummy)
+{
+ struct rx_connection *conn = arg1;
+ struct rx_header theader;
+ char tbuffer[1500];
+ struct sockaddr_in taddr;
+ char *tp;
+ char a[1] = { 0 };
+ struct iovec tmpiov[2];
+ /* Client connections send through the shared client socket; server
+  * connections use their owning service's socket. */
+ osi_socket socket =
+ (conn->type ==
+ RX_CLIENT_CONNECTION ? rx_socket : conn->service->socket);
+
+
+ /* tp points just past the Rx header inside tbuffer; the single zero
+  * byte in a[] becomes the packet payload. */
+ tp = &tbuffer[sizeof(struct rx_header)];
+ taddr.sin_family = AF_INET;
+ taddr.sin_port = rx_PortOf(rx_PeerOf(conn));
+ taddr.sin_addr.s_addr = rx_HostOf(rx_PeerOf(conn));
+#ifdef STRUCT_SOCKADDR_HAS_SA_LEN
+ taddr.sin_len = sizeof(struct sockaddr_in);
+#endif
+ memset(&theader, 0, sizeof(theader));
+ /* Epoch 999 with cid/callNumber 0 makes this packet match no real
+  * call on the peer; RX_LAST_PACKET marks it self-contained. */
+ theader.epoch = htonl(999);
+ theader.cid = 0;
+ theader.callNumber = 0;
+ theader.seq = 0;
+ theader.serial = 0;
+ theader.type = RX_PACKET_TYPE_VERSION;
+ theader.flags = RX_LAST_PACKET;
+ theader.serviceId = 0;
+
+ /* Assemble header + 1 payload byte contiguously in tbuffer and send
+  * it as a single iovec. */
+ memcpy(tbuffer, &theader, sizeof(theader));
+ memcpy(tp, &a, sizeof(a));
+ tmpiov[0].iov_base = tbuffer;
+ tmpiov[0].iov_len = 1 + sizeof(struct rx_header);
+
+ osi_NetSend(socket, &taddr, tmpiov, 1, 1 + sizeof(struct rx_header), 1);
+
+ MUTEX_ENTER(&conn->conn_data_lock);
+ /* Only reschedule ourselves if the connection would not be destroyed */
+ if (conn->refCount <= 1) {
+ /* Ours is the last reference: release it via the normal destroy
+  * path (which must not run under conn_data_lock). */
+ conn->natKeepAliveEvent = NULL;
+ MUTEX_EXIT(&conn->conn_data_lock);
+ rx_DestroyConnection(conn); /* drop the reference for this */
+ } else {
+ conn->natKeepAliveEvent = NULL;
+ conn->refCount--; /* drop the reference for this */
+ rxi_ScheduleNatKeepAliveEvent(conn);
+ MUTEX_EXIT(&conn->conn_data_lock);
+ }
+}
+
+/*
+ * Arm the NAT keep-alive timer on conn, if NAT pinging is enabled
+ * (secondsUntilNatPing != 0) and no event is already pending.  Takes
+ * an extra connection reference that rxi_NatKeepAliveEvent releases
+ * when it fires.  All callers in this patch invoke this while holding
+ * conn->conn_data_lock.
+ */
+void
+rxi_ScheduleNatKeepAliveEvent(struct rx_connection *conn)
+{
+ if (!conn->natKeepAliveEvent && conn->secondsUntilNatPing) {
+ struct clock when, now;
+ clock_GetTime(&now);
+ when = now;
+ /* Fire secondsUntilNatPing seconds from now. */
+ when.sec += conn->secondsUntilNatPing;
+ conn->refCount++; /* hold a reference for this */
+ conn->natKeepAliveEvent =
+ rxevent_PostNow(&when, &now, rxi_NatKeepAliveEvent, conn, 0);
+ }
+}
+
+/*
+ * Public setter: configure the NAT keep-alive interval for conn, in
+ * seconds.  A value of 0 disables scheduling of new pings; a nonzero
+ * value arms the timer immediately (if not already pending).
+ */
+void
+rx_SetConnSecondsUntilNatPing(struct rx_connection *conn, afs_int32 seconds)
+{
+ MUTEX_ENTER(&conn->conn_data_lock);
+ conn->secondsUntilNatPing = seconds;
+ if (seconds != 0)
+ rxi_ScheduleNatKeepAliveEvent(conn);
+ MUTEX_EXIT(&conn->conn_data_lock);
+}
+
+/*
+ * (Re)start NAT keep-alive pings on conn using its currently
+ * configured interval.  A no-op when secondsUntilNatPing is 0 or an
+ * event is already scheduled (see rxi_ScheduleNatKeepAliveEvent).
+ */
+void
+rxi_NatKeepAliveOn(struct rx_connection *conn)
+{
+ MUTEX_ENTER(&conn->conn_data_lock);
+ rxi_ScheduleNatKeepAliveEvent(conn);
+ MUTEX_EXIT(&conn->conn_data_lock);
+}
/* When a call is in progress, this routine is called occasionally to
* make sure that some traffic has arrived (or been sent to) the peer.
return FALSE;
}
}
+#endif /* AFS_NT40_ENV */
+#ifndef KERNEL
int rx_DumpCalls(FILE *outputFile, char *cookie)
{
#ifdef RXDEBUG_PACKET
- int zilch;
#ifdef KDUMP_RX_LOCK
struct rx_call_rx_lock *c;
#else
struct rx_call *c;
#endif
+#ifdef AFS_NT40_ENV
+ int zilch;
char output[2048];
+#define RXDPRINTF sprintf
+#define RXDPRINTOUT output
+#else
+#define RXDPRINTF fprintf
+#define RXDPRINTOUT outputFile
+#endif
- sprintf(output, "%s - Start dumping all Rx Calls - count=%u\r\n", cookie, rx_stats.nCallStructs);
+ RXDPRINTF(RXDPRINTOUT, "%s - Start dumping all Rx Calls - count=%u\r\n", cookie, rx_stats.nCallStructs);
+#ifdef AFS_NT40_ENV
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
for (c = rx_allCallsp; c; c = c->allNextp) {
u_short rqc, tqc, iovqc;
queue_Count(&c->tq, p, np, rx_packet, tqc);
queue_Count(&c->iovq, p, np, rx_packet, iovqc);
- sprintf(output, "%s - call=0x%p, id=%u, state=%u, mode=%u, conn=%p, epoch=%u, cid=%u, callNum=%u, connFlags=0x%x, flags=0x%x, "
+ RXDPRINTF(RXDPRINTOUT, "%s - call=0x%p, id=%u, state=%u, mode=%u, conn=%p, epoch=%u, cid=%u, callNum=%u, connFlags=0x%x, flags=0x%x, "
"rqc=%u,%u, tqc=%u,%u, iovqc=%u,%u, "
"lstatus=%u, rstatus=%u, error=%d, timeout=%u, "
"resendEvent=%d, timeoutEvt=%d, keepAliveEvt=%d, delayedAckEvt=%d, delayedAbortEvt=%d, abortCode=%d, abortCount=%d, "
);
MUTEX_EXIT(&c->lock);
+#ifdef AFS_NT40_ENV
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
}
- sprintf(output, "%s - End dumping all Rx Calls\r\n", cookie);
+ RXDPRINTF(RXDPRINTOUT, "%s - End dumping all Rx Calls\r\n", cookie);
+#ifdef AFS_NT40_ENV
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+#endif
#endif /* RXDEBUG_PACKET */
return 0;
}
-#endif /* AFS_NT40_ENV */
-
+#endif