SPLVAR;
clock_NewTime();
- dpf(("rx_NewConnection(host %x, port %u, service %u, securityObject %x, serviceSecurityIndex %d)\n", ntohl(shost), ntohs(sport), sservice, securityObject, serviceSecurityIndex));
+ dpf(("rx_NewConnection(host %x, port %u, service %u, securityObject %x, serviceSecurityIndex %d)\n",
+ ntohl(shost), ntohs(sport), sservice, securityObject, serviceSecurityIndex));
/* Vasilsi said: "NETPRI protects Cid and Alloc", but can this be true in
* the case of kmem_alloc? */
SPLVAR;
clock_NewTime();
- dpf(("rx_NewCall(conn %x)\n", conn));
+ dpf(("rx_NewCall(conn %"AFS_PTR_FMT")\n", conn));
NETPRI;
clock_GetTime(&queueTime);
MUTEX_EXIT(&call->lock);
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- dpf(("rx_NewCall(call %x)\n", call));
+ dpf(("rx_NewCall(call %"AFS_PTR_FMT")\n", call));
return call;
}
#endif
rxi_calltrace(RX_CALL_START, call);
- dpf(("rx_GetCall(port=%d, service=%d) ==> call %x\n",
+ dpf(("rx_GetCall(port=%d, service=%d) ==> call %"AFS_PTR_FMT"\n",
call->conn->service->servicePort, call->conn->service->serviceId,
call));
CALL_HOLD(call, RX_CALL_REFCOUNT_BEGIN);
MUTEX_EXIT(&call->lock);
} else {
- dpf(("rx_GetCall(socketp=0x%x, *socketp=0x%x)\n", socketp, *socketp));
+ dpf(("rx_GetCall(socketp=0x%"AFS_PTR_FMT", *socketp=0x%"AFS_PTR_FMT")\n", socketp, *socketp));
}
return call;
call->conn->service->servicePort, call->conn->service->serviceId,
call));
} else {
- dpf(("rx_GetCall(socketp=0x%x, *socketp=0x%x)\n", socketp, *socketp));
+ dpf(("rx_GetCall(socketp=0x%"AFS_PTR_FMT", *socketp=0x%"AFS_PTR_FMT")\n", socketp, *socketp));
}
USERPRI;
- dpf(("rx_EndCall(call %x rc %d error %d abortCode %d)\n", call, rc, call->error, call->abortCode));
+ dpf(("rx_EndCall(call %"AFS_PTR_FMT" rc %d error %d abortCode %d)\n",
+ call, rc, call->error, call->abortCode));
NETPRI;
MUTEX_ENTER(&call->lock);
struct rx_call *nxp; /* Next call pointer, for queue_Scan */
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
- dpf(("rxi_NewCall(conn %x, channel %d)\n", conn, channel));
+ dpf(("rxi_NewCall(conn %"AFS_PTR_FMT", channel %d)\n", conn, channel));
/* Grab an existing call structure, or allocate a new one.
* Existing call structures are assumed to have been left reset by
* this is the first time the packet has been seen */
packetType = (np->header.type > 0 && np->header.type < RX_N_PACKET_TYPES)
? rx_packetTypes[np->header.type - 1] : "*UNKNOWN*";
- dpf(("R %d %s: %x.%d.%d.%d.%d.%d.%d flags %d, packet %x",
+ dpf(("R %d %s: %x.%d.%d.%d.%d.%d.%d flags %d, packet %"AFS_PTR_FMT,
np->header.serial, packetType, ntohl(host), ntohs(port), np->header.serviceId,
np->header.epoch, np->header.cid, np->header.callNumber,
np->header.seq, np->header.flags, np));
*call->callNumber = np->header.callNumber;
#ifdef RXDEBUG
if (np->header.callNumber == 0)
- dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port), np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq, np->header.flags, (unsigned long)np, np->retryTime.sec, np->retryTime.usec / 1000, np->length));
+ dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%0.06d len %d",
+ np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port),
+ np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq,
+ np->header.flags, np, np->retryTime.sec, np->retryTime.usec / 1000, np->length));
#endif
call->state = RX_STATE_PRECALL;
clock_GetTime(&call->queueTime);
*call->callNumber = np->header.callNumber;
#ifdef RXDEBUG
if (np->header.callNumber == 0)
- dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%06d len %d", np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port), np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq, np->header.flags, (unsigned long)np, np->retryTime.sec, np->retryTime.usec, np->length));
+ dpf(("RecPacket call 0 %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%06d len %d",
+ np->header.serial, rx_packetTypes[np->header.type - 1], ntohl(conn->peer->host), ntohs(conn->peer->port),
+ np->header.serial, np->header.epoch, np->header.cid, np->header.callNumber, np->header.seq,
+ np->header.flags, np, np->retryTime.sec, np->retryTime.usec, np->length));
#endif
call->state = RX_STATE_PRECALL;
clock_GetTime(&call->queueTime);
struct rx_peer *peer;
peer = conn->peer;
if (skew > peer->inPacketSkew) {
- dpf(("*** In skew changed from %d to %d\n", peer->inPacketSkew,
- skew));
+ dpf(("*** In skew changed from %d to %d\n",
+ peer->inPacketSkew, skew));
peer->inPacketSkew = skew;
}
}
rx_MutexIncrement(rx_stats.noPacketBuffersOnRead, rx_stats_mutex);
call->rprev = np->header.serial;
rxi_calltrace(RX_TRACE_DROP, call);
- dpf(("packet %x dropped on receipt - quota problems", np));
+ dpf(("packet %"AFS_PTR_FMT" dropped on receipt - quota problems", np));
if (rxi_doreclaim)
rxi_ClearReceiveQueue(call);
clock_GetTime(&now);
&& queue_First(&call->rq, rx_packet)->header.seq == seq) {
if (rx_stats_active)
rx_MutexIncrement(rx_stats.dupPacketsRead, rx_stats_mutex);
- dpf(("packet %x dropped on receipt - duplicate", np));
+ dpf(("packet %"AFS_PTR_FMT" dropped on receipt - duplicate", np));
rxevent_Cancel(call->delayedAckEvent, call,
RX_CALL_REFCOUNT_DELAY);
np = rxi_SendAck(call, np, serial, RX_ACK_DUPLICATE, istack);
/* If the ack packet has a "recommended" size that is less than
* what I am using now, reduce my size to match */
- rx_packetread(np, rx_AckDataSize(ap->nAcks) + sizeof(afs_int32),
+ rx_packetread(np, rx_AckDataSize(ap->nAcks) + (int)sizeof(afs_int32),
(int)sizeof(afs_int32), &tSize);
tSize = (afs_uint32) ntohl(tSize);
peer->natMTU = rxi_AdjustIfMTU(MIN(tSize, peer->ifMTU));
if (np->length == rx_AckDataSize(ap->nAcks) + 3 * sizeof(afs_int32)) {
/* AFS 3.4a */
rx_packetread(np,
- rx_AckDataSize(ap->nAcks) + 2 * sizeof(afs_int32),
+ rx_AckDataSize(ap->nAcks) + 2 * (int)sizeof(afs_int32),
(int)sizeof(afs_int32), &tSize);
tSize = (afs_uint32) ntohl(tSize); /* peer's receive window, if it's */
if (tSize < call->twind) { /* smaller than our send */
rx_AckDataSize(ap->nAcks) + 4 * sizeof(afs_int32)) {
/* AFS 3.5 */
rx_packetread(np,
- rx_AckDataSize(ap->nAcks) + 2 * sizeof(afs_int32),
+ rx_AckDataSize(ap->nAcks) + 2 * (int)sizeof(afs_int32),
sizeof(afs_int32), &tSize);
tSize = (afs_uint32) ntohl(tSize);
/*
* larger than the natural MTU.
*/
rx_packetread(np,
- rx_AckDataSize(ap->nAcks) + 3 * sizeof(afs_int32),
- sizeof(afs_int32), &tSize);
+ rx_AckDataSize(ap->nAcks) + 3 * (int)sizeof(afs_int32),
+ (int)sizeof(afs_int32), &tSize);
maxDgramPackets = (afs_uint32) ntohl(tSize);
maxDgramPackets = MIN(maxDgramPackets, rxi_nDgramPackets);
maxDgramPackets =
#ifdef RXDEBUG_PACKET
call->rqc -= count;
if ( call->rqc != 0 )
- dpf(("rxi_ClearReceiveQueue call %x rqc %u != 0", call, call->rqc));
+ dpf(("rxi_ClearReceiveQueue call %"AFS_PTR_FMT" rqc %u != 0", call, call->rqc));
#endif
call->flags &= ~(RX_CALL_RECEIVE_DONE | RX_CALL_HAVE_LAST);
}
if (error) {
int i;
- dpf(("rxi_ConnectionError conn %x error %d", conn, error));
+ dpf(("rxi_ConnectionError conn %"AFS_PTR_FMT" error %d", conn, error));
MUTEX_ENTER(&conn->conn_data_lock);
if (conn->challengeEvent)
#ifdef DEBUG
osirx_AssertMine(&call->lock, "rxi_CallError");
#endif
- dpf(("rxi_CallError call %x error %d call->error %d", call, error, call->error));
+ dpf(("rxi_CallError call %"AFS_PTR_FMT" error %d call->error %d", call, error, call->error));
if (call->error)
error = call->error;
#ifdef DEBUG
osirx_AssertMine(&call->lock, "rxi_ResetCall");
#endif
- dpf(("rxi_ResetCall(call %x, newcall %d)\n", call, newcall));
+ dpf(("rxi_ResetCall(call %"AFS_PTR_FMT", newcall %d)\n", call, newcall));
/* Notify anyone who is waiting for asynchronous packet arrival */
if (call->arrivalProc) {
rxi_ClearTransmitQueue(call, 1);
/* why init the queue if you just emptied it? queue_Init(&call->tq); */
if (call->tqWaiters || (flags & RX_CALL_TQ_WAIT)) {
- dpf(("rcall %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
+ dpf(("rcall %"AFS_PTR_FMT" has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
}
call->flags = 0;
while (call->tqWaiters) {
* some of them have been retransmitted more times than more
* recent additions.
* Do a dance to avoid blocking after setting now. */
- clock_Zero(&retryTime);
MUTEX_ENTER(&peer->peer_lock);
- clock_Add(&retryTime, &peer->timeout);
+ retryTime = peer->timeout;
MUTEX_EXIT(&peer->peer_lock);
clock_GetTime(&now);
clock_Add(&retryTime, &now);
if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
/* We shouldn't be sending packets if a thread is waiting
* to initiate congestion recovery */
+ dpf(("call %d waiting to initiate fast recovery\n",
+ *(call->callNumber)));
break;
}
if ((nXmitPackets)
&& (call->flags & RX_CALL_FAST_RECOVER)) {
/* Only send one packet during fast recovery */
+ dpf(("call %d restricted to one packet per send during fast recovery\n",
+ *(call->callNumber)));
break;
}
if ((p->flags & RX_PKTFLAG_FREE)
/* Note: if we're waiting for more window space, we can
* still send retransmits; hence we don't return here, but
* break out to schedule a retransmit event */
- dpf(("call %d waiting for window",
- *(call->callNumber)));
+ dpf(("call %d waiting for window (seq %d, twind %d, nSoftAcked %d, cwind %d)\n",
+ *(call->callNumber), p->header.seq, call->twind, call->nSoftAcked,
+ call->cwind));
break;
}
sizeof(struct rx_packet *));
goto restart;
}
+ dpf(("call %d xmit packet %"AFS_PTR_FMT" now %u.%06u retryTime %u.%06u nextRetry %u.%06u\n",
+ *(call->callNumber), p,
+ now.sec, now.usec,
+ p->retryTime.sec, p->retryTime.usec,
+ retryTime.sec, retryTime.usec));
xmitList[nXmitPackets++] = p;
}
}
if (call->flags & RX_CALL_FAST_RECOVER_WAIT) {
call->flags &= ~RX_CALL_TQ_BUSY;
if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
- dpf(("call %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
+ dpf(("call %"AFS_PTR_FMT" has %d waiters and flags %d\n",
+ call, call->tqWaiters, call->flags));
#ifdef RX_ENABLE_LOCKS
osirx_AssertMine(&call->lock, "rxi_Start start");
CV_BROADCAST(&call->cv_tq);
rx_MutexIncrement(rx_tq_debug.rxi_start_aborted, rx_stats_mutex);
call->flags &= ~RX_CALL_TQ_BUSY;
if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
- dpf(("call %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
+ dpf(("call error %d while xmit %x has %d waiters and flags %d\n",
+ call, call->error, call->tqWaiters, call->flags));
#ifdef RX_ENABLE_LOCKS
osirx_AssertMine(&call->lock, "rxi_Start middle");
CV_BROADCAST(&call->cv_tq);
*/
call->flags &= ~RX_CALL_TQ_BUSY;
if (call->tqWaiters || (call->flags & RX_CALL_TQ_WAIT)) {
- dpf(("call %x has %d waiters and flags %d\n", call, call->tqWaiters, call->flags));
+ dpf(("call %"AFS_PTR_FMT" has %d waiters and flags %d\n",
+ call, call->tqWaiters, call->flags));
#ifdef RX_ENABLE_LOCKS
osirx_AssertMine(&call->lock, "rxi_Start end");
CV_BROADCAST(&call->cv_tq);
return; /* somebody set the clock back, don't count this time. */
}
clock_Sub(rttp, sentp);
+ dpf(("rxi_ComputeRoundTripTime(call=%d packet=%"AFS_PTR_FMT" rttp=%d.%06d sec)\n",
+ p->header.callNumber, p, rttp->sec, rttp->usec));
if (rx_stats_active) {
MUTEX_ENTER(&rx_stats_mutex);
if (clock_Lt(rttp, &rx_stats.minRtt))
clock_Zero(&(peer->timeout));
clock_Addmsec(&(peer->timeout), rtt_timeout);
- dpf(("rxi_ComputeRoundTripTime(rtt=%d ms, srtt=%d ms, rtt_dev=%d ms, timeout=%d.%06d sec)\n", MSEC(rttp), peer->rtt >> 3, peer->rtt_dev >> 2, (peer->timeout.sec), (peer->timeout.usec)));
+ dpf(("rxi_ComputeRoundTripTime(call=%d packet=%"AFS_PTR_FMT" rtt=%d ms, srtt=%d ms, rtt_dev=%d ms, timeout=%d.%06d sec)\n",
+ p->header.callNumber, p, MSEC(rttp), peer->rtt >> 3, peer->rtt_dev >> 2, (peer->timeout.sec), (peer->timeout.usec)));
}
return;
}
- dpf(("CONG peer %lx/%u: sample (%s) size %ld, %ld ms (to %d.%06d, rtt %u, ps %u)", ntohl(peer->host), ntohs(peer->port), (ackReason == RX_ACK_REQUESTED ? "dataack" : "pingack"), xferSize, xferMs, peer->timeout.sec, peer->timeout.usec, peer->smRtt, peer->ifMTU));
+ dpf(("CONG peer %lx/%u: sample (%s) size %ld, %ld ms (to %d.%06d, rtt %u, ps %u)",
+ ntohl(peer->host), ntohs(peer->port), (ackReason == RX_ACK_REQUESTED ? "dataack" : "pingack"),
+ xferSize, xferMs, peer->timeout.sec, peer->timeout.usec, peer->smRtt, peer->ifMTU));
/* Track only packets that are big enough. */
if ((p->length + RX_HEADER_SIZE + call->conn->securityMaxTrailerSize) <
* one packet exchange */
if (clock_Gt(&newTO, &peer->timeout)) {
- dpf(("CONG peer %lx/%u: timeout %d.%06d ==> %ld.%06d (rtt %u, ps %u)", ntohl(peer->host), ntohs(peer->port), peer->timeout.sec, peer->timeout.usec, newTO.sec, newTO.usec, peer->smRtt, peer->packetSize));
+ dpf(("CONG peer %lx/%u: timeout %d.%06d ==> %ld.%06d (rtt %u, ps %u)",
+ ntohl(peer->host), ntohs(peer->port), peer->timeout.sec, peer->timeout.usec,
+ newTO.sec, newTO.usec, peer->smRtt, peer->packetSize));
peer->timeout = newTO;
}
}
tv_delta.tv_sec -= tv_now.tv_sec;
+#ifdef AFS_NT40_ENV
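+ /* Winsock ignores select()'s first (nfds) argument, so pass 0 on Windows. */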
+ code = select(0, &imask, 0, 0, &tv_delta);
+#else /* AFS_NT40_ENV */
code = select(socket + 1, &imask, 0, 0, &tv_delta);
+#endif /* AFS_NT40_ENV */
if (code == 1 && FD_ISSET(socket, &imask)) {
/* now receive a packet */
faddrLen = sizeof(struct sockaddr_in);
* offset only applies to the first iovec.
*/
r = resid;
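+ /* From here on, r counts down the bytes still to copy; resid is left
+  * untouched so the return below can report how many bytes were read. */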
- while ((resid > 0) && (i < packet->niovecs)) {
- j = MIN(resid, packet->wirevec[i].iov_len - (offset - l));
+ while ((r > 0) && (i < packet->niovecs)) {
+ j = MIN(r, packet->wirevec[i].iov_len - (offset - l));
memcpy(out, (char *)(packet->wirevec[i].iov_base) + (offset - l), j);
- resid -= j;
+ r -= j;
out += j;
l += packet->wirevec[i].iov_len;
offset = l;
i++;
}
- return (resid ? (r - resid) : r);
+ return (r ? (resid - r) : resid);
}
afs_int32
rx_SlowWritePacket(struct rx_packet * packet, int offset, int resid, char *in)
{
- int i, j, l, r;
+ unsigned int i, j, l, o, r;
char *b;
- for (l = 0, i = 1; i < packet->niovecs; i++) {
- if (l + packet->wirevec[i].iov_len > offset) {
+ for (l = 0, i = 1, o = offset; i < packet->niovecs; i++) {
+ if (l + packet->wirevec[i].iov_len > o) {
break;
}
l += packet->wirevec[i].iov_len;
* offset only applies to the first iovec.
*/
r = resid;
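+ /* r is the working byte count; resid is preserved so the return below reports bytes written. */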
- while ((resid > 0) && (i <= RX_MAXWVECS)) {
+ while ((r > 0) && (i <= RX_MAXWVECS)) {
if (i >= packet->niovecs)
- if (rxi_AllocDataBuf(packet, resid, RX_PACKET_CLASS_SEND_CBUF) > 0) /* ++niovecs as a side-effect */
+ if (rxi_AllocDataBuf(packet, r, RX_PACKET_CLASS_SEND_CBUF) > 0) /* ++niovecs as a side-effect */
break;
b = (char *)(packet->wirevec[i].iov_base) + (offset - l);
- j = MIN(resid, packet->wirevec[i].iov_len - (offset - l));
+ j = MIN(r, packet->wirevec[i].iov_len - (offset - l));
memcpy(b, in, j);
- resid -= j;
+ r -= j;
in += j;
l += packet->wirevec[i].iov_len;
offset = l;
i++;
}
- return (resid ? (r - resid) : r);
+ return (r ? (resid - r) : resid);
}
int
rxi_FreePacketNoLock(struct rx_packet *p)
{
struct rx_ts_info_t * rx_ts_info;
- dpf(("Free %lx\n", (unsigned long)p));
+ dpf(("Free %"AFS_PTR_FMT"\n", p));
RX_TS_INFO_GET(rx_ts_info);
RX_TS_FPQ_CHECKIN(rx_ts_info,p);
void
rxi_FreePacketNoLock(struct rx_packet *p)
{
- dpf(("Free %lx\n", (unsigned long)p));
+ dpf(("Free %"AFS_PTR_FMT"\n", p));
RX_FPQ_MARK_FREE(p);
rx_nFreePackets++;
rxi_FreePacketTSFPQ(struct rx_packet *p, int flush_global)
{
struct rx_ts_info_t * rx_ts_info;
- dpf(("Free %lx\n", (unsigned long)p));
+ dpf(("Free %"AFS_PTR_FMT"\n", p));
RX_TS_INFO_GET(rx_ts_info);
RX_TS_FPQ_CHECKIN(rx_ts_info,p);
void
rxi_RestoreDataBufs(struct rx_packet *p)
{
- int i;
+ unsigned int i;
struct iovec *iov = &p->wirevec[2];
RX_PACKET_IOV_INIT(p);
RX_TS_FPQ_CHECKOUT(rx_ts_info,p);
- dpf(("Alloc %lx, class %d\n", (unsigned long)p, class));
+ dpf(("Alloc %"AFS_PTR_FMT", class %d\n", p, class));
/* have to do this here because rx_FlushWrite fiddles with the iovs in
queue_Remove(p);
RX_FPQ_MARK_USED(p);
- dpf(("Alloc %lx, class %d\n", (unsigned long)p, class));
+ dpf(("Alloc %"AFS_PTR_FMT", class %d\n", p, class));
/* have to do this here because rx_FlushWrite fiddles with the iovs in
RX_TS_FPQ_CHECKOUT(rx_ts_info,p);
- dpf(("Alloc %lx, class %d\n", (unsigned long)p, class));
+ dpf(("Alloc %"AFS_PTR_FMT", class %d\n", p, class));
/* have to do this here because rx_FlushWrite fiddles with the iovs in
* order to truncate outbound packets. In the near future, may need
(void)rxi_AllocDataBuf(p, (want - p->length),
RX_PACKET_CLASS_SEND_CBUF);
- if ((unsigned)p->length > mud)
+ if (p->length > mud)
p->length = mud;
if (delta >= p->length) {
(void)rxi_AllocDataBuf(p, (want - p->length),
RX_PACKET_CLASS_SEND_CBUF);
- if ((unsigned)p->length > mud)
+ if (p->length > mud)
p->length = mud;
if (delta >= p->length) {
u_short * port)
{
struct sockaddr_in from;
int nbytes;
afs_int32 rlen;
- afs_int32 tlen, savelen;
+ afs_uint32 tlen, savelen;
struct msghdr msg;
rx_computelen(p, tlen);
rx_SetDataSize(p, tlen); /* this is the size of the user data area */
/* restore the vec to its correct state */
p->wirevec[p->niovecs - 1].iov_len = savelen;
- p->length = (nbytes - RX_HEADER_SIZE);
+ p->length = (u_short)(nbytes - RX_HEADER_SIZE);
if ((nbytes > tlen) || (p->length & 0x8000)) { /* Bogus packet */
if (nbytes < 0 && errno == EWOULDBLOCK) {
if (rx_stats_active)
case RX_DEBUGI_GETALLCONN:
case RX_DEBUGI_GETCONN:{
- int i, j;
+ unsigned int i, j;
struct rx_connection *tc;
struct rx_call *tcall;
struct rx_debugConn tconn;
*/
case RX_DEBUGI_GETPEER:{
- int i;
+ unsigned int i;
struct rx_peer *tp;
struct rx_debugPeer tpeer;
afs_int32 ahost, short aport, afs_int32 istack)
{
struct sockaddr_in taddr;
- int i;
- int nbytes;
+ unsigned int i, nbytes, savelen = 0;
int saven = 0;
- size_t savelen = 0;
#ifdef KERNEL
int waslocked = ISAFS_GLOCK();
#endif
#endif
#ifdef RXDEBUG
}
- dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+ dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%0.3d len %d",
+ deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host),
+ ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber,
+ p->header.seq, p->header.flags, p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
if (rx_stats_active)
rx_MutexIncrement(rx_stats.packetsSent[p->header.type - 1], rx_stats_mutex);
assert(p != NULL);
- dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %lx resend %d.%0.3d len %d", deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host), ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber, p->header.seq, p->header.flags, (unsigned long)p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
+ dpf(("%c %d %s: %x.%u.%u.%u.%u.%u.%u flags %d, packet %"AFS_PTR_FMT" resend %d.%0.3d len %d",
+ deliveryType, p->header.serial, rx_packetTypes[p->header.type - 1], ntohl(peer->host),
+ ntohs(peer->port), p->header.serial, p->header.epoch, p->header.cid, p->header.callNumber,
+ p->header.seq, p->header.flags, p, p->retryTime.sec, p->retryTime.usec / 1000, p->length));
#endif
if (rx_stats_active)
struct rx_packet *p, int last)
{
struct rx_connection *conn = call->conn;
- int i;
- ssize_t len; /* len must be a signed type; it can go negative */
+ unsigned int i;
+ afs_int32 len; /* len must be a signed type; it can go negative */
p->flags &= ~RX_PKTFLAG_ACKED;
p->header.cid = (conn->cid | call->channel);