# endif
# include "h/socket.h"
# if !defined(AFS_SUN5_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_HPUX110_ENV)
-# if !defined(AFS_OSF_ENV) && !defined(AFS_AIX41_ENV)
+# if !defined(AFS_AIX41_ENV)
# include "sys/mount.h" /* it gets pulled in by something later anyway */
# endif
# include "h/mbuf.h"
# include <sys/sysmacros.h>
#endif
+#include <opr/queue.h>
+
#include "rx.h"
#include "rx_clock.h"
-#include "rx_queue.h"
#include "rx_packet.h"
#include "rx_atomic.h"
#include "rx_globals.h"
#include "rx_conn.h"
#include "rx_call.h"
+/*!
+ * \brief Bookkeeping record for one osi_Alloc'd array of rx_packets.
+ *
+ * Every array allocated by the packet-growing paths is registered on
+ * rx_mallocedPacketQueue so that rxi_FreeAllPackets() can later release
+ * each allocation exactly once.
+ */
+struct rx_mallocedPacket {
+ struct opr_queue entry; /*!< chained on rx_mallocedPacketQueue */
+ struct rx_packet *addr; /*!< address of the first element */
+ afs_uint32 size; /*!< array size in bytes */
+};
+
#ifdef RX_LOCKS_DB
/* rxdb_fileID is used to identify the lock location, along with line#. */
static int rxdb_fileID = RXDB_FILE_RX_PACKET;
extern char cml_version_number[];
-static int AllocPacketBufs(int class, int num_pkts, struct rx_queue *q);
+static int AllocPacketBufs(int class, int num_pkts, struct opr_queue *q);
static void rxi_SendDebugPacket(struct rx_packet *apacket, osi_socket asocket,
afs_uint32 ahost, short aport,
static void rxi_FreePacketNoLock(struct rx_packet *p);
static int rxi_FreeDataBufsNoLock(struct rx_packet *p, afs_uint32 first);
static int rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first,
- struct rx_queue * q);
+ struct opr_queue * q);
#endif
+extern struct opr_queue rx_idleServerQueue;
+
/* some rules about packets:
* 1. When a packet is allocated, the final iov_buf contains room for
* a security trailer, but iov_len masks that fact. If the security
}
int
-rxi_AllocPackets(int class, int num_pkts, struct rx_queue * q)
+rxi_AllocPackets(int class, int num_pkts, struct opr_queue * q)
{
- struct rx_packet *p, *np;
+ struct opr_queue *c;
num_pkts = AllocPacketBufs(class, num_pkts, q);
- for (queue_Scan(q, p, np, rx_packet)) {
- RX_PACKET_IOV_FULLINIT(p);
+ for (opr_queue_Scan(q, c)) {
+ RX_PACKET_IOV_FULLINIT(opr_queue_Entry(c, struct rx_packet, entry));
}
return num_pkts;
#ifdef RX_ENABLE_TSFPQ
static int
-AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
+AllocPacketBufs(int class, int num_pkts, struct opr_queue * q)
{
struct rx_ts_info_t * rx_ts_info;
int transfer;
}
#else /* RX_ENABLE_TSFPQ */
static int
-AllocPacketBufs(int class, int num_pkts, struct rx_queue * q)
+AllocPacketBufs(int class, int num_pkts, struct opr_queue * q)
{
struct rx_packet *c;
int i;
}
#endif /* KERNEL */
- for (i=0, c=queue_First(&rx_freePacketQueue, rx_packet);
+ for (i=0, c=opr_queue_First(&rx_freePacketQueue, struct rx_packet, entry);
i < num_pkts;
- i++, c=queue_Next(c, rx_packet)) {
+ i++, c=opr_queue_Next(&c->entry, struct rx_packet, entry)) {
RX_FPQ_MARK_USED(c);
}
- queue_SplitBeforeAppend(&rx_freePacketQueue,q,c);
+ opr_queue_SplitBeforeAppend(&rx_freePacketQueue, q, &c->entry);
rx_nFreePackets -= num_pkts;
#ifdef RX_ENABLE_TSFPQ
/* num_pkts=0 means queue length is unknown */
int
-rxi_FreePackets(int num_pkts, struct rx_queue * q)
+rxi_FreePackets(int num_pkts, struct opr_queue * q)
{
struct rx_ts_info_t * rx_ts_info;
- struct rx_packet *c, *nc;
+ struct opr_queue *cursor, *store;
SPLVAR;
osi_Assert(num_pkts >= 0);
RX_TS_INFO_GET(rx_ts_info);
if (!num_pkts) {
- for (queue_Scan(q, c, nc, rx_packet), num_pkts++) {
- rxi_FreeDataBufsTSFPQ(c, 2, 0);
+ for (opr_queue_ScanSafe(q, cursor, store)) {
+ num_pkts++;
+ rxi_FreeDataBufsTSFPQ(opr_queue_Entry(cursor, struct rx_packet,
+ entry), 2, 0);
}
} else {
- for (queue_Scan(q, c, nc, rx_packet)) {
- rxi_FreeDataBufsTSFPQ(c, 2, 0);
+ for (opr_queue_ScanSafe(q, cursor, store)) {
+ rxi_FreeDataBufsTSFPQ(opr_queue_Entry(cursor, struct rx_packet,
+ entry), 2, 0);
}
}
#else /* RX_ENABLE_TSFPQ */
/* num_pkts=0 means queue length is unknown */
int
-rxi_FreePackets(int num_pkts, struct rx_queue *q)
+rxi_FreePackets(int num_pkts, struct opr_queue *q)
{
- struct rx_queue cbs;
- struct rx_packet *p, *np;
+ struct opr_queue cbs;
+ struct opr_queue *cursor, *store;
int qlen = 0;
SPLVAR;
osi_Assert(num_pkts >= 0);
- queue_Init(&cbs);
+ opr_queue_Init(&cbs);
if (!num_pkts) {
- for (queue_Scan(q, p, np, rx_packet), num_pkts++) {
+ for (opr_queue_ScanSafe(q, cursor, store)) {
+ struct rx_packet *p
+ = opr_queue_Entry(cursor, struct rx_packet, entry);
if (p->niovecs > 2) {
qlen += rxi_FreeDataBufsToQueue(p, 2, &cbs);
}
RX_FPQ_MARK_FREE(p);
+ num_pkts++;
}
if (!num_pkts)
return 0;
} else {
- for (queue_Scan(q, p, np, rx_packet)) {
+ for (opr_queue_ScanSafe(q, cursor, store)) {
+ struct rx_packet *p
+ = opr_queue_Entry(cursor, struct rx_packet, entry);
+
if (p->niovecs > 2) {
qlen += rxi_FreeDataBufsToQueue(p, 2, &cbs);
}
}
if (qlen) {
- queue_SpliceAppend(q, &cbs);
+ opr_queue_SpliceAppend(q, &cbs);
qlen += num_pkts;
} else
qlen = num_pkts;
NETPRI;
MUTEX_ENTER(&rx_freePktQ_lock);
- queue_SpliceAppend(&rx_freePacketQueue, q);
+ opr_queue_SpliceAppend(&rx_freePacketQueue, q);
rx_nFreePackets += qlen;
/* Wakeup anyone waiting for packets */
rxi_AllocDataBuf(struct rx_packet *p, int nb, int class)
{
int i, nv;
- struct rx_queue q;
- struct rx_packet *cb, *ncb;
+ struct opr_queue q, *cursor, *store;
/* compute the number of cbuf's we need */
nv = nb / RX_CBUFFERSIZE;
return nb;
/* allocate buffers */
- queue_Init(&q);
+ opr_queue_Init(&q);
nv = AllocPacketBufs(class, nv, &q);
/* setup packet iovs */
- for (i = p->niovecs, queue_Scan(&q, cb, ncb, rx_packet), i++) {
- queue_Remove(cb);
+ i = p ->niovecs;
+ for (opr_queue_ScanSafe(&q, cursor, store)) {
+ struct rx_packet *cb
+ = opr_queue_Entry(cursor, struct rx_packet, entry);
+
+ opr_queue_Remove(&cb->entry);
p->wirevec[i].iov_base = (caddr_t) cb->localdata;
p->wirevec[i].iov_len = RX_CBUFFERSIZE;
+ i++;
}
nb -= (nv * RX_CBUFFERSIZE);
return nb;
}
+/**
+ * Register an allocated packet array.
+ *
+ * Records the array on rx_mallocedPacketQueue (under
+ * rx_mallocedPktQ_lock) so that rxi_FreeAllPackets() can free it later.
+ *
+ * @param[in] addr array of packets
+ * @param[in] npkt number of packets in the array
+ *
+ * @return none
+ */
+static void
+registerPackets(struct rx_packet *addr, afs_uint32 npkt)
+{
+ struct rx_mallocedPacket *mp;
+
+ mp = osi_Alloc(sizeof(*mp));
+
+ osi_Assert(mp != NULL);
+ memset(mp, 0, sizeof(*mp));
+
+ /* Verify npkt * sizeof(struct rx_packet) cannot overflow afs_uint32
+ * BEFORE performing the multiplication below. */
+ osi_Assert(npkt <= MAX_AFS_UINT32 / sizeof(struct rx_packet));
+
+ mp->addr = addr;
+ mp->size = npkt * sizeof(struct rx_packet);
+
+ MUTEX_ENTER(&rx_mallocedPktQ_lock);
+ opr_queue_Append(&rx_mallocedPacketQueue, &mp->entry);
+ MUTEX_EXIT(&rx_mallocedPktQ_lock);
+}
+
/* Add more packet buffers */
#ifdef RX_ENABLE_TSFPQ
void
getme = apackets * sizeof(struct rx_packet);
p = osi_Alloc(getme);
osi_Assert(p);
+ registerPackets(p, apackets);
PIN(p, getme); /* XXXXX */
memset(p, 0, getme);
getme = apackets * sizeof(struct rx_packet);
p = osi_Alloc(getme);
osi_Assert(p);
+ registerPackets(p, apackets);
PIN(p, getme); /* XXXXX */
memset(p, 0, getme);
#endif
p->niovecs = 2;
- queue_Append(&rx_freePacketQueue, p);
+ opr_queue_Append(&rx_freePacketQueue, &p->entry);
#ifdef RXDEBUG_PACKET
p->packetId = rx_packet_id++;
p->allNextp = rx_mallocedP;
getme = apackets * sizeof(struct rx_packet);
p = osi_Alloc(getme);
+ registerPackets(p, apackets);
PIN(p, getme); /* XXXXX */
memset(p, 0, getme);
}
} while(p == NULL);
memset(p, 0, getme);
+ registerPackets(p, apackets);
#ifdef RX_ENABLE_TSFPQ
RX_TS_INFO_GET(rx_ts_info);
#endif
p->niovecs = 2;
- queue_Append(&rx_freePacketQueue, p);
+ opr_queue_Append(&rx_freePacketQueue, &p->entry);
#ifdef RXDEBUG_PACKET
p->packetId = rx_packet_id++;
p->allNextp = rx_mallocedP;
void
rxi_FreeAllPackets(void)
{
- /* must be called at proper interrupt level, etcetera */
- /* MTUXXX need to free all Packets */
- osi_Free(rx_mallocedP,
- (rx_maxReceiveWindow + 2) * sizeof(struct rx_packet));
- UNPIN(rx_mallocedP, (rx_maxReceiveWindow + 2) * sizeof(struct rx_packet));
+ struct rx_mallocedPacket *mp;
+
+ MUTEX_ENTER(&rx_mallocedPktQ_lock);
+
+ /* Drain every array recorded by registerPackets(), freeing both the
+ * packet storage and its bookkeeping record. */
+ while (!opr_queue_IsEmpty(&rx_mallocedPacketQueue)) {
+ mp = opr_queue_First(&rx_mallocedPacketQueue,
+ struct rx_mallocedPacket, entry);
+ opr_queue_Remove(&mp->entry);
+ osi_Free(mp->addr, mp->size);
+ /* NOTE(review): UNPIN runs on a range osi_Free has just released
+ * (the pre-patch code had the same ordering) — confirm UNPIN only
+ * uses the address/size and never touches the memory. */
+ UNPIN(mp->addr, mp->size);
+ osi_Free(mp, sizeof(*mp));
+ }
+ MUTEX_EXIT(&rx_mallocedPktQ_lock);
}
#ifdef RX_ENABLE_TSFPQ
RX_FPQ_MARK_FREE(p);
rx_nFreePackets++;
- queue_Append(&rx_freePacketQueue, p);
+ opr_queue_Append(&rx_freePacketQueue, &p->entry);
}
#endif /* RX_ENABLE_TSFPQ */
*/
#ifndef RX_ENABLE_TSFPQ
static int
-rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first, struct rx_queue * q)
+rxi_FreeDataBufsToQueue(struct rx_packet *p, afs_uint32 first, struct opr_queue * q)
{
struct iovec *iov;
struct rx_packet * cb;
osi_Panic("rxi_FreeDataBufsToQueue: unexpected NULL iov");
cb = RX_CBUF_TO_PACKET(iov->iov_base, p);
RX_FPQ_MARK_FREE(cb);
- queue_Append(q, cb);
+ opr_queue_Append(q, &cb->entry);
}
p->length = 0;
p->niovecs = 0;
RX_TS_INFO_GET(rx_ts_info);
-#ifdef KERNEL
- if (rxi_OverQuota(class)) {
- rxi_NeedMorePackets = TRUE;
- if (rx_stats_active) {
- switch (class) {
- case RX_PACKET_CLASS_RECEIVE:
- rx_atomic_inc(rx_stats.receivePktAllocFailures);
- break;
- case RX_PACKET_CLASS_SEND:
- rx_atomic_inc(&rx_stats.sendPktAllocFailures);
- break;
- case RX_PACKET_CLASS_SPECIAL:
- rx_atomic_inc(&rx_stats.specialPktAllocFailures);
- break;
- case RX_PACKET_CLASS_RECV_CBUF:
- rx_atomic_inc(&rx_stats.receiveCbufPktAllocFailures);
- break;
- case RX_PACKET_CLASS_SEND_CBUF:
- rx_atomic_inc(&rx_stats.sendCbufPktAllocFailures);
- break;
- }
- }
- return (struct rx_packet *)0;
- }
-#endif /* KERNEL */
-
if (rx_stats_active)
rx_atomic_inc(&rx_stats.packetRequests);
- if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
+ if (opr_queue_IsEmpty(&rx_ts_info->_FPQ.queue)) {
#ifdef KERNEL
- if (queue_IsEmpty(&rx_freePacketQueue))
+ if (opr_queue_IsEmpty(&rx_freePacketQueue))
osi_Panic("rxi_AllocPacket error");
#else /* KERNEL */
- if (queue_IsEmpty(&rx_freePacketQueue))
+ if (opr_queue_IsEmpty(&rx_freePacketQueue))
rxi_MorePacketsNoLock(rx_maxSendWindow);
#endif /* KERNEL */
rx_atomic_inc(&rx_stats.packetRequests);
#ifdef KERNEL
- if (queue_IsEmpty(&rx_freePacketQueue))
+ if (opr_queue_IsEmpty(&rx_freePacketQueue))
osi_Panic("rxi_AllocPacket error");
#else /* KERNEL */
- if (queue_IsEmpty(&rx_freePacketQueue))
+ if (opr_queue_IsEmpty(&rx_freePacketQueue))
rxi_MorePacketsNoLock(rx_maxSendWindow);
#endif /* KERNEL */
rx_nFreePackets--;
- p = queue_First(&rx_freePacketQueue, rx_packet);
- queue_Remove(p);
+ p = opr_queue_First(&rx_freePacketQueue, struct rx_packet, entry);
+ opr_queue_Remove(&p->entry);
RX_FPQ_MARK_USED(p);
dpf(("Alloc %"AFS_PTR_FMT", class %d\n", p, class));
if (rx_stats_active)
rx_atomic_inc(&rx_stats.packetRequests);
- if (pull_global && queue_IsEmpty(&rx_ts_info->_FPQ)) {
+ if (pull_global && opr_queue_IsEmpty(&rx_ts_info->_FPQ.queue)) {
MUTEX_ENTER(&rx_freePktQ_lock);
- if (queue_IsEmpty(&rx_freePacketQueue))
+ if (opr_queue_IsEmpty(&rx_freePacketQueue))
rxi_MorePacketsNoLock(rx_maxSendWindow);
RX_TS_FPQ_GTOL(rx_ts_info);
MUTEX_EXIT(&rx_freePktQ_lock);
- } else if (queue_IsEmpty(&rx_ts_info->_FPQ)) {
+ } else if (opr_queue_IsEmpty(&rx_ts_info->_FPQ.queue)) {
return NULL;
}
np->header = p->header;
np->header.serial = p->header.serial + 1;
np->header.seq = p->header.seq + 1;
+ np->header.userStatus = 0;
np->header.flags = jp->flags;
np->header.spare = jp->cksum;
{
struct rx_debugIn tin;
afs_int32 tl;
- struct rx_serverQueueEntry *np, *nqe;
/*
* Only respond to client-initiated Rx debug packets,
tstat.usedFDs = CountFDs(64);
tstat.nWaiting = htonl(rx_atomic_read(&rx_nWaiting));
tstat.nWaited = htonl(rx_atomic_read(&rx_nWaited));
- queue_Count(&rx_idleServerQueue, np, nqe, rx_serverQueueEntry,
- tstat.idleThreads);
+ tstat.idleThreads = opr_queue_Count(&rx_idleServerQueue);
MUTEX_EXIT(&rx_serverPool_lock);
tstat.idleThreads = htonl(tstat.idleThreads);
tl = sizeof(struct rx_debugStats) - ap->length;
tconn.callNumber[j] = htonl(tc->callNumber[j]);
if ((tcall = tc->call[j])) {
tconn.callState[j] = tcall->state;
- tconn.callMode[j] = tcall->mode;
+ tconn.callMode[j] = tcall->app.mode;
tconn.callFlags[j] = tcall->flags;
- if (queue_IsNotEmpty(&tcall->rq))
+ if (!opr_queue_IsEmpty(&tcall->rq))
tconn.callOther[j] |= RX_OTHER_IN;
- if (queue_IsNotEmpty(&tcall->tq))
+ if (!opr_queue_IsEmpty(&tcall->tq))
tconn.callOther[j] |= RX_OTHER_OUT;
} else
tconn.callState[j] = RX_STATE_NOTINIT;
return ap;
/* Since its all int32s convert to network order with a loop. */
- if (rx_stats_active)
- MUTEX_ENTER(&rx_stats_mutex);
+ if (rx_stats_active)
+ MUTEX_ENTER(&rx_stats_mutex);
s = (afs_int32 *) & rx_stats;
for (i = 0; i < sizeof(rx_stats) / sizeof(afs_int32); i++, s++)
rx_PutInt32(ap, i * sizeof(afs_int32), htonl(*s));
tl = ap->length;
ap->length = sizeof(rx_stats);
- if (rx_stats_active)
- MUTEX_EXIT(&rx_stats_mutex);
+ if (rx_stats_active)
+ MUTEX_EXIT(&rx_stats_mutex);
rxi_SendDebugPacket(ap, asocket, ahost, aport, istack);
ap->length = tl;
break;
taddr.sin_family = AF_INET;
taddr.sin_port = aport;
taddr.sin_addr.s_addr = ahost;
+ memset(&taddr.sin_zero, 0, sizeof(taddr.sin_zero));
#ifdef STRUCT_SOCKADDR_HAS_SA_LEN
taddr.sin_len = sizeof(struct sockaddr_in);
#endif
}
+/*!
+ * \brief Examine a failed send's error code and, when it indicates an
+ * unreachable host or network, "down" the peer immediately instead of
+ * waiting for the call to time out later.
+ *
+ * Factored out of the per-platform #ifdef condition that previously
+ * appeared inline at each osi_NetSend failure site.
+ *
+ * \param call the call on which the send failed
+ * \param code the error value returned by the send
+ */
+static void
+rxi_NetSendError(struct rx_call *call, int code)
+{
+ int down = 0;
+#ifdef AFS_NT40_ENV
+ if (code == -1 && WSAGetLastError() == WSAEHOSTUNREACH) {
+ down = 1;
+ }
+ if (code == -WSAEHOSTUNREACH) {
+ down = 1;
+ }
+#elif defined(AFS_LINUX20_ENV)
+ if (code == -ENETUNREACH) {
+ down = 1;
+ }
+#elif defined(AFS_DARWIN_ENV)
+ if (code == EHOSTUNREACH) {
+ down = 1;
+ }
+#endif
+ if (down) {
+ /* Zeroing lastReceiveTime makes the timeout logic treat the peer
+ * as already expired, so the host is marked down right away. */
+ call->lastReceiveTime = 0;
+ }
+}
+
/* Send the packet to appropriate destination for the specified
* call. The header is first encoded and placed in the packet.
*/
addr.sin_family = AF_INET;
addr.sin_port = peer->port;
addr.sin_addr.s_addr = peer->host;
+ memset(&addr.sin_zero, 0, sizeof(addr.sin_zero));
/* This stuff should be revamped, I think, so that most, if not
* all, of the header stuff is always added here. We could
* So, when this happens let's "down" the host NOW so
* we don't sit around waiting for this host to timeout later.
*/
- if (call &&
-#ifdef AFS_NT40_ENV
- (code == -1 && WSAGetLastError() == WSAEHOSTUNREACH) || (code == -WSAEHOSTUNREACH)
-#elif defined(AFS_LINUX20_ENV)
- code == -ENETUNREACH
-#elif defined(AFS_DARWIN_ENV)
- code == EHOSTUNREACH
-#else
- 0
-#endif
- )
- call->lastReceiveTime = 0;
+ if (call) {
+ rxi_NetSendError(call, code);
+ }
}
#ifdef KERNEL
#ifdef RX_KERNEL_TRACE
addr.sin_family = AF_INET;
addr.sin_port = peer->port;
addr.sin_addr.s_addr = peer->host;
+ memset(&addr.sin_zero, 0, sizeof(addr.sin_zero));
if (len + 1 > RX_MAXIOVECS) {
osi_Panic("rxi_SendPacketList, len > RX_MAXIOVECS\n");
conn->serial += len;
for (i = 0; i < len; i++) {
p = list[i];
+ /* a ping *or* a sequenced packet can count */
if (p->length > conn->peer->maxPacketSize) {
- /* a ping *or* a sequenced packet can count */
- if ((p->length > conn->peer->maxPacketSize)) {
- if (((p->header.type == RX_PACKET_TYPE_ACK) &&
- (p->header.flags & RX_REQUEST_ACK)) &&
- ((i == 0) || (p->length >= conn->lastPingSize))) {
- conn->lastPingSize = p->length;
- conn->lastPingSizeSer = serial + i;
- } else if ((p->header.seq != 0) &&
- ((i == 0) || (p->length >= conn->lastPacketSize))) {
- conn->lastPacketSize = p->length;
- conn->lastPacketSizeSeq = p->header.seq;
- }
+ if (((p->header.type == RX_PACKET_TYPE_ACK) &&
+ (p->header.flags & RX_REQUEST_ACK)) &&
+ ((i == 0) || (p->length >= conn->lastPingSize))) {
+ conn->lastPingSize = p->length;
+ conn->lastPingSizeSer = serial + i;
+ } else if ((p->header.seq != 0) &&
+ ((i == 0) || (p->length >= conn->lastPacketSize))) {
+ conn->lastPacketSize = p->length;
+ conn->lastPacketSizeSeq = p->header.seq;
}
}
}
* So, when this happens let's "down" the host NOW so
* we don't sit around waiting for this host to timeout later.
*/
- if (call &&
-#ifdef AFS_NT40_ENV
- (code == -1 && WSAGetLastError() == WSAEHOSTUNREACH) || (code == -WSAEHOSTUNREACH)
-#elif defined(AFS_LINUX20_ENV)
- code == -ENETUNREACH
-#elif defined(AFS_DARWIN_ENV)
- code == EHOSTUNREACH
-#else
- 0
-#endif
- )
- call->lastReceiveTime = 0;
+ if (call) {
+ rxi_NetSendError(call, code);
+ }
}
#if defined(AFS_SUN5_ENV) && defined(KERNEL)
if (!istack && waslocked)
/* Send a raw abort packet, without any call or connection structures */
void
rxi_SendRawAbort(osi_socket socket, afs_uint32 host, u_short port,
- afs_int32 error, struct rx_packet *source, int istack)
+ afs_uint32 serial, afs_int32 error,
+ struct rx_packet *source, int istack)
{
struct rx_header theader;
struct sockaddr_in addr;
memset(&theader, 0, sizeof(theader));
theader.epoch = htonl(source->header.epoch);
theader.callNumber = htonl(source->header.callNumber);
- theader.serial = htonl(1);
+ theader.serial = htonl(serial);
theader.type = RX_PACKET_TYPE_ABORT;
theader.serviceId = htons(source->header.serviceId);
theader.securityIndex = source->header.securityIndex;
theader.cid = htonl(source->header.cid);
+ /*
+ * If the abort is being sent in response to a server initiated packet,
+ * set client_initiated in the abort to ensure it is not associated by
+ * the receiver with a connection in the opposite direction.
+ */
+ if ((source->header.flags & RX_CLIENT_INITIATED) != RX_CLIENT_INITIATED)
+ theader.flags |= RX_CLIENT_INITIATED;
+
error = htonl(error);
iov[0].iov_base = &theader;
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = host;
addr.sin_port = port;
+ memset(&addr.sin_zero, 0, sizeof(addr.sin_zero));
#ifdef STRUCT_SOCKADDR_HAS_SA_LEN
addr.sin_len = sizeof(struct sockaddr_in);
#endif
p->header.seq = 0;
p->header.epoch = conn->epoch;
p->header.type = type;
+ p->header.userStatus = 0;
p->header.flags = 0;
if (conn->type == RX_CLIENT_CONNECTION)
p->header.flags |= RX_CLIENT_INITIATED;
afs_uint32 seq = call->tnext++;
unsigned int i;
afs_int32 len; /* len must be a signed type; it can go negative */
+ int code;
/* No data packets on call 0. Where do these come from? */
if (*call->callNumber == 0)
p->header.seq = seq;
p->header.epoch = conn->epoch;
p->header.type = RX_PACKET_TYPE_DATA;
+ p->header.userStatus = 0;
p->header.flags = 0;
p->header.spare = 0;
if (conn->type == RX_CLIENT_CONNECTION)
if (len)
p->wirevec[i - 1].iov_len += len;
MUTEX_ENTER(&call->lock);
- RXS_PreparePacket(conn->securityObject, call, p);
+ code = RXS_PreparePacket(conn->securityObject, call, p);
+ if (code) {
+ MUTEX_EXIT(&call->lock);
+ rxi_ConnectionError(conn, code);
+ MUTEX_ENTER(&conn->conn_data_lock);
+ p = rxi_SendConnectionAbort(conn, p, 0, 0);
+ MUTEX_EXIT(&conn->conn_data_lock);
+ MUTEX_ENTER(&call->lock);
+ /* setting a connection error means all calls for that conn are also
+ * error'd. if this call does not have an error by now, something is
+ * very wrong, and we risk sending data in the clear that is supposed
+ * to be encrypted. */
+ osi_Assert(call->error);
+ }
}
/* Given an interface MTU size, calculate an adjusted MTU size that