# if defined(AFS_AIX_ENV) || defined(AFS_AUX_ENV) || defined(AFS_SUN5_ENV)
# include "h/systm.h"
# endif
-# ifdef AFS_OSF_ENV
-# include <net/net_globals.h>
-# endif /* AFS_OSF_ENV */
# ifdef AFS_LINUX20_ENV
# include "h/socket.h"
# endif
return 0;
}
if (call->app.mode == RX_MODE_SENDING) {
- MUTEX_EXIT(&call->lock);
- rxi_FlushWrite(call);
- MUTEX_ENTER(&call->lock);
+ rxi_FlushWriteLocked(call);
continue;
}
}
if (call->app.currentPacket) {
if (!(call->flags & RX_CALL_RECEIVE_DONE)) {
if (call->nHardAcks > (u_short) rxi_HardAckRate) {
- rxevent_Cancel(&call->delayedAckEvent, call,
- RX_CALL_REFCOUNT_DELAY);
+ rxi_CancelDelayedAckEvent(call);
rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
} else {
/* Delay to consolidate ack packets */
* send a hard ack. */
if (didConsume && (!(call->flags & RX_CALL_RECEIVE_DONE))) {
if (call->nHardAcks > (u_short) rxi_HardAckRate) {
- rxevent_Cancel(&call->delayedAckEvent, call,
- RX_CALL_REFCOUNT_DELAY);
+ rxi_CancelDelayedAckEvent(call);
rxi_SendAck(call, 0, serial, RX_ACK_DELAY, 0);
didHardAck = 1;
} else {
* conn->securityMaxTrailerSize */
call->app.bytesSent += call->app.currentPacket->length;
rxi_PrepareSendPacket(call, call->app.currentPacket, 0);
-#ifdef RX_ENABLE_LOCKS
/* PrepareSendPacket drops the call lock */
rxi_WaitforTQBusy(call);
-#endif /* RX_ENABLE_LOCKS */
#ifdef RX_TRACK_PACKETS
call->app.currentPacket->flags |= RX_PKTFLAG_TQ;
#endif
/* might be out of space now */
if (!nbytes) {
return requestCount;
- } else; /* more data to send, so get another packet and keep going */
+ } else {
+ /* more data to send, so get another packet and keep going */
+ }
} while (nbytes);
return requestCount - nbytes;
#ifdef RX_TRACK_PACKETS
struct opr_queue *cursor;
#endif
- int nextio;
+ int nextio = 0;
int requestCount;
struct opr_queue tmpq;
#ifdef RXDEBUG_PACKET
#endif
requestCount = nbytes;
- nextio = 0;
MUTEX_ENTER(&call->lock);
if (call->error) {
} else if (call->app.mode != RX_MODE_SENDING) {
call->error = RX_PROTOCOL_ERROR;
}
-#ifdef RX_ENABLE_LOCKS
rxi_WaitforTQBusy(call);
-#endif /* RX_ENABLE_LOCKS */
if (call->error) {
call->app.mode = RX_MODE_ERROR;
* to send. Set RX_PROTOCOL_ERROR if any problems are found in
* the iovec. We put the loop condition at the end to ensure that
* a zero length write will push a short packet. */
- nextio = 0;
opr_queue_Init(&tmpq);
#ifdef RXDEBUG_PACKET
tmpqc = 0;
* conn->securityMaxTrailerSize */
call->app.bytesSent += call->app.currentPacket->length;
rxi_PrepareSendPacket(call, call->app.currentPacket, 0);
-#ifdef RX_ENABLE_LOCKS
/* PrepareSendPacket drops the call lock */
rxi_WaitforTQBusy(call);
-#endif /* RX_ENABLE_LOCKS */
opr_queue_Append(&tmpq, &call->app.currentPacket->entry);
#ifdef RXDEBUG_PACKET
tmpqc++;
}
/* Flush any buffered data to the stream, switch to read mode
- * (clients) or to EOF mode (servers)
+ * (clients) or to EOF mode (servers). If 'locked' is nonzero, call->lock must
+ * be already held.
*
* LOCKS HELD: called at netpri.
*/
-void
-rxi_FlushWrite(struct rx_call *call)
+static void
+FlushWrite(struct rx_call *call, int locked)
{
struct rx_packet *cp = NULL;
}
#endif
- MUTEX_ENTER(&call->lock);
+ if (!locked) {
+ MUTEX_ENTER(&call->lock);
+ }
+
if (call->error)
call->app.mode = RX_MODE_ERROR;
/* The 1 specifies that this is the last packet */
call->app.bytesSent += cp->length;
rxi_PrepareSendPacket(call, cp, 1);
-#ifdef RX_ENABLE_LOCKS
/* PrepareSendPacket drops the call lock */
rxi_WaitforTQBusy(call);
-#endif /* RX_ENABLE_LOCKS */
#ifdef RX_TRACK_PACKETS
cp->flags |= RX_PKTFLAG_TQ;
#endif
if (!(call->flags & RX_CALL_FAST_RECOVER)) {
rxi_Start(call, 0);
}
- MUTEX_EXIT(&call->lock);
+ if (!locked) {
+ MUTEX_EXIT(&call->lock);
+ }
}
}
+/* Flush any buffered write data on 'call'. The caller must NOT hold
+ * call->lock; it is acquired and released internally (locked == 0). */
+void
+rxi_FlushWrite(struct rx_call *call)
+{
+    FlushWrite(call, 0);
+}
+
+/* As rxi_FlushWrite, but for callers that already hold call->lock;
+ * the lock is kept held across the flush (locked == 1). */
+void
+rxi_FlushWriteLocked(struct rx_call *call)
+{
+    FlushWrite(call, 1);
+}
+
/* Flush any buffered data to the stream, switch to read mode
* (clients) or to EOF mode (servers) */
void
{
SPLVAR;
NETPRI;
- rxi_FlushWrite(call);
+ FlushWrite(call, 0);
USERPRI;
}