/*
 * Copyright 2000, International Business Machines Corporation and others.
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
#include "../afs/param.h"
#if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
#include "../afs/sysincludes.h"
#include "../h/types.h"
#include "../h/time.h"
#include "../h/stat.h"
#include <net/net_globals.h>
#endif /* AFS_OSF_ENV */
#ifdef AFS_LINUX20_ENV
#include "../h/socket.h"
#include "../netinet/in.h"
#if defined(AFS_SGI_ENV)
#include "../afs/sysincludes.h"
#include "../afs/afs_args.h"
#include "../afs/afs_osi.h"
#if (defined(AFS_AUX_ENV) || defined(AFS_AIX_ENV))
#include "../h/systm.h"
#include "../afs/sysincludes.h"
#undef RXDEBUG		/* turn off debugging */
#include "../rx/rx_kmutex.h"
#include "../rx/rx_kernel.h"
#include "../rx/rx_clock.h"
#include "../rx/rx_queue.h"
#include "../rx/rx_globals.h"
#include "../afs/lock.h"
#include "../afsint/afsint.h"
#endif /* AFS_ALPHA_ENV */
# include <afs/param.h>
# include <sys/types.h>
# include <sys/socket.h>
# include <sys/file.h>
# include <netinet/in.h>
# include <sys/stat.h>
# include <sys/time.h>
# include "rx_clock.h"
# include "rx_queue.h"
# include "rx_globals.h"
# include "rx_internal.h"
/* rxdb_fileID is used to identify the lock location, along with line#. */
static int rxdb_fileID = RXDB_FILE_RX_RDWR;
#endif /* RX_LOCKS_DB */
/* rxi_ReadProc -- internal version.
 * LOCKS USED -- called at netpri with rx global lock and call->lock held.
 */
int rxi_ReadProc(call, buf, nbytes)
    register struct rx_call *call;
    register struct rx_packet *cp = call->currentPacket;
    register struct rx_packet *rp;
    register struct rx_packet *nxp;	/* Next packet pointer, for queue_Scan */
    register int requestCount;
    register unsigned int t;
    /* XXXX took out clock_NewTime from here.  Was it needed? */
    requestCount = nbytes;
    /* Free any packets from the last call to ReadvProc/WritevProc */
    if (!queue_IsEmpty(&call->iovq)) {
        for (queue_Scan(&call->iovq, rp, nxp, rx_packet)) {
    if (call->nLeft == 0) {
        /* Get next packet */
        if (call->error || (call->mode != RX_MODE_RECEIVING)) {
            if (call->mode == RX_MODE_SENDING) {
                rxi_FlushWrite(call);
        if (queue_IsNotEmpty(&call->rq)) {
            /* Check that next packet available is next in sequence */
            rp = queue_First(&call->rq, rx_packet);
            if (rp->header.seq == call->rnext) {
                register struct rx_connection *conn = call->conn;
                /* RXS_CheckPacket called to undo RXS_PreparePacket's
                 * work.  It may reduce the length of the packet by up
                 * to conn->maxTrailerSize, to reflect the length of the
                 * data + the header. */
                if ((error = RXS_CheckPacket(conn->securityObject, call, rp))) {
                    /* Used to merely shut down the call, but now we
                     * shut down the whole connection since this may
                     * indicate an attempt to hijack it */
                    MUTEX_EXIT(&call->lock);
                    rxi_ConnectionError(conn, error);
                    MUTEX_ENTER(&conn->conn_data_lock);
                    rp = rxi_SendConnectionAbort(conn, rp, 0, 0);
                    MUTEX_EXIT(&conn->conn_data_lock);
                    MUTEX_ENTER(&call->lock);
                cp = call->currentPacket = rp;
                call->curvec = 1;	/* 0th vec is always header */
                /* begin at the beginning [ more or less ], continue
                 * on until the end, then stop. */
                call->curpos = (char *)cp->wirevec[1].iov_base
                               + call->conn->securityHeaderSize;
                call->curlen = cp->wirevec[1].iov_len
                               - call->conn->securityHeaderSize;
                /* Notice that this code works correctly if the data
                 * size is 0 (which it may be--no reply arguments from
                 * server, for example).  This relies heavily on the
                 * fact that the code below immediately frees the packet
                 * (no yields, etc.).  If it didn't, this would be a
                 * problem because a value of zero for call->nLeft
                 * normally means that there is no read packet */
                call->nLeft = cp->length;
                hadd32(call->bytesRcvd, cp->length);
                /* Send a hard ack for every rxi_HardAckRate+1 packets
                 * consumed.  Otherwise schedule an event to send
                 * the hard ack later on.
                 */
                if (!(call->flags & RX_CALL_RECEIVE_DONE)) {
                    if (call->nHardAcks > (u_short)rxi_HardAckRate) {
                        rxevent_Cancel(call->delayedAckEvent, call,
                                       RX_CALL_REFCOUNT_DELAY);
                        rxi_SendAck(call, 0, 0, 0, 0, RX_ACK_DELAY, 0);
                        clock_GetTime(&when);
                        /* Delay to consolidate ack packets */
                        clock_Add(&when, &rx_hardAckDelay);
                        if (!call->delayedAckEvent ||
                            clock_Gt(&call->delayedAckEvent->eventTime, &when)) {
                            rxevent_Cancel(call->delayedAckEvent, call,
                                           RX_CALL_REFCOUNT_DELAY);
                            CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
                            call->delayedAckEvent = rxevent_Post(&when,
                /* MTUXXX  doesn't there need to be an "else" here ??? */
            /* Are there ever going to be any more packets? */
            if (call->flags & RX_CALL_RECEIVE_DONE) {
                return requestCount - nbytes;
            /* Wait for in-sequence packet */
            call->flags |= RX_CALL_READER_WAIT;
            call->startWait = clock_Sec();
            while (call->flags & RX_CALL_READER_WAIT) {
#ifdef RX_ENABLE_LOCKS
                CV_WAIT(&call->cv_rq, &call->lock);
                osi_rxSleep(&call->rq);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
        else /* assert(cp); */ /* MTUXXX this should be replaced by some error-recovery code before shipping */
            /* yes, the following block is allowed to be the ELSE clause (or not) */
        /* It's possible for call->nLeft to be smaller than any particular
         * iov_len.  Usually, recvmsg doesn't change the iov_len, since it
         * reflects the size of the buffer.  We have to keep track of the
         * number of bytes read in the length field of the packet struct.  On
         * the final portion of a received packet, it's almost certain that
         * call->nLeft will be smaller than the final buffer. */
        while (nbytes && cp) {
            t = MIN((int)call->curlen, nbytes);
            t = MIN(t, (int)call->nLeft);
            bcopy(call->curpos, buf, t);
            /* out of packet.  Get another one. */
            cp = call->currentPacket = (struct rx_packet *)0;
            else if (!call->curlen) {
                /* need to get another struct iov */
                if (++call->curvec >= cp->niovecs) {
                    /* current packet is exhausted, get ready for another */
                    /* don't worry about curvec and stuff, they get set somewhere else */
                    cp = call->currentPacket = (struct rx_packet *)0;
                call->curpos = (char *)cp->wirevec[call->curvec].iov_base;
                call->curlen = cp->wirevec[call->curvec].iov_len;
        /* user buffer is full, return */
int rx_ReadProc(call, buf, nbytes)
    struct rx_call *call;
    /*
     * Free any packets from the last call to ReadvProc/WritevProc.
     * We do not need the lock because the receiver threads only
     * touch the iovq when the RX_CALL_IOVEC_WAIT flag is set, and the
     * RX_CALL_IOVEC_WAIT is always cleared before returning from
     * ReadvProc/WritevProc.
     */
    if (!queue_IsEmpty(&call->iovq)) {
        register struct rx_packet *rp;
        register struct rx_packet *nxp;
        for (queue_Scan(&call->iovq, rp, nxp, rx_packet)) {
    /*
     * Most common case, all of the data is in the current iovec.
     * We do not need the lock because this is the only thread that
     * updates the curlen, curpos, nLeft fields.
     *
     * We are relying on nLeft being zero unless the call is in receive mode.
     */
    tcurlen = call->curlen;
    tnLeft = call->nLeft;
    if (!call->error && tcurlen > nbytes && tnLeft > nbytes) {
        tcurpos = call->curpos;
        bcopy(tcurpos, buf, nbytes);
        call->curpos = tcurpos + nbytes;
        call->curlen = tcurlen - nbytes;
        call->nLeft = tnLeft - nbytes;
    MUTEX_ENTER(&call->lock);
    bytes = rxi_ReadProc(call, buf, nbytes);
    MUTEX_EXIT(&call->lock);
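
#if 0
/* Illustrative sketch only, not compiled: one way a caller might drain a
 * call's receive stream with rx_ReadProc above.  The helper name
 * example_ReadStream, the call handle "acall", and the 4K chunk size are
 * assumptions made for this example; a short return from rx_ReadProc means
 * the stream ended or call->error was set. */
static int example_ReadStream(acall)
    struct rx_call *acall;
{
    char chunk[4096];
    int nread, total = 0;

    /* rx_ReadProc returns the number of bytes copied into the buffer. */
    while ((nread = rx_ReadProc(acall, chunk, sizeof(chunk))) > 0) {
        total += nread;
        if (nread < (int)sizeof(chunk))
            break;	/* short read: no more data is coming */
    }
    return total;
}
#endif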
/* Optimization for unmarshalling 32 bit integers */
int rx_ReadProc32(call, value)
    struct rx_call *call;
    /*
     * Free any packets from the last call to ReadvProc/WritevProc.
     * We do not need the lock because the receiver threads only
     * touch the iovq when the RX_CALL_IOVEC_WAIT flag is set, and the
     * RX_CALL_IOVEC_WAIT is always cleared before returning from
     * ReadvProc/WritevProc.
     */
    if (!queue_IsEmpty(&call->iovq)) {
        register struct rx_packet *rp;
        register struct rx_packet *nxp;
        for (queue_Scan(&call->iovq, rp, nxp, rx_packet)) {
    /*
     * Most common case, all of the data is in the current iovec.
     * We do not need the lock because this is the only thread that
     * updates the curlen, curpos, nLeft fields.
     *
     * We are relying on nLeft being zero unless the call is in receive mode.
     */
    tcurlen = call->curlen;
    tnLeft = call->nLeft;
    if (!call->error && tcurlen > sizeof(afs_int32) && tnLeft > sizeof(afs_int32)) {
        tcurpos = call->curpos;
        if (!((long)tcurpos & (sizeof(afs_int32) - 1))) {
            *value = *((afs_int32 *)(tcurpos));
            bcopy(tcurpos, (char *)value, sizeof(afs_int32));
        call->curpos = tcurpos + sizeof(afs_int32);
        call->curlen = tcurlen - sizeof(afs_int32);
        call->nLeft = tnLeft - sizeof(afs_int32);
        return sizeof(afs_int32);
    MUTEX_ENTER(&call->lock);
    bytes = rxi_ReadProc(call, (char *)value, sizeof(afs_int32));
    MUTEX_EXIT(&call->lock);
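
#if 0
/* Illustrative sketch only, not compiled: the unmarshalling pattern that
 * rx_ReadProc32 above is meant to accelerate -- a 32-bit count followed by
 * that many bytes of payload.  Byte-order conversion is normally the job of
 * the rxgen/XDR layer; the ntohl call and the helper name are assumptions
 * made for this example. */
static int example_ReadCounted(acall, buf, buflen)
    struct rx_call *acall;
    char *buf;
    int buflen;
{
    afs_int32 len;

    if (rx_ReadProc32(acall, &len) != sizeof(afs_int32))
        return -1;		/* stream ended early or call error */
    len = ntohl(len);		/* assumes the sender used htonl */
    if (len < 0 || len > buflen)
        return -1;		/* implausible length */
    if (rx_ReadProc(acall, buf, (int)len) != (int)len)
        return -1;		/* short read */
    return (int)len;
}
#endif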
/* Uses packets in the receive queue to fill in as much of the
 * current iovec as possible.  Does not block if it runs out
 * of packets to complete the iovec.  Return true if an ack packet
 * was sent, otherwise return false */
int rxi_FillReadVec(call, seq, serial, flags)
    struct rx_call *call;
    afs_uint32 seq, serial, flags;
    register unsigned int t;
    struct rx_packet *rp;
    struct rx_packet *curp;
    struct iovec *call_iov;
    struct iovec *cur_iov;
    curp = call->currentPacket;
    cur_iov = &curp->wirevec[call->curvec];
    call_iov = &call->iov[call->iovNext];
    while (!call->error && call->iovNBytes && call->iovNext < call->iovMax) {
        if (call->nLeft == 0) {
            /* Get next packet */
            if (queue_IsNotEmpty(&call->rq)) {
                /* Check that next packet available is next in sequence */
                rp = queue_First(&call->rq, rx_packet);
                if (rp->header.seq == call->rnext) {
                    register struct rx_connection *conn = call->conn;
                    /* RXS_CheckPacket called to undo RXS_PreparePacket's
                     * work.  It may reduce the length of the packet by up
                     * to conn->maxTrailerSize, to reflect the length of the
                     * data + the header. */
                    if ((error = RXS_CheckPacket(conn->securityObject, call, rp))) {
                        /* Used to merely shut down the call, but now we
                         * shut down the whole connection since this may
                         * indicate an attempt to hijack it */
                        MUTEX_EXIT(&call->lock);
                        rxi_ConnectionError(conn, error);
                        MUTEX_ENTER(&conn->conn_data_lock);
                        rp = rxi_SendConnectionAbort(conn, rp, 0, 0);
                        MUTEX_EXIT(&conn->conn_data_lock);
                        MUTEX_ENTER(&call->lock);
                    curp = call->currentPacket = rp;
                    call->curvec = 1;	/* 0th vec is always header */
                    cur_iov = &curp->wirevec[1];
                    /* begin at the beginning [ more or less ], continue
                     * on until the end, then stop. */
                    call->curpos = (char *)curp->wirevec[1].iov_base
                                   + call->conn->securityHeaderSize;
                    call->curlen = curp->wirevec[1].iov_len
                                   - call->conn->securityHeaderSize;
                    /* Notice that this code works correctly if the data
                     * size is 0 (which it may be--no reply arguments from
                     * server, for example).  This relies heavily on the
                     * fact that the code below immediately frees the packet
                     * (no yields, etc.).  If it didn't, this would be a
                     * problem because a value of zero for call->nLeft
                     * normally means that there is no read packet */
                    call->nLeft = curp->length;
                    hadd32(call->bytesRcvd, curp->length);
                    /* Send a hard ack for every rxi_HardAckRate+1 packets
                     * consumed.  Otherwise schedule an event to send
                     * the hard ack later on.
                     */
        /* It's possible for call->nLeft to be smaller than any particular
         * iov_len.  Usually, recvmsg doesn't change the iov_len, since it
         * reflects the size of the buffer.  We have to keep track of the
         * number of bytes read in the length field of the packet struct.  On
         * the final portion of a received packet, it's almost certain that
         * call->nLeft will be smaller than the final buffer. */
        while (call->iovNBytes && call->iovNext < call->iovMax && curp) {
            t = MIN((int)call->curlen, call->iovNBytes);
            t = MIN(t, (int)call->nLeft);
            call_iov->iov_base = call->curpos;
            call_iov->iov_len = t;
            call->iovNBytes -= t;
            /* out of packet.  Get another one. */
            queue_Append(&call->iovq, curp);
            curp = call->currentPacket = (struct rx_packet *)0;
            else if (!call->curlen) {
                /* need to get another struct iov */
                if (++call->curvec >= curp->niovecs) {
                    /* current packet is exhausted, get ready for another */
                    /* don't worry about curvec and stuff, they get set somewhere else */
                    queue_Append(&call->iovq, curp);
                    curp = call->currentPacket = (struct rx_packet *)0;
                call->curpos = (char *)cur_iov->iov_base;
                call->curlen = cur_iov->iov_len;
    /* If we consumed any packets then check whether we need to
     * send a hard ack. */
    if (didConsume && (!(call->flags & RX_CALL_RECEIVE_DONE))) {
        if (call->nHardAcks > (u_short)rxi_HardAckRate) {
            rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
            rxi_SendAck(call, 0, seq, serial, flags, RX_ACK_DELAY, 0);
            clock_GetTime(&when);
            /* Delay to consolidate ack packets */
            clock_Add(&when, &rx_hardAckDelay);
            if (!call->delayedAckEvent ||
                clock_Gt(&call->delayedAckEvent->eventTime, &when)) {
                rxevent_Cancel(call->delayedAckEvent, call, RX_CALL_REFCOUNT_DELAY);
                CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
                call->delayedAckEvent = rxevent_Post(&when, rxi_SendDelayedAck,
/* rxi_ReadvProc -- internal version.
 * Fills in an iovec with pointers to the packet buffers.  All packets
 * except the last packet (new current packet) are moved to the iovq
 * while the application is processing the data.
 * LOCKS USED -- called at netpri with rx global lock and call->lock held.
 */
int rxi_ReadvProc(call, iov, nio, maxio, nbytes)
    struct rx_call *call;
    struct rx_packet *rp;
    struct rx_packet *nxp;	/* Next packet pointer, for queue_Scan */
    requestCount = nbytes;
    /* Free any packets from the last call to ReadvProc/WritevProc */
    for (queue_Scan(&call->iovq, rp, nxp, rx_packet)) {
    if (call->mode == RX_MODE_SENDING) {
        rxi_FlushWrite(call);
    /* Get whatever data is currently available in the receive queue.
     * If rxi_FillReadVec sends an ack packet then it is possible
     * that we will receive more data while we drop the call lock
     * to send the packet.  Set the RX_CALL_IOVEC_WAIT flag
     * here to avoid a race with the receive thread if we send
     * hard acks in rxi_FillReadVec. */
    call->flags |= RX_CALL_IOVEC_WAIT;
    call->iovNBytes = nbytes;
    call->iovMax = maxio;
    rxi_FillReadVec(call, 0, 0, 0);
    /* if we need more data then sleep until the receive thread has
     * filled in the rest. */
    if (!call->error && call->iovNBytes &&
        call->iovNext < call->iovMax &&
        !(call->flags & RX_CALL_RECEIVE_DONE)) {
        call->flags |= RX_CALL_READER_WAIT;
        call->startWait = clock_Sec();
        while (call->flags & RX_CALL_READER_WAIT) {
#ifdef RX_ENABLE_LOCKS
            CV_WAIT(&call->cv_rq, &call->lock);
            osi_rxSleep(&call->rq);
    call->flags &= ~RX_CALL_IOVEC_WAIT;
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
    *nio = call->iovNext;
    return nbytes - call->iovNBytes;
int rx_ReadvProc(call, iov, nio, maxio, nbytes)
    struct rx_call *call;
    MUTEX_ENTER(&call->lock);
    bytes = rxi_ReadvProc(call, iov, nio, maxio, nbytes);
    MUTEX_EXIT(&call->lock);
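
#if 0
/* Illustrative sketch only, not compiled: a zero-copy receive with
 * rx_ReadvProc above.  The iovec entries point straight into Rx packet
 * buffers and remain valid only until the next read or write on the same
 * call, when the packets parked on call->iovq are freed.  The vector size
 * of 16, the 16K request, and process_bytes() are assumptions made for
 * this example. */
static int example_ReadScatter(acall)
    struct rx_call *acall;
{
    struct iovec iov[16];
    int nio, i, nbytes;

    nbytes = rx_ReadvProc(acall, iov, &nio, 16, 16384);
    for (i = 0; i < nio; i++)
        process_bytes(iov[i].iov_base, iov[i].iov_len);
    return nbytes;	/* total bytes described by the filled iovec */
}
#endif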
/* rxi_WriteProc -- internal version.
 * LOCKS USED -- called at netpri with rx global lock and call->lock held. */
int rxi_WriteProc(call, buf, nbytes)
    register struct rx_call *call;
    struct rx_connection *conn = call->conn;
    register struct rx_packet *cp = call->currentPacket;
    register struct rx_packet *tp;	/* Temporary packet pointer */
    register struct rx_packet *nxp;	/* Next packet pointer, for queue_Scan */
    register unsigned int t;
    int requestCount = nbytes;
    /* Free any packets from the last call to ReadvProc/WritevProc */
    if (!queue_IsEmpty(&call->iovq)) {
        for (queue_Scan(&call->iovq, tp, nxp, rx_packet)) {
    if (call->mode != RX_MODE_SENDING) {
        if ((conn->type == RX_SERVER_CONNECTION)
            && (call->mode == RX_MODE_RECEIVING)) {
            call->mode = RX_MODE_SENDING;
            cp = call->currentPacket = (struct rx_packet *) 0;
    /* Loop condition is checked at end, so that a write of 0 bytes
     * will force a packet to be created--especially for the case where
     * there are 0 bytes on the stream, but we must send a packet
    if (call->nFree == 0) {
        if (!call->error && cp) {
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
            /* Wait until TQ_BUSY is reset before adding any
             * packets to the transmit queue
             */
            while (call->flags & RX_CALL_TQ_BUSY) {
                call->flags |= RX_CALL_TQ_WAIT;
#ifdef RX_ENABLE_LOCKS
                CV_WAIT(&call->cv_tq, &call->lock);
#else /* RX_ENABLE_LOCKS */
                osi_rxSleep(&call->tq);
#endif /* RX_ENABLE_LOCKS */
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
            clock_NewTime();	/* Bogus:  need new time package */
            /* The 0, below, specifies that it is not the last packet:
             * there will be others.  PrepareSendPacket may
             * alter the packet length by up to
             * conn->securityMaxTrailerSize */
            hadd32(call->bytesSent, cp->length);
            rxi_PrepareSendPacket(call, cp, 0);
            queue_Append(&call->tq, cp);
            cp = call->currentPacket = NULL;
            if (!(call->flags & (RX_CALL_FAST_RECOVER |
                                 RX_CALL_FAST_RECOVER_WAIT))) {
                rxi_Start(0, call, 0);
        /* Wait for transmit window to open up */
        while (!call->error && call->tnext + 1 > call->tfirst + call->twind) {
            call->startWait = clock_Sec();
#ifdef RX_ENABLE_LOCKS
            CV_WAIT(&call->cv_twind, &call->lock);
            call->flags |= RX_CALL_WAIT_WINDOW_ALLOC;
            osi_rxSleep(&call->twind);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
        if ((cp = rxi_AllocSendPacket(call, nbytes))) {
            call->currentPacket = cp;
            call->nFree = cp->length;
            call->curvec = 1;	/* 0th vec is always header */
            /* begin at the beginning [ more or less ], continue
             * on until the end, then stop. */
            call->curpos = (char *)cp->wirevec[1].iov_base
                           + call->conn->securityHeaderSize;
            call->curlen = cp->wirevec[1].iov_len
                           - call->conn->securityHeaderSize;
            call->currentPacket = NULL;
    if (cp && (int)call->nFree < nbytes) {
        /* Try to extend the current buffer */
        register int len, mud;
        mud = rx_MaxUserDataSize(call);
        want = MIN(nbytes - (int)call->nFree, mud - len);
        rxi_AllocDataBuf(cp, want, RX_PACKET_CLASS_SEND_CBUF);
        if (cp->length > (unsigned)mud)
        call->nFree += (cp->length - len);
    /* If the remaining bytes fit in the buffer, then store them
     * and return.  Don't ship a buffer that's full immediately to
     * the peer--we don't know if it's the last buffer yet */
    while (nbytes && call->nFree) {
        t = MIN((int)call->curlen, nbytes);
        t = MIN((int)call->nFree, t);
        bcopy(buf, call->curpos, t);
        /* need to get another struct iov */
        if (++call->curvec >= cp->niovecs) {
            /* current packet is full, extend or send it */
            call->curpos = (char *)cp->wirevec[call->curvec].iov_base;
            call->curlen = cp->wirevec[call->curvec].iov_len;
    }	/* while bytes to send and room to send them */
    /* might be out of space now */
    else ;	/* more data to send, so get another packet and keep going */
    return requestCount - nbytes;
int rx_WriteProc(call, buf, nbytes)
    struct rx_call *call;
    /*
     * Free any packets from the last call to ReadvProc/WritevProc.
     * We do not need the lock because the receiver threads only
     * touch the iovq when the RX_CALL_IOVEC_WAIT flag is set, and the
     * RX_CALL_IOVEC_WAIT is always cleared before returning from
     * ReadvProc/WritevProc.
     */
    if (!queue_IsEmpty(&call->iovq)) {
        register struct rx_packet *rp;
        register struct rx_packet *nxp;
        for (queue_Scan(&call->iovq, rp, nxp, rx_packet)) {
    /*
     * Most common case: all of the data fits in the current iovec.
     * We do not need the lock because this is the only thread that
     * updates the curlen, curpos, nFree fields.
     *
     * We are relying on nFree being zero unless the call is in send mode.
     */
    tcurlen = (int)call->curlen;
    tnFree = (int)call->nFree;
    if (!call->error && tcurlen >= nbytes && tnFree >= nbytes) {
        tcurpos = call->curpos;
        bcopy(buf, tcurpos, nbytes);
        call->curpos = tcurpos + nbytes;
        call->curlen = tcurlen - nbytes;
        call->nFree = tnFree - nbytes;
    MUTEX_ENTER(&call->lock);
    bytes = rxi_WriteProc(call, buf, nbytes);
    MUTEX_EXIT(&call->lock);
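
#if 0
/* Illustrative sketch only, not compiled: pushing a buffer into the send
 * stream with rx_WriteProc above.  A return value smaller than the request
 * means call->error was set; the final, partially filled packet is not
 * transmitted until the caller switches to reading (which triggers
 * rxi_FlushWrite) or ends the call.  The helper name is an assumption. */
static int example_WriteBuffer(acall, data, len)
    struct rx_call *acall;
    char *data;
    int len;
{
    if (rx_WriteProc(acall, data, len) != len)
        return -1;	/* call hit an error; the data was not all queued */
    return 0;
}
#endif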
/* Optimization for marshalling 32 bit arguments */
int rx_WriteProc32(call, value)
    register struct rx_call *call;
    register afs_int32 *value;
    /*
     * Free any packets from the last call to ReadvProc/WritevProc.
     * We do not need the lock because the receiver threads only
     * touch the iovq when the RX_CALL_IOVEC_WAIT flag is set, and the
     * RX_CALL_IOVEC_WAIT is always cleared before returning from
     * ReadvProc/WritevProc.
     */
    if (!queue_IsEmpty(&call->iovq)) {
        register struct rx_packet *rp;
        register struct rx_packet *nxp;
        for (queue_Scan(&call->iovq, rp, nxp, rx_packet)) {
    /*
     * Most common case: all of the data fits in the current iovec.
     * We do not need the lock because this is the only thread that
     * updates the curlen, curpos, nFree fields.
     *
     * We are relying on nFree being zero unless the call is in send mode.
     */
    tcurlen = (int)call->curlen;
    tnFree = (int)call->nFree;
    if (!call->error && tcurlen >= sizeof(afs_int32) && tnFree >= sizeof(afs_int32)) {
        tcurpos = call->curpos;
        if (!((long)tcurpos & (sizeof(afs_int32) - 1))) {
            *((afs_int32 *)(tcurpos)) = *value;
            bcopy((char *)value, tcurpos, sizeof(afs_int32));
        call->curpos = tcurpos + sizeof(afs_int32);
        call->curlen = tcurlen - sizeof(afs_int32);
        call->nFree = tnFree - sizeof(afs_int32);
        return sizeof(afs_int32);
    MUTEX_ENTER(&call->lock);
    bytes = rxi_WriteProc(call, (char *)value, sizeof(afs_int32));
    MUTEX_EXIT(&call->lock);
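
#if 0
/* Illustrative sketch only, not compiled: the marshalling pattern that
 * rx_WriteProc32 above accelerates -- a 32-bit count followed by the data
 * itself.  As on the read side, byte ordering is normally handled by the
 * rxgen/XDR layer; the htonl call and the helper name are assumptions made
 * for this example. */
static int example_WriteCounted(acall, buf, len)
    struct rx_call *acall;
    char *buf;
    afs_int32 len;
{
    afs_int32 netlen = htonl(len);	/* assumes the peer expects network order */

    if (rx_WriteProc32(acall, &netlen) != sizeof(afs_int32))
        return -1;
    if (rx_WriteProc(acall, buf, (int)len) != (int)len)
        return -1;
    return 0;
}
#endif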
/* rxi_WritevAlloc -- internal version.
 * Fill in an iovec to point to data in packet buffers.  The application
 * calls rxi_WritevProc when the buffers are full.
 * LOCKS USED -- called at netpri with rx global lock and call->lock held. */
int rxi_WritevAlloc(call, iov, nio, maxio, nbytes)
    struct rx_call *call;
    struct rx_connection *conn = call->conn;
    struct rx_packet *cp = call->currentPacket;
    struct rx_packet *tp;	/* temporary packet pointer */
    struct rx_packet *nxp;	/* Next packet pointer, for queue_Scan */
    /* Temporary values, real work is done in rxi_WritevProc */
    requestCount = nbytes;
    /* Free any packets from the last call to ReadvProc/WritevProc */
    for (queue_Scan(&call->iovq, tp, nxp, rx_packet)) {
    if (call->mode != RX_MODE_SENDING) {
        if ((conn->type == RX_SERVER_CONNECTION)
            && (call->mode == RX_MODE_RECEIVING)) {
            call->mode = RX_MODE_SENDING;
            cp = call->currentPacket = (struct rx_packet *) 0;
    /* Set up the iovec to point to data in packet buffers. */
    tnFree = call->nFree;
    tcurvec = call->curvec;
    tcurpos = call->curpos;
    tcurlen = call->curlen;
        register unsigned int t;
        /* current packet is full, allocate a new one */
        cp = rxi_AllocSendPacket(call, nbytes);
        /* out of space, return what we have */
        return requestCount - nbytes;
        queue_Append(&call->iovq, cp);
        tcurpos = (char *)cp->wirevec[1].iov_base
                  + call->conn->securityHeaderSize;
        tcurlen = cp->wirevec[1].iov_len
                  - call->conn->securityHeaderSize;
        if (tnFree < nbytes) {
            /* try to extend the current packet */
            register int len, mud;
            mud = rx_MaxUserDataSize(call);
            want = MIN(nbytes - tnFree, mud - len);
            rxi_AllocDataBuf(cp, want, RX_PACKET_CLASS_SEND_CBUF);
            if (cp->length > (unsigned)mud)
            tnFree += (cp->length - len);
            if (cp == call->currentPacket) {
                call->nFree += (cp->length - len);
        /* fill in the next entry in the iovec */
        t = MIN(tcurlen, nbytes);
        iov[nextio].iov_base = tcurpos;
        iov[nextio].iov_len = t;
        /* need to get another struct iov */
        if (++tcurvec >= cp->niovecs) {
            /* current packet is full, extend it or move on to next packet */
            tcurpos = (char *)cp->wirevec[tcurvec].iov_base;
            tcurlen = cp->wirevec[tcurvec].iov_len;
    } while (nbytes && nextio < maxio);
    return requestCount - nbytes;
int rx_WritevAlloc(call, iov, nio, maxio, nbytes)
    struct rx_call *call;
    MUTEX_ENTER(&call->lock);
    bytes = rxi_WritevAlloc(call, iov, nio, maxio, nbytes);
    MUTEX_EXIT(&call->lock);
int rx_WritevInit(call)
    struct rx_call *call;
    /*
     * Free any packets from the last call to ReadvProc/WritevProc.
     * We do not need the lock because the receiver threads only
     * touch the iovq when the RX_CALL_IOVEC_WAIT flag is set, and the
     * RX_CALL_IOVEC_WAIT is always cleared before returning from
     * ReadvProc/WritevProc.
     */
    if (!queue_IsEmpty(&call->iovq)) {
        register struct rx_packet *rp;
        register struct rx_packet *nxp;
        for (queue_Scan(&call->iovq, rp, nxp, rx_packet)) {
    MUTEX_ENTER(&call->lock);
    bytes = rxi_WriteProc(call, &bytes, 0);
    MUTEX_EXIT(&call->lock);
/* rxi_WritevProc -- internal version.
 * Send buffers allocated in rxi_WritevAlloc.
 * LOCKS USED -- called at netpri with rx global lock and call->lock held. */
int rxi_WritevProc(call, iov, nio, nbytes)
    struct rx_call *call;
    struct rx_connection *conn = call->conn;
    struct rx_packet *cp = call->currentPacket;
    register struct rx_packet *tp;	/* Temporary packet pointer */
    register struct rx_packet *nxp;	/* Next packet pointer, for queue_Scan */
    struct rx_queue tmpq;
    requestCount = nbytes;
    if (call->mode != RX_MODE_SENDING) {
        call->error = RX_PROTOCOL_ERROR;
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
    /* Wait until TQ_BUSY is reset before trying to move any
     * packets to the transmit queue. */
    while (!call->error && call->flags & RX_CALL_TQ_BUSY) {
        call->flags |= RX_CALL_TQ_WAIT;
#ifdef RX_ENABLE_LOCKS
        CV_WAIT(&call->cv_tq, &call->lock);
#else /* RX_ENABLE_LOCKS */
        osi_rxSleep(&call->tq);
#endif /* RX_ENABLE_LOCKS */
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
    for (queue_Scan(&call->iovq, tp, nxp, rx_packet)) {
        cp = call->currentPacket = NULL;
    /* Loop through the I/O vector adjusting packet pointers.
     * Place full packets back onto the iovq once they are ready
     * to send.  Set RX_PROTOCOL_ERROR if any problems are found in
     * the iovec.  We put the loop condition at the end to ensure that
     * a zero length write will push a short packet. */
        if (call->nFree == 0 && cp) {
            clock_NewTime();	/* Bogus:  need new time package */
            /* The 0, below, specifies that it is not the last packet:
             * there will be others.  PrepareSendPacket may
             * alter the packet length by up to
             * conn->securityMaxTrailerSize */
            hadd32(call->bytesSent, cp->length);
            rxi_PrepareSendPacket(call, cp, 0);
            queue_Append(&tmpq, cp);
            /* The head of the iovq is now the current packet */
            if (queue_IsEmpty(&call->iovq)) {
                call->error = RX_PROTOCOL_ERROR;
                cp = call->currentPacket = NULL;
                for (queue_Scan(&tmpq, tp, nxp, rx_packet)) {
            cp = queue_First(&call->iovq, rx_packet);
            call->currentPacket = cp;
            call->nFree = cp->length;
            call->curpos = (char *)cp->wirevec[1].iov_base
                           + call->conn->securityHeaderSize;
            call->curlen = cp->wirevec[1].iov_len
                           - call->conn->securityHeaderSize;
        /* The next iovec should point to the current position */
        if (iov[nextio].iov_base != call->curpos
            || iov[nextio].iov_len > (int)call->curlen) {
            call->error = RX_PROTOCOL_ERROR;
            for (queue_Scan(&tmpq, tp, nxp, rx_packet)) {
            call->currentPacket = NULL;
        nbytes -= iov[nextio].iov_len;
        call->curpos += iov[nextio].iov_len;
        call->curlen -= iov[nextio].iov_len;
        call->nFree -= iov[nextio].iov_len;
        if (call->curlen == 0) {
            if (++call->curvec > cp->niovecs) {
                call->curpos = (char *)cp->wirevec[call->curvec].iov_base;
                call->curlen = cp->wirevec[call->curvec].iov_len;
    } while (nbytes && nextio < nio);
    /* Move the packets from the temporary queue onto the transmit queue.
     * We may end up with more than call->twind packets on the queue. */
    for (queue_Scan(&tmpq, tp, nxp, rx_packet)) {
        queue_Append(&call->tq, tp);
    if (!(call->flags & (RX_CALL_FAST_RECOVER | RX_CALL_FAST_RECOVER_WAIT))) {
        rxi_Start(0, call, 0);
    /* Wait for the length of the transmit queue to fall below call->twind */
    while (!call->error && call->tnext + 1 > call->tfirst + call->twind) {
        call->startWait = clock_Sec();
#ifdef RX_ENABLE_LOCKS
        CV_WAIT(&call->cv_twind, &call->lock);
        call->flags |= RX_CALL_WAIT_WINDOW_ALLOC;
        osi_rxSleep(&call->twind);
        call->startWait = 0;
    cp = call->currentPacket = NULL;
    return requestCount - nbytes;
int rx_WritevProc(call, iov, nio, nbytes)
    struct rx_call *call;
    MUTEX_ENTER(&call->lock);
    bytes = rxi_WritevProc(call, iov, nio, nbytes);
    MUTEX_EXIT(&call->lock);
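
#if 0
/* Illustrative sketch only, not compiled: the zero-copy send path built from
 * the routines above.  rx_WritevInit forces the call into send mode,
 * rx_WritevAlloc hands back iovec entries that point directly into Rx packet
 * buffers, the caller fills them, and rx_WritevProc queues the now-full
 * packets for transmission.  The vector size of 16, the 8K request, and
 * fill_bytes() are assumptions made for this example. */
static int example_WriteGather(acall)
    struct rx_call *acall;
{
    struct iovec iov[16];
    int nio, i, allocated, filled = 0;

    rx_WritevInit(acall);
    allocated = rx_WritevAlloc(acall, iov, &nio, 16, 8192);
    if (allocated <= 0)
        return -1;	/* out of packet buffers */
    for (i = 0; i < nio; i++) {
        fill_bytes(iov[i].iov_base, iov[i].iov_len);	/* write user data into the packet buffers */
        filled += iov[i].iov_len;
    }
    /* The iovec handed back must describe exactly the bytes filled in. */
    return rx_WritevProc(acall, iov, nio, filled);
}
#endif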
/* Flush any buffered data to the stream, switch to read mode
 * (clients) or to EOF mode (servers) */
void rxi_FlushWrite(call)
    register struct rx_call *call;
    register struct rx_packet *cp = call->currentPacket;
    register struct rx_packet *tp;	/* Temporary packet pointer */
    register struct rx_packet *nxp;	/* Next packet pointer, for queue_Scan */
    /* Free any packets from the last call to ReadvProc/WritevProc */
    for (queue_Scan(&call->iovq, tp, nxp, rx_packet)) {
    if (call->mode == RX_MODE_SENDING) {
        call->mode = (call->conn->type == RX_CLIENT_CONNECTION ?
                      RX_MODE_RECEIVING : RX_MODE_EOF);
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
        /* Wait until TQ_BUSY is reset before adding any
         * packets to the transmit queue
         */
        while (call->flags & RX_CALL_TQ_BUSY) {
            call->flags |= RX_CALL_TQ_WAIT;
#ifdef RX_ENABLE_LOCKS
            CV_WAIT(&call->cv_tq, &call->lock);
#else /* RX_ENABLE_LOCKS */
            osi_rxSleep(&call->tq);
#endif /* RX_ENABLE_LOCKS */
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
        /* cp->length is only supposed to be the user's data */
        /* cp->length was already set to (then-current)
         * MaxUserDataSize or less. */
        cp->length -= call->nFree;
        call->currentPacket = (struct rx_packet *) 0;
        cp = rxi_AllocSendPacket(call, 0);
        /* Mode can no longer be MODE_SENDING */
        cp->niovecs = 1;	/* just the header */
        /* The 1 specifies that this is the last packet */
        hadd32(call->bytesSent, cp->length);
        rxi_PrepareSendPacket(call, cp, 1);
        queue_Append(&call->tq, cp);
        if (!(call->flags & (RX_CALL_FAST_RECOVER |
                             RX_CALL_FAST_RECOVER_WAIT))) {
            rxi_Start(0, call, 0);
/* Flush any buffered data to the stream, switch to read mode
 * (clients) or to EOF mode (servers) */
void rx_FlushWrite(call)
    struct rx_call *call;
    MUTEX_ENTER(&call->lock);
    rxi_FlushWrite(call);
    MUTEX_EXIT(&call->lock);
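
#if 0
/* Illustrative sketch only, not compiled: a client normally never calls
 * rx_FlushWrite itself.  The first read on a call that is still in send mode
 * goes through the mode check near the top of rxi_ReadProc, which calls
 * rxi_FlushWrite to push the final request packet and flip the call into
 * receive mode.  rx_NewCall and rx_EndCall are the usual Rx call entry
 * points; the helper name and buffer handling are assumptions for the
 * example. */
static afs_int32 example_SimpleCall(aconn, request, reqlen, reply, replylen)
    struct rx_connection *aconn;
    char *request, *reply;
    int reqlen, replylen;
{
    struct rx_call *acall = rx_NewCall(aconn);
    int nread;

    rx_WriteProc(acall, request, reqlen);		/* buffered; not flushed yet */
    nread = rx_ReadProc(acall, reply, replylen);	/* implicit rxi_FlushWrite happens here */
    return rx_EndCall(acall, nread > 0 ? 0 : RX_PROTOCOL_ERROR);
}
#endif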