/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
#include <afsconfig.h>
#ifdef KERNEL
#include "afs/param.h"
#else
#include <afs/param.h>
#endif
#ifdef KERNEL
#ifndef UKERNEL
#ifdef RX_KERNEL_TRACE
#include "rx_kcommon.h"
#endif
#if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
#include "afs/sysincludes.h"
#else
#if defined(AFS_AIX_ENV) || defined(AFS_AUX_ENV) || defined(AFS_SUN5_ENV)
#include "h/systm.h"
#endif
#ifdef AFS_OSF_ENV
#include <net/net_globals.h>
#endif /* AFS_OSF_ENV */
#ifdef AFS_LINUX20_ENV
#include "h/socket.h"
#endif
#include "netinet/in.h"
#if defined(AFS_SGI_ENV)
#include "afs/sysincludes.h"
#endif
#endif
#include "afs/afs_args.h"
#include "afs/afs_osi.h"
#if (defined(AFS_AUX_ENV) || defined(AFS_AIX_ENV))
#include "h/systm.h"
#endif
#else /* !UKERNEL */
#include "afs/sysincludes.h"
#endif /* !UKERNEL */
#ifdef RXDEBUG
#undef RXDEBUG			/* turn off debugging */
#endif /* RXDEBUG */

#include "rx_kmutex.h"
#include "rx/rx_kernel.h"
#include "rx/rx_clock.h"
#include "rx/rx_queue.h"
#include "rx/rx.h"
#include "rx/rx_globals.h"
#ifdef AFS_OSF_ENV
#include "afs/afs_gcpag.h"
#endif /* AFS_OSF_ENV */
#else /* KERNEL */
# include <sys/types.h>
#ifdef AFS_NT40_ENV
# include <winsock2.h>
#else /* !AFS_NT40_ENV */
# include <sys/socket.h>
# include <sys/file.h>
# include <netdb.h>
# include <netinet/in.h>
# include <sys/stat.h>
# include <sys/time.h>
#endif /* !AFS_NT40_ENV */
# include <string.h>
# include "rx_user.h"
# include "rx_clock.h"
# include "rx_queue.h"
# include "rx.h"
# include "rx_globals.h"
#endif /* KERNEL */
#ifdef RX_LOCKS_DB
/* rxdb_fileID is used to identify the lock location, along with line#. */
static int rxdb_fileID = RXDB_FILE_RX_RDWR;
#endif /* RX_LOCKS_DB */
/* rxi_ReadProc -- internal version.
 *
 * LOCKS USED -- called at netpri with rx global lock and call->lock held.
 */
int
rxi_ReadProc(struct rx_call *call, char *buf, int nbytes)
{
    struct rx_packet *cp = call->currentPacket;
    struct rx_packet *rp;
    int requestCount;
    unsigned int t;

    /* XXXX took out clock_NewTime from here.  Was it needed? */
    requestCount = nbytes;
    /* Free any packets from the last call to ReadvProc/WritevProc */
    if (queue_IsNotEmpty(&call->iovq)) {
#ifdef RXDEBUG_PACKET
        call->iovqc -=
#endif /* RXDEBUG_PACKET */
            rxi_FreePackets(0, &call->iovq);
    }
    do {
        if (call->nLeft == 0) {
            /* Get next packet */
            for (;;) {
                if (call->error || (call->mode != RX_MODE_RECEIVING)) {
                    if (call->error) {
                        return 0;
                    }
                    if (call->mode == RX_MODE_SENDING) {
                        rxi_FlushWrite(call);
                        continue;
                    }
                }
                if (queue_IsNotEmpty(&call->rq)) {
                    /* Check that next packet available is next in sequence */
                    rp = queue_First(&call->rq, rx_packet);
                    if (rp->header.seq == call->rnext) {
                        afs_int32 error;
                        struct rx_connection *conn = call->conn;
                        queue_Remove(rp);
#ifdef RX_TRACK_PACKETS
                        rp->flags &= ~RX_PKTFLAG_RQ;
#endif
#ifdef RXDEBUG_PACKET
                        call->rqc--;
#endif /* RXDEBUG_PACKET */
                        /* RXS_CheckPacket called to undo RXS_PreparePacket's
                         * work.  It may reduce the length of the packet by up
                         * to conn->maxTrailerSize, to reflect the length of the
                         * data + the header. */
                        if ((error =
                             RXS_CheckPacket(conn->securityObject, call,
                                             rp))) {
                            /* Used to merely shut down the call, but now we
                             * shut down the whole connection since this may
                             * indicate an attempt to hijack it */
                            MUTEX_EXIT(&call->lock);
                            rxi_ConnectionError(conn, error);
                            MUTEX_ENTER(&conn->conn_data_lock);
                            rp = rxi_SendConnectionAbort(conn, rp, 0, 0);
                            MUTEX_EXIT(&conn->conn_data_lock);
                            rxi_FreePacket(rp);
                            MUTEX_ENTER(&call->lock);

                            return 0;
                        }
                        call->rnext++;
                        cp = call->currentPacket = rp;
#ifdef RX_TRACK_PACKETS
                        call->currentPacket->flags |= RX_PKTFLAG_CP;
#endif
                        call->curvec = 1;	/* 0th vec is always header */
                        /* begin at the beginning [ more or less ], continue
                         * on until the end, then stop. */
                        call->curpos =
                            (char *)cp->wirevec[1].iov_base +
                            call->conn->securityHeaderSize;
                        call->curlen =
                            cp->wirevec[1].iov_len -
                            call->conn->securityHeaderSize;
                        /* Notice that this code works correctly if the data
                         * size is 0 (which it may be--no reply arguments from
                         * server, for example).  This relies heavily on the
                         * fact that the code below immediately frees the packet
                         * (no yields, etc.).  If it didn't, this would be a
                         * problem because a value of zero for call->nLeft
                         * normally means that there is no read packet */
                        call->nLeft = cp->length;
                        hadd32(call->bytesRcvd, cp->length);
                        /* Send a hard ack for every rxi_HardAckRate+1 packets
                         * consumed. Otherwise schedule an event to send
                         * the hard ack later on.
                         */
                        call->nHardAcks++;
                        if (!(call->flags & RX_CALL_RECEIVE_DONE)) {
                            if (call->nHardAcks > (u_short) rxi_HardAckRate) {
                                rxevent_Cancel(call->delayedAckEvent, call,
                                               RX_CALL_REFCOUNT_DELAY);
                                rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
                            } else {
                                struct clock when, now;
                                clock_GetTime(&now);
                                when = now;
                                /* Delay to consolidate ack packets */
                                clock_Add(&when, &rx_hardAckDelay);
                                if (!call->delayedAckEvent
                                    || clock_Gt(&call->delayedAckEvent->
                                                eventTime, &when)) {
                                    rxevent_Cancel(call->delayedAckEvent,
                                                   call,
                                                   RX_CALL_REFCOUNT_DELAY);
                                    CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
                                    call->delayedAckEvent =
                                        rxevent_PostNow(&when, &now,
                                                        rxi_SendDelayedAck,
                                                        call, 0);
                                }
                            }
                        }
                        break;
                    }
                }
                /*
                 * If we reach this point either we have no packets in the
                 * receive queue or the next packet in the queue is not the
                 * one we are looking for.  There is nothing else for us to
                 * do but wait for another packet to arrive.
                 */

                /* Are there ever going to be any more packets? */
                if (call->flags & RX_CALL_RECEIVE_DONE) {
                    return requestCount - nbytes;
                }
                /* Wait for in-sequence packet */
                call->flags |= RX_CALL_READER_WAIT;
                clock_NewTime();
                call->startWait = clock_Sec();
                while (call->flags & RX_CALL_READER_WAIT) {
#ifdef RX_ENABLE_LOCKS
                    CV_WAIT(&call->cv_rq, &call->lock);
#else
                    osi_rxSleep(&call->rq);
#endif
                }
                cp = call->currentPacket;

                call->startWait = 0;
#ifdef RX_ENABLE_LOCKS
                if (call->error) {
                    return 0;
                }
#endif /* RX_ENABLE_LOCKS */
            }
        } else
            /* MTUXXX this should be replaced by some error-recovery code before shipping */
            /* yes, the following block is allowed to be the ELSE clause (or not) */
            /* It's possible for call->nLeft to be smaller than any particular
             * iov_len.  Usually, recvmsg doesn't change the iov_len, since it
             * reflects the size of the buffer.  We have to keep track of the
             * number of bytes read in the length field of the packet struct.  On
             * the final portion of a received packet, it's almost certain that
             * call->nLeft will be smaller than the final buffer. */
            while (nbytes && cp) {
                t = MIN((int)call->curlen, nbytes);
                t = MIN(t, (int)call->nLeft);
                memcpy(buf, call->curpos, t);
                buf += t;
                nbytes -= t;
                call->curpos += t;
                call->curlen -= t;
                call->nLeft -= t;

                if (!call->nLeft) {
                    /* out of packet.  Get another one. */
#ifdef RX_TRACK_PACKETS
                    call->currentPacket->flags &= ~RX_PKTFLAG_CP;
#endif
                    rxi_FreePacket(cp);
                    cp = call->currentPacket = (struct rx_packet *)0;
                } else if (!call->curlen) {
                    /* need to get another struct iov */
                    if (++call->curvec >= cp->niovecs) {
                        /* current packet is exhausted, get ready for another */
                        /* don't worry about curvec and stuff, they get set somewhere else */
#ifdef RX_TRACK_PACKETS
                        call->currentPacket->flags &= ~RX_PKTFLAG_CP;
#endif
                        rxi_FreePacket(cp);
                        cp = call->currentPacket = (struct rx_packet *)0;
                        call->nLeft = 0;
                    } else {
                        call->curpos =
                            (char *)cp->wirevec[call->curvec].iov_base;
                        call->curlen = cp->wirevec[call->curvec].iov_len;
                    }
                }
            }

        if (!nbytes) {
            /* user buffer is full, return */
            return requestCount;
        }

    } while (nbytes);

    return requestCount;
}
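
/* Return-value summary for the internal reader above (derived from the
 * code, stated here for reference): requestCount once the user buffer is
 * full, requestCount - nbytes if the stream ended first
 * (RX_CALL_RECEIVE_DONE), and 0 if the call is in an error state. */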
int
rx_ReadProc(struct rx_call *call, char *buf, int nbytes)
{
    int bytes;
    int tcurlen;
    int tnLeft;
    char *tcurpos;
    SPLVAR;

    /* Free any packets from the last call to ReadvProc/WritevProc */
    if (!queue_IsEmpty(&call->iovq)) {
#ifdef RXDEBUG_PACKET
        call->iovqc -=
#endif /* RXDEBUG_PACKET */
            rxi_FreePackets(0, &call->iovq);
    }

    /*
     * Most common case, all of the data is in the current iovec.
     * We are relying on nLeft being zero unless the call is in receive mode.
     */
    tcurlen = call->curlen;
    tnLeft = call->nLeft;
    if (!call->error && tcurlen > nbytes && tnLeft > nbytes) {
        tcurpos = call->curpos;
        memcpy(buf, tcurpos, nbytes);

        call->curpos = tcurpos + nbytes;
        call->curlen = tcurlen - nbytes;
        call->nLeft = tnLeft - nbytes;

        if (!call->nLeft && call->currentPacket != NULL) {
            /* out of packet.  Get another one. */
            rxi_FreePacket(call->currentPacket);
            call->currentPacket = (struct rx_packet *)0;
        }
        return nbytes;
    }

    NETPRI;
    MUTEX_ENTER(&call->lock);
    bytes = rxi_ReadProc(call, buf, nbytes);
    MUTEX_EXIT(&call->lock);
    USERPRI;
    return bytes;
}
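
/* Illustrative use of rx_ReadProc (a sketch, not part of this file):
 * pull reply data off a call and detect a short read.  Assumes "call"
 * was obtained through the usual rx client entry points:
 *
 *	char buffer[4096];
 *	int nread = rx_ReadProc(call, buffer, sizeof(buffer));
 *	if (nread < (int)sizeof(buffer)) {
 *	    // short read: the stream ended or the call is in error;
 *	    // the call's error field distinguishes the two cases
 *	}
 *
 * Note the fast path above copies straight out of the current packet
 * with no locking; only the slow path enters rxi_ReadProc. */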
/* Optimization for unmarshalling 32 bit integers */
int
rx_ReadProc32(struct rx_call *call, afs_int32 * value)
{
    int bytes;
    int tcurlen;
    int tnLeft;
    char *tcurpos;
    SPLVAR;

    /* Free any packets from the last call to ReadvProc/WritevProc */
    if (!queue_IsEmpty(&call->iovq)) {
#ifdef RXDEBUG_PACKET
        call->iovqc -=
#endif /* RXDEBUG_PACKET */
            rxi_FreePackets(0, &call->iovq);
    }

    /*
     * Most common case, all of the data is in the current iovec.
     * We are relying on nLeft being zero unless the call is in receive mode.
     */
    tcurlen = call->curlen;
    tnLeft = call->nLeft;
    if (!call->error && tcurlen >= sizeof(afs_int32)
        && tnLeft >= sizeof(afs_int32)) {
        tcurpos = call->curpos;

        memcpy((char *)value, tcurpos, sizeof(afs_int32));

        call->curpos = tcurpos + sizeof(afs_int32);
        call->curlen = (u_short)(tcurlen - sizeof(afs_int32));
        call->nLeft = (u_short)(tnLeft - sizeof(afs_int32));
        if (!call->nLeft && call->currentPacket != NULL) {
            /* out of packet.  Get another one. */
            rxi_FreePacket(call->currentPacket);
            call->currentPacket = (struct rx_packet *)0;
        }
        return sizeof(afs_int32);
    }

    NETPRI;
    MUTEX_ENTER(&call->lock);
    bytes = rxi_ReadProc(call, (char *)value, sizeof(afs_int32));
    MUTEX_EXIT(&call->lock);
    USERPRI;
    return bytes;
}
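
/* Illustrative unmarshalling sketch (not part of this file): RPC stubs
 * typically read a sequence of 32-bit values with rx_ReadProc32, which
 * returns sizeof(afs_int32) on success:
 *
 *	afs_int32 code, length;
 *	if (rx_ReadProc32(call, &code) != sizeof(afs_int32)
 *	    || rx_ReadProc32(call, &length) != sizeof(afs_int32)) {
 *	    // short read: the call is in error or the stream ended early
 *	}
 *
 * Values travel in network byte order; real stubs byte-swap with
 * ntohl() after reading. */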
/* rxi_FillReadVec
 *
 * Uses packets in the receive queue to fill in as much of the
 * current iovec as possible. Does not block if it runs out
 * of packets to complete the iovec. Return true if an ack packet
 * was sent, otherwise return false */
int
rxi_FillReadVec(struct rx_call *call, afs_uint32 serial)
{
    int didConsume = 0;
    int didHardAck = 0;
    unsigned int t;
    struct rx_packet *rp;
    struct rx_packet *curp;
    struct iovec *call_iov;
    struct iovec *cur_iov = NULL;

    curp = call->currentPacket;
    if (curp) {
        cur_iov = &curp->wirevec[call->curvec];
    }
    call_iov = &call->iov[call->iovNext];
    while (!call->error && call->iovNBytes && call->iovNext < call->iovMax) {
        if (call->nLeft == 0) {
            /* Get next packet */
            if (queue_IsNotEmpty(&call->rq)) {
                /* Check that next packet available is next in sequence */
                rp = queue_First(&call->rq, rx_packet);
                if (rp->header.seq == call->rnext) {
                    afs_int32 error;
                    struct rx_connection *conn = call->conn;
                    queue_Remove(rp);
#ifdef RX_TRACK_PACKETS
                    rp->flags &= ~RX_PKTFLAG_RQ;
#endif
#ifdef RXDEBUG_PACKET
                    call->rqc--;
#endif /* RXDEBUG_PACKET */
                    /* RXS_CheckPacket called to undo RXS_PreparePacket's
                     * work.  It may reduce the length of the packet by up
                     * to conn->maxTrailerSize, to reflect the length of the
                     * data + the header. */
                    if ((error =
                         RXS_CheckPacket(conn->securityObject, call, rp))) {
                        /* Used to merely shut down the call, but now we
                         * shut down the whole connection since this may
                         * indicate an attempt to hijack it */
                        MUTEX_EXIT(&call->lock);
                        rxi_ConnectionError(conn, error);
                        MUTEX_ENTER(&conn->conn_data_lock);
                        rp = rxi_SendConnectionAbort(conn, rp, 0, 0);
                        MUTEX_EXIT(&conn->conn_data_lock);
                        rxi_FreePacket(rp);
                        MUTEX_ENTER(&call->lock);

                        return 1;
                    }
                    call->rnext++;
                    curp = call->currentPacket = rp;
#ifdef RX_TRACK_PACKETS
                    call->currentPacket->flags |= RX_PKTFLAG_CP;
#endif
                    call->curvec = 1;	/* 0th vec is always header */
                    cur_iov = &curp->wirevec[1];
                    /* begin at the beginning [ more or less ], continue
                     * on until the end, then stop. */
                    call->curpos =
                        (char *)curp->wirevec[1].iov_base +
                        call->conn->securityHeaderSize;
                    call->curlen =
                        curp->wirevec[1].iov_len -
                        call->conn->securityHeaderSize;
                    /* Notice that this code works correctly if the data
                     * size is 0 (which it may be--no reply arguments from
                     * server, for example).  This relies heavily on the
                     * fact that the code below immediately frees the packet
                     * (no yields, etc.).  If it didn't, this would be a
                     * problem because a value of zero for call->nLeft
                     * normally means that there is no read packet */
                    call->nLeft = curp->length;
                    hadd32(call->bytesRcvd, curp->length);

                    /* Send a hard ack for every rxi_HardAckRate+1 packets
                     * consumed. Otherwise schedule an event to send
                     * the hard ack later on.
                     */
                    call->nHardAcks++;
                    didConsume = 1;
                    continue;
                }
            }
            break;
        }
        /* It's possible for call->nLeft to be smaller than any particular
         * iov_len.  Usually, recvmsg doesn't change the iov_len, since it
         * reflects the size of the buffer.  We have to keep track of the
         * number of bytes read in the length field of the packet struct.  On
         * the final portion of a received packet, it's almost certain that
         * call->nLeft will be smaller than the final buffer. */
        while (call->iovNBytes && call->iovNext < call->iovMax && curp) {
            t = MIN((int)call->curlen, call->iovNBytes);
            t = MIN(t, (int)call->nLeft);
            call_iov->iov_base = call->curpos;
            call_iov->iov_len = t;
            call_iov++;
            call->iovNext++;
            call->iovNBytes -= t;
            call->curpos += t;
            call->curlen -= t;
            call->nLeft -= t;

            if (!call->nLeft) {
                /* out of packet.  Get another one. */
#ifdef RX_TRACK_PACKETS
                curp->flags &= ~RX_PKTFLAG_CP;
                curp->flags |= RX_PKTFLAG_IOVQ;
#endif
                queue_Append(&call->iovq, curp);
#ifdef RXDEBUG_PACKET
                call->iovqc++;
#endif /* RXDEBUG_PACKET */
                curp = call->currentPacket = (struct rx_packet *)0;
            } else if (!call->curlen) {
                /* need to get another struct iov */
                if (++call->curvec >= curp->niovecs) {
                    /* current packet is exhausted, get ready for another */
                    /* don't worry about curvec and stuff, they get set somewhere else */
#ifdef RX_TRACK_PACKETS
                    curp->flags &= ~RX_PKTFLAG_CP;
                    curp->flags |= RX_PKTFLAG_IOVQ;
#endif
                    queue_Append(&call->iovq, curp);
#ifdef RXDEBUG_PACKET
                    call->iovqc++;
#endif /* RXDEBUG_PACKET */
                    curp = call->currentPacket = (struct rx_packet *)0;
                    call->nLeft = 0;
                } else {
                    cur_iov++;
                    call->curpos = (char *)cur_iov->iov_base;
                    call->curlen = cur_iov->iov_len;
                }
            }
        }
    }
    /* If we consumed any packets then check whether we need to
     * send a hard ack. */
    if (didConsume && (!(call->flags & RX_CALL_RECEIVE_DONE))) {
        if (call->nHardAcks > (u_short) rxi_HardAckRate) {
            rxevent_Cancel(call->delayedAckEvent, call,
                           RX_CALL_REFCOUNT_DELAY);
            rxi_SendAck(call, 0, serial, RX_ACK_DELAY, 0);
            didHardAck = 1;
        } else {
            struct clock when, now;
            clock_GetTime(&now);
            when = now;
            /* Delay to consolidate ack packets */
            clock_Add(&when, &rx_hardAckDelay);
            if (!call->delayedAckEvent
                || clock_Gt(&call->delayedAckEvent->eventTime, &when)) {
                rxevent_Cancel(call->delayedAckEvent, call,
                               RX_CALL_REFCOUNT_DELAY);
                CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
                call->delayedAckEvent =
                    rxevent_PostNow(&when, &now, rxi_SendDelayedAck, call, 0);
            }
        }
    }
    return didHardAck;
}
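
/* Hard-ack pacing, in brief: every packet consumed above bumps
 * call->nHardAcks.  Once more than rxi_HardAckRate packets have been
 * consumed, an RX_ACK_DELAY ack goes out immediately; otherwise a
 * delayed-ack event is (re)scheduled rx_hardAckDelay in the future.
 * For example, with rxi_HardAckRate set to N, a reader draining a bulk
 * transfer acks roughly every N+1 packets rather than once per packet,
 * which keeps the sender's window open without doubling the packet
 * count on the wire. */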
/* rxi_ReadvProc -- internal version.
 *
 * Fills in an iovec with pointers to the packet buffers. All packets
 * except the last packet (new current packet) are moved to the iovq
 * while the application is processing the data.
 *
 * LOCKS USED -- called at netpri with rx global lock and call->lock held.
 */
int
rxi_ReadvProc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
              int nbytes)
{
    /* Free any packets from the last call to ReadvProc/WritevProc */
    if (queue_IsNotEmpty(&call->iovq)) {
#ifdef RXDEBUG_PACKET
        call->iovqc -=
#endif /* RXDEBUG_PACKET */
            rxi_FreePackets(0, &call->iovq);
    }

    if (call->mode == RX_MODE_SENDING) {
        rxi_FlushWrite(call);
    }

    if (call->error) {
        return 0;
    }
    /* Get whatever data is currently available in the receive queue.
     * If rxi_FillReadVec sends an ack packet then it is possible
     * that we will receive more data while we drop the call lock
     * to send the packet. Set the RX_CALL_IOVEC_WAIT flag
     * here to avoid a race with the receive thread if we send
     * hard acks in rxi_FillReadVec. */
    call->flags |= RX_CALL_IOVEC_WAIT;
    call->iovNBytes = nbytes;
    call->iovMax = maxio;
    call->iovNext = 0;
    call->iov = iov;
    rxi_FillReadVec(call, 0);
    /* if we need more data then sleep until the receive thread has
     * filled in the rest. */
    if (!call->error && call->iovNBytes && call->iovNext < call->iovMax
        && !(call->flags & RX_CALL_RECEIVE_DONE)) {
        call->flags |= RX_CALL_READER_WAIT;
        clock_NewTime();
        call->startWait = clock_Sec();
        while (call->flags & RX_CALL_READER_WAIT) {
#ifdef RX_ENABLE_LOCKS
            CV_WAIT(&call->cv_rq, &call->lock);
#else
            osi_rxSleep(&call->rq);
#endif
        }
        call->startWait = 0;
    }
    call->flags &= ~RX_CALL_IOVEC_WAIT;
#ifdef RX_ENABLE_LOCKS
    if (call->error) {
        return 0;
    }
#endif /* RX_ENABLE_LOCKS */

    call->iov = NULL;
    *nio = call->iovNext;
    return nbytes - call->iovNBytes;
}
int
rx_ReadvProc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
             int nbytes)
{
    int bytes;
    SPLVAR;

    NETPRI;
    MUTEX_ENTER(&call->lock);
    bytes = rxi_ReadvProc(call, iov, nio, maxio, nbytes);
    MUTEX_EXIT(&call->lock);
    USERPRI;
    return bytes;
}
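
/* Illustrative iovec read (a sketch, not part of this file): rx_ReadvProc
 * avoids the copy in rx_ReadProc by handing the application pointers
 * directly into rx packet buffers.  Those buffers stay valid only until
 * the next read or write operation on the call, which recycles the iovq:
 *
 *	struct iovec iov[16];	// bounded by the rx iovec limit
 *	int nio;
 *	int nread = rx_ReadvProc(call, iov, &nio, 16, 65536);
 *	// consume iov[0..nio-1] before touching the call again
 */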
/* rxi_WriteProc -- internal version.
 *
 * LOCKS USED -- called at netpri with rx global lock and call->lock held. */

int
rxi_WriteProc(struct rx_call *call, char *buf, int nbytes)
{
    struct rx_connection *conn = call->conn;
    struct rx_packet *cp = call->currentPacket;
    unsigned int t;
    int requestCount = nbytes;
    /* Free any packets from the last call to ReadvProc/WritevProc */
    if (queue_IsNotEmpty(&call->iovq)) {
#ifdef RXDEBUG_PACKET
        call->iovqc -=
#endif /* RXDEBUG_PACKET */
            rxi_FreePackets(0, &call->iovq);
    }
    if (call->mode != RX_MODE_SENDING) {
        if ((conn->type == RX_SERVER_CONNECTION)
            && (call->mode == RX_MODE_RECEIVING)) {
            call->mode = RX_MODE_SENDING;
            if (cp) {
#ifdef RX_TRACK_PACKETS
                cp->flags &= ~RX_PKTFLAG_CP;
#endif
                rxi_FreePacket(cp);
                cp = call->currentPacket = (struct rx_packet *)0;
                call->nLeft = 0;
                call->nFree = 0;
            }
        } else {
            return 0;
        }
    }
    /* Loop condition is checked at end, so that a write of 0 bytes
     * will force a packet to be created--specially for the case where
     * there are 0 bytes on the stream, but we must send a packet
     * anyway. */
    do {
        if (call->nFree == 0) {
            if (!call->error && cp) {
                /* Clear the current packet now so that if
                 * we are forced to wait and drop the lock
                 * the packet we are planning on using
                 * cannot be freed.
                 */
#ifdef RX_TRACK_PACKETS
                cp->flags &= ~RX_PKTFLAG_CP;
#endif
                call->currentPacket = (struct rx_packet *)0;
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
                /* Wait until TQ_BUSY is reset before adding any
                 * packets to the transmit queue
                 */
                while (call->flags & RX_CALL_TQ_BUSY) {
                    call->flags |= RX_CALL_TQ_WAIT;
                    call->tqWaiters++;
#ifdef RX_ENABLE_LOCKS
                    CV_WAIT(&call->cv_tq, &call->lock);
#else /* RX_ENABLE_LOCKS */
                    osi_rxSleep(&call->tq);
#endif /* RX_ENABLE_LOCKS */
                    call->tqWaiters--;
                    if (call->tqWaiters == 0)
                        call->flags &= ~RX_CALL_TQ_WAIT;
                }
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
                clock_NewTime();	/* Bogus:  need new time package */
                /* The 0, below, specifies that it is not the last packet:
                 * there will be others. PrepareSendPacket may
                 * alter the packet length by up to
                 * conn->securityMaxTrailerSize */
                hadd32(call->bytesSent, cp->length);
                rxi_PrepareSendPacket(call, cp, 0);
#ifdef RX_TRACK_PACKETS
                cp->flags |= RX_PKTFLAG_TQ;
#endif
                queue_Append(&call->tq, cp);
#ifdef RXDEBUG_PACKET
                call->tqc++;
#endif /* RXDEBUG_PACKET */
                cp = (struct rx_packet *)0;
                if (!(call->flags & (RX_CALL_FAST_RECOVER |
                                     RX_CALL_FAST_RECOVER_WAIT))) {
                    rxi_Start(0, call, 0, 0);
                }
            } else if (cp) {	/* the call is in an error state */
#ifdef RX_TRACK_PACKETS
                cp->flags &= ~RX_PKTFLAG_CP;
#endif
                rxi_FreePacket(cp);
                cp = call->currentPacket = (struct rx_packet *)0;
            }
            /* Wait for transmit window to open up */
            while (!call->error
                   && call->tnext + 1 > call->tfirst + (2 * call->twind)) {
                clock_NewTime();
                call->startWait = clock_Sec();
#ifdef RX_ENABLE_LOCKS
                CV_WAIT(&call->cv_twind, &call->lock);
#else
                call->flags |= RX_CALL_WAIT_WINDOW_ALLOC;
                osi_rxSleep(&call->twind);
#endif
                call->startWait = 0;
#ifdef RX_ENABLE_LOCKS
                if (call->error) {
                    return 0;
                }
#endif /* RX_ENABLE_LOCKS */
            }
            if ((cp = rxi_AllocSendPacket(call, nbytes))) {
#ifdef RX_TRACK_PACKETS
                cp->flags |= RX_PKTFLAG_CP;
#endif
                call->currentPacket = cp;
                call->nFree = cp->length;
                call->curvec = 1;	/* 0th vec is always header */
                /* begin at the beginning [ more or less ], continue
                 * on until the end, then stop. */
                call->curpos =
                    (char *)cp->wirevec[1].iov_base +
                    call->conn->securityHeaderSize;
                call->curlen =
                    cp->wirevec[1].iov_len - call->conn->securityHeaderSize;
            }
            if (call->error) {
                if (cp) {
#ifdef RX_TRACK_PACKETS
                    cp->flags &= ~RX_PKTFLAG_CP;
#endif
                    rxi_FreePacket(cp);
                    call->currentPacket = NULL;
                }
                return 0;
            }
        }
        if (cp && (int)call->nFree < nbytes) {
            /* Try to extend the current buffer */
            int len, mud;
            len = cp->length;
            mud = rx_MaxUserDataSize(call);
            if (mud > len) {
                int want;
                want = MIN(nbytes - (int)call->nFree, mud - len);
                rxi_AllocDataBuf(cp, want, RX_PACKET_CLASS_SEND_CBUF);
                if (cp->length > (unsigned)mud)
                    cp->length = mud;
                call->nFree += (cp->length - len);
            }
        }
        /* If the remaining bytes fit in the buffer, then store them
         * and return.  Don't ship a buffer that's full immediately to
         * the peer--we don't know if it's the last buffer yet */

        if (!cp) {
            return requestCount - nbytes;
        }

        while (nbytes && call->nFree) {
            t = MIN((int)call->curlen, nbytes);
            t = MIN((int)call->nFree, t);
            memcpy(call->curpos, buf, t);
            buf += t;
            nbytes -= t;
            call->curpos += t;
            call->curlen -= (u_short)t;
            call->nFree -= (u_short)t;

            if (!call->curlen) {
                /* need to get another struct iov */
                if (++call->curvec >= cp->niovecs) {
                    /* current packet is full, extend or send it */
                    call->nFree = 0;
                } else {
                    call->curpos = (char *)cp->wirevec[call->curvec].iov_base;
                    call->curlen = cp->wirevec[call->curvec].iov_len;
                }
            }
        }			/* while bytes to send and room to send them */

        /* might be out of space now */
        if (!nbytes) {
            return requestCount;
        }
        /* more data to send, so get another packet and keep going */
    } while (nbytes);

    return requestCount - nbytes;
}
int
rx_WriteProc(struct rx_call *call, char *buf, int nbytes)
{
    int bytes;
    int tcurlen;
    int tnFree;
    char *tcurpos;
    SPLVAR;

    /* Free any packets from the last call to ReadvProc/WritevProc */
    if (queue_IsNotEmpty(&call->iovq)) {
#ifdef RXDEBUG_PACKET
        call->iovqc -=
#endif /* RXDEBUG_PACKET */
            rxi_FreePackets(0, &call->iovq);
    }

    /*
     * Most common case: all of the data fits in the current iovec.
     * We are relying on nFree being zero unless the call is in send mode.
     */
    tcurlen = (int)call->curlen;
    tnFree = (int)call->nFree;
    if (!call->error && tcurlen >= nbytes && tnFree >= nbytes) {
        tcurpos = call->curpos;

        memcpy(tcurpos, buf, nbytes);
        call->curpos = tcurpos + nbytes;
        call->curlen = (u_short)(tcurlen - nbytes);
        call->nFree = (u_short)(tnFree - nbytes);
        return nbytes;
    }

    NETPRI;
    MUTEX_ENTER(&call->lock);
    bytes = rxi_WriteProc(call, buf, nbytes);
    MUTEX_EXIT(&call->lock);
    USERPRI;
    return bytes;
}
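
/* Illustrative use of rx_WriteProc (a sketch, not part of this file):
 * stream a buffer into a call; a short return means the call went into
 * an error state part way through:
 *
 *	int nsent = rx_WriteProc(call, buffer, len);
 *	if (nsent != len) {
 *	    // the call carries the error code; abort or end the call
 *	}
 *
 * As with the reader, the fast path stays inside the current packet and
 * takes no locks; only the slow path drops into rxi_WriteProc. */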
/* Optimization for marshalling 32 bit arguments */
int
rx_WriteProc32(struct rx_call *call, afs_int32 * value)
{
    int bytes;
    int tcurlen;
    int tnFree;
    char *tcurpos;
    SPLVAR;

    /* Free any packets from the last call to ReadvProc/WritevProc */
    if (queue_IsNotEmpty(&call->iovq)) {
#ifdef RXDEBUG_PACKET
        call->iovqc -=
#endif /* RXDEBUG_PACKET */
            rxi_FreePackets(0, &call->iovq);
    }

    /*
     * Most common case: all of the data fits in the current iovec.
     * We are relying on nFree being zero unless the call is in send mode.
     */
    tcurlen = call->curlen;
    tnFree = call->nFree;
    if (!call->error && tcurlen >= sizeof(afs_int32)
        && tnFree >= sizeof(afs_int32)) {
        tcurpos = call->curpos;

        if (!((size_t)tcurpos & (sizeof(afs_int32) - 1))) {
            *((afs_int32 *) (tcurpos)) = *value;
        } else {
            memcpy(tcurpos, (char *)value, sizeof(afs_int32));
        }
        call->curpos = tcurpos + sizeof(afs_int32);
        call->curlen = (u_short)(tcurlen - sizeof(afs_int32));
        call->nFree = (u_short)(tnFree - sizeof(afs_int32));
        return sizeof(afs_int32);
    }

    NETPRI;
    MUTEX_ENTER(&call->lock);
    bytes = rxi_WriteProc(call, (char *)value, sizeof(afs_int32));
    MUTEX_EXIT(&call->lock);
    USERPRI;
    return bytes;
}
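
/* Illustrative marshalling sketch (not part of this file): the 32-bit
 * writer pairs with rx_ReadProc32 on the far side, and stubs convert to
 * network byte order before writing.  Note the aligned fast path above:
 * when the cursor is 4-byte aligned the value is stored directly,
 * otherwise memcpy handles the unaligned case.
 *
 *	afs_int32 opcode = htonl(137);	// hypothetical opcode value
 *	if (rx_WriteProc32(call, &opcode) != sizeof(afs_int32)) {
 *	    // call is in an error state
 *	}
 */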
/* rxi_WritevAlloc -- internal version.
 *
 * Fill in an iovec to point to data in packet buffers. The application
 * calls rxi_WritevProc when the buffers are full.
 *
 * LOCKS USED -- called at netpri with rx global lock and call->lock held. */

int
rxi_WritevAlloc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
                int nbytes)
{
    struct rx_connection *conn = call->conn;
    struct rx_packet *cp = call->currentPacket;
    int requestCount;
    int nextio;
    /* Temporary values, real work is done in rxi_WritevProc */
    int tnFree;
    unsigned int tcurvec;
    char *tcurpos;
    int tcurlen;

    requestCount = nbytes;
    nextio = 0;
    /* Free any packets from the last call to ReadvProc/WritevProc */
    if (queue_IsNotEmpty(&call->iovq)) {
#ifdef RXDEBUG_PACKET
        call->iovqc -=
#endif /* RXDEBUG_PACKET */
            rxi_FreePackets(0, &call->iovq);
    }
    if (call->mode != RX_MODE_SENDING) {
        if ((conn->type == RX_SERVER_CONNECTION)
            && (call->mode == RX_MODE_RECEIVING)) {
            call->mode = RX_MODE_SENDING;
            if (cp) {
#ifdef RX_TRACK_PACKETS
                cp->flags &= ~RX_PKTFLAG_CP;
#endif
                rxi_FreePacket(cp);
                cp = call->currentPacket = (struct rx_packet *)0;
                call->nLeft = 0;
                call->nFree = 0;
            }
        } else {
            return 0;
        }
    }
    /* Set up the iovec to point to data in packet buffers. */
    tnFree = call->nFree;
    tcurvec = call->curvec;
    tcurpos = call->curpos;
    tcurlen = call->curlen;
    do {
        int t;

        if (tnFree == 0) {
            /* current packet is full, allocate a new one */
            cp = rxi_AllocSendPacket(call, nbytes);
            if (!cp) {
                /* out of space, return what we have */
                *nio = nextio;
                return requestCount - nbytes;
            }
#ifdef RX_TRACK_PACKETS
            cp->flags |= RX_PKTFLAG_IOVQ;
#endif
            queue_Append(&call->iovq, cp);
#ifdef RXDEBUG_PACKET
            call->iovqc++;
#endif /* RXDEBUG_PACKET */
            tnFree = cp->length;
            tcurvec = 1;
            tcurpos =
                (char *)cp->wirevec[1].iov_base +
                call->conn->securityHeaderSize;
            tcurlen = cp->wirevec[1].iov_len - call->conn->securityHeaderSize;
        }
        if (tnFree < nbytes) {
            /* try to extend the current packet */
            int len, mud;
            len = cp->length;
            mud = rx_MaxUserDataSize(call);
            if (mud > len) {
                int want;
                want = MIN(nbytes - tnFree, mud - len);
                rxi_AllocDataBuf(cp, want, RX_PACKET_CLASS_SEND_CBUF);
                if (cp->length > (unsigned)mud)
                    cp->length = mud;
                tnFree += (cp->length - len);
                if (cp == call->currentPacket) {
                    call->nFree += (cp->length - len);
                }
            }
        }
        /* fill in the next entry in the iovec */
        t = MIN(tcurlen, nbytes);
        t = MIN(tnFree, t);
        iov[nextio].iov_base = tcurpos;
        iov[nextio].iov_len = t;
        nbytes -= t;
        tcurpos += t;
        tcurlen -= t;
        tnFree -= t;
        nextio++;

        if (!tcurlen) {
            /* need to get another struct iov */
            if (++tcurvec >= cp->niovecs) {
                /* current packet is full, extend it or move on to next packet */
                tnFree = 0;
            } else {
                tcurpos = (char *)cp->wirevec[tcurvec].iov_base;
                tcurlen = cp->wirevec[tcurvec].iov_len;
            }
        }
    } while (nbytes && nextio < maxio);
    *nio = nextio;
    return requestCount - nbytes;
}
int
rx_WritevAlloc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
               int nbytes)
{
    int bytes;
    SPLVAR;

    NETPRI;
    MUTEX_ENTER(&call->lock);
    bytes = rxi_WritevAlloc(call, iov, nio, maxio, nbytes);
    MUTEX_EXIT(&call->lock);
    USERPRI;
    return bytes;
}
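
/* The writev contract, in brief: rx_WritevAlloc hands the application
 * iovec entries that point directly into freshly allocated packet
 * buffers (parked on call->iovq); the application fills them in place
 * and then calls rx_WritevProc with the same iovec to commit the bytes.
 * See the sketch after rx_WritevProc below. */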
/* rxi_WritevProc -- internal version.
 *
 * Send buffers allocated in rxi_WritevAlloc.
 *
 * LOCKS USED -- called at netpri with rx global lock and call->lock held. */

int
rxi_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
{
    struct rx_packet *cp = NULL;
#ifdef RX_TRACK_PACKETS
    struct rx_packet *p, *np;
#endif
    int nextio;
    int requestCount;
    struct rx_queue tmpq;
#ifdef RXDEBUG_PACKET
    u_short tmpqc;
#endif

    requestCount = nbytes;
    nextio = 0;
    if (call->mode != RX_MODE_SENDING) {
        call->error = RX_PROTOCOL_ERROR;
    }
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
    /* Wait until TQ_BUSY is reset before trying to move any
     * packets to the transmit queue. */
    while (!call->error && call->flags & RX_CALL_TQ_BUSY) {
        call->flags |= RX_CALL_TQ_WAIT;
        call->tqWaiters++;
#ifdef RX_ENABLE_LOCKS
        CV_WAIT(&call->cv_tq, &call->lock);
#else /* RX_ENABLE_LOCKS */
        osi_rxSleep(&call->tq);
#endif /* RX_ENABLE_LOCKS */
        call->tqWaiters--;
        if (call->tqWaiters == 0)
            call->flags &= ~RX_CALL_TQ_WAIT;
    }
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */
    /* cp is no longer valid since we may have given up the lock */
    cp = call->currentPacket;

    if (call->error) {
        if (cp) {
#ifdef RX_TRACK_PACKETS
            cp->flags &= ~RX_PKTFLAG_CP;
            cp->flags |= RX_PKTFLAG_IOVQ;
#endif
            queue_Prepend(&call->iovq, cp);
#ifdef RXDEBUG_PACKET
            call->iovqc++;
#endif /* RXDEBUG_PACKET */
            cp = call->currentPacket = (struct rx_packet *)0;
        }
#ifdef RXDEBUG_PACKET
        call->iovqc -=
#endif /* RXDEBUG_PACKET */
            rxi_FreePackets(0, &call->iovq);
        return 0;
    }
    /* Loop through the I/O vector adjusting packet pointers.
     * Place full packets back onto the iovq once they are ready
     * to send. Set RX_PROTOCOL_ERROR if any problems are found in
     * the iovec. We put the loop condition at the end to ensure that
     * a zero length write will push a short packet. */
    nextio = 0;
    queue_Init(&tmpq);
#ifdef RXDEBUG_PACKET
    tmpqc = 0;
#endif /* RXDEBUG_PACKET */
    do {
        if (call->nFree == 0 && cp) {
            clock_NewTime();	/* Bogus:  need new time package */
            /* The 0, below, specifies that it is not the last packet:
             * there will be others. PrepareSendPacket may
             * alter the packet length by up to
             * conn->securityMaxTrailerSize */
            hadd32(call->bytesSent, cp->length);
            rxi_PrepareSendPacket(call, cp, 0);
            queue_Append(&tmpq, cp);
#ifdef RXDEBUG_PACKET
            tmpqc++;
#endif /* RXDEBUG_PACKET */
            cp = call->currentPacket = (struct rx_packet *)0;
            /* The head of the iovq is now the current packet */
            if (nbytes) {
                if (queue_IsEmpty(&call->iovq)) {
                    call->error = RX_PROTOCOL_ERROR;
#ifdef RXDEBUG_PACKET
                    tmpqc -=
#endif /* RXDEBUG_PACKET */
                        rxi_FreePackets(0, &tmpq);
                    return 0;
                }
                cp = queue_First(&call->iovq, rx_packet);
                queue_Remove(cp);
#ifdef RX_TRACK_PACKETS
                cp->flags &= ~RX_PKTFLAG_IOVQ;
#endif
#ifdef RXDEBUG_PACKET
                call->iovqc--;
#endif /* RXDEBUG_PACKET */
#ifdef RX_TRACK_PACKETS
                cp->flags |= RX_PKTFLAG_CP;
#endif
                call->currentPacket = cp;
                call->nFree = cp->length;
                call->curvec = 1;
                call->curpos =
                    (char *)cp->wirevec[1].iov_base +
                    call->conn->securityHeaderSize;
                call->curlen =
                    cp->wirevec[1].iov_len - call->conn->securityHeaderSize;
            }
        }
        if (nbytes) {
            /* The next iovec should point to the current position */
            if (iov[nextio].iov_base != call->curpos
                || iov[nextio].iov_len > (int)call->curlen) {
                call->error = RX_PROTOCOL_ERROR;
                if (cp) {
#ifdef RX_TRACK_PACKETS
                    cp->flags &= ~RX_PKTFLAG_CP;
#endif
                    queue_Prepend(&tmpq, cp);
#ifdef RXDEBUG_PACKET
                    tmpqc++;
#endif /* RXDEBUG_PACKET */
                    cp = call->currentPacket = (struct rx_packet *)0;
                }
#ifdef RXDEBUG_PACKET
                tmpqc -=
#endif /* RXDEBUG_PACKET */
                    rxi_FreePackets(0, &tmpq);
                return 0;
            }
            nbytes -= iov[nextio].iov_len;
            call->curpos += iov[nextio].iov_len;
            call->curlen -= iov[nextio].iov_len;
            call->nFree -= iov[nextio].iov_len;
            nextio++;
            if (call->curlen == 0) {
                if (++call->curvec > cp->niovecs) {
                    call->nFree = 0;
                } else {
                    call->curpos = (char *)cp->wirevec[call->curvec].iov_base;
                    call->curlen = cp->wirevec[call->curvec].iov_len;
                }
            }
        }
    } while (nbytes && nextio < nio);
    /* Move the packets from the temporary queue onto the transmit queue.
     * We may end up with more than call->twind packets on the queue. */
#ifdef RX_TRACK_PACKETS
    for (queue_Scan(&tmpq, p, np, rx_packet)) {
        p->flags |= RX_PKTFLAG_TQ;
    }
#endif
    queue_SpliceAppend(&call->tq, &tmpq);

    if (!(call->flags & (RX_CALL_FAST_RECOVER | RX_CALL_FAST_RECOVER_WAIT))) {
        rxi_Start(0, call, 0, 0);
    }
    /* Wait for the length of the transmit queue to fall below call->twind */
    while (!call->error && call->tnext + 1 > call->tfirst + (2 * call->twind)) {
        clock_NewTime();
        call->startWait = clock_Sec();
#ifdef RX_ENABLE_LOCKS
        CV_WAIT(&call->cv_twind, &call->lock);
#else
        call->flags |= RX_CALL_WAIT_WINDOW_ALLOC;
        osi_rxSleep(&call->twind);
#endif
        call->startWait = 0;
    }
    /* cp is no longer valid since we may have given up the lock */
    cp = call->currentPacket;

    if (call->error) {
        if (cp) {
#ifdef RX_TRACK_PACKETS
            cp->flags &= ~RX_PKTFLAG_CP;
#endif
            rxi_FreePacket(cp);
            cp = call->currentPacket = (struct rx_packet *)0;
        }
        return 0;
    }
    return requestCount - nbytes;
}

int
rx_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
{
    int bytes;
    SPLVAR;

    NETPRI;
    MUTEX_ENTER(&call->lock);
    bytes = rxi_WritevProc(call, iov, nio, nbytes);
    MUTEX_EXIT(&call->lock);
    USERPRI;
    return bytes;
}
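
/* Illustrative zero-copy send loop (a sketch, not part of this file),
 * combining the two phases described above:
 *
 *	struct iovec iov[16];
 *	int nio, nbytes;
 *	while ((nbytes = rx_WritevAlloc(call, iov, &nio, 16, want)) > 0) {
 *	    // fill iov[0..nio-1] with exactly nbytes of data ...
 *	    if (rx_WritevProc(call, iov, nio, nbytes) != nbytes)
 *	        break;	// call went into an error state
 *	    want -= nbytes;
 *	}
 */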
/* Flush any buffered data to the stream, switch to read mode
 * (clients) or to EOF mode (servers) */
void
rxi_FlushWrite(struct rx_call *call)
{
    struct rx_packet *cp = NULL;

    /* Free any packets from the last call to ReadvProc/WritevProc */
    if (queue_IsNotEmpty(&call->iovq)) {
#ifdef RXDEBUG_PACKET
        call->iovqc -=
#endif /* RXDEBUG_PACKET */
            rxi_FreePackets(0, &call->iovq);
    }

    if (call->mode == RX_MODE_SENDING) {

        call->mode =
            (call->conn->type ==
             RX_CLIENT_CONNECTION ? RX_MODE_RECEIVING : RX_MODE_EOF);
#ifdef RX_KERNEL_TRACE
        {
            int glockOwner = ISAFS_GLOCK();
            if (!glockOwner)
                AFS_GLOCK();
            afs_Trace3(afs_iclSetp, CM_TRACE_WASHERE, ICL_TYPE_STRING,
                       __FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER,
                       call);
            if (!glockOwner)
                AFS_GUNLOCK();
        }
#endif
#ifdef AFS_GLOBAL_RXLOCK_KERNEL
        /* Wait until TQ_BUSY is reset before adding any
         * packets to the transmit queue
         */
        while (call->flags & RX_CALL_TQ_BUSY) {
            call->flags |= RX_CALL_TQ_WAIT;
            call->tqWaiters++;
#ifdef RX_ENABLE_LOCKS
            CV_WAIT(&call->cv_tq, &call->lock);
#else /* RX_ENABLE_LOCKS */
            osi_rxSleep(&call->tq);
#endif /* RX_ENABLE_LOCKS */
            call->tqWaiters--;
            if (call->tqWaiters == 0)
                call->flags &= ~RX_CALL_TQ_WAIT;
        }
#endif /* AFS_GLOBAL_RXLOCK_KERNEL */

        /* cp is no longer valid since we may have given up the lock */
        cp = call->currentPacket;
        if (cp) {
            /* cp->length is only supposed to be the user's data */
            /* cp->length was already set to (then-current)
             * MaxUserDataSize or less. */
#ifdef RX_TRACK_PACKETS
            cp->flags &= ~RX_PKTFLAG_CP;
#endif
            cp->length -= call->nFree;
            call->currentPacket = (struct rx_packet *)0;
            call->nFree = 0;
        } else {
            cp = rxi_AllocSendPacket(call, 0);
            if (!cp) {
                /* Mode can no longer be MODE_SENDING */
                return;
            }
            cp->length = 0;
            cp->niovecs = 2;	/* header + space for rxkad stuff */
            call->nFree = 0;
        }
        /* The 1 specifies that this is the last packet */
        hadd32(call->bytesSent, cp->length);
        rxi_PrepareSendPacket(call, cp, 1);
#ifdef RX_TRACK_PACKETS
        cp->flags |= RX_PKTFLAG_TQ;
#endif
        queue_Append(&call->tq, cp);
#ifdef RXDEBUG_PACKET
        call->tqc++;
#endif /* RXDEBUG_PACKET */
        if (!(call->flags & (RX_CALL_FAST_RECOVER |
                             RX_CALL_FAST_RECOVER_WAIT))) {
            rxi_Start(0, call, 0, 0);
        }
    }
}
/* Flush any buffered data to the stream, switch to read mode
 * (clients) or to EOF mode (servers) */
void
rx_FlushWrite(struct rx_call *call)
{
    SPLVAR;
    NETPRI;
    MUTEX_ENTER(&call->lock);
    rxi_FlushWrite(call);
    MUTEX_EXIT(&call->lock);
    USERPRI;
}
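
/* End-to-end sketch (not part of this file) of how these routines are
 * typically driven by a client, assuming a connection "conn" and
 * buffers set up elsewhere via the usual rx entry points:
 *
 *	struct rx_call *call = rx_NewCall(conn);
 *	rx_WriteProc32(call, &opcode);		// marshal the request
 *	rx_WriteProc(call, inbuf, inlen);
 *	rx_FlushWrite(call);			// push the short last packet and
 *						// switch the call to read mode
 *	rx_ReadProc(call, outbuf, outlen);	// unmarshal the reply
 *	code = rx_EndCall(call, 0);
 *
 * The explicit flush is optional here: as rxi_ReadProc shows, the first
 * read on a sending call triggers rxi_FlushWrite implicitly. */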