2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #ifdef KERNEL
12 #include "afs/param.h"
13 #else
14 #include <afs/param.h>
15 #endif
22 #ifdef RX_KERNEL_TRACE
23 #include "rx_kcommon.h"
25 #if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
26 #include "afs/sysincludes.h"
31 #if defined(AFS_AIX_ENV) || defined(AFS_AUX_ENV) || defined(AFS_SUN5_ENV)
35 #include <net/net_globals.h>
36 #endif /* AFS_OSF_ENV */
37 #ifdef AFS_LINUX20_ENV
40 #include "netinet/in.h"
41 #if defined(AFS_SGI_ENV)
42 #include "afs/sysincludes.h"
45 #include "afs/afs_args.h"
46 #include "afs/afs_osi.h"
47 #if (defined(AFS_AUX_ENV) || defined(AFS_AIX_ENV))
51 #include "afs/sysincludes.h"
54 #undef RXDEBUG /* turn off debugging */
57 #include "rx_kmutex.h"
58 #include "rx/rx_kernel.h"
59 #include "rx/rx_clock.h"
60 #include "rx/rx_queue.h"
62 #include "rx/rx_globals.h"
71 #endif /* AFS_OSF_ENV */
73 # include <sys/types.h>
75 # include <winsock2.h>
76 #else /* !AFS_NT40_ENV */
77 # include <sys/socket.h>
78 # include <sys/file.h>
80 # include <netinet/in.h>
81 # include <sys/stat.h>
82 # include <sys/time.h>
83 #endif /* !AFS_NT40_ENV */
89 # include "rx_clock.h"
90 # include "rx_queue.h"
92 # include "rx_globals.h"
96 /* rxdb_fileID is used to identify the lock location, along with line#. */
97 static int rxdb_fileID = RXDB_FILE_RX_RDWR;
98 #endif /* RX_LOCKS_DB */
99 /* rxi_ReadProc -- internal version.
101 * LOCKS USED -- called at netpri with rx global lock and call->lock held.
104 rxi_ReadProc(register struct rx_call *call, register char *buf,
107 register struct rx_packet *cp = call->currentPacket;
108 register struct rx_packet *rp;
109 register int requestCount;
110 register unsigned int t;
112 /* XXXX took out clock_NewTime from here. Was it needed? */
113 requestCount = nbytes;
115 /* Free any packets from the last call to ReadvProc/WritevProc */
116 if (queue_IsNotEmpty(&call->iovq)) {
117 rxi_FreePackets(0, &call->iovq);
121 if (call->nLeft == 0) {
122 /* Get next packet */
124 if (call->error || (call->mode != RX_MODE_RECEIVING)) {
128 if (call->mode == RX_MODE_SENDING) {
129 rxi_FlushWrite(call);
133 if (queue_IsNotEmpty(&call->rq)) {
134 /* Check that next packet available is next in sequence */
135 rp = queue_First(&call->rq, rx_packet);
136 if (rp->header.seq == call->rnext) {
138 register struct rx_connection *conn = call->conn;
140 rp->flags &= ~RX_PKTFLAG_RQ;
142 /* RXS_CheckPacket called to undo RXS_PreparePacket's
143 * work. It may reduce the length of the packet by up
144 * to conn->maxTrailerSize, to reflect the length of the
145 * data + the header. */
147 RXS_CheckPacket(conn->securityObject, call,
149 /* Used to merely shut down the call, but now we
150 * shut down the whole connection since this may
151 * indicate an attempt to hijack it */
153 MUTEX_EXIT(&call->lock);
154 rxi_ConnectionError(conn, error);
155 MUTEX_ENTER(&conn->conn_data_lock);
156 rp = rxi_SendConnectionAbort(conn, rp, 0, 0);
157 MUTEX_EXIT(&conn->conn_data_lock);
159 MUTEX_ENTER(&call->lock);
164 cp = call->currentPacket = rp;
165 call->currentPacket->flags |= RX_PKTFLAG_CP;
166 call->curvec = 1; /* 0th vec is always header */
167 /* begin at the beginning [ more or less ], continue
168 * on until the end, then stop. */
170 (char *)cp->wirevec[1].iov_base +
171 call->conn->securityHeaderSize;
173 cp->wirevec[1].iov_len -
174 call->conn->securityHeaderSize;
176 /* Notice that this code works correctly if the data
177 * size is 0 (which it may be--no reply arguments from
178 * server, for example). This relies heavily on the
179 * fact that the code below immediately frees the packet
180 * (no yields, etc.). If it didn't, this would be a
181 * problem because a value of zero for call->nLeft
182 * normally means that there is no read packet */
183 call->nLeft = cp->length;
184 hadd32(call->bytesRcvd, cp->length);
186 /* Send a hard ack for every rxi_HardAckRate+1 packets
187 * consumed. Otherwise schedule an event to send
188 * the hard ack later on.
191 if (!(call->flags & RX_CALL_RECEIVE_DONE)) {
192 if (call->nHardAcks > (u_short) rxi_HardAckRate) {
193 rxevent_Cancel(call->delayedAckEvent, call,
194 RX_CALL_REFCOUNT_DELAY);
195 rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
197 struct clock when, now;
200 /* Delay to consolidate ack packets */
201 clock_Add(&when, &rx_hardAckDelay);
202 if (!call->delayedAckEvent
203 || clock_Gt(&call->delayedAckEvent->
205 rxevent_Cancel(call->delayedAckEvent,
207 RX_CALL_REFCOUNT_DELAY);
208 CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
209 call->delayedAckEvent =
210 rxevent_PostNow(&when, &now,
211 rxi_SendDelayedAck, call,
221 /* MTUXXX doesn't there need to be an "else" here ??? */
223 /* Are there ever going to be any more packets? */
224 if (call->flags & RX_CALL_RECEIVE_DONE) {
225 return requestCount - nbytes;
227 /* Wait for in-sequence packet */
228 call->flags |= RX_CALL_READER_WAIT;
230 call->startWait = clock_Sec();
231 while (call->flags & RX_CALL_READER_WAIT) {
232 #ifdef RX_ENABLE_LOCKS
233 CV_WAIT(&call->cv_rq, &call->lock);
235 osi_rxSleep(&call->rq);
240 #ifdef RX_ENABLE_LOCKS
244 #endif /* RX_ENABLE_LOCKS */
248 /* MTUXXX this should be replaced by some error-recovery code before shipping */
249 /* yes, the following block is allowed to be the ELSE clause (or not) */
250 /* It's possible for call->nLeft to be smaller than any particular
251 * iov_len. Usually, recvmsg doesn't change the iov_len, since it
252 * reflects the size of the buffer. We have to keep track of the
253 * number of bytes read in the length field of the packet struct. On
254 * the final portion of a received packet, it's almost certain that
255 * call->nLeft will be smaller than the final buffer. */
256 while (nbytes && cp) {
257 t = MIN((int)call->curlen, nbytes);
258 t = MIN(t, (int)call->nLeft);
259 memcpy(buf, call->curpos, t);
267 /* out of packet. Get another one. */
268 call->currentPacket->flags &= ~RX_PKTFLAG_CP;
270 cp = call->currentPacket = (struct rx_packet *)0;
271 } else if (!call->curlen) {
272 /* need to get another struct iov */
273 if (++call->curvec >= cp->niovecs) {
274 /* current packet is exhausted, get ready for another */
275 /* don't worry about curvec and stuff, they get set somewhere else */
276 call->currentPacket->flags &= ~RX_PKTFLAG_CP;
278 cp = call->currentPacket = (struct rx_packet *)0;
282 (char *)cp->wirevec[call->curvec].iov_base;
283 call->curlen = cp->wirevec[call->curvec].iov_len;
288 /* user buffer is full, return */
298 rx_ReadProc(struct rx_call *call, char *buf, int nbytes)
307 * Free any packets from the last call to ReadvProc/WritevProc.
308 * We do not need the lock because the receiver threads only
309 * touch the iovq when the RX_CALL_IOVEC_WAIT flag is set, and the
310 * RX_CALL_IOVEC_WAIT is always cleared before returning from
311 * ReadvProc/WritevProc.
313 if (!queue_IsEmpty(&call->iovq)) {
314 rxi_FreePackets(0, &call->iovq);
318 * Most common case: all of the data is in the current iovec.
319 * We do not need the lock because this is the only thread that
320 * updates the curlen, curpos, nLeft fields.
322 * We are relying on nLeft being zero unless the call is in receive mode.
324 tcurlen = call->curlen;
325 tnLeft = call->nLeft;
326 if (!call->error && tcurlen > nbytes && tnLeft > nbytes) {
327 tcurpos = call->curpos;
328 memcpy(buf, tcurpos, nbytes);
329 call->curpos = tcurpos + nbytes;
330 call->curlen = tcurlen - nbytes;
331 call->nLeft = tnLeft - nbytes;
334 /* out of packet. Get another one. */
336 MUTEX_ENTER(&call->lock);
337 rxi_FreePacket(call->currentPacket);
338 call->currentPacket = (struct rx_packet *)0;
339 MUTEX_EXIT(&call->lock);
346 MUTEX_ENTER(&call->lock);
347 bytes = rxi_ReadProc(call, buf, nbytes);
348 MUTEX_EXIT(&call->lock);
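/* Hedged usage sketch (not part of the library): how a caller might pull a
 * fixed-size block from a call with rx_ReadProc.  A short return means the
 * peer finished sending or the call hit an error, so callers check the byte
 * count rather than assume success.  The helper name is hypothetical. */
#if 0
static int
example_read_block(struct rx_call *call, char *buf, int len)
{
    int nread = rx_ReadProc(call, buf, len);

    return (nread == len) ? 0 : -1;	/* short read: stream ended or call errored */
}
#endif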
353 /* Optimization for unmarshalling 32 bit integers */
355 rx_ReadProc32(struct rx_call *call, afs_int32 * value)
364 * Free any packets from the last call to ReadvProc/WritevProc.
365 * We do not need the lock because the receiver threads only
366 * touch the iovq when the RX_CALL_IOVEC_WAIT flag is set, and the
367 * RX_CALL_IOVEC_WAIT is always cleared before returning from
368 * ReadvProc/WritevProc.
370 if (!queue_IsEmpty(&call->iovq)) {
371 rxi_FreePackets(0, &call->iovq);
375 * Most common case: all of the data is in the current iovec.
376 * We do not need the lock because this is the only thread that
377 * updates the curlen, curpos, nLeft fields.
379 * We are relying on nLeft being zero unless the call is in receive mode.
381 tcurlen = call->curlen;
382 tnLeft = call->nLeft;
383 if (!call->error && tcurlen >= sizeof(afs_int32)
384 && tnLeft >= sizeof(afs_int32)) {
385 tcurpos = call->curpos;
386 memcpy((char *)value, tcurpos, sizeof(afs_int32));
387 call->curpos = tcurpos + sizeof(afs_int32);
388 call->curlen = (u_short)(tcurlen - sizeof(afs_int32));
389 call->nLeft = (u_short)(tnLeft - sizeof(afs_int32));
391 /* out of packet. Get another one. */
393 MUTEX_ENTER(&call->lock);
394 rxi_FreePacket(call->currentPacket);
395 call->currentPacket = (struct rx_packet *)0;
396 MUTEX_EXIT(&call->lock);
399 return sizeof(afs_int32);
403 MUTEX_ENTER(&call->lock);
404 bytes = rxi_ReadProc(call, (char *)value, sizeof(afs_int32));
405 MUTEX_EXIT(&call->lock);
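/* Hedged usage sketch (not part of the library): unmarshalling one 32-bit
 * value.  rx_ReadProc32 only copies four bytes from the stream; any
 * byte-order conversion is assumed to be the caller's job (normally done in
 * the XDR layer), so the ntohl() below and the helper name are illustrative
 * only. */
#if 0
static int
example_read_int32(struct rx_call *call, afs_int32 * out)
{
    afs_int32 raw;

    if (rx_ReadProc32(call, &raw) != sizeof(afs_int32))
	return -1;		/* stream ended early or the call errored */
    *out = ntohl(raw);
    return 0;
}
#endif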
412 * Uses packets in the receive queue to fill in as much of the
413 * current iovec as possible. Does not block if it runs out
414 * of packets to complete the iovec. Return true if an ack packet
415 * was sent, otherwise return false */
417 rxi_FillReadVec(struct rx_call *call, afs_uint32 serial)
421 register unsigned int t;
422 struct rx_packet *rp;
423 struct rx_packet *curp;
424 struct iovec *call_iov;
425 struct iovec *cur_iov = NULL;
427 curp = call->currentPacket;
429 cur_iov = &curp->wirevec[call->curvec];
431 call_iov = &call->iov[call->iovNext];
433 while (!call->error && call->iovNBytes && call->iovNext < call->iovMax) {
434 if (call->nLeft == 0) {
435 /* Get next packet */
436 if (queue_IsNotEmpty(&call->rq)) {
437 /* Check that next packet available is next in sequence */
438 rp = queue_First(&call->rq, rx_packet);
439 if (rp->header.seq == call->rnext) {
441 register struct rx_connection *conn = call->conn;
443 rp->flags &= ~RX_PKTFLAG_RQ;
445 /* RXS_CheckPacket called to undo RXS_PreparePacket's
446 * work. It may reduce the length of the packet by up
447 * to conn->maxTrailerSize, to reflect the length of the
448 * data + the header. */
450 RXS_CheckPacket(conn->securityObject, call, rp))) {
451 /* Used to merely shut down the call, but now we
452 * shut down the whole connection since this may
453 * indicate an attempt to hijack it */
455 MUTEX_EXIT(&call->lock);
456 rxi_ConnectionError(conn, error);
457 MUTEX_ENTER(&conn->conn_data_lock);
458 rp = rxi_SendConnectionAbort(conn, rp, 0, 0);
459 MUTEX_EXIT(&conn->conn_data_lock);
461 MUTEX_ENTER(&call->lock);
466 curp = call->currentPacket = rp;
467 call->currentPacket->flags |= RX_PKTFLAG_CP;
468 call->curvec = 1; /* 0th vec is always header */
469 cur_iov = &curp->wirevec[1];
470 /* begin at the beginning [ more or less ], continue
471 * on until the end, then stop. */
473 (char *)curp->wirevec[1].iov_base +
474 call->conn->securityHeaderSize;
476 curp->wirevec[1].iov_len -
477 call->conn->securityHeaderSize;
479 /* Notice that this code works correctly if the data
480 * size is 0 (which it may be--no reply arguments from
481 * server, for example). This relies heavily on the
482 * fact that the code below immediately frees the packet
483 * (no yields, etc.). If it didn't, this would be a
484 * problem because a value of zero for call->nLeft
485 * normally means that there is no read packet */
486 call->nLeft = curp->length;
487 hadd32(call->bytesRcvd, curp->length);
489 /* Send a hard ack for every rxi_HardAckRate+1 packets
490 * consumed. Otherwise schedule an event to send
491 * the hard ack later on.
501 /* It's possible for call->nLeft to be smaller than any particular
502 * iov_len. Usually, recvmsg doesn't change the iov_len, since it
503 * reflects the size of the buffer. We have to keep track of the
504 * number of bytes read in the length field of the packet struct. On
505 * the final portion of a received packet, it's almost certain that
506 * call->nLeft will be smaller than the final buffer. */
507 while (call->iovNBytes && call->iovNext < call->iovMax && curp) {
509 t = MIN((int)call->curlen, call->iovNBytes);
510 t = MIN(t, (int)call->nLeft);
511 call_iov->iov_base = call->curpos;
512 call_iov->iov_len = t;
515 call->iovNBytes -= t;
521 /* out of packet. Get another one. */
522 curp->flags &= ~RX_PKTFLAG_CP;
523 curp->flags |= RX_PKTFLAG_IOVQ;
524 queue_Append(&call->iovq, curp);
525 curp = call->currentPacket = (struct rx_packet *)0;
526 } else if (!call->curlen) {
527 /* need to get another struct iov */
528 if (++call->curvec >= curp->niovecs) {
529 /* current packet is exhausted, get ready for another */
530 /* don't worry about curvec and stuff, they get set somewhere else */
531 curp->flags &= ~RX_PKTFLAG_CP;
532 curp->flags |= RX_PKTFLAG_IOVQ;
533 queue_Append(&call->iovq, curp);
534 curp = call->currentPacket = (struct rx_packet *)0;
538 call->curpos = (char *)cur_iov->iov_base;
539 call->curlen = cur_iov->iov_len;
545 /* If we consumed any packets then check whether we need to
546 * send a hard ack. */
547 if (didConsume && (!(call->flags & RX_CALL_RECEIVE_DONE))) {
548 if (call->nHardAcks > (u_short) rxi_HardAckRate) {
549 rxevent_Cancel(call->delayedAckEvent, call,
550 RX_CALL_REFCOUNT_DELAY);
551 rxi_SendAck(call, 0, serial, RX_ACK_DELAY, 0);
554 struct clock when, now;
557 /* Delay to consolidate ack packets */
558 clock_Add(&when, &rx_hardAckDelay);
559 if (!call->delayedAckEvent
560 || clock_Gt(&call->delayedAckEvent->eventTime, &when)) {
561 rxevent_Cancel(call->delayedAckEvent, call,
562 RX_CALL_REFCOUNT_DELAY);
563 CALL_HOLD(call, RX_CALL_REFCOUNT_DELAY);
564 call->delayedAckEvent =
565 rxevent_PostNow(&when, &now, rxi_SendDelayedAck, call, 0);
573 /* rxi_ReadvProc -- internal version.
575 * Fills in an iovec with pointers to the packet buffers. All packets
576 * except the last packet (new current packet) are moved to the iovq
577 * while the application is processing the data.
579 * LOCKS USED -- called at netpri with rx global lock and call->lock held.
582 rxi_ReadvProc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
588 requestCount = nbytes;
591 /* Free any packets from the last call to ReadvProc/WritevProc */
592 if (queue_IsNotEmpty(&call->iovq)) {
593 rxi_FreePackets(0, &call->iovq);
596 if (call->mode == RX_MODE_SENDING) {
597 rxi_FlushWrite(call);
604 /* Get whatever data is currently available in the receive queue.
605 * If rxi_FillReadVec sends an ack packet then it is possible
606 * that we will receive more data while we drop the call lock
607 * to send the packet. Set the RX_CALL_IOVEC_WAIT flag
608 * here to avoid a race with the receive thread if we send
609 * hard acks in rxi_FillReadVec. */
610 call->flags |= RX_CALL_IOVEC_WAIT;
611 call->iovNBytes = nbytes;
612 call->iovMax = maxio;
615 rxi_FillReadVec(call, 0);
617 /* if we need more data then sleep until the receive thread has
618 * filled in the rest. */
619 if (!call->error && call->iovNBytes && call->iovNext < call->iovMax
620 && !(call->flags & RX_CALL_RECEIVE_DONE)) {
621 call->flags |= RX_CALL_READER_WAIT;
623 call->startWait = clock_Sec();
624 while (call->flags & RX_CALL_READER_WAIT) {
625 #ifdef RX_ENABLE_LOCKS
626 CV_WAIT(&call->cv_rq, &call->lock);
628 osi_rxSleep(&call->rq);
633 call->flags &= ~RX_CALL_IOVEC_WAIT;
634 #ifdef RX_ENABLE_LOCKS
638 #endif /* RX_ENABLE_LOCKS */
641 *nio = call->iovNext;
642 return nbytes - call->iovNBytes;
646 rx_ReadvProc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
653 MUTEX_ENTER(&call->lock);
654 bytes = rxi_ReadvProc(call, iov, nio, maxio, nbytes);
655 MUTEX_EXIT(&call->lock);
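/* Hedged usage sketch (not part of the library): draining a call with
 * rx_ReadvProc.  Each pass fills an iovec that points directly into rx packet
 * buffers; the data is only valid until the next Read/Readv call, which
 * recycles those packets from the iovq.  The consume() callback, the
 * 15-entry iovec, and the 64K request size are assumptions for illustration. */
#if 0
static void
example_readv_loop(struct rx_call *call,
		   void (*consume) (const void *data, size_t len))
{
    struct iovec iov[15];
    int nio, nbytes, i;

    do {
	nbytes = rx_ReadvProc(call, iov, &nio, 15, 65536);
	for (i = 0; i < nio; i++)
	    consume(iov[i].iov_base, iov[i].iov_len);
    } while (nbytes > 0);
}
#endif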
660 /* rxi_WriteProc -- internal version.
662 * LOCKS USED -- called at netpri with rx global lock and call->lock held. */
665 rxi_WriteProc(register struct rx_call *call, register char *buf,
668 struct rx_connection *conn = call->conn;
669 register struct rx_packet *cp = call->currentPacket;
670 register unsigned int t;
671 int requestCount = nbytes;
673 /* Free any packets from the last call to ReadvProc/WritevProc */
674 if (queue_IsNotEmpty(&call->iovq)) {
675 rxi_FreePackets(0, &call->iovq);
678 if (call->mode != RX_MODE_SENDING) {
679 if ((conn->type == RX_SERVER_CONNECTION)
680 && (call->mode == RX_MODE_RECEIVING)) {
681 call->mode = RX_MODE_SENDING;
683 cp->flags &= ~RX_PKTFLAG_CP;
685 cp = call->currentPacket = (struct rx_packet *)0;
694 /* Loop condition is checked at end, so that a write of 0 bytes
695 * will force a packet to be created--especially for the case where
696 * there are 0 bytes on the stream, but we must send a packet
699 if (call->nFree == 0) {
700 if (!call->error && cp) {
701 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
702 /* Wait until TQ_BUSY is reset before adding any
703 * packets to the transmit queue
705 while (call->flags & RX_CALL_TQ_BUSY) {
706 call->flags |= RX_CALL_TQ_WAIT;
707 #ifdef RX_ENABLE_LOCKS
708 CV_WAIT(&call->cv_tq, &call->lock);
709 #else /* RX_ENABLE_LOCKS */
710 osi_rxSleep(&call->tq);
711 #endif /* RX_ENABLE_LOCKS */
713 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
714 clock_NewTime(); /* Bogus: need new time package */
715 /* The 0, below, specifies that it is not the last packet:
716 * there will be others. PrepareSendPacket may
717 * alter the packet length by up to
718 * conn->securityMaxTrailerSize */
719 hadd32(call->bytesSent, cp->length);
720 rxi_PrepareSendPacket(call, cp, 0);
721 cp->flags &= ~RX_PKTFLAG_CP;
722 cp->flags |= RX_PKTFLAG_TQ;
723 queue_Append(&call->tq, cp);
724 cp = call->currentPacket = (struct rx_packet *)0;
727 flags & (RX_CALL_FAST_RECOVER |
728 RX_CALL_FAST_RECOVER_WAIT))) {
729 rxi_Start(0, call, 0, 0);
731 } else if (cp) {
732 cp->flags &= ~RX_PKTFLAG_CP;
734 cp = call->currentPacket = (struct rx_packet *)0;
736 /* Wait for transmit window to open up */
738 && call->tnext + 1 > call->tfirst + (2 * call->twind)) {
740 call->startWait = clock_Sec();
742 #ifdef RX_ENABLE_LOCKS
743 CV_WAIT(&call->cv_twind, &call->lock);
745 call->flags |= RX_CALL_WAIT_WINDOW_ALLOC;
746 osi_rxSleep(&call->twind);
750 #ifdef RX_ENABLE_LOCKS
754 #endif /* RX_ENABLE_LOCKS */
756 if ((cp = rxi_AllocSendPacket(call, nbytes))) {
757 cp->flags |= RX_PKTFLAG_CP;
758 call->currentPacket = cp;
759 call->nFree = cp->length;
760 call->curvec = 1; /* 0th vec is always header */
761 /* begin at the beginning [ more or less ], continue
762 * on until the end, then stop. */
764 (char *)cp->wirevec[1].iov_base +
765 call->conn->securityHeaderSize;
767 cp->wirevec[1].iov_len - call->conn->securityHeaderSize;
771 cp->flags &= ~RX_PKTFLAG_CP;
773 call->currentPacket = NULL;
779 if (cp && (int)call->nFree < nbytes) {
780 /* Try to extend the current buffer */
781 register int len, mud;
783 mud = rx_MaxUserDataSize(call);
786 want = MIN(nbytes - (int)call->nFree, mud - len);
787 rxi_AllocDataBuf(cp, want, RX_PACKET_CLASS_SEND_CBUF);
788 if (cp->length > (unsigned)mud)
790 call->nFree += (cp->length - len);
794 /* If the remaining bytes fit in the buffer, then store them
795 * and return. Don't ship a buffer that's full immediately to
796 * the peer--we don't know if it's the last buffer yet */
802 while (nbytes && call->nFree) {
804 t = MIN((int)call->curlen, nbytes);
805 t = MIN((int)call->nFree, t);
806 memcpy(call->curpos, buf, t);
810 call->curlen -= (u_short)t;
811 call->nFree -= (u_short)t;
814 /* need to get another struct iov */
815 if (++call->curvec >= cp->niovecs) {
816 /* current packet is full, extend or send it */
819 call->curpos = (char *)cp->wirevec[call->curvec].iov_base;
820 call->curlen = cp->wirevec[call->curvec].iov_len;
823 } /* while bytes to send and room to send them */
825 /* might be out of space now */
828 }			/* more data to send, so get another packet and keep going */
831 return requestCount - nbytes;
835 rx_WriteProc(struct rx_call *call, char *buf, int nbytes)
844 * Free any packets from the last call to ReadvProc/WritevProc.
845 * We do not need the lock because the receiver threads only
846 * touch the iovq when the RX_CALL_IOVEC_WAIT flag is set, and the
847 * RX_CALL_IOVEC_WAIT is always cleared before returning from
848 * ReadvProc/WritevProc.
850 if (queue_IsNotEmpty(&call->iovq)) {
851 rxi_FreePackets(0, &call->iovq);
855 * Most common case: all of the data fits in the current iovec.
856 * We do not need the lock because this is the only thread that
857 * updates the curlen, curpos, nFree fields.
859 * We are relying on nFree being zero unless the call is in send mode.
861 tcurlen = (int)call->curlen;
862 tnFree = (int)call->nFree;
863 if (!call->error && tcurlen >= nbytes && tnFree >= nbytes) {
864 tcurpos = call->curpos;
865 memcpy(tcurpos, buf, nbytes);
866 call->curpos = tcurpos + nbytes;
867 call->curlen = (u_short)(tcurlen - nbytes);
868 call->nFree = (u_short)(tnFree - nbytes);
873 MUTEX_ENTER(&call->lock);
874 bytes = rxi_WriteProc(call, buf, nbytes);
875 MUTEX_EXIT(&call->lock);
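/* Hedged usage sketch (not part of the library): pushing a buffer to the peer
 * with rx_WriteProc.  A return value smaller than the request means the call
 * has gone into an error state; there is no partial-write retry at this
 * layer.  The helper name is hypothetical. */
#if 0
static int
example_write_block(struct rx_call *call, char *buf, int len)
{
    return (rx_WriteProc(call, buf, len) == len) ? 0 : -1;
}
#endif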
880 /* Optimization for marshalling 32 bit arguments */
882 rx_WriteProc32(register struct rx_call *call, register afs_int32 * value)
891 * Free any packets from the last call to ReadvProc/WritevProc.
892 * We do not need the lock because the receiver threads only
893 * touch the iovq when the RX_CALL_IOVEC_WAIT flag is set, and the
894 * RX_CALL_IOVEC_WAIT is always cleared before returning from
895 * ReadvProc/WritevProc.
897 if (queue_IsNotEmpty(&call->iovq)) {
898 rxi_FreePackets(0, &call->iovq);
902 * Most common case: all of the data fits in the current iovec.
903 * We do not need the lock because this is the only thread that
904 * updates the curlen, curpos, nFree fields.
906 * We are relying on nFree being zero unless the call is in send mode.
908 tcurlen = call->curlen;
909 tnFree = call->nFree;
910 if (!call->error && tcurlen >= sizeof(afs_int32)
911 && tnFree >= sizeof(afs_int32)) {
912 tcurpos = call->curpos;
913 if (!((size_t)tcurpos & (sizeof(afs_int32) - 1))) {
914 *((afs_int32 *) (tcurpos)) = *value;
916 memcpy(tcurpos, (char *)value, sizeof(afs_int32));
918 call->curpos = tcurpos + sizeof(afs_int32);
919 call->curlen = (u_short)(tcurlen - sizeof(afs_int32));
920 call->nFree = (u_short)(tnFree - sizeof(afs_int32));
921 return sizeof(afs_int32);
925 MUTEX_ENTER(&call->lock);
926 bytes = rxi_WriteProc(call, (char *)value, sizeof(afs_int32));
927 MUTEX_EXIT(&call->lock);
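/* Hedged usage sketch (not part of the library): marshalling one 32-bit
 * value.  As on the read side, rx_WriteProc32 copies raw bytes; the htonl()
 * conversion is assumed to be done by the caller (normally the XDR layer).
 * The helper name is hypothetical. */
#if 0
static int
example_write_int32(struct rx_call *call, afs_int32 val)
{
    afs_int32 wire = htonl(val);

    return (rx_WriteProc32(call, &wire) == sizeof(afs_int32)) ? 0 : -1;
}
#endif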
932 /* rxi_WritevAlloc -- internal version.
934 * Fill in an iovec to point to data in packet buffers. The application
935 * calls rxi_WritevProc when the buffers are full.
937 * LOCKS USED -- called at netpri with rx global lock and call->lock held. */
940 rxi_WritevAlloc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
943 struct rx_connection *conn = call->conn;
944 struct rx_packet *cp = call->currentPacket;
947 /* Temporary values, real work is done in rxi_WritevProc */
953 requestCount = nbytes;
956 /* Free any packets from the last call to ReadvProc/WritevProc */
957 if (queue_IsNotEmpty(&call->iovq)) {
958 rxi_FreePackets(0, &call->iovq);
961 if (call->mode != RX_MODE_SENDING) {
962 if ((conn->type == RX_SERVER_CONNECTION)
963 && (call->mode == RX_MODE_RECEIVING)) {
964 call->mode = RX_MODE_SENDING;
966 cp->flags &= ~RX_PKTFLAG_CP;
968 cp = call->currentPacket = (struct rx_packet *)0;
977 /* Set up the iovec to point to data in packet buffers. */
978 tnFree = call->nFree;
979 tcurvec = call->curvec;
980 tcurpos = call->curpos;
981 tcurlen = call->curlen;
983 register unsigned int t;
986 /* current packet is full, allocate a new one */
987 cp = rxi_AllocSendPacket(call, nbytes);
989 /* out of space, return what we have */
991 return requestCount - nbytes;
993 cp->flags |= RX_PKTFLAG_IOVQ;
994 queue_Append(&call->iovq, cp);
998 (char *)cp->wirevec[1].iov_base +
999 call->conn->securityHeaderSize;
1000 tcurlen = cp->wirevec[1].iov_len - call->conn->securityHeaderSize;
1003 if (tnFree < nbytes) {
1004 /* try to extend the current packet */
1005 register int len, mud;
1007 mud = rx_MaxUserDataSize(call);
1010 want = MIN(nbytes - tnFree, mud - len);
1011 rxi_AllocDataBuf(cp, want, RX_PACKET_CLASS_SEND_CBUF);
1012 if (cp->length > (unsigned)mud)
1014 tnFree += (cp->length - len);
1015 if (cp == call->currentPacket) {
1016 call->nFree += (cp->length - len);
1021 /* fill in the next entry in the iovec */
1022 t = MIN(tcurlen, nbytes);
1024 iov[nextio].iov_base = tcurpos;
1025 iov[nextio].iov_len = t;
1033 /* need to get another struct iov */
1034 if (++tcurvec >= cp->niovecs) {
1035 /* current packet is full, extend it or move on to next packet */
1038 tcurpos = (char *)cp->wirevec[tcurvec].iov_base;
1039 tcurlen = cp->wirevec[tcurvec].iov_len;
1042 } while (nbytes && nextio < maxio);
1044 return requestCount - nbytes;
1048 rx_WritevAlloc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
1055 MUTEX_ENTER(&call->lock);
1056 bytes = rxi_WritevAlloc(call, iov, nio, maxio, nbytes);
1057 MUTEX_EXIT(&call->lock);
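/* Hedged usage sketch (not part of the library): reserving send buffers with
 * rx_WritevAlloc.  The iovec it fills in points into packets parked on
 * call->iovq; the caller writes its data directly into those buffers and then
 * hands the same iovec to rx_WritevProc (see the sketch after that routine).
 * The 64K request size is an assumption for illustration. */
#if 0
static int
example_writev_reserve(struct rx_call *call, struct iovec *iov, int maxio,
		       int *nio)
{
    /* returns the number of bytes actually reserved; may be less than
     * requested if packet allocation runs short */
    return rx_WritevAlloc(call, iov, nio, maxio, 65536);
}
#endif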
1062 /* rxi_WritevProc -- internal version.
1064 * Send buffers allocated in rxi_WritevAlloc.
1066 * LOCKS USED -- called at netpri with rx global lock and call->lock held. */
1069 rxi_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
1071 struct rx_packet *cp = call->currentPacket;
1074 struct rx_queue tmpq;
1076 requestCount = nbytes;
1079 if (call->mode != RX_MODE_SENDING) {
1080 call->error = RX_PROTOCOL_ERROR;
1082 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
1083 /* Wait until TQ_BUSY is reset before trying to move any
1084 * packets to the transmit queue. */
1085 while (!call->error && call->flags & RX_CALL_TQ_BUSY) {
1086 call->flags |= RX_CALL_TQ_WAIT;
1087 #ifdef RX_ENABLE_LOCKS
1088 CV_WAIT(&call->cv_tq, &call->lock);
1089 #else /* RX_ENABLE_LOCKS */
1090 osi_rxSleep(&call->tq);
1091 #endif /* RX_ENABLE_LOCKS */
1093 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
1097 cp->flags &= ~RX_PKTFLAG_CP;
1098 cp->flags |= RX_PKTFLAG_IOVQ;
1099 queue_Prepend(&call->iovq, cp);
1100 cp = call->currentPacket = (struct rx_packet *)0;
1102 rxi_FreePackets(0, &call->iovq);
1106 /* Loop through the I/O vector adjusting packet pointers.
1107 * Place full packets back onto the iovq once they are ready
1108 * to send. Set RX_PROTOCOL_ERROR if any problems are found in
1109 * the iovec. We put the loop condition at the end to ensure that
1110 * a zero length write will push a short packet. */
1114 if (call->nFree == 0 && cp) {
1115 clock_NewTime(); /* Bogus: need new time package */
1116 /* The 0, below, specifies that it is not the last packet:
1117 * there will be others. PrepareSendPacket may
1118 * alter the packet length by up to
1119 * conn->securityMaxTrailerSize */
1120 hadd32(call->bytesSent, cp->length);
1121 rxi_PrepareSendPacket(call, cp, 0);
1122 cp->flags |= RX_PKTFLAG_TQ;
1123 queue_Append(&tmpq, cp);
1125 /* The head of the iovq is now the current packet */
1127 if (queue_IsEmpty(&call->iovq)) {
1128 call->error = RX_PROTOCOL_ERROR;
1129 rxi_FreePackets(0, &tmpq);
1132 cp = queue_First(&call->iovq, rx_packet);
1134 cp->flags &= ~RX_PKTFLAG_IOVQ;
1135 cp->flags |= RX_PKTFLAG_CP;
1136 call->currentPacket = cp;
1137 call->nFree = cp->length;
1140 (char *)cp->wirevec[1].iov_base +
1141 call->conn->securityHeaderSize;
1143 cp->wirevec[1].iov_len - call->conn->securityHeaderSize;
1148 /* The next iovec should point to the current position */
1149 if (iov[nextio].iov_base != call->curpos
1150 || iov[nextio].iov_len > (int)call->curlen) {
1151 call->error = RX_PROTOCOL_ERROR;
1153 cp->flags &= ~RX_PKTFLAG_CP;
1154 queue_Prepend(&tmpq, cp);
1156 rxi_FreePackets(0, &tmpq);
1159 nbytes -= iov[nextio].iov_len;
1160 call->curpos += iov[nextio].iov_len;
1161 call->curlen -= iov[nextio].iov_len;
1162 call->nFree -= iov[nextio].iov_len;
1164 if (call->curlen == 0) {
1165 if (++call->curvec > cp->niovecs) {
1168 call->curpos = (char *)cp->wirevec[call->curvec].iov_base;
1169 call->curlen = cp->wirevec[call->curvec].iov_len;
1173 } while (nbytes && nextio < nio);
1175 /* Move the packets from the temporary queue onto the transmit queue.
1176 * We may end up with more than call->twind packets on the queue. */
1177 queue_SpliceAppend(&call->tq, &tmpq);
1179 if (!(call->flags & (RX_CALL_FAST_RECOVER | RX_CALL_FAST_RECOVER_WAIT))) {
1180 rxi_Start(0, call, 0, 0);
1183 /* Wait for the length of the transmit queue to fall below call->twind */
1184 while (!call->error && call->tnext + 1 > call->tfirst + (2 * call->twind)) {
1186 call->startWait = clock_Sec();
1187 #ifdef RX_ENABLE_LOCKS
1188 CV_WAIT(&call->cv_twind, &call->lock);
1190 call->flags |= RX_CALL_WAIT_WINDOW_ALLOC;
1191 osi_rxSleep(&call->twind);
1193 call->startWait = 0;
1198 cp->flags &= ~RX_PKTFLAG_CP;
1204 return requestCount - nbytes;
1208 rx_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
1214 MUTEX_ENTER(&call->lock);
1215 bytes = rxi_WritevProc(call, iov, nio, nbytes);
1216 MUTEX_EXIT(&call->lock);
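/* Hedged usage sketch (not part of the library): the WritevAlloc/WritevProc
 * pairing for a copy-free send.  The iovec handed to rx_WritevProc must be
 * the one filled in by rx_WritevAlloc (or a leading portion of it), since
 * WritevProc walks the iovq packets and insists that each entry line up with
 * curpos/curlen.  fill_payload(), the 15-entry iovec, and the 64K request
 * size are assumptions for illustration. */
#if 0
static int
example_writev_send(struct rx_call *call,
		    int (*fill_payload) (struct iovec * iov, int nio))
{
    struct iovec iov[15];
    int nio, nbytes;

    nbytes = rx_WritevAlloc(call, iov, &nio, 15, 65536);
    if (nbytes <= 0)
	return -1;		/* no packet space available */
    if (fill_payload(iov, nio) < 0)	/* write data straight into the packet buffers */
	return -1;
    return (rx_WritevProc(call, iov, nio, nbytes) == nbytes) ? 0 : -1;
}
#endif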
1221 /* Flush any buffered data to the stream, switch to read mode
1222 * (clients) or to EOF mode (servers) */
1224 rxi_FlushWrite(register struct rx_call *call)
1226 register struct rx_packet *cp = call->currentPacket;
1228 /* Free any packets from the last call to ReadvProc/WritevProc */
1229 if (queue_IsNotEmpty(&call->iovq)) {
1230 rxi_FreePackets(0, &call->iovq);
1233 if (call->mode == RX_MODE_SENDING) {
1236 (call->conn->type ==
1237 RX_CLIENT_CONNECTION ? RX_MODE_RECEIVING : RX_MODE_EOF);
1239 #ifdef RX_KERNEL_TRACE
1241 int glockOwner = ISAFS_GLOCK();
1244 afs_Trace3(afs_iclSetp, CM_TRACE_WASHERE, ICL_TYPE_STRING,
1245 __FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER,
1252 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
1253 /* Wait until TQ_BUSY is reset before adding any
1254 * packets to the transmit queue
1256 while (call->flags & RX_CALL_TQ_BUSY) {
1257 call->flags |= RX_CALL_TQ_WAIT;
1258 #ifdef RX_ENABLE_LOCKS
1259 CV_WAIT(&call->cv_tq, &call->lock);
1260 #else /* RX_ENABLE_LOCKS */
1261 osi_rxSleep(&call->tq);
1262 #endif /* RX_ENABLE_LOCKS */
1264 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
1267 /* cp->length is only supposed to be the user's data */
1268 /* cp->length was already set to (then-current)
1269 * MaxUserDataSize or less. */
1270 cp->flags &= ~RX_PKTFLAG_CP;
1271 cp->length -= call->nFree;
1272 call->currentPacket = (struct rx_packet *)0;
1275 cp = rxi_AllocSendPacket(call, 0);
1277 /* Mode can no longer be MODE_SENDING */
1281 cp->niovecs = 2; /* header + space for rxkad stuff */
1285 /* The 1 specifies that this is the last packet */
1286 hadd32(call->bytesSent, cp->length);
1287 rxi_PrepareSendPacket(call, cp, 1);
1288 cp->flags |= RX_PKTFLAG_TQ;
1289 queue_Append(&call->tq, cp);
1292 flags & (RX_CALL_FAST_RECOVER | RX_CALL_FAST_RECOVER_WAIT))) {
1293 rxi_Start(0, call, 0, 0);
1298 /* Flush any buffered data to the stream, switch to read mode
1299 * (clients) or to EOF mode (servers) */
1301 rx_FlushWrite(struct rx_call *call)
1305 MUTEX_ENTER(&call->lock);
1306 rxi_FlushWrite(call);
1307 MUTEX_EXIT(&call->lock);
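/* Hedged usage sketch (not part of the library): a minimal request/response
 * exchange over an existing call.  The explicit rx_FlushWrite is optional on
 * the client side, since the first read on a call still in send mode flushes
 * the buffered data itself (see rxi_ReadProc above).  Call setup/teardown is
 * omitted, and the helper name and buffer sizes are assumptions. */
#if 0
static int
example_simple_rpc(struct rx_call *call, char *req, int reqlen,
		   char *reply, int replymax)
{
    if (rx_WriteProc(call, req, reqlen) != reqlen)
	return -1;		/* call errored while sending the request */
    rx_FlushWrite(call);	/* push the last packet and switch to receive mode */
    return rx_ReadProc(call, reply, replymax);	/* bytes of reply received */
}
#endif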