rx: Tidy up currentPacket handling
[openafs.git] / src / rx / rx_rdwr.c
1  /*
2   * Copyright 2000, International Business Machines Corporation and others.
3   * All Rights Reserved.
4   *
5   * This software has been released under the terms of the IBM Public
6   * License.  For details, see the LICENSE file in the top-level source
7   * directory or online at http://www.openafs.org/dl/license10.html
8   */
9
10 #include <afsconfig.h>
11 #include <afs/param.h>
12
13 #ifdef KERNEL
14 # ifndef UKERNEL
15 #  ifdef RX_KERNEL_TRACE
16 #   include "rx_kcommon.h"
17 #  endif
18 #  if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
19 #   include "afs/sysincludes.h"
20 #  else
21 #   include "h/types.h"
22 #   include "h/time.h"
23 #   include "h/stat.h"
24 #   if defined(AFS_AIX_ENV) || defined(AFS_AUX_ENV) || defined(AFS_SUN5_ENV)
25 #    include "h/systm.h"
26 #   endif
27 #   ifdef       AFS_OSF_ENV
28 #    include <net/net_globals.h>
29 #   endif /* AFS_OSF_ENV */
30 #   ifdef AFS_LINUX20_ENV
31 #    include "h/socket.h"
32 #   endif
33 #   include "netinet/in.h"
34 #   if defined(AFS_SGI_ENV)
35 #    include "afs/sysincludes.h"
36 #   endif
37 #  endif
38 #  include "afs/afs_args.h"
39 #  if   (defined(AFS_AUX_ENV) || defined(AFS_AIX_ENV))
40 #   include "h/systm.h"
41 #  endif
42 # else /* !UKERNEL */
43 #  include "afs/sysincludes.h"
44 # endif /* !UKERNEL */
45
46 # ifdef RXDEBUG
47 #  undef RXDEBUG                        /* turn off debugging */
48 # endif /* RXDEBUG */
49
50 # include "afs/afs_osi.h"
51 # include "rx_kmutex.h"
52 # include "rx/rx_kernel.h"
53 # include "afs/lock.h"
54 #else /* KERNEL */
55 # include <roken.h>
56 #endif /* KERNEL */
57
58 #include "rx.h"
59 #include "rx_clock.h"
60 #include "rx_queue.h"
61 #include "rx_globals.h"
62 #include "rx_atomic.h"
63 #include "rx_internal.h"
64 #include "rx_conn.h"
65 #include "rx_call.h"
66 #include "rx_packet.h"
67
68 #ifdef RX_LOCKS_DB
69 /* rxdb_fileID is used to identify the lock location, along with line#. */
70 static int rxdb_fileID = RXDB_FILE_RX_RDWR;
71 #endif /* RX_LOCKS_DB */
72 /* rxi_ReadProc -- internal version.
73  *
74  * LOCKS USED -- called at netpri
75  */
76 int
77 rxi_ReadProc(struct rx_call *call, char *buf,
78              int nbytes)
79 {
80     struct rx_packet *rp;
81     int requestCount;
82     unsigned int t;
83
84 /* XXXX took out clock_NewTime from here.  Was it needed? */
85     requestCount = nbytes;
86
87     /* Free any packets from the last call to ReadvProc/WritevProc */
88     if (queue_IsNotEmpty(&call->iovq)) {
89 #ifdef RXDEBUG_PACKET
90         call->iovqc -=
91 #endif /* RXDEBUG_PACKET */
92             rxi_FreePackets(0, &call->iovq);
93     }
94
95     do {
96         if (call->nLeft == 0) {
97             /* Get next packet */
98             MUTEX_ENTER(&call->lock);
99             for (;;) {
100                 if (call->error || (call->mode != RX_MODE_RECEIVING)) {
101                     if (call->error) {
102                         call->mode = RX_MODE_ERROR;
103                         MUTEX_EXIT(&call->lock);
104                         return 0;
105                     }
106                     if (call->mode == RX_MODE_SENDING) {
107                         MUTEX_EXIT(&call->lock);
108                         rxi_FlushWrite(call);
109                         MUTEX_ENTER(&call->lock);
110                         continue;
111                     }
112                 }
113                 if (queue_IsNotEmpty(&call->rq)) {
114                     /* Check that next packet available is next in sequence */
115                     rp = queue_First(&call->rq, rx_packet);
116                     if (rp->header.seq == call->rnext) {
117                         afs_int32 error;
118                         struct rx_connection *conn = call->conn;
119                         queue_Remove(rp);
120 #ifdef RX_TRACK_PACKETS
121                         rp->flags &= ~RX_PKTFLAG_RQ;
122 #endif
123 #ifdef RXDEBUG_PACKET
124                         call->rqc--;
125 #endif /* RXDEBUG_PACKET */
126
127                         /* RXS_CheckPacket called to undo RXS_PreparePacket's
128                          * work.  It may reduce the length of the packet by up
129                          * to conn->maxTrailerSize, to reflect the length of the
130                          * data + the header. */
131                         if ((error =
132                              RXS_CheckPacket(conn->securityObject, call,
133                                              rp))) {
134                             /* Used to merely shut down the call, but now we
135                              * shut down the whole connection since this may
136                              * indicate an attempt to hijack it */
137
138                             MUTEX_EXIT(&call->lock);
139                             rxi_ConnectionError(conn, error);
140                             MUTEX_ENTER(&conn->conn_data_lock);
141                             rp = rxi_SendConnectionAbort(conn, rp, 0, 0);
142                             MUTEX_EXIT(&conn->conn_data_lock);
143                             rxi_FreePacket(rp);
144
145                             return 0;
146                         }
147                         call->rnext++;
148                         call->currentPacket = rp;
149 #ifdef RX_TRACK_PACKETS
150                         call->currentPacket->flags |= RX_PKTFLAG_CP;
151 #endif
152                         call->curvec = 1;       /* 0th vec is always header */
153                         /* begin at the beginning [ more or less ], continue
154                          * on until the end, then stop. */
155                         call->curpos =
156                             (char *) call->currentPacket->wirevec[1].iov_base +
157                             call->conn->securityHeaderSize;
158                         call->curlen =
159                             call->currentPacket->wirevec[1].iov_len -
160                             call->conn->securityHeaderSize;
161
162                         /* Notice that this code works correctly if the data
163                          * size is 0 (which it may be--no reply arguments from
164                          * server, for example).  This relies heavily on the
165                          * fact that the code below immediately frees the packet
166                          * (no yields, etc.).  If it didn't, this would be a
167                          * problem because a value of zero for call->nLeft
168                          * normally means that there is no read packet */
169                         call->nLeft = call->currentPacket->length;
170                         hadd32(call->bytesRcvd, call->currentPacket->length);
171
172                         /* Send a hard ack for every rxi_HardAckRate+1 packets
173                          * consumed. Otherwise schedule an event to send
174                          * the hard ack later on.
175                          */
176                         call->nHardAcks++;
177                         if (!(call->flags & RX_CALL_RECEIVE_DONE)) {
178                             if (call->nHardAcks > (u_short) rxi_HardAckRate) {
179                                 rxevent_Cancel(&call->delayedAckEvent, call,
180                                                RX_CALL_REFCOUNT_DELAY);
181                                 rxi_SendAck(call, 0, 0, RX_ACK_DELAY, 0);
182                             } else {
183                                 /* Delay to consolidate ack packets */
184                                 rxi_PostDelayedAckEvent(call,
185                                                         &rx_hardAckDelay);
186                             }
187                         }
188                         break;
189                     }
190                 }
191
192                 /*
193                  * If we reach this point either we have no packets in the
194                  * receive queue or the next packet in the queue is not the
195                  * one we are looking for.  There is nothing else for us to
196                  * do but wait for another packet to arrive.
197                  */
198
199                 /* Are there ever going to be any more packets? */
200                 if (call->flags & RX_CALL_RECEIVE_DONE) {
201                     MUTEX_EXIT(&call->lock);
202                     return requestCount - nbytes;
203                 }
204                 /* Wait for in-sequence packet */
205                 call->flags |= RX_CALL_READER_WAIT;
206                 clock_NewTime();
207                 call->startWait = clock_Sec();
208                 while (call->flags & RX_CALL_READER_WAIT) {
209 #ifdef  RX_ENABLE_LOCKS
210                     CV_WAIT(&call->cv_rq, &call->lock);
211 #else
212                     osi_rxSleep(&call->rq);
213 #endif
214                 }
215
216                 call->startWait = 0;
217 #ifdef RX_ENABLE_LOCKS
218                 if (call->error) {
219                     MUTEX_EXIT(&call->lock);
220                     return 0;
221                 }
222 #endif /* RX_ENABLE_LOCKS */
223             }
224             MUTEX_EXIT(&call->lock);
225         } else
226             /* osi_Assert(cp); */
227             /* MTUXXX  this should be replaced by some error-recovery code before shipping */
228             /* yes, the following block is allowed to be the ELSE clause (or not) */
229             /* It's possible for call->nLeft to be smaller than any particular
230              * iov_len.  Usually, recvmsg doesn't change the iov_len, since it
231              * reflects the size of the buffer.  We have to keep track of the
232              * number of bytes read in the length field of the packet struct.  On
233              * the final portion of a received packet, it's almost certain that
234              * call->nLeft will be smaller than the final buffer. */
235             while (nbytes && call->currentPacket) {
236                 t = MIN((int)call->curlen, nbytes);
237                 t = MIN(t, (int)call->nLeft);
238                 memcpy(buf, call->curpos, t);
239                 buf += t;
240                 nbytes -= t;
241                 call->curpos += t;
242                 call->curlen -= t;
243                 call->nLeft -= t;
244
245                 if (!call->nLeft) {
246                     /* out of packet.  Get another one. */
247 #ifdef RX_TRACK_PACKETS
248                     call->currentPacket->flags &= ~RX_PKTFLAG_CP;
249 #endif
250                     rxi_FreePacket(call->currentPacket);
251                     call->currentPacket = NULL;
252                 } else if (!call->curlen) {
253                     /* need to get another struct iov */
254                     if (++call->curvec >= call->currentPacket->niovecs) {
255                         /* current packet is exhausted, get ready for another */
256                         /* don't worry about curvec and stuff, they get set somewhere else */
257 #ifdef RX_TRACK_PACKETS
258                         call->currentPacket->flags &= ~RX_PKTFLAG_CP;
259 #endif
260                         rxi_FreePacket(call->currentPacket);
261                         call->currentPacket = NULL;
262                         call->nLeft = 0;
263                     } else {
264                         call->curpos =
265                             call->currentPacket->wirevec[call->curvec].iov_base;
266                         call->curlen =
267                             call->currentPacket->wirevec[call->curvec].iov_len;
268                     }
269                 }
270             }
271         if (!nbytes) {
272             /* user buffer is full, return */
273             return requestCount;
274         }
275
276     } while (nbytes);
277
278     return requestCount;
279 }
280
281 int
282 rx_ReadProc(struct rx_call *call, char *buf, int nbytes)
283 {
284     int bytes;
285     SPLVAR;
286
287     /* Free any packets from the last call to ReadvProc/WritevProc */
288     if (!queue_IsEmpty(&call->iovq)) {
289 #ifdef RXDEBUG_PACKET
290         call->iovqc -=
291 #endif /* RXDEBUG_PACKET */
292             rxi_FreePackets(0, &call->iovq);
293     }
294
295     /*
296      * Most common case, all of the data is in the current iovec.
297      * We are relying on nLeft being zero unless the call is in receive mode.
298      */
299     if (!call->error && call->curlen > nbytes && call->nLeft > nbytes) {
300         memcpy(buf, call->curpos, nbytes);
301
302         call->curpos += nbytes;
303         call->curlen -= nbytes;
304         call->nLeft  -= nbytes;
305
306         if (!call->nLeft && call->currentPacket != NULL) {
307             /* out of packet.  Get another one. */
308             rxi_FreePacket(call->currentPacket);
309             call->currentPacket = (struct rx_packet *)0;
310         }
311         return nbytes;
312     }
313
314     NETPRI;
315     bytes = rxi_ReadProc(call, buf, nbytes);
316     USERPRI;
317     return bytes;
318 }
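
/* Illustrative usage sketch (not part of the original file): a caller
 * normally drains a call's incoming stream with repeated rx_ReadProc()
 * calls until fewer bytes than requested come back, which signals the end
 * of the data or an error on the call.  The buffer size and the consume()
 * helper are assumptions made for the example.
 *
 *	char buf[4096];
 *	int nread;
 *
 *	do {
 *	    nread = rx_ReadProc(call, buf, sizeof(buf));
 *	    if (nread > 0)
 *	        consume(buf, nread);        -- hypothetical sink for the data
 *	} while (nread == sizeof(buf));
 */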
319
320 /* Optimization for unmarshalling 32 bit integers */
321 int
322 rx_ReadProc32(struct rx_call *call, afs_int32 * value)
323 {
324     int bytes;
325     SPLVAR;
326
327     /* Free any packets from the last call to ReadvProc/WritevProc */
328     if (!queue_IsEmpty(&call->iovq)) {
329 #ifdef RXDEBUG_PACKET
330         call->iovqc -=
331 #endif /* RXDEBUG_PACKET */
332             rxi_FreePackets(0, &call->iovq);
333     }
334
335     /*
336      * Most common case, all of the data is in the current iovec.
337      * We are relying on nLeft being zero unless the call is in receive mode.
338      */
339     if (!call->error && call->curlen >= sizeof(afs_int32)
340         && call->nLeft >= sizeof(afs_int32)) {
341
342         memcpy((char *)value, call->curpos, sizeof(afs_int32));
343
344         call->curpos += sizeof(afs_int32);
345         call->curlen -= sizeof(afs_int32);
346         call->nLeft  -= sizeof(afs_int32);
347
348         if (!call->nLeft && call->currentPacket != NULL) {
349             /* out of packet.  Get another one. */
350             rxi_FreePacket(call->currentPacket);
351             call->currentPacket = (struct rx_packet *)0;
352         }
353         return sizeof(afs_int32);
354     }
355
356     NETPRI;
357     bytes = rxi_ReadProc(call, (char *)value, sizeof(afs_int32));
358     USERPRI;
359
360     return bytes;
361 }
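
/* Illustrative sketch (an assumption, not taken from this file): callers
 * such as the XDR glue read one 32-bit word at a time and byte-swap it
 * themselves, since rx_ReadProc32() copies the raw bytes as they appeared
 * on the wire.
 *
 *	afs_int32 netval, hostval;
 *
 *	if (rx_ReadProc32(call, &netval) == sizeof(afs_int32))
 *	    hostval = ntohl(netval);        -- wire order assumed big-endian
 *	else
 *	    abort_call();                   -- hypothetical error handling
 */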
362
363 /* rxi_FillReadVec
364  *
365  * Uses packets in the receive queue to fill in as much of the
366  * current iovec as possible. Does not block if it runs out
367  * of packets to complete the iovec. Return true if an ack packet
368  * was sent, otherwise return false */
369 int
370 rxi_FillReadVec(struct rx_call *call, afs_uint32 serial)
371 {
372     int didConsume = 0;
373     int didHardAck = 0;
374     unsigned int t;
375     struct rx_packet *rp;
376     struct iovec *call_iov;
377     struct iovec *cur_iov = NULL;
378
379     if (call->currentPacket) {
380         cur_iov = &call->currentPacket->wirevec[call->curvec];
381     }
382     call_iov = &call->iov[call->iovNext];
383
384     while (!call->error && call->iovNBytes && call->iovNext < call->iovMax) {
385         if (call->nLeft == 0) {
386             /* Get next packet */
387             if (queue_IsNotEmpty(&call->rq)) {
388                 /* Check that next packet available is next in sequence */
389                 rp = queue_First(&call->rq, rx_packet);
390                 if (rp->header.seq == call->rnext) {
391                     afs_int32 error;
392                     struct rx_connection *conn = call->conn;
393                     queue_Remove(rp);
394 #ifdef RX_TRACK_PACKETS
395                     rp->flags &= ~RX_PKTFLAG_RQ;
396 #endif
397 #ifdef RXDEBUG_PACKET
398                     call->rqc--;
399 #endif /* RXDEBUG_PACKET */
400
401                     /* RXS_CheckPacket called to undo RXS_PreparePacket's
402                      * work.  It may reduce the length of the packet by up
403                      * to conn->maxTrailerSize, to reflect the length of the
404                      * data + the header. */
405                     if ((error =
406                          RXS_CheckPacket(conn->securityObject, call, rp))) {
407                         /* Used to merely shut down the call, but now we
408                          * shut down the whole connection since this may
409                          * indicate an attempt to hijack it */
410
411                         MUTEX_EXIT(&call->lock);
412                         rxi_ConnectionError(conn, error);
413                         MUTEX_ENTER(&conn->conn_data_lock);
414                         rp = rxi_SendConnectionAbort(conn, rp, 0, 0);
415                         MUTEX_EXIT(&conn->conn_data_lock);
416                         rxi_FreePacket(rp);
417                         MUTEX_ENTER(&call->lock);
418
419                         return 1;
420                     }
421                     call->rnext++;
422                     call->currentPacket = rp;
423 #ifdef RX_TRACK_PACKETS
424                     call->currentPacket->flags |= RX_PKTFLAG_CP;
425 #endif
426                     call->curvec = 1;   /* 0th vec is always header */
427                     cur_iov = &call->currentPacket->wirevec[1];
428                     /* begin at the beginning [ more or less ], continue
429                      * on until the end, then stop. */
430                     call->curpos =
431                         (char *)call->currentPacket->wirevec[1].iov_base +
432                         call->conn->securityHeaderSize;
433                     call->curlen =
434                         call->currentPacket->wirevec[1].iov_len -
435                         call->conn->securityHeaderSize;
436
437                     /* Notice that this code works correctly if the data
438                      * size is 0 (which it may be--no reply arguments from
439                      * server, for example).  This relies heavily on the
440                      * fact that the code below immediately frees the packet
441                      * (no yields, etc.).  If it didn't, this would be a
442                      * problem because a value of zero for call->nLeft
443                      * normally means that there is no read packet */
444                     call->nLeft = call->currentPacket->length;
445                     hadd32(call->bytesRcvd, call->currentPacket->length);
446
447                     /* Send a hard ack for every rxi_HardAckRate+1 packets
448                      * consumed. Otherwise schedule an event to send
449                      * the hard ack later on.
450                      */
451                     call->nHardAcks++;
452                     didConsume = 1;
453                     continue;
454                 }
455             }
456             break;
457         }
458
459         /* It's possible for call->nLeft to be smaller than any particular
460          * iov_len.  Usually, recvmsg doesn't change the iov_len, since it
461          * reflects the size of the buffer.  We have to keep track of the
462          * number of bytes read in the length field of the packet struct.  On
463          * the final portion of a received packet, it's almost certain that
464          * call->nLeft will be smaller than the final buffer. */
465         while (call->iovNBytes
466                && call->iovNext < call->iovMax
467                && call->currentPacket) {
468
469             t = MIN((int)call->curlen, call->iovNBytes);
470             t = MIN(t, (int)call->nLeft);
471             call_iov->iov_base = call->curpos;
472             call_iov->iov_len = t;
473             call_iov++;
474             call->iovNext++;
475             call->iovNBytes -= t;
476             call->curpos += t;
477             call->curlen -= t;
478             call->nLeft -= t;
479
480             if (!call->nLeft) {
481                 /* out of packet.  Get another one. */
482 #ifdef RX_TRACK_PACKETS
483                 call->currentPacket->flags &= ~RX_PKTFLAG_CP;
484                 call->currentPacket->flags |= RX_PKTFLAG_IOVQ;
485 #endif
486                 queue_Append(&call->iovq, call->currentPacket);
487 #ifdef RXDEBUG_PACKET
488                 call->iovqc++;
489 #endif /* RXDEBUG_PACKET */
490                 call->currentPacket = NULL;
491             } else if (!call->curlen) {
492                 /* need to get another struct iov */
493                 if (++call->curvec >= call->currentPacket->niovecs) {
494                     /* current packet is exhausted, get ready for another */
495                     /* don't worry about curvec and stuff, they get set somewhere else */
496 #ifdef RX_TRACK_PACKETS
497                     call->currentPacket->flags &= ~RX_PKTFLAG_CP;
498                     call->currentPacket->flags |= RX_PKTFLAG_IOVQ;
499 #endif
500                     queue_Append(&call->iovq, call->currentPacket);
501 #ifdef RXDEBUG_PACKET
502                     call->iovqc++;
503 #endif /* RXDEBUG_PACKET */
504                     call->currentPacket = NULL;
505                     call->nLeft = 0;
506                 } else {
507                     cur_iov++;
508                     call->curpos = (char *)cur_iov->iov_base;
509                     call->curlen = cur_iov->iov_len;
510                 }
511             }
512         }
513     }
514
515     /* If we consumed any packets then check whether we need to
516      * send a hard ack. */
517     if (didConsume && (!(call->flags & RX_CALL_RECEIVE_DONE))) {
518         if (call->nHardAcks > (u_short) rxi_HardAckRate) {
519             rxevent_Cancel(&call->delayedAckEvent, call,
520                            RX_CALL_REFCOUNT_DELAY);
521             rxi_SendAck(call, 0, serial, RX_ACK_DELAY, 0);
522             didHardAck = 1;
523         } else {
524             /* Delay to consolidate ack packets */
525             rxi_PostDelayedAckEvent(call, &rx_hardAckDelay);
526         }
527     }
528     return didHardAck;
529 }
530
531
532 /* rxi_ReadvProc -- internal version.
533  *
534  * Fills in an iovec with pointers to the packet buffers. All packets
535  * except the last packet (new current packet) are moved to the iovq
536  * while the application is processing the data.
537  *
538  * LOCKS USED -- called at netpri.
539  */
540 int
541 rxi_ReadvProc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
542               int nbytes)
543 {
544     int bytes;
545
546     /* Free any packets from the last call to ReadvProc/WritevProc */
547     if (queue_IsNotEmpty(&call->iovq)) {
548 #ifdef RXDEBUG_PACKET
549         call->iovqc -=
550 #endif /* RXDEBUG_PACKET */
551             rxi_FreePackets(0, &call->iovq);
552     }
553
554     if (call->mode == RX_MODE_SENDING) {
555         rxi_FlushWrite(call);
556     }
557
558     MUTEX_ENTER(&call->lock);
559     if (call->error)
560         goto error;
561
562     /* Get whatever data is currently available in the receive queue.
563      * If rxi_FillReadVec sends an ack packet then it is possible
564      * that we will receive more data while we drop the call lock
565      * to send the packet. Set the RX_CALL_IOVEC_WAIT flag
566      * here to avoid a race with the receive thread if we send
567      * hard acks in rxi_FillReadVec. */
568     call->flags |= RX_CALL_IOVEC_WAIT;
569     call->iovNBytes = nbytes;
570     call->iovMax = maxio;
571     call->iovNext = 0;
572     call->iov = iov;
573     rxi_FillReadVec(call, 0);
574
575     /* if we need more data then sleep until the receive thread has
576      * filled in the rest. */
577     if (!call->error && call->iovNBytes && call->iovNext < call->iovMax
578         && !(call->flags & RX_CALL_RECEIVE_DONE)) {
579         call->flags |= RX_CALL_READER_WAIT;
580         clock_NewTime();
581         call->startWait = clock_Sec();
582         while (call->flags & RX_CALL_READER_WAIT) {
583 #ifdef  RX_ENABLE_LOCKS
584             CV_WAIT(&call->cv_rq, &call->lock);
585 #else
586             osi_rxSleep(&call->rq);
587 #endif
588         }
589         call->startWait = 0;
590     }
591     call->flags &= ~RX_CALL_IOVEC_WAIT;
592
593     if (call->error)
594         goto error;
595
596     call->iov = NULL;
597     *nio = call->iovNext;
598     bytes = nbytes - call->iovNBytes;
599     MUTEX_EXIT(&call->lock);
600     return bytes;
601
602   error:
603     call->mode = RX_MODE_ERROR;
604     MUTEX_EXIT(&call->lock);
605     return 0;
606 }
607
608 int
609 rx_ReadvProc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
610              int nbytes)
611 {
612     int bytes;
613     SPLVAR;
614
615     NETPRI;
616     bytes = rxi_ReadvProc(call, iov, nio, maxio, nbytes);
617     USERPRI;
618     return bytes;
619 }
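
/* Illustrative usage sketch (not part of the original file): rx_ReadvProc()
 * fills the caller's iovec with pointers straight into packet buffers, so
 * the data is processed in place and the underlying packets stay owned by
 * the call (they are released on the next read/write operation).  The
 * RX_MAXIOVECS bound, the want count and process_iov() are assumptions.
 *
 *	struct iovec iov[RX_MAXIOVECS];
 *	int i, nio, got, want = 64 * 1024;
 *
 *	got = rx_ReadvProc(call, iov, &nio, RX_MAXIOVECS, want);
 *	for (i = 0; i < nio; i++)
 *	    process_iov(iov[i].iov_base, iov[i].iov_len);  -- hypothetical
 */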
620
621 /* rxi_WriteProc -- internal version.
622  *
623  * LOCKS USED -- called at netpri
624  */
625
626 int
627 rxi_WriteProc(struct rx_call *call, char *buf,
628               int nbytes)
629 {
630     struct rx_connection *conn = call->conn;
631     unsigned int t;
632     int requestCount = nbytes;
633
634     /* Free any packets from the last call to ReadvProc/WritevProc */
635     if (queue_IsNotEmpty(&call->iovq)) {
636 #ifdef RXDEBUG_PACKET
637         call->iovqc -=
638 #endif /* RXDEBUG_PACKET */
639             rxi_FreePackets(0, &call->iovq);
640     }
641
642     if (call->mode != RX_MODE_SENDING) {
643         if ((conn->type == RX_SERVER_CONNECTION)
644             && (call->mode == RX_MODE_RECEIVING)) {
645             call->mode = RX_MODE_SENDING;
646             if (call->currentPacket) {
647 #ifdef RX_TRACK_PACKETS
648                 call->currentPacket->flags &= ~RX_PKTFLAG_CP;
649 #endif
650                 rxi_FreePacket(call->currentPacket);
651                 call->currentPacket = NULL;
652                 call->nLeft = 0;
653                 call->nFree = 0;
654             }
655         } else {
656             return 0;
657         }
658     }
659
660     /* Loop condition is checked at end, so that a write of 0 bytes
661      * will force a packet to be created--specifically for the case where
662      * there are 0 bytes on the stream, but we must send a packet
663      * anyway. */
664     do {
665         if (call->nFree == 0) {
666             MUTEX_ENTER(&call->lock);
667             if (call->error)
668                 call->mode = RX_MODE_ERROR;
669             if (!call->error && call->currentPacket) {
670                 clock_NewTime();        /* Bogus:  need new time package */
671                 /* The 0, below, specifies that it is not the last packet:
672                  * there will be others. PrepareSendPacket may
673                  * alter the packet length by up to
674                  * conn->securityMaxTrailerSize */
675                 hadd32(call->bytesSent, call->currentPacket->length);
676                 rxi_PrepareSendPacket(call, call->currentPacket, 0);
677 #ifdef  AFS_GLOBAL_RXLOCK_KERNEL
678                 /* PrepareSendPacket drops the call lock */
679                 rxi_WaitforTQBusy(call);
680 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
681 #ifdef RX_TRACK_PACKETS
682                 call->currentPacket->flags |= RX_PKTFLAG_TQ;
683 #endif
684                 queue_Append(&call->tq, call->currentPacket);
685 #ifdef RXDEBUG_PACKET
686                 call->tqc++;
687 #endif /* RXDEBUG_PACKET */
688 #ifdef RX_TRACK_PACKETS
689                 call->currentPacket->flags &= ~RX_PKTFLAG_CP;
690 #endif
691                 call->currentPacket = NULL;
692
693                 /* If the call is in recovery, let it exhaust its current
694                  * retransmit queue before forcing it to send new packets
695                  */
696                 if (!(call->flags & (RX_CALL_FAST_RECOVER))) {
697                     rxi_Start(call, 0);
698                 }
699             } else if (call->currentPacket) {
700 #ifdef RX_TRACK_PACKETS
701                 call->currentPacket->flags &= ~RX_PKTFLAG_CP;
702 #endif
703                 rxi_FreePacket(call->currentPacket);
704                 call->currentPacket = NULL;
705             }
706             /* Wait for transmit window to open up */
707             while (!call->error
708                    && call->tnext + 1 > call->tfirst + (2 * call->twind)) {
709                 clock_NewTime();
710                 call->startWait = clock_Sec();
711
712 #ifdef  RX_ENABLE_LOCKS
713                 CV_WAIT(&call->cv_twind, &call->lock);
714 #else
715                 call->flags |= RX_CALL_WAIT_WINDOW_ALLOC;
716                 osi_rxSleep(&call->twind);
717 #endif
718
719                 call->startWait = 0;
720 #ifdef RX_ENABLE_LOCKS
721                 if (call->error) {
722                     call->mode = RX_MODE_ERROR;
723                     MUTEX_EXIT(&call->lock);
724                     return 0;
725                 }
726 #endif /* RX_ENABLE_LOCKS */
727             }
728             if ((call->currentPacket = rxi_AllocSendPacket(call, nbytes))) {
729 #ifdef RX_TRACK_PACKETS
730                 call->currentPacket->flags |= RX_PKTFLAG_CP;
731 #endif
732                 call->nFree = call->currentPacket->length;
733                 call->curvec = 1;       /* 0th vec is always header */
734                 /* begin at the beginning [ more or less ], continue
735                  * on until the end, then stop. */
736                 call->curpos =
737                     (char *) call->currentPacket->wirevec[1].iov_base +
738                     call->conn->securityHeaderSize;
739                 call->curlen =
740                     call->currentPacket->wirevec[1].iov_len -
741                     call->conn->securityHeaderSize;
742             }
743             if (call->error) {
744                 call->mode = RX_MODE_ERROR;
745                 if (call->currentPacket) {
746 #ifdef RX_TRACK_PACKETS
747                     call->currentPacket->flags &= ~RX_PKTFLAG_CP;
748 #endif
749                     rxi_FreePacket(call->currentPacket);
750                     call->currentPacket = NULL;
751                 }
752                 MUTEX_EXIT(&call->lock);
753                 return 0;
754             }
755             MUTEX_EXIT(&call->lock);
756         }
757
758         if (call->currentPacket && (int)call->nFree < nbytes) {
759             /* Try to extend the current buffer */
760             int len, mud;
761             len = call->currentPacket->length;
762             mud = rx_MaxUserDataSize(call);
763             if (mud > len) {
764                 int want;
765                 want = MIN(nbytes - (int)call->nFree, mud - len);
766                 rxi_AllocDataBuf(call->currentPacket, want,
767                                  RX_PACKET_CLASS_SEND_CBUF);
768                 if (call->currentPacket->length > (unsigned)mud)
769                     call->currentPacket->length = mud;
770                 call->nFree += (call->currentPacket->length - len);
771             }
772         }
773
774         /* If the remaining bytes fit in the buffer, then store them
775          * and return.  Don't ship a buffer that's full immediately to
776          * the peer--we don't know if it's the last buffer yet */
777
778         if (!call->currentPacket) {
779             call->nFree = 0;
780         }
781
782         while (nbytes && call->nFree) {
783
784             t = MIN((int)call->curlen, nbytes);
785             t = MIN((int)call->nFree, t);
786             memcpy(call->curpos, buf, t);
787             buf += t;
788             nbytes -= t;
789             call->curpos += t;
790             call->curlen -= (u_short)t;
791             call->nFree -= (u_short)t;
792
793             if (!call->curlen) {
794                 /* need to get another struct iov */
795                 if (++call->curvec >= call->currentPacket->niovecs) {
796                     /* current packet is full, extend or send it */
797                     call->nFree = 0;
798                 } else {
799                     call->curpos =
800                         call->currentPacket->wirevec[call->curvec].iov_base;
801                     call->curlen =
802                         call->currentPacket->wirevec[call->curvec].iov_len;
803                 }
804             }
805         }                       /* while bytes to send and room to send them */
806
807         /* might be out of space now */
808         if (!nbytes) {
809             return requestCount;
810         } else;                 /* more data to send, so get another packet and keep going */
811     } while (nbytes);
812
813     return requestCount - nbytes;
814 }
815
816 int
817 rx_WriteProc(struct rx_call *call, char *buf, int nbytes)
818 {
819     int bytes;
820     int tcurlen;
821     int tnFree;
822     char *tcurpos;
823     SPLVAR;
824
825     /* Free any packets from the last call to ReadvProc/WritevProc */
826     if (queue_IsNotEmpty(&call->iovq)) {
827 #ifdef RXDEBUG_PACKET
828         call->iovqc -=
829 #endif /* RXDEBUG_PACKET */
830             rxi_FreePackets(0, &call->iovq);
831     }
832
833     /*
834      * Most common case: all of the data fits in the current iovec.
835      * We are relying on nFree being zero unless the call is in send mode.
836      */
837     tcurlen = (int)call->curlen;
838     tnFree = (int)call->nFree;
839     if (!call->error && tcurlen >= nbytes && tnFree >= nbytes) {
840         tcurpos = call->curpos;
841
842         memcpy(tcurpos, buf, nbytes);
843         call->curpos = tcurpos + nbytes;
844         call->curlen = (u_short)(tcurlen - nbytes);
845         call->nFree = (u_short)(tnFree - nbytes);
846         return nbytes;
847     }
848
849     NETPRI;
850     bytes = rxi_WriteProc(call, buf, nbytes);
851     USERPRI;
852     return bytes;
853 }
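
/* Illustrative usage sketch (not part of the original file): the common
 * write pattern is to push the marshalled request in whatever chunks are
 * convenient; a short return value means the call has gone into an error
 * state, which the caller can pick up with rx_Error().  reqbuf and reqlen
 * are assumptions made for the example.
 *
 *	if (rx_WriteProc(call, reqbuf, reqlen) != reqlen) {
 *	    code = rx_Error(call);
 *	    ...
 *	}
 */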
854
855 /* Optimization for marshalling 32 bit arguments */
856 int
857 rx_WriteProc32(struct rx_call *call, afs_int32 * value)
858 {
859     int bytes;
860     int tcurlen;
861     int tnFree;
862     char *tcurpos;
863     SPLVAR;
864
865     if (queue_IsNotEmpty(&call->iovq)) {
866 #ifdef RXDEBUG_PACKET
867         call->iovqc -=
868 #endif /* RXDEBUG_PACKET */
869             rxi_FreePackets(0, &call->iovq);
870     }
871
872     /*
873      * Most common case: all of the data fits in the current iovec.
874      * We are relying on nFree being zero unless the call is in send mode.
875      */
876     tcurlen = call->curlen;
877     tnFree = call->nFree;
878     if (!call->error && tcurlen >= sizeof(afs_int32)
879         && tnFree >= sizeof(afs_int32)) {
880         tcurpos = call->curpos;
881
882         if (!((size_t)tcurpos & (sizeof(afs_int32) - 1))) {
883             *((afs_int32 *) (tcurpos)) = *value;
884         } else {
885             memcpy(tcurpos, (char *)value, sizeof(afs_int32));
886         }
887         call->curpos = tcurpos + sizeof(afs_int32);
888         call->curlen = (u_short)(tcurlen - sizeof(afs_int32));
889         call->nFree = (u_short)(tnFree - sizeof(afs_int32));
890         return sizeof(afs_int32);
891     }
892
893     NETPRI;
894     bytes = rxi_WriteProc(call, (char *)value, sizeof(afs_int32));
895     USERPRI;
896     return bytes;
897 }
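
/* Illustrative sketch (an assumption, not taken from this file): the
 * mirror of rx_ReadProc32() -- callers convert to network byte order
 * before handing the word to rx_WriteProc32().
 *
 *	afs_int32 netval = htonl(hostval);
 *
 *	if (rx_WriteProc32(call, &netval) != sizeof(afs_int32))
 *	    ...                             -- call is in an error state
 */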
898
899 /* rxi_WritevAlloc -- internal version.
900  *
901  * Fill in an iovec to point to data in packet buffers. The application
902  * calls rxi_WritevProc when the buffers are full.
903  *
904  * LOCKS USED -- called at netpri.
905  */
906
907 static int
908 rxi_WritevAlloc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
909                 int nbytes)
910 {
911     struct rx_connection *conn = call->conn;
912     struct rx_packet *cp;
913     int requestCount;
914     int nextio;
915     /* Temporary values, real work is done in rxi_WritevProc */
916     int tnFree;
917     unsigned int tcurvec;
918     char *tcurpos;
919     int tcurlen;
920
921     requestCount = nbytes;
922     nextio = 0;
923
924     /* Free any packets from the last call to ReadvProc/WritevProc */
925     if (queue_IsNotEmpty(&call->iovq)) {
926 #ifdef RXDEBUG_PACKET
927         call->iovqc -=
928 #endif /* RXDEBUG_PACKET */
929             rxi_FreePackets(0, &call->iovq);
930     }
931
932     if (call->mode != RX_MODE_SENDING) {
933         if ((conn->type == RX_SERVER_CONNECTION)
934             && (call->mode == RX_MODE_RECEIVING)) {
935             call->mode = RX_MODE_SENDING;
936             if (call->currentPacket) {
937 #ifdef RX_TRACK_PACKETS
938                 call->currentPacket->flags &= ~RX_PKTFLAG_CP;
939 #endif
940                 rxi_FreePacket(call->currentPacket);
941                 call->currentPacket = NULL;
942                 call->nLeft = 0;
943                 call->nFree = 0;
944             }
945         } else {
946             return 0;
947         }
948     }
949
950     /* Set up the iovec to point to data in packet buffers. */
951     tnFree = call->nFree;
952     tcurvec = call->curvec;
953     tcurpos = call->curpos;
954     tcurlen = call->curlen;
955     cp = call->currentPacket;
956     do {
957         int t;
958
959         if (tnFree == 0) {
960             /* current packet is full, allocate a new one */
961             MUTEX_ENTER(&call->lock);
962             cp = rxi_AllocSendPacket(call, nbytes);
963             MUTEX_EXIT(&call->lock);
964             if (cp == NULL) {
965                 /* out of space, return what we have */
966                 *nio = nextio;
967                 return requestCount - nbytes;
968             }
969 #ifdef RX_TRACK_PACKETS
970             cp->flags |= RX_PKTFLAG_IOVQ;
971 #endif
972             queue_Append(&call->iovq, cp);
973 #ifdef RXDEBUG_PACKET
974             call->iovqc++;
975 #endif /* RXDEBUG_PACKET */
976             tnFree = cp->length;
977             tcurvec = 1;
978             tcurpos =
979                 (char *)cp->wirevec[1].iov_base +
980                 call->conn->securityHeaderSize;
981             tcurlen = cp->wirevec[1].iov_len - call->conn->securityHeaderSize;
982         }
983
984         if (tnFree < nbytes) {
985             /* try to extend the current packet */
986             int len, mud;
987             len = cp->length;
988             mud = rx_MaxUserDataSize(call);
989             if (mud > len) {
990                 int want;
991                 want = MIN(nbytes - tnFree, mud - len);
992                 rxi_AllocDataBuf(cp, want, RX_PACKET_CLASS_SEND_CBUF);
993                 if (cp->length > (unsigned)mud)
994                     cp->length = mud;
995                 tnFree += (cp->length - len);
996                 if (cp == call->currentPacket) {
997                     call->nFree += (cp->length - len);
998                 }
999             }
1000         }
1001
1002         /* fill in the next entry in the iovec */
1003         t = MIN(tcurlen, nbytes);
1004         t = MIN(tnFree, t);
1005         iov[nextio].iov_base = tcurpos;
1006         iov[nextio].iov_len = t;
1007         nbytes -= t;
1008         tcurpos += t;
1009         tcurlen -= t;
1010         tnFree -= t;
1011         nextio++;
1012
1013         if (!tcurlen) {
1014             /* need to get another struct iov */
1015             if (++tcurvec >= cp->niovecs) {
1016                 /* current packet is full, extend it or move on to next packet */
1017                 tnFree = 0;
1018             } else {
1019                 tcurpos = (char *)cp->wirevec[tcurvec].iov_base;
1020                 tcurlen = cp->wirevec[tcurvec].iov_len;
1021             }
1022         }
1023     } while (nbytes && nextio < maxio);
1024     *nio = nextio;
1025     return requestCount - nbytes;
1026 }
1027
1028 int
1029 rx_WritevAlloc(struct rx_call *call, struct iovec *iov, int *nio, int maxio,
1030                int nbytes)
1031 {
1032     int bytes;
1033     SPLVAR;
1034
1035     NETPRI;
1036     bytes = rxi_WritevAlloc(call, iov, nio, maxio, nbytes);
1037     USERPRI;
1038     return bytes;
1039 }
1040
1041 /* rxi_WritevProc -- internal version.
1042  *
1043  * Send buffers allocated in rxi_WritevAlloc.
1044  *
1045  * LOCKS USED -- called at netpri.
1046  */
1047 int
1048 rxi_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
1049 {
1050 #ifdef RX_TRACK_PACKETS
1051     struct rx_packet *p, *np;
1052 #endif
1053     int nextio;
1054     int requestCount;
1055     struct rx_queue tmpq;
1056 #ifdef RXDEBUG_PACKET
1057     u_short tmpqc;
1058 #endif
1059
1060     requestCount = nbytes;
1061     nextio = 0;
1062
1063     MUTEX_ENTER(&call->lock);
1064     if (call->error) {
1065         call->mode = RX_MODE_ERROR;
1066     } else if (call->mode != RX_MODE_SENDING) {
1067         call->error = RX_PROTOCOL_ERROR;
1068     }
1069 #ifdef AFS_GLOBAL_RXLOCK_KERNEL
1070     rxi_WaitforTQBusy(call);
1071 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
1072
1073     if (call->error) {
1074         call->mode = RX_MODE_ERROR;
1075         MUTEX_EXIT(&call->lock);
1076         if (call->currentPacket) {
1077 #ifdef RX_TRACK_PACKETS
1078             call->currentPacket->flags &= ~RX_PKTFLAG_CP;
1079             call->currentPacket->flags |= RX_PKTFLAG_IOVQ;
1080 #endif
1081             queue_Prepend(&call->iovq, call->currentPacket);
1082 #ifdef RXDEBUG_PACKET
1083             call->iovqc++;
1084 #endif /* RXDEBUG_PACKET */
1085             call->currentPacket = NULL;
1086         }
1087 #ifdef RXDEBUG_PACKET
1088         call->iovqc -=
1089 #endif /* RXDEBUG_PACKET */
1090             rxi_FreePackets(0, &call->iovq);
1091         return 0;
1092     }
1093
1094     /* Loop through the I/O vector adjusting packet pointers.
1095      * Place full packets back onto the iovq once they are ready
1096      * to send. Set RX_PROTOCOL_ERROR if any problems are found in
1097      * the iovec. We put the loop condition at the end to ensure that
1098      * a zero length write will push a short packet. */
1099     nextio = 0;
1100     queue_Init(&tmpq);
1101 #ifdef RXDEBUG_PACKET
1102     tmpqc = 0;
1103 #endif /* RXDEBUG_PACKET */
1104     do {
1105         if (call->nFree == 0 && call->currentPacket) {
1106             clock_NewTime();    /* Bogus:  need new time package */
1107             /* The 0, below, specifies that it is not the last packet:
1108              * there will be others. PrepareSendPacket may
1109              * alter the packet length by up to
1110              * conn->securityMaxTrailerSize */
1111             hadd32(call->bytesSent, call->currentPacket->length);
1112             rxi_PrepareSendPacket(call, call->currentPacket, 0);
1113 #ifdef  AFS_GLOBAL_RXLOCK_KERNEL
1114             /* PrepareSendPacket drops the call lock */
1115             rxi_WaitforTQBusy(call);
1116 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
1117             queue_Append(&tmpq, call->currentPacket);
1118 #ifdef RXDEBUG_PACKET
1119             tmpqc++;
1120 #endif /* RXDEBUG_PACKET */
1121             call->currentPacket = NULL;
1122
1123             /* The head of the iovq is now the current packet */
1124             if (nbytes) {
1125                 if (queue_IsEmpty(&call->iovq)) {
1126                     call->error = RX_PROTOCOL_ERROR;
1127                     MUTEX_EXIT(&call->lock);
1128 #ifdef RXDEBUG_PACKET
1129                     tmpqc -=
1130 #endif /* RXDEBUG_PACKET */
1131                         rxi_FreePackets(0, &tmpq);
1132                     return 0;
1133                 }
1134                 call->currentPacket = queue_First(&call->iovq, rx_packet);
1135                 queue_Remove(call->currentPacket);
1136 #ifdef RX_TRACK_PACKETS
1137                 call->currentPacket->flags &= ~RX_PKTFLAG_IOVQ;
1138                 call->currentPacket->flags |= RX_PKTFLAG_CP;
1139 #endif
1140 #ifdef RXDEBUG_PACKET
1141                 call->iovqc--;
1142 #endif /* RXDEBUG_PACKET */
1143                 call->nFree = call->currentPacket->length;
1144                 call->curvec = 1;
1145                 call->curpos =
1146                     (char *) call->currentPacket->wirevec[1].iov_base +
1147                     call->conn->securityHeaderSize;
1148                 call->curlen =
1149                     call->currentPacket->wirevec[1].iov_len -
1150                     call->conn->securityHeaderSize;
1151             }
1152         }
1153
1154         if (nbytes) {
1155             /* The next iovec should point to the current position */
1156             if (iov[nextio].iov_base != call->curpos
1157                 || iov[nextio].iov_len > (int)call->curlen) {
1158                 call->error = RX_PROTOCOL_ERROR;
1159                 MUTEX_EXIT(&call->lock);
1160                 if (call->currentPacket) {
1161 #ifdef RX_TRACK_PACKETS
1162                     call->currentPacket->flags &= ~RX_PKTFLAG_CP;
1163 #endif
1164                     queue_Prepend(&tmpq, call->currentPacket);
1165 #ifdef RXDEBUG_PACKET
1166                     tmpqc++;
1167 #endif /* RXDEBUG_PACKET */
1168                     call->currentPacket = NULL;
1169                 }
1170 #ifdef RXDEBUG_PACKET
1171                 tmpqc -=
1172 #endif /* RXDEBUG_PACKET */
1173                     rxi_FreePackets(0, &tmpq);
1174                 return 0;
1175             }
1176             nbytes -= iov[nextio].iov_len;
1177             call->curpos += iov[nextio].iov_len;
1178             call->curlen -= iov[nextio].iov_len;
1179             call->nFree -= iov[nextio].iov_len;
1180             nextio++;
1181             if (call->curlen == 0) {
1182                 if (++call->curvec >= call->currentPacket->niovecs) {
1183                     call->nFree = 0;
1184                 } else {
1185                     call->curpos =
1186                         call->currentPacket->wirevec[call->curvec].iov_base;
1187                     call->curlen =
1188                         call->currentPacket->wirevec[call->curvec].iov_len;
1189                 }
1190             }
1191         }
1192     } while (nbytes && nextio < nio);
1193
1194     /* Move the packets from the temporary queue onto the transmit queue.
1195      * We may end up with more than call->twind packets on the queue. */
1196
1197 #ifdef RX_TRACK_PACKETS
1198     for (queue_Scan(&tmpq, p, np, rx_packet))
1199     {
1200         p->flags |= RX_PKTFLAG_TQ;
1201     }
1202 #endif
1203
1204     if (call->error)
1205         call->mode = RX_MODE_ERROR;
1206
1207     queue_SpliceAppend(&call->tq, &tmpq);
1208
1209     /* If the call is in recovery, let it exhaust its current retransmit
1210      * queue before forcing it to send new packets
1211      */
1212     if (!(call->flags & RX_CALL_FAST_RECOVER)) {
1213         rxi_Start(call, 0);
1214     }
1215
1216     /* Wait for the length of the transmit queue to fall below call->twind */
1217     while (!call->error && call->tnext + 1 > call->tfirst + (2 * call->twind)) {
1218         clock_NewTime();
1219         call->startWait = clock_Sec();
1220 #ifdef  RX_ENABLE_LOCKS
1221         CV_WAIT(&call->cv_twind, &call->lock);
1222 #else
1223         call->flags |= RX_CALL_WAIT_WINDOW_ALLOC;
1224         osi_rxSleep(&call->twind);
1225 #endif
1226         call->startWait = 0;
1227     }
1228
1229     if (call->error) {
1230         call->mode = RX_MODE_ERROR;
1231         MUTEX_EXIT(&call->lock);
1232         if (call->currentPacket) {
1233 #ifdef RX_TRACK_PACKETS
1234             call->currentPacket->flags &= ~RX_PKTFLAG_CP;
1235 #endif
1236             rxi_FreePacket(call->currentPacket);
1237             call->currentPacket = NULL;
1238         }
1239         return 0;
1240     }
1241     MUTEX_EXIT(&call->lock);
1242
1243     return requestCount - nbytes;
1244 }
1245
1246 int
1247 rx_WritevProc(struct rx_call *call, struct iovec *iov, int nio, int nbytes)
1248 {
1249     int bytes;
1250     SPLVAR;
1251
1252     NETPRI;
1253     bytes = rxi_WritevProc(call, iov, nio, nbytes);
1254     USERPRI;
1255     return bytes;
1256 }
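
/* Illustrative sketch (not part of the original file) of the two-step
 * zero-copy write described above: rx_WritevAlloc() hands out iovecs that
 * point into packet buffers, the application fills them in place, and
 * rx_WritevProc() then queues exactly those buffers for transmission.
 * RX_MAXIOVECS, want and fill_iov() are assumptions made for the example.
 *
 *	struct iovec iov[RX_MAXIOVECS];
 *	int i, nio, got, sent;
 *
 *	got = rx_WritevAlloc(call, iov, &nio, RX_MAXIOVECS, want);
 *	for (i = 0; i < nio; i++)
 *	    fill_iov(iov[i].iov_base, iov[i].iov_len);     -- hypothetical
 *	sent = rx_WritevProc(call, iov, nio, got);
 */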
1257
1258 /* Flush any buffered data to the stream, switch to read mode
1259  * (clients) or to EOF mode (servers)
1260  *
1261  * LOCKS HELD: called at netpri.
1262  */
1263 void
1264 rxi_FlushWrite(struct rx_call *call)
1265 {
1266     struct rx_packet *cp = NULL;
1267
1268     /* Free any packets from the last call to ReadvProc/WritevProc */
1269     if (queue_IsNotEmpty(&call->iovq)) {
1270 #ifdef RXDEBUG_PACKET
1271         call->iovqc -=
1272 #endif /* RXDEBUG_PACKET */
1273             rxi_FreePackets(0, &call->iovq);
1274     }
1275
1276     if (call->mode == RX_MODE_SENDING) {
1277
1278         call->mode =
1279             (call->conn->type ==
1280              RX_CLIENT_CONNECTION ? RX_MODE_RECEIVING : RX_MODE_EOF);
1281
1282 #ifdef RX_KERNEL_TRACE
1283         {
1284             int glockOwner = ISAFS_GLOCK();
1285             if (!glockOwner)
1286                 AFS_GLOCK();
1287             afs_Trace3(afs_iclSetp, CM_TRACE_WASHERE, ICL_TYPE_STRING,
1288                        __FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER,
1289                        call);
1290             if (!glockOwner)
1291                 AFS_GUNLOCK();
1292         }
1293 #endif
1294
1295         MUTEX_ENTER(&call->lock);
1296         if (call->error)
1297             call->mode = RX_MODE_ERROR;
1298
1299         cp = call->currentPacket;
1300
1301         if (cp) {
1302             /* cp->length is only supposed to be the user's data */
1303             /* cp->length was already set to (then-current)
1304              * MaxUserDataSize or less. */
1305 #ifdef RX_TRACK_PACKETS
1306             cp->flags &= ~RX_PKTFLAG_CP;
1307 #endif
1308             cp->length -= call->nFree;
1309             call->currentPacket = (struct rx_packet *)0;
1310             call->nFree = 0;
1311         } else {
1312             cp = rxi_AllocSendPacket(call, 0);
1313             if (!cp) {
1314                 /* Mode can no longer be MODE_SENDING */
                     MUTEX_EXIT(&call->lock);
1315                 return;
1316             }
1317             cp->length = 0;
1318             cp->niovecs = 2;    /* header + space for rxkad stuff */
1319             call->nFree = 0;
1320         }
1321
1322         /* The 1 specifies that this is the last packet */
1323         hadd32(call->bytesSent, cp->length);
1324         rxi_PrepareSendPacket(call, cp, 1);
1325 #ifdef  AFS_GLOBAL_RXLOCK_KERNEL
1326         /* PrepareSendPacket drops the call lock */
1327         rxi_WaitforTQBusy(call);
1328 #endif /* AFS_GLOBAL_RXLOCK_KERNEL */
1329 #ifdef RX_TRACK_PACKETS
1330         cp->flags |= RX_PKTFLAG_TQ;
1331 #endif
1332         queue_Append(&call->tq, cp);
1333 #ifdef RXDEBUG_PACKET
1334         call->tqc++;
1335 #endif /* RXDEBUG_PACKET */
1336
1337         /* If the call is in recovery, let it exhaust its current retransmit
1338          * queue before forcing it to send new packets
1339          */
1340         if (!(call->flags & RX_CALL_FAST_RECOVER)) {
1341             rxi_Start(call, 0);
1342         }
1343         MUTEX_EXIT(&call->lock);
1344     }
1345 }
1346
1347 /* Flush any buffered data to the stream, switch to read mode
1348  * (clients) or to EOF mode (servers) */
1349 void
1350 rx_FlushWrite(struct rx_call *call)
1351 {
1352     SPLVAR;
1353     NETPRI;
1354     rxi_FlushWrite(call);
1355     USERPRI;
1356 }
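
/* Illustrative end-to-end sketch (not part of the original file): a client
 * call typically writes its request and then reads the reply.  An explicit
 * rx_FlushWrite() is optional -- the first read on a sending call flushes
 * implicitly via rxi_FlushWrite() (see rxi_ReadProc above) -- but it makes
 * the switch from send mode to receive mode explicit.  The request/reply
 * buffers are assumptions made for the example.
 *
 *	rx_WriteProc(call, request, reqlen);
 *	rx_FlushWrite(call);
 *	while ((n = rx_ReadProc(call, reply, sizeof(reply))) > 0)
 *	    ...
 *	code = rx_EndCall(call, 0);
 */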