2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
13 #include "afs/sysincludes.h" /* Standard vendor system headers */
14 #ifndef AFS_LINUX22_ENV
15 #include "rpc/types.h"
23 #endif /* AFS_ALPHA_ENV */
24 #include "afsincludes.h" /* Afs-based standard headers */
25 #include "afs/afs_stats.h" /* statistics */
26 #include "afs_prototypes.h"
28 extern int cacheDiskType;
/*
 * FillStoreStats
 * Fold one completed data transfer into statistics slot `idx` of
 * afs_stats_cmfullperf.rpc.fsXferTimes: byte totals (kept in KiB with a
 * sub-KiB carry), min/max transfer sizes, a per-size bucket histogram,
 * and elapsed-time sum / sum-of-squares / min / max.
 * NOTE(review): this excerpt elides several lines of the original
 * function (return type, braces, the success check guarding the
 * tallies); comments below annotate only the visible statements.
 */
33 FillStoreStats(int code, int idx, osi_timeval_t *xferStartTime,
34 afs_size_t bytesToXfer, afs_size_t bytesXferred)
36 struct afs_stats_xferData *xferP;
37 osi_timeval_t xferStopTime;
40 xferP = &(afs_stats_cmfullperf.rpc.fsXferTimes[idx]);
/* stamp end-of-transfer now; elapsed time = stop - *xferStartTime below */
41 osi_GetuTime(&xferStopTime);
44 (xferP->numSuccesses)++;
/* sumBytes accumulates whole KiB; the low 10 bits stay behind in
 * afs_stats_XferSumBytes[idx] as a carry for the next call */
45 afs_stats_XferSumBytes[idx] += bytesXferred;
46 (xferP->sumBytes) += (afs_stats_XferSumBytes[idx] >> 10);
47 afs_stats_XferSumBytes[idx] &= 0x3FF;
48 if (bytesXferred < xferP->minBytes)
49 xferP->minBytes = bytesXferred;
50 if (bytesXferred > xferP->maxBytes)
51 xferP->maxBytes = bytesXferred;
54 * Tally the size of the object. Note: we tally the actual size,
55 * NOT the number of bytes that made it out over the wire.
/* bucket histogram keys off the intended size (bytesToXfer) */
57 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET0) (xferP->count[0])++;
58 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET1) (xferP->count[1])++;
59 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET2) (xferP->count[2])++;
60 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET3) (xferP->count[3])++;
61 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET4) (xferP->count[4])++;
62 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET5) (xferP->count[5])++;
63 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET6) (xferP->count[6])++;
64 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET7) (xferP->count[7])++;
/* elapsed-time statistics: sum, sum of squares, running min and max */
68 afs_stats_GetDiff(elapsedTime, (*xferStartTime), xferStopTime);
69 afs_stats_AddTo((xferP->sumTime), elapsedTime);
70 afs_stats_SquareAddTo((xferP->sqrTime), elapsedTime);
71 if (afs_stats_TimeLessThan(elapsedTime, (xferP->minTime))) {
72 afs_stats_TimeAssign((xferP->minTime), elapsedTime);
74 if (afs_stats_TimeGreaterThan(elapsedTime, (xferP->maxTime))) {
75 afs_stats_TimeAssign((xferP->maxTime), elapsedTime);
79 #endif /* AFS_NOSTATS */
81 /* rock and operations for RX_FILESERVER */
/* Per-operation state ("rock") for a store to an RX fileserver.
 * NOTE(review): most members (call, tbuffer, tiov, tnio, ... — used by
 * the storeOps below) sit on lines elided from this excerpt. */
83 struct rxfs_storeVariables {
/* status (client mod time, mask bits) sent with StartRXAFS_StoreData */
89 struct AFSStoreStatus InStatus;
/* storeOps.prepare for the UFS (disk) cache: clamp the next transfer
 * slice to at most AFS_LRALLOCSIZ bytes (the size of v->tbuffer). */
93 rxfs_storeUfsPrepare(void *r, afs_uint32 size, afs_uint32 *tlen)
95 *tlen = (size > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : size);
/* storeOps.prepare for the memory cache: clamp the slice to
 * AFS_LRALLOCSIZ and pre-allocate rx iovecs (v->tiov/v->tnio) for a
 * scatter/gather write over the call. */
100 rxfs_storeMemPrepare(void *r, afs_uint32 size, afs_uint32 *tlen)
103 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *) r;
105 *tlen = (size > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : size);
107 code = rx_WritevAlloc(v->call, v->tiov, &v->tnio, RX_MAXIOVECS, *tlen);
/* presumably reached only when rx_WritevAlloc came up short — the
 * guarding condition is on a line elided from this excerpt */
110 code = rx_Error(v->call);
/* storeOps.read for the UFS cache: read up to tlen bytes of the chunk
 * file into v->tbuffer at the file's current position (offset -1). */
122 rxfs_storeUfsRead(void *r, struct osi_file *tfile, afs_uint32 offset,
123 afs_uint32 tlen, afs_uint32 *bytesread)
126 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r;
129 code = afs_osi_Read(tfile, -1, v->tbuffer, tlen);
/* platform-specific error translation follows (body elided here) */
135 #if defined(KERNEL_HAVE_UERROR)
/* storeOps.read for the memory cache: gather tlen bytes from the
 * memory-cache entry into the iovecs set up by rxfs_storeMemPrepare. */
143 rxfs_storeMemRead(void *r, struct osi_file *tfile, afs_uint32 offset,
144 afs_uint32 tlen, afs_uint32 *bytesread)
147 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r;
/* for the memory cache the osi_file handle is really a memCacheEntry */
148 struct memCacheEntry *mceP = (struct memCacheEntry *)tfile;
151 code = afs_MemReadvBlk(mceP, offset, v->tiov, v->tnio, tlen);
/* storeOps.write for the memory cache: push the filled iovecs over the
 * rx call. A short write maps to the call's error, or to the -33
 * sentinel this file uses when rx reports no error. */
159 rxfs_storeMemWrite(void *r, afs_uint32 l, afs_uint32 *byteswritten)
162 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r;
165 code = rx_Writev(v->call, v->tiov, v->tnio, l);
168 code = rx_Error(v->call);
169 return (code ? code : -33);
/* success path: rx_Writev's return is the byte count actually sent */
171 *byteswritten = code;
/* storeOps.write for the UFS cache: send l bytes from v->tbuffer over
 * the rx call; a short write maps to the call's error, or -33. */
176 rxfs_storeUfsWrite(void *r, afs_uint32 l, afs_uint32 *byteswritten)
179 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r;
182 code = rx_Write(v->call, v->tbuffer, l);
183 /* writing 0 bytes will
184 * push a short packet. Is that really what we want, just because the
185 * data didn't come back from the disk yet? Let's try it and see. */
188 code = rx_Error(v->call);
189 return (code ? code : -33);
/* success path: rx_Write's return is the byte count actually sent */
191 *byteswritten = code;
/* storeOps.padd: pad the store RPC with sbytes of zeros (used when a
 * chunk is shorter than the negotiated chunk size), written in
 * AFS_LRALLOCSIZ-sized zeroed slices. */
196 rxfs_storePadd(void *rock, afs_uint32 sbytes)
200 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)rock;
201 char *tbuffer = v->tbuffer;
/* presumably a lazy allocation for the mem-cache path, which never got
 * a tbuffer in rxfs_storeInit — the NULL guard is elided here */
204 tbuffer = v->tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
207 tlen = (sbytes > AFS_LRALLOCSIZ
208 ? AFS_LRALLOCSIZ : sbytes);
209 memset(tbuffer, 0, tlen);
211 bsent = rx_Write(v->call, tbuffer, tlen);
/* short rx write with no recorded call error: use the -33 sentinel */
215 code = -33; /* XXX */
/* storeOps.status: poll the remote status of the store call.
 * NOTE(review): callers treat a zero return as "safe to wake waiters";
 * the exact meaning of remote-status bit 0 is not visible in this
 * excerpt — confirm against the fileserver protocol. */
224 rxfs_storeStatus(void *rock)
226 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)rock;
228 if (rx_GetRemoteStatus(v->call) & 1)
/* storeOps.close: finish the StoreData RPC, collecting the server's
 * post-store status; on success flag the caller to run afs_ProcessFS. */
234 rxfs_storeClose(void *r, struct AFSFetchStatus *OutStatus, int *doProcessFS)
237 struct AFSVolSync tsync;
238 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r;
241 code = EndRXAFS_StoreData(v->call, OutStatus, &tsync);
244 *doProcessFS = 1; /* Flag to run afs_ProcessFS() later on */
/* storeOps.destroy: end the rx call and free everything the store rock
 * owns — the large I/O buffer, the iovec array, and the rock itself.
 * NOTE(review): how `code`/`code2` combine into the return value is on
 * lines elided from this excerpt. */
250 rxfs_storeDestroy(void **r, afs_int32 error)
252 afs_int32 code = error;
253 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)*r;
259 code2 = rx_EndCall(v->call, code);
265 osi_FreeLargeSpace(v->tbuffer);
267 osi_FreeSmallSpace(v->tiov);
268 osi_FreeSmallSpace(v);
/* storeOps vtable for the UFS (on-disk) cache; the remaining member
 * initializers are on lines elided from this excerpt. */
273 struct storeOps rxfs_storeUfsOps = {
274 rxfs_storeUfsPrepare,
/* storeOps vtable for the in-memory cache; the remaining member
 * initializers are on lines elided from this excerpt. */
284 struct storeOps rxfs_storeMemOps = {
285 rxfs_storeMemPrepare,
/*
 * rxfs_storeInit
 * Build the protocol-specific rock and storeOps vtable for one StoreData
 * RPC: allocate and zero the rock, fill in the AFSStoreStatus, open an
 * rx call on the connection, start StoreData64 (falling back to the
 * 32-bit StoreData for servers without 64-bit support, when the length
 * fits), then pick the UFS or memory ops table by cacheDiskType.
 * NOTE(review): several guards, returns, and the tail of the function
 * are elided from this excerpt.
 */
295 rxfs_storeInit(struct vcache *avc, struct afs_conn *tc, afs_size_t tlen,
296 afs_size_t bytes, afs_size_t base,
297 int sync, struct storeOps **ops, void **rock)
300 struct rxfs_storeVariables *v;
305 v = (struct rxfs_storeVariables *) osi_AllocSmallSpace(sizeof(struct rxfs_storeVariables));
307 osi_Panic("rxfs_storeInit: osi_AllocSmallSpace returned NULL\n");
308 memset(v, 0, sizeof(struct rxfs_storeVariables));
/* tell the server the client's mtime; request fsync when `sync` asks */
310 v->InStatus.ClientModTime = avc->f.m.Date;
311 v->InStatus.Mask = AFS_SETMODTIME;
313 v->InStatus.Mask |= AFS_FSYNC;
315 v->call = rx_NewCall(tc->id);
317 #ifdef AFS_64BIT_CLIENT
/* prefer the 64-bit RPC unless this server is known not to support it */
318 if (!afs_serverHasNo64Bit(tc))
319 code = StartRXAFS_StoreData64(v->call,(struct AFSFid*)&avc->f.fid.Fid,
320 &v->InStatus, base, bytes, tlen);
/* 32-bit fallback cannot represent lengths above 4 GiB */
322 if (tlen > 0xFFFFFFFF)
325 afs_int32 t1 = base, t2 = bytes, t3 = tlen;
326 code = StartRXAFS_StoreData(v->call,
327 (struct AFSFid *) &avc->f.fid.Fid,
328 &v->InStatus, t1, t2, t3);
330 #else /* AFS_64BIT_CLIENT */
331 code = StartRXAFS_StoreData(v->call, (struct AFSFid *)&avc->f.fid.Fid,
332 &v->InStatus, base, bytes, tlen);
333 #endif /* AFS_64BIT_CLIENT */
/* start failed (guard elided): give the rock back before returning */
338 osi_FreeSmallSpace(v);
/* pick transfer buffers and ops by cache backend type */
341 if (cacheDiskType == AFS_FCACHE_TYPE_UFS) {
342 v->tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
345 ("rxfs_storeInit: osi_AllocLargeSpace for iovecs returned NULL\n");
346 *ops = (struct storeOps *) &rxfs_storeUfsOps;
348 v->tiov = osi_AllocSmallSpace(sizeof(struct iovec) * RX_MAXIOVECS);
351 ("rxfs_storeInit: osi_AllocSmallSpace for iovecs returned NULL\n");
352 *ops = (struct storeOps *) &rxfs_storeMemOps;
354 /* do this at a higher level now -- it's a parameter */
355 /* for now, only do 'continue from close' code if file fits in one
356 * chunk. Could clearly do better: if only one modified chunk
357 * then can still do this. can do this on *last* modified chunk */
358 tlen = avc->f.m.Length - 1; /* byte position of last byte we'll store */
360 if (AFS_CHUNK(tlen) != 0)
/* NOTE(review): global counter, apparently tallying missing dcache
 * entries seen during store-back; its increment/consumer is on lines
 * elided from this excerpt — confirm before relying on it. */
372 unsigned int storeallmissing = 0;
374 * Called for each chunk upon store.
376 * \param avc Ptr to the vcache entry of the file being stored.
377 * \param dclist pointer to the list of dcaches
378 * \param bytes total number of bytes for the current operation
379 * \param anewDV Ptr to the dataversion after store
380 * \param doProcessFS pointer to the "do process FetchStatus" flag
381 * \param OutStatus pointer to the FetchStatus as returned by the fileserver
382 * \param nchunks number of dcaches to consider
383 * \param nomore copy of the "no more data" flag
384 * \param ops pointer to the block of storeOps to be used for this operation
385 * \param rock pointer to the opaque protocol-specific data of this operation
/*
 * afs_CacheStoreDCaches
 * Drive one StoreData RPC over a list of contiguous dcache chunks: for
 * each chunk, open its cache file and pump it to the server through the
 * storeOps (prepare/read/write loop), optionally waking a waiting user
 * process early once the server reports a safe status, zero-padding
 * short interior chunks, then close and destroy the protocol rock.
 * NOTE(review): braces, some guards, and the return are elided from
 * this excerpt; comments annotate only the visible statements.
 */
388 afs_CacheStoreDCaches(struct vcache *avc, struct dcache **dclist,
392 struct AFSFetchStatus *OutStatus,
395 struct storeOps *ops, void *rock)
397 int *shouldwake = NULL;
402 osi_timeval_t xferStartTime; /*FS xfer start time */
403 afs_size_t bytesToXfer = 10000; /* # bytes to xfer */
404 afs_size_t bytesXferred = 10000; /* # bytes actually xferred */
405 #endif /* AFS_NOSTATS */
/* stop at the first chunk whose transfer fails */
408 for (i = 0; i < nchunks && !code; i++) {
412 struct dcache *tdc = dclist[i];
413 afs_int32 alen = tdc->f.chunkBytes;
415 afs_warn("afs: missing dcache!\n");
417 continue; /* panic? */
419 afs_Trace4(afs_iclSetp, CM_TRACE_STOREALL2, ICL_TYPE_POINTER, avc,
420 ICL_TYPE_INT32, tdc->f.chunk, ICL_TYPE_INT32, tdc->index,
421 ICL_TYPE_INT32, afs_inode2trace(&tdc->f.inode));
/* decide whether the waiter may be woken before the store completes:
 * asynchrony == -1 means "use the global default threshold" */
424 if (avc->asynchrony == -1) {
425 if (afs_defaultAsynchrony > (bytes - stored))
426 shouldwake = &nomore;
428 else if ((afs_uint32) avc->asynchrony >= (bytes - stored))
429 shouldwake = &nomore;
431 fP = afs_CFileOpen(&tdc->f.inode);
433 afs_Trace4(afs_iclSetp, CM_TRACE_STOREPROC, ICL_TYPE_POINTER, avc,
434 ICL_TYPE_FID, &(avc->f.fid), ICL_TYPE_OFFSET,
435 ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_INT32, alen);
437 AFS_STATCNT(CacheStoreProc);
439 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
440 avc->f.truncPos = AFS_NOTRUNC;
443 * In this case, alen is *always* the amount of data we'll be trying
449 #endif /* AFS_NOSTATS */
/* inner transfer loop: slice the chunk, read from cache, write to rx */
453 afs_int32 bytesread, byteswritten;
454 code = (*ops->prepare)(rock, alen, &tlen);
458 code = (*ops->read)(rock, fP, offset, tlen, &bytesread);
463 code = (*ops->write)(rock, tlen, &byteswritten);
467 bytesXferred += byteswritten;
468 #endif /* AFS_NOSTATS */
473 * if file has been locked on server, can allow
/* wake the waiting process at most once, and only when the server's
 * remote status says it is safe (ops->status == 0) */
476 if (shouldwake && *shouldwake && ((*ops->status)(rock) == 0)) {
477 *shouldwake = 0; /* only do this once */
481 afs_Trace4(afs_iclSetp, CM_TRACE_STOREPROC, ICL_TYPE_POINTER, avc,
482 ICL_TYPE_FID, &(avc->f.fid), ICL_TYPE_OFFSET,
483 ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_INT32, alen);
486 FillStoreStats(code, AFS_STATS_FS_XFERIDX_STOREDATA,
487 &xferStartTime, bytesToXfer, bytesXferred);
488 #endif /* AFS_NOSTATS */
/* interior chunk shorter than the chunk size: pad with zeros so the
 * next chunk lands at the right offset in the RPC stream */
491 if ((tdc->f.chunkBytes < afs_OtherCSize)
492 && (i < (nchunks - 1)) && code == 0) {
493 int bsent, tlen, sbytes = afs_OtherCSize - tdc->f.chunkBytes;
494 char *tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
497 tlen = (sbytes > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : sbytes);
498 memset(tbuffer, 0, tlen);
501 ((struct rxfs_storeVariables*)rock)->call, tbuffer, tlen);
505 code = -33; /* XXX */
510 osi_FreeLargeSpace(tbuffer);
512 stored += tdc->f.chunkBytes;
513 /* ideally, I'd like to unlock the dcache and turn
514 * off the writing bit here, but that would
515 * require being able to retry StoreAllSegments in
516 * the event of a failure. It only really matters
517 * if user can't read from a 'locked' dcache or
518 * one which has the writing bit turned on. */
/* end of chunk loop: finish the RPC and tear down the rock */
522 code = (*ops->close)(rock, OutStatus, doProcessFS);
528 code = (*ops->destroy)(&rock, code);
/* minimum of two values. NOTE: evaluates each argument twice — only
 * pass side-effect-free expressions. */
532 #define lmin(a,b) (((a) < (b)) ? (a) : (b))
536 * \param dclist pointer to the list of dcaches
537 * \param avc Ptr to the vcache entry.
538 * \param areq Ptr to the request structure
539 * \param sync sync flag
540 * \param minj the chunk offset for this call
541 * \param high index of last dcache to store
542 * \param moredata the moredata flag
543 * \param anewDV Ptr to the dataversion after store
544 * \param amaxStoredLength Ptr to the amount of data that is actually stored
546 * \note Environment: Nothing interesting.
/*
 * afs_CacheStoreVCache
 * Store back a run of dirty dcache chunks for a vcache: walk dcList,
 * lock and size each chunk, and whenever a contiguous run ends (last
 * index, or a hole in the list) issue one StoreData RPC for that run
 * via rxfs_storeInit + afs_CacheStoreDCaches, with 64-bit fallback
 * handling. Afterwards clear dirty bookkeeping on the stored chunks,
 * process the returned status into the vcache, and release remaining
 * locked entries.
 * NOTE(review): braces, several guards, retry plumbing, and the return
 * are elided from this excerpt; comments annotate visible lines only.
 */
549 afs_CacheStoreVCache(struct dcache **dcList,
551 struct vrequest *areq,
555 unsigned int moredata,
557 afs_size_t *amaxStoredLength)
560 struct storeOps *ops;
564 struct AFSStoreStatus InStatus;
565 struct AFSFetchStatus OutStatus;
567 afs_size_t base, bytes, length;
570 unsigned int first = 0;
/* accumulate contiguous chunks until a run boundary, then ship the run */
573 for (bytes = 0, j = 0; !code && j <= high; j++) {
575 ObtainSharedLock(&(dcList[j]->lock), 629);
578 bytes += dcList[j]->f.chunkBytes;
/* a short chunk in the middle of the run will need zero padding */
579 if ((dcList[j]->f.chunkBytes < afs_OtherCSize)
580 && (dcList[j]->f.chunk - minj < high)
582 int sbytes = afs_OtherCSize - dcList[j]->f.chunkBytes;
/* run boundary: last index, or the next slot is empty */
586 if (bytes && (j == high || !dcList[j + 1])) {
587 struct dcache **dclist = &dcList[first];
588 /* base = AFS_CHUNKTOBASE(dcList[first]->f.chunk); */
589 base = AFS_CHUNKTOBASE(first + minj);
592 * take a list of dcache structs and send them all off to the server
593 * the list must be in order, and the chunks contiguous.
594 * Note - there is no locking done by this code currently. For
595 * safety's sake, xdcache could be locked over the entire call.
596 * However, that pretty well ties up all the threads. Meantime, all
597 * the chunks _MUST_ have their refcounts bumped.
598 * The writes done before a store back will clear setuid-ness
600 * We can permit CacheStoreProc to wake up the user process IFF we
601 * are doing the last RPC for this close, ie, storing back the last
602 * set of contiguous chunks of a file.
605 nchunks = 1 + j - first;
606 nomore = !(moredata || (j != high));
/* file length for the RPC, clipped by any pending truncation */
607 length = lmin(avc->f.m.Length, avc->f.truncPos);
608 afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64,
609 ICL_TYPE_FID, &avc->f.fid.Fid, ICL_TYPE_OFFSET,
610 ICL_HANDLE_OFFSET(base), ICL_TYPE_OFFSET,
611 ICL_HANDLE_OFFSET(bytes), ICL_TYPE_OFFSET,
612 ICL_HANDLE_OFFSET(length));
615 tc = afs_Conn(&avc->f.fid, areq, 0);
617 #ifdef AFS_64BIT_CLIENT
620 code = rxfs_storeInit(avc, tc, length, bytes, base,
624 "afs_CacheStoreProc: rxfs_storeInit failed with %d", code);
627 code = afs_CacheStoreDCaches(avc, dclist, bytes, anewDV,
628 &doProcessFS, &OutStatus, nchunks, nomore, ops, rock);
630 #ifdef AFS_64BIT_CLIENT
/* server rejected the 64-bit opcode: remember and (on elided lines)
 * retry with the 32-bit variant */
631 if (code == RXGEN_OPCODE && !afs_serverHasNo64Bit(tc)) {
632 afs_serverSetNo64Bit(tc);
635 #endif /* AFS_64BIT_CLIENT */
637 (tc, code, &avc->f.fid, areq,
638 AFS_STATS_FS_RPCIDX_STOREDATA, SHARED_LOCK,
641 /* put back all remaining locked dcache entries */
642 for (i = 0; i < nchunks; i++) {
643 struct dcache *tdc = dclist[i];
645 if (afs_indexFlags[tdc->index] & IFDataMod) {
647 * LOCKXXX -- should hold afs_xdcache(W) when
648 * modifying afs_indexFlags.
650 afs_indexFlags[tdc->index] &= ~IFDataMod;
651 afs_stats_cmperf.cacheCurrDirtyChunks--;
652 afs_indexFlags[tdc->index] &= ~IFDirtyPages;
653 if (sync & AFS_VMSYNC_INVAL) {
654 /* since we have invalidated all the pages of this
655 ** vnode by calling osi_VM_TryToSmush, we can
656 ** safely mark this dcache entry as not having
657 ** any pages. This vnode now becomes eligible for
658 ** reclamation by getDownD.
660 afs_indexFlags[tdc->index] &= ~IFAnyPages;
664 UpgradeSToWLock(&tdc->lock, 628);
665 tdc->f.states &= ~DWriting; /* correct? */
666 tdc->dflags |= DFEntryMod;
667 ReleaseWriteLock(&tdc->lock);
669 /* Mark the entry as released */
674 /* Now copy out return params */
675 UpgradeSToWLock(&avc->lock, 28); /* keep out others for a while */
676 afs_ProcessFS(avc, &OutStatus, areq);
677 /* Keep last (max) size of file on server to see if
678 * we need to call afs_StoreMini to extend the file.
681 *amaxStoredLength = OutStatus.Length;
682 ConvertWToSLock(&avc->lock);
/* release any dcache entries still locked beyond the failure point */
687 for (j++; j <= high; j++) {
689 ReleaseSharedLock(&(dcList[j]->lock));
690 afs_PutDCache(dcList[j]);
691 /* Releasing entry */
697 afs_Trace2(afs_iclSetp, CM_TRACE_STOREALLDCDONE,
698 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code);
706 /* rock and operations for RX_FILESERVER */
/* Per-operation state ("rock") for a fetch from an RX fileserver.
 * NOTE(review): members such as tbuffer/iov/nio (used by the fetchOps
 * below) sit on lines elided from this excerpt. */
708 struct rxfs_fetchVariables {
/* the open rx call carrying the FetchData RPC */
709 struct rx_call *call;
/* nonzero once the server is known to lack 64-bit FetchData support */
713 afs_int32 hasNo64bit;
/* fetchOps.read for the UFS cache: pull up to AFS_LRALLOCSIZ bytes from
 * the rx call into v->tbuffer. */
719 rxfs_fetchUfsRead(void *r, afs_uint32 size, afs_uint32 *bytesread)
723 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;
726 tlen = (size > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : size);
728 code = rx_Read(v->call, v->tbuffer, tlen);
/* fetchOps.read for the memory cache: scatter up to tlen bytes from the
 * rx call into v->iov, recording the iovec count in v->nio. */
737 rxfs_fetchMemRead(void *r, afs_uint32 tlen, afs_uint32 *bytesread)
740 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;
744 code = rx_Readv(v->call, v->iov, &v->nio, RX_MAXIOVECS, tlen);
/* fetchOps.write for the memory cache: store the iovecs filled by
 * rxfs_fetchMemRead into the memory-cache entry at `offset`. */
754 rxfs_fetchMemWrite(void *r, struct osi_file *fP,
755 afs_uint32 offset, afs_uint32 tlen,
756 afs_uint32 *byteswritten)
759 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;
/* for the memory cache the osi_file handle is really a memCacheEntry */
760 struct memCacheEntry *mceP = (struct memCacheEntry *)fP;
762 code = afs_MemWritevBlk(mceP, offset, v->iov, v->nio, tlen);
/* success path: the write-block return is the byte count written */
766 *byteswritten = code;
/* fetchOps.write for the UFS cache: write tlen bytes from v->tbuffer to
 * the chunk file at its current position (offset -1). */
771 rxfs_fetchUfsWrite(void *r, struct osi_file *fP,
772 afs_uint32 offset, afs_uint32 tlen,
773 afs_uint32 *byteswritten)
776 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;
778 code = afs_osi_Write(fP, -1, v->tbuffer, tlen);
/* success path: afs_osi_Write's return is the byte count written */
782 *byteswritten = code;
/* fetchOps.close: finish the FetchData RPC, collecting the server's
 * status into tsmall, then end the rx call (folding in `code`). */
788 rxfs_fetchClose(void *r, struct vcache *avc, struct dcache * adc,
789 struct afs_FetchOutput *tsmall)
791 afs_int32 code, code1 = 0;
792 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;
798 code = EndRXAFS_FetchData(v->call, &tsmall->OutStatus,
805 code1 = rx_EndCall(v->call, code);
/* fetchOps.destroy: free everything the fetch rock owns — the large
 * I/O buffer, the iovec array, and the rock itself.
 * NOTE(review): the rx_EndCall handling and the final return are on
 * lines elided from this excerpt. */
816 rxfs_fetchDestroy(void **r, afs_int32 error)
818 afs_int32 code = error;
819 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)*r;
823 osi_FreeLargeSpace(v->tbuffer);
825 osi_FreeSmallSpace(v->iov);
826 osi_FreeSmallSpace(v);
/* fetchOps.more: read the next 32-bit, network-order block length from
 * the fetch stream (AFS/DFS extended protocol); a short read maps to
 * the call's error, or -1 as a last resort. */
831 rxfs_fetchMore(void *r, afs_uint32 *length, afs_uint32 *moredata)
834 register struct rxfs_fetchVariables *v
835 = (struct rxfs_fetchVariables *)r;
838 code = rx_Read(v->call, (void *)length, sizeof(afs_int32));
839 *length = ntohl(*length);
841 if (code != sizeof(afs_int32)) {
842 code = rx_Error(v->call);
843 return (code ? code : -1); /* try to return code, not -1 */
/* fetchOps vtable for the UFS (on-disk) cache; member initializers are
 * on lines elided from this excerpt. */
849 struct fetchOps rxfs_fetchUfsOps = {
/* fetchOps vtable for the in-memory cache; member initializers are on
 * lines elided from this excerpt. */
858 struct fetchOps rxfs_fetchMemOps = {
/*
 * rxfs_fetchInit
 * Build the protocol-specific rock and fetchOps vtable for one
 * FetchData RPC: allocate and zero the rock, open an rx call, start
 * FetchData64 and read the high 32 bits of the length; on RXGEN_OPCODE
 * (or a server already marked 32-bit-only) fall back to the 32-bit
 * FetchData, which cannot address offsets above 2^31-1. Reads the
 * low 32 bits of the length, then selects the UFS or memory ops table
 * by cacheDiskType and returns the advertised length in *out_length.
 * NOTE(review): braces, several guards, and the tail of the function
 * are elided from this excerpt.
 */
867 rxfs_fetchInit(register struct afs_conn *tc, struct vcache *avc,afs_offs_t base,
868 afs_uint32 size, afs_uint32 *out_length, struct dcache *adc,
869 struct osi_file *fP, struct fetchOps **ops, void **rock)
871 struct rxfs_fetchVariables *v;
873 afs_int32 length_hi, length, bytes;
874 #ifdef AFS_64BIT_CLIENT
876 afs_size_t lengthFound; /* as returned from server */
877 #endif /* AFS_64BIT_CLIENT */
879 v = (struct rxfs_fetchVariables *) osi_AllocSmallSpace(sizeof(struct rxfs_fetchVariables));
881 osi_Panic("rxfs_fetchInit: osi_AllocSmallSpace returned NULL\n");
882 memset(v, 0, sizeof(struct rxfs_fetchVariables));
885 v->call = rx_NewCall(tc->id);
888 #ifdef AFS_64BIT_CLIENT
889 length_hi = code = 0;
/* try the 64-bit RPC first unless this server is known not to have it */
890 if (!afs_serverHasNo64Bit(tc)) {
893 code = StartRXAFS_FetchData64(v->call, (struct AFSFid *)&avc->f.fid.Fid,
897 afs_Trace2(afs_iclSetp, CM_TRACE_FETCH64CODE,
898 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code);
/* the 64-bit reply begins with the high word of the length */
900 bytes = rx_Read(v->call, (char *)&length_hi, sizeof(afs_int32));
902 if (bytes == sizeof(afs_int32)) {
903 length_hi = ntohl(length_hi);
906 code = rx_Error(v->call);
908 code1 = rx_EndCall(v->call, code);
/* fall back to the 32-bit RPC on opcode rejection */
914 if (code == RXGEN_OPCODE || afs_serverHasNo64Bit(tc)) {
/* 32-bit FetchData cannot address offsets beyond 2^31-1 */
915 if (base > 0x7FFFFFFF) {
922 v->call = rx_NewCall(tc->id);
924 StartRXAFS_FetchData(v->call, (struct AFSFid *)
925 &avc->f.fid.Fid, pos,
/* remember this server is 32-bit-only for future calls */
929 afs_serverSetNo64Bit(tc);
/* low word of the advertised length, network order */
933 bytes = rx_Read(v->call, (char *)&length, sizeof(afs_int32));
935 if (bytes == sizeof(afs_int32))
936 length = ntohl(length);
938 code = rx_Error(v->call);
941 FillInt64(lengthFound, length_hi, length);
942 afs_Trace3(afs_iclSetp, CM_TRACE_FETCH64LENG,
943 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code,
945 ICL_HANDLE_OFFSET(lengthFound));
946 #else /* AFS_64BIT_CLIENT */
948 code = StartRXAFS_FetchData(v->call, (struct AFSFid *)&avc->f.fid.Fid,
953 bytes = rx_Read(v->call, (char *)&length, sizeof(afs_int32));
955 if (bytes == sizeof(afs_int32))
956 length = ntohl(length);
958 code = rx_Error(v->call);
960 #endif /* AFS_64BIT_CLIENT */
/* failure path (guard elided): give the rock back before returning */
962 osi_FreeSmallSpace(v);
/* pick transfer buffers and ops by cache backend type */
966 if ( cacheDiskType == AFS_FCACHE_TYPE_UFS ) {
967 v->tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
969 osi_Panic("rxfs_fetchInit: osi_AllocLargeSpace for iovecs returned NULL\n");
970 osi_Assert(WriteLocked(&adc->lock));
972 *ops = (struct fetchOps *) &rxfs_fetchUfsOps;
975 afs_Trace4(afs_iclSetp, CM_TRACE_MEMFETCH, ICL_TYPE_POINTER, avc,
976 ICL_TYPE_POINTER, fP, ICL_TYPE_OFFSET,
977 ICL_HANDLE_OFFSET(base), ICL_TYPE_INT32, length);
979 * We need to alloc the iovecs on the heap so that they are "pinned"
980 * rather than declare them on the stack - defect 11272
982 v->iov = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec) *
985 osi_Panic("afs_CacheFetchProc: osi_AllocSmallSpace for iovecs returned NULL\n");
986 *ops = (struct fetchOps *) &rxfs_fetchMemOps;
989 *out_length = length;
995 * Routine called on fetch; also tells people waiting for data
996 * that more has arrived.
998 * \param tc Ptr to the Rx connection structure.
999 * \param fP File descriptor for the cache file.
1000 * \param abase Base offset to fetch.
1001 * \param adc Ptr to the dcache entry for the file, write-locked.
1002 * \param avc Ptr to the vcache entry for the file.
1003 * \param size Amount of data that should be fetched.
1004 * \param tsmall Ptr to the afs_FetchOutput structure.
1006 * \note Environment: Nothing interesting.
1009 afs_CacheFetchProc(register struct afs_conn *tc,
1010 register struct osi_file *fP, afs_size_t abase,
1011 struct dcache *adc, struct vcache *avc,
1013 struct afs_FetchOutput *tsmall)
1015 register afs_int32 code;
1017 afs_uint32 bytesread, byteswritten;
1018 struct fetchOps *ops = NULL;
1021 register int offset = 0;
1025 osi_timeval_t xferStartTime; /*FS xfer start time */
1026 afs_size_t bytesToXfer = 0, bytesXferred = 0;
1029 AFS_STATCNT(CacheFetchProc);
1031 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHDATA);
1033 code = rxfs_fetchInit(tc, avc, abase, size, &length, adc, fP, &ops, &rock);
1036 osi_GetuTime(&xferStartTime);
1037 #endif /* AFS_NOSTATS */
1039 adc->validPos = abase;
1043 code = (*ops->more)(rock, &length, &moredata);
1048 * The fetch protocol is extended for the AFS/DFS translator
1049 * to allow multiple blocks of data, each with its own length,
1050 * to be returned. As long as the top bit is set, there are more
1053 * We do not do this for AFS file servers because they sometimes
1054 * return large negative numbers as the transfer size.
1056 if (avc->f.states & CForeign) {
1057 moredata = length & 0x80000000;
1058 length &= ~0x80000000;
1063 bytesToXfer += length;
1064 #endif /* AFS_NOSTATS */
1065 while (length > 0) {
1066 #ifdef RX_KERNEL_TRACE
1067 afs_Trace1(afs_iclSetp, CM_TRACE_TIMESTAMP, ICL_TYPE_STRING,
1070 code = (*ops->read)(rock, length, &bytesread);
1071 #ifdef RX_KERNEL_TRACE
1072 afs_Trace1(afs_iclSetp, CM_TRACE_TIMESTAMP, ICL_TYPE_STRING,
1076 bytesXferred += bytesread;
1077 #endif /* AFS_NOSTATS */
1079 afs_Trace3(afs_iclSetp, CM_TRACE_FETCH64READ,
1080 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code,
1081 ICL_TYPE_INT32, length);
1085 code = (*ops->write)(rock, fP, offset, bytesread, &byteswritten);
1088 offset += bytesread;
1090 length -= bytesread;
1091 adc->validPos = abase;
1092 if (afs_osi_Wakeup(&adc->validPos) == 0)
1093 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAKE, ICL_TYPE_STRING,
1094 __FILE__, ICL_TYPE_INT32, __LINE__,
1095 ICL_TYPE_POINTER, adc, ICL_TYPE_INT32,
1101 code = (*ops->close)(rock, avc, adc, tsmall);
1102 (*ops->destroy)(&rock, code);
1105 FillStoreStats(code, AFS_STATS_FS_XFERIDX_FETCHDATA,&xferStartTime,
1106 bytesToXfer, bytesXferred);