2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
13 #include "afs/sysincludes.h" /* Standard vendor system headers */
14 #ifndef AFS_LINUX22_ENV
15 #include "rpc/types.h"
23 #endif /* AFS_ALPHA_ENV */
24 #include "afsincludes.h" /* Afs-based standard headers */
25 #include "afs/afs_stats.h" /* statistics */
26 #include "afs_prototypes.h"
28 extern int cacheDiskType;
/*
 * FillStoreStats
 *
 * Record per-RPC transfer statistics for slot 'idx' of
 * afs_stats_cmfullperf.rpc.fsXferTimes: success count, byte totals
 * (sumBytes is kept in KiB; sub-KiB remainder is carried in
 * afs_stats_XferSumBytes[idx]), min/max byte counts, a histogram of
 * intended transfer sizes, and elapsed-time sum / sum-of-squares /
 * min / max measured from *xferStartTime to now.
 *
 * \param code           transfer completion code (presumably 0 == success;
 *                       the branch that tests it is elided from this view)
 * \param idx            statistics index (e.g. AFS_STATS_FS_XFERIDX_*)
 * \param xferStartTime  time at which the transfer began
 * \param bytesToXfer    number of bytes we intended to transfer
 * \param bytesXferred   number of bytes actually transferred
 */
33 FillStoreStats(int code, int idx, osi_timeval_t *xferStartTime,
34 afs_size_t bytesToXfer, afs_size_t bytesXferred)
36 struct afs_stats_xferData *xferP;
37 osi_timeval_t xferStopTime;
38 osi_timeval_t elapsedTime;
40 xferP = &(afs_stats_cmfullperf.rpc.fsXferTimes[idx]);
41 osi_GetuTime(&xferStopTime);
44 (xferP->numSuccesses)++;
/* Carry whole KiB into sumBytes; keep the sub-KiB remainder (low 10
 * bits) in afs_stats_XferSumBytes[idx] for the next call. */
45 afs_stats_XferSumBytes[idx] += bytesXferred;
46 (xferP->sumBytes) += (afs_stats_XferSumBytes[idx] >> 10);
47 afs_stats_XferSumBytes[idx] &= 0x3FF;
48 if (bytesXferred < xferP->minBytes)
49 xferP->minBytes = bytesXferred;
50 if (bytesXferred > xferP->maxBytes)
51 xferP->maxBytes = bytesXferred;
54 * Tally the size of the object. Note: we tally the actual size,
55 * NOT the number of bytes that made it out over the wire.
/* Size histogram: first bucket whose AFS_STATS_MAXBYTES_BUCKETn bound
 * holds bytesToXfer gets the count. */
57 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET0) (xferP->count[0])++;
58 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET1) (xferP->count[1])++;
59 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET2) (xferP->count[2])++;
60 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET3) (xferP->count[3])++;
61 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET4) (xferP->count[4])++;
62 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET5) (xferP->count[5])++;
63 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET6) (xferP->count[6])++;
64 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET7) (xferP->count[7])++;
/* Elapsed-time accumulation: sum, sum of squares, running min/max. */
68 afs_stats_GetDiff(elapsedTime, (*xferStartTime), xferStopTime);
69 afs_stats_AddTo((xferP->sumTime), elapsedTime);
70 afs_stats_SquareAddTo((xferP->sqrTime), elapsedTime);
71 if (afs_stats_TimeLessThan(elapsedTime, (xferP->minTime))) {
72 afs_stats_TimeAssign((xferP->minTime), elapsedTime);
74 if (afs_stats_TimeGreaterThan(elapsedTime, (xferP->maxTime))) {
75 afs_stats_TimeAssign((xferP->maxTime), elapsedTime);
79 #endif /* AFS_NOSTATS */
81 /* rock and operations for RX_FILESERVER */
/*
 * rxfs_storeUfsPrepare
 *
 * Prepare for one store iteration with a UFS (disk) cache: clamp the
 * requested size to one large-space buffer (AFS_LRALLOCSIZ bytes) and
 * return it through *tlen.  The rock 'r' is unused here.
 */
86 rxfs_storeUfsPrepare(void *r, afs_uint32 size, afs_uint32 *tlen)
88 *tlen = (size > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : size);
/*
 * rxfs_storeMemPrepare
 *
 * Prepare for one store iteration with a memory cache: clamp the
 * requested size to AFS_LRALLOCSIZ, then ask Rx to allocate iovecs
 * (v->tiov / v->tnio) big enough for *tlen bytes of outgoing data.
 * On allocation failure, fetches the call's error code.
 */
93 rxfs_storeMemPrepare(void *r, afs_uint32 size, afs_uint32 *tlen)
96 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *) r;
98 *tlen = (size > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : size);
100 code = rx_WritevAlloc(v->call, v->tiov, &v->tnio, RX_MAXIOVECS, *tlen);
/* rx_WritevAlloc failed (elided check); report the call's error. */
103 code = rx_Error(v->call);
/*
 * rxfs_storeUfsRead
 *
 * Read up to 'tlen' bytes of the chunk from the UFS cache file into
 * the rock's tbuffer (offset -1 == read at the file's current
 * position), reporting the count via *bytesread.  'offset' is unused
 * in the visible code.
 */
115 rxfs_storeUfsRead(void *r, struct osi_file *tfile, afs_uint32 offset,
116 afs_uint32 tlen, afs_uint32 *bytesread)
119 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r;
122 code = afs_osi_Read(tfile, -1, v->tbuffer, tlen);
128 #if defined(KERNEL_HAVE_UERROR)
/*
 * rxfs_storeMemRead
 *
 * Read 'tlen' bytes of the chunk from the memory cache entry (the
 * osi_file pointer is really a memCacheEntry for memcache) into the
 * iovecs prepared by rxfs_storeMemPrepare, starting at 'offset'.
 */
136 rxfs_storeMemRead(void *r, struct osi_file *tfile, afs_uint32 offset,
137 afs_uint32 tlen, afs_uint32 *bytesread)
140 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r;
141 struct memCacheEntry *mceP = (struct memCacheEntry *)tfile;
144 code = afs_MemReadvBlk(mceP, offset, v->tiov, v->tnio, tlen);
/*
 * rxfs_storeMemWrite
 *
 * Push 'l' bytes from the prepared iovecs onto the wire via rx_Writev.
 * On a short write (check elided from this view) return the call's
 * error, or -33 if Rx reports none (-33 is a historic local error
 * value used throughout this file for short Rx transfers).
 * On success, rx_Writev's return value is the byte count written.
 */
152 rxfs_storeMemWrite(void *r, afs_uint32 l, afs_uint32 *byteswritten)
155 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r;
158 code = rx_Writev(v->call, v->tiov, v->tnio, l);
161 code = rx_Error(v->call);
162 return (code ? code : -33);
164 *byteswritten = code;
/*
 * rxfs_storeUfsWrite
 *
 * Push 'l' bytes from the rock's tbuffer onto the wire via rx_Write.
 * On a short write (check elided from this view) return the call's
 * error, or -33 if Rx reports none.  On success, rx_Write's return
 * value is the byte count written.
 */
169 rxfs_storeUfsWrite(void *r, afs_uint32 l, afs_uint32 *byteswritten)
172 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r;
175 code = rx_Write(v->call, v->tbuffer, l);
176 /* writing 0 bytes will
177 * push a short packet. Is that really what we want, just because the
178 * data didn't come back from the disk yet? Let's try it and see. */
181 code = rx_Error(v->call);
182 return (code ? code : -33);
184 *byteswritten = code;
/*
 * rxfs_storePadd
 *
 * Pad the outgoing store with 'size' zero bytes, written in chunks of
 * at most AFS_LRALLOCSIZ from a zero-filled scratch buffer (the loop
 * around the write is elided from this view).  Used to fill out a
 * short chunk to the server's expected chunk size.  Returns -33 on a
 * short Rx write (historic local error value, flagged XXX upstream).
 */
189 rxfs_storePadd(void *rock, afs_uint32 size)
193 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)rock;
/* Allocate the zero buffer lazily if the rock has none (guard elided). */
196 v->tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
197 memset(v->tbuffer, 0, AFS_LRALLOCSIZ);
200 tlen = (size > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : size);
202 code = rx_Write(v->call, v->tbuffer, tlen);
206 return -33; /* XXX */
/*
 * rxfs_storeStatus
 *
 * Query the remote status bits of the store call.  Bit 0x1 set by the
 * fileserver indicates — per the callers in this file — that waiters
 * may be woken early; the return values themselves are elided from
 * this view (callers treat 0 as "OK to wake").
 */
213 rxfs_storeStatus(void *rock)
215 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)rock;
217 if (rx_GetRemoteStatus(v->call) & 1)
/*
 * rxfs_storeClose
 *
 * Terminate the StoreData RPC: call the 64-bit or 32-bit End routine
 * depending on client build / server capability (selection logic
 * elided from this view), returning the fileserver's fetch status in
 * *OutStatus.  On success *doProcessFS is set so the caller runs
 * afs_ProcessFS() later.  The AFSVolSync result is discarded.
 */
223 rxfs_storeClose(void *r, struct AFSFetchStatus *OutStatus, int *doProcessFS)
226 struct AFSVolSync tsync;
227 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r;
232 #ifdef AFS_64BIT_CLIENT
234 code = EndRXAFS_StoreData64(v->call, OutStatus, &tsync);
237 code = EndRXAFS_StoreData(v->call, OutStatus, &tsync);
240 *doProcessFS = 1; /* Flag to run afs_ProcessFS() later on */
/*
 * rxfs_storeDestroy
 *
 * Tear down the store rock: end the Rx call (propagating 'error'),
 * free the large-space buffer and the iovec array if allocated
 * (NULL guards elided from this view), then free the rock itself.
 * Returns the resulting error code.
 */
246 rxfs_storeDestroy(void **r, afs_int32 error)
248 afs_int32 code = error;
249 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)*r;
254 code = rx_EndCall(v->call, error);
260 osi_FreeLargeSpace(v->tbuffer);
262 osi_FreeSmallSpace(v->tiov);
263 osi_FreeSmallSpace(v);
/*
 * Operation table for storing from a UFS (on-disk) cache.  Old SGI
 * compilers lacking C99 get positional initializers (partially elided
 * from this view); everyone else gets designated initializers.  On
 * Linux 2.6+ a platform-specific storeproc is plugged in.
 */
268 struct storeOps rxfs_storeUfsOps = {
269 #if (defined(AFS_SGI_ENV) && !defined(__c99))
270 rxfs_storeUfsPrepare,
278 .prepare = rxfs_storeUfsPrepare,
279 .read = rxfs_storeUfsRead,
280 .write = rxfs_storeUfsWrite,
281 .status = rxfs_storeStatus,
282 .padd = rxfs_storePadd,
283 .close = rxfs_storeClose,
284 .destroy = rxfs_storeDestroy,
285 #ifdef AFS_LINUX26_ENV
286 .storeproc = afs_linux_storeproc
/*
 * Operation table for storing from a memory cache.  Same structure as
 * rxfs_storeUfsOps (C89 positional fallback for old SGI compilers,
 * designated initializers otherwise), but with the Mem variants of
 * prepare/read/write and no Linux storeproc override.
 */
292 struct storeOps rxfs_storeMemOps = {
293 #if (defined(AFS_SGI_ENV) && !defined(__c99))
294 rxfs_storeMemPrepare,
302 .prepare = rxfs_storeMemPrepare,
303 .read = rxfs_storeMemRead,
304 .write = rxfs_storeMemWrite,
305 .status = rxfs_storeStatus,
306 .padd = rxfs_storePadd,
307 .close = rxfs_storeClose,
308 .destroy = rxfs_storeDestroy
/*
 * rxfs_storeInit
 *
 * Set up a StoreData operation: allocate and zero the rock
 * (rxfs_storeVariables), fill in the AFSStoreStatus (client mod time,
 * AFS_SETMODTIME, plus AFS_FSYNC when 'sync' requests it — guard
 * elided), open a new Rx call, and start the 64-bit or 32-bit
 * StoreData RPC as appropriate.  On success, selects the UFS or
 * memory operation table by cacheDiskType, allocating the transfer
 * buffer (UFS) or the iovec array (memcache), and returns the ops
 * table and rock via *ops / **rock.
 *
 * \param avc     vcache of the file being stored
 * \param tc      connection to the fileserver
 * \param base    starting offset of the store
 * \param bytes   number of bytes to store
 * \param length  resulting file length
 * \param sync    sync flags (AFS_FSYNC honored here)
 * \param ops     out: selected storeOps table
 * \param rock    out: opaque per-operation state
 */
313 rxfs_storeInit(struct vcache *avc, struct afs_conn *tc, afs_size_t base,
314 afs_size_t bytes, afs_size_t length,
315 int sync, struct storeOps **ops, void **rock)
318 struct rxfs_storeVariables *v;
323 v = (struct rxfs_storeVariables *) osi_AllocSmallSpace(sizeof(struct rxfs_storeVariables));
325 osi_Panic("rxfs_storeInit: osi_AllocSmallSpace returned NULL\n");
326 memset(v, 0, sizeof(struct rxfs_storeVariables));
328 v->InStatus.ClientModTime = avc->f.m.Date;
329 v->InStatus.Mask = AFS_SETMODTIME;
332 v->InStatus.Mask |= AFS_FSYNC;
334 v->call = rx_NewCall(tc->id);
336 #ifdef AFS_64BIT_CLIENT
/* Prefer the 64-bit RPC unless this server is known not to support it. */
337 if (!afs_serverHasNo64Bit(tc))
338 code = StartRXAFS_StoreData64(
339 v->call, (struct AFSFid*)&avc->f.fid.Fid,
340 &v->InStatus, base, bytes, length);
/* 32-bit fallback: lengths above 4GiB-1 cannot be represented. */
342 if (length > 0xFFFFFFFF)
345 afs_int32 t1 = base, t2 = bytes, t3 = length;
346 code = StartRXAFS_StoreData(v->call,
347 (struct AFSFid *) &avc->f.fid.Fid,
348 &v->InStatus, t1, t2, t3);
350 #else /* AFS_64BIT_CLIENT */
351 code = StartRXAFS_StoreData(v->call, (struct AFSFid *)&avc->f.fid.Fid,
352 &v->InStatus, base, bytes, length);
353 #endif /* AFS_64BIT_CLIENT */
/* Start failed (guard elided): release the rock and bail. */
358 osi_FreeSmallSpace(v);
/* Pick the transport helpers that match the cache backend. */
361 if (cacheDiskType == AFS_FCACHE_TYPE_UFS) {
362 v->tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
365 ("rxfs_storeInit: osi_AllocLargeSpace for iovecs returned NULL\n");
366 *ops = (struct storeOps *) &rxfs_storeUfsOps;
368 v->tiov = osi_AllocSmallSpace(sizeof(struct iovec) * RX_MAXIOVECS);
371 ("rxfs_storeInit: osi_AllocSmallSpace for iovecs returned NULL\n");
372 *ops = (struct storeOps *) &rxfs_storeMemOps;
374 /* do this at a higher level now -- it's a parameter */
375 /* for now, only do 'continue from close' code if file fits in one
376 * chunk. Could clearly do better: if only one modified chunk
377 * then can still do this. can do this on *last* modified chunk */
378 length = avc->f.m.Length - 1; /* byte position of last byte we'll store */
380 if (AFS_CHUNK(length) != 0)
/*
 * afs_GenericStoreProc
 *
 * Default per-chunk store driver: open the chunk's cache file, then
 * loop (loop framing elided from this view) prepare -> read from the
 * cache -> write to the wire via the supplied ops table, accumulating
 * *bytesXferred when statistics are enabled.  If the caller provided
 * 'shouldwake' and the server's remote status permits it, wake the
 * waiting user process exactly once.  Closes the cache file before
 * returning.
 *
 * \param ops           storeOps table (UFS or memcache variants)
 * \param rock          opaque rxfs_storeVariables for this operation
 * \param tdc           dcache entry for the chunk being stored
 * \param shouldwake    in/out: wake waiter once when non-NULL and set
 * \param bytesXferred  out: bytes moved (stats builds)
 */
393 afs_GenericStoreProc(struct storeOps *ops, void *rock,
394 struct dcache *tdc, int *shouldwake,
395 afs_size_t *bytesXferred)
397 struct rxfs_storeVariables *svar = rock;
398 afs_uint32 tlen, bytesread, byteswritten;
402 struct osi_file *tfile;
404 size = tdc->f.chunkBytes;
406 tfile = afs_CFileOpen(&tdc->f.inode);
409 code = (*ops->prepare)(rock, size, &tlen);
413 code = (*ops->read)(rock, tfile, offset, tlen, &bytesread);
418 code = (*ops->write)(rock, tlen, &byteswritten);
422 *bytesXferred += byteswritten;
423 #endif /* AFS_NOSTATS */
428 * if file has been locked on server, can allow
/* Wake the blocked writer early once the server says it is safe. */
431 if (shouldwake && *shouldwake && ((*ops->status)(rock) == 0)) {
432 *shouldwake = 0; /* only do this once */
433 afs_wakeup(svar->vcache);
436 afs_CFileClose(tfile);
441 unsigned int storeallmissing = 0;
443 * Called for each chunk upon store.
445 * \param avc Ptr to the vcache entry of the file being stored.
446 * \param dclist pointer to the list of dcaches
447 * \param bytes total number of bytes for the current operation
448 * \param anewDV Ptr to the dataversion after store
449 * \param doProcessFS pointer to the "do process FetchStatus" flag
450 * \param OutStatus pointer to the FetchStatus as returned by the fileserver
451 * \param nchunks number of dcaches to consider
452 * \param nomore copy of the "no more data" flag
453 * \param ops pointer to the block of storeOps to be used for this operation
454 * \param rock pointer to the opaque protocol-specific data of this operation
/*
 * afs_CacheStoreDCaches
 *
 * Drive the store of a list of contiguous dirty chunks over an
 * already-started RPC: for each dcache, decide whether the waiting
 * writer may be woken early (based on avc->asynchrony vs. bytes still
 * to go), run the per-chunk storeproc, pad short interior chunks up
 * to afs_OtherCSize, then close and destroy the operation.  Records
 * transfer statistics per chunk when enabled.
 *
 * See the doc comment above for parameter descriptions; 'ops'/'rock'
 * are the protocol table and opaque state from rxfs_storeInit.
 */
457 afs_CacheStoreDCaches(struct vcache *avc, struct dcache **dclist,
461 struct AFSFetchStatus *OutStatus,
464 struct storeOps *ops, void *rock)
466 int *shouldwake = NULL;
469 afs_size_t bytesXferred;
472 osi_timeval_t xferStartTime; /*FS xfer start time */
473 afs_size_t bytesToXfer = 10000; /* # bytes to xfer */
474 #endif /* AFS_NOSTATS */
477 for (i = 0; i < nchunks && !code; i++) {
479 struct dcache *tdc = dclist[i];
480 afs_int32 size = tdc->f.chunkBytes;
482 afs_warn("afs: missing dcache!\n");
484 continue; /* panic? */
486 afs_Trace4(afs_iclSetp, CM_TRACE_STOREALL2, ICL_TYPE_POINTER, avc,
487 ICL_TYPE_INT32, tdc->f.chunk, ICL_TYPE_INT32, tdc->index,
488 ICL_TYPE_INT32, afs_inode2trace(&tdc->f.inode));
/* Decide whether the writer may be woken before the RPC finishes:
 * -1 means "use the system default asynchrony window". */
491 if (avc->asynchrony == -1) {
492 if (afs_defaultAsynchrony > (bytes - stored))
493 shouldwake = &nomore;
495 else if ((afs_uint32) avc->asynchrony >= (bytes - stored))
496 shouldwake = &nomore;
499 afs_Trace4(afs_iclSetp, CM_TRACE_STOREPROC, ICL_TYPE_POINTER, avc,
500 ICL_TYPE_FID, &(avc->f.fid), ICL_TYPE_OFFSET,
501 ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_INT32, size);
503 AFS_STATCNT(CacheStoreProc);
505 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
506 avc->f.truncPos = AFS_NOTRUNC;
509 * In this case, size is *always* the amount of data we'll be trying
514 osi_GetuTime(&xferStartTime);
515 #endif /* AFS_NOSTATS */
518 code = (*ops->storeproc)(ops, rock, tdc, shouldwake,
521 afs_Trace4(afs_iclSetp, CM_TRACE_STOREPROC, ICL_TYPE_POINTER, avc,
522 ICL_TYPE_FID, &(avc->f.fid), ICL_TYPE_OFFSET,
523 ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_INT32, size);
526 FillStoreStats(code, AFS_STATS_FS_XFERIDX_STOREDATA,
527 &xferStartTime, bytesToXfer, bytesXferred);
528 #endif /* AFS_NOSTATS */
/* Interior short chunk: zero-pad it up to the server chunk size so
 * the chunks stay contiguous on the wire. */
530 if ((tdc->f.chunkBytes < afs_OtherCSize)
531 && (i < (nchunks - 1)) && code == 0) {
532 code = (*ops->padd)(rock, afs_OtherCSize - tdc->f.chunkBytes);
534 stored += tdc->f.chunkBytes;
535 /* ideally, I'd like to unlock the dcache and turn
536 * off the writing bit here, but that would
537 * require being able to retry StoreAllSegments in
538 * the event of a failure. It only really matters
539 * if user can't read from a 'locked' dcache or
540 * one which has the writing bit turned on. */
544 code = (*ops->close)(rock, OutStatus, doProcessFS);
551 code = (*ops->destroy)(&rock, code);
/* lmin: minimum of two values.  Macro arguments are evaluated more than
 * once — do not pass expressions with side effects. */
555 #define lmin(a,b) (((a) < (b)) ? (a) : (b))
559 * \param dclist pointer to the list of dcaches
560 * \param avc Ptr to the vcache entry.
561 * \param areq Ptr to the request structure
562 * \param sync sync flag
563 * \param minj the chunk offset for this call
564 * \param high index of last dcache to store
565 * \param moredata the moredata flag
566 * \param anewDV Ptr to the dataversion after store
567 * \param amaxStoredLength Ptr to the amount of data that is actually stored
569 * \note Environment: Nothing interesting.
/*
 * afs_CacheStoreVCache
 *
 * Store a run of dirty chunks for a vcache back to the fileserver.
 * Scans dcList, grouping contiguous entries; for each group it starts
 * a StoreData RPC (rxfs_storeInit), stores the group's chunks
 * (afs_CacheStoreDCaches), then — on success — clears the dirty-page
 * index flags on every chunk and releases the dcache locks.  Finally
 * it copies the returned status into the vcache (afs_ProcessFS) and
 * records the server-side length in *amaxStoredLength so the caller
 * can decide whether afs_StoreMini is needed.  Retry/error handling
 * (afs_Analyze loop framing, 64-bit fallback) is partially elided
 * from this view.
 *
 * See the doc comment above this function for parameter descriptions.
 */
572 afs_CacheStoreVCache(struct dcache **dcList, struct vcache *avc,
573 struct vrequest *areq, int sync,
574 unsigned int minj, unsigned int high,
575 unsigned int moredata,
576 afs_hyper_t *anewDV, afs_size_t *amaxStoredLength)
579 struct storeOps *ops;
583 struct AFSFetchStatus OutStatus;
585 afs_size_t base, bytes, length;
587 unsigned int first = 0;
/* Walk the chunk list, accumulating the byte count of the current
 * contiguous run; a hole (NULL entry) or the end of the list flushes
 * the run to the server. */
590 for (bytes = 0, j = 0; !code && j <= high; j++) {
592 ObtainSharedLock(&(dcList[j]->lock), 629);
595 bytes += dcList[j]->f.chunkBytes;
596 if ((dcList[j]->f.chunkBytes < afs_OtherCSize)
597 && (dcList[j]->f.chunk - minj < high)
599 int sbytes = afs_OtherCSize - dcList[j]->f.chunkBytes;
603 if (bytes && (j == high || !dcList[j + 1])) {
605 struct dcache **dclist = &dcList[first];
606 /* base = AFS_CHUNKTOBASE(dcList[first]->f.chunk); */
607 base = AFS_CHUNKTOBASE(first + minj);
610 * take a list of dcache structs and send them all off to the server
611 * the list must be in order, and the chunks contiguous.
612 * Note - there is no locking done by this code currently. For
613 * safety's sake, xdcache could be locked over the entire call.
614 * However, that pretty well ties up all the threads. Meantime, all
615 * the chunks _MUST_ have their refcounts bumped.
616 * The writes done before a store back will clear setuid-ness
618 * We can permit CacheStoreProc to wake up the user process IFF we
619 * are doing the last RPC for this close, ie, storing back the last
620 * set of contiguous chunks of a file.
623 nchunks = 1 + j - first;
624 nomore = !(moredata || (j != high));
625 length = lmin(avc->f.m.Length, avc->f.truncPos);
626 afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64,
627 ICL_TYPE_FID, &avc->f.fid.Fid, ICL_TYPE_OFFSET,
628 ICL_HANDLE_OFFSET(base), ICL_TYPE_OFFSET,
629 ICL_HANDLE_OFFSET(bytes), ICL_TYPE_OFFSET,
630 ICL_HANDLE_OFFSET(length));
633 tc = afs_Conn(&avc->f.fid, areq, 0);
635 #ifdef AFS_64BIT_CLIENT
638 code = rxfs_storeInit(avc, tc, base, bytes, length,
641 code = afs_CacheStoreDCaches(avc, dclist, bytes, anewDV,
642 &doProcessFS, &OutStatus,
643 nchunks, nomore, ops, rock);
646 #ifdef AFS_64BIT_CLIENT
/* Server rejected the 64-bit opcode: remember that and retry with
 * the 32-bit RPC (retry framing elided from this view). */
647 if (code == RXGEN_OPCODE && !afs_serverHasNo64Bit(tc)) {
648 afs_serverSetNo64Bit(tc);
651 #endif /* AFS_64BIT_CLIENT */
653 (tc, code, &avc->f.fid, areq,
654 AFS_STATS_FS_RPCIDX_STOREDATA, SHARED_LOCK,
657 /* put back all remaining locked dcache entries */
658 for (i = 0; i < nchunks; i++) {
659 struct dcache *tdc = dclist[i];
661 if (afs_indexFlags[tdc->index] & IFDataMod) {
663 * LOCKXXX -- should hold afs_xdcache(W) when
664 * modifying afs_indexFlags.
666 afs_indexFlags[tdc->index] &= ~IFDataMod;
667 afs_stats_cmperf.cacheCurrDirtyChunks--;
668 afs_indexFlags[tdc->index] &= ~IFDirtyPages;
669 if (sync & AFS_VMSYNC_INVAL) {
670 /* since we have invalidated all the pages of this
671 ** vnode by calling osi_VM_TryToSmush, we can
672 ** safely mark this dcache entry as not having
673 ** any pages. This vnode now becomes eligible for
674 ** reclamation by getDownD.
676 afs_indexFlags[tdc->index] &= ~IFAnyPages;
680 UpgradeSToWLock(&tdc->lock, 628);
681 tdc->f.states &= ~DWriting; /* correct? */
682 tdc->dflags |= DFEntryMod;
683 ReleaseWriteLock(&tdc->lock);
685 /* Mark the entry as released */
690 /* Now copy out return params */
691 UpgradeSToWLock(&avc->lock, 28); /* keep out others for a while */
692 afs_ProcessFS(avc, &OutStatus, areq);
693 /* Keep last (max) size of file on server to see if
694 * we need to call afs_StoreMini to extend the file.
697 *amaxStoredLength = OutStatus.Length;
698 ConvertWToSLock(&avc->lock);
/* Error path: release and put back any dcaches we never stored. */
703 for (j++; j <= high; j++) {
705 ReleaseSharedLock(&(dcList[j]->lock));
706 afs_PutDCache(dcList[j]);
707 /* Releasing entry */
713 afs_Trace2(afs_iclSetp, CM_TRACE_STOREALLDCDONE,
714 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code);
722 /* rock and operations for RX_FILESERVER */
/*
 * Per-operation state for a FetchData RPC.  Further members (transfer
 * buffer, iovecs, counts) are elided from this view; 'hasNo64bit'
 * records that the server rejected the 64-bit fetch opcode.
 */
724 struct rxfs_fetchVariables {
725 struct rx_call *call;
729 afs_int32 hasNo64bit;
/*
 * rxfs_fetchUfsRead
 *
 * Read up to one large-space buffer's worth (AFS_LRALLOCSIZ) of fetch
 * data from the wire into the rock's tbuffer; *bytesread reports the
 * count (assignment elided from this view).
 */
735 rxfs_fetchUfsRead(void *r, afs_uint32 size, afs_uint32 *bytesread)
739 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;
742 tlen = (size > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : size);
744 code = rx_Read(v->call, v->tbuffer, tlen);
/*
 * rxfs_fetchMemRead
 *
 * Read up to 'tlen' bytes of fetch data from the wire into the rock's
 * iovec array (v->iov / v->nio) for a memory-cache fetch.
 */
753 rxfs_fetchMemRead(void *r, afs_uint32 tlen, afs_uint32 *bytesread)
756 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;
760 code = rx_Readv(v->call, v->iov, &v->nio, RX_MAXIOVECS, tlen);
/*
 * rxfs_fetchMemWrite
 *
 * Write 'tlen' fetched bytes from the rock's iovecs into the memory
 * cache entry (the osi_file pointer is really a memCacheEntry) at
 * 'offset'.  afs_MemWritevBlk's return value is the byte count.
 */
770 rxfs_fetchMemWrite(void *r, struct osi_file *fP,
771 afs_uint32 offset, afs_uint32 tlen,
772 afs_uint32 *byteswritten)
775 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;
776 struct memCacheEntry *mceP = (struct memCacheEntry *)fP;
778 code = afs_MemWritevBlk(mceP, offset, v->iov, v->nio, tlen);
782 *byteswritten = code;
/*
 * rxfs_fetchUfsWrite
 *
 * Write 'tlen' fetched bytes from the rock's tbuffer into the UFS
 * cache file (offset -1 == write at the file's current position;
 * the 'offset' parameter is unused in the visible code).
 */
787 rxfs_fetchUfsWrite(void *r, struct osi_file *fP,
788 afs_uint32 offset, afs_uint32 tlen,
789 afs_uint32 *byteswritten)
792 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;
794 code = afs_osi_Write(fP, -1, v->tbuffer, tlen);
798 *byteswritten = code;
/*
 * rxfs_fetchClose
 *
 * Terminate the FetchData RPC: call the 64-bit or 32-bit End routine
 * (selection logic elided from this view), collecting the fetch
 * status and callback into *o, then end the Rx call.  'avc' and
 * 'adc' are unused in the visible code.
 */
804 rxfs_fetchClose(void *r, struct vcache *avc, struct dcache * adc,
805 struct afs_FetchOutput *o)
807 afs_int32 code, code1 = 0;
808 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;
814 #ifdef AFS_64BIT_CLIENT
816 code = EndRXAFS_FetchData64(v->call, &o->OutStatus, &o->CallBack,
820 code = EndRXAFS_FetchData(v->call, &o->OutStatus, &o->CallBack,
822 code1 = rx_EndCall(v->call, code);
/*
 * rxfs_fetchDestroy
 *
 * Tear down the fetch rock: end the Rx call (propagating 'error'),
 * free the large-space buffer and iovec array if allocated (NULL
 * guards elided from this view), then free the rock itself.
 * Mirrors rxfs_storeDestroy.
 */
833 rxfs_fetchDestroy(void **r, afs_int32 error)
835 afs_int32 code = error;
836 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)*r;
841 code = rx_EndCall(v->call, error);
847 osi_FreeLargeSpace(v->tbuffer);
849 osi_FreeSmallSpace(v->iov);
850 osi_FreeSmallSpace(v);
/*
 * rxfs_fetchMore
 *
 * Read the next block-length word from an AFS/DFS-translator fetch
 * stream.  The translator extension sends multiple blocks, each
 * preceded by a network-order length whose top bit flags "more data
 * follows"; that bit is split off into *moredata and the remaining
 * 31 bits into *length.  A short read returns the call's error, or
 * -1 if Rx reports none.
 */
855 rxfs_fetchMore(void *r, afs_int32 *length, afs_uint32 *moredata)
858 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;
861 * The fetch protocol is extended for the AFS/DFS translator
862 * to allow multiple blocks of data, each with its own length,
863 * to be returned. As long as the top bit is set, there are more
866 * We do not do this for AFS file servers because they sometimes
867 * return large negative numbers as the transfer size.
871 code = rx_Read(v->call, (void *)length, sizeof(afs_int32));
873 *length = ntohl(*length);
874 if (code != sizeof(afs_int32)) {
875 code = rx_Error(v->call);
877 return (code ? code : -1); /* try to return code, not -1 */
/* Split the "more blocks follow" flag (top bit) from the length. */
880 *moredata = *length & 0x80000000;
881 *length &= ~0x80000000;
/* Operation table for fetching into a UFS (on-disk) cache; the
 * initializer members are elided from this view. */
886 struct fetchOps rxfs_fetchUfsOps = {
/* Operation table for fetching into a memory cache; the initializer
 * members are elided from this view. */
895 struct fetchOps rxfs_fetchMemOps = {
/*
 * rxfs_fetchInit
 *
 * Set up a FetchData operation: allocate and zero the rock, open a
 * new Rx call, start the 64-bit FetchData RPC (falling back to the
 * 32-bit RPC on RXGEN_OPCODE or for servers already known to lack
 * 64-bit support), and read the initial length word(s) the server
 * sends ahead of the data.  Rejects fetches that would exceed what
 * was asked for, and fetches whose base exceeds 2GiB-1 on a 32-bit
 * fallback.  On success, selects the UFS or memory fetchOps table by
 * cacheDiskType, allocating the transfer buffer or pinned iovec
 * array, and returns the ops table and rock via *ops / **rock.
 *
 * \param tc       connection to the fileserver
 * \param avc      vcache of the file being fetched
 * \param base     starting offset of the fetch
 * \param size     number of bytes requested
 * \param alength  out: length the server says it will send
 * \param adc      dcache entry being filled (write-locked; asserted
 *                 below for the UFS path)
 * \param fP       cache file descriptor (memcache path traces it)
 * \param ops      out: selected fetchOps table
 * \param rock     out: opaque per-operation state
 */
904 rxfs_fetchInit(struct afs_conn *tc, struct vcache *avc, afs_offs_t base,
905 afs_uint32 size, afs_int32 *alength, struct dcache *adc,
906 struct osi_file *fP, struct fetchOps **ops, void **rock)
908 struct rxfs_fetchVariables *v;
909 int code = 0, code1 = 0;
910 #ifdef AFS_64BIT_CLIENT
911 afs_uint32 length_hi = 0;
913 afs_uint32 length, bytes;
915 v = (struct rxfs_fetchVariables *) osi_AllocSmallSpace(sizeof(struct rxfs_fetchVariables));
917 osi_Panic("rxfs_fetchInit: osi_AllocSmallSpace returned NULL\n");
918 memset(v, 0, sizeof(struct rxfs_fetchVariables));
921 v->call = rx_NewCall(tc->id);
924 #ifdef AFS_64BIT_CLIENT
925 afs_size_t length64; /* as returned from server */
/* Try the 64-bit RPC first unless the server is known to lack it. */
926 if (!afs_serverHasNo64Bit(tc)) {
927 afs_uint64 llbytes = size;
929 code = StartRXAFS_FetchData64(v->call, (struct AFSFid *) &avc->f.fid.Fid,
933 afs_Trace2(afs_iclSetp, CM_TRACE_FETCH64CODE,
934 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code);
/* The 64-bit server sends the high 32 bits of the length first. */
936 bytes = rx_Read(v->call, (char *)&length_hi, sizeof(afs_int32));
938 if (bytes == sizeof(afs_int32)) {
939 length_hi = ntohl(length_hi);
941 code = rx_Error(v->call);
943 code1 = rx_EndCall(v->call, code);
/* 32-bit fallback: server rejected the opcode or was already flagged. */
949 if (code == RXGEN_OPCODE || afs_serverHasNo64Bit(tc)) {
950 if (base > 0x7FFFFFFF) {
957 v->call = rx_NewCall(tc->id);
959 StartRXAFS_FetchData(
960 v->call, (struct AFSFid*)&avc->f.fid.Fid,
964 afs_serverSetNo64Bit(tc);
968 bytes = rx_Read(v->call, (char *)&length, sizeof(afs_int32));
970 if (bytes == sizeof(afs_int32))
971 length = ntohl(length);
974 code = rx_Error(v->call);
975 code1 = rx_EndCall(v->call, code);
980 FillInt64(length64, length_hi, length);
981 afs_Trace3(afs_iclSetp, CM_TRACE_FETCH64LENG,
982 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code,
984 ICL_HANDLE_OFFSET(length64));
986 #else /* AFS_64BIT_CLIENT */
988 code = StartRXAFS_FetchData(v->call, (struct AFSFid *)&avc->f.fid.Fid,
994 rx_Read(v->call, (char *)&length, sizeof(afs_int32));
996 if (bytes == sizeof(afs_int32)) {
997 *alength = ntohl(length);
999 code = rx_Error(v->call);
1000 code1 = rx_EndCall(v->call, code);
1004 #endif /* AFS_64BIT_CLIENT */
1008 /* We need to cast here, in order to avoid issues if *alength is
1009 * negative. Some, older, fileservers can return a negative length,
1010 * which the rest of the code deals correctly with. */
1011 if (code == 0 && *alength > (afs_int32) size) {
1012 /* The fileserver told us it is going to send more data than we
1013 * requested. It shouldn't do that, and accepting that much data
1014 * can make us take up more cache space than we're supposed to,
1016 code = rx_Error(v->call);
1018 code1 = rx_EndCall(v->call, code);
/* Error path (guard elided): release the rock before returning. */
1028 osi_FreeSmallSpace(v);
/* Pick the transport helpers that match the cache backend. */
1031 if (cacheDiskType == AFS_FCACHE_TYPE_UFS) {
1032 v->tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
1034 osi_Panic("rxfs_fetchInit: osi_AllocLargeSpace for iovecs returned NULL\n");
1035 osi_Assert(WriteLocked(&adc->lock));
1037 *ops = (struct fetchOps *) &rxfs_fetchUfsOps;
1040 afs_Trace4(afs_iclSetp, CM_TRACE_MEMFETCH, ICL_TYPE_POINTER, avc,
1041 ICL_TYPE_POINTER, fP, ICL_TYPE_OFFSET,
1042 ICL_HANDLE_OFFSET(base), ICL_TYPE_INT32, length);
1044 * We need to alloc the iovecs on the heap so that they are "pinned"
1045 * rather than declare them on the stack - defect 11272
1047 v->iov = osi_AllocSmallSpace(sizeof(struct iovec) * RX_MAXIOVECS);
1049 osi_Panic("rxfs_fetchInit: osi_AllocSmallSpace for iovecs returned NULL\n");
1050 *ops = (struct fetchOps *) &rxfs_fetchMemOps;
1058 * Routine called on fetch; also tells people waiting for data
1059 * that more has arrived.
1061 * \param tc Ptr to the Rx connection structure.
1062 * \param fP File descriptor for the cache file.
1063 * \param base Base offset to fetch.
1064 * \param adc Ptr to the dcache entry for the file, write-locked.
1065 * \param avc Ptr to the vcache entry for the file.
1066 * \param size Amount of data that should be fetched.
1067 * \param tsmall Ptr to the afs_FetchOutput structure.
1069 * \note Environment: Nothing interesting.
1072 afs_CacheFetchProc(struct afs_conn *tc, struct osi_file *fP, afs_size_t base,
1073 struct dcache *adc, struct vcache *avc, afs_int32 size,
1074 struct afs_FetchOutput *tsmall)
1078 afs_uint32 bytesread, byteswritten;
1079 struct fetchOps *ops = NULL;
1081 afs_uint32 moredata = 0;
1086 osi_timeval_t xferStartTime; /*FS xfer start time */
1087 afs_size_t bytesToXfer = 0, bytesXferred = 0;
1090 AFS_STATCNT(CacheFetchProc);
1092 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHDATA);
1096 * avc->lock(R) if setLocks && !slowPass
1097 * avc->lock(W) if !setLocks || slowPass
1100 code = rxfs_fetchInit(
1101 tc, avc, base, size, &length, adc, fP, &ops, &rock);
1104 osi_GetuTime(&xferStartTime);
1105 #endif /* AFS_NOSTATS */
1108 adc->validPos = base;
1112 if (avc->f.states & CForeign) {
1113 code = (*ops->more)(rock, &length, &moredata);
1118 bytesToXfer += length;
1119 #endif /* AFS_NOSTATS */
1120 while (length > 0) {
1121 #ifdef RX_KERNEL_TRACE
1122 afs_Trace1(afs_iclSetp, CM_TRACE_TIMESTAMP, ICL_TYPE_STRING,
1125 code = (*ops->read)(rock, length, &bytesread);
1126 #ifdef RX_KERNEL_TRACE
1127 afs_Trace1(afs_iclSetp, CM_TRACE_TIMESTAMP, ICL_TYPE_STRING,
1131 bytesXferred += bytesread;
1132 #endif /* AFS_NOSTATS */
1134 afs_Trace3(afs_iclSetp, CM_TRACE_FETCH64READ,
1135 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code,
1136 ICL_TYPE_INT32, length);
1140 code = (*ops->write)(rock, fP, offset, bytesread, &byteswritten);
1143 offset += bytesread;
1145 length -= bytesread;
1146 adc->validPos = base;
1147 if (afs_osi_Wakeup(&adc->validPos) == 0)
1148 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAKE, ICL_TYPE_STRING,
1149 __FILE__, ICL_TYPE_INT32, __LINE__,
1150 ICL_TYPE_POINTER, adc, ICL_TYPE_INT32,
1156 code = (*ops->close)(rock, avc, adc, tsmall);
1158 (*ops->destroy)(&rock, code);
1161 FillStoreStats(code, AFS_STATS_FS_XFERIDX_FETCHDATA, &xferStartTime,
1162 bytesToXfer, bytesXferred);