2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
13 #include "afs/sysincludes.h" /* Standard vendor system headers */
19 #endif /* AFS_ALPHA_ENV */
20 #include "afsincludes.h" /* Afs-based standard headers */
21 #include "afs/afs_stats.h" /* statistics */
22 #include "afs_prototypes.h"
24 extern int cacheDiskType;
/*
 * FillStoreStats -- fold one completed data transfer into the client's
 * per-RPC transfer statistics slot `idx` (afs_stats_cmfullperf.rpc.fsXferTimes).
 * Updates the success count, the byte counters (kept as whole KiB plus a
 * sub-KiB remainder), min/max transfer sizes, a size-bucket histogram, and
 * elapsed-time sum / sum-of-squares / min / max.
 *
 * code           RPC completion code (the error path is elided in this listing)
 * idx            which transfer-stats slot to update
 * xferStartTime  wall-clock time the transfer began
 * bytesToXfer    bytes we intended to move (drives the size histogram)
 * bytesXferred   bytes actually moved (drives the byte counters)
 *
 * NOTE(review): this is a partial listing -- gaps in the original numbering
 * mean some statements (braces, #ifdef guards, the failure branch) are not
 * visible here; only visible statements are annotated.
 */
28 FillStoreStats(int code, int idx, osi_timeval_t xferStartTime,
29 afs_size_t bytesToXfer, afs_size_t bytesXferred)
31 struct afs_stats_xferData *xferP;
32 osi_timeval_t xferStopTime;
33 osi_timeval_t elapsedTime;
35 xferP = &(afs_stats_cmfullperf.rpc.fsXferTimes[idx]);
/* stamp the stop time immediately so the elapsed-time figure is accurate */
36 osi_GetuTime(&xferStopTime);
39 (xferP->numSuccesses)++;
/* sumBytes is maintained in KiB units: raw bytes accumulate in
 * afs_stats_XferSumBytes[idx], whole KiBs (>> 10) are moved into sumBytes,
 * and the remainder (low 10 bits) is carried forward for the next call. */
40 afs_stats_XferSumBytes[idx] += bytesXferred;
41 (xferP->sumBytes) += (afs_stats_XferSumBytes[idx] >> 10);
42 afs_stats_XferSumBytes[idx] &= 0x3FF;
43 if (bytesXferred < xferP->minBytes)
44 xferP->minBytes = bytesXferred;
45 if (bytesXferred > xferP->maxBytes)
46 xferP->maxBytes = bytesXferred;
49 * Tally the size of the object. Note: we tally the actual size,
50 * NOT the number of bytes that made it out over the wire.
/* size histogram: the first bucket whose limit holds bytesToXfer wins */
52 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET0) (xferP->count[0])++;
53 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET1) (xferP->count[1])++;
54 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET2) (xferP->count[2])++;
55 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET3) (xferP->count[3])++;
56 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET4) (xferP->count[4])++;
57 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET5) (xferP->count[5])++;
58 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET6) (xferP->count[6])++;
59 else if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET7) (xferP->count[7])++;
/* elapsed = stop - start; accumulate sum and sum-of-squares (for mean and
 * variance reporting) and track min/max elapsed times */
63 afs_stats_GetDiff(elapsedTime, xferStartTime, xferStopTime);
64 afs_stats_AddTo((xferP->sumTime), elapsedTime);
65 afs_stats_SquareAddTo((xferP->sqrTime), elapsedTime);
66 if (afs_stats_TimeLessThan(elapsedTime, (xferP->minTime))) {
67 afs_stats_TimeAssign((xferP->minTime), elapsedTime);
69 if (afs_stats_TimeGreaterThan(elapsedTime, (xferP->maxTime))) {
70 afs_stats_TimeAssign((xferP->maxTime), elapsedTime);
74 #endif /* AFS_NOSTATS */
76 /* rock and operations for RX_FILESERVER */
/*
 * rxfs_storeUfsPrepare -- "prepare" storeOp for the UFS (disk) cache:
 * clamp the requested transfer size to one large-space buffer
 * (AFS_LRALLOCSIZ bytes), returning the usable length via *tlen.
 * The rock `r` is unused on this path.
 */
81 rxfs_storeUfsPrepare(void *r, afs_uint32 size, afs_uint32 *tlen)
83 *tlen = (size > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : size);
/*
 * rxfs_storeMemPrepare -- "prepare" storeOp for the memory cache:
 * ask Rx to allocate an iovec chain (v->tiov, count in v->tnio, at most
 * RX_MAXIOVECS entries) big enough for `size` bytes on this call.
 * On failure the visible line re-reads the call's error via rx_Error().
 * NOTE(review): the lines between the rx_WritevAlloc() call and the
 * rx_Error() fallback are elided in this listing.
 */
88 rxfs_storeMemPrepare(void *r, afs_uint32 size, afs_uint32 *tlen)
91 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *) r;
94 code = rx_WritevAlloc(v->call, v->tiov, &v->tnio, RX_MAXIOVECS, size);
97 code = rx_Error(v->call);
/*
 * rxfs_storeUfsRead -- "read" storeOp for the UFS cache: read up to `tlen`
 * bytes from the cache file into the rock's staging buffer v->tbuffer.
 * The -1 offset argument to afs_osi_Read() means "current file position".
 * NOTE(review): error handling after the read (including the
 * KERNEL_HAVE_UERROR branch whose body is not visible) is elided here.
 */
109 rxfs_storeUfsRead(void *r, struct osi_file *tfile, afs_uint32 offset,
110 afs_uint32 tlen, afs_uint32 *bytesread)
113 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r;
116 code = afs_osi_Read(tfile, -1, v->tbuffer, tlen);
122 #if defined(KERNEL_HAVE_UERROR)
/*
 * rxfs_storeMemRead -- "read" storeOp for the memory cache: gather up to
 * `tlen` bytes starting at `offset` from the memory cache entry into the
 * iovec chain that rxfs_storeMemPrepare() allocated (v->tiov / v->tnio).
 * The osi_file pointer doubles as the memCacheEntry on this path.
 */
130 rxfs_storeMemRead(void *r, struct osi_file *tfile, afs_uint32 offset,
131 afs_uint32 tlen, afs_uint32 *bytesread)
134 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r;
135 struct memCacheEntry *mceP = (struct memCacheEntry *)tfile;
138 code = afs_MemReadvBlk(mceP, offset, v->tiov, v->tnio, tlen);
/*
 * rxfs_storeMemWrite -- "write" storeOp for the memory cache: push the
 * previously-filled iovec chain over the wire with rx_Writev().
 * rx_Writev() returns the byte count written; on a short write the call's
 * error is fetched and returned (or -33 if Rx reports no error).
 */
146 rxfs_storeMemWrite(void *r, afs_uint32 l, afs_uint32 *byteswritten)
149 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r;
152 code = rx_Writev(v->call, v->tiov, v->tnio, l);
155 code = rx_Error(v->call);
/* -33 is the local "short write with no Rx error" sentinel */
156 return (code ? code : -33);
158 *byteswritten = code;
/*
 * rxfs_storeUfsWrite -- "write" storeOp for the UFS cache: push `l` bytes
 * from the staging buffer v->tbuffer over the wire with rx_Write().
 * Mirrors rxfs_storeMemWrite(): on a short write, return the call's Rx
 * error, or the -33 sentinel if Rx reports none; otherwise report the byte
 * count via *byteswritten.
 */
163 rxfs_storeUfsWrite(void *r, afs_uint32 l, afs_uint32 *byteswritten)
166 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r;
169 code = rx_Write(v->call, v->tbuffer, l);
170 /* writing 0 bytes will
171 * push a short packet. Is that really what we want, just because the
172 * data didn't come back from the disk yet? Let's try it and see. */
175 code = rx_Error(v->call);
176 return (code ? code : -33);
178 *byteswritten = code;
/*
 * rxfs_storePadd -- pad the store stream with `size` zero bytes, used when
 * a chunk is shorter than the server-side chunk size and more chunks
 * follow.  Lazily allocates (and zeroes) the large staging buffer if this
 * rock does not have one yet, then writes zeros in AFS_LRALLOCSIZ-sized
 * slices.  NOTE(review): the loop structure around the rx_Write() is
 * elided in this listing; only the per-slice body is visible.
 */
183 rxfs_storePadd(void *rock, afs_uint32 size)
187 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)rock;
190 v->tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
191 memset(v->tbuffer, 0, AFS_LRALLOCSIZ);
/* write at most one buffer's worth of zeros per iteration */
194 tlen = (size > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : size);
196 code = rx_Write(v->call, v->tbuffer, tlen);
200 return -33; /* XXX */
/*
 * rxfs_storeStatus -- "status" storeOp: query the remote status bits of
 * the store call.  The low bit of rx_GetRemoteStatus() indicates the
 * server has seen enough data; callers use the result to decide whether a
 * waiting writer may be woken early.  The visible return values are
 * elided in this listing.
 */
207 rxfs_storeStatus(void *rock)
209 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)rock;
211 if (rx_GetRemoteStatus(v->call) & 1)
/*
 * rxfs_storeClose -- finish the store RPC: invoke the matching
 * EndRXAFS_StoreData{,64}() to collect the server's AFSFetchStatus and
 * volume-sync info, and set *doProcessFS so the caller later merges
 * OutStatus into the vcache via afs_ProcessFS().  The 64-bit variant is
 * used under AFS_64BIT_CLIENT (selection logic partly elided here).
 */
217 rxfs_storeClose(void *r, struct AFSFetchStatus *OutStatus, int *doProcessFS)
220 struct AFSVolSync tsync;
221 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)r;
226 #ifdef AFS_64BIT_CLIENT
228 code = EndRXAFS_StoreData64(v->call, OutStatus, &tsync);
231 code = EndRXAFS_StoreData(v->call, OutStatus, &tsync);
234 *doProcessFS = 1; /* Flag to run afs_ProcessFS() later on */
/*
 * rxfs_storeDestroy -- tear down a store rock: end the Rx call (folding
 * `code` into the final result), free the large staging buffer and the
 * iovec array if they were allocated, then free the rock itself.
 * Takes `void **r` so the caller's rock pointer can be invalidated.
 */
240 rxfs_storeDestroy(void **r, afs_int32 code)
242 struct rxfs_storeVariables *v = (struct rxfs_storeVariables *)*r;
247 code = rx_EndCall(v->call, code);
251 osi_FreeLargeSpace(v->tbuffer);
253 osi_FreeSmallSpace(v->tiov);
254 osi_FreeSmallSpace(v);
/*
 * afs_GenericStoreProc -- default per-chunk store loop: stream one dcache
 * chunk to the server via the protocol-specific ops (prepare -> read from
 * cache -> write to wire), accumulating *bytesXferred.  May wake a writer
 * blocked in close once the server signals (via ops->status) that it has
 * enough data -- only done once per store (the *shouldwake reset).
 *
 * ops          storeOps vector (UFS or memcache flavor)
 * rock         protocol-private state (rxfs_storeVariables here)
 * tdc          the dcache chunk being stored
 * shouldwake   in/out flag: non-NULL and non-zero -> early wakeup allowed
 * bytesXferred accumulates bytes actually pushed to the wire
 *
 * NOTE(review): the surrounding loop/brace structure and error checks are
 * partly elided in this listing.
 */
259 afs_GenericStoreProc(struct storeOps *ops, void *rock,
260 struct dcache *tdc, int *shouldwake,
261 afs_size_t *bytesXferred)
263 struct rxfs_storeVariables *svar = rock;
264 afs_uint32 tlen, bytesread, byteswritten;
268 struct osi_file *tfile;
270 size = tdc->f.chunkBytes;
272 tfile = afs_CFileOpen(&tdc->f.inode);
/* prepare caps tlen to the per-iteration transfer size */
276 code = (*ops->prepare)(rock, size, &tlen);
280 code = (*ops->read)(rock, tfile, offset, tlen, &bytesread);
285 code = (*ops->write)(rock, tlen, &byteswritten);
289 *bytesXferred += byteswritten;
290 #endif /* AFS_NOSTATS */
295 * if file has been locked on server, can allow
/* wake the waiting process at most once, and only while the store is
 * still healthy (status == 0) */
298 if (shouldwake && *shouldwake && ((*ops->status)(rock) == 0)) {
299 *shouldwake = 0; /* only do this once */
300 afs_wakeup(svar->vcache);
303 afs_CFileClose(tfile);
/*
 * storeOps vector for the UFS (on-disk) cache.  Positional initializers
 * are used when the compiler lacks designated-initializer support
 * (HAVE_STRUCT_LABEL_SUPPORT); the designated form follows.  The Linux
 * storeproc alternative is compiled out (#if 0).
 */
309 struct storeOps rxfs_storeUfsOps = {
310 #ifndef HAVE_STRUCT_LABEL_SUPPORT
311 rxfs_storeUfsPrepare,
320 .prepare = rxfs_storeUfsPrepare,
321 .read = rxfs_storeUfsRead,
322 .write = rxfs_storeUfsWrite,
323 .status = rxfs_storeStatus,
324 .padd = rxfs_storePadd,
325 .close = rxfs_storeClose,
326 .destroy = rxfs_storeDestroy,
327 #if 0 && defined(AFS_LINUX26_ENV)
328 .storeproc = afs_linux_storeproc
330 .storeproc = afs_GenericStoreProc
/*
 * storeOps vector for the in-memory cache.  Same shape as
 * rxfs_storeUfsOps but with the memcache read/prepare/write callbacks;
 * status/padd/close/destroy/storeproc are shared with the UFS vector.
 */
336 struct storeOps rxfs_storeMemOps = {
337 #ifndef HAVE_STRUCT_LABEL_SUPPORT
338 rxfs_storeMemPrepare,
347 .prepare = rxfs_storeMemPrepare,
348 .read = rxfs_storeMemRead,
349 .write = rxfs_storeMemWrite,
350 .status = rxfs_storeStatus,
351 .padd = rxfs_storePadd,
352 .close = rxfs_storeClose,
353 .destroy = rxfs_storeDestroy,
354 .storeproc = afs_GenericStoreProc
/*
 * rxfs_storeInit -- set up a StoreData RPC: allocate and zero the rock
 * (rxfs_storeVariables), fill in the InStatus (client mod time, plus
 * AFS_FSYNC when `sync` requests it), open a new Rx call, and start
 * StoreData64 or the 32-bit StoreData depending on AFS_64BIT_CLIENT and
 * whether this server is known to lack 64-bit RPCs.  On success, selects
 * the UFS or memcache storeOps vector based on cacheDiskType and
 * allocates the matching staging buffer / iovec array.
 *
 * Outputs: *ops (storeOps vector) and *rock (protocol state) for the
 * caller to drive the per-chunk store loop.
 *
 * NOTE(review): gaps in this listing elide the NULL checks that precede
 * the osi_Panic() calls, several braces, and the error-return paths.
 */
359 rxfs_storeInit(struct vcache *avc, struct afs_conn *tc,
360 struct rx_connection *rxconn, afs_size_t base,
361 afs_size_t bytes, afs_size_t length,
362 int sync, struct storeOps **ops, void **rock)
365 struct rxfs_storeVariables *v;
370 v = osi_AllocSmallSpace(sizeof(struct rxfs_storeVariables));
372 osi_Panic("rxfs_storeInit: osi_AllocSmallSpace returned NULL\n");
373 memset(v, 0, sizeof(struct rxfs_storeVariables));
/* tell the server the client's idea of the file's mtime */
375 v->InStatus.ClientModTime = avc->f.m.Date;
376 v->InStatus.Mask = AFS_SETMODTIME;
379 v->InStatus.Mask |= AFS_FSYNC;
381 v->call = rx_NewCall(rxconn);
383 #ifdef AFS_64BIT_CLIENT
/* prefer the 64-bit RPC unless the server is flagged as 32-bit-only */
384 if (!afs_serverHasNo64Bit(tc))
385 code = StartRXAFS_StoreData64(
386 v->call, (struct AFSFid*)&avc->f.fid.Fid,
387 &v->InStatus, base, bytes, length);
/* 32-bit fallback: lengths beyond 4GB-1 cannot be represented */
389 if (length > 0xFFFFFFFF)
392 afs_int32 t1 = base, t2 = bytes, t3 = length;
393 code = StartRXAFS_StoreData(v->call,
394 (struct AFSFid *) &avc->f.fid.Fid,
395 &v->InStatus, t1, t2, t3);
399 #else /* AFS_64BIT_CLIENT */
400 code = StartRXAFS_StoreData(v->call, (struct AFSFid *)&avc->f.fid.Fid,
401 &v->InStatus, base, bytes, length);
402 #endif /* AFS_64BIT_CLIENT */
407 osi_FreeSmallSpace(v);
/* pick the ops vector and the matching staging storage for the cache type */
410 if (cacheDiskType == AFS_FCACHE_TYPE_UFS) {
411 v->tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
414 ("rxfs_storeInit: osi_AllocLargeSpace for iovecs returned NULL\n");
415 *ops = (struct storeOps *) &rxfs_storeUfsOps;
417 v->tiov = osi_AllocSmallSpace(sizeof(struct iovec) * RX_MAXIOVECS);
420 ("rxfs_storeInit: osi_AllocSmallSpace for iovecs returned NULL\n");
421 *ops = (struct storeOps *) &rxfs_storeMemOps;
423 /* do this at a higher level now -- it's a parameter */
424 /* for now, only do 'continue from close' code if file fits in one
425 * chunk. Could clearly do better: if only one modified chunk
426 * then can still do this. can do this on *last* modified chunk */
427 length = avc->f.m.Length - 1; /* byte position of last byte we'll store */
429 if (AFS_CHUNK(length) != 0)
/* Diagnostic counter: number of store attempts that found a missing
 * dcache entry (see the "missing dcache" branch in afs_CacheStoreDCaches). */
440 unsigned int storeallmissing = 0;
442 * Called for each chunk upon store.
444 * \param avc Ptr to the vcache entry of the file being stored.
445 * \param dclist pointer to the list of dcaches
446 * \param bytes total number of bytes for the current operation
447 * \param anewDV Ptr to the dataversion after store
448 * \param doProcessFS pointer to the "do process FetchStatus" flag
449 * \param OutStatus pointer to the FetchStatus as returned by the fileserver
450 * \param nchunks number of dcaches to consider
451 * \param nomore copy of the "no more data" flag
452 * \param ops pointer to the block of storeOps to be used for this operation
453 * \param rock pointer to the opaque protocol-specific data of this operation
/*
 * afs_CacheStoreDCaches -- store a contiguous run of `nchunks` dcache
 * chunks to the fileserver via the supplied storeOps/rock.  For each
 * chunk: decide whether the writer may be woken early (asynchrony
 * policy), run ops->storeproc, record transfer statistics, and pad short
 * chunks (except the last) out to afs_OtherCSize.  Afterwards, close the
 * RPC (bumping the data version on success) and destroy the rock.
 * See the doxygen comment above for the parameter contract.
 *
 * NOTE(review): gaps in this listing elide several braces, error checks,
 * and the statistics #ifdef structure; only visible statements are
 * annotated.
 */
456 afs_CacheStoreDCaches(struct vcache *avc, struct dcache **dclist,
457 afs_size_t bytes, afs_hyper_t *anewDV, int *doProcessFS,
458 struct AFSFetchStatus *OutStatus, afs_uint32 nchunks,
459 int nomore, struct storeOps *ops, void *rock)
461 int *shouldwake = NULL;
465 afs_size_t bytesXferred;
468 osi_timeval_t xferStartTime; /*FS xfer start time */
469 afs_size_t bytesToXfer = 10000; /* # bytes to xfer */
470 #endif /* AFS_NOSTATS */
472 osi_Assert(nchunks != 0);
474 for (i = 0; i < nchunks && !code; i++) {
475 struct dcache *tdc = dclist[i];
/* a NULL slot should not happen; count it and move on */
479 afs_warn("afs: missing dcache!\n");
481 continue; /* panic? */
483 size = tdc->f.chunkBytes;
484 afs_Trace4(afs_iclSetp, CM_TRACE_STOREALL2, ICL_TYPE_POINTER, avc,
485 ICL_TYPE_INT32, tdc->f.chunk, ICL_TYPE_INT32, tdc->index,
486 ICL_TYPE_INT32, afs_inode2trace(&tdc->f.inode));
/* asynchrony policy: allow the closing writer to be woken before the
 * store completes once the amount still unstored drops below the
 * configured threshold (-1 means "use the default") */
489 if (avc->asynchrony == -1) {
490 if (afs_defaultAsynchrony > (bytes - stored))
491 shouldwake = &nomore;
493 else if ((afs_uint32) avc->asynchrony >= (bytes - stored))
494 shouldwake = &nomore;
497 afs_Trace4(afs_iclSetp, CM_TRACE_STOREPROC, ICL_TYPE_POINTER, avc,
498 ICL_TYPE_FID, &(avc->f.fid), ICL_TYPE_OFFSET,
499 ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_INT32, size);
501 AFS_STATCNT(CacheStoreProc);
503 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
/* the server will apply any pending truncation itself */
504 avc->f.truncPos = AFS_NOTRUNC;
507 * In this case, size is *always* the amount of data we'll be trying
512 osi_GetuTime(&xferStartTime);
513 #endif /* AFS_NOSTATS */
516 code = (*ops->storeproc)(ops, rock, tdc, shouldwake,
519 afs_Trace4(afs_iclSetp, CM_TRACE_STOREPROC, ICL_TYPE_POINTER, avc,
520 ICL_TYPE_FID, &(avc->f.fid), ICL_TYPE_OFFSET,
521 ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_INT32, size);
524 FillStoreStats(code, AFS_STATS_FS_XFERIDX_STOREDATA,
525 xferStartTime, bytesToXfer, bytesXferred);
526 #endif /* AFS_NOSTATS */
/* pad every chunk except the last out to the server chunk size */
528 if ((tdc->f.chunkBytes < afs_OtherCSize)
529 && (i < (nchunks - 1)) && code == 0) {
530 code = (*ops->padd)(rock, afs_OtherCSize - tdc->f.chunkBytes);
532 stored += tdc->f.chunkBytes;
533 /* ideally, I'd like to unlock the dcache and turn
534 * off the writing bit here, but that would
535 * require being able to retry StoreAllSegments in
536 * the event of a failure. It only really matters
537 * if user can't read from a 'locked' dcache or
538 * one which has the writing bit turned on. */
542 code = (*ops->close)(rock, OutStatus, doProcessFS);
543 /* if this succeeds, dv has been bumped. */
550 code = (*ops->destroy)(&rock, code);
552 /* if we errored, can't trust this. */
/* Local min-of-two macro.  NOTE(review): classic double-evaluation hazard --
 * do not pass arguments with side effects (e.g. lmin(i++, j)). */
559 #define lmin(a,b) (((a) < (b)) ? (a) : (b))
563 * \param dclist pointer to the list of dcaches
564 * \param avc Ptr to the vcache entry.
565 * \param areq Ptr to the request structure
566 * \param sync sync flag
567 * \param minj the chunk offset for this call
568 * \param high index of last dcache to store
569 * \param moredata the moredata flag
570 * \param anewDV Ptr to the dataversion after store
571 * \param amaxStoredLength Ptr to the amount of data that is actually stored
573 * \note Environment: Nothing interesting.
/*
 * afs_CacheStoreVCache -- store dirty chunks dcList[0..high] of a vcache
 * back to the fileserver.  Scans the list accumulating byte counts, and at
 * the end of each contiguous run (next slot NULL, or end of list) issues
 * one StoreData RPC for the whole run via rxfs_storeInit() +
 * afs_CacheStoreDCaches().  On success it clears the dirty index flags on
 * the stored chunks, merges the returned status into the vcache, and
 * records the server's file length in *amaxStoredLength.  Remaining locked
 * dcache entries are released at the end.  See the doxygen comment above
 * for the parameter contract.
 *
 * NOTE(review): this listing elides many lines (braces, retry loop
 * structure, the afs_Analyze() call whose name is cut at orig line 657);
 * only visible statements are annotated.
 */
576 afs_CacheStoreVCache(struct dcache **dcList, struct vcache *avc,
577 struct vrequest *areq, int sync, unsigned int minj,
578 unsigned int high, unsigned int moredata,
579 afs_hyper_t *anewDV, afs_size_t *amaxStoredLength)
582 struct storeOps *ops;
586 struct AFSFetchStatus OutStatus;
588 afs_size_t base, bytes, length;
590 unsigned int first = 0;
592 struct rx_connection *rxconn;
/* walk the chunk list, summing sizes until a run boundary is reached */
594 for (bytes = 0, j = 0; !code && j <= high; j++) {
596 ObtainSharedLock(&(dcList[j]->lock), 629);
599 bytes += dcList[j]->f.chunkBytes;
/* short chunk in the middle of a run: account for the padding that
 * will be written to fill it out to afs_OtherCSize */
600 if ((dcList[j]->f.chunkBytes < afs_OtherCSize)
601 && (dcList[j]->f.chunk - minj < high)
603 int sbytes = afs_OtherCSize - dcList[j]->f.chunkBytes;
/* run boundary: last slot, or a hole in the list */
607 if (bytes && (j == high || !dcList[j + 1])) {
609 struct dcache **dclist = &dcList[first];
610 /* base = AFS_CHUNKTOBASE(dcList[first]->f.chunk); */
611 base = AFS_CHUNKTOBASE(first + minj);
614 * take a list of dcache structs and send them all off to the server
615 * the list must be in order, and the chunks contiguous.
616 * Note - there is no locking done by this code currently. For
617 * safety's sake, xdcache could be locked over the entire call.
618 * However, that pretty well ties up all the threads. Meantime, all
619 * the chunks _MUST_ have their refcounts bumped.
620 * The writes done before a store back will clear setuid-ness
622 * We can permit CacheStoreProc to wake up the user process IFF we
623 * are doing the last RPC for this close, ie, storing back the last
624 * set of contiguous chunks of a file.
627 nchunks = 1 + j - first;
628 nomore = !(moredata || (j != high));
/* store up to the pending truncation point, never past it */
629 length = lmin(avc->f.m.Length, avc->f.truncPos);
630 afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64,
631 ICL_TYPE_FID, &avc->f.fid.Fid, ICL_TYPE_OFFSET,
632 ICL_HANDLE_OFFSET(base), ICL_TYPE_OFFSET,
633 ICL_HANDLE_OFFSET(bytes), ICL_TYPE_OFFSET,
634 ICL_HANDLE_OFFSET(length));
637 tc = afs_Conn(&avc->f.fid, areq, 0, &rxconn);
639 #ifdef AFS_64BIT_CLIENT
642 code = rxfs_storeInit(avc, tc, rxconn, base, bytes, length,
645 code = afs_CacheStoreDCaches(avc, dclist, bytes, anewDV,
646 &doProcessFS, &OutStatus,
647 nchunks, nomore, ops, rock);
650 #ifdef AFS_64BIT_CLIENT
/* server rejected the 64-bit opcode: remember and retry 32-bit */
651 if (code == RXGEN_OPCODE && !afs_serverHasNo64Bit(tc)) {
652 afs_serverSetNo64Bit(tc);
655 #endif /* AFS_64BIT_CLIENT */
657 (tc, rxconn, code, &avc->f.fid, areq,
658 AFS_STATS_FS_RPCIDX_STOREDATA, SHARED_LOCK,
661 /* put back all remaining locked dcache entries */
662 for (i = 0; i < nchunks; i++) {
663 struct dcache *tdc = dclist[i];
665 if (afs_indexFlags[tdc->index] & IFDataMod) {
667 * LOCKXXX -- should hold afs_xdcache(W) when
668 * modifying afs_indexFlags.
670 afs_indexFlags[tdc->index] &= ~IFDataMod;
671 afs_stats_cmperf.cacheCurrDirtyChunks--;
672 afs_indexFlags[tdc->index] &= ~IFDirtyPages;
673 if (sync & AFS_VMSYNC_INVAL) {
674 /* since we have invalidated all the pages of this
675 ** vnode by calling osi_VM_TryToSmush, we can
676 ** safely mark this dcache entry as not having
677 ** any pages. This vnode now becomes eligible for
678 ** reclamation by getDownD.
680 afs_indexFlags[tdc->index] &= ~IFAnyPages;
684 UpgradeSToWLock(&tdc->lock, 628);
685 tdc->f.states &= ~DWriting; /* correct? */
686 tdc->dflags |= DFEntryMod;
687 ReleaseWriteLock(&tdc->lock);
689 /* Mark the entry as released */
694 /* Now copy out return params */
695 UpgradeSToWLock(&avc->lock, 28); /* keep out others for a while */
696 afs_ProcessFS(avc, &OutStatus, areq);
697 /* Keep last (max) size of file on server to see if
698 * we need to call afs_StoreMini to extend the file.
701 *amaxStoredLength = OutStatus.Length;
702 ConvertWToSLock(&avc->lock);
/* release any chunks past the stored run */
707 for (j++; j <= high; j++) {
709 ReleaseSharedLock(&(dcList[j]->lock));
710 afs_PutDCache(dcList[j]);
711 /* Releasing entry */
717 afs_Trace2(afs_iclSetp, CM_TRACE_STOREALLDCDONE,
718 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code);
726 /* rock and operations for RX_FILESERVER */
/*
 * Per-fetch protocol state ("rock") for RX_FILESERVER fetches.
 * Visible members: the Rx call handle and the "server has no 64-bit RPCs"
 * flag; the remaining members (buffers, iovecs, counts) are elided in
 * this listing.
 */
728 struct rxfs_fetchVariables {
729 struct rx_call *call;
733 afs_int32 hasNo64bit;
/*
 * rxfs_fetchUfsRead -- "read" fetchOp for the UFS cache: pull up to one
 * large-space buffer's worth (AFS_LRALLOCSIZ) of data from the wire into
 * the rock's staging buffer v->tbuffer.
 */
739 rxfs_fetchUfsRead(void *r, afs_uint32 size, afs_uint32 *bytesread)
743 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;
746 tlen = (size > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : size);
748 code = rx_Read(v->call, v->tbuffer, tlen);
/*
 * rxfs_fetchMemRead -- "read" fetchOp for the memory cache: scatter up to
 * `tlen` bytes from the wire into the rock's iovec array (v->iov, count
 * returned in v->nio, at most RX_MAXIOVECS entries).
 */
757 rxfs_fetchMemRead(void *r, afs_uint32 tlen, afs_uint32 *bytesread)
760 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;
764 code = rx_Readv(v->call, v->iov, &v->nio, RX_MAXIOVECS, tlen);
/*
 * rxfs_fetchMemWrite -- "write" fetchOp for the memory cache: copy the
 * iovec data just read from the wire into the memory cache entry at
 * `offset`.  The osi_file pointer doubles as the memCacheEntry on this
 * path; the byte count is reported via *byteswritten.
 */
774 rxfs_fetchMemWrite(void *r, struct osi_file *fP, afs_uint32 offset,
775 afs_uint32 tlen, afs_uint32 *byteswritten)
778 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;
779 struct memCacheEntry *mceP = (struct memCacheEntry *)fP;
781 code = afs_MemWritevBlk(mceP, offset, v->iov, v->nio, tlen);
785 *byteswritten = code;
/*
 * rxfs_fetchUfsWrite -- "write" fetchOp for the UFS cache: write the
 * staging buffer's contents to the cache file.  The -1 offset argument to
 * afs_osi_Write() means "current file position"; the byte count is
 * reported via *byteswritten.
 */
790 rxfs_fetchUfsWrite(void *r, struct osi_file *fP, afs_uint32 offset,
791 afs_uint32 tlen, afs_uint32 *byteswritten)
794 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;
796 code = afs_osi_Write(fP, -1, v->tbuffer, tlen);
800 *byteswritten = code;
/*
 * rxfs_fetchClose -- finish the fetch RPC: invoke the matching
 * EndRXAFS_FetchData{,64}() to collect the server's status and callback
 * information into `o`, then end the Rx call, folding the RPC code into
 * the final result.  The 64/32-bit selection logic is partly elided here.
 */
806 rxfs_fetchClose(void *r, struct vcache *avc, struct dcache * adc,
807 struct afs_FetchOutput *o)
810 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;
816 #ifdef AFS_64BIT_CLIENT
818 code = EndRXAFS_FetchData64(v->call, &o->OutStatus, &o->CallBack,
822 code = EndRXAFS_FetchData(v->call, &o->OutStatus, &o->CallBack,
824 code = rx_EndCall(v->call, code);
/*
 * rxfs_fetchDestroy -- tear down a fetch rock: end the Rx call (folding
 * `code` into the result), free the staging buffer and iovec array if
 * allocated, then free the rock.  Mirrors rxfs_storeDestroy(); takes
 * `void **r` so the caller's rock pointer can be invalidated.
 */
833 rxfs_fetchDestroy(void **r, afs_int32 code)
835 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)*r;
840 code = rx_EndCall(v->call, code);
844 osi_FreeLargeSpace(v->tbuffer);
846 osi_FreeSmallSpace(v->iov);
847 osi_FreeSmallSpace(v);
/*
 * rxfs_fetchMore -- "more" fetchOp: read the next block-length word from
 * the stream (AFS/DFS translator extension -- multiple length-prefixed
 * blocks per fetch).  The top bit of the network-order word signals
 * "more data follows" and is stripped from *length.  A short read maps to
 * the call's Rx error, or -1 if Rx reports none.
 */
852 rxfs_fetchMore(void *r, afs_int32 *length, afs_uint32 *moredata)
855 struct rxfs_fetchVariables *v = (struct rxfs_fetchVariables *)r;
858 * The fetch protocol is extended for the AFS/DFS translator
859 * to allow multiple blocks of data, each with its own length,
860 * to be returned. As long as the top bit is set, there are more
863 * We do not do this for AFS file servers because they sometimes
864 * return large negative numbers as the transfer size.
868 code = rx_Read(v->call, (void *)length, sizeof(afs_int32));
870 *length = ntohl(*length);
871 if (code != sizeof(afs_int32)) {
872 code = rx_Error(v->call);
874 return (code ? code : -1); /* try to return code, not -1 */
/* top bit = continuation flag; the remaining 31 bits are the block length */
877 *moredata = *length & 0x80000000;
878 *length &= ~0x80000000;
/* fetchOps vector for the UFS (on-disk) cache; member initializers are
 * elided in this listing. */
883 struct fetchOps rxfs_fetchUfsOps = {
/* fetchOps vector for the in-memory cache; member initializers are
 * elided in this listing. */
892 struct fetchOps rxfs_fetchMemOps = {
/*
 * rxfs_fetchInit -- set up a FetchData RPC: allocate and zero the rock,
 * open a new Rx call, start FetchData64 (preferred under
 * AFS_64BIT_CLIENT) or fall back to the 32-bit FetchData when the server
 * rejects the opcode, then read the server's length prefix from the
 * stream.  Validates the advertised length: clamps negative lengths to 0
 * (older fileservers), and aborts if the length exceeds MAX_AFS_INT32 or
 * the amount we asked for.  On success selects the UFS or memcache
 * fetchOps vector based on cacheDiskType and allocates the matching
 * staging buffer / iovec array.
 *
 * Outputs: *alength (server-advertised length), *ops, *rock.
 *
 * NOTE(review): gaps in this listing elide NULL checks, several braces,
 * and some error-return paths; only visible statements are annotated.
 */
901 rxfs_fetchInit(struct afs_conn *tc, struct rx_connection *rxconn,
902 struct vcache *avc, afs_offs_t base,
903 afs_uint32 size, afs_int32 *alength, struct dcache *adc,
904 struct osi_file *fP, struct fetchOps **ops, void **rock)
906 struct rxfs_fetchVariables *v;
908 #ifdef AFS_64BIT_CLIENT
909 afs_uint32 length_hi = 0;
911 afs_uint32 length = 0, bytes;
913 v = (struct rxfs_fetchVariables *)
914 osi_AllocSmallSpace(sizeof(struct rxfs_fetchVariables));
916 osi_Panic("rxfs_fetchInit: osi_AllocSmallSpace returned NULL\n");
917 memset(v, 0, sizeof(struct rxfs_fetchVariables));
920 v->call = rx_NewCall(rxconn);
923 #ifdef AFS_64BIT_CLIENT
924 afs_size_t length64; /* as returned from server */
/* try the 64-bit RPC first unless this server is flagged 32-bit-only */
925 if (!afs_serverHasNo64Bit(tc)) {
926 afs_uint64 llbytes = size;
928 code = StartRXAFS_FetchData64(v->call,
929 (struct AFSFid *) &avc->f.fid.Fid,
933 afs_Trace2(afs_iclSetp, CM_TRACE_FETCH64CODE,
934 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code);
/* FetchData64 streams the high 32 bits of the length first */
936 bytes = rx_Read(v->call, (char *)&length_hi, sizeof(afs_int32));
938 if (bytes == sizeof(afs_int32)) {
939 length_hi = ntohl(length_hi);
942 code = rx_EndCall(v->call, RX_PROTOCOL_ERROR);
/* 32-bit fallback: reopen the call with plain FetchData */
948 if (code == RXGEN_OPCODE || afs_serverHasNo64Bit(tc)) {
/* a 32-bit offset cannot address past 2GB-1 */
949 if (base > 0x7FFFFFFF) {
956 v->call = rx_NewCall(rxconn);
958 StartRXAFS_FetchData(
959 v->call, (struct AFSFid*)&avc->f.fid.Fid,
963 afs_serverSetNo64Bit(tc);
/* low 32 bits of the advertised length */
968 bytes = rx_Read(v->call, (char *)&length, sizeof(afs_int32));
970 if (bytes == sizeof(afs_int32))
971 length = ntohl(length);
974 code = rx_EndCall(v->call, RX_PROTOCOL_ERROR);
980 FillInt64(length64, length_hi, length);
983 /* Check if the fileserver said our length is bigger than can fit
984 * in a signed 32-bit integer. If it is, we can't handle that, so
986 if (length64 > MAX_AFS_INT32) {
990 afs_warn("afs: Warning: FetchData64 returned too much data "
991 "(length64 %u.%u); this should not happen! "
992 "Aborting fetch request.\n",
996 code = rx_EndCall(v->call, RX_PROTOCOL_ERROR);
1000 code = code != 0 ? code : EIO;
1005 /* Check if the fileserver said our length was negative. If it
1006 * is, just treat it as a 0 length, since some older fileservers
1007 * returned negative numbers when they meant to return 0. Note
1008 * that we must do this in this 64-bit-specific block, since
1009 * length64 being negative will screw up our conversion to the
1010 * 32-bit 'alength' below. */
1012 length_hi = length = 0;
1013 FillInt64(length64, 0, 0);
1017 afs_Trace3(afs_iclSetp, CM_TRACE_FETCH64LENG,
1018 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code,
1020 ICL_HANDLE_OFFSET(length64));
1023 #else /* AFS_64BIT_CLIENT */
1025 code = StartRXAFS_FetchData(v->call, (struct AFSFid *)&avc->f.fid.Fid,
1031 rx_Read(v->call, (char *)&length, sizeof(afs_int32));
1033 if (bytes == sizeof(afs_int32)) {
1034 *alength = ntohl(length);
1036 /* Older fileservers can return a negative length when they
1037 * meant to return 0; just assume negative lengths were
1038 * meant to be 0 lengths. */
1042 code = rx_EndCall(v->call, RX_PROTOCOL_ERROR);
1046 #endif /* AFS_64BIT_CLIENT */
1050 /* We need to cast here, in order to avoid issues if *alength is
1051 * negative. Some, older, fileservers can return a negative length,
1052 * which the rest of the code deals correctly with. */
1053 if (code == 0 && *alength > (afs_int32) size) {
1054 /* The fileserver told us it is going to send more data than we
1055 * requested. It shouldn't do that, and accepting that much data
1056 * can make us take up more cache space than we're supposed to,
1061 afs_warn("afs: Warning: FetchData64 returned more data than "
1062 "requested (requested %ld, got %ld); this should not "
1063 "happen! Aborting fetch request.\n",
1064 (long)size, (long)*alength);
1067 code = rx_EndCall(v->call, RX_PROTOCOL_ERROR);
1074 osi_FreeSmallSpace(v);
/* pick the ops vector and matching staging storage for the cache type */
1077 if (cacheDiskType == AFS_FCACHE_TYPE_UFS) {
1078 v->tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
1080 osi_Panic("rxfs_fetchInit: osi_AllocLargeSpace for iovecs returned NULL\n");
1081 osi_Assert(WriteLocked(&adc->lock));
1083 *ops = (struct fetchOps *) &rxfs_fetchUfsOps;
1086 afs_Trace4(afs_iclSetp, CM_TRACE_MEMFETCH, ICL_TYPE_POINTER, avc,
1087 ICL_TYPE_POINTER, fP, ICL_TYPE_OFFSET,
1088 ICL_HANDLE_OFFSET(base), ICL_TYPE_INT32, length);
1090 * We need to alloc the iovecs on the heap so that they are "pinned"
1091 * rather than declare them on the stack - defect 11272
1093 v->iov = osi_AllocSmallSpace(sizeof(struct iovec) * RX_MAXIOVECS);
1095 osi_Panic("rxfs_fetchInit: osi_AllocSmallSpace for iovecs returned NULL\n");
1096 *ops = (struct fetchOps *) &rxfs_fetchMemOps;
1104 * Routine called on fetch; also tells people waiting for data
1105 * that more has arrived.
1107 * \param tc Ptr to the AFS connection structure.
1108 * \param rxconn Ptr to the Rx connection structure.
1109 * \param fP File descriptor for the cache file.
1110 * \param base Base offset to fetch.
1111 * \param adc Ptr to the dcache entry for the file, write-locked.
1112 * \param avc Ptr to the vcache entry for the file.
1113 * \param size Amount of data that should be fetched.
1114 * \param tsmall Ptr to the afs_FetchOutput structure.
1116 * \note Environment: Nothing interesting.
/*
 * afs_CacheFetchProc -- drive one fetch: initialize the RPC via
 * rxfs_fetchInit(), then loop reading from the wire (ops->read) and
 * writing into the cache (ops->write), advancing adc->validPos as data
 * lands and waking any readers sleeping on it.  For CForeign vcaches
 * (DFS translator servers) the length-prefixed multi-block protocol is
 * used via ops->more.  Finishes with ops->close / ops->destroy and
 * records transfer statistics.  See the doxygen comment above for the
 * parameter contract.
 *
 * NOTE(review): the function continues past the end of this listing (its
 * closing lines are not visible), and gaps elide braces and error
 * checks; only visible statements are annotated.
 */
1119 afs_CacheFetchProc(struct afs_conn *tc, struct rx_connection *rxconn,
1120 struct osi_file *fP, afs_size_t base,
1121 struct dcache *adc, struct vcache *avc, afs_int32 size,
1122 struct afs_FetchOutput *tsmall)
1126 afs_uint32 bytesread, byteswritten;
1127 struct fetchOps *ops = NULL;
1129 afs_uint32 moredata = 0;
1134 osi_timeval_t xferStartTime; /*FS xfer start time */
1135 afs_size_t bytesToXfer = 0, bytesXferred = 0;
1138 AFS_STATCNT(CacheFetchProc);
1140 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHDATA);
1144 * avc->lock(R) if setLocks && !slowPass
1145 * avc->lock(W) if !setLocks || slowPass
1148 code = rxfs_fetchInit(
1149 tc, rxconn, avc, base, size, &length, adc, fP, &ops, &rock);
1152 osi_GetuTime(&xferStartTime);
1153 #endif /* AFS_NOSTATS */
/* nothing valid in this chunk beyond `base` until data arrives */
1155 adc->validPos = base;
/* CForeign servers (DFS translator) use the multi-block length protocol */
1158 if (avc->f.states & CForeign) {
1159 code = (*ops->more)(rock, &length, &moredata);
1164 bytesToXfer += length;
1165 #endif /* AFS_NOSTATS */
/* inner transfer loop: wire -> staging -> cache, one slice at a time */
1166 while (length > 0) {
1167 #ifdef RX_KERNEL_TRACE
1168 afs_Trace1(afs_iclSetp, CM_TRACE_TIMESTAMP, ICL_TYPE_STRING,
1171 code = (*ops->read)(rock, length, &bytesread);
1172 #ifdef RX_KERNEL_TRACE
1173 afs_Trace1(afs_iclSetp, CM_TRACE_TIMESTAMP, ICL_TYPE_STRING,
1177 bytesXferred += bytesread;
1178 #endif /* AFS_NOSTATS */
1180 afs_Trace3(afs_iclSetp, CM_TRACE_FETCH64READ,
1181 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code,
1182 ICL_TYPE_INT32, length);
1186 code = (*ops->write)(rock, fP, offset, bytesread, &byteswritten);
1189 offset += bytesread;
1191 length -= bytesread;
/* publish progress and wake readers sleeping on adc->validPos */
1192 adc->validPos = base;
1193 if (afs_osi_Wakeup(&adc->validPos) == 0)
1194 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAKE, ICL_TYPE_STRING,
1195 __FILE__, ICL_TYPE_INT32, __LINE__,
1196 ICL_TYPE_POINTER, adc, ICL_TYPE_INT32,
1202 code = (*ops->close)(rock, avc, adc, tsmall);
1204 code = (*ops->destroy)(&rock, code);
1207 FillStoreStats(code, AFS_STATS_FS_XFERIDX_FETCHDATA, xferStartTime,
1208 bytesToXfer, bytesXferred);