2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 #include <afsconfig.h>
20 #include "../afs/param.h"
24 #include "../afs/sysincludes.h" /* Standard vendor system headers */
25 #include "../afs/afsincludes.h" /* Afs-based standard headers */
26 #include "../afs/afs_stats.h" /* statistics */
27 #include "../afs/afs_cbqueue.h"
28 #include "../afs/nfsclient.h"
29 #include "../afs/afs_osidnlc.h"
32 extern char afs_zeros[AFS_ZEROS];
35 afs_int32 nihints; /* # of above actually in-use */
39 /* Imported variables */
40 extern afs_rwlock_t afs_xdcache;
41 extern unsigned char *afs_indexFlags;
42 extern afs_hyper_t *afs_indexTimes; /* Dcache entry Access times */
43 extern afs_hyper_t afs_indexCounter; /* Fake time for marking index */
46 /* Forward declarations */
47 void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
48 struct AFS_UCRED *acred, struct vrequest *areq);
/*
 * afs_MemRead -- satisfy a read request out of the memory cache.
 *
 * avc    - vcache entry for the file being read
 * auio   - caller's uio: supplies afsio_offset / afsio_resid and
 *          receives the data
 * acred  - caller's credentials (also used for the NFS translator
 *          access check below)
 * albn, abpp, noLock - interface-compatibility parameters; their use
 *          is not fully visible in this excerpt
 *
 * Walks the request one dcache chunk at a time: finds (or fetches,
 * preferably via the background daemon) the dcache entry covering the
 * current file position, then copies bytes to the caller with
 * afs_MemReadUIO.  Holes between chunks are filled from afs_zeros.
 * The return value is produced via afs_CheckCode.
 *
 * NOTE(review): this listing is elided in places; lock-release and
 * error paths not shown here must be consulted in the full source.
 */
50 int afs_MemRead(register struct vcache *avc, struct uio *auio, struct AFS_UCRED *acred,
51 daddr_t albn, struct buf **abpp, int noLock)
53 afs_size_t totalLength;
54 afs_size_t transferLength;
56 afs_size_t offset, len, tlen;
59 afs_int32 error, trybusy=1;
65 AFS_STATCNT(afs_MemRead);
69 /* check that we have the latest status info in the vnode cache */
70 if ((code = afs_InitReq(&treq, acred))) return code;
72 code = afs_VerifyVCache(avc, &treq);
74 code = afs_CheckCode(code, &treq, 8); /* failed to get it */
/* Requests arriving through the NFS translator are access-checked
 * explicitly, since they did not go through the local VFS layer. */
79 #ifndef AFS_VM_RDWR_ENV
80 if (AFS_NFSXLATORREQ(acred)) {
81 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
82 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
83 return afs_CheckCode(EACCES, &treq, 9);
/* Scratch iovec used by afsio_copy/afsio_trim when carving the
 * caller's uio into per-chunk sub-transfers; freed at the bottom. */
88 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
89 totalLength = auio->afsio_resid;
90 filePos = auio->afsio_offset;
91 afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
92 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
93 ICL_TYPE_INT32, totalLength,
94 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
98 ObtainReadLock(&avc->lock);
99 #if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
100 if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
101 hset(avc->flushDV, avc->m.DataVersion);
/* Main transfer loop: one iteration per dcache chunk touched. */
109 while (totalLength > 0) {
110 /* read all of the cached info */
111 if (filePos >= avc->m.Length) break; /* all done */
114 ReleaseReadLock(&tdc->lock);
117 tdc = afs_FindDCache(avc, filePos);
119 ObtainReadLock(&tdc->lock);
120 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
121 len = tdc->f.chunkBytes - offset;
124 /* a tricky question: does the presence of the DFFetching flag
125 mean that we're fetching the latest version of the file? No.
126 The server could update the file as soon as the fetch responsible
127 for the setting of the DFFetching flag completes.
129 However, the presence of the DFFetching flag (visible under
130 a dcache read lock since it is set and cleared only under a
131 dcache write lock) means that we're fetching as good a version
132 as was known to this client at the time of the last call to
133 afs_VerifyVCache, since the latter updates the stat cache's
134 m.DataVersion field under a vcache write lock, and from the
135 time that the DFFetching flag goes on in afs_GetDCache (before
136 the fetch starts), to the time it goes off (after the fetch
137 completes), afs_GetDCache keeps at least a read lock on the
140 This means that if the DFFetching flag is set, we can use that
141 data for any reads that must come from the current version of
142 the file (current == m.DataVersion).
144 Another way of looking at this same point is this: if we're
145 fetching some data and then try to do an afs_VerifyVCache, the
146 VerifyVCache operation will not complete until after the
147 DFFetching flag is turned off and the dcache entry's f.versionNo
150 Note, by the way, that if DFFetching is set,
151 m.DataVersion > f.versionNo (the latter is not updated until
152 after the fetch completes).
155 ReleaseReadLock(&tdc->lock);
156 afs_PutDCache(tdc); /* before reusing tdc */
158 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
159 ObtainReadLock(&tdc->lock);
160 /* now, first try to start transfer, if we'll need the data. If
161 * data already coming, we don't need to do this, obviously. Type
162 * 2 requests never return a null dcache entry, btw.
164 if (!(tdc->dflags & DFFetching)
165 && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
166 /* have cache entry, it is not coming in now,
167 * and we'll need new data */
/* Stale data and no fetch in flight: try to hand the fetch to a
 * background daemon so we can overlap the wait with the RPC. */
169 if (trybusy && !afs_BBusy()) {
171 /* daemon is not busy */
172 ObtainSharedLock(&tdc->mflock, 665);
173 if (!(tdc->mflags & DFFetchReq)) {
174 /* start the daemon (may already be running, however) */
175 UpgradeSToWLock(&tdc->mflock, 666);
176 tdc->mflags |= DFFetchReq;
177 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
178 (afs_size_t)filePos, (afs_size_t) 0,
/* afs_BQueue failed (table full): back out the request flag and stop
 * trying the daemons for the remainder of this read. */
181 tdc->mflags &= ~DFFetchReq;
182 trybusy = 0; /* Avoid bkg daemon since they're too busy */
183 ReleaseWriteLock(&tdc->mflock);
186 ConvertWToSLock(&tdc->mflock);
187 /* don't use bp pointer! */
/* Wait (interruptibly) for the daemon to pick the request up; every
 * lock is dropped around the sleep and reacquired afterwards in
 * avc -> tdc -> mflock order. */
190 ConvertSToRLock(&tdc->mflock);
191 while (!code && tdc->mflags & DFFetchReq) {
192 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
193 ICL_TYPE_STRING, __FILE__,
194 ICL_TYPE_INT32, __LINE__,
195 ICL_TYPE_POINTER, tdc,
196 ICL_TYPE_INT32, tdc->dflags);
197 /* don't need waiting flag on this one */
198 ReleaseReadLock(&tdc->mflock);
199 ReleaseReadLock(&tdc->lock);
200 ReleaseReadLock(&avc->lock);
201 code = afs_osi_SleepSig(&tdc->validPos);
202 ObtainReadLock(&avc->lock);
203 ObtainReadLock(&tdc->lock);
204 ObtainReadLock(&tdc->mflock);
206 ReleaseReadLock(&tdc->mflock);
213 /* now data may have started flowing in (if DFFetching is on). If
214 * data is now streaming in, then wait for some interesting stuff.
217 while (!code && (tdc->dflags & DFFetching) &&
218 tdc->validPos <= filePos) {
219 /* too early: wait for DFFetching flag to vanish,
220 * or data to appear */
221 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
222 ICL_TYPE_STRING, __FILE__,
223 ICL_TYPE_INT32, __LINE__,
224 ICL_TYPE_POINTER, tdc,
225 ICL_TYPE_INT32, tdc->dflags);
226 ReleaseReadLock(&tdc->lock);
227 ReleaseReadLock(&avc->lock);
228 code = afs_osi_SleepSig(&tdc->validPos);
229 ObtainReadLock(&avc->lock);
230 ObtainReadLock(&tdc->lock);
236 /* fetching flag gone, data is here, or we never tried (BBusy for instance) */
237 if (tdc->dflags & DFFetching) {
238 /* still fetching, some new data is here: compute length and offset */
239 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
240 len = tdc->validPos - filePos;
243 /* no longer fetching, verify data version (avoid new GetDCache call) */
244 if (hsame(avc->m.DataVersion, tdc->f.versionNo)) {
245 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
246 len = tdc->f.chunkBytes - offset;
249 /* don't have current data, so get it below */
250 afs_Trace3(afs_iclSetp, CM_TRACE_VERSIONNO,
251 ICL_TYPE_INT64, ICL_HANDLE_OFFSET(filePos),
252 ICL_TYPE_HYPER, &avc->m.DataVersion,
253 ICL_TYPE_HYPER, &tdc->f.versionNo);
254 ReleaseReadLock(&tdc->lock);
/* Last resort: type-1 GetDCache fetches the data synchronously.
 * avc->lock is dropped around it since GetDCache may block. */
261 ReleaseReadLock(&avc->lock);
262 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
263 ObtainReadLock(&avc->lock);
264 if (tdc) ObtainReadLock(&tdc->lock);
279 if (len > totalLength) len = totalLength; /* will read len bytes */
280 if (len <= 0) { /* shouldn't get here if DFFetching is on */
281 /* read past the end of a chunk, may not be at next chunk yet, and yet
282 also not at eof, so may have to supply fake zeros */
283 len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
284 if (len > totalLength) len = totalLength; /* and still within xfr request */
285 tlen = avc->m.Length - offset; /* and still within file */
286 if (len > tlen) len = tlen;
287 if (len > AFS_ZEROS) len = sizeof(afs_zeros); /* and in 0 buffer */
288 afsio_copy(auio, &tuio, tvec);
290 afsio_trim(&tuio, trimlen);
291 AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, &tuio, code);
298 /* get the data from the mem cache */
300 /* mung uio structure to be right for this transfer */
301 afsio_copy(auio, &tuio, tvec);
303 afsio_trim(&tuio, trimlen);
304 tuio.afsio_offset = offset;
306 code = afs_MemReadUIO(tdc->f.inode, &tuio);
313 /* otherwise we've read some, fixup length, etc and continue with next seg */
314 len = len - tuio.afsio_resid; /* compute amount really transferred */
316 afsio_skip(auio, trimlen); /* update input uio structure */
318 transferLength += len;
321 if (len <= 0) break; /* surprise eof */
322 } /* the whole while loop */
327 * tdc->lock(R) if tdc
330 /* if we make it here with tdc non-zero, then it is the last chunk we
331 * dealt with, and we have to release it when we're done. We hold on
332 * to it in case we need to do a prefetch.
335 ReleaseReadLock(&tdc->lock);
336 #if !defined(AFS_VM_RDWR_ENV)
337 /* try to queue prefetch, if needed */
339 afs_PrefetchChunk(avc, tdc, acred, &treq);
345 ReleaseReadLock(&avc->lock);
346 osi_FreeSmallSpace(tvec);
347 error = afs_CheckCode(error, &treq, 10);
351 /* called with the dcache entry triggering the fetch, the vcache entry involved,
352 * and a vrequest for the read call. Marks the dcache entry as having already
353 * triggered a prefetch, starts the prefetch going and sets the DFFetchReq
354 * flag in the prefetched block, so that the next call to read knows to wait
355 * for the daemon to start doing things.
357 * This function must be called with the vnode at least read-locked, and
358 * no locks on the dcache, because it plays around with dcache entries.
360 void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
361 struct AFS_UCRED *acred, struct vrequest *areq)
363 register struct dcache *tdc;
365 afs_size_t j1, j2; /* junk vbls for GetDCache to trash */
367 offset = adc->f.chunk+1; /* next chunk we'll need */
368 offset = AFS_CHUNKTOBASE(offset); /* base of next chunk */
369 ObtainReadLock(&adc->lock);
370 ObtainSharedLock(&adc->mflock, 662);
/* Prefetch only when the next chunk lies inside the file, we have not
 * already tried for this chunk (DFNextStarted), and a background
 * daemon slot is likely to be available. */
371 if (offset < avc->m.Length && !(adc->mflags & DFNextStarted) && !afs_BBusy()) {
374 UpgradeSToWLock(&adc->mflock, 663);
375 adc->mflags |= DFNextStarted; /* we've tried to prefetch for this guy */
376 ReleaseWriteLock(&adc->mflock);
377 ReleaseReadLock(&adc->lock);
379 tdc = afs_GetDCache(avc, offset, areq, &j1, &j2, 2); /* type 2 never returns 0 */
380 ObtainSharedLock(&tdc->mflock, 651);
381 if (!(tdc->mflags & DFFetchReq)) {
382 /* ask the daemon to do the work */
383 UpgradeSToWLock(&tdc->mflock, 652);
384 tdc->mflags |= DFFetchReq; /* guaranteed to be cleared by BKG or GetDCache */
385 /* last parm (1) tells bkg daemon to do an afs_PutDCache when it is done,
386 * since we don't want to wait for it to finish before doing so ourselves.
388 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
389 (afs_size_t) offset, (afs_size_t) 1, tdc);
391 /* Bkg table full; just abort non-important prefetching to avoid deadlocks */
392 tdc->mflags &= ~DFFetchReq;
393 ReleaseWriteLock(&tdc->mflock);
/* Since the queue attempt failed, clear DFNextStarted on the source
 * chunk so a later read can retry the prefetch. */
397 * DCLOCKXXX: This is a little sketchy, since someone else
398 * could have already started a prefetch.. In practice,
399 * this probably doesn't matter; at most it would cause an
400 * extra slot in the BKG table to be used up when someone
401 * prefetches this for the second time.
403 ObtainReadLock(&adc->lock);
404 ObtainWriteLock(&adc->mflock, 664);
405 adc->mflags &= ~DFNextStarted;
406 ReleaseWriteLock(&adc->mflock);
407 ReleaseReadLock(&adc->lock);
409 ReleaseWriteLock(&tdc->mflock);
412 ReleaseSharedLock(&tdc->mflock);
/* Prefetch not attempted: just drop the locks taken at entry. */
416 ReleaseSharedLock(&adc->mflock);
417 ReleaseReadLock(&adc->lock);
422 /* if the vcache is up-to-date, and the request fits entirely into the chunk
423 * that the hint here references, then we just use it quickly, otherwise we
424 * have to call the slow read.
426 * This could be generalized in several ways to take advantage of partial
427 * state even when all the chips don't fall the right way. For instance,
428 * if the hint is good and the first part of the read request can be
429 * satisfied from the chunk, then just do the read. After the read has
430 * completed, check to see if there's more. (Chances are there won't be.)
431 * If there is more, then just call afs_UFSReadSlow and let it do the rest.
433 * For the time being, I'm ignoring quick.f, but it should be used at
435 * do this in the future avc->quick.f = tfile; but I think it
436 * has to be done under a write lock, but don't want to wait on the
439 /* everywhere that a dcache can be freed (look for NULLIDX)
440 * probably does it under a write lock on xdcache. Need to invalidate
442 * Also need to worry about DFFetching, and IFFree, I think. */
443 static struct dcache *savedc = 0;
/*
 * afs_UFSReadFast -- fast-path read for the disk (UFS) cache.
 *
 * Uses the per-vcache hint (avc->quick.dc) to reach the cached chunk
 * directly, avoiding the general chunk-lookup machinery, when:
 *   - the vcache status is current (CStatd),
 *   - the hinted dcache slot is still valid (index != NULLIDX and not
 *     IFFree/IFDiscarded),
 *   - the hint stamp still matches (checked again under tdc->lock),
 *   - the whole request fits inside the hinted chunk, and
 *   - no fetch is in progress on that chunk (DFFetching clear).
 * The uio offset is rebased by quick.minLoc for the cache-file read
 * and restored afterwards; LRU bookkeeping is updated and a prefetch
 * of the next chunk may be queued.  If any precondition fails, it
 * falls back to the general path, afs_UFSRead.
 *
 * NOTE(review): this listing is elided in places; several unlock /
 * cleanup lines on the failure branches are not visible here.
 */
445 int afs_UFSReadFast(register struct vcache *avc, struct uio *auio,
446 struct AFS_UCRED *acred, daddr_t albn, struct buf **abpp, int noLock)
448 struct vrequest treq;
451 struct osi_file *tfile;
455 ObtainReadLock(&avc->lock);
456 ObtainReadLock(&afs_xdcache);
458 if ((avc->states & CStatd) /* up to date */
459 && (tdc = avc->quick.dc) && (tdc->index != NULLIDX)
460 && !(afs_indexFlags[tdc->index] & (IFFree | IFDiscarded))) {
465 ReleaseReadLock(&afs_xdcache);
466 if (tdc->stamp == avc->quick.stamp) {
468 ObtainReadLock(&tdc->lock);
/* Re-validate the hint under tdc->lock: stamp unchanged, the request
 * lies entirely within the hinted chunk, and no fetch is running. */
471 if ((tdc->stamp == avc->quick.stamp) /* hint matches */
472 && ((offDiff = (afs_size_t)(auio->afsio_offset - avc->quick.minLoc)) >= 0)
473 && (tdc->f.chunkBytes >= auio->afsio_resid + offDiff)
474 && !(tdc->dflags & DFFetching)) { /* fits in chunk */
476 auio->afsio_offset -= avc->quick.minLoc;
478 afs_Trace4(afs_iclSetp, CM_TRACE_READFAST, ICL_TYPE_POINTER, avc,
479 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(auio->afsio_offset),
480 ICL_TYPE_INT32, auio->afsio_resid,
481 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
/* Open the cache file and read from it with whichever VFS entry
 * point this platform provides (per-OS #if ladder below). */
483 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
488 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, auio, NULL, NULL, NULL, &afs_osi_cred);
492 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, auio, NULL, NULL);
494 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t)&auio->afsio_offset, auio, NULL, NULL, -1);
500 VOP_RWLOCK(tfile->vnode, 0);
501 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
502 VOP_RWUNLOCK(tfile->vnode, 0);
505 #if defined(AFS_SGI_ENV)
507 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
508 AFS_VOP_READ(tfile->vnode, auio, IO_ISLOCKED, &afs_osi_cred, code);
509 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
513 auio->uio_rw = UIO_READ;
515 VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred, code);
517 #else /* AFS_OSF_ENV */
518 #if defined(AFS_HPUX100_ENV)
520 code = VOP_RDWR(tfile->vnode, auio, UIO_READ, 0, &afs_osi_cred);
523 #if defined(AFS_LINUX20_ENV)
525 code = osi_file_uio_rdwr(tfile, auio, UIO_READ);
528 #if defined(AFS_DARWIN_ENV)
530 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
531 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
532 VOP_UNLOCK(tfile->vnode, 0, current_proc());
535 #if defined(AFS_XBSD_ENV)
537 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
538 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
539 VOP_UNLOCK(tfile->vnode, 0, curproc);
542 code = VOP_RDWR(tfile->vnode, auio, UIO_READ, 0, &afs_osi_cred);
/* Undo the minLoc rebasing so the caller sees a real file offset. */
551 auio->afsio_offset += avc->quick.minLoc;
553 /* Fix up LRU info */
554 hset(afs_indexTimes[tdc->index], afs_indexCounter);
555 hadd32(afs_indexCounter, 1);
558 ReleaseReadLock(&avc->lock);
559 #if !defined(AFS_VM_RDWR_ENV)
/* Queue a prefetch of the next chunk if one hasn't been started. */
560 if (!(code = afs_InitReq(&treq, acred))) {
561 if (!(tdc->mflags & DFNextStarted))
562 afs_PrefetchChunk(avc, tdc, acred, &treq);
566 if (readLocked) ReleaseReadLock(&tdc->lock);
570 if (!tdc->f.chunkBytes) { /* debugging f.chunkBytes == 0 problem */
573 if (readLocked) ReleaseReadLock(&tdc->lock);
576 ReleaseReadLock(&afs_xdcache);
579 /* come here if fast path doesn't work for some reason or other */
581 ReleaseReadLock(&avc->lock);
582 return afs_UFSRead(avc, auio, acred, albn, abpp, noLock);
585 int afs_UFSRead(register struct vcache *avc, struct uio *auio,
586 struct AFS_UCRED *acred, daddr_t albn, struct buf **abpp, int noLock)
588 afs_size_t totalLength;
589 afs_size_t transferLength;
591 afs_size_t offset, len, tlen;
593 struct dcache *tdc=0;
597 struct osi_file *tfile;
600 struct vrequest treq;
602 AFS_STATCNT(afs_UFSRead);
603 if (avc && avc->vc_error)
606 /* check that we have the latest status info in the vnode cache */
607 if ((code = afs_InitReq(&treq, acred))) return code;
610 osi_Panic ("null avc in afs_UFSRead");
612 code = afs_VerifyVCache(avc, &treq);
614 code = afs_CheckCode(code, &treq, 11); /* failed to get it */
620 #ifndef AFS_VM_RDWR_ENV
621 if (AFS_NFSXLATORREQ(acred)) {
622 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
623 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
624 return afs_CheckCode(EACCES, &treq, 12);
629 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
630 totalLength = auio->afsio_resid;
631 filePos = auio->afsio_offset;
632 afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
633 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
634 ICL_TYPE_INT32, totalLength,
635 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
639 ObtainReadLock(&avc->lock);
640 #if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
641 if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
642 hset(avc->flushDV, avc->m.DataVersion);
646 while (totalLength > 0) {
647 /* read all of the cached info */
648 if (filePos >= avc->m.Length) break; /* all done */
651 ReleaseReadLock(&tdc->lock);
654 tdc = afs_FindDCache(avc, filePos);
656 ObtainReadLock(&tdc->lock);
657 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
658 len = tdc->f.chunkBytes - offset;
661 /* a tricky question: does the presence of the DFFetching flag
662 mean that we're fetching the latest version of the file? No.
663 The server could update the file as soon as the fetch responsible
664 for the setting of the DFFetching flag completes.
666 However, the presence of the DFFetching flag (visible under
667 a dcache read lock since it is set and cleared only under a
668 dcache write lock) means that we're fetching as good a version
669 as was known to this client at the time of the last call to
670 afs_VerifyVCache, since the latter updates the stat cache's
671 m.DataVersion field under a vcache write lock, and from the
672 time that the DFFetching flag goes on in afs_GetDCache (before
673 the fetch starts), to the time it goes off (after the fetch
674 completes), afs_GetDCache keeps at least a read lock on the
677 This means that if the DFFetching flag is set, we can use that
678 data for any reads that must come from the current version of
679 the file (current == m.DataVersion).
681 Another way of looking at this same point is this: if we're
682 fetching some data and then try do an afs_VerifyVCache, the
683 VerifyVCache operation will not complete until after the
684 DFFetching flag is turned off and the dcache entry's f.versionNo
687 Note, by the way, that if DFFetching is set,
688 m.DataVersion > f.versionNo (the latter is not updated until
689 after the fetch completes).
692 ReleaseReadLock(&tdc->lock);
693 afs_PutDCache(tdc); /* before reusing tdc */
695 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
696 ObtainReadLock(&tdc->lock);
700 /* now, first try to start transfer, if we'll need the data. If
701 * data already coming, we don't need to do this, obviously. Type
702 * 2 requests never return a null dcache entry, btw. */
703 if (!(tdc->dflags & DFFetching)
704 && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
705 /* have cache entry, it is not coming in now, and we'll need new data */
707 if (trybusy && !afs_BBusy()) {
709 /* daemon is not busy */
710 ObtainSharedLock(&tdc->mflock, 667);
711 if (!(tdc->mflags & DFFetchReq)) {
712 UpgradeSToWLock(&tdc->mflock, 668);
713 tdc->mflags |= DFFetchReq;
714 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
715 (afs_size_t) filePos, (afs_size_t) 0,
718 /* Bkg table full; retry deadlocks */
719 tdc->mflags &= ~DFFetchReq;
720 trybusy = 0; /* Avoid bkg daemon since they're too busy */
721 ReleaseWriteLock(&tdc->mflock);
724 ConvertWToSLock(&tdc->mflock);
727 ConvertSToRLock(&tdc->mflock);
728 while (!code && tdc->mflags & DFFetchReq) {
729 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
730 ICL_TYPE_STRING, __FILE__,
731 ICL_TYPE_INT32, __LINE__,
732 ICL_TYPE_POINTER, tdc,
733 ICL_TYPE_INT32, tdc->dflags);
734 /* don't need waiting flag on this one */
735 ReleaseReadLock(&tdc->mflock);
736 ReleaseReadLock(&tdc->lock);
737 ReleaseReadLock(&avc->lock);
738 code = afs_osi_SleepSig(&tdc->validPos);
739 ObtainReadLock(&avc->lock);
740 ObtainReadLock(&tdc->lock);
741 ObtainReadLock(&tdc->mflock);
743 ReleaseReadLock(&tdc->mflock);
750 /* now data may have started flowing in (if DFFetching is on). If
751 * data is now streaming in, then wait for some interesting stuff.
754 while (!code && (tdc->dflags & DFFetching) &&
755 tdc->validPos <= filePos) {
756 /* too early: wait for DFFetching flag to vanish,
757 * or data to appear */
758 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
759 ICL_TYPE_STRING, __FILE__,
760 ICL_TYPE_INT32, __LINE__,
761 ICL_TYPE_POINTER, tdc,
762 ICL_TYPE_INT32, tdc->dflags);
763 ReleaseReadLock(&tdc->lock);
764 ReleaseReadLock(&avc->lock);
765 code = afs_osi_SleepSig(&tdc->validPos);
766 ObtainReadLock(&avc->lock);
767 ObtainReadLock(&tdc->lock);
773 /* fetching flag gone, data is here, or we never tried
774 * (BBusy for instance) */
775 if (tdc->dflags & DFFetching) {
776 /* still fetching, some new data is here:
777 * compute length and offset */
778 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
779 len = tdc->validPos - filePos;
782 /* no longer fetching, verify data version (avoid new
784 if (hsame(avc->m.DataVersion, tdc->f.versionNo)
785 && tdc->f.chunkBytes) {
786 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
787 len = tdc->f.chunkBytes - offset;
790 /* don't have current data, so get it below */
791 afs_Trace3(afs_iclSetp, CM_TRACE_VERSIONNO,
792 ICL_TYPE_INT64, ICL_HANDLE_OFFSET(filePos),
793 ICL_TYPE_HYPER, &avc->m.DataVersion,
794 ICL_TYPE_HYPER, &tdc->f.versionNo);
795 ReleaseReadLock(&tdc->lock);
802 /* If we get, it was not possible to start the
803 * background daemon. With flag == 1 afs_GetDCache
804 * does the FetchData rpc synchronously.
806 ReleaseReadLock(&avc->lock);
807 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
808 ObtainReadLock(&avc->lock);
809 if (tdc) ObtainReadLock(&tdc->lock);
813 afs_Trace3(afs_iclSetp, CM_TRACE_VNODEREAD,
814 ICL_TYPE_POINTER, tdc,
815 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(offset),
816 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(len));
821 if (len > totalLength) len = totalLength; /* will read len bytes */
822 if (len <= 0) { /* shouldn't get here if DFFetching is on */
823 afs_Trace4(afs_iclSetp, CM_TRACE_VNODEREAD2,
824 ICL_TYPE_POINTER, tdc,
825 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(tdc->validPos),
826 ICL_TYPE_INT32, tdc->f.chunkBytes,
827 ICL_TYPE_INT32, tdc->dflags);
828 /* read past the end of a chunk, may not be at next chunk yet, and yet
829 also not at eof, so may have to supply fake zeros */
830 len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
831 if (len > totalLength) len = totalLength; /* and still within xfr request */
832 tlen = avc->m.Length - offset; /* and still within file */
833 if (len > tlen) len = tlen;
834 if (len > AFS_ZEROS) len = sizeof(afs_zeros); /* and in 0 buffer */
835 afsio_copy(auio, &tuio, tvec);
837 afsio_trim(&tuio, trimlen);
838 AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, &tuio, code);
845 /* get the data from the file */
847 if (tfile = tdc->ihint) {
848 if (tdc->f.inode != tfile->inum){
849 afs_warn( "afs_UFSRead: %x hint mismatch tdc %d inum %d\n",
850 tdc, tdc->f.inode, tfile->inum );
852 tdc->ihint = tfile = 0;
862 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
863 /* mung uio structure to be right for this transfer */
864 afsio_copy(auio, &tuio, tvec);
866 afsio_trim(&tuio, trimlen);
867 tuio.afsio_offset = offset;
871 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL,
872 NULL, &afs_osi_cred);
876 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL);
877 /* Flush all JFS pages now for big performance gain in big file cases
878 * If we do something like this, must check to be sure that AFS file
879 * isn't mmapped... see afs_gn_map() for why.
882 if (tfile->vnode->v_gnode && tfile->vnode->v_gnode->gn_seg) {
883 many different ways to do similar things:
884 so far, the best performing one is #2, but #1 might match it if we
885 straighten out the confusion regarding which pages to flush. It
887 1. vm_flushp(tfile->vnode->v_gnode->gn_seg, 0, len/PAGESIZE - 1);
888 2. vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
889 (len + PAGESIZE-1)/PAGESIZE);
890 3. vms_inactive(tfile->vnode->v_gnode->gn_seg) Doesn't work correctly
891 4. vms_delete(tfile->vnode->v_gnode->gn_seg) probably also fails
892 tfile->vnode->v_gnode->gn_seg = NULL;
896 Unfortunately, this seems to cause frequent "cache corruption" episodes.
897 vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
898 (len + PAGESIZE-1)/PAGESIZE);
902 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t)&offset, &tuio, NULL, NULL, -1);
908 VOP_RWLOCK(tfile->vnode, 0);
909 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
910 VOP_RWUNLOCK(tfile->vnode, 0);
913 #if defined(AFS_SGI_ENV)
915 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
916 AFS_VOP_READ(tfile->vnode, &tuio, IO_ISLOCKED, &afs_osi_cred,
918 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
922 tuio.uio_rw = UIO_READ;
924 VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred, code);
926 #else /* AFS_OSF_ENV */
928 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
930 #if defined(AFS_HPUX100_ENV)
932 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
935 #if defined(AFS_LINUX20_ENV)
937 code = osi_file_uio_rdwr(tfile, &tuio, UIO_READ);
940 #if defined(AFS_DARWIN_ENV)
942 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
943 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
944 VOP_UNLOCK(tfile->vnode, 0, current_proc());
947 #if defined(AFS_XBSD_ENV)
949 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
950 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
951 VOP_UNLOCK(tfile->vnode, 0, curproc);
954 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
966 if (!tdc->ihint && nihints < maxIHint) {
979 /* otherwise we've read some, fixup length, etc and continue with next seg */
980 len = len - tuio.afsio_resid; /* compute amount really transferred */
982 afsio_skip(auio, trimlen); /* update input uio structure */
984 transferLength += len;
986 if (len <= 0) break; /* surprise eof */
989 /* if we make it here with tdc non-zero, then it is the last chunk we
990 * dealt with, and we have to release it when we're done. We hold on
991 * to it in case we need to do a prefetch, obviously.
994 ReleaseReadLock(&tdc->lock);
995 #if !defined(AFS_VM_RDWR_ENV)
996 /* try to queue prefetch, if needed */
998 if (!(tdc->mflags & DFNextStarted))
999 afs_PrefetchChunk(avc, tdc, acred, &treq);
1005 ReleaseReadLock(&avc->lock);
1007 osi_FreeSmallSpace(tvec);
1008 error = afs_CheckCode(error, &treq, 13);