2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 #include <afsconfig.h>
20 #include "../afs/param.h"
24 #include "../afs/sysincludes.h" /* Standard vendor system headers */
25 #include "../afs/afsincludes.h" /* Afs-based standard headers */
26 #include "../afs/afs_stats.h" /* statistics */
27 #include "../afs/afs_cbqueue.h"
28 #include "../afs/nfsclient.h"
29 #include "../afs/afs_osidnlc.h"
32 extern char afs_zeros[AFS_ZEROS];
35 afs_int32 nihints; /* # of above actually in-use */
39 /* Imported variables */
40 extern afs_rwlock_t afs_xdcache;
41 extern unsigned char *afs_indexFlags;
42 extern afs_hyper_t *afs_indexTimes; /* Dcache entry Access times */
43 extern afs_hyper_t afs_indexCounter; /* Fake time for marking index */
46 /* Forward declarations */
47 void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
48 struct AFS_UCRED *acred, struct vrequest *areq);
/*
 * afs_MemRead -- satisfy the read described by 'auio' from the in-memory
 * cache copy of the file 'avc', using credentials 'acred'.
 *
 * Visible structure: for each chunk overlapping the request, locate a
 * dcache entry (afs_FindDCache, or afs_GetDCache when data is missing),
 * optionally queue a background fetch (afs_BQueue/BOP_FETCH), sleep
 * interruptibly on tdc->validPos while a fetch is in progress, then copy
 * data to the caller via afs_MemReadUIO -- or from afs_zeros when the
 * request runs past cached chunk data but not past EOF -- until the
 * request is satisfied or EOF is reached.
 *
 * NOTE(review): this copy of the file is an incomplete extraction; the
 * residual per-line numbers embedded in the text are discontinuous, so
 * some statements (declarations, braces, #endif lines) are missing and
 * the visible text is not the complete, compilable function body.
 */
50 afs_MemRead(avc, auio, acred, albn, abpp, noLock)
51 register struct vcache *avc;
53 struct AFS_UCRED *acred;
58 afs_size_t totalLength;
59 afs_size_t transferLength;
61 afs_size_t offset, len, tlen;
64 afs_int32 error, trybusy=1;
71 AFS_STATCNT(afs_MemRead);
75 /* check that we have the latest status info in the vnode cache */
76 if (code = afs_InitReq(&treq, acred)) return code;
78 code = afs_VerifyVCache(avc, &treq);
80 code = afs_CheckCode(code, &treq, 8); /* failed to get it */
85 #ifndef AFS_VM_RDWR_ENV
/* NFS translator requests did not pass through the local VFS permission
 * machinery, so perform an explicit AFS ACL check here. */
86 if (AFS_NFSXLATORREQ(acred)) {
87 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
88 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
89 return afs_CheckCode(EACCES, &treq, 9);
/* scratch iovec used to build per-chunk sub-uios below (freed at exit) */
94 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
95 totalLength = auio->afsio_resid;
96 filePos = auio->afsio_offset;
97 afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
98 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
99 ICL_TYPE_INT32, totalLength,
100 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
104 ObtainReadLock(&avc->lock);
105 #if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
106 if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
107 hset(avc->flushDV, avc->m.DataVersion);
115 while (totalLength > 0) {
116 /* read all of the cached info */
117 if (filePos >= avc->m.Length) break; /* all done */
/* release the dcache entry held from the previous loop iteration
 * (presumably under an "if (tdc)" guard on a missing line -- verify) */
120 ReleaseReadLock(&tdc->lock);
123 tdc = afs_FindDCache(avc, filePos);
125 ObtainReadLock(&tdc->lock);
126 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
127 len = tdc->f.chunkBytes - offset;
130 /* a tricky question: does the presence of the DFFetching flag
131 mean that we're fetching the latest version of the file? No.
132 The server could update the file as soon as the fetch responsible
133 for the setting of the DFFetching flag completes.
135 However, the presence of the DFFetching flag (visible under
136 a dcache read lock since it is set and cleared only under a
137 dcache write lock) means that we're fetching as good a version
138 as was known to this client at the time of the last call to
139 afs_VerifyVCache, since the latter updates the stat cache's
140 m.DataVersion field under a vcache write lock, and from the
141 time that the DFFetching flag goes on in afs_GetDCache (before
142 the fetch starts), to the time it goes off (after the fetch
143 completes), afs_GetDCache keeps at least a read lock on the
146 This means that if the DFFetching flag is set, we can use that
147 data for any reads that must come from the current version of
148 the file (current == m.DataVersion).
150 Another way of looking at this same point is this: if we're
151 fetching some data and then try do an afs_VerifyVCache, the
152 VerifyVCache operation will not complete until after the
153 DFFetching flag is turned off and the dcache entry's f.versionNo
156 Note, by the way, that if DFFetching is set,
157 m.DataVersion > f.versionNo (the latter is not updated until
158 after the fetch completes).
161 ReleaseReadLock(&tdc->lock);
162 afs_PutDCache(tdc); /* before reusing tdc */
164 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
165 ObtainReadLock(&tdc->lock);
166 /* now, first try to start transfer, if we'll need the data. If
167 * data already coming, we don't need to do this, obviously. Type
168 * 2 requests never return a null dcache entry, btw.
170 if (!(tdc->dflags & DFFetching)
171 && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
172 /* have cache entry, it is not coming in now,
173 * and we'll need new data */
175 if (trybusy && !afs_BBusy()) {
177 /* daemon is not busy */
178 ObtainSharedLock(&tdc->mflock, 665);
179 if (!(tdc->mflags & DFFetchReq)) {
180 /* start the daemon (may already be running, however) */
181 UpgradeSToWLock(&tdc->mflock, 666);
182 tdc->mflags |= DFFetchReq;
183 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
184 (afs_size_t)filePos, (afs_size_t) 0,
/* afs_BQueue failed (bkg table full): undo the request flag */
187 tdc->mflags &= ~DFFetchReq;
188 trybusy = 0; /* Avoid bkg daemon since they're too busy */
189 ReleaseWriteLock(&tdc->mflock);
192 ConvertWToSLock(&tdc->mflock);
193 /* don't use bp pointer! */
196 ConvertSToRLock(&tdc->mflock);
/* wait for the queued fetch to actually start; drop every lock
 * (mflock, dcache, vcache) before sleeping so the daemon can make
 * progress, then reacquire in vcache -> dcache -> mflock order */
197 while (!code && tdc->mflags & DFFetchReq) {
198 /* don't need waiting flag on this one */
199 ReleaseReadLock(&tdc->mflock);
200 ReleaseReadLock(&tdc->lock);
201 ReleaseReadLock(&avc->lock);
202 code = afs_osi_SleepSig(&tdc->validPos);
203 ObtainReadLock(&avc->lock);
204 ObtainReadLock(&tdc->lock);
205 ObtainReadLock(&tdc->mflock);
207 ReleaseReadLock(&tdc->mflock);
214 /* now data may have started flowing in (if DFFetching is on). If
215 * data is now streaming in, then wait for some interesting stuff.
218 while (!code && (tdc->dflags & DFFetching) &&
219 tdc->validPos <= filePos) {
220 /* too early: wait for DFFetching flag to vanish,
221 * or data to appear */
222 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
223 ICL_TYPE_STRING, __FILE__,
224 ICL_TYPE_INT32, __LINE__,
225 ICL_TYPE_POINTER, tdc,
226 ICL_TYPE_INT32, tdc->dflags);
227 ReleaseReadLock(&tdc->lock);
228 ReleaseReadLock(&avc->lock);
229 code = afs_osi_SleepSig(&tdc->validPos);
230 ObtainReadLock(&avc->lock);
231 ObtainReadLock(&tdc->lock);
237 /* fetching flag gone, data is here, or we never tried (BBusy for instance) */
238 if (tdc->dflags & DFFetching) {
239 /* still fetching, some new data is here: compute length and offset */
240 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
241 len = tdc->validPos - filePos;
244 /* no longer fetching, verify data version (avoid new GetDCache call) */
245 if (hsame(avc->m.DataVersion, tdc->f.versionNo)) {
246 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
247 len = tdc->f.chunkBytes - offset;
250 /* don't have current data, so get it below */
251 ReleaseReadLock(&tdc->lock);
253 tdc = (struct dcache *) 0;
/* blocking (type 1) GetDCache: must drop the vcache lock around it */
258 ReleaseReadLock(&avc->lock);
259 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
260 ObtainReadLock(&avc->lock);
261 if (tdc) ObtainReadLock(&tdc->lock);
276 if (len > totalLength) len = totalLength; /* will read len bytes */
277 if (len <= 0) { /* shouldn't get here if DFFetching is on */
278 /* read past the end of a chunk, may not be at next chunk yet, and yet
279 also not at eof, so may have to supply fake zeros */
280 len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
281 if (len > totalLength) len = totalLength; /* and still within xfr request */
282 tlen = avc->m.Length - offset; /* and still within file */
283 if (len > tlen) len = tlen;
284 if (len > AFS_ZEROS) len = sizeof(afs_zeros); /* and in 0 buffer */
285 afsio_copy(auio, &tuio, tvec);
287 afsio_trim(&tuio, trimlen);
288 AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, &tuio, code);
295 /* get the data from the mem cache */
297 /* mung uio structure to be right for this transfer */
298 afsio_copy(auio, &tuio, tvec);
300 afsio_trim(&tuio, trimlen);
301 tuio.afsio_offset = offset;
303 code = afs_MemReadUIO(tdc->f.inode, &tuio);
310 /* otherwise we've read some, fixup length, etc and continue with next seg */
311 len = len - tuio.afsio_resid; /* compute amount really transferred */
313 afsio_skip(auio, trimlen); /* update input uio structure */
315 transferLength += len;
318 if (len <= 0) break; /* surprise eof */
319 } /* the whole while loop */
324 * tdc->lock(R) if tdc
327 /* if we make it here with tdc non-zero, then it is the last chunk we
328 * dealt with, and we have to release it when we're done. We hold on
329 * to it in case we need to do a prefetch.
332 ReleaseReadLock(&tdc->lock);
333 #ifndef AFS_VM_RDWR_ENV
334 /* try to queue prefetch, if needed */
336 afs_PrefetchChunk(avc, tdc, acred, &treq);
342 ReleaseReadLock(&avc->lock);
343 osi_FreeSmallSpace(tvec);
344 error = afs_CheckCode(error, &treq, 10);
348 /* called with the dcache entry triggering the fetch, the vcache entry involved,
349 * and a vrequest for the read call. Marks the dcache entry as having already
350 * triggered a prefetch, starts the prefetch going and sets the DFFetchReq
351 * flag in the prefetched block, so that the next call to read knows to wait
352 * for the daemon to start doing things.
354 * This function must be called with the vnode at least read-locked, and
355 * no locks on the dcache, because it plays around with dcache entries.
/*
 * NOTE(review): interior lines of this function are missing from this
 * extract (discontinuous residual line numbers), so braces/else arms of
 * some visible statements are not shown.  The numeric second arguments
 * to the lock calls (651, 652, 662-664) are presumably lock-site
 * identifiers used for lock debugging/tracing -- confirm against the
 * project's lock macros.
 */
357 void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
358 struct AFS_UCRED *acred, struct vrequest *areq)
360 register struct dcache *tdc;
362 afs_size_t j1, j2; /* junk vbls for GetDCache to trash */
364 offset = adc->f.chunk+1; /* next chunk we'll need */
365 offset = AFS_CHUNKTOBASE(offset); /* base of next chunk */
366 ObtainReadLock(&adc->lock);
367 ObtainSharedLock(&adc->mflock, 662);
/* only prefetch if the next chunk is inside the file, we have not
 * already tried for this chunk, and the bkg daemons are not all busy */
368 if (offset < avc->m.Length && !(adc->mflags & DFNextStarted) && !afs_BBusy()) {
371 UpgradeSToWLock(&adc->mflock, 663);
372 adc->mflags |= DFNextStarted; /* we've tried to prefetch for this guy */
373 ReleaseWriteLock(&adc->mflock);
374 ReleaseReadLock(&adc->lock);
376 tdc = afs_GetDCache(avc, offset, areq, &j1, &j2, 2); /* type 2 never returns 0 */
377 ObtainSharedLock(&tdc->mflock, 651);
378 if (!(tdc->mflags & DFFetchReq)) {
379 /* ask the daemon to do the work */
380 UpgradeSToWLock(&tdc->mflock, 652);
381 tdc->mflags |= DFFetchReq; /* guaranteed to be cleared by BKG or GetDCache */
382 /* last parm (1) tells bkg daemon to do an afs_PutDCache when it is done,
383 * since we don't want to wait for it to finish before doing so ourselves.
385 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
386 (afs_size_t) offset, (afs_size_t) 1, tdc);
388 /* Bkg table full; just abort non-important prefetching to avoid deadlocks */
389 tdc->mflags &= ~DFFetchReq;
390 ReleaseWriteLock(&tdc->mflock);
394 * DCLOCKXXX: This is a little sketchy, since someone else
395 * could have already started a prefetch.. In practice,
396 * this probably doesn't matter; at most it would cause an
397 * extra slot in the BKG table to be used up when someone
398 * prefetches this for the second time.
/* roll back DFNextStarted on adc so a later read can retry the prefetch */
400 ObtainReadLock(&adc->lock);
401 ObtainWriteLock(&adc->mflock, 664);
402 adc->mflags &= ~DFNextStarted;
403 ReleaseWriteLock(&adc->mflock);
404 ReleaseReadLock(&adc->lock);
406 ReleaseWriteLock(&tdc->mflock);
409 ReleaseSharedLock(&tdc->mflock);
413 ReleaseSharedLock(&adc->mflock);
414 ReleaseReadLock(&adc->lock);
419 /* if the vcache is up-to-date, and the request fits entirely into the chunk
420 * that the hint here references, then we just use it quickly, otherwise we
421 * have to call the slow read.
423 * This could be generalized in several ways to take advantage of partial
424 * state even when all the chips don't fall the right way. For instance,
425 * if the hint is good and the first part of the read request can be
426 * satisfied from the chunk, then just do the read. After the read has
427 * completed, check to see if there's more. (Chances are there won't be.)
428 * If there is more, then just call afs_UFSReadSlow and let it do the rest.
430 * For the time being, I'm ignoring quick.f, but it should be used at
432 * do this in the future avc->quick.f = tfile; but I think it
433 * has to be done under a write lock, but don't want to wait on the
436 /* everywhere that a dcache can be freed (look for NULLIDX)
437 * probably does it under a write lock on xdcache. Need to invalidate
439 * Also need to worry about DFFetching, and IFFree, I think. */
/* Saved dcache pointer for the fast-read hint machinery described in the
 * comments above.  NOTE(review): 'savedc' is not referenced anywhere in
 * the visible portion of this extract -- it may be used on missing lines
 * or be leftover from the experimental hint code; verify before removal. */
440 static struct dcache *savedc = 0;
/*
 * afs_UFSReadFast -- fast-path read using the per-vcache "quick" dcache
 * hint.  If the vcache is up to date (CStatd), the hinted dcache entry is
 * still valid (index not NULLIDX, not IFFree/IFDiscarded, stamp matches),
 * the entire request fits inside that one chunk, and no fetch is in
 * progress (DFFetching clear), the data is read directly from the cache
 * file with the platform-specific VOP/VNOP read; otherwise control falls
 * through to the general path, afs_UFSRead.
 *
 * NOTE(review): this extract is missing interior lines (discontinuous
 * residual line numbers), including most of the #if/#else/#endif lines of
 * the platform ladder and the declarations of 'tdc', 'code', 'offDiff',
 * and 'readLocked'; the visible text is not the complete function.
 */
442 afs_UFSReadFast(avc, auio, acred, albn, abpp, noLock)
443 register struct vcache *avc;
445 struct AFS_UCRED *acred;
450 struct vrequest treq;
454 struct osi_file *tfile;
/* hold afs_xdcache while validating the hint so the dcache entry cannot
 * be freed/recycled out from under us */
458 ObtainReadLock(&avc->lock);
459 ObtainReadLock(&afs_xdcache);
461 if ((avc->states & CStatd) /* up to date */
462 && (tdc = avc->quick.dc) && (tdc->index != NULLIDX)
463 && !(afs_indexFlags[tdc->index] & (IFFree | IFDiscarded))) {
468 ReleaseReadLock(&afs_xdcache);
469 if (tdc->stamp == avc->quick.stamp) {
471 ObtainReadLock(&tdc->lock);
/* re-check the stamp under tdc->lock; then require the whole request to
 * lie inside this chunk with no fetch in flight */
474 if ((tdc->stamp == avc->quick.stamp) /* hint matches */
475 && ((offDiff = (auio->afsio_offset - avc->quick.minLoc)) >= 0)
476 && (tdc->f.chunkBytes >= auio->afsio_resid + offDiff)
477 && !(tdc->dflags & DFFetching)) { /* fits in chunk */
/* temporarily rebase the uio offset from file space to chunk space;
 * restored after the read below */
479 auio->afsio_offset -= avc->quick.minLoc;
481 afs_Trace4(afs_iclSetp, CM_TRACE_READFAST, ICL_TYPE_POINTER, avc,
482 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(auio->afsio_offset),
483 ICL_TYPE_INT32, auio->afsio_resid,
484 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
486 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
/* platform-specific read of the cache file: exactly one branch of the
 * (partially missing) AFS_*_ENV conditional ladder below is compiled in */
491 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, auio, NULL, NULL, NULL, &afs_osi_cred);
495 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, auio, NULL, NULL);
497 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t)&auio->afsio_offset, auio, NULL, NULL, -1);
503 VOP_RWLOCK(tfile->vnode, 0);
504 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
505 VOP_RWUNLOCK(tfile->vnode, 0);
508 #if defined(AFS_SGI_ENV)
510 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
511 AFS_VOP_READ(tfile->vnode, auio, IO_ISLOCKED, &afs_osi_cred, code);
512 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
516 auio->uio_rw = UIO_READ;
518 VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred, code);
520 #else /* AFS_OSF_ENV */
521 #if defined(AFS_HPUX100_ENV)
523 code = VOP_RDWR(tfile->vnode, auio, UIO_READ, 0, &afs_osi_cred);
526 #if defined(AFS_LINUX20_ENV)
528 code = osi_file_uio_rdwr(tfile, auio, UIO_READ);
531 #if defined(AFS_DARWIN_ENV)
533 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
534 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
535 VOP_UNLOCK(tfile->vnode, 0, current_proc());
538 #if defined(AFS_FBSD_ENV)
540 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
541 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
542 VOP_UNLOCK(tfile->vnode, 0, curproc);
545 code = VOP_RDWR(tfile->vnode, auio, UIO_READ, 0, &afs_osi_cred);
/* restore the caller's uio offset to file space */
554 auio->afsio_offset += avc->quick.minLoc;
556 /* Fix up LRU info */
557 hset(afs_indexTimes[tdc->index], afs_indexCounter);
558 hadd32(afs_indexCounter, 1);
561 ReleaseReadLock(&avc->lock);
562 #ifndef AFS_VM_RDWR_ENV
/* opportunistically kick off a prefetch of the next chunk */
563 if (!(code = afs_InitReq(&treq, acred))) {
564 if (!(tdc->mflags & DFNextStarted))
565 afs_PrefetchChunk(avc, tdc, acred, &treq);
569 if (readLocked) ReleaseReadLock(&tdc->lock);
573 if (!tdc->f.chunkBytes) { /* debugging f.chunkBytes == 0 problem */
576 if (readLocked) ReleaseReadLock(&tdc->lock);
579 ReleaseReadLock(&afs_xdcache);
582 /* come here if fast path doesn't work for some reason or other */
584 ReleaseReadLock(&avc->lock);
585 return afs_UFSRead(avc, auio, acred, albn, abpp, noLock);
/*
 * afs_UFSRead -- general read path for the on-disk (UFS) cache: satisfy
 * the read described by 'auio' from cache files on local disk, fetching
 * missing chunks via the background daemon as needed.  Mirrors the
 * chunk-walking structure of afs_MemRead above, but reads each chunk from
 * its cache file (osi_UFSOpen + platform VOP/VNOP read) instead of
 * afs_MemReadUIO, and maintains a per-dcache open-file hint (ihint).
 *
 * NOTE(review): this copy of the file is an incomplete extraction; the
 * residual per-line numbers are discontinuous, so declarations, braces,
 * and most #if/#endif lines of the platform ladder are missing and the
 * visible text is not the complete, compilable function body.
 */
588 afs_UFSRead(avc, auio, acred, albn, abpp, noLock)
591 struct AFS_UCRED *acred;
596 afs_size_t totalLength;
597 afs_size_t transferLength;
599 afs_size_t offset, len, tlen;
601 struct dcache *tdc=0;
605 struct osi_file *tfile;
609 struct vrequest treq;
611 AFS_STATCNT(afs_UFSRead);
/* fail immediately if the vcache already carries a stored error */
612 if (avc && avc->vc_error)
615 /* check that we have the latest status info in the vnode cache */
616 if (code = afs_InitReq(&treq, acred)) return code;
619 osi_Panic ("null avc in afs_UFSRead");
621 code = afs_VerifyVCache(avc, &treq);
623 code = afs_CheckCode(code, &treq, 11); /* failed to get it */
629 #ifndef AFS_VM_RDWR_ENV
/* NFS translator requests need an explicit AFS ACL check (cf. afs_MemRead) */
630 if (AFS_NFSXLATORREQ(acred)) {
631 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
632 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
633 return afs_CheckCode(EACCES, &treq, 12);
/* scratch iovec for per-chunk sub-uios (freed at exit) */
638 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
639 totalLength = auio->afsio_resid;
640 filePos = auio->afsio_offset;
641 afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
642 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
643 ICL_TYPE_INT32, totalLength,
644 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
648 ObtainReadLock(&avc->lock);
649 #if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
650 if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
651 hset(avc->flushDV, avc->m.DataVersion);
655 while (totalLength > 0) {
656 /* read all of the cached info */
657 if (filePos >= avc->m.Length) break; /* all done */
/* release the dcache entry held from the previous loop iteration
 * (presumably under an "if (tdc)" guard on a missing line -- verify) */
660 ReleaseReadLock(&tdc->lock);
663 tdc = afs_FindDCache(avc, filePos);
665 ObtainReadLock(&tdc->lock);
666 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
667 len = tdc->f.chunkBytes - offset;
670 /* a tricky question: does the presence of the DFFetching flag
671 mean that we're fetching the latest version of the file? No.
672 The server could update the file as soon as the fetch responsible
673 for the setting of the DFFetching flag completes.
675 However, the presence of the DFFetching flag (visible under
676 a dcache read lock since it is set and cleared only under a
677 dcache write lock) means that we're fetching as good a version
678 as was known to this client at the time of the last call to
679 afs_VerifyVCache, since the latter updates the stat cache's
680 m.DataVersion field under a vcache write lock, and from the
681 time that the DFFetching flag goes on in afs_GetDCache (before
682 the fetch starts), to the time it goes off (after the fetch
683 completes), afs_GetDCache keeps at least a read lock on the
686 This means that if the DFFetching flag is set, we can use that
687 data for any reads that must come from the current version of
688 the file (current == m.DataVersion).
690 Another way of looking at this same point is this: if we're
691 fetching some data and then try do an afs_VerifyVCache, the
692 VerifyVCache operation will not complete until after the
693 DFFetching flag is turned off and the dcache entry's f.versionNo
696 Note, by the way, that if DFFetching is set,
697 m.DataVersion > f.versionNo (the latter is not updated until
698 after the fetch completes).
701 ReleaseReadLock(&tdc->lock);
702 afs_PutDCache(tdc); /* before reusing tdc */
704 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
705 ObtainReadLock(&tdc->lock);
709 /* now, first try to start transfer, if we'll need the data. If
710 * data already coming, we don't need to do this, obviously. Type
711 * 2 requests never return a null dcache entry, btw. */
712 if (!(tdc->dflags & DFFetching)
713 && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
714 /* have cache entry, it is not coming in now, and we'll need new data */
716 if (trybusy && !afs_BBusy()) {
718 /* daemon is not busy */
719 ObtainSharedLock(&tdc->mflock, 667);
720 if (!(tdc->mflags & DFFetchReq)) {
721 UpgradeSToWLock(&tdc->mflock, 668);
722 tdc->mflags |= DFFetchReq;
723 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
724 (afs_size_t) filePos, (afs_size_t) 0,
727 /* Bkg table full; retry deadlocks */
728 tdc->mflags &= ~DFFetchReq;
729 trybusy = 0; /* Avoid bkg daemon since they're too busy */
730 ReleaseWriteLock(&tdc->mflock);
733 ConvertWToSLock(&tdc->mflock);
736 ConvertSToRLock(&tdc->mflock);
/* wait for the queued fetch to start; drop every lock before sleeping,
 * then reacquire in vcache -> dcache -> mflock order */
737 while (!code && tdc->mflags & DFFetchReq) {
738 /* don't need waiting flag on this one */
739 ReleaseReadLock(&tdc->mflock);
740 ReleaseReadLock(&tdc->lock);
741 ReleaseReadLock(&avc->lock);
742 code = afs_osi_SleepSig(&tdc->validPos);
743 ObtainReadLock(&avc->lock);
744 ObtainReadLock(&tdc->lock);
745 ObtainReadLock(&tdc->mflock);
747 ReleaseReadLock(&tdc->mflock);
754 /* now data may have started flowing in (if DFFetching is on). If
755 * data is now streaming in, then wait for some interesting stuff.
758 while (!code && (tdc->dflags & DFFetching) &&
759 tdc->validPos <= filePos) {
760 /* too early: wait for DFFetching flag to vanish,
761 * or data to appear */
762 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
763 ICL_TYPE_STRING, __FILE__,
764 ICL_TYPE_INT32, __LINE__,
765 ICL_TYPE_POINTER, tdc,
766 ICL_TYPE_INT32, tdc->dflags);
767 ReleaseReadLock(&tdc->lock);
768 ReleaseReadLock(&avc->lock);
769 code = afs_osi_SleepSig(&tdc->validPos);
770 ObtainReadLock(&avc->lock);
771 ObtainReadLock(&tdc->lock);
777 /* fetching flag gone, data is here, or we never tried
778 * (BBusy for instance) */
779 if (tdc->dflags & DFFetching) {
780 /* still fetching, some new data is here:
781 * compute length and offset */
782 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
783 len = tdc->validPos - filePos;
786 /* no longer fetching, verify data version (avoid new
788 if (hsame(avc->m.DataVersion, tdc->f.versionNo)) {
789 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
790 len = tdc->f.chunkBytes - offset;
793 /* don't have current data, so get it below */
794 ReleaseReadLock(&tdc->lock);
796 tdc = (struct dcache *) 0;
/* blocking (type 1) GetDCache: must drop the vcache lock around it */
801 ReleaseReadLock(&avc->lock);
802 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
803 ObtainReadLock(&avc->lock);
804 if (tdc) ObtainReadLock(&tdc->lock);
808 afs_Trace3(afs_iclSetp, CM_TRACE_VNODEREAD,
809 ICL_TYPE_POINTER, tdc,
810 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(offset),
811 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(len));
816 if (len > totalLength) len = totalLength; /* will read len bytes */
817 if (len <= 0) { /* shouldn't get here if DFFetching is on */
818 /* read past the end of a chunk, may not be at next chunk yet, and yet
819 also not at eof, so may have to supply fake zeros */
820 len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
821 if (len > totalLength) len = totalLength; /* and still within xfr request */
822 tlen = avc->m.Length - offset; /* and still within file */
823 if (len > tlen) len = tlen;
824 if (len > AFS_ZEROS) len = sizeof(afs_zeros); /* and in 0 buffer */
825 afsio_copy(auio, &tuio, tvec);
827 afsio_trim(&tuio, trimlen);
828 AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, &tuio, code);
835 /* get the data from the file */
/* reuse the cached open-file handle (ihint) if its inode still matches;
 * on mismatch, warn and drop the stale hint */
837 if (tfile = tdc->ihint) {
838 if (tdc->f.inode != tfile->inum){
839 afs_warn( "afs_UFSRead: %x hint mismatch tdc %d inum %d\n",
840 tdc, tdc->f.inode, tfile->inum );
842 tdc->ihint = tfile = 0;
852 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
853 /* mung uio structure to be right for this transfer */
854 afsio_copy(auio, &tuio, tvec);
856 afsio_trim(&tuio, trimlen);
857 tuio.afsio_offset = offset;
/* platform-specific read of the cache file: exactly one branch of the
 * (partially missing) AFS_*_ENV conditional ladder below is compiled in */
861 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL,
862 NULL, &afs_osi_cred);
866 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL);
867 /* Flush all JFS pages now for big performance gain in big file cases
868 * If we do something like this, must check to be sure that AFS file
869 * isn't mmapped... see afs_gn_map() for why.
872 if (tfile->vnode->v_gnode && tfile->vnode->v_gnode->gn_seg) {
873 many different ways to do similar things:
874 so far, the best performing one is #2, but #1 might match it if we
875 straighten out the confusion regarding which pages to flush. It
877 1. vm_flushp(tfile->vnode->v_gnode->gn_seg, 0, len/PAGESIZE - 1);
878 2. vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
879 (len + PAGESIZE-1)/PAGESIZE);
880 3. vms_inactive(tfile->vnode->v_gnode->gn_seg) Doesn't work correctly
881 4. vms_delete(tfile->vnode->v_gnode->gn_seg) probably also fails
882 tfile->vnode->v_gnode->gn_seg = NULL;
886 Unfortunately, this seems to cause frequent "cache corruption" episodes.
887 vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
888 (len + PAGESIZE-1)/PAGESIZE);
892 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t)&offset, &tuio, NULL, NULL, -1);
898 VOP_RWLOCK(tfile->vnode, 0);
899 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
900 VOP_RWUNLOCK(tfile->vnode, 0);
903 #if defined(AFS_SGI_ENV)
905 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
906 AFS_VOP_READ(tfile->vnode, &tuio, IO_ISLOCKED, &afs_osi_cred,
908 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
912 tuio.uio_rw = UIO_READ;
914 VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred, code);
916 #else /* AFS_OSF_ENV */
918 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
920 #if defined(AFS_HPUX100_ENV)
922 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
925 #if defined(AFS_LINUX20_ENV)
927 code = osi_file_uio_rdwr(tfile, &tuio, UIO_READ);
930 #if defined(AFS_DARWIN_ENV)
932 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
933 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
934 VOP_UNLOCK(tfile->vnode, 0, current_proc());
937 #if defined(AFS_FBSD_ENV)
939 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
940 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
941 VOP_UNLOCK(tfile->vnode, 0, curproc);
944 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
/* cache the open file handle as this dcache's ihint if hint slots remain
 * (nihints/maxIHint bookkeeping; body of this branch is on missing lines) */
956 if (!tdc->ihint && nihints < maxIHint) {
969 /* otherwise we've read some, fixup length, etc and continue with next seg */
970 len = len - tuio.afsio_resid; /* compute amount really transferred */
972 afsio_skip(auio, trimlen); /* update input uio structure */
974 transferLength += len;
976 if (len <= 0) break; /* surprise eof */
979 /* if we make it here with tdc non-zero, then it is the last chunk we
980 * dealt with, and we have to release it when we're done. We hold on
981 * to it in case we need to do a prefetch, obviously.
984 ReleaseReadLock(&tdc->lock);
985 #ifndef AFS_VM_RDWR_ENV
986 /* try to queue prefetch, if needed */
988 if (!(tdc->mflags & DFNextStarted))
989 afs_PrefetchChunk(avc, tdc, acred, &treq);
995 ReleaseReadLock(&avc->lock);
997 osi_FreeSmallSpace(tvec);
998 error = afs_CheckCode(error, &treq, 13);