2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 #include <afsconfig.h>
20 #include "../afs/param.h"
24 #include "../afs/sysincludes.h" /* Standard vendor system headers */
25 #include "../afs/afsincludes.h" /* Afs-based standard headers */
26 #include "../afs/afs_stats.h" /* statistics */
27 #include "../afs/afs_cbqueue.h"
28 #include "../afs/nfsclient.h"
29 #include "../afs/afs_osidnlc.h"
32 extern char afs_zeros[AFS_ZEROS];
35 afs_int32 nihints; /* # of above actually in-use */
39 /* Imported variables */
40 extern afs_rwlock_t afs_xdcache;
41 extern unsigned char *afs_indexFlags;
42 extern afs_hyper_t *afs_indexTimes; /* Dcache entry Access times */
43 extern afs_hyper_t afs_indexCounter; /* Fake time for marking index */
46 /* Forward declarations */
47 void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
48 struct AFS_UCRED *acred, struct vrequest *areq);
/*
 * afs_MemRead
 *
 * Satisfy the read described by the uio 'auio' for vnode 'avc' out of the
 * in-memory chunk cache, under credentials 'acred'.  The main loop walks the
 * request chunk by chunk: it locates the dcache entry covering the current
 * file position (afs_FindDCache / afs_GetDCache), optionally queues a
 * background fetch and sleeps until data streams in, zero-fills any hole
 * that lies past the chunk's valid bytes but still inside the file, and
 * copies data out with afs_MemReadUIO.
 *
 * NOTE(review): this listing is elided (the embedded original line numbers
 * jump), so some statements, braces and declarations are not visible here;
 * comments below describe only what the visible code shows.
 */
50 afs_MemRead(avc, auio, acred, albn, abpp, noLock)
51 register struct vcache *avc;
53 struct AFS_UCRED *acred;
58 afs_size_t totalLength;
59 afs_size_t transferLength;
61 afs_size_t offset, len, tlen;
64 afs_int32 error, trybusy=1;
71 AFS_STATCNT(afs_MemRead);
75 /* check that we have the latest status info in the vnode cache */
76 if (code = afs_InitReq(&treq, acred)) return code;
78 code = afs_VerifyVCache(avc, &treq);
80 code = afs_CheckCode(code, &treq, 8); /* failed to get it */
/* for NFS-translator callers, verify read permission explicitly
 * (exec permission is accepted in lieu of read) */
85 #ifndef AFS_VM_RDWR_ENV
86 if (AFS_NFSXLATORREQ(acred)) {
87 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
88 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
89 return afs_CheckCode(EACCES, &treq, 9);
/* scratch iovec used by afsio_copy for per-chunk uio copies; freed at exit */
94 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
95 totalLength = auio->afsio_resid;
96 filePos = auio->afsio_offset;
97 afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
98 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
99 ICL_TYPE_INT32, totalLength,
100 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
104 ObtainReadLock(&avc->lock);
105 #if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
/* remember the data version seen at read time, for text-segment handling */
106 if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
107 hset(avc->flushDV, avc->m.DataVersion);
/* main transfer loop: one iteration per dcache chunk touched by the request */
115 while (totalLength > 0) {
116 /* read all of the cached info */
117 if (filePos >= avc->m.Length) break; /* all done */
120 ReleaseReadLock(&tdc->lock);
123 tdc = afs_FindDCache(avc, filePos);
125 ObtainReadLock(&tdc->lock);
126 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
127 len = tdc->f.chunkBytes - offset;
130 /* a tricky question: does the presence of the DFFetching flag
131 mean that we're fetching the latest version of the file? No.
132 The server could update the file as soon as the fetch responsible
133 for the setting of the DFFetching flag completes.
135 However, the presence of the DFFetching flag (visible under
136 a dcache read lock since it is set and cleared only under a
137 dcache write lock) means that we're fetching as good a version
138 as was known to this client at the time of the last call to
139 afs_VerifyVCache, since the latter updates the stat cache's
140 m.DataVersion field under a vcache write lock, and from the
141 time that the DFFetching flag goes on in afs_GetDCache (before
142 the fetch starts), to the time it goes off (after the fetch
143 completes), afs_GetDCache keeps at least a read lock on the
146 This means that if the DFFetching flag is set, we can use that
147 data for any reads that must come from the current version of
148 the file (current == m.DataVersion).
150 Another way of looking at this same point is this: if we're
151 fetching some data and then try do an afs_VerifyVCache, the
152 VerifyVCache operation will not complete until after the
153 DFFetching flag is turned off and the dcache entry's f.versionNo
156 Note, by the way, that if DFFetching is set,
157 m.DataVersion > f.versionNo (the latter is not updated until
158 after the fetch completes).
161 ReleaseReadLock(&tdc->lock);
162 afs_PutDCache(tdc); /* before reusing tdc */
/* type-2 GetDCache: per the comment below it never returns NULL */
164 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
165 ObtainReadLock(&tdc->lock);
166 /* now, first try to start transfer, if we'll need the data. If
167 * data already coming, we don't need to do this, obviously. Type
168 * 2 requests never return a null dcache entry, btw.
170 if (!(tdc->dflags & DFFetching)
171 && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
172 /* have cache entry, it is not coming in now,
173 * and we'll need new data */
/* hand the fetch to the background daemon if it has a free slot */
175 if (trybusy && !afs_BBusy()) {
177 /* daemon is not busy */
178 ObtainSharedLock(&tdc->mflock, 665);
179 if (!(tdc->mflags & DFFetchReq)) {
180 /* start the daemon (may already be running, however) */
181 UpgradeSToWLock(&tdc->mflock, 666);
182 tdc->mflags |= DFFetchReq;
183 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
184 (afs_size_t)filePos, (afs_size_t) 0,
/* BQueue failed: clear the request flag and stop trying the daemon */
187 tdc->mflags &= ~DFFetchReq;
188 trybusy = 0; /* Avoid bkg daemon since they're too busy */
189 ReleaseWriteLock(&tdc->mflock);
192 ConvertWToSLock(&tdc->mflock);
193 /* don't use bp pointer! */
/* wait for the daemon to pick up the request: drop all three locks
 * before sleeping, then reacquire in avc -> tdc -> mflock order.
 * A nonzero return from afs_osi_SleepSig (interrupted sleep) ends
 * this loop and the streaming-wait loop below via the !code guard. */
196 ConvertSToRLock(&tdc->mflock);
197 while (!code && tdc->mflags & DFFetchReq) {
198 /* don't need waiting flag on this one */
199 ReleaseReadLock(&tdc->mflock);
200 ReleaseReadLock(&tdc->lock);
201 ReleaseReadLock(&avc->lock);
202 code = afs_osi_SleepSig(&tdc->validPos);
203 ObtainReadLock(&avc->lock);
204 ObtainReadLock(&tdc->lock);
205 ObtainReadLock(&tdc->mflock);
207 ReleaseReadLock(&tdc->mflock);
214 /* now data may have started flowing in (if DFFetching is on). If
215 * data is now streaming in, then wait for some interesting stuff.
218 while (!code && (tdc->dflags & DFFetching) &&
219 tdc->validPos <= filePos) {
220 /* too early: wait for DFFetching flag to vanish,
221 * or data to appear */
222 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
223 ICL_TYPE_STRING, __FILE__,
224 ICL_TYPE_INT32, __LINE__,
225 ICL_TYPE_POINTER, tdc,
226 ICL_TYPE_INT32, tdc->dflags);
227 ReleaseReadLock(&tdc->lock);
228 ReleaseReadLock(&avc->lock);
229 code = afs_osi_SleepSig(&tdc->validPos);
230 ObtainReadLock(&avc->lock);
231 ObtainReadLock(&tdc->lock);
237 /* fetching flag gone, data is here, or we never tried (BBusy for instance) */
238 if (tdc->dflags & DFFetching) {
239 /* still fetching, some new data is here: compute length and offset */
240 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
241 len = tdc->validPos - filePos;
244 /* no longer fetching, verify data version (avoid new GetDCache call) */
245 if (hsame(avc->m.DataVersion, tdc->f.versionNo)) {
246 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
247 len = tdc->f.chunkBytes - offset;
250 /* don't have current data, so get it below */
251 ReleaseReadLock(&tdc->lock);
253 tdc = (struct dcache *) 0;
/* type-1 GetDCache: fetch synchronously; drop avc lock across the call */
258 ReleaseReadLock(&avc->lock);
259 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
260 ObtainReadLock(&avc->lock);
261 ObtainReadLock(&tdc->lock);
276 if (len > totalLength) len = totalLength; /* will read len bytes */
277 if (len <= 0) { /* shouldn't get here if DFFetching is on */
278 /* read past the end of a chunk, may not be at next chunk yet, and yet
279 also not at eof, so may have to supply fake zeros */
280 len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
281 if (len > totalLength) len = totalLength; /* and still within xfr request */
/* NOTE(review): 'offset' here is chunk-relative, yet it is subtracted
 * from the absolute file length; a filePos-based bound would be the
 * obvious form -- verify against upstream before changing. */
282 tlen = avc->m.Length - offset; /* and still within file */
283 if (len > tlen) len = tlen;
/* AFS_ZEROS is the size of afs_zeros[], so these two spellings agree */
284 if (len > AFS_ZEROS) len = sizeof(afs_zeros); /* and in 0 buffer */
285 afsio_copy(auio, &tuio, tvec);
287 afsio_trim(&tuio, trimlen);
288 AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, &tuio, code);
295 /* get the data from the mem cache */
297 /* mung uio structure to be right for this transfer */
298 afsio_copy(auio, &tuio, tvec);
300 afsio_trim(&tuio, trimlen);
301 tuio.afsio_offset = offset;
/* actual copy out of the memory-cache "inode" into the caller's buffers */
303 code = afs_MemReadUIO(tdc->f.inode, &tuio);
310 /* otherwise we've read some, fixup length, etc and continue with next seg */
311 len = len - tuio.afsio_resid; /* compute amount really transferred */
313 afsio_skip(auio, trimlen); /* update input uio structure */
315 transferLength += len;
318 if (len <= 0) break; /* surprise eof */
319 } /* the whole while loop */
324 * tdc->lock(R) if tdc
327 /* if we make it here with tdc non-zero, then it is the last chunk we
328 * dealt with, and we have to release it when we're done. We hold on
329 * to it in case we need to do a prefetch.
332 ReleaseReadLock(&tdc->lock);
333 #ifndef AFS_VM_RDWR_ENV
334 /* try to queue prefetch, if needed */
/* kick off a prefetch of the next chunk before releasing everything */
336 afs_PrefetchChunk(avc, tdc, acred, &treq);
342 ReleaseReadLock(&avc->lock);
343 osi_FreeSmallSpace(tvec);
344 error = afs_CheckCode(error, &treq, 10);
348 /* called with the dcache entry triggering the fetch, the vcache entry involved,
349 * and a vrequest for the read call. Marks the dcache entry as having already
350 * triggered a prefetch, starts the prefetch going and sets the DFFetchReq
351 * flag in the prefetched block, so that the next call to read knows to wait
352 * for the daemon to start doing things.
354 * This function must be called with the vnode at least read-locked, and
355 * no locks on the dcache, because it plays around with dcache entries.
 *
 * NOTE(review): elided listing -- some braces/returns of this function are
 * not visible; comments describe only the visible statements. */
357 void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
358 struct AFS_UCRED *acred, struct vrequest *areq)
360 register struct dcache *tdc;
362 afs_size_t j1, j2; /* junk vbls for GetDCache to trash */
/* compute the base file offset of the chunk after 'adc' */
364 offset = adc->f.chunk+1; /* next chunk we'll need */
365 offset = AFS_CHUNKTOBASE(offset); /* base of next chunk */
366 ObtainReadLock(&adc->lock);
367 ObtainSharedLock(&adc->mflock, 662);
/* only prefetch if the next chunk is inside the file, we haven't already
 * tried for this dcache entry, and the background daemon has a free slot */
368 if (offset < avc->m.Length && !(adc->mflags & DFNextStarted) && !afs_BBusy()) {
371 UpgradeSToWLock(&adc->mflock, 663);
372 adc->mflags |= DFNextStarted; /* we've tried to prefetch for this guy */
373 ReleaseWriteLock(&adc->mflock);
374 ReleaseReadLock(&adc->lock);
376 tdc = afs_GetDCache(avc, offset, areq, &j1, &j2, 2); /* type 2 never returns 0 */
377 ObtainSharedLock(&tdc->mflock, 651);
378 if (!(tdc->mflags & DFFetchReq)) {
379 /* ask the daemon to do the work */
380 UpgradeSToWLock(&tdc->mflock, 652);
381 tdc->mflags |= DFFetchReq; /* guaranteed to be cleared by BKG or GetDCache */
382 /* last parm (1) tells bkg daemon to do an afs_PutDCache when it is done,
383 * since we don't want to wait for it to finish before doing so ourselves.
385 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
386 (afs_size_t) offset, (afs_size_t) 1, tdc);
388 /* Bkg table full; just abort non-important prefetching to avoid deadlocks */
389 tdc->mflags &= ~DFFetchReq;
390 ReleaseWriteLock(&tdc->mflock);
394 * DCLOCKXXX: This is a little sketchy, since someone else
395 * could have already started a prefetch.. In practice,
396 * this probably doesn't matter; at most it would cause an
397 * extra slot in the BKG table to be used up when someone
398 * prefetches this for the second time.
/* roll back DFNextStarted on 'adc' so a later read retries the prefetch */
400 ObtainReadLock(&adc->lock);
401 ObtainWriteLock(&adc->mflock, 664);
402 adc->mflags &= ~DFNextStarted;
403 ReleaseWriteLock(&adc->mflock);
404 ReleaseReadLock(&adc->lock);
406 ReleaseWriteLock(&tdc->mflock);
/* fetch already requested by someone else: nothing to do */
409 ReleaseSharedLock(&tdc->mflock);
/* prefetch not attempted: just drop the locks taken above */
413 ReleaseSharedLock(&adc->mflock);
414 ReleaseReadLock(&adc->lock);
419 /* if the vcache is up-to-date, and the request fits entirely into the chunk
420 * that the hint here references, then we just use it quickly, otherwise we
421 * have to call the slow read.
423 * This could be generalized in several ways to take advantage of partial
424 * state even when all the chips don't fall the right way. For instance,
425 * if the hint is good and the first part of the read request can be
426 * satisfied from the chunk, then just do the read. After the read has
427 * completed, check to see if there's more. (Chances are there won't be.)
428 * If there is more, then just call afs_UFSReadSlow and let it do the rest.
430 * For the time being, I'm ignoring quick.f, but it should be used at
432 * do this in the future avc->quick.f = tfile; but I think it
433 * has to be done under a write lock, but don't want to wait on the
436 /* everywhere that a dcache can be freed (look for NULLIDX)
437 * probably does it under a write lock on xdcache. Need to invalidate
439 * Also need to worry about DFFetching, and IFFree, I think. */
/* NOTE(review): 'savedc' is not referenced anywhere in the visible portion
 * of this listing -- possibly debugging state; confirm against full file. */
440 static struct dcache *savedc = 0;
/*
 * afs_UFSReadFast -- fast-path read for the disk (UFS) cache.  If the
 * vcache's 'quick' dcache hint is valid and the whole request fits inside
 * that chunk, read straight from the cache file; otherwise fall through to
 * the general afs_UFSRead at the bottom.
 *
 * NOTE(review): elided listing -- several branches/braces are not visible.
 */
442 afs_UFSReadFast(avc, auio, acred, albn, abpp, noLock)
443 register struct vcache *avc;
445 struct AFS_UCRED *acred;
450 struct vrequest treq;
454 struct osi_file *tfile;
458 ObtainReadLock(&avc->lock);
459 ObtainReadLock(&afs_xdcache);
/* hint sanity checks done under afs_xdcache so the entry can't be freed:
 * stat info current, hint points at a live (not free/discarded) dcache */
461 if ((avc->states & CStatd) /* up to date */
462 && (tdc = avc->quick.dc) && (tdc->index != NULLIDX)
463 && !(afs_indexFlags[tdc->index] & (IFFree | IFDiscarded))) {
468 ReleaseReadLock(&afs_xdcache);
469 if (tdc->stamp == avc->quick.stamp) {
471 ObtainReadLock(&tdc->lock);
/* re-check the stamp under tdc->lock, and require the request to fit
 * entirely within the hinted chunk with no fetch in progress */
474 if ((tdc->stamp == avc->quick.stamp) /* hint matches */
475 && ((offDiff = (auio->afsio_offset - avc->quick.minLoc)) >= 0)
476 && (tdc->f.chunkBytes >= auio->afsio_resid + offDiff)
477 && !(tdc->dflags & DFFetching)) { /* fits in chunk */
/* translate the file offset into a cache-file offset for the read */
479 auio->afsio_offset -= avc->quick.minLoc;
481 afs_Trace4(afs_iclSetp, CM_TRACE_READFAST, ICL_TYPE_POINTER, avc,
482 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(auio->afsio_offset),
483 ICL_TYPE_INT32, auio->afsio_resid,
484 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
/* open the cache file backing this chunk by inode number */
486 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
/* platform-specific read of the cache file; each #ifdef arm performs the
 * same logical operation (read 'auio' from tfile->vnode) with that
 * kernel's vnode interface and locking convention */
491 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, auio, NULL, NULL, NULL, &afs_osi_cred);
495 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, auio, NULL, NULL);
497 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t)&auio->afsio_offset, auio, NULL, NULL, -1);
503 VOP_RWLOCK(tfile->vnode, 0);
504 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
505 VOP_RWUNLOCK(tfile->vnode, 0);
508 #if defined(AFS_SGI_ENV)
510 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
511 AFS_VOP_READ(tfile->vnode, auio, IO_ISLOCKED, &afs_osi_cred, code);
512 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
516 auio->uio_rw = UIO_READ;
518 VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred, code);
520 #else /* AFS_OSF_ENV */
521 #if defined(AFS_HPUX100_ENV)
523 code = VOP_RDWR(tfile->vnode, auio, UIO_READ, 0, &afs_osi_cred);
526 #if defined(AFS_LINUX20_ENV)
528 code = osi_file_uio_rdwr(tfile, auio, UIO_READ);
531 #if defined(AFS_DARWIN_ENV)
533 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
534 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
535 VOP_UNLOCK(tfile->vnode, 0, current_proc());
538 code = VOP_RDWR(tfile->vnode, auio, UIO_READ, 0, &afs_osi_cred);
/* restore the caller-visible file offset shifted at line 479 above */
546 auio->afsio_offset += avc->quick.minLoc;
548 /* Fix up LRU info */
549 hset(afs_indexTimes[tdc->index], afs_indexCounter);
550 hadd32(afs_indexCounter, 1);
553 ReleaseReadLock(&avc->lock);
554 #ifndef AFS_VM_RDWR_ENV
/* best effort: queue a prefetch of the next chunk if not already started */
555 if (!(code = afs_InitReq(&treq, acred))) {
556 if (!(tdc->mflags & DFNextStarted))
557 afs_PrefetchChunk(avc, tdc, acred, &treq);
561 if (readLocked) ReleaseReadLock(&tdc->lock);
565 if (!tdc->f.chunkBytes) { /* debugging f.chunkBytes == 0 problem */
568 if (readLocked) ReleaseReadLock(&tdc->lock);
571 ReleaseReadLock(&afs_xdcache);
574 /* come here if fast path doesn't work for some reason or other */
576 ReleaseReadLock(&avc->lock);
577 return afs_UFSRead(avc, auio, acred, albn, abpp, noLock);
/*
 * afs_UFSRead -- general (slow-path) read for the disk cache.  Mirrors
 * afs_MemRead's chunk-by-chunk loop, but copies data out of the on-disk
 * cache files via osi_UFSOpen and the platform vnode read interface, and
 * additionally maintains a per-dcache open-file hint (tdc->ihint).
 *
 * NOTE(review): this listing is elided (the embedded original line numbers
 * jump); braces, some declarations and #else/#endif arms are not visible.
 */
580 afs_UFSRead(avc, auio, acred, albn, abpp, noLock)
583 struct AFS_UCRED *acred;
588 afs_size_t totalLength;
589 afs_size_t transferLength;
591 afs_size_t offset, len, tlen;
593 struct dcache *tdc=0;
597 struct osi_file *tfile;
601 struct vrequest treq;
603 AFS_STATCNT(afs_UFSRead);
607 /* check that we have the latest status info in the vnode cache */
608 if (code = afs_InitReq(&treq, acred)) return code;
611 osi_Panic ("null avc in afs_UFSRead");
613 code = afs_VerifyVCache(avc, &treq);
615 code = afs_CheckCode(code, &treq, 11); /* failed to get it */
/* for NFS-translator callers, verify read permission explicitly */
621 #ifndef AFS_VM_RDWR_ENV
622 if (AFS_NFSXLATORREQ(acred)) {
623 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
624 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
625 return afs_CheckCode(EACCES, &treq, 12);
/* scratch iovec for per-chunk uio copies; freed at exit */
630 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
631 totalLength = auio->afsio_resid;
632 filePos = auio->afsio_offset;
633 afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
634 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
635 ICL_TYPE_INT32, totalLength,
636 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
640 ObtainReadLock(&avc->lock);
641 #if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
642 if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
643 hset(avc->flushDV, avc->m.DataVersion);
/* main transfer loop: one iteration per dcache chunk touched */
647 while (totalLength > 0) {
648 /* read all of the cached info */
649 if (filePos >= avc->m.Length) break; /* all done */
652 ReleaseReadLock(&tdc->lock);
655 tdc = afs_FindDCache(avc, filePos);
657 ObtainReadLock(&tdc->lock);
658 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
659 len = tdc->f.chunkBytes - offset;
662 /* a tricky question: does the presence of the DFFetching flag
663 mean that we're fetching the latest version of the file? No.
664 The server could update the file as soon as the fetch responsible
665 for the setting of the DFFetching flag completes.
667 However, the presence of the DFFetching flag (visible under
668 a dcache read lock since it is set and cleared only under a
669 dcache write lock) means that we're fetching as good a version
670 as was known to this client at the time of the last call to
671 afs_VerifyVCache, since the latter updates the stat cache's
672 m.DataVersion field under a vcache write lock, and from the
673 time that the DFFetching flag goes on in afs_GetDCache (before
674 the fetch starts), to the time it goes off (after the fetch
675 completes), afs_GetDCache keeps at least a read lock on the
678 This means that if the DFFetching flag is set, we can use that
679 data for any reads that must come from the current version of
680 the file (current == m.DataVersion).
682 Another way of looking at this same point is this: if we're
683 fetching some data and then try do an afs_VerifyVCache, the
684 VerifyVCache operation will not complete until after the
685 DFFetching flag is turned off and the dcache entry's f.versionNo
688 Note, by the way, that if DFFetching is set,
689 m.DataVersion > f.versionNo (the latter is not updated until
690 after the fetch completes).
693 ReleaseReadLock(&tdc->lock);
694 afs_PutDCache(tdc); /* before reusing tdc */
696 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
697 ObtainReadLock(&tdc->lock);
701 /* now, first try to start transfer, if we'll need the data. If
702 * data already coming, we don't need to do this, obviously. Type
703 * 2 requests never return a null dcache entry, btw. */
704 if (!(tdc->dflags & DFFetching)
705 && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
706 /* have cache entry, it is not coming in now, and we'll need new data */
/* hand the fetch to the background daemon if it has a free slot */
708 if (trybusy && !afs_BBusy()) {
710 /* daemon is not busy */
711 ObtainSharedLock(&tdc->mflock, 667);
712 if (!(tdc->mflags & DFFetchReq)) {
713 UpgradeSToWLock(&tdc->mflock, 668);
714 tdc->mflags |= DFFetchReq;
715 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
716 (afs_size_t) filePos, (afs_size_t) 0,
719 /* Bkg table full; retry deadlocks */
720 tdc->mflags &= ~DFFetchReq;
721 trybusy = 0; /* Avoid bkg daemon since they're too busy */
722 ReleaseWriteLock(&tdc->mflock);
725 ConvertWToSLock(&tdc->mflock);
/* wait for the daemon to pick up the request: drop all three locks
 * before sleeping, reacquire in avc -> tdc -> mflock order; a nonzero
 * SleepSig return (interrupted) ends the wait loops via !code */
728 ConvertSToRLock(&tdc->mflock);
729 while (!code && tdc->mflags & DFFetchReq) {
730 /* don't need waiting flag on this one */
731 ReleaseReadLock(&tdc->mflock);
732 ReleaseReadLock(&tdc->lock);
733 ReleaseReadLock(&avc->lock);
734 code = afs_osi_SleepSig(&tdc->validPos);
735 ObtainReadLock(&avc->lock);
736 ObtainReadLock(&tdc->lock);
737 ObtainReadLock(&tdc->mflock);
739 ReleaseReadLock(&tdc->mflock);
746 /* now data may have started flowing in (if DFFetching is on). If
747 * data is now streaming in, then wait for some interesting stuff.
750 while (!code && (tdc->dflags & DFFetching) &&
751 tdc->validPos <= filePos) {
752 /* too early: wait for DFFetching flag to vanish,
753 * or data to appear */
754 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
755 ICL_TYPE_STRING, __FILE__,
756 ICL_TYPE_INT32, __LINE__,
757 ICL_TYPE_POINTER, tdc,
758 ICL_TYPE_INT32, tdc->dflags);
759 ReleaseReadLock(&tdc->lock);
760 ReleaseReadLock(&avc->lock);
761 code = afs_osi_SleepSig(&tdc->validPos);
762 ObtainReadLock(&avc->lock);
763 ObtainReadLock(&tdc->lock);
769 /* fetching flag gone, data is here, or we never tried
770 * (BBusy for instance) */
771 if (tdc->dflags & DFFetching) {
772 /* still fetching, some new data is here:
773 * compute length and offset */
774 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
775 len = tdc->validPos - filePos;
778 /* no longer fetching, verify data version (avoid new
780 if (hsame(avc->m.DataVersion, tdc->f.versionNo)) {
781 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
782 len = tdc->f.chunkBytes - offset;
785 /* don't have current data, so get it below */
786 ReleaseReadLock(&tdc->lock);
788 tdc = (struct dcache *) 0;
/* type-1 GetDCache: fetch synchronously; drop avc lock across the call */
793 ReleaseReadLock(&avc->lock);
794 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
795 ObtainReadLock(&avc->lock);
796 ObtainReadLock(&tdc->lock);
800 afs_Trace3(afs_iclSetp, CM_TRACE_VNODEREAD,
801 ICL_TYPE_POINTER, tdc,
802 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(offset),
803 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(len));
808 if (len > totalLength) len = totalLength; /* will read len bytes */
809 if (len <= 0) { /* shouldn't get here if DFFetching is on */
810 /* read past the end of a chunk, may not be at next chunk yet, and yet
811 also not at eof, so may have to supply fake zeros */
812 len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
813 if (len > totalLength) len = totalLength; /* and still within xfr request */
/* NOTE(review): 'offset' is chunk-relative but is subtracted from the
 * absolute file length here (same pattern as afs_MemRead) -- verify. */
814 tlen = avc->m.Length - offset; /* and still within file */
815 if (len > tlen) len = tlen;
/* AFS_ZEROS is the size of afs_zeros[], so these two spellings agree */
816 if (len > AFS_ZEROS) len = sizeof(afs_zeros); /* and in 0 buffer */
817 afsio_copy(auio, &tuio, tvec);
819 afsio_trim(&tuio, trimlen);
820 AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, &tuio, code);
827 /* get the data from the file */
/* reuse the cached open-file hint if its inode still matches this chunk;
 * on mismatch, warn and discard the hint */
829 if (tfile = tdc->ihint) {
830 if (tdc->f.inode != tfile->inum){
831 afs_warn( "afs_UFSRead: %x hint mismatch tdc %d inum %d\n",
832 tdc, tdc->f.inode, tfile->inum );
834 tdc->ihint = tfile = 0;
/* no usable hint: open the cache file backing this chunk by inode */
844 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
845 /* mung uio structure to be right for this transfer */
846 afsio_copy(auio, &tuio, tvec);
848 afsio_trim(&tuio, trimlen);
849 tuio.afsio_offset = offset;
/* platform-specific read of the cache file; each #ifdef arm performs the
 * same logical operation with that kernel's vnode interface */
853 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL,
854 NULL, &afs_osi_cred);
858 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL);
859 /* Flush all JFS pages now for big performance gain in big file cases
860 * If we do something like this, must check to be sure that AFS file
861 * isn't mmapped... see afs_gn_map() for why.
864 if (tfile->vnode->v_gnode && tfile->vnode->v_gnode->gn_seg) {
865 many different ways to do similar things:
866 so far, the best performing one is #2, but #1 might match it if we
867 straighten out the confusion regarding which pages to flush. It
869 1. vm_flushp(tfile->vnode->v_gnode->gn_seg, 0, len/PAGESIZE - 1);
870 2. vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
871 (len + PAGESIZE-1)/PAGESIZE);
872 3. vms_inactive(tfile->vnode->v_gnode->gn_seg) Doesn't work correctly
873 4. vms_delete(tfile->vnode->v_gnode->gn_seg) probably also fails
874 tfile->vnode->v_gnode->gn_seg = NULL;
878 Unfortunately, this seems to cause frequent "cache corruption" episodes.
879 vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
880 (len + PAGESIZE-1)/PAGESIZE);
884 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t)&offset, &tuio, NULL, NULL, -1);
890 VOP_RWLOCK(tfile->vnode, 0);
891 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
892 VOP_RWUNLOCK(tfile->vnode, 0);
895 #if defined(AFS_SGI_ENV)
897 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
898 AFS_VOP_READ(tfile->vnode, &tuio, IO_ISLOCKED, &afs_osi_cred,
900 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
904 tuio.uio_rw = UIO_READ;
906 VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred, code);
908 #else /* AFS_OSF_ENV */
910 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
912 #if defined(AFS_HPUX100_ENV)
914 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
917 #if defined(AFS_LINUX20_ENV)
919 code = osi_file_uio_rdwr(tfile, &tuio, UIO_READ);
922 #if defined(AFS_DARWIN_ENV)
924 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
925 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
926 VOP_UNLOCK(tfile->vnode, 0, current_proc());
929 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
/* cache the open file handle as this dcache's hint if the global hint
 * table still has room (nihints < maxIHint) */
940 if (!tdc->ihint && nihints < maxIHint) {
953 /* otherwise we've read some, fixup length, etc and continue with next seg */
954 len = len - tuio.afsio_resid; /* compute amount really transferred */
956 afsio_skip(auio, trimlen); /* update input uio structure */
958 transferLength += len;
960 if (len <= 0) break; /* surprise eof */
963 /* if we make it here with tdc non-zero, then it is the last chunk we
964 * dealt with, and we have to release it when we're done. We hold on
965 * to it in case we need to do a prefetch, obviously.
968 ReleaseReadLock(&tdc->lock);
969 #ifndef AFS_VM_RDWR_ENV
970 /* try to queue prefetch, if needed */
972 if (!(tdc->mflags & DFNextStarted))
973 afs_PrefetchChunk(avc, tdc, acred, &treq);
979 ReleaseReadLock(&avc->lock);
981 osi_FreeSmallSpace(tvec);
982 error = afs_CheckCode(error, &treq, 13);