2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 #include <afsconfig.h>
20 #include "../afs/param.h"
24 #include "../afs/sysincludes.h" /* Standard vendor system headers */
25 #include "../afs/afsincludes.h" /* Afs-based standard headers */
26 #include "../afs/afs_stats.h" /* statistics */
27 #include "../afs/afs_cbqueue.h"
28 #include "../afs/nfsclient.h"
29 #include "../afs/afs_osidnlc.h"
32 extern char afs_zeros[AFS_ZEROS];
35 afs_int32 nihints; /* # of above actually in-use */
39 /* Imported variables */
40 extern afs_rwlock_t afs_xdcache;
41 extern unsigned char *afs_indexFlags;
42 extern afs_hyper_t *afs_indexTimes; /* Dcache entry Access times */
43 extern afs_hyper_t afs_indexCounter; /* Fake time for marking index */
46 /* Forward declarations */
47 void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
48 struct AFS_UCRED *acred, struct vrequest *areq);
/*
 * afs_MemRead: satisfy a read request from the memory-resident chunk cache.
 * Structurally parallel to afs_UFSRead below; the actual data transfer is
 * done by afs_MemReadUIO rather than a vnode read on a UFS cache file.
 * NOTE(review): this is an excerpt with elided lines; the comments added
 * here describe only what the visible code demonstrates.
 */
50 afs_MemRead(avc, auio, acred, albn, abpp, noLock)
51 register struct vcache *avc;
53 struct AFS_UCRED *acred;
58 afs_size_t totalLength;
59 afs_size_t transferLength;
61 afs_size_t offset, len, tlen;
64 afs_int32 error, trybusy=1;
71 AFS_STATCNT(afs_MemRead);
75 /* check that we have the latest status info in the vnode cache */
76 if (code = afs_InitReq(&treq, acred)) return code;
78 code = afs_VerifyVCache(avc, &treq);
80 code = afs_CheckCode(code, &treq, 8); /* failed to get it */
/* NFS translator requests get an explicit ACL check (PRSFS_READ) here,
 * since they did not come through the local open() path. */
85 #ifndef AFS_VM_RDWR_ENV
86 if (AFS_NFSXLATORREQ(acred)) {
87 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
88 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
89 return afs_CheckCode(EACCES, &treq, 9);
94 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
95 totalLength = auio->afsio_resid;
96 filePos = auio->afsio_offset;
97 afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
98 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
99 ICL_TYPE_INT32, totalLength,
100 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
104 ObtainReadLock(&avc->lock);
105 #if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
106 if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
107 hset(avc->flushDV, avc->m.DataVersion);
/* Main transfer loop: one iteration per cache chunk touched by the request. */
115 while (totalLength > 0) {
116 /* read all of the cached info */
117 if (filePos >= avc->m.Length) break; /* all done */
120 ReleaseReadLock(&tdc->lock);
123 tdc = afs_FindDCache(avc, filePos);
125 ObtainReadLock(&tdc->lock);
126 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
127 len = tdc->f.chunkBytes - offset;
130 /* a tricky question: does the presence of the DFFetching flag
131 mean that we're fetching the latest version of the file? No.
132 The server could update the file as soon as the fetch responsible
133 for the setting of the DFFetching flag completes.
135 However, the presence of the DFFetching flag (visible under
136 a dcache read lock since it is set and cleared only under a
137 dcache write lock) means that we're fetching as good a version
138 as was known to this client at the time of the last call to
139 afs_VerifyVCache, since the latter updates the stat cache's
140 m.DataVersion field under a vcache write lock, and from the
141 time that the DFFetching flag goes on in afs_GetDCache (before
142 the fetch starts), to the time it goes off (after the fetch
143 completes), afs_GetDCache keeps at least a read lock on the
146 This means that if the DFFetching flag is set, we can use that
147 data for any reads that must come from the current version of
148 the file (current == m.DataVersion).
150 Another way of looking at this same point is this: if we're
151 fetching some data and then try do an afs_VerifyVCache, the
152 VerifyVCache operation will not complete until after the
153 DFFetching flag is turned off and the dcache entry's f.versionNo
156 Note, by the way, that if DFFetching is set,
157 m.DataVersion > f.versionNo (the latter is not updated until
158 after the fetch completes).
161 ReleaseReadLock(&tdc->lock);
162 afs_PutDCache(tdc); /* before reusing tdc */
164 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
165 ObtainReadLock(&tdc->lock);
166 /* now, first try to start transfer, if we'll need the data. If
167 * data already coming, we don't need to do this, obviously. Type
168 * 2 requests never return a null dcache entry, btw.
170 if (!(tdc->dflags & DFFetching)
171 && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
172 /* have cache entry, it is not coming in now,
173 * and we'll need new data */
175 if (trybusy && !afs_BBusy()) {
177 /* daemon is not busy */
178 ObtainSharedLock(&tdc->mflock, 665);
179 if (!(tdc->mflags & DFFetchReq)) {
180 /* start the daemon (may already be running, however) */
181 UpgradeSToWLock(&tdc->mflock, 666);
182 tdc->mflags |= DFFetchReq;
183 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
184 (afs_size_t)filePos, (afs_size_t) 0,
/* afs_BQueue failed (bkg table full): roll DFFetchReq back and stop
 * trying the background daemon for the rest of this call. */
187 tdc->mflags &= ~DFFetchReq;
188 trybusy = 0; /* Avoid bkg daemon since they're too busy */
189 ReleaseWriteLock(&tdc->mflock);
192 ConvertWToSLock(&tdc->mflock);
193 /* don't use bp pointer! */
195 ConvertSToRLock(&tdc->mflock);
/* Someone (possibly us, just above) queued a fetch: sleep until
 * DFFetchReq clears.  All three locks are dropped before
 * afs_osi_Sleep and reacquired afterwards in avc -> tdc -> mflock
 * order, preserving the lock hierarchy. */
196 while (tdc->mflags & DFFetchReq) {
197 /* don't need waiting flag on this one */
198 ReleaseReadLock(&tdc->mflock);
199 ReleaseReadLock(&tdc->lock);
200 ReleaseReadLock(&avc->lock);
201 afs_osi_Sleep(&tdc->validPos);
202 ObtainReadLock(&avc->lock);
203 ObtainReadLock(&tdc->lock);
204 ObtainReadLock(&tdc->mflock);
206 ReleaseReadLock(&tdc->mflock);
209 /* now data may have started flowing in (if DFFetching is on). If
210 * data is now streaming in, then wait for some interesting stuff. */
211 while ((tdc->dflags & DFFetching) && tdc->validPos <= filePos) {
212 /* too early: wait for DFFetching flag to vanish, or data to appear */
213 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
214 ICL_TYPE_STRING, __FILE__,
215 ICL_TYPE_INT32, __LINE__,
216 ICL_TYPE_POINTER, tdc,
217 ICL_TYPE_INT32, tdc->dflags);
218 ReleaseReadLock(&tdc->lock);
219 ReleaseReadLock(&avc->lock);
220 afs_osi_Sleep(&tdc->validPos);
221 ObtainReadLock(&avc->lock);
222 ObtainReadLock(&tdc->lock);
224 /* fetching flag gone, data is here, or we never tried (BBusy for instance) */
225 if (tdc->dflags & DFFetching) {
226 /* still fetching, some new data is here: compute length and offset */
227 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
228 len = tdc->validPos - filePos;
231 /* no longer fetching, verify data version (avoid new GetDCache call) */
232 if (hsame(avc->m.DataVersion, tdc->f.versionNo)) {
233 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
234 len = tdc->f.chunkBytes - offset;
237 /* don't have current data, so get it below */
238 ReleaseReadLock(&tdc->lock);
240 tdc = (struct dcache *) 0;
/* Fall back to a type-1 afs_GetDCache; the vcache lock is dropped
 * around the call and reacquired before the dcache lock. */
245 ReleaseReadLock(&avc->lock);
246 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
247 ObtainReadLock(&avc->lock);
248 ObtainReadLock(&tdc->lock);
263 if (len > totalLength) len = totalLength; /* will read len bytes */
264 if (len <= 0) { /* shouldn't get here if DFFetching is on */
265 /* read past the end of a chunk, may not be at next chunk yet, and yet
266 also not at eof, so may have to supply fake zeros */
267 len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
268 if (len > totalLength) len = totalLength; /* and still within xfr request */
269 tlen = avc->m.Length - offset; /* and still within file */
270 if (len > tlen) len = tlen;
271 if (len > AFS_ZEROS) len = sizeof(afs_zeros); /* and in 0 buffer */
272 afsio_copy(auio, &tuio, tvec);
274 afsio_trim(&tuio, trimlen);
275 AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, &tuio, code);
282 /* get the data from the mem cache */
284 /* mung uio structure to be right for this transfer */
285 afsio_copy(auio, &tuio, tvec);
287 afsio_trim(&tuio, trimlen);
288 tuio.afsio_offset = offset;
290 code = afs_MemReadUIO(tdc->f.inode, &tuio);
297 /* otherwise we've read some, fixup length, etc and continue with next seg */
298 len = len - tuio.afsio_resid; /* compute amount really transferred */
300 afsio_skip(auio, trimlen); /* update input uio structure */
302 transferLength += len;
305 if (len <= 0) break; /* surprise eof */
306 } /* the whole while loop */
311 * tdc->lock(R) if tdc
314 /* if we make it here with tdc non-zero, then it is the last chunk we
315 * dealt with, and we have to release it when we're done. We hold on
316 * to it in case we need to do a prefetch.
319 ReleaseReadLock(&tdc->lock);
320 #ifndef AFS_VM_RDWR_ENV
321 /* try to queue prefetch, if needed */
323 afs_PrefetchChunk(avc, tdc, acred, &treq);
329 ReleaseReadLock(&avc->lock);
330 osi_FreeSmallSpace(tvec);
331 error = afs_CheckCode(error, &treq, 10);
335 /* called with the dcache entry triggering the fetch, the vcache entry involved,
336 * and a vrequest for the read call. Marks the dcache entry as having already
337 * triggered a prefetch, starts the prefetch going and sets the DFFetchReq
338 * flag in the prefetched block, so that the next call to read knows to wait
339 * for the daemon to start doing things.
341 * This function must be called with the vnode at least read-locked, and
342 * no locks on the dcache, because it plays around with dcache entries.
 *
 * NOTE(review): locks are taken in adc->lock -> adc->mflock order, and
 * DFNextStarted is set before the background request is queued; if
 * afs_BQueue fails, both DFFetchReq (on the target chunk) and
 * DFNextStarted (on adc) are rolled back below.
 */
344 void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
345 struct AFS_UCRED *acred, struct vrequest *areq)
347 register struct dcache *tdc;
349 afs_size_t j1, j2; /* junk vbls for GetDCache to trash */
351 offset = adc->f.chunk+1; /* next chunk we'll need */
352 offset = AFS_CHUNKTOBASE(offset); /* base of next chunk */
353 ObtainReadLock(&adc->lock);
354 ObtainSharedLock(&adc->mflock, 662);
/* only prefetch if the next chunk is inside the file, we have not already
 * tried for this chunk, and the background daemons are not saturated */
355 if (offset < avc->m.Length && !(adc->mflags & DFNextStarted) && !afs_BBusy()) {
358 UpgradeSToWLock(&adc->mflock, 663);
359 adc->mflags |= DFNextStarted; /* we've tried to prefetch for this guy */
360 ReleaseWriteLock(&adc->mflock);
361 ReleaseReadLock(&adc->lock);
363 tdc = afs_GetDCache(avc, offset, areq, &j1, &j2, 2); /* type 2 never returns 0 */
364 ObtainSharedLock(&tdc->mflock, 651);
365 if (!(tdc->mflags & DFFetchReq)) {
366 /* ask the daemon to do the work */
367 UpgradeSToWLock(&tdc->mflock, 652);
368 tdc->mflags |= DFFetchReq; /* guaranteed to be cleared by BKG or GetDCache */
369 /* last parm (1) tells bkg daemon to do an afs_PutDCache when it is done,
370 * since we don't want to wait for it to finish before doing so ourselves.
372 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
373 (afs_size_t) offset, (afs_size_t) 1, tdc);
375 /* Bkg table full; just abort non-important prefetching to avoid deadlocks */
376 tdc->mflags &= ~DFFetchReq;
377 ReleaseWriteLock(&tdc->mflock);
/* also undo DFNextStarted on the triggering chunk so a later read can
 * retry this prefetch */
381 * DCLOCKXXX: This is a little sketchy, since someone else
382 * could have already started a prefetch.. In practice,
383 * this probably doesn't matter; at most it would cause an
384 * extra slot in the BKG table to be used up when someone
385 * prefetches this for the second time.
387 ObtainReadLock(&adc->lock);
388 ObtainWriteLock(&adc->mflock, 664);
389 adc->mflags &= ~DFNextStarted;
390 ReleaseWriteLock(&adc->mflock);
391 ReleaseReadLock(&adc->lock);
393 ReleaseWriteLock(&tdc->mflock);
396 ReleaseSharedLock(&tdc->mflock);
400 ReleaseSharedLock(&adc->mflock);
401 ReleaseReadLock(&adc->lock);
406 /* if the vcache is up-to-date, and the request fits entirely into the chunk
407 * that the hint here references, then we just use it quickly, otherwise we
408 * have to call the slow read.
410 * This could be generalized in several ways to take advantage of partial
411 * state even when all the chips don't fall the right way. For instance,
412 * if the hint is good and the first part of the read request can be
413 * satisfied from the chunk, then just do the read. After the read has
414 * completed, check to see if there's more. (Chances are there won't be.)
415 * If there is more, then just call afs_UFSReadSlow and let it do the rest.
417 * For the time being, I'm ignoring quick.f, but it should be used at
419 * do this in the future avc->quick.f = tfile; but I think it
420 * has to be done under a write lock, but don't want to wait on the
423 /* everywhere that a dcache can be freed (look for NULLIDX)
424 * probably does it under a write lock on xdcache. Need to invalidate
426 * Also need to worry about DFFetching, and IFFree, I think. */
/* NOTE(review): savedc is not referenced anywhere in the visible portion
 * of this file; presumably a debugging aid for the hint-invalidation
 * concerns described above — confirm before removing. */
427 static struct dcache *savedc = 0;
/*
 * afs_UFSReadFast: fast-path read that tries to satisfy the whole request
 * from the single cached chunk remembered in avc->quick (dcache hint).
 * If the hint is stale, the chunk is being fetched, or the request does
 * not fit entirely inside the hinted chunk, it falls through to the slow
 * path (afs_UFSRead) at the bottom.
 * NOTE(review): excerpt with elided lines, including most #else/#endif
 * arms of the per-platform vnode-read ladder below.
 */
429 afs_UFSReadFast(avc, auio, acred, albn, abpp, noLock)
430 register struct vcache *avc;
432 struct AFS_UCRED *acred;
437 struct vrequest treq;
441 struct osi_file *tfile;
445 ObtainReadLock(&avc->lock);
446 ObtainReadLock(&afs_xdcache);
/* hint is usable only if the vcache is valid, the hinted dcache still
 * occupies its index, and that index is not free/discarded; all of this
 * is checked while holding afs_xdcache to keep the entry from moving */
448 if ((avc->states & CStatd) /* up to date */
449 && (tdc = avc->quick.dc) && (tdc->index != NULLIDX)
450 && !(afs_indexFlags[tdc->index] & (IFFree | IFDiscarded))) {
455 ReleaseReadLock(&afs_xdcache);
456 if (tdc->stamp == avc->quick.stamp) {
458 ObtainReadLock(&tdc->lock);
/* re-validate the stamp under tdc->lock, then require the whole request
 * to land inside the hinted chunk and no fetch to be in progress */
461 if ((tdc->stamp == avc->quick.stamp) /* hint matches */
462 && ((offDiff = (auio->afsio_offset - avc->quick.minLoc)) >= 0)
463 && (tdc->f.chunkBytes >= auio->afsio_resid + offDiff)
464 && !(tdc->dflags & DFFetching)) { /* fits in chunk */
/* translate the file offset to a cache-file offset; restored below */
466 auio->afsio_offset -= avc->quick.minLoc;
468 afs_Trace4(afs_iclSetp, CM_TRACE_READFAST, ICL_TYPE_POINTER, avc,
469 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(auio->afsio_offset),
470 ICL_TYPE_INT32, auio->afsio_resid,
471 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
473 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
/* per-platform read of the cache file; exactly one arm is compiled */
478 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, auio, NULL, NULL, NULL, &afs_osi_cred);
482 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, auio, NULL, NULL);
484 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t)&auio->afsio_offset, auio, NULL, NULL, -1);
490 VOP_RWLOCK(tfile->vnode, 0);
491 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
492 VOP_RWUNLOCK(tfile->vnode, 0);
495 #if defined(AFS_SGI_ENV)
497 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
498 AFS_VOP_READ(tfile->vnode, auio, IO_ISLOCKED, &afs_osi_cred, code);
499 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
503 auio->uio_rw = UIO_READ;
505 VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred, code);
507 #else /* AFS_OSF_ENV */
508 #if defined(AFS_HPUX100_ENV)
510 code = VOP_RDWR(tfile->vnode, auio, UIO_READ, 0, &afs_osi_cred);
513 #if defined(AFS_LINUX20_ENV)
515 code = osi_file_uio_rdwr(tfile, auio, UIO_READ);
518 #if defined(AFS_DARWIN_ENV)
520 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
521 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
522 VOP_UNLOCK(tfile->vnode, 0, current_proc());
525 code = VOP_RDWR(tfile->vnode, auio, UIO_READ, 0, &afs_osi_cred);
/* restore the caller-visible file offset */
533 auio->afsio_offset += avc->quick.minLoc;
535 /* Fix up LRU info */
536 hset(afs_indexTimes[tdc->index], afs_indexCounter);
537 hadd32(afs_indexCounter, 1);
540 ReleaseReadLock(&avc->lock);
541 #ifndef AFS_VM_RDWR_ENV
542 if (!(code = afs_InitReq(&treq, acred))) {
543 if (!(tdc->mflags & DFNextStarted))
544 afs_PrefetchChunk(avc, tdc, acred, &treq);
548 if (readLocked) ReleaseReadLock(&tdc->lock);
552 if (!tdc->f.chunkBytes) { /* debugging f.chunkBytes == 0 problem */
555 if (readLocked) ReleaseReadLock(&tdc->lock);
558 ReleaseReadLock(&afs_xdcache);
561 /* come here if fast path doesn't work for some reason or other */
563 ReleaseReadLock(&avc->lock);
564 return afs_UFSRead(avc, auio, acred, albn, abpp, noLock);
/*
 * afs_UFSRead: slow-path read from the UFS (on-disk) chunk cache.
 * Walks the request chunk by chunk: finds or fetches the dcache entry
 * covering filePos, waits for in-flight fetches when necessary, and
 * transfers data from the cache file via the platform's vnode read.
 * Structurally parallel to afs_MemRead above.
 * NOTE(review): excerpt with elided lines; comments added here describe
 * only what the visible code shows.
 */
567 afs_UFSRead(avc, auio, acred, albn, abpp, noLock)
570 struct AFS_UCRED *acred;
575 afs_size_t totalLength;
576 afs_size_t transferLength;
578 afs_size_t offset, len, tlen;
580 struct dcache *tdc=0;
584 struct osi_file *tfile;
588 struct vrequest treq;
590 AFS_STATCNT(afs_UFSRead);
594 /* check that we have the latest status info in the vnode cache */
595 if (code = afs_InitReq(&treq, acred)) return code;
598 osi_Panic ("null avc in afs_UFSRead");
600 code = afs_VerifyVCache(avc, &treq);
602 code = afs_CheckCode(code, &treq, 11); /* failed to get it */
/* NFS translator requests get an explicit ACL check (PRSFS_READ) here */
608 #ifndef AFS_VM_RDWR_ENV
609 if (AFS_NFSXLATORREQ(acred)) {
610 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
611 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
612 return afs_CheckCode(EACCES, &treq, 12);
617 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
618 totalLength = auio->afsio_resid;
619 filePos = auio->afsio_offset;
620 afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
621 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
622 ICL_TYPE_INT32, totalLength,
623 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
627 ObtainReadLock(&avc->lock);
628 #if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
629 if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
630 hset(avc->flushDV, avc->m.DataVersion);
/* Main transfer loop: one iteration per cache chunk touched by the request. */
634 while (totalLength > 0) {
635 /* read all of the cached info */
636 if (filePos >= avc->m.Length) break; /* all done */
639 ReleaseReadLock(&tdc->lock);
642 tdc = afs_FindDCache(avc, filePos);
644 ObtainReadLock(&tdc->lock);
645 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
646 len = tdc->f.chunkBytes - offset;
649 /* a tricky question: does the presence of the DFFetching flag
650 mean that we're fetching the latest version of the file? No.
651 The server could update the file as soon as the fetch responsible
652 for the setting of the DFFetching flag completes.
654 However, the presence of the DFFetching flag (visible under
655 a dcache read lock since it is set and cleared only under a
656 dcache write lock) means that we're fetching as good a version
657 as was known to this client at the time of the last call to
658 afs_VerifyVCache, since the latter updates the stat cache's
659 m.DataVersion field under a vcache write lock, and from the
660 time that the DFFetching flag goes on in afs_GetDCache (before
661 the fetch starts), to the time it goes off (after the fetch
662 completes), afs_GetDCache keeps at least a read lock on the
665 This means that if the DFFetching flag is set, we can use that
666 data for any reads that must come from the current version of
667 the file (current == m.DataVersion).
669 Another way of looking at this same point is this: if we're
670 fetching some data and then try do an afs_VerifyVCache, the
671 VerifyVCache operation will not complete until after the
672 DFFetching flag is turned off and the dcache entry's f.versionNo
675 Note, by the way, that if DFFetching is set,
676 m.DataVersion > f.versionNo (the latter is not updated until
677 after the fetch completes).
680 ReleaseReadLock(&tdc->lock);
681 afs_PutDCache(tdc); /* before reusing tdc */
683 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
684 ObtainReadLock(&tdc->lock);
688 /* now, first try to start transfer, if we'll need the data. If
689 * data already coming, we don't need to do this, obviously. Type
690 * 2 requests never return a null dcache entry, btw. */
691 if (!(tdc->dflags & DFFetching)
692 && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
693 /* have cache entry, it is not coming in now, and we'll need new data */
695 if (trybusy && !afs_BBusy()) {
697 /* daemon is not busy */
698 ObtainSharedLock(&tdc->mflock, 667);
699 if (!(tdc->mflags & DFFetchReq)) {
700 UpgradeSToWLock(&tdc->mflock, 668);
701 tdc->mflags |= DFFetchReq;
702 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
703 (afs_size_t) filePos, (afs_size_t) 0,
706 /* Bkg table full; retry deadlocks */
707 tdc->mflags &= ~DFFetchReq;
708 trybusy = 0; /* Avoid bkg daemon since they're too busy */
709 ReleaseWriteLock(&tdc->mflock);
712 ConvertWToSLock(&tdc->mflock);
714 ConvertSToRLock(&tdc->mflock);
/* Someone queued the fetch: sleep until DFFetchReq clears.  All three
 * locks are dropped before afs_osi_Sleep and reacquired afterwards in
 * avc -> tdc -> mflock order, preserving the lock hierarchy. */
715 while (tdc->mflags & DFFetchReq) {
716 /* don't need waiting flag on this one */
717 ReleaseReadLock(&tdc->mflock);
718 ReleaseReadLock(&tdc->lock);
719 ReleaseReadLock(&avc->lock);
720 afs_osi_Sleep(&tdc->validPos);
721 ObtainReadLock(&avc->lock);
722 ObtainReadLock(&tdc->lock);
723 ObtainReadLock(&tdc->mflock);
725 ReleaseReadLock(&tdc->mflock);
728 /* now data may have started flowing in (if DFFetching is on). If
729 * data is now streaming in, then wait for some interesting stuff. */
730 while ((tdc->dflags & DFFetching) && tdc->validPos <= filePos) {
731 /* too early: wait for DFFetching flag to vanish, or data to appear */
732 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
733 ICL_TYPE_STRING, __FILE__,
734 ICL_TYPE_INT32, __LINE__,
735 ICL_TYPE_POINTER, tdc,
736 ICL_TYPE_INT32, tdc->dflags);
737 ReleaseReadLock(&tdc->lock);
738 ReleaseReadLock(&avc->lock);
739 afs_osi_Sleep(&tdc->validPos);
740 ObtainReadLock(&avc->lock);
741 ObtainReadLock(&tdc->lock);
743 /* fetching flag gone, data is here, or we never tried (BBusy for instance) */
744 if (tdc->dflags & DFFetching) {
745 /* still fetching, some new data is here: compute length and offset */
746 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
747 len = tdc->validPos - filePos;
750 /* no longer fetching, verify data version (avoid new GetDCache call) */
751 if (hsame(avc->m.DataVersion, tdc->f.versionNo)) {
752 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
753 len = tdc->f.chunkBytes - offset;
756 /* don't have current data, so get it below */
757 ReleaseReadLock(&tdc->lock);
759 tdc = (struct dcache *) 0;
/* Fall back to a type-1 afs_GetDCache; the vcache lock is dropped
 * around the call and reacquired before the dcache lock. */
764 ReleaseReadLock(&avc->lock);
765 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
766 ObtainReadLock(&avc->lock);
767 ObtainReadLock(&tdc->lock);
771 afs_Trace3(afs_iclSetp, CM_TRACE_VNODEREAD,
772 ICL_TYPE_POINTER, tdc,
773 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(offset),
774 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(len));
779 if (len > totalLength) len = totalLength; /* will read len bytes */
780 if (len <= 0) { /* shouldn't get here if DFFetching is on */
781 /* read past the end of a chunk, may not be at next chunk yet, and yet
782 also not at eof, so may have to supply fake zeros */
783 len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
784 if (len > totalLength) len = totalLength; /* and still within xfr request */
785 tlen = avc->m.Length - offset; /* and still within file */
786 if (len > tlen) len = tlen;
787 if (len > AFS_ZEROS) len = sizeof(afs_zeros); /* and in 0 buffer */
788 afsio_copy(auio, &tuio, tvec);
790 afsio_trim(&tuio, trimlen);
791 AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, &tuio, code);
798 /* get the data from the file */
/* reuse the cached open-file hint if its inode still matches; otherwise
 * drop the stale hint and reopen below */
800 if (tfile = tdc->ihint) {
801 if (tdc->f.inode != tfile->inum){
802 afs_warn( "afs_UFSRead: %x hint mismatch tdc %d inum %d\n",
803 tdc, tdc->f.inode, tfile->inum );
805 tdc->ihint = tfile = 0;
815 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
816 /* mung uio structure to be right for this transfer */
817 afsio_copy(auio, &tuio, tvec);
819 afsio_trim(&tuio, trimlen);
820 tuio.afsio_offset = offset;
/* per-platform read of the cache file; exactly one arm is compiled */
824 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL,
825 NULL, &afs_osi_cred);
829 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL);
830 /* Flush all JFS pages now for big performance gain in big file cases
831 * If we do something like this, must check to be sure that AFS file
832 * isn't mmapped... see afs_gn_map() for why.
835 if (tfile->vnode->v_gnode && tfile->vnode->v_gnode->gn_seg) {
836 many different ways to do similar things:
837 so far, the best performing one is #2, but #1 might match it if we
838 straighten out the confusion regarding which pages to flush. It
840 1. vm_flushp(tfile->vnode->v_gnode->gn_seg, 0, len/PAGESIZE - 1);
841 2. vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
842 (len + PAGESIZE-1)/PAGESIZE);
843 3. vms_inactive(tfile->vnode->v_gnode->gn_seg) Doesn't work correctly
844 4. vms_delete(tfile->vnode->v_gnode->gn_seg) probably also fails
845 tfile->vnode->v_gnode->gn_seg = NULL;
849 Unfortunately, this seems to cause frequent "cache corruption" episodes.
850 vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
851 (len + PAGESIZE-1)/PAGESIZE);
855 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t)&offset, &tuio, NULL, NULL, -1);
861 VOP_RWLOCK(tfile->vnode, 0);
862 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
863 VOP_RWUNLOCK(tfile->vnode, 0);
866 #if defined(AFS_SGI_ENV)
868 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
869 AFS_VOP_READ(tfile->vnode, &tuio, IO_ISLOCKED, &afs_osi_cred,
871 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
875 tuio.uio_rw = UIO_READ;
877 VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred, code);
879 #else /* AFS_OSF_ENV */
881 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
883 #if defined(AFS_HPUX100_ENV)
885 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
888 #if defined(AFS_LINUX20_ENV)
890 code = osi_file_uio_rdwr(tfile, &tuio, UIO_READ);
893 #if defined(AFS_DARWIN_ENV)
895 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
896 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
897 VOP_UNLOCK(tfile->vnode, 0, current_proc());
900 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
/* cache the open file as a hint if there is room in the hint table */
911 if (!tdc->ihint && nihints < maxIHint) {
924 /* otherwise we've read some, fixup length, etc and continue with next seg */
925 len = len - tuio.afsio_resid; /* compute amount really transferred */
927 afsio_skip(auio, trimlen); /* update input uio structure */
929 transferLength += len;
931 if (len <= 0) break; /* surprise eof */
934 /* if we make it here with tdc non-zero, then it is the last chunk we
935 * dealt with, and we have to release it when we're done. We hold on
936 * to it in case we need to do a prefetch, obviously.
939 ReleaseReadLock(&tdc->lock);
940 #ifndef AFS_VM_RDWR_ENV
941 /* try to queue prefetch, if needed */
943 if (!(tdc->mflags & DFNextStarted))
944 afs_PrefetchChunk(avc, tdc, acred, &treq);
950 ReleaseReadLock(&avc->lock);
952 osi_FreeSmallSpace(tvec);
953 error = afs_CheckCode(error, &treq, 13);