2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 #include <afsconfig.h>
20 #include "../afs/param.h"
24 #include "../afs/sysincludes.h" /* Standard vendor system headers */
25 #include "../afs/afsincludes.h" /* Afs-based standard headers */
26 #include "../afs/afs_stats.h" /* statistics */
27 #include "../afs/afs_cbqueue.h"
28 #include "../afs/nfsclient.h"
29 #include "../afs/afs_osidnlc.h"
32 extern char afs_zeros[AFS_ZEROS];
35 afs_int32 nihints; /* # of above actually in-use */
39 /* Imported variables */
40 extern afs_rwlock_t afs_xdcache;
41 extern unsigned char *afs_indexFlags;
42 extern afs_hyper_t *afs_indexTimes; /* Dcache entry Access times */
43 extern afs_hyper_t afs_indexCounter; /* Fake time for marking index */
46 /* Forward declarations */
47 void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
48 struct AFS_UCRED *acred, struct vrequest *areq);
/*
 * afs_MemRead
 *
 * Satisfy a read request for vcache 'avc' out of the in-memory cache,
 * chunk by chunk (data is pulled with afs_MemReadUIO).  albn/abpp/noLock
 * mirror the afs_UFSRead signature; their use is not visible in this
 * (partial) view of the function — several lines are elided here.
 *
 * Locking: avc->lock is held for read across the main loop; each chunk's
 * dcache entry is held with tdc->lock(R) while consumed.  All locks are
 * dropped around afs_osi_SleepSig() and re-taken in avc -> tdc -> mflock
 * order afterwards.
 *
 * Returns 0 or an error code filtered through afs_CheckCode().
 */
50 int afs_MemRead(register struct vcache *avc, struct uio *auio, struct AFS_UCRED *acred,
51 daddr_t albn, struct buf **abpp, int noLock)
/* totalLength: bytes the caller still wants; transferLength: delivered. */
53 afs_size_t totalLength;
54 afs_size_t transferLength;
/* offset: position within the current chunk; len: usable bytes there. */
56 afs_size_t offset, len, tlen;
/* trybusy is cleared once the background-daemon queue is found full, so
 * we stop trying to schedule fetches through it for this read. */
59 afs_int32 error, trybusy=1;
66 AFS_STATCNT(afs_MemRead);
70 /* check that we have the latest status info in the vnode cache */
71 if ((code = afs_InitReq(&treq, acred))) return code;
73 code = afs_VerifyVCache(avc, &treq);
75 code = afs_CheckCode(code, &treq, 8); /* failed to get it */
/* NFS-translator requests carry credentials the normal VFS entry path
 * has not vetted, so do an explicit PRSFS_READ ACL check. */
80 #ifndef AFS_VM_RDWR_ENV
81 if (AFS_NFSXLATORREQ(acred)) {
82 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
83 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
84 return afs_CheckCode(EACCES, &treq, 9);
/* Scratch iovec for the afsio_copy/afsio_trim calls below; freed at the
 * bottom of the function with osi_FreeSmallSpace. */
89 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
90 totalLength = auio->afsio_resid;
91 filePos = auio->afsio_offset;
92 afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
93 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
94 ICL_TYPE_INT32, totalLength,
95 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
99 ObtainReadLock(&avc->lock);
/* Text-segment builds: record the data version being served so a later
 * flush can tell whether in-core text is stale. */
100 #if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
101 if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
102 hset(avc->flushDV, avc->m.DataVersion);
/* Main transfer loop: one iteration per chunk touched by the request. */
110 while (totalLength > 0) {
111 /* read all of the cached info */
112 if (filePos >= avc->m.Length) break; /* all done */
/* Drop the previous iteration's dcache before looking up the next one.
 * (The matching afs_PutDCache is on a line elided from this view.) */
115 ReleaseReadLock(&tdc->lock);
118 tdc = afs_FindDCache(avc, filePos);
120 ObtainReadLock(&tdc->lock);
121 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
122 len = tdc->f.chunkBytes - offset;
125 /* a tricky question: does the presence of the DFFetching flag
126 mean that we're fetching the latest version of the file? No.
127 The server could update the file as soon as the fetch responsible
128 for the setting of the DFFetching flag completes.
130 However, the presence of the DFFetching flag (visible under
131 a dcache read lock since it is set and cleared only under a
132 dcache write lock) means that we're fetching as good a version
133 as was known to this client at the time of the last call to
134 afs_VerifyVCache, since the latter updates the stat cache's
135 m.DataVersion field under a vcache write lock, and from the
136 time that the DFFetching flag goes on in afs_GetDCache (before
137 the fetch starts), to the time it goes off (after the fetch
138 completes), afs_GetDCache keeps at least a read lock on the
141 This means that if the DFFetching flag is set, we can use that
142 data for any reads that must come from the current version of
143 the file (current == m.DataVersion).
145 Another way of looking at this same point is this: if we're
146 fetching some data and then try do an afs_VerifyVCache, the
147 VerifyVCache operation will not complete until after the
148 DFFetching flag is turned off and the dcache entry's f.versionNo
151 Note, by the way, that if DFFetching is set,
152 m.DataVersion > f.versionNo (the latter is not updated until
153 after the fetch completes).
156 ReleaseReadLock(&tdc->lock);
157 afs_PutDCache(tdc); /* before reusing tdc */
/* Type-2 GetDCache: returns an entry without starting a fetch itself. */
159 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
160 ObtainReadLock(&tdc->lock);
161 /* now, first try to start transfer, if we'll need the data. If
162 * data already coming, we don't need to do this, obviously. Type
163 * 2 requests never return a null dcache entry, btw.
165 if (!(tdc->dflags & DFFetching)
166 && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
167 /* have cache entry, it is not coming in now,
168 * and we'll need new data */
170 if (trybusy && !afs_BBusy()) {
172 /* daemon is not busy */
173 ObtainSharedLock(&tdc->mflock, 665);
174 if (!(tdc->mflags & DFFetchReq)) {
175 /* start the daemon (may already be running, however) */
176 UpgradeSToWLock(&tdc->mflock, 666);
177 tdc->mflags |= DFFetchReq;
178 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
179 (afs_size_t)filePos, (afs_size_t) 0,
/* BQueue failure path (condition elided from this view): undo the
 * request flag and stop bothering the daemon for this read. */
182 tdc->mflags &= ~DFFetchReq;
183 trybusy = 0; /* Avoid bkg daemon since they're too busy */
184 ReleaseWriteLock(&tdc->mflock);
187 ConvertWToSLock(&tdc->mflock);
188 /* don't use bp pointer! */
/* A fetch request is (or may be) queued: sleep, with ALL locks
 * dropped, until the daemon clears DFFetchReq; then re-take locks
 * in avc -> tdc -> mflock order. */
191 ConvertSToRLock(&tdc->mflock);
192 while (!code && tdc->mflags & DFFetchReq) {
193 /* don't need waiting flag on this one */
194 ReleaseReadLock(&tdc->mflock);
195 ReleaseReadLock(&tdc->lock);
196 ReleaseReadLock(&avc->lock);
197 code = afs_osi_SleepSig(&tdc->validPos);
198 ObtainReadLock(&avc->lock);
199 ObtainReadLock(&tdc->lock);
200 ObtainReadLock(&tdc->mflock);
202 ReleaseReadLock(&tdc->mflock);
209 /* now data may have started flowing in (if DFFetching is on). If
210 * data is now streaming in, then wait for some interesting stuff.
213 while (!code && (tdc->dflags & DFFetching) &&
214 tdc->validPos <= filePos) {
215 /* too early: wait for DFFetching flag to vanish,
216 * or data to appear */
217 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
218 ICL_TYPE_STRING, __FILE__,
219 ICL_TYPE_INT32, __LINE__,
220 ICL_TYPE_POINTER, tdc,
221 ICL_TYPE_INT32, tdc->dflags);
222 ReleaseReadLock(&tdc->lock);
223 ReleaseReadLock(&avc->lock);
224 code = afs_osi_SleepSig(&tdc->validPos);
225 ObtainReadLock(&avc->lock);
226 ObtainReadLock(&tdc->lock);
232 /* fetching flag gone, data is here, or we never tried (BBusy for instance) */
233 if (tdc->dflags & DFFetching) {
234 /* still fetching, some new data is here: compute length and offset */
235 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
236 len = tdc->validPos - filePos;
239 /* no longer fetching, verify data version (avoid new GetDCache call) */
240 if (hsame(avc->m.DataVersion, tdc->f.versionNo)) {
241 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
242 len = tdc->f.chunkBytes - offset;
245 /* don't have current data, so get it below */
246 ReleaseReadLock(&tdc->lock);
/* Type-1 GetDCache: fetch synchronously.  avc->lock must be dropped
 * around it, and tdc may legitimately come back null here. */
253 ReleaseReadLock(&avc->lock);
254 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
255 ObtainReadLock(&avc->lock);
256 if (tdc) ObtainReadLock(&tdc->lock);
271 if (len > totalLength) len = totalLength; /* will read len bytes */
272 if (len <= 0) { /* shouldn't get here if DFFetching is on */
273 /* read past the end of a chunk, may not be at next chunk yet, and yet
274 also not at eof, so may have to supply fake zeros */
275 len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
276 if (len > totalLength) len = totalLength; /* and still within xfr request */
277 tlen = avc->m.Length - offset; /* and still within file */
278 if (len > tlen) len = tlen;
/* Clamp to the shared zero buffer (afs_zeros is AFS_ZEROS bytes). */
279 if (len > AFS_ZEROS) len = sizeof(afs_zeros); /* and in 0 buffer */
280 afsio_copy(auio, &tuio, tvec);
/* NOTE(review): trimlen is set on a line elided from this view —
 * presumably trimlen = len; confirm against the full source. */
282 afsio_trim(&tuio, trimlen);
283 AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, &tuio, code);
290 /* get the data from the mem cache */
292 /* mung uio structure to be right for this transfer */
293 afsio_copy(auio, &tuio, tvec);
295 afsio_trim(&tuio, trimlen);
296 tuio.afsio_offset = offset;
298 code = afs_MemReadUIO(tdc->f.inode, &tuio);
305 /* otherwise we've read some, fixup length, etc and continue with next seg */
306 len = len - tuio.afsio_resid; /* compute amount really transferred */
308 afsio_skip(auio, trimlen); /* update input uio structure */
310 transferLength += len;
/* filePos advance is on a line elided from this view. */
313 if (len <= 0) break; /* surprise eof */
314 } /* the whole while loop */
319 * tdc->lock(R) if tdc
322 /* if we make it here with tdc non-zero, then it is the last chunk we
323 * dealt with, and we have to release it when we're done. We hold on
324 * to it in case we need to do a prefetch.
327 ReleaseReadLock(&tdc->lock);
328 #if !defined(AFS_VM_RDWR_ENV)
329 /* try to queue prefetch, if needed */
331 afs_PrefetchChunk(avc, tdc, acred, &treq);
337 ReleaseReadLock(&avc->lock);
338 osi_FreeSmallSpace(tvec);
339 error = afs_CheckCode(error, &treq, 10);
343 /* called with the dcache entry triggering the fetch, the vcache entry involved,
344 * and a vrequest for the read call. Marks the dcache entry as having already
345 * triggered a prefetch, starts the prefetch going and sets the DFFetchReq
346 * flag in the prefetched block, so that the next call to read knows to wait
347 * for the daemon to start doing things.
349 * This function must be called with the vnode at least read-locked, and
350 * no locks on the dcache, because it plays around with dcache entries.
 *
 * (Several lines of this function are elided from this view; commentary
 * below covers only what is visible.)
 */
352 void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
353 struct AFS_UCRED *acred, struct vrequest *areq)
355 register struct dcache *tdc;
357 afs_size_t j1, j2; /* junk vbls for GetDCache to trash */
/* Prefetch target: the base offset of the chunk after the one just read. */
359 offset = adc->f.chunk+1; /* next chunk we'll need */
360 offset = AFS_CHUNKTOBASE(offset); /* base of next chunk */
361 ObtainReadLock(&adc->lock);
362 ObtainSharedLock(&adc->mflock, 662);
/* Only prefetch when: the next chunk is inside the file, this dcache
 * entry hasn't already triggered a prefetch, and the bkg daemon queue
 * has room.  Otherwise fall through and just drop the locks. */
363 if (offset < avc->m.Length && !(adc->mflags & DFNextStarted) && !afs_BBusy()) {
366 UpgradeSToWLock(&adc->mflock, 663);
367 adc->mflags |= DFNextStarted; /* we've tried to prefetch for this guy */
368 ReleaseWriteLock(&adc->mflock);
369 ReleaseReadLock(&adc->lock);
371 tdc = afs_GetDCache(avc, offset, areq, &j1, &j2, 2); /* type 2 never returns 0 */
372 ObtainSharedLock(&tdc->mflock, 651);
373 if (!(tdc->mflags & DFFetchReq)) {
374 /* ask the daemon to do the work */
375 UpgradeSToWLock(&tdc->mflock, 652);
376 tdc->mflags |= DFFetchReq; /* guaranteed to be cleared by BKG or GetDCache */
377 /* last parm (1) tells bkg daemon to do an afs_PutDCache when it is done,
378 * since we don't want to wait for it to finish before doing so ourselves.
380 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
381 (afs_size_t) offset, (afs_size_t) 1, tdc);
/* BQueue failure path (condition elided from this view): */
383 /* Bkg table full; just abort non-important prefetching to avoid deadlocks */
384 tdc->mflags &= ~DFFetchReq;
385 ReleaseWriteLock(&tdc->mflock);
/* Roll back DFNextStarted so a later read may retry the prefetch. */
389 * DCLOCKXXX: This is a little sketchy, since someone else
390 * could have already started a prefetch.. In practice,
391 * this probably doesn't matter; at most it would cause an
392 * extra slot in the BKG table to be used up when someone
393 * prefetches this for the second time.
395 ObtainReadLock(&adc->lock);
396 ObtainWriteLock(&adc->mflock, 664);
397 adc->mflags &= ~DFNextStarted;
398 ReleaseWriteLock(&adc->mflock);
399 ReleaseReadLock(&adc->lock);
401 ReleaseWriteLock(&tdc->mflock);
/* Someone else already queued the fetch: nothing to do. */
404 ReleaseSharedLock(&tdc->mflock);
/* Condition at line 363 was false: just release what we took. */
408 ReleaseSharedLock(&adc->mflock);
409 ReleaseReadLock(&adc->lock);
414 /* if the vcache is up-to-date, and the request fits entirely into the chunk
415 * that the hint here references, then we just use it quickly, otherwise we
416 * have to call the slow read.
418 * This could be generalized in several ways to take advantage of partial
419 * state even when all the chips don't fall the right way. For instance,
420 * if the hint is good and the first part of the read request can be
421 * satisfied from the chunk, then just do the read. After the read has
422 * completed, check to see if there's more. (Chances are there won't be.)
423 * If there is more, then just call afs_UFSReadSlow and let it do the rest.
425 * For the time being, I'm ignoring quick.f, but it should be used at
427 * do this in the future avc->quick.f = tfile; but I think it
428 * has to be done under a write lock, but don't want to wait on the
431 /* everywhere that a dcache can be freed (look for NULLIDX)
432 * probably does it under a write lock on xdcache. Need to invalidate
434 * Also need to worry about DFFetching, and IFFree, I think. */
/* savedc: file-scope debugging stash (its use is on lines elided from
 * this view). */
435 static struct dcache *savedc = 0;
/*
 * afs_UFSReadFast
 *
 * Fast-path read: if avc's cached "quick" dcache hint is still valid and
 * the whole request fits inside that one chunk, read straight from the
 * underlying cache file via the platform's vnode read op, skipping the
 * general chunk loop.  On any mismatch, fall through to afs_UFSRead()
 * at the bottom.  (Several lines are elided from this view, including
 * the declarations of readLocked/offDiff and some #else/#endif lines.)
 */
437 int afs_UFSReadFast(register struct vcache *avc, struct uio *auio,
438 struct AFS_UCRED *acred, daddr_t albn, struct buf **abpp, int noLock)
440 struct vrequest treq;
444 struct osi_file *tfile;
448 ObtainReadLock(&avc->lock);
/* afs_xdcache(R) pins the dcache index arrays while we validate the
 * hint; dcache frees are said (comment above) to need xdcache(W). */
449 ObtainReadLock(&afs_xdcache);
451 if ((avc->states & CStatd) /* up to date */
452 && (tdc = avc->quick.dc) && (tdc->index != NULLIDX)
453 && !(afs_indexFlags[tdc->index] & (IFFree | IFDiscarded))) {
458 ReleaseReadLock(&afs_xdcache);
/* Stamp check #1, unlocked; re-checked below under tdc->lock. */
459 if (tdc->stamp == avc->quick.stamp) {
461 ObtainReadLock(&tdc->lock);
/* Fast path is usable only if the hint still matches, the request lies
 * entirely within the hinted chunk, and no fetch is in progress. */
464 if ((tdc->stamp == avc->quick.stamp) /* hint matches */
465 && ((offDiff = (auio->afsio_offset - avc->quick.minLoc)) >= 0)
466 && (tdc->f.chunkBytes >= auio->afsio_resid + offDiff)
467 && !(tdc->dflags & DFFetching)) { /* fits in chunk */
/* Translate the file offset into an offset within the cache file;
 * restored (line 544) after the read. */
469 auio->afsio_offset -= avc->quick.minLoc;
471 afs_Trace4(afs_iclSetp, CM_TRACE_READFAST, ICL_TYPE_POINTER, avc,
472 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(auio->afsio_offset),
473 ICL_TYPE_INT32, auio->afsio_resid,
474 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
476 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
/* Per-platform dispatch of the actual cache-file read; the #if/#elif
 * structure is partially elided in this view.  Each variant reads
 * auio from tfile->vnode with afs_osi_cred. */
481 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, auio, NULL, NULL, NULL, &afs_osi_cred);
485 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, auio, NULL, NULL);
487 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t)&auio->afsio_offset, auio, NULL, NULL, -1);
493 VOP_RWLOCK(tfile->vnode, 0);
494 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
495 VOP_RWUNLOCK(tfile->vnode, 0);
498 #if defined(AFS_SGI_ENV)
500 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
501 AFS_VOP_READ(tfile->vnode, auio, IO_ISLOCKED, &afs_osi_cred, code);
502 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
506 auio->uio_rw = UIO_READ;
508 VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred, code);
510 #else /* AFS_OSF_ENV */
511 #if defined(AFS_HPUX100_ENV)
513 code = VOP_RDWR(tfile->vnode, auio, UIO_READ, 0, &afs_osi_cred);
516 #if defined(AFS_LINUX20_ENV)
518 code = osi_file_uio_rdwr(tfile, auio, UIO_READ);
521 #if defined(AFS_DARWIN_ENV)
523 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
524 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
525 VOP_UNLOCK(tfile->vnode, 0, current_proc());
528 #if defined(AFS_FBSD_ENV)
530 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
531 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
532 VOP_UNLOCK(tfile->vnode, 0, curproc);
535 code = VOP_RDWR(tfile->vnode, auio, UIO_READ, 0, &afs_osi_cred);
/* Restore the caller-visible file offset. */
544 auio->afsio_offset += avc->quick.minLoc;
546 /* Fix up LRU info */
547 hset(afs_indexTimes[tdc->index], afs_indexCounter);
548 hadd32(afs_indexCounter, 1);
/* NOTE(review): afs_indexTimes/afs_indexCounter updates are normally
 * made under afs_xdcache — that lock was released at line 458; confirm
 * against the full source whether it is re-taken on an elided line. */
551 ReleaseReadLock(&avc->lock);
552 #if !defined(AFS_VM_RDWR_ENV)
553 if (!(code = afs_InitReq(&treq, acred))) {
554 if (!(tdc->mflags & DFNextStarted))
555 afs_PrefetchChunk(avc, tdc, acred, &treq);
/* readLocked is declared/set on elided lines — presumably tracks
 * whether tdc->lock(R) is still held on this exit path. */
559 if (readLocked) ReleaseReadLock(&tdc->lock);
563 if (!tdc->f.chunkBytes) { /* debugging f.chunkBytes == 0 problem */
566 if (readLocked) ReleaseReadLock(&tdc->lock);
569 ReleaseReadLock(&afs_xdcache);
572 /* come here if fast path doesn't work for some reason or other */
574 ReleaseReadLock(&avc->lock);
575 return afs_UFSRead(avc, auio, acred, albn, abpp, noLock);
/*
 * afs_UFSRead
 *
 * General (slow-path) read for a disk-cache client: walk the request
 * chunk by chunk, waiting for or scheduling fetches as needed, and read
 * each chunk's bytes from its cache file via the platform vnode op.
 * Structure parallels afs_MemRead() above, plus open-file "ihint"
 * handling.  This view of the function is partial (lines elided, and it
 * is truncated before the final return), so comments cover only what is
 * visible.
 *
 * Locking: avc->lock(R) across the loop, tdc->lock(R) per chunk; all
 * locks dropped around afs_osi_SleepSig() and re-taken in avc -> tdc ->
 * mflock order.
 */
578 int afs_UFSRead(register struct vcache *avc, struct uio *auio,
579 struct AFS_UCRED *acred, daddr_t albn, struct buf **abpp, int noLock)
581 afs_size_t totalLength;
582 afs_size_t transferLength;
584 afs_size_t offset, len, tlen;
586 struct dcache *tdc=0;
590 struct osi_file *tfile;
594 struct vrequest treq;
596 AFS_STATCNT(afs_UFSRead);
/* Fail early if the vcache already carries a sticky error. */
597 if (avc && avc->vc_error)
600 /* check that we have the latest status info in the vnode cache */
601 if ((code = afs_InitReq(&treq, acred))) return code;
604 osi_Panic ("null avc in afs_UFSRead");
606 code = afs_VerifyVCache(avc, &treq);
608 code = afs_CheckCode(code, &treq, 11); /* failed to get it */
/* NFS-translator requests need an explicit PRSFS_READ ACL check. */
614 #ifndef AFS_VM_RDWR_ENV
615 if (AFS_NFSXLATORREQ(acred)) {
616 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
617 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
618 return afs_CheckCode(EACCES, &treq, 12);
/* Scratch iovec for afsio_copy/afsio_trim; freed at the bottom. */
623 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
624 totalLength = auio->afsio_resid;
625 filePos = auio->afsio_offset;
626 afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
627 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
628 ICL_TYPE_INT32, totalLength,
629 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
633 ObtainReadLock(&avc->lock);
634 #if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
635 if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
636 hset(avc->flushDV, avc->m.DataVersion);
/* Main transfer loop: one iteration per chunk touched by the request. */
640 while (totalLength > 0) {
641 /* read all of the cached info */
642 if (filePos >= avc->m.Length) break; /* all done */
645 ReleaseReadLock(&tdc->lock);
648 tdc = afs_FindDCache(avc, filePos);
650 ObtainReadLock(&tdc->lock);
651 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
652 len = tdc->f.chunkBytes - offset;
655 /* a tricky question: does the presence of the DFFetching flag
656 mean that we're fetching the latest version of the file? No.
657 The server could update the file as soon as the fetch responsible
658 for the setting of the DFFetching flag completes.
660 However, the presence of the DFFetching flag (visible under
661 a dcache read lock since it is set and cleared only under a
662 dcache write lock) means that we're fetching as good a version
663 as was known to this client at the time of the last call to
664 afs_VerifyVCache, since the latter updates the stat cache's
665 m.DataVersion field under a vcache write lock, and from the
666 time that the DFFetching flag goes on in afs_GetDCache (before
667 the fetch starts), to the time it goes off (after the fetch
668 completes), afs_GetDCache keeps at least a read lock on the
671 This means that if the DFFetching flag is set, we can use that
672 data for any reads that must come from the current version of
673 the file (current == m.DataVersion).
675 Another way of looking at this same point is this: if we're
676 fetching some data and then try do an afs_VerifyVCache, the
677 VerifyVCache operation will not complete until after the
678 DFFetching flag is turned off and the dcache entry's f.versionNo
681 Note, by the way, that if DFFetching is set,
682 m.DataVersion > f.versionNo (the latter is not updated until
683 after the fetch completes).
686 ReleaseReadLock(&tdc->lock);
687 afs_PutDCache(tdc); /* before reusing tdc */
/* Type-2 GetDCache: returns an entry without starting a fetch itself. */
689 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
690 ObtainReadLock(&tdc->lock);
694 /* now, first try to start transfer, if we'll need the data. If
695 * data already coming, we don't need to do this, obviously. Type
696 * 2 requests never return a null dcache entry, btw. */
697 if (!(tdc->dflags & DFFetching)
698 && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
699 /* have cache entry, it is not coming in now, and we'll need new data */
701 if (trybusy && !afs_BBusy()) {
703 /* daemon is not busy */
704 ObtainSharedLock(&tdc->mflock, 667);
705 if (!(tdc->mflags & DFFetchReq)) {
706 UpgradeSToWLock(&tdc->mflock, 668);
707 tdc->mflags |= DFFetchReq;
708 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
709 (afs_size_t) filePos, (afs_size_t) 0,
/* BQueue failure path (condition elided from this view): */
712 /* Bkg table full; retry deadlocks */
713 tdc->mflags &= ~DFFetchReq;
714 trybusy = 0; /* Avoid bkg daemon since they're too busy */
715 ReleaseWriteLock(&tdc->mflock);
718 ConvertWToSLock(&tdc->mflock);
/* Wait (all locks dropped) for the daemon to clear DFFetchReq;
 * re-take locks in avc -> tdc -> mflock order. */
721 ConvertSToRLock(&tdc->mflock);
722 while (!code && tdc->mflags & DFFetchReq) {
723 /* don't need waiting flag on this one */
724 ReleaseReadLock(&tdc->mflock);
725 ReleaseReadLock(&tdc->lock);
726 ReleaseReadLock(&avc->lock);
727 code = afs_osi_SleepSig(&tdc->validPos);
728 ObtainReadLock(&avc->lock);
729 ObtainReadLock(&tdc->lock);
730 ObtainReadLock(&tdc->mflock);
732 ReleaseReadLock(&tdc->mflock);
739 /* now data may have started flowing in (if DFFetching is on). If
740 * data is now streaming in, then wait for some interesting stuff.
743 while (!code && (tdc->dflags & DFFetching) &&
744 tdc->validPos <= filePos) {
745 /* too early: wait for DFFetching flag to vanish,
746 * or data to appear */
747 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
748 ICL_TYPE_STRING, __FILE__,
749 ICL_TYPE_INT32, __LINE__,
750 ICL_TYPE_POINTER, tdc,
751 ICL_TYPE_INT32, tdc->dflags);
752 ReleaseReadLock(&tdc->lock);
753 ReleaseReadLock(&avc->lock);
754 code = afs_osi_SleepSig(&tdc->validPos);
755 ObtainReadLock(&avc->lock);
756 ObtainReadLock(&tdc->lock);
762 /* fetching flag gone, data is here, or we never tried
763 * (BBusy for instance) */
764 if (tdc->dflags & DFFetching) {
765 /* still fetching, some new data is here:
766 * compute length and offset */
767 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
768 len = tdc->validPos - filePos;
771 /* no longer fetching, verify data version (avoid new
773 if (hsame(avc->m.DataVersion, tdc->f.versionNo)) {
774 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
775 len = tdc->f.chunkBytes - offset;
778 /* don't have current data, so get it below */
779 ReleaseReadLock(&tdc->lock);
/* Type-1 GetDCache: fetch synchronously; avc->lock dropped around it,
 * and tdc may legitimately come back null here. */
786 ReleaseReadLock(&avc->lock);
787 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
788 ObtainReadLock(&avc->lock);
789 if (tdc) ObtainReadLock(&tdc->lock);
793 afs_Trace3(afs_iclSetp, CM_TRACE_VNODEREAD,
794 ICL_TYPE_POINTER, tdc,
795 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(offset),
796 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(len));
801 if (len > totalLength) len = totalLength; /* will read len bytes */
802 if (len <= 0) { /* shouldn't get here if DFFetching is on */
803 /* read past the end of a chunk, may not be at next chunk yet, and yet
804 also not at eof, so may have to supply fake zeros */
805 len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
806 if (len > totalLength) len = totalLength; /* and still within xfr request */
807 tlen = avc->m.Length - offset; /* and still within file */
808 if (len > tlen) len = tlen;
/* Clamp to the shared zero buffer (afs_zeros is AFS_ZEROS bytes). */
809 if (len > AFS_ZEROS) len = sizeof(afs_zeros); /* and in 0 buffer */
810 afsio_copy(auio, &tuio, tvec);
/* NOTE(review): trimlen is set on an elided line — presumably len. */
812 afsio_trim(&tuio, trimlen);
813 AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, &tuio, code);
820 /* get the data from the file */
/* NOTE(review): assignment inside the condition is intentional — reuse
 * the cached open-file hint when present, after sanity-checking that it
 * still refers to this chunk's inode. */
822 if (tfile = tdc->ihint) {
823 if (tdc->f.inode != tfile->inum){
824 afs_warn( "afs_UFSRead: %x hint mismatch tdc %d inum %d\n",
825 tdc, tdc->f.inode, tfile->inum );
/* Stale hint: drop it and fall through to a fresh osi_UFSOpen. */
827 tdc->ihint = tfile = 0;
837 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
838 /* mung uio structure to be right for this transfer */
839 afsio_copy(auio, &tuio, tvec);
841 afsio_trim(&tuio, trimlen);
842 tuio.afsio_offset = offset;
/* Per-platform dispatch of the cache-file read (the #if/#elif structure
 * is partially elided in this view).  Each variant reads &tuio from
 * tfile->vnode with afs_osi_cred. */
846 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL,
847 NULL, &afs_osi_cred);
851 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL);
852 /* Flush all JFS pages now for big performance gain in big file cases
853 * If we do something like this, must check to be sure that AFS file
854 * isn't mmapped... see afs_gn_map() for why.
857 if (tfile->vnode->v_gnode && tfile->vnode->v_gnode->gn_seg) {
858 many different ways to do similar things:
859 so far, the best performing one is #2, but #1 might match it if we
860 straighten out the confusion regarding which pages to flush. It
862 1. vm_flushp(tfile->vnode->v_gnode->gn_seg, 0, len/PAGESIZE - 1);
863 2. vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
864 (len + PAGESIZE-1)/PAGESIZE);
865 3. vms_inactive(tfile->vnode->v_gnode->gn_seg) Doesn't work correctly
866 4. vms_delete(tfile->vnode->v_gnode->gn_seg) probably also fails
867 tfile->vnode->v_gnode->gn_seg = NULL;
871 Unfortunately, this seems to cause frequent "cache corruption" episodes.
872 vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
873 (len + PAGESIZE-1)/PAGESIZE);
877 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t)&offset, &tuio, NULL, NULL, -1);
883 VOP_RWLOCK(tfile->vnode, 0);
884 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
885 VOP_RWUNLOCK(tfile->vnode, 0);
888 #if defined(AFS_SGI_ENV)
890 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
891 AFS_VOP_READ(tfile->vnode, &tuio, IO_ISLOCKED, &afs_osi_cred,
893 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
897 tuio.uio_rw = UIO_READ;
899 VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred, code);
901 #else /* AFS_OSF_ENV */
903 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
905 #if defined(AFS_HPUX100_ENV)
907 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
910 #if defined(AFS_LINUX20_ENV)
912 code = osi_file_uio_rdwr(tfile, &tuio, UIO_READ);
915 #if defined(AFS_DARWIN_ENV)
917 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
918 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
919 VOP_UNLOCK(tfile->vnode, 0, current_proc());
922 #if defined(AFS_FBSD_ENV)
924 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
925 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
926 VOP_UNLOCK(tfile->vnode, 0, curproc);
929 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
/* Save the open file as a hint if there's a free hint slot; maxIHint
 * and nihints are declared near the top of the file (body of this
 * branch is elided from this view). */
941 if (!tdc->ihint && nihints < maxIHint) {
954 /* otherwise we've read some, fixup length, etc and continue with next seg */
955 len = len - tuio.afsio_resid; /* compute amount really transferred */
957 afsio_skip(auio, trimlen); /* update input uio structure */
959 transferLength += len;
/* filePos advance is on a line elided from this view. */
961 if (len <= 0) break; /* surprise eof */
964 /* if we make it here with tdc non-zero, then it is the last chunk we
965 * dealt with, and we have to release it when we're done. We hold on
966 * to it in case we need to do a prefetch, obviously.
969 ReleaseReadLock(&tdc->lock);
970 #if !defined(AFS_VM_RDWR_ENV)
971 /* try to queue prefetch, if needed */
973 if (!(tdc->mflags & DFNextStarted))
974 afs_PrefetchChunk(avc, tdc, acred, &treq);
980 ReleaseReadLock(&avc->lock);
982 osi_FreeSmallSpace(tvec);
983 error = afs_CheckCode(error, &treq, 13);