2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 #include <afsconfig.h>
20 #include "../afs/param.h"
24 #include "../afs/sysincludes.h" /* Standard vendor system headers */
25 #include "../afs/afsincludes.h" /* Afs-based standard headers */
26 #include "../afs/afs_stats.h" /* statistics */
27 #include "../afs/afs_cbqueue.h"
28 #include "../afs/nfsclient.h"
29 #include "../afs/afs_osidnlc.h"
32 extern char afs_zeros[AFS_ZEROS];
35 afs_int32 nihints; /* # of above actually in-use */
39 /* Imported variables */
40 extern afs_rwlock_t afs_xdcache;
41 extern unsigned char *afs_indexFlags;
42 extern afs_hyper_t *afs_indexTimes; /* Dcache entry Access times */
43 extern afs_hyper_t afs_indexCounter; /* Fake time for marking index */
46 /* Forward declarations */
47 void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
48 struct AFS_UCRED *acred, struct vrequest *areq);
/*
 * afs_MemRead -- read data for a client using the in-memory chunk cache
 * (data is copied out with afs_MemReadUIO rather than through a UFS
 * cache file).  Walks the request chunk by chunk: locate or fetch the
 * dcache entry covering filePos, wait for any in-progress background
 * fetch to deliver enough bytes, copy to the caller's uio, and finally
 * queue a prefetch of the next chunk via afs_PrefetchChunk.
 *
 * NOTE(review): this listing has elided lines (the embedded numbering
 * jumps), so some declarations and branch boundaries are not visible;
 * comments below describe only the visible code.
 */
50 int afs_MemRead(register struct vcache *avc, struct uio *auio, struct AFS_UCRED *acred,
51 daddr_t albn, struct buf **abpp, int noLock)
53 afs_size_t totalLength;
54 afs_size_t transferLength;
56 afs_size_t offset, len, tlen;
59 afs_int32 error, trybusy=1;
66 AFS_STATCNT(afs_MemRead);
70 /* check that we have the latest status info in the vnode cache */
71 if ((code = afs_InitReq(&treq, acred))) return code;
73 code = afs_VerifyVCache(avc, &treq);
75 code = afs_CheckCode(code, &treq, 8); /* failed to get it */
80 #ifndef AFS_VM_RDWR_ENV
/* NFS-translator requests must pass an explicit ACL read check */
81 if (AFS_NFSXLATORREQ(acred)) {
82 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
83 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
84 return afs_CheckCode(EACCES, &treq, 9);
89 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
90 totalLength = auio->afsio_resid;
91 filePos = auio->afsio_offset;
92 afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
93 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
94 ICL_TYPE_INT32, totalLength,
95 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
99 ObtainReadLock(&avc->lock);
100 #if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
/* record the data version current when the text flush state was reset */
101 if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
102 hset(avc->flushDV, avc->m.DataVersion);
/* main transfer loop: one iteration per dcache chunk touched */
110 while (totalLength > 0) {
111 /* read all of the cached info */
112 if (filePos >= avc->m.Length) break; /* all done */
115 ReleaseReadLock(&tdc->lock);
118 tdc = afs_FindDCache(avc, filePos);
120 ObtainReadLock(&tdc->lock);
121 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
122 len = tdc->f.chunkBytes - offset;
125 /* a tricky question: does the presence of the DFFetching flag
126 mean that we're fetching the latest version of the file? No.
127 The server could update the file as soon as the fetch responsible
128 for the setting of the DFFetching flag completes.
130 However, the presence of the DFFetching flag (visible under
131 a dcache read lock since it is set and cleared only under a
132 dcache write lock) means that we're fetching as good a version
133 as was known to this client at the time of the last call to
134 afs_VerifyVCache, since the latter updates the stat cache's
135 m.DataVersion field under a vcache write lock, and from the
136 time that the DFFetching flag goes on in afs_GetDCache (before
137 the fetch starts), to the time it goes off (after the fetch
138 completes), afs_GetDCache keeps at least a read lock on the
141 This means that if the DFFetching flag is set, we can use that
142 data for any reads that must come from the current version of
143 the file (current == m.DataVersion).
145 Another way of looking at this same point is this: if we're
146 fetching some data and then try to do an afs_VerifyVCache, the
147 VerifyVCache operation will not complete until after the
148 DFFetching flag is turned off and the dcache entry's f.versionNo
151 Note, by the way, that if DFFetching is set,
152 m.DataVersion > f.versionNo (the latter is not updated until
153 after the fetch completes).
156 ReleaseReadLock(&tdc->lock);
157 afs_PutDCache(tdc); /* before reusing tdc */
159 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
160 ObtainReadLock(&tdc->lock);
161 /* now, first try to start transfer, if we'll need the data. If
162 * data already coming, we don't need to do this, obviously. Type
163 * 2 requests never return a null dcache entry, btw.
165 if (!(tdc->dflags & DFFetching)
166 && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
167 /* have cache entry, it is not coming in now,
168 * and we'll need new data */
170 if (trybusy && !afs_BBusy()) {
172 /* daemon is not busy */
173 ObtainSharedLock(&tdc->mflock, 665);
174 if (!(tdc->mflags & DFFetchReq)) {
175 /* start the daemon (may already be running, however) */
176 UpgradeSToWLock(&tdc->mflock, 666);
177 tdc->mflags |= DFFetchReq;
178 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
179 (afs_size_t)filePos, (afs_size_t) 0,
/* NOTE(review): surrounding branch is elided; presumably this path runs
 * when afs_BQueue returned no entry (bkg table full), as in afs_UFSRead */
182 tdc->mflags &= ~DFFetchReq;
183 trybusy = 0; /* Avoid bkg daemon since they're too busy */
184 ReleaseWriteLock(&tdc->mflock);
187 ConvertWToSLock(&tdc->mflock);
188 /* don't use bp pointer! */
191 ConvertSToRLock(&tdc->mflock);
/* interruptible wait for the bkg daemon to clear DFFetchReq; all three
 * read locks are dropped around the sleep and retaken in the same order */
192 while (!code && tdc->mflags & DFFetchReq) {
193 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
194 ICL_TYPE_STRING, __FILE__,
195 ICL_TYPE_INT32, __LINE__,
196 ICL_TYPE_POINTER, tdc,
197 ICL_TYPE_INT32, tdc->dflags);
198 /* don't need waiting flag on this one */
199 ReleaseReadLock(&tdc->mflock);
200 ReleaseReadLock(&tdc->lock);
201 ReleaseReadLock(&avc->lock);
202 code = afs_osi_SleepSig(&tdc->validPos);
203 ObtainReadLock(&avc->lock);
204 ObtainReadLock(&tdc->lock);
205 ObtainReadLock(&tdc->mflock);
207 ReleaseReadLock(&tdc->mflock);
214 /* now data may have started flowing in (if DFFetching is on). If
215 * data is now streaming in, then wait for some interesting stuff.
218 while (!code && (tdc->dflags & DFFetching) &&
219 tdc->validPos <= filePos) {
220 /* too early: wait for DFFetching flag to vanish,
221 * or data to appear */
222 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
223 ICL_TYPE_STRING, __FILE__,
224 ICL_TYPE_INT32, __LINE__,
225 ICL_TYPE_POINTER, tdc,
226 ICL_TYPE_INT32, tdc->dflags);
227 ReleaseReadLock(&tdc->lock);
228 ReleaseReadLock(&avc->lock);
229 code = afs_osi_SleepSig(&tdc->validPos);
230 ObtainReadLock(&avc->lock);
231 ObtainReadLock(&tdc->lock);
237 /* fetching flag gone, data is here, or we never tried (BBusy for instance) */
238 if (tdc->dflags & DFFetching) {
239 /* still fetching, some new data is here: compute length and offset */
240 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
241 len = tdc->validPos - filePos;
244 /* no longer fetching, verify data version (avoid new GetDCache call) */
245 if (hsame(avc->m.DataVersion, tdc->f.versionNo)) {
246 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
247 len = tdc->f.chunkBytes - offset;
250 /* don't have current data, so get it below */
251 afs_Trace3(afs_iclSetp, CM_TRACE_VERSIONNO,
252 ICL_TYPE_INT64, ICL_HANDLE_OFFSET(filePos),
253 ICL_TYPE_HYPER, &avc->m.DataVersion,
254 ICL_TYPE_HYPER, &tdc->f.versionNo);
255 ReleaseReadLock(&tdc->lock);
/* flag 1: per the comment in afs_UFSRead, afs_GetDCache performs the
 * FetchData RPC synchronously on this path */
262 ReleaseReadLock(&avc->lock);
263 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
264 ObtainReadLock(&avc->lock);
265 if (tdc) ObtainReadLock(&tdc->lock);
280 if (len > totalLength) len = totalLength; /* will read len bytes */
281 if (len <= 0) { /* shouldn't get here if DFFetching is on */
282 /* read past the end of a chunk, may not be at next chunk yet, and yet
283 also not at eof, so may have to supply fake zeros */
284 len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
285 if (len > totalLength) len = totalLength; /* and still within xfr request */
286 tlen = avc->m.Length - offset; /* and still within file */
287 if (len > tlen) len = tlen;
288 if (len > AFS_ZEROS) len = sizeof(afs_zeros); /* and in 0 buffer */
289 afsio_copy(auio, &tuio, tvec);
291 afsio_trim(&tuio, trimlen);
292 AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, &tuio, code);
299 /* get the data from the mem cache */
301 /* mung uio structure to be right for this transfer */
302 afsio_copy(auio, &tuio, tvec);
304 afsio_trim(&tuio, trimlen);
305 tuio.afsio_offset = offset;
307 code = afs_MemReadUIO(tdc->f.inode, &tuio);
314 /* otherwise we've read some, fixup length, etc and continue with next seg */
315 len = len - tuio.afsio_resid; /* compute amount really transferred */
317 afsio_skip(auio, trimlen); /* update input uio structure */
319 transferLength += len;
322 if (len <= 0) break; /* surprise eof */
323 } /* the whole while loop */
328 * tdc->lock(R) if tdc
331 /* if we make it here with tdc non-zero, then it is the last chunk we
332 * dealt with, and we have to release it when we're done. We hold on
333 * to it in case we need to do a prefetch.
336 ReleaseReadLock(&tdc->lock);
337 #if !defined(AFS_VM_RDWR_ENV)
338 /* try to queue prefetch, if needed */
340 afs_PrefetchChunk(avc, tdc, acred, &treq);
346 ReleaseReadLock(&avc->lock);
347 osi_FreeSmallSpace(tvec);
348 error = afs_CheckCode(error, &treq, 10);
352 /* called with the dcache entry triggering the fetch, the vcache entry involved,
353 * and a vrequest for the read call. Marks the dcache entry as having already
354 * triggered a prefetch, starts the prefetch going and sets the DFFetchReq
355 * flag in the prefetched block, so that the next call to read knows to wait
356 * for the daemon to start doing things.
358 * This function must be called with the vnode at least read-locked, and
359 * no locks on the dcache, because it plays around with dcache entries.
/*
 * afs_PrefetchChunk -- queue a background fetch of the chunk following
 * adc.  Marks adc with DFNextStarted (so later reads of the same chunk
 * don't re-queue), grabs the next chunk's dcache entry with a type-2
 * afs_GetDCache (never NULL), sets DFFetchReq on it and hands it to the
 * bkg daemon via afs_BQueue(BOP_FETCH, ...).  If the bkg table is full,
 * both flags are rolled back so a later read can retry.
 *
 * Caller must hold the vnode at least read-locked and no dcache locks
 * (see the block comment above this function in the file).
 * NOTE(review): listing has elided lines; comments describe visible code.
 */
361 void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
362 struct AFS_UCRED *acred, struct vrequest *areq)
364 register struct dcache *tdc;
366 afs_size_t j1, j2; /* junk vbls for GetDCache to trash */
368 offset = adc->f.chunk+1; /* next chunk we'll need */
369 offset = AFS_CHUNKTOBASE(offset); /* base of next chunk */
370 ObtainReadLock(&adc->lock);
371 ObtainSharedLock(&adc->mflock, 662);
/* only prefetch if the next chunk is inside the file, we haven't already
 * tried for this chunk, and the bkg daemons have room */
372 if (offset < avc->m.Length && !(adc->mflags & DFNextStarted) && !afs_BBusy()) {
375 UpgradeSToWLock(&adc->mflock, 663);
376 adc->mflags |= DFNextStarted; /* we've tried to prefetch for this guy */
377 ReleaseWriteLock(&adc->mflock);
378 ReleaseReadLock(&adc->lock);
380 tdc = afs_GetDCache(avc, offset, areq, &j1, &j2, 2); /* type 2 never returns 0 */
381 ObtainSharedLock(&tdc->mflock, 651);
382 if (!(tdc->mflags & DFFetchReq)) {
383 /* ask the daemon to do the work */
384 UpgradeSToWLock(&tdc->mflock, 652);
385 tdc->mflags |= DFFetchReq; /* guaranteed to be cleared by BKG or GetDCache */
386 /* last parm (1) tells bkg daemon to do an afs_PutDCache when it is done,
387 * since we don't want to wait for it to finish before doing so ourselves.
389 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
390 (afs_size_t) offset, (afs_size_t) 1, tdc);
392 /* Bkg table full; just abort non-important prefetching to avoid deadlocks */
393 tdc->mflags &= ~DFFetchReq;
394 ReleaseWriteLock(&tdc->mflock);
/* roll back DFNextStarted on adc so a later read can retry the prefetch */
398 * DCLOCKXXX: This is a little sketchy, since someone else
399 * could have already started a prefetch. In practice,
400 * this probably doesn't matter; at most it would cause an
401 * extra slot in the BKG table to be used up when someone
402 * prefetches this for the second time.
404 ObtainReadLock(&adc->lock);
405 ObtainWriteLock(&adc->mflock, 664);
406 adc->mflags &= ~DFNextStarted;
407 ReleaseWriteLock(&adc->mflock);
408 ReleaseReadLock(&adc->lock);
410 ReleaseWriteLock(&tdc->mflock);
413 ReleaseSharedLock(&tdc->mflock);
417 ReleaseSharedLock(&adc->mflock);
418 ReleaseReadLock(&adc->lock);
423 /* if the vcache is up-to-date, and the request fits entirely into the chunk
424 * that the hint here references, then we just use it quickly, otherwise we
425 * have to call the slow read.
427 * This could be generalized in several ways to take advantage of partial
428 * state even when all the chips don't fall the right way. For instance,
429 * if the hint is good and the first part of the read request can be
430 * satisfied from the chunk, then just do the read. After the read has
431 * completed, check to see if there's more. (Chances are there won't be.)
432 * If there is more, then just call afs_UFSReadSlow and let it do the rest.
434 * For the time being, I'm ignoring quick.f, but it should be used at
436 * do this in the future avc->quick.f = tfile; but I think it
437 * has to be done under a write lock, but don't want to wait on the
440 /* everywhere that a dcache can be freed (look for NULLIDX)
441 * probably does it under a write lock on xdcache. Need to invalidate
443 * Also need to worry about DFFetching, and IFFree, I think. */
444 static struct dcache *savedc = 0;
/*
 * afs_UFSReadFast -- fast-path read using the per-vnode dcache hint
 * (avc->quick).  If the vcache is up to date (CStatd), the hinted dcache
 * entry is still valid (index not NULLIDX, not IFFree/IFDiscarded, stamp
 * matches), the whole request fits inside that one chunk, and the chunk
 * is not being fetched, read straight from the cache file with the
 * platform's vnode read op and fix up LRU info; otherwise fall through
 * to the slow path, afs_UFSRead.
 *
 * NOTE(review): listing has elided lines (several #ifdef arms and branch
 * boundaries are missing); comments describe only the visible code.
 */
446 int afs_UFSReadFast(register struct vcache *avc, struct uio *auio,
447 struct AFS_UCRED *acred, daddr_t albn, struct buf **abpp, int noLock)
449 struct vrequest treq;
453 struct osi_file *tfile;
457 ObtainReadLock(&avc->lock);
458 ObtainReadLock(&afs_xdcache);
/* hint usable only if the stat cache is valid and the hinted dcache
 * slot has not been freed or discarded out from under us */
460 if ((avc->states & CStatd) /* up to date */
461 && (tdc = avc->quick.dc) && (tdc->index != NULLIDX)
462 && !(afs_indexFlags[tdc->index] & (IFFree | IFDiscarded))) {
467 ReleaseReadLock(&afs_xdcache);
468 if (tdc->stamp == avc->quick.stamp) {
470 ObtainReadLock(&tdc->lock);
/* re-check stamp under the dcache lock, and require the whole request
 * to fit in this chunk with no fetch in progress */
473 if ((tdc->stamp == avc->quick.stamp) /* hint matches */
474 && ((offDiff = (afs_size_t)(auio->afsio_offset - avc->quick.minLoc)) >= 0)
475 && (tdc->f.chunkBytes >= auio->afsio_resid + offDiff)
476 && !(tdc->dflags & DFFetching)) { /* fits in chunk */
/* translate the file offset to a cache-file offset for the read below;
 * restored after the platform read */
478 auio->afsio_offset -= avc->quick.minLoc;
480 afs_Trace4(afs_iclSetp, CM_TRACE_READFAST, ICL_TYPE_POINTER, avc,
481 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(auio->afsio_offset),
482 ICL_TYPE_INT32, auio->afsio_resid,
483 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
485 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
/* per-platform read of the cache file; one arm per OS flavor */
490 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, auio, NULL, NULL, NULL, &afs_osi_cred);
494 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, auio, NULL, NULL);
496 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t)&auio->afsio_offset, auio, NULL, NULL, -1);
502 VOP_RWLOCK(tfile->vnode, 0);
503 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
504 VOP_RWUNLOCK(tfile->vnode, 0);
507 #if defined(AFS_SGI_ENV)
509 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
510 AFS_VOP_READ(tfile->vnode, auio, IO_ISLOCKED, &afs_osi_cred, code);
511 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
515 auio->uio_rw = UIO_READ;
517 VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred, code);
519 #else /* AFS_OSF_ENV */
520 #if defined(AFS_HPUX100_ENV)
522 code = VOP_RDWR(tfile->vnode, auio, UIO_READ, 0, &afs_osi_cred);
525 #if defined(AFS_LINUX20_ENV)
527 code = osi_file_uio_rdwr(tfile, auio, UIO_READ);
530 #if defined(AFS_DARWIN_ENV)
532 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
533 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
534 VOP_UNLOCK(tfile->vnode, 0, current_proc());
537 #if defined(AFS_FBSD_ENV)
539 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
540 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
541 VOP_UNLOCK(tfile->vnode, 0, curproc);
544 code = VOP_RDWR(tfile->vnode, auio, UIO_READ, 0, &afs_osi_cred);
/* restore the caller-visible file offset */
553 auio->afsio_offset += avc->quick.minLoc;
555 /* Fix up LRU info */
556 hset(afs_indexTimes[tdc->index], afs_indexCounter);
557 hadd32(afs_indexCounter, 1);
560 ReleaseReadLock(&avc->lock);
561 #if !defined(AFS_VM_RDWR_ENV)
562 if (!(code = afs_InitReq(&treq, acred))) {
563 if (!(tdc->mflags & DFNextStarted))
564 afs_PrefetchChunk(avc, tdc, acred, &treq);
568 if (readLocked) ReleaseReadLock(&tdc->lock);
572 if (!tdc->f.chunkBytes) { /* debugging f.chunkBytes == 0 problem */
575 if (readLocked) ReleaseReadLock(&tdc->lock);
578 ReleaseReadLock(&afs_xdcache);
581 /* come here if fast path doesn't work for some reason or other */
583 ReleaseReadLock(&avc->lock);
584 return afs_UFSRead(avc, auio, acred, albn, abpp, noLock);
/*
 * afs_UFSRead -- slow-path read through the on-disk (UFS) chunk cache.
 * Same chunk-walking structure as afs_MemRead: find/fetch the dcache
 * entry for filePos, wait out any background fetch, then read from the
 * cache file with the platform vnode op.  Additionally maintains the
 * per-dcache open-file hint (tdc->ihint) to avoid reopening the cache
 * file on every read.
 *
 * NOTE(review): this listing has elided lines and the function continues
 * past the end of the visible region; comments describe visible code only.
 */
587 int afs_UFSRead(register struct vcache *avc, struct uio *auio,
588 struct AFS_UCRED *acred, daddr_t albn, struct buf **abpp, int noLock)
590 afs_size_t totalLength;
591 afs_size_t transferLength;
593 afs_size_t offset, len, tlen;
595 struct dcache *tdc=0;
599 struct osi_file *tfile;
603 struct vrequest treq;
605 AFS_STATCNT(afs_UFSRead);
606 if (avc && avc->vc_error)
609 /* check that we have the latest status info in the vnode cache */
610 if ((code = afs_InitReq(&treq, acred))) return code;
613 osi_Panic ("null avc in afs_UFSRead");
615 code = afs_VerifyVCache(avc, &treq);
617 code = afs_CheckCode(code, &treq, 11); /* failed to get it */
623 #ifndef AFS_VM_RDWR_ENV
/* NFS-translator requests must pass an explicit ACL read check */
624 if (AFS_NFSXLATORREQ(acred)) {
625 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
626 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
627 return afs_CheckCode(EACCES, &treq, 12);
632 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
633 totalLength = auio->afsio_resid;
634 filePos = auio->afsio_offset;
635 afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
636 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
637 ICL_TYPE_INT32, totalLength,
638 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
642 ObtainReadLock(&avc->lock);
643 #if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
644 if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
645 hset(avc->flushDV, avc->m.DataVersion);
/* main transfer loop: one iteration per dcache chunk touched */
649 while (totalLength > 0) {
650 /* read all of the cached info */
651 if (filePos >= avc->m.Length) break; /* all done */
654 ReleaseReadLock(&tdc->lock);
657 tdc = afs_FindDCache(avc, filePos);
659 ObtainReadLock(&tdc->lock);
660 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
661 len = tdc->f.chunkBytes - offset;
664 /* a tricky question: does the presence of the DFFetching flag
665 mean that we're fetching the latest version of the file? No.
666 The server could update the file as soon as the fetch responsible
667 for the setting of the DFFetching flag completes.
669 However, the presence of the DFFetching flag (visible under
670 a dcache read lock since it is set and cleared only under a
671 dcache write lock) means that we're fetching as good a version
672 as was known to this client at the time of the last call to
673 afs_VerifyVCache, since the latter updates the stat cache's
674 m.DataVersion field under a vcache write lock, and from the
675 time that the DFFetching flag goes on in afs_GetDCache (before
676 the fetch starts), to the time it goes off (after the fetch
677 completes), afs_GetDCache keeps at least a read lock on the
680 This means that if the DFFetching flag is set, we can use that
681 data for any reads that must come from the current version of
682 the file (current == m.DataVersion).
684 Another way of looking at this same point is this: if we're
685 fetching some data and then try to do an afs_VerifyVCache, the
686 VerifyVCache operation will not complete until after the
687 DFFetching flag is turned off and the dcache entry's f.versionNo
690 Note, by the way, that if DFFetching is set,
691 m.DataVersion > f.versionNo (the latter is not updated until
692 after the fetch completes).
695 ReleaseReadLock(&tdc->lock);
696 afs_PutDCache(tdc); /* before reusing tdc */
698 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
699 ObtainReadLock(&tdc->lock);
703 /* now, first try to start transfer, if we'll need the data. If
704 * data already coming, we don't need to do this, obviously. Type
705 * 2 requests never return a null dcache entry, btw. */
706 if (!(tdc->dflags & DFFetching)
707 && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
708 /* have cache entry, it is not coming in now, and we'll need new data */
710 if (trybusy && !afs_BBusy()) {
712 /* daemon is not busy */
713 ObtainSharedLock(&tdc->mflock, 667);
714 if (!(tdc->mflags & DFFetchReq)) {
715 UpgradeSToWLock(&tdc->mflock, 668);
716 tdc->mflags |= DFFetchReq;
717 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
718 (afs_size_t) filePos, (afs_size_t) 0,
721 /* Bkg table full; retry deadlocks */
722 tdc->mflags &= ~DFFetchReq;
723 trybusy = 0; /* Avoid bkg daemon since they're too busy */
724 ReleaseWriteLock(&tdc->mflock);
727 ConvertWToSLock(&tdc->mflock);
730 ConvertSToRLock(&tdc->mflock);
/* interruptible wait for the bkg daemon to clear DFFetchReq; all three
 * read locks are dropped around the sleep and retaken in the same order */
731 while (!code && tdc->mflags & DFFetchReq) {
732 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
733 ICL_TYPE_STRING, __FILE__,
734 ICL_TYPE_INT32, __LINE__,
735 ICL_TYPE_POINTER, tdc,
736 ICL_TYPE_INT32, tdc->dflags);
737 /* don't need waiting flag on this one */
738 ReleaseReadLock(&tdc->mflock);
739 ReleaseReadLock(&tdc->lock);
740 ReleaseReadLock(&avc->lock);
741 code = afs_osi_SleepSig(&tdc->validPos);
742 ObtainReadLock(&avc->lock);
743 ObtainReadLock(&tdc->lock);
744 ObtainReadLock(&tdc->mflock);
746 ReleaseReadLock(&tdc->mflock);
753 /* now data may have started flowing in (if DFFetching is on). If
754 * data is now streaming in, then wait for some interesting stuff.
757 while (!code && (tdc->dflags & DFFetching) &&
758 tdc->validPos <= filePos) {
759 /* too early: wait for DFFetching flag to vanish,
760 * or data to appear */
761 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
762 ICL_TYPE_STRING, __FILE__,
763 ICL_TYPE_INT32, __LINE__,
764 ICL_TYPE_POINTER, tdc,
765 ICL_TYPE_INT32, tdc->dflags);
766 ReleaseReadLock(&tdc->lock);
767 ReleaseReadLock(&avc->lock);
768 code = afs_osi_SleepSig(&tdc->validPos);
769 ObtainReadLock(&avc->lock);
770 ObtainReadLock(&tdc->lock);
776 /* fetching flag gone, data is here, or we never tried
777 * (BBusy for instance) */
778 if (tdc->dflags & DFFetching) {
779 /* still fetching, some new data is here:
780 * compute length and offset */
781 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
782 len = tdc->validPos - filePos;
785 /* no longer fetching, verify data version (avoid new
787 if (hsame(avc->m.DataVersion, tdc->f.versionNo)
788 && tdc->f.chunkBytes) {
789 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
790 len = tdc->f.chunkBytes - offset;
793 /* don't have current data, so get it below */
794 afs_Trace3(afs_iclSetp, CM_TRACE_VERSIONNO,
795 ICL_TYPE_INT64, ICL_HANDLE_OFFSET(filePos),
796 ICL_TYPE_HYPER, &avc->m.DataVersion,
797 ICL_TYPE_HYPER, &tdc->f.versionNo);
798 ReleaseReadLock(&tdc->lock);
805 /* If we get here, it was not possible to start the
806 * background daemon. With flag == 1 afs_GetDCache
807 * does the FetchData rpc synchronously.
809 ReleaseReadLock(&avc->lock);
810 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
811 ObtainReadLock(&avc->lock);
812 if (tdc) ObtainReadLock(&tdc->lock);
816 afs_Trace3(afs_iclSetp, CM_TRACE_VNODEREAD,
817 ICL_TYPE_POINTER, tdc,
818 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(offset),
819 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(len));
824 if (len > totalLength) len = totalLength; /* will read len bytes */
825 if (len <= 0) { /* shouldn't get here if DFFetching is on */
826 afs_Trace4(afs_iclSetp, CM_TRACE_VNODEREAD2,
827 ICL_TYPE_POINTER, tdc,
828 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(tdc->validPos),
829 ICL_TYPE_INT32, tdc->f.chunkBytes,
830 ICL_TYPE_INT32, tdc->dflags);
831 /* read past the end of a chunk, may not be at next chunk yet, and yet
832 also not at eof, so may have to supply fake zeros */
833 len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
834 if (len > totalLength) len = totalLength; /* and still within xfr request */
835 tlen = avc->m.Length - offset; /* and still within file */
836 if (len > tlen) len = tlen;
837 if (len > AFS_ZEROS) len = sizeof(afs_zeros); /* and in 0 buffer */
838 afsio_copy(auio, &tuio, tvec);
840 afsio_trim(&tuio, trimlen);
841 AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, &tuio, code);
848 /* get the data from the file */
/* reuse the cached open-file hint if it still names this dcache's inode;
 * otherwise drop the stale hint */
850 if (tfile = tdc->ihint) {
851 if (tdc->f.inode != tfile->inum){
852 afs_warn( "afs_UFSRead: %x hint mismatch tdc %d inum %d\n",
853 tdc, tdc->f.inode, tfile->inum );
855 tdc->ihint = tfile = 0;
865 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
866 /* mung uio structure to be right for this transfer */
867 afsio_copy(auio, &tuio, tvec);
869 afsio_trim(&tuio, trimlen);
870 tuio.afsio_offset = offset;
/* per-platform read of the cache file; one arm per OS flavor */
874 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL,
875 NULL, &afs_osi_cred);
879 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL);
880 /* Flush all JFS pages now for big performance gain in big file cases
881 * If we do something like this, must check to be sure that AFS file
882 * isn't mmapped... see afs_gn_map() for why.
885 if (tfile->vnode->v_gnode && tfile->vnode->v_gnode->gn_seg) {
886 many different ways to do similar things:
887 so far, the best performing one is #2, but #1 might match it if we
888 straighten out the confusion regarding which pages to flush. It
890 1. vm_flushp(tfile->vnode->v_gnode->gn_seg, 0, len/PAGESIZE - 1);
891 2. vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
892 (len + PAGESIZE-1)/PAGESIZE);
893 3. vms_inactive(tfile->vnode->v_gnode->gn_seg) Doesn't work correctly
894 4. vms_delete(tfile->vnode->v_gnode->gn_seg) probably also fails
895 tfile->vnode->v_gnode->gn_seg = NULL;
899 Unfortunately, this seems to cause frequent "cache corruption" episodes.
900 vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
901 (len + PAGESIZE-1)/PAGESIZE);
905 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t)&offset, &tuio, NULL, NULL, -1);
911 VOP_RWLOCK(tfile->vnode, 0);
912 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
913 VOP_RWUNLOCK(tfile->vnode, 0);
916 #if defined(AFS_SGI_ENV)
918 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
919 AFS_VOP_READ(tfile->vnode, &tuio, IO_ISLOCKED, &afs_osi_cred,
921 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
925 tuio.uio_rw = UIO_READ;
927 VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred, code);
929 #else /* AFS_OSF_ENV */
931 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
933 #if defined(AFS_HPUX100_ENV)
935 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
938 #if defined(AFS_LINUX20_ENV)
940 code = osi_file_uio_rdwr(tfile, &tuio, UIO_READ);
943 #if defined(AFS_DARWIN_ENV)
945 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
946 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
947 VOP_UNLOCK(tfile->vnode, 0, current_proc());
950 #if defined(AFS_FBSD_ENV)
952 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, curproc);
953 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
954 VOP_UNLOCK(tfile->vnode, 0, curproc);
957 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
/* save the open file as a hint if this dcache has none and the global
 * hint budget (nihints < maxIHint) allows it */
969 if (!tdc->ihint && nihints < maxIHint) {
982 /* otherwise we've read some, fixup length, etc and continue with next seg */
983 len = len - tuio.afsio_resid; /* compute amount really transferred */
985 afsio_skip(auio, trimlen); /* update input uio structure */
987 transferLength += len;
989 if (len <= 0) break; /* surprise eof */
992 /* if we make it here with tdc non-zero, then it is the last chunk we
993 * dealt with, and we have to release it when we're done. We hold on
994 * to it in case we need to do a prefetch, obviously.
997 ReleaseReadLock(&tdc->lock);
998 #if !defined(AFS_VM_RDWR_ENV)
999 /* try to queue prefetch, if needed */
1001 if (!(tdc->mflags & DFNextStarted))
1002 afs_PrefetchChunk(avc, tdc, acred, &treq);
1008 ReleaseReadLock(&avc->lock);
1010 osi_FreeSmallSpace(tvec);
1011 error = afs_CheckCode(error, &treq, 13);