2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 #include <afsconfig.h>
20 #include "../afs/param.h"
24 #include "../afs/sysincludes.h" /* Standard vendor system headers */
25 #include "../afs/afsincludes.h" /* Afs-based standard headers */
26 #include "../afs/afs_stats.h" /* statistics */
27 #include "../afs/afs_cbqueue.h"
28 #include "../afs/nfsclient.h"
29 #include "../afs/afs_osidnlc.h"
32 extern char afs_zeros[AFS_ZEROS];
35 afs_int32 nihints; /* # of above actually in-use */
39 /* Imported variables */
40 extern afs_rwlock_t afs_xdcache;
41 extern unsigned char *afs_indexFlags;
42 extern afs_hyper_t *afs_indexTimes; /* Dcache entry Access times */
43 extern afs_hyper_t afs_indexCounter; /* Fake time for marking index */
46 /* Forward declarations */
47 void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
48 struct AFS_UCRED *acred, struct vrequest *areq);
/*
 * afs_MemRead -- satisfy a read request from the in-memory cache.
 *
 * Walks the request one cache chunk at a time: finds (or queues a
 * background fetch for) the dcache entry covering filePos, waits for
 * any in-progress fetch to deliver enough bytes, then copies data into
 * the caller's uio via afs_MemReadUIO -- or zero-fills from afs_zeros
 * when the read lands in a chunk-address-space hole before EOF.
 * Returns an afs_CheckCode-translated error code.
 *
 * NOTE(review): this listing is a sampled excerpt -- gaps in the
 * embedded original line numbers mean some declarations, braces and
 * statements are not visible here.  Comments below describe only what
 * the visible lines establish.
 */
50 afs_MemRead(avc, auio, acred, albn, abpp, noLock)
51 register struct vcache *avc;
53 struct AFS_UCRED *acred;
58 afs_size_t totalLength;
59 afs_size_t transferLength;
61 afs_size_t offset, len, tlen;
64 afs_int32 error, trybusy=1;
71 AFS_STATCNT(afs_MemRead);
75 /* check that we have the latest status info in the vnode cache */
76 if (code = afs_InitReq(&treq, acred)) return code;
78 code = afs_VerifyVCache(avc, &treq);
80 code = afs_CheckCode(code, &treq, 8); /* failed to get it */
85 #ifndef AFS_VM_RDWR_ENV
/* NFS-translator requests carry a different credential model, so they
 * get an explicit ACL check before we touch the cache. */
86 if (AFS_NFSXLATORREQ(acred)) {
87 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
88 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
89 return afs_CheckCode(EACCES, &treq, 9);
/* tvec is scratch iovec storage for the per-chunk uio copies below;
 * freed via osi_FreeSmallSpace at the end of the function. */
94 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
95 totalLength = auio->afsio_resid;
96 filePos = auio->afsio_offset;
97 afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
98 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
99 ICL_TYPE_INT32, totalLength,
100 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
104 ObtainReadLock(&avc->lock);
105 #if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
106 if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
107 hset(avc->flushDV, avc->m.DataVersion);
/* Main transfer loop: each iteration moves (up to) one chunk's worth
 * of data and advances filePos/totalLength accordingly. */
110 while (totalLength > 0) {
111 /* read all of the cached info */
112 if (filePos >= avc->m.Length) break; /* all done */
114 if (tdc) afs_PutDCache(tdc);
115 tdc = afs_FindDCache(avc, filePos);
117 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
118 len = tdc->f.chunkBytes - offset;
121 /* a tricky question: does the presence of the DFFetching flag
122 mean that we're fetching the latest version of the file? No.
123 The server could update the file as soon as the fetch responsible
124 for the setting of the DFFetching flag completes.
126 However, the presence of the DFFetching flag (visible under a
127 read lock since it is set and cleared only under a write lock)
128 means that we're fetching as good a version as was known to this
129 client at the time of the last call to afs_VerifyVCache, since
130 the latter updates the stat cache's m.DataVersion field under a
131 write lock, and from the time that the DFFetching flag goes on
132 (before the fetch starts), to the time it goes off (after the
133 fetch completes), afs_GetDCache keeps at least a read lock
134 (actually it keeps an S lock) on the cache entry.
136 This means that if the DFFetching flag is set, we can use that
137 data for any reads that must come from the current version of
138 the file (current == m.DataVersion).
140 Another way of looking at this same point is this: if we're
141 fetching some data and then try to do an afs_VerifyVCache, the
142 VerifyVCache operation will not complete until after the
143 DFFetching flag is turned off and the dcache entry's f.versionNo
146 Note, by the way, that if DFFetching is set,
147 m.DataVersion > f.versionNo (the latter is not updated until
148 after the fetch completes).
150 if (tdc) afs_PutDCache(tdc); /* before reusing tdc */
151 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
152 /* now, first try to start transfer, if we'll need the data. If
153 * data already coming, we don't need to do this, obviously. Type
154 * 2 requests never return a null dcache entry, btw.
156 if (!(tdc->flags & DFFetching)
157 && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
158 /* have cache entry, it is not coming in now,
159 * and we'll need new data */
161 if (trybusy && !afs_BBusy()) {
163 /* daemon is not busy */
164 if (!(tdc->flags & DFFetchReq)) {
165 /* start the daemon (may already be running, however) */
166 tdc->flags |= DFFetchReq;
167 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
168 (afs_size_t)filePos, (afs_size_t) 0,
171 tdc->flags &= ~DFFetchReq;
172 trybusy = 0; /* Avoid bkg daemon since they're too busy */
175 /* don't use bp pointer! */
/* Sleep until the daemon clears DFFetchReq; avc->lock is dropped
 * around the sleep so the fetch can make progress. */
177 while (tdc->flags & DFFetchReq) {
178 /* don't need waiting flag on this one */
179 ReleaseReadLock(&avc->lock);
180 afs_osi_Sleep(&tdc->validPos);
181 ObtainReadLock(&avc->lock);
185 /* now data may have started flowing in (if DFFetching is on). If
186 * data is now streaming in, then wait for some interesting stuff. */
187 while ((tdc->flags & DFFetching) && tdc->validPos <= filePos) {
188 /* too early: wait for DFFetching flag to vanish, or data to appear */
189 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
190 ICL_TYPE_STRING, __FILE__,
191 ICL_TYPE_INT32, __LINE__,
192 ICL_TYPE_POINTER, tdc,
193 ICL_TYPE_INT32, tdc->flags);
194 tdc->flags |= DFWaiting;
195 ReleaseReadLock(&avc->lock);
196 afs_osi_Sleep(&tdc->validPos);
197 ObtainReadLock(&avc->lock);
199 /* fetching flag gone, data is here, or we never tried (BBusy for instance) */
200 if (tdc->flags & DFFetching) {
201 /* still fetching, some new data is here: compute length and offset */
202 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
203 len = tdc->validPos - filePos;
206 /* no longer fetching, verify data version (avoid new GetDCache call) */
207 if (hsame(avc->m.DataVersion, tdc->f.versionNo)) {
208 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
209 len = tdc->f.chunkBytes - offset;
212 /* don't have current data, so get it below */
214 tdc = (struct dcache *) 0;
/* Slow path: type-1 GetDCache actually brings the data in; must not
 * hold avc->lock across it. */
219 ReleaseReadLock(&avc->lock);
220 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
221 ObtainReadLock(&avc->lock);
229 if (len > totalLength) len = totalLength; /* will read len bytes */
230 if (len <= 0) { /* shouldn't get here if DFFetching is on */
231 /* read past the end of a chunk, may not be at next chunk yet, and yet
232 also not at eof, so may have to supply fake zeros */
233 len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
234 if (len > totalLength) len = totalLength; /* and still within xfr request */
235 tlen = avc->m.Length - offset; /* and still within file */
236 if (len > tlen) len = tlen;
237 if (len > AFS_ZEROS) len = sizeof(afs_zeros); /* and in 0 buffer */
238 afsio_copy(auio, &tuio, tvec);
240 afsio_trim(&tuio, trimlen);
241 AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, &tuio, code);
248 /* get the data from the mem cache */
250 /* mung uio structure to be right for this transfer */
251 afsio_copy(auio, &tuio, tvec);
253 afsio_trim(&tuio, trimlen);
254 tuio.afsio_offset = offset;
256 code = afs_MemReadUIO(tdc->f.inode, &tuio);
263 /* otherwise we've read some, fixup length, etc and continue with next seg */
264 len = len - tuio.afsio_resid; /* compute amount really transferred */
266 afsio_skip(auio, trimlen); /* update input uio structure */
268 transferLength += len;
271 if (len <= 0) break; /* surprise eof */
272 } /* the whole while loop */
274 /* if we make it here with tdc non-zero, then it is the last chunk we
275 * dealt with, and we have to release it when we're done. We hold on
276 * to it in case we need to do a prefetch.
279 #ifndef AFS_VM_RDWR_ENV
280 /* try to queue prefetch, if needed */
281 if (!(tdc->flags & DFNextStarted) && !noLock) {
282 afs_PrefetchChunk(avc, tdc, acred, &treq);
288 ReleaseReadLock(&avc->lock);
289 osi_FreeSmallSpace(tvec);
290 error = afs_CheckCode(error, &treq, 10);
294 /* called with the dcache entry triggering the fetch, the vcache entry involved,
295 * and a vrequest for the read call. Marks the dcache entry as having already
296 * triggered a prefetch, starts the prefetch going and sets the DFFetchReq
297 * flag in the prefetched block, so that the next call to read knows to wait
298 * for the daemon to start doing things.
300 * This function must be called with the vnode at least read-locked
301 * because it plays around with dcache entries.
/*
 * afs_PrefetchChunk -- queue a background fetch for the chunk after
 * adc, so a sequential reader finds its next chunk already arriving.
 *
 * Marks adc with DFNextStarted (so each chunk triggers at most one
 * prefetch attempt) and sets DFFetchReq on the next chunk's dcache
 * entry before handing it to the background daemon via afs_BQueue.
 * If the bkg table is full, both flags are cleared and the prefetch
 * is silently abandoned -- prefetch is best-effort only.
 *
 * Must be called with avc at least read-locked, since it manipulates
 * dcache entries.  NOTE(review): sampled excerpt -- some lines around
 * the afs_BQueue failure branch are not visible here.
 */
303 void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
304 struct AFS_UCRED *acred, struct vrequest *areq)
306 register struct dcache *tdc;
308 afs_size_t j1, j2; /* junk vbls for GetDCache to trash */
310 offset = adc->f.chunk+1; /* next chunk we'll need */
311 offset = AFS_CHUNKTOBASE(offset); /* base of next chunk */
/* Only prefetch within the file and while the daemon has capacity. */
312 if (offset < avc->m.Length && !afs_BBusy()) {
314 adc->flags |= DFNextStarted; /* we've tried to prefetch for this guy */
315 tdc = afs_GetDCache(avc, offset, areq, &j1, &j2, 2); /* type 2 never returns 0 */
316 if (!(tdc->flags & DFFetchReq)) {
317 /* ask the daemon to do the work */
318 tdc->flags |= DFFetchReq; /* guaranteed to be cleared by BKG or GetDCache */
319 /* last parm (1) tells bkg daemon to do an afs_PutDCache when it is done,
320 * since we don't want to wait for it to finish before doing so ourselves.
323 mutex_exit(&tdc->lock);
325 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
326 (afs_size_t) offset, (afs_size_t) 1, tdc);
328 /* Bkg table full; just abort non-important prefetching to avoid deadlocks */
329 tdc->flags &= ~(DFNextStarted | DFFetchReq);
340 /* if the vcache is up-to-date, and the request fits entirely into the chunk
341 * that the hint here references, then we just use it quickly, otherwise we
342 * have to call the slow read.
344 * This could be generalized in several ways to take advantage of partial
345 * state even when all the chips don't fall the right way. For instance,
346 * if the hint is good and the first part of the read request can be
347 * satisfied from the chunk, then just do the read. After the read has
348 * completed, check to see if there's more. (Chances are there won't be.)
349 * If there is more, then just call afs_UFSReadSlow and let it do the rest.
351 * For the time being, I'm ignoring quick.f, but it should be used at
353 * do this in the future avc->quick.f = tfile; but I think it
354 * has to be done under a write lock, but don't want to wait on the
357 /* everywhere that a dcache can be freed (look for NULLIDX)
358 * probably does it under a write lock on xdcache. Need to invalidate
360 * Also need to worry about DFFetching, and IFFree, I think. */
/* NOTE(review): savedc is not referenced by any code visible in this
 * excerpt; presumably debugging state for the read-fast hint work
 * described above -- confirm before removing. */
361 static struct dcache *savedc = 0;
/*
 * afs_UFSReadFast -- fast-path read for the disk (UFS) cache.
 *
 * If the vcache is up to date (CStatd) and the per-vnode hint
 * (avc->quick) still points at a live dcache entry (index != NULLIDX,
 * not IFFree) whose chunk wholly contains the request, read straight
 * from the cache file with the platform's vnode read operation, bump
 * the dcache LRU clock, and possibly queue a prefetch.  On any hint
 * miss it falls through to the general afs_UFSRead.
 *
 * NOTE(review): sampled excerpt -- most of the platform #if/#endif
 * scaffolding around the VNOP/VOP read calls, and several braces, are
 * not visible here.
 */
363 afs_UFSReadFast(avc, auio, acred, albn, abpp, noLock)
364 register struct vcache *avc;
366 struct AFS_UCRED *acred;
371 struct vrequest treq;
375 struct osi_file *tfile;
379 ObtainReadLock(&avc->lock);
380 ObtainReadLock(&afs_xdcache);
/* Hint sanity: entry must still be allocated and not on the free list. */
382 if ((avc->states & CStatd) /* up to date */
383 && (tdc = avc->quick.dc) && (tdc->index != NULLIDX)
384 && !(afs_indexFlags[tdc->index] & IFFree)) {
387 ReleaseReadLock(&afs_xdcache);
389 if ((tdc->stamp == avc->quick.stamp) /* hint matches */
390 && ((offDiff = (auio->afsio_offset - avc->quick.minLoc)) >= 0)
391 && (tdc->f.chunkBytes >= auio->afsio_resid + offDiff)
392 && !(tdc->flags & DFFetching)) { /* fits in chunk */
/* Translate the file offset into a chunk-file offset; restored after
 * the read completes (original line 461). */
394 auio->afsio_offset -= avc->quick.minLoc;
396 afs_Trace4(afs_iclSetp, CM_TRACE_READFAST, ICL_TYPE_POINTER, avc,
397 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(auio->afsio_offset),
398 ICL_TYPE_INT32, auio->afsio_resid,
399 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
401 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
/* Platform-specific read of the cache file; exactly one of these
 * VNOP/VOP variants is compiled in (surrounding #ifs sampled out). */
406 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, auio, NULL, NULL, NULL, &afs_osi_cred);
410 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, auio, NULL, NULL);
412 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t)&auio->afsio_offset, auio, NULL, NULL, -1);
418 VOP_RWLOCK(tfile->vnode, 0);
419 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
420 VOP_RWUNLOCK(tfile->vnode, 0);
423 #if defined(AFS_SGI_ENV)
425 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
426 AFS_VOP_READ(tfile->vnode, auio, IO_ISLOCKED, &afs_osi_cred, code);
427 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
431 auio->uio_rw = UIO_READ;
433 VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred, code);
435 #else /* AFS_OSF_ENV */
436 #if defined(AFS_HPUX100_ENV)
438 code = VOP_RDWR(tfile->vnode, auio, UIO_READ, 0, &afs_osi_cred);
441 #if defined(AFS_LINUX20_ENV)
443 code = osi_file_uio_rdwr(tfile, auio, UIO_READ);
446 #if defined(AFS_DARWIN_ENV)
448 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
449 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
450 VOP_UNLOCK(tfile->vnode, 0, current_proc());
453 code = VOP_RDWR(tfile->vnode, auio, UIO_READ, 0, &afs_osi_cred);
461 auio->afsio_offset += avc->quick.minLoc;
463 /* Fix up LRU info */
464 hset(afs_indexTimes[tdc->index], afs_indexCounter);
465 hadd32(afs_indexCounter, 1);
468 #ifndef AFS_VM_RDWR_ENV
469 if (!(code = afs_InitReq(&treq, acred))&& (!(tdc->flags & DFNextStarted)))
470 afs_PrefetchChunk(avc, tdc, acred, &treq);
472 ReleaseReadLock(&avc->lock);
477 if (!tdc->f.chunkBytes) { /* debugging f.chunkBytes == 0 problem */
482 ReleaseReadLock(&afs_xdcache);
485 /* come here if fast path doesn't work for some reason or other */
487 ReleaseReadLock(&avc->lock);
488 return afs_UFSRead(avc, auio, acred, albn, abpp, noLock);
/*
 * afs_UFSRead -- general read path for the disk (UFS) cache; the slow
 * counterpart of afs_UFSReadFast.
 *
 * Structure parallels afs_MemRead: loop chunk-by-chunk, find or fetch
 * the dcache entry for filePos, wait out in-progress background
 * fetches, then read from the cache file with the platform vnode read
 * op (or zero-fill from afs_zeros for chunk-address-space holes before
 * EOF).  Also maintains the per-dcache open-file hint (tdc->ihint) to
 * avoid reopening the cache file on every chunk.
 *
 * NOTE(review): this listing is a sampled excerpt and the function's
 * closing lines fall past the end of the visible region; gaps in the
 * embedded original line numbers mean some statements, braces and
 * #if/#endif scaffolding are not shown.  Comments describe only what
 * the visible lines establish.
 */
491 afs_UFSRead(avc, auio, acred, albn, abpp, noLock)
494 struct AFS_UCRED *acred;
499 afs_size_t totalLength;
500 afs_size_t transferLength;
502 afs_size_t offset, len, tlen;
504 struct dcache *tdc=0;
508 struct osi_file *tfile;
510 int munlocked, trybusy=1;
512 struct vrequest treq;
514 AFS_STATCNT(afs_UFSRead);
518 /* check that we have the latest status info in the vnode cache */
519 if (code = afs_InitReq(&treq, acred)) return code;
522 osi_Panic ("null avc in afs_UFSRead");
524 code = afs_VerifyVCache(avc, &treq);
526 code = afs_CheckCode(code, &treq, 11); /* failed to get it */
532 #ifndef AFS_VM_RDWR_ENV
/* NFS-translator requests get an explicit ACL check, as in afs_MemRead. */
533 if (AFS_NFSXLATORREQ(acred)) {
534 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
535 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
536 return afs_CheckCode(EACCES, &treq, 12);
541 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
542 totalLength = auio->afsio_resid;
543 filePos = auio->afsio_offset;
544 afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
545 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos),
546 ICL_TYPE_INT32, totalLength,
547 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
551 ObtainReadLock(&avc->lock);
552 #if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
553 if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
554 hset(avc->flushDV, avc->m.DataVersion);
/* Main transfer loop: one chunk per iteration. */
558 while (totalLength > 0) {
559 /* read all of the cached info */
560 if (filePos >= avc->m.Length) break; /* all done */
562 if (tdc) afs_PutDCache(tdc);
563 tdc = afs_FindDCache(avc, filePos);
565 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
566 len = tdc->f.chunkBytes - offset;
569 /* a tricky question: does the presence of the DFFetching flag
570 mean that we're fetching the latest version of the file? No.
571 The server could update the file as soon as the fetch responsible
572 for the setting of the DFFetching flag completes.
574 However, the presence of the DFFetching flag (visible under a
575 read lock since it is set and cleared only under a write lock)
576 means that we're fetching as good a version as was known to this
577 client at the time of the last call to afs_VerifyVCache, since
578 the latter updates the stat cache's m.DataVersion field under a
579 write lock, and from the time that the DFFetching flag goes on
580 (before the fetch starts), to the time it goes off (after the
581 fetch completes), afs_GetDCache keeps at least a read lock
582 (actually it keeps an S lock) on the cache entry.
584 This means that if the DFFetching flag is set, we can use that
585 data for any reads that must come from the current version of
586 the file (current == m.DataVersion).
588 Another way of looking at this same point is this: if we're
589 fetching some data and then try to do an afs_VerifyVCache, the
590 VerifyVCache operation will not complete until after the
591 DFFetching flag is turned off and the dcache entry's f.versionNo
594 Note, by the way, that if DFFetching is set,
595 m.DataVersion > f.versionNo (the latter is not updated until
596 after the fetch completes).
598 if (tdc) afs_PutDCache(tdc); /* before reusing tdc */
600 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
604 /* now, first try to start transfer, if we'll need the data. If
605 * data already coming, we don't need to do this, obviously. Type
606 * 2 requests never return a null dcache entry, btw. */
607 if (!(tdc->flags & DFFetching)
608 && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
609 /* have cache entry, it is not coming in now, and we'll need new data */
611 if (trybusy && !afs_BBusy()) {
613 /* daemon is not busy */
614 if (!(tdc->flags & DFFetchReq)) {
615 tdc->flags |= DFFetchReq;
617 mutex_exit(&tdc->lock);
620 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
621 (afs_size_t)filePos, (afs_size_t) 0,
624 /* Bkg table full; retry deadlocks */
625 tdc->flags &= ~DFFetchReq;
626 trybusy = 0; /* Avoid bkg daemon since they're too busy */
/* Wait for the daemon to clear DFFetchReq; avc->lock is dropped
 * around the sleep so the fetch can make progress. */
630 while (tdc->flags & DFFetchReq) {
631 /* don't need waiting flag on this one */
632 ReleaseReadLock(&avc->lock);
633 afs_osi_Sleep(&tdc->validPos);
634 ObtainReadLock(&avc->lock);
638 /* now data may have started flowing in (if DFFetching is on). If
639 * data is now streaming in, then wait for some interesting stuff. */
640 while ((tdc->flags & DFFetching) && tdc->validPos <= filePos) {
641 /* too early: wait for DFFetching flag to vanish, or data to appear */
642 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
643 ICL_TYPE_STRING, __FILE__,
644 ICL_TYPE_INT32, __LINE__,
645 ICL_TYPE_POINTER, tdc,
646 ICL_TYPE_INT32, tdc->flags);
647 tdc->flags |= DFWaiting;
648 ReleaseReadLock(&avc->lock);
649 afs_osi_Sleep(&tdc->validPos);
650 ObtainReadLock(&avc->lock);
652 /* fetching flag gone, data is here, or we never tried (BBusy for instance) */
653 if (tdc->flags & DFFetching) {
654 /* still fetching, some new data is here: compute length and offset */
655 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
656 len = tdc->validPos - filePos;
659 /* no longer fetching, verify data version (avoid new GetDCache call) */
660 if (hsame(avc->m.DataVersion, tdc->f.versionNo)) {
661 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
662 len = tdc->f.chunkBytes - offset;
665 /* don't have current data, so get it below */
667 tdc = (struct dcache *) 0;
/* Slow path: type-1 GetDCache actually brings the data in; avc->lock
 * must not be held across it. */
672 ReleaseReadLock(&avc->lock);
673 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
674 ObtainReadLock(&avc->lock);
678 afs_Trace3(afs_iclSetp, CM_TRACE_VNODEREAD,
679 ICL_TYPE_POINTER, tdc,
680 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(offset),
681 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(len));
686 if (len > totalLength) len = totalLength; /* will read len bytes */
687 if (len <= 0) { /* shouldn't get here if DFFetching is on */
688 /* read past the end of a chunk, may not be at next chunk yet, and yet
689 also not at eof, so may have to supply fake zeros */
690 len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
691 if (len > totalLength) len = totalLength; /* and still within xfr request */
692 tlen = avc->m.Length - offset; /* and still within file */
693 if (len > tlen) len = tlen;
694 if (len > AFS_ZEROS) len = sizeof(afs_zeros); /* and in 0 buffer */
695 afsio_copy(auio, &tuio, tvec);
697 afsio_trim(&tuio, trimlen);
698 AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, &tuio, code);
705 /* get the data from the file */
/* Open-file hint: reuse the cached osi_file unless it no longer
 * matches this dcache's inode (then drop it and reopen below). */
707 if (tfile = tdc->ihint) {
708 if (tdc->f.inode != tfile->inum){
709 afs_warn( "afs_UFSRead: %x hint mismatch tdc %d inum %d\n",
710 tdc, tdc->f.inode, tfile->inum );
712 tdc->ihint = tfile = 0;
722 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
723 /* mung uio structure to be right for this transfer */
724 afsio_copy(auio, &tuio, tvec);
726 afsio_trim(&tuio, trimlen);
727 tuio.afsio_offset = offset;
/* Platform-specific read of the cache file; exactly one of these
 * VNOP/VOP variants is compiled in (surrounding #ifs sampled out). */
731 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL,
732 NULL, &afs_osi_cred);
736 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL);
737 /* Flush all JFS pages now for big performance gain in big file cases
738 * If we do something like this, must check to be sure that AFS file
739 * isn't mmapped... see afs_gn_map() for why.
742 if (tfile->vnode->v_gnode && tfile->vnode->v_gnode->gn_seg) {
743 many different ways to do similar things:
744 so far, the best performing one is #2, but #1 might match it if we
745 straighten out the confusion regarding which pages to flush. It
747 1. vm_flushp(tfile->vnode->v_gnode->gn_seg, 0, len/PAGESIZE - 1);
748 2. vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
749 (len + PAGESIZE-1)/PAGESIZE);
750 3. vms_inactive(tfile->vnode->v_gnode->gn_seg) Doesn't work correctly
751 4. vms_delete(tfile->vnode->v_gnode->gn_seg) probably also fails
752 tfile->vnode->v_gnode->gn_seg = NULL;
756 Unfortunately, this seems to cause frequent "cache corruption" episodes.
757 vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
758 (len + PAGESIZE-1)/PAGESIZE);
762 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t)&offset, &tuio, NULL, NULL, -1);
768 VOP_RWLOCK(tfile->vnode, 0);
769 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
770 VOP_RWUNLOCK(tfile->vnode, 0);
773 #if defined(AFS_SGI_ENV)
775 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
776 AFS_VOP_READ(tfile->vnode, &tuio, IO_ISLOCKED, &afs_osi_cred,
778 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
782 tuio.uio_rw = UIO_READ;
784 VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred, code);
786 #else /* AFS_OSF_ENV */
788 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
790 #if defined(AFS_HPUX100_ENV)
792 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
795 #if defined(AFS_LINUX20_ENV)
797 code = osi_file_uio_rdwr(tfile, &tuio, UIO_READ);
800 #if defined(AFS_DARWIN_ENV)
802 VOP_LOCK(tfile->vnode, LK_EXCLUSIVE, current_proc());
803 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
804 VOP_UNLOCK(tfile->vnode, 0, current_proc());
807 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
/* Possibly cache the open file as this dcache's hint; nihints is the
 * global count of hints in use (declared near the top of this file). */
818 if (!tdc->ihint && nihints < maxIHint) {
831 /* otherwise we've read some, fixup length, etc and continue with next seg */
832 len = len - tuio.afsio_resid; /* compute amount really transferred */
834 afsio_skip(auio, trimlen); /* update input uio structure */
836 transferLength += len;
838 if (len <= 0) break; /* surprise eof */
841 /* if we make it here with tdc non-zero, then it is the last chunk we
842 * dealt with, and we have to release it when we're done. We hold on
843 * to it in case we need to do a prefetch, obviously.
846 #ifndef AFS_VM_RDWR_ENV
847 /* try to queue prefetch, if needed */
848 if (!(tdc->flags & DFNextStarted) && !noLock) {
849 afs_PrefetchChunk(avc, tdc, acred, &treq);
855 ReleaseReadLock(&avc->lock);
857 osi_FreeSmallSpace(tvec);
858 error = afs_CheckCode(error, &treq, 13);