2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 #include "../afs/param.h" /* Should be always first */
20 #include "../afs/sysincludes.h" /* Standard vendor system headers */
21 #include "../afs/afsincludes.h" /* Afs-based standard headers */
22 #include "../afs/afs_stats.h" /* statistics */
23 #include "../afs/afs_cbqueue.h"
24 #include "../afs/nfsclient.h"
25 #include "../afs/afs_osidnlc.h"
28 extern char afs_zeros[AFS_ZEROS];
31 afs_int32 nihints; /* # of above actually in-use */
35 /* Imported variables */
36 extern afs_rwlock_t afs_xdcache;
37 extern unsigned char *afs_indexFlags;
38 extern afs_hyper_t *afs_indexTimes; /* Dcache entry Access times */
39 extern afs_hyper_t afs_indexCounter; /* Fake time for marking index */
42 /* Forward declarations */
43 void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
44 struct AFS_UCRED *acred, struct vrequest *areq);
/*
 * afs_MemRead -- satisfy a read request from the in-memory cache.
 *
 * K&R-style parameters (only some declarations are visible in this extract):
 *   avc    - vcache entry for the file being read
 *   auio   - uio giving the caller's buffer, start offset (afsio_offset)
 *            and byte count (afsio_resid)
 *   acred  - credentials; used for the NFS-translator access check and
 *            passed to background fetch requests
 *   albn, abpp, noLock - NOTE(review): declarations elided from this
 *            extract; noLock visibly suppresses the prefetch at the end --
 *            confirm the rest against the full source
 *
 * Returns 0 on success, else an error code filtered through afs_CheckCode().
 * Structure: loops chunk by chunk -- locate/obtain a dcache entry for the
 * current file position, wait out any in-flight fetch, then copy bytes
 * (or zero-fill for not-yet-fetched holes) into the caller's uio.
 *
 * NOTE(review): this extract elides many original source lines (gaps in the
 * embedded numbering), so the brace/#ifdef structure shown is incomplete.
 */
46 afs_MemRead(avc, auio, acred, albn, abpp, noLock)
47 register struct vcache *avc;
49 struct AFS_UCRED *acred;
53 afs_int32 totalLength;
54 afs_int32 transferLength;
57 afs_int32 offset, len, error, trybusy=1;
64 AFS_STATCNT(afs_MemRead);
68 /* check that we have the latest status info in the vnode cache */
69 if (code = afs_InitReq(&treq, acred)) return code;
71 code = afs_VerifyVCache(avc, &treq);
73 code = afs_CheckCode(code, &treq, 8); /* failed to get it */
78 #ifndef AFS_VM_RDWR_ENV
/* NFS translator requests must pass an explicit access check here;
 * execute permission may stand in for read (CMB_ALLOW_EXEC_AS_READ). */
79 if (AFS_NFSXLATORREQ(acred)) {
80 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
81 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
82 return afs_CheckCode(EACCES, &treq, 9);
/* scratch iovec for the per-chunk trimmed copies of the caller's uio;
 * freed at the bottom of the function */
87 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
88 totalLength = auio->afsio_resid;
89 filePos = auio->afsio_offset;
90 afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
91 ICL_TYPE_INT32, filePos, ICL_TYPE_INT32, totalLength,
92 ICL_TYPE_INT32, avc->m.Length);
96 ObtainReadLock(&avc->lock);
97 #if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
98 if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
99 hset(avc->flushDV, avc->m.DataVersion);
/* Main transfer loop: one iteration per chunk (or partial chunk). */
102 while (totalLength > 0) {
103 /* read all of the cached info */
104 if (filePos >= avc->m.Length) break; /* all done */
106 if (tdc) afs_PutDCache(tdc);
107 tdc = afs_FindDCache(avc, filePos);
109 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
110 len = tdc->f.chunkBytes - offset;
113 /* a tricky question: does the presence of the DFFetching flag
114 mean that we're fetching the latest version of the file? No.
115 The server could update the file as soon as the fetch responsible
116 for the setting of the DFFetching flag completes.
118 However, the presence of the DFFetching flag (visible under a
119 read lock since it is set and cleared only under a write lock)
120 means that we're fetching as good a version as was known to this
121 client at the time of the last call to afs_VerifyVCache, since
122 the latter updates the stat cache's m.DataVersion field under a
123 write lock, and from the time that the DFFetching flag goes on
124 (before the fetch starts), to the time it goes off (after the
125 fetch completes), afs_GetDCache keeps at least a read lock
126 (actually it keeps an S lock) on the cache entry.
128 This means that if the DFFetching flag is set, we can use that
129 data for any reads that must come from the current version of
130 the file (current == m.DataVersion).
132 Another way of looking at this same point is this: if we're
133 fetching some data and then try do an afs_VerifyVCache, the
134 VerifyVCache operation will not complete until after the
135 DFFetching flag is turned off and the dcache entry's f.versionNo
138 Note, by the way, that if DFFetching is set,
139 m.DataVersion > f.versionNo (the latter is not updated until
140 after the fetch completes).
142 if (tdc) afs_PutDCache(tdc); /* before reusing tdc */
143 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
144 /* now, first try to start transfer, if we'll need the data. If
145 * data already coming, we don't need to do this, obviously. Type
146 * 2 requests never return a null dcache entry, btw.
148 if (!(tdc->flags & DFFetching)
149 && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
150 /* have cache entry, it is not coming in now,
151 * and we'll need new data */
153 if (trybusy && !afs_BBusy()) {
155 /* daemon is not busy */
156 if (!(tdc->flags & DFFetchReq)) {
157 /* start the daemon (may already be running, however) */
158 tdc->flags |= DFFetchReq;
159 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
160 (long)filePos, (long) tdc, 0L, 0L);
/* queueing failed: undo the request flag and stop trying the
 * background daemon for the remainder of this call */
162 tdc->flags &= ~DFFetchReq;
163 trybusy = 0; /* Avoid bkg daemon since they're too busy */
166 /* don't use bp pointer! */
/* Wait for a queued fetch request to be picked up.  avc->lock is
 * dropped across the sleep; NOTE(review): the wakeup on
 * &tdc->validPos is presumably issued by the bkg daemon /
 * fetch path -- not visible in this extract. */
168 while (tdc->flags & DFFetchReq) {
169 /* don't need waiting flag on this one */
170 ReleaseReadLock(&avc->lock);
171 afs_osi_Sleep(&tdc->validPos);
172 ObtainReadLock(&avc->lock);
176 /* now data may have started flowing in (if DFFetching is on). If
177 * data is now streaming in, then wait for some interesting stuff. */
178 while ((tdc->flags & DFFetching) && tdc->validPos <= filePos) {
179 /* too early: wait for DFFetching flag to vanish, or data to appear */
180 tdc->flags |= DFWaiting;
181 ReleaseReadLock(&avc->lock);
182 afs_osi_Sleep(&tdc->validPos);
183 ObtainReadLock(&avc->lock);
185 /* fetching flag gone, data is here, or we never tried (BBusy for instance) */
186 if (tdc->flags & DFFetching) {
187 /* still fetching, some new data is here: compute length and offset */
188 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
189 len = tdc->validPos - filePos;
192 /* no longer fetching, verify data version (avoid new GetDCache call) */
193 if (hsame(avc->m.DataVersion, tdc->f.versionNo)) {
194 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
195 len = tdc->f.chunkBytes - offset;
198 /* don't have current data, so get it below */
200 tdc = (struct dcache *) 0;
/* Slow path: type-1 GetDCache (may fetch synchronously); drop the
 * vcache read lock around the call to respect lock ordering. */
205 ReleaseReadLock(&avc->lock);
206 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
207 ObtainReadLock(&avc->lock);
215 if (len > totalLength) len = totalLength; /* will read len bytes */
216 if (len <= 0) { /* shouldn't get here if DFFetching is on */
217 /* read past the end of a chunk, may not be at next chunk yet, and yet
218 also not at eof, so may have to supply fake zeros */
219 len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
220 if (len > totalLength) len = totalLength; /* and still within xfr request */
221 code = avc->m.Length - offset; /* and still within file */
222 if (len > code) len = code;
223 if (len > AFS_ZEROS) len = sizeof(afs_zeros); /* and in 0 buffer */
224 afsio_copy(auio, &tuio, tvec);
225 afsio_trim(&tuio, len);
226 AFS_UIOMOVE(afs_zeros, len, UIO_READ, &tuio, code);
233 /* get the data from the mem cache */
235 /* mung uio structure to be right for this transfer */
236 afsio_copy(auio, &tuio, tvec);
237 afsio_trim(&tuio, len);
238 tuio.afsio_offset = offset;
240 code = afs_MemReadUIO(tdc->f.inode, &tuio);
247 /* otherwise we've read some, fixup length, etc and continue with next seg */
248 len = len - tuio.afsio_resid; /* compute amount really transferred */
249 afsio_skip(auio, len); /* update input uio structure */
251 transferLength += len;
254 if (len <= 0) break; /* surprise eof */
255 } /* the whole while loop */
257 /* if we make it here with tdc non-zero, then it is the last chunk we
258 * dealt with, and we have to release it when we're done. We hold on
259 * to it in case we need to do a prefetch.
262 #ifndef AFS_VM_RDWR_ENV
263 /* try to queue prefetch, if needed */
264 if (!(tdc->flags & DFNextStarted) && !noLock) {
265 afs_PrefetchChunk(avc, tdc, acred, &treq);
271 ReleaseReadLock(&avc->lock);
272 osi_FreeSmallSpace(tvec);
273 error = afs_CheckCode(error, &treq, 10);
277 /* called with the dcache entry triggering the fetch, the vcache entry involved,
278 * and a vrequest for the read call. Marks the dcache entry as having already
279 * triggered a prefetch, starts the prefetch going and sets the DFFetchReq
280 * flag in the prefetched block, so that the next call to read knows to wait
281 * for the daemon to start doing things.
283 * This function must be called with the vnode at least read-locked
284 * because it plays around with dcache entries.
/*
 * afs_PrefetchChunk -- queue a background fetch of the chunk after adc.
 *
 * Marks adc with DFNextStarted so callers do not re-trigger the prefetch,
 * obtains the following chunk's dcache entry (type-2 GetDCache, documented
 * below as never returning NULL), sets DFFetchReq on it and hands it to
 * the background daemon via afs_BQueue.  If the bkg table is full, both
 * flags are cleared and the prefetch is simply abandoned.
 *
 *   avc   - vcache for the file; caller must hold at least a read lock
 *           on the vnode (see the block comment above in the source)
 *   adc   - dcache entry that triggered the prefetch
 *   acred - credentials passed along to the bkg request
 *   areq  - vrequest from the originating read call
 *
 * NOTE(review): several original lines are elided from this extract
 * (closing braces, platform #ifdefs), so control structure is incomplete
 * as shown.
 */
286 void afs_PrefetchChunk(struct vcache *avc, struct dcache *adc,
287 struct AFS_UCRED *acred, struct vrequest *areq)
289 register struct dcache *tdc;
290 register afs_int32 offset;
291 afs_int32 j1, j2; /* junk vbls for GetDCache to trash */
293 offset = adc->f.chunk+1; /* next chunk we'll need */
294 offset = AFS_CHUNKTOBASE(offset); /* base of next chunk */
/* only prefetch if the next chunk lies inside the file and the bkg
 * daemons have capacity */
295 if (offset < avc->m.Length && !afs_BBusy()) {
297 adc->flags |= DFNextStarted; /* we've tried to prefetch for this guy */
298 tdc = afs_GetDCache(avc, offset, areq, &j1, &j2, 2); /* type 2 never returns 0 */
299 if (!(tdc->flags & DFFetchReq)) {
300 /* ask the daemon to do the work */
301 tdc->flags |= DFFetchReq; /* guaranteed to be cleared by BKG or GetDCache */
302 /* last parm (1) tells bkg daemon to do an afs_PutDCache when it is done,
303 * since we don't want to wait for it to finish before doing so ourselves.
/* NOTE(review): matching mutex_enter is not visible in this extract --
 * presumably inside elided platform-conditional code; confirm */
306 mutex_exit(&tdc->lock);
308 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred, (long)offset,
309 (long) tdc, 1L, 0L, 0L);
311 /* Bkg table full; just abort non-important prefetching to avoid deadlocks */
312 tdc->flags &= ~(DFNextStarted | DFFetchReq);
323 /* if the vcache is up-to-date, and the request fits entirely into the chunk
324 * that the hint here references, then we just use it quickly, otherwise we
325 * have to call the slow read.
327 * This could be generalized in several ways to take advantage of partial
328 * state even when all the chips don't fall the right way. For instance,
329 * if the hint is good and the first part of the read request can be
330 * satisfied from the chunk, then just do the read. After the read has
331 * completed, check to see if there's more. (Chances are there won't be.)
332 * If there is more, then just call afs_UFSReadSlow and let it do the rest.
334 * For the time being, I'm ignoring quick.f, but it should be used at
336 * do this in the future avc->quick.f = tfile; but I think it
337 * has to be done under a write lock, but don't want to wait on the
340 /* everywhere that a dcache can be freed (look for NULLIDX)
341 * probably does it under a write lock on xdcache. Need to invalidate
343 * Also need to worry about DFFetching, and IFFree, I think. */
344 static struct dcache *savedc = 0;
/*
 * afs_UFSReadFast -- fast-path read via the per-vnode chunk hint.
 *
 * If the vcache is up to date (CStatd), the hinted dcache entry
 * (avc->quick.dc) is still valid (index not NULLIDX, not IFFree), the
 * hint stamp matches, the whole request fits inside the hinted chunk's
 * fetched bytes, and no fetch is in progress, then read directly from the
 * cache file with the platform's vnode read primitive.  Otherwise fall
 * through to the slow path, which forwards to afs_UFSRead() with the
 * identical argument list.
 *
 * Interface matches afs_UFSRead (K&R parameters; some declarations are
 * elided from this extract).  Lock order here: avc->lock (read), then
 * afs_xdcache (read) just long enough to validate the hint.
 *
 * NOTE(review): many original lines are elided (gaps in the embedded
 * numbering), so #ifdef platform ladders appear truncated as shown.
 */
346 afs_UFSReadFast(avc, auio, acred, albn, abpp, noLock)
347 register struct vcache *avc;
349 struct AFS_UCRED *acred;
353 struct vrequest treq;
357 struct osi_file *tfile;
361 ObtainReadLock(&avc->lock);
362 ObtainReadLock(&afs_xdcache);
364 if ((avc->states & CStatd) /* up to date */
365 && (tdc = avc->quick.dc) && (tdc->index != NULLIDX)
366 && !(afs_indexFlags[tdc->index] & IFFree)) {
/* hint looks structurally valid; xdcache no longer needed */
369 ReleaseReadLock(&afs_xdcache);
371 if ((tdc->stamp == avc->quick.stamp) /* hint matches */
372 && ((offDiff = (auio->afsio_offset - avc->quick.minLoc)) >= 0)
373 && (tdc->f.chunkBytes >= auio->afsio_resid + offDiff)
374 && !(tdc->flags & DFFetching)) { /* fits in chunk */
/* translate the caller's file offset into a cache-file offset */
376 auio->afsio_offset -= avc->quick.minLoc;
378 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
/* Platform-specific read of the cache file into the caller's uio.
 * Each branch below is selected by elided #ifdefs. */
383 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, auio, NULL, NULL, NULL, &afs_osi_cred);
387 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, auio, NULL, NULL);
389 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t)&auio->afsio_offset, auio, NULL, NULL, -1);
395 VOP_RWLOCK(tfile->vnode, 0);
396 code = VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred);
397 VOP_RWUNLOCK(tfile->vnode, 0);
400 #if defined(AFS_SGI_ENV)
402 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
403 AFS_VOP_READ(tfile->vnode, auio, IO_ISLOCKED, &afs_osi_cred, code);
404 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
408 auio->uio_rw = UIO_READ;
410 VOP_READ(tfile->vnode, auio, 0, &afs_osi_cred, code);
412 #else /* AFS_OSF_ENV */
413 #if defined(AFS_HPUX100_ENV)
415 code = VOP_RDWR(tfile->vnode, auio, UIO_READ, 0, &afs_osi_cred);
418 #if defined(AFS_LINUX20_ENV)
420 code = osi_file_uio_rdwr(tfile, auio, UIO_READ);
423 code = VOP_RDWR(tfile->vnode, auio, UIO_READ, 0, &afs_osi_cred);
/* restore the caller-visible file offset */
430 auio->afsio_offset += avc->quick.minLoc;
432 /* Fix up LRU info */
433 hset(afs_indexTimes[tdc->index], afs_indexCounter);
434 hadd32(afs_indexCounter, 1);
437 #ifndef AFS_VM_RDWR_ENV
438 if (!(code = afs_InitReq(&treq, acred))&& (!(tdc->flags & DFNextStarted)))
439 afs_PrefetchChunk(avc, tdc, acred, &treq);
441 ReleaseReadLock(&avc->lock);
446 if (!tdc->f.chunkBytes) { /* debugging f.chunkBytes == 0 problem */
451 ReleaseReadLock(&afs_xdcache);
454 /* come here if fast path doesn't work for some reason or other */
456 ReleaseReadLock(&avc->lock);
457 return afs_UFSRead(avc, auio, acred, albn, abpp, noLock);
/*
 * afs_UFSRead -- read from a file whose data lives in on-disk (UFS)
 * cache files.
 *
 * Same chunk-by-chunk structure as afs_MemRead above, but the per-chunk
 * transfer opens the chunk's cache file (via the per-dcache handle hint
 * tdc->ihint when valid, else osi_UFSOpen) and reads it with the
 * platform's vnode read primitive.  Panics on a NULL avc.
 *
 * Parameters as for afs_MemRead (K&R declaration; some parameter
 * declarations are elided from this extract).  Returns an error code
 * filtered through afs_CheckCode().
 *
 * NOTE(review): this extract elides many original lines AND ends before
 * the function's final return/closing brace -- tail not shown.
 */
460 afs_UFSRead(avc, auio, acred, albn, abpp, noLock)
463 struct AFS_UCRED *acred;
467 afs_int32 totalLength;
468 afs_int32 transferLength;
470 struct dcache *tdc=0;
471 afs_int32 offset, len, error;
474 struct osi_file *tfile;
476 int munlocked, trybusy=1;
478 struct vrequest treq;
480 AFS_STATCNT(afs_UFSRead);
484 /* check that we have the latest status info in the vnode cache */
485 if (code = afs_InitReq(&treq, acred)) return code;
488 osi_Panic ("null avc in afs_UFSRead");
490 code = afs_VerifyVCache(avc, &treq);
492 code = afs_CheckCode(code, &treq, 11); /* failed to get it */
498 #ifndef AFS_VM_RDWR_ENV
/* NFS translator requests get an explicit access check (exec may stand
 * in for read) */
499 if (AFS_NFSXLATORREQ(acred)) {
500 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
501 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
502 return afs_CheckCode(EACCES, &treq, 12);
/* scratch iovec for per-chunk trimmed copies; freed at the bottom */
507 tvec = (struct iovec *) osi_AllocSmallSpace(sizeof(struct iovec));
508 totalLength = auio->afsio_resid;
509 filePos = auio->afsio_offset;
510 afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
511 ICL_TYPE_INT32, filePos, ICL_TYPE_INT32, totalLength,
512 ICL_TYPE_INT32, avc->m.Length);
516 ObtainReadLock(&avc->lock);
517 #if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
518 if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
519 hset(avc->flushDV, avc->m.DataVersion);
/* Main transfer loop: one iteration per chunk (or partial chunk). */
523 while (totalLength > 0) {
524 /* read all of the cached info */
525 if (filePos >= avc->m.Length) break; /* all done */
527 if (tdc) afs_PutDCache(tdc);
528 tdc = afs_FindDCache(avc, filePos);
530 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
531 len = tdc->f.chunkBytes - offset;
534 /* a tricky question: does the presence of the DFFetching flag
535 mean that we're fetching the latest version of the file? No.
536 The server could update the file as soon as the fetch responsible
537 for the setting of the DFFetching flag completes.
539 However, the presence of the DFFetching flag (visible under a
540 read lock since it is set and cleared only under a write lock)
541 means that we're fetching as good a version as was known to this
542 client at the time of the last call to afs_VerifyVCache, since
543 the latter updates the stat cache's m.DataVersion field under a
544 write lock, and from the time that the DFFetching flag goes on
545 (before the fetch starts), to the time it goes off (after the
546 fetch completes), afs_GetDCache keeps at least a read lock
547 (actually it keeps an S lock) on the cache entry.
549 This means that if the DFFetching flag is set, we can use that
550 data for any reads that must come from the current version of
551 the file (current == m.DataVersion).
553 Another way of looking at this same point is this: if we're
554 fetching some data and then try do an afs_VerifyVCache, the
555 VerifyVCache operation will not complete until after the
556 DFFetching flag is turned off and the dcache entry's f.versionNo
559 Note, by the way, that if DFFetching is set,
560 m.DataVersion > f.versionNo (the latter is not updated until
561 after the fetch completes).
563 if (tdc) afs_PutDCache(tdc); /* before reusing tdc */
565 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 2);
569 /* now, first try to start transfer, if we'll need the data. If
570 * data already coming, we don't need to do this, obviously. Type
571 * 2 requests never return a null dcache entry, btw. */
572 if (!(tdc->flags & DFFetching)
573 && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
574 /* have cache entry, it is not coming in now, and we'll need new data */
576 if (trybusy && !afs_BBusy()) {
578 /* daemon is not busy */
579 if (!(tdc->flags & DFFetchReq)) {
580 tdc->flags |= DFFetchReq;
/* NOTE(review): matching mutex_enter not visible in this extract --
 * presumably in elided platform-conditional code; confirm */
582 mutex_exit(&tdc->lock);
585 bp = afs_BQueue(BOP_FETCH, avc, B_DONTWAIT, 0, acred,
586 (long)filePos, (long) tdc, 0L, 0L);
588 /* Bkg table full; retry deadlocks */
589 tdc->flags &= ~DFFetchReq;
590 trybusy = 0; /* Avoid bkg daemon since they're too busy */
/* Wait for the queued fetch request to be picked up; avc->lock is
 * dropped across the sleep on &tdc->validPos */
594 while (tdc->flags & DFFetchReq) {
595 /* don't need waiting flag on this one */
596 ReleaseReadLock(&avc->lock);
597 afs_osi_Sleep(&tdc->validPos);
598 ObtainReadLock(&avc->lock);
602 /* now data may have started flowing in (if DFFetching is on). If
603 * data is now streaming in, then wait for some interesting stuff. */
604 while ((tdc->flags & DFFetching) && tdc->validPos <= filePos) {
605 /* too early: wait for DFFetching flag to vanish, or data to appear */
606 tdc->flags |= DFWaiting;
607 ReleaseReadLock(&avc->lock);
608 afs_osi_Sleep(&tdc->validPos);
609 ObtainReadLock(&avc->lock);
611 /* fetching flag gone, data is here, or we never tried (BBusy for instance) */
612 if (tdc->flags & DFFetching) {
613 /* still fetching, some new data is here: compute length and offset */
614 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
615 len = tdc->validPos - filePos;
618 /* no longer fetching, verify data version (avoid new GetDCache call) */
619 if (hsame(avc->m.DataVersion, tdc->f.versionNo)) {
620 offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
621 len = tdc->f.chunkBytes - offset;
624 /* don't have current data, so get it below */
626 tdc = (struct dcache *) 0;
/* Slow path: type-1 GetDCache may fetch synchronously; drop the vcache
 * read lock around the call */
631 ReleaseReadLock(&avc->lock);
632 tdc = afs_GetDCache(avc, filePos, &treq, &offset, &len, 1);
633 ObtainReadLock(&avc->lock);
641 if (len > totalLength) len = totalLength; /* will read len bytes */
642 if (len <= 0) { /* shouldn't get here if DFFetching is on */
643 /* read past the end of a chunk, may not be at next chunk yet, and yet
644 also not at eof, so may have to supply fake zeros */
645 len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
646 if (len > totalLength) len = totalLength; /* and still within xfr request */
647 code = avc->m.Length - offset; /* and still within file */
648 if (len > code) len = code;
649 if (len > AFS_ZEROS) len = sizeof(afs_zeros); /* and in 0 buffer */
650 afsio_copy(auio, &tuio, tvec);
651 afsio_trim(&tuio, len);
652 AFS_UIOMOVE(afs_zeros, len, UIO_READ, &tuio, code);
659 /* get the data from the file */
/* Reuse the cached open-file handle (ihint) when it still names the
 * same cache inode; otherwise invalidate the hint and reopen. */
661 if (tfile = tdc->ihint) {
662 if (tdc->f.inode != tfile->inum){
663 afs_warn( "afs_UFSRead: %x hint mismatch tdc %d inum %d\n",
664 tdc, tdc->f.inode, tfile->inum );
666 tdc->ihint = tfile = 0;
676 tfile = (struct osi_file *)osi_UFSOpen(tdc->f.inode);
677 /* mung uio structure to be right for this transfer */
678 afsio_copy(auio, &tuio, tvec);
679 afsio_trim(&tuio, len);
680 tuio.afsio_offset = offset;
/* Platform-specific read of the cache file; each branch below is
 * selected by elided #ifdefs. */
684 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL,
685 NULL, &afs_osi_cred);
689 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, &tuio, NULL, NULL);
690 /* Flush all JFS pages now for big performance gain in big file cases
691 * If we do something like this, must check to be sure that AFS file
692 * isn't mmapped... see afs_gn_map() for why.
695 if (tfile->vnode->v_gnode && tfile->vnode->v_gnode->gn_seg) {
696 many different ways to do similar things:
697 so far, the best performing one is #2, but #1 might match it if we
698 straighten out the confusion regarding which pages to flush. It
700 1. vm_flushp(tfile->vnode->v_gnode->gn_seg, 0, len/PAGESIZE - 1);
701 2. vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
702 (len + PAGESIZE-1)/PAGESIZE);
703 3. vms_inactive(tfile->vnode->v_gnode->gn_seg) Doesn't work correctly
704 4. vms_delete(tfile->vnode->v_gnode->gn_seg) probably also fails
705 tfile->vnode->v_gnode->gn_seg = NULL;
709 Unfortunately, this seems to cause frequent "cache corruption" episodes.
710 vm_releasep(tfile->vnode->v_gnode->gn_seg, offset/PAGESIZE,
711 (len + PAGESIZE-1)/PAGESIZE);
715 code = VNOP_RDWR(tfile->vnode, UIO_READ, FREAD, (off_t)&offset, &tuio, NULL, NULL, -1);
721 VOP_RWLOCK(tfile->vnode, 0);
722 code = VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred);
723 VOP_RWUNLOCK(tfile->vnode, 0);
726 #if defined(AFS_SGI_ENV)
728 AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_READ);
729 AFS_VOP_READ(tfile->vnode, &tuio, IO_ISLOCKED, &afs_osi_cred,
731 AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_READ);
735 tuio.uio_rw = UIO_READ;
737 VOP_READ(tfile->vnode, &tuio, 0, &afs_osi_cred, code);
739 #else /* AFS_OSF_ENV */
741 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
743 #if defined(AFS_HPUX100_ENV)
745 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
748 #if defined(AFS_LINUX20_ENV)
750 code = osi_file_uio_rdwr(tfile, &tuio, UIO_READ);
753 code = VOP_RDWR(tfile->vnode, &tuio, UIO_READ, 0, &afs_osi_cred);
/* Cache the open handle in the dcache's ihint if the hint table still
 * has room (nihints bounded by maxIHint). */
763 if (!tdc->ihint && nihints < maxIHint) {
776 /* otherwise we've read some, fixup length, etc and continue with next seg */
777 len = len - tuio.afsio_resid; /* compute amount really transferred */
778 afsio_skip(auio, len); /* update input uio structure */
780 transferLength += len;
782 if (len <= 0) break; /* surprise eof */
785 /* if we make it here with tdc non-zero, then it is the last chunk we
786 * dealt with, and we have to release it when we're done. We hold on
787 * to it in case we need to do a prefetch, obviously.
790 #ifndef AFS_VM_RDWR_ENV
791 /* try to queue prefetch, if needed */
792 if (!(tdc->flags & DFNextStarted) && !noLock) {
793 afs_PrefetchChunk(avc, tdc, acred, &treq);
799 ReleaseReadLock(&avc->lock);
801 osi_FreeSmallSpace(tvec);
802 error = afs_CheckCode(error, &treq, 13);