{
struct vcache *avc = VTOAFS(FILE_INODE(fp));
struct vrequest treq;
- register struct dcache *tdc;
+ struct dcache *tdc;
int code;
int offset;
int dirpos;
#endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
AFS_GLOCK();
- code = afs_lockctl(vcp, &flock, cmd, credp);
+ code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
AFS_GUNLOCK();
if ((code == 0 || flp->fl_type == F_UNLCK) &&
flp->fl_end = flock.l_start + flock.l_len - 1;
crfree(credp);
- return afs_convert_code(code);
+ return code;
}
#ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
#endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
AFS_GLOCK();
- code = afs_lockctl(vcp, &flock, cmd, credp);
+ code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
AFS_GUNLOCK();
if ((code == 0 || flp->fl_type == F_UNLCK) &&
flp->fl_pid = flock.l_pid;
crfree(credp);
- return afs_convert_code(code);
+ return code;
}
#endif
cred_t *credp;
int code;
#if defined(AFS_CACHE_BYPASS)
- int bypasscache;
+ int bypasscache = 0;
#endif
AFS_GLOCK();
if (code)
goto out;
#if defined(AFS_CACHE_BYPASS)
- /* If caching is bypassed for this file, or globally, just return 0 */
- if(cache_bypass_strategy == ALWAYS_BYPASS_CACHE)
- bypasscache = 1;
- else {
- ObtainReadLock(&vcp->lock);
- if(vcp->cachingStates & FCSBypass)
- bypasscache = 1;
- ReleaseReadLock(&vcp->lock);
- }
- if(bypasscache) {
- /* future proof: don't rely on 0 return from afs_InitReq */
- code = 0; goto out;
- }
+ /* If caching is bypassed for this file, or globally, just return 0 */
+ if (cache_bypass_strategy == ALWAYS_BYPASS_CACHE)
+ bypasscache = 1;
+ else {
+ ObtainReadLock(&vcp->lock);
+ if (vcp->cachingStates & FCSBypass)
+ bypasscache = 1;
+ ReleaseReadLock(&vcp->lock);
+ }
+ if (bypasscache) {
+ /* future proof: don't rely on 0 return from afs_InitReq */
+ code = 0;
+ goto out;
+ }
#endif
ObtainSharedLock(&vcp->lock, 535);
page_cache_release(pp);
iovecp[page_ix].iov_base = (void *) 0;
base_index++;
- continue;
+ ancr->length -= PAGE_SIZE;
+ continue;
}
base_index++;
if(code) {
lock_page(pp);
}
+ /* increment page refcount--our original design assumed
+ * that locking it would effectively pin it; protect
+ * ourselves from the possibility that this assumption
+ * is faulty, at low cost (provided we do not fail to
+ * do the corresponding decref on the other side) */
+ get_page(pp);
+
/* save the page for background map */
iovecp[page_ix].iov_base = (void*) pp;
struct nocache_read_request *ancr;
int code;
+ /*
+ * Special case: if page is at or past end of file, just zero it and set
+ * it as up to date.
+ */
+ if (page_offset(pp) >= i_size_read(fp->f_mapping->host)) {
+ zero_user_segment(pp, 0, PAGE_CACHE_SIZE);
+ SetPageUptodate(pp);
+ unlock_page(pp);
+ return 0;
+ }
+
ClearPageError(pp);
/* receiver frees */
PAGE_SIZE, UIO_READ, AFS_UIOSYS);
/* save the page for background map */
- /* XXX - Shouldn't we get a reference count here? */
+ get_page(pp); /* see above */
auio->uio_iov->iov_base = (void*) pp;
/* the background thread will free this */
ancr = osi_Alloc(sizeof(struct nocache_read_request));