dput(alias);
} else {
iput(ip);
- unlock_kernel();
+ afs_maybe_unlock_kernel();
crfree(credp);
return alias;
}
afs_offs_t offset;
struct iovec* iovecp;
struct nocache_read_request *ancr;
- struct page *pp, *ppt;
+ struct page *pp;
struct pagevec lrupv;
afs_int32 code = 0;
/* we do not flush, release, or unmap pages--that will be
* done for us by the background thread as each page comes in
* from the fileserver */
-out:
return afs_convert_code(code);
}
uio_t *auio;
struct iovec *iovecp;
struct nocache_read_request *ancr;
- afs_int32 isize;
+ int code;
ClearPageError(pp);
/* the background thread will free this */
ancr = osi_Alloc(sizeof(struct nocache_read_request));
ancr->auio = auio;
- ancr->offset = offset;
+ ancr->offset = page_offset(pp);
ancr->length = PAGE_SIZE;
credp = crref();
if(i_size_read(ip) > cache_bypass_threshold)
return 1;
default:
+ return 0;
}
- return 0;
}
/* Check if a file is permitted to bypass the cache by policy, and modify
static inline int
afs_linux_bypass_check(struct inode *ip) {
- struct cred* credp;
+ cred_t* credp;
int bypass = afs_linux_can_bypass(ip);
credp = crref();
- trydo_cache_transition(VTOAFS(ip)), credp, bypass);
+ trydo_cache_transition(VTOAFS(ip), credp, bypass);
crfree(credp);
return bypass;
/* Catch recursive writeback. This occurs if the kernel decides
* writeback is required whilst we are writing to the cache, or
- * flushing to the server. */
+ * flushing to the server. When we're running synchronously (as
+ * opposed to from writepage) we can't actually do anything about
+ * this case - as we can't return AOP_WRITEPAGE_ACTIVATE to write()
+ */
AFS_GLOCK();
ObtainWriteLock(&vcp->lock, 532);
- code = afs_linux_prepare_writeback(vcp);
- if (code) {
- ReleaseWriteLock(&vcp->lock);
- AFS_GUNLOCK();
- return code;
- }
+ afs_linux_prepare_writeback(vcp);
ReleaseWriteLock(&vcp->lock);
AFS_GUNLOCK();