return afs_convert_code(code);
}
+#ifdef HAVE_LINUX_GENERIC_FILE_AIO_READ
+/* Async (aio) read entry point for AFS files.
+ *
+ * Verifies the vcache and flushes stale cached pages under the AFS global
+ * lock, then drops that lock and delegates the actual data transfer to the
+ * kernel's generic_file_aio_read().
+ *
+ * Returns the byte count from generic_file_aio_read(), or a negative errno
+ * if the vcache could not be verified.
+ */
+static ssize_t
+afs_linux_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long segs, loff_t pos)
+{
+ struct file *fp = iocb->ki_filp;
+ ssize_t code = 0;
+ struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
+
+ AFS_GLOCK();
+ /* Entry trace: 99999 is a placeholder in the result slot; the exit trace
+ * below logs the real return code in the same position. */
+ afs_Trace4(afs_iclSetp, CM_TRACE_AIOREADOP, ICL_TYPE_POINTER, vcp,
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32, segs, ICL_TYPE_INT32,
+ 99999);
+ code = afs_linux_VerifyVCache(vcp, NULL);
+
+ if (code == 0) {
+ /* Linux's FlushPages implementation doesn't ever use credp,
+ * so we optimise by not using it */
+ osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
+ /* Drop the AFS global lock across the generic VFS read. */
+ AFS_GUNLOCK();
+ code = generic_file_aio_read(iocb, iov, segs, pos);
+ AFS_GLOCK();
+ }
+
+ afs_Trace4(afs_iclSetp, CM_TRACE_AIOREADOP, ICL_TYPE_POINTER, vcp,
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32, segs, ICL_TYPE_INT32,
+ code);
+ AFS_GUNLOCK();
+ return code;
+}
+#else
static ssize_t
afs_linux_read(struct file *fp, char *buf, size_t count, loff_t * offp)
{
AFS_GUNLOCK();
return code;
}
+#endif
-/* Now we have integrated VM for writes as well as reads. generic_file_write
- * also takes care of re-positioning the pointer if file is open in append
+/* Now we have integrated VM for writes as well as reads. The generic write
+ * operations also take care of re-positioning the pointer if the file is open in append
* mode. Call fake open/close to ensure we do writes of core dumps.
*/
+#ifdef HAVE_LINUX_GENERIC_FILE_AIO_READ
+/* Async (aio) write entry point for AFS files.
+ *
+ * Verifies the vcache, brackets the transfer with afs_FakeOpen() /
+ * afs_FakeClose() (per the comment above, so writes of core dumps also
+ * succeed), and delegates the actual data transfer to the kernel's
+ * generic_file_aio_write() with the AFS global lock dropped.
+ *
+ * Returns the byte count from generic_file_aio_write(), or a negative
+ * errno if the vcache could not be verified.
+ */
+static ssize_t
+afs_linux_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long segs, loff_t pos)
+{
+ ssize_t code = 0;
+ struct vcache *vcp = VTOAFS(iocb->ki_filp->f_dentry->d_inode);
+ /* Credentials, possibly filled in by afs_linux_VerifyVCache() below or by
+ * crref() before FakeClose; released at the end of this function.
+ * NOTE(review): assumes VerifyVCache sets *credp to NULL or a valid
+ * reference on every path — confirm against its definition. */
+ cred_t *credp;
+
+ AFS_GLOCK();
+
+ /* Entry trace: 99998/99999 are placeholders in the result slot that
+ * distinguish append-mode from normal opens; the exit trace below logs
+ * the real return code in the same position. */
+ afs_Trace4(afs_iclSetp, CM_TRACE_AIOWRITEOP, ICL_TYPE_POINTER, vcp,
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32, segs, ICL_TYPE_INT32,
+ (iocb->ki_filp->f_flags & O_APPEND) ? 99998 : 99999);
+
+ code = afs_linux_VerifyVCache(vcp, &credp);
+
+ /* FakeOpen/FakeClose are paired unconditionally, even when verification
+ * failed, so the open count stays balanced. */
+ ObtainWriteLock(&vcp->lock, 529);
+ afs_FakeOpen(vcp);
+ ReleaseWriteLock(&vcp->lock);
+ if (code == 0) {
+ /* Drop the AFS global lock across the generic VFS write. */
+ AFS_GUNLOCK();
+ code = generic_file_aio_write(iocb, iov, segs, pos);
+ AFS_GLOCK();
+ }
+
+ ObtainWriteLock(&vcp->lock, 530);
+
+ /* If this is the last writer and we have no credentials yet, take a
+ * reference on the current ones so FakeClose has creds to work with. */
+ if (vcp->execsOrWriters == 1 && !credp)
+ credp = crref();
+
+ afs_FakeClose(vcp, credp);
+ ReleaseWriteLock(&vcp->lock);
+
+ afs_Trace4(afs_iclSetp, CM_TRACE_AIOWRITEOP, ICL_TYPE_POINTER, vcp,
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32, segs, ICL_TYPE_INT32,
+ code);
+
+ if (credp)
+ crfree(credp);
+ AFS_GUNLOCK();
+ return code;
+}
+#else
static ssize_t
afs_linux_write(struct file *fp, const char *buf, size_t count, loff_t * offp)
{
AFS_GUNLOCK();
return code;
}
+#endif
extern int BlobScan(struct dcache * afile, afs_int32 ablob);
};
struct file_operations afs_file_fops = {
+#ifdef HAVE_LINUX_GENERIC_FILE_AIO_READ
+ .aio_read = afs_linux_aio_read,
+ .aio_write = afs_linux_aio_write,
+#else
.read = afs_linux_read,
.write = afs_linux_write,
-#ifdef HAVE_LINUX_GENERIC_FILE_AIO_READ
- .aio_read = generic_file_aio_read,
- .aio_write = generic_file_aio_write,
#endif
#ifdef HAVE_UNLOCKED_IOCTL
.unlocked_ioctl = afs_unlocked_xioctl,
int chunk, struct pagevec *lrupv,
struct afs_pagecopy_task *task) {
loff_t offset = page_offset(page);
+ struct inode *cacheinode = cachefp->f_dentry->d_inode;
struct page *newpage, *cachepage;
struct address_space *cachemapping;
- int pageindex;
+ int pageindex, endindex;
int code = 0;
- cachemapping = cachefp->f_dentry->d_inode->i_mapping;
+ cachemapping = cacheinode->i_mapping;
newpage = NULL;
cachepage = NULL;
+ /* If we're trying to read a page that's past the end of the disk
+ * cache file, then just return a zeroed page */
+ if (offset >= i_size_read(cacheinode)) {
+ zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ SetPageUptodate(page);
+ if (task)
+ unlock_page(page);
+ return 0;
+ }
+
/* From our offset, we now need to work out which page in the disk
* file it corresponds to. This will be fun ... */
pageindex = (offset - AFS_CHUNKTOBASE(chunk)) >> PAGE_CACHE_SHIFT;