#endif
#include <linux/pagemap.h>
#include <linux/writeback.h>
-#include <linux/pagevec.h>
+#if defined(HAVE_LINUX_LRU_CACHE_ADD_FILE)
+# include <linux/swap.h>
+#else
+# include <linux/pagevec.h>
+#endif
#include <linux/aio.h>
#include "afs/lock.h"
#include "afs/afs_bypasscache.h"
#include "osi_compat.h"
#include "osi_pagecopy.h"
-#ifndef HAVE_LINUX_PAGEVEC_LRU_ADD_FILE
-#define __pagevec_lru_add_file __pagevec_lru_add
-#endif
-
#ifndef MAX_ERRNO
#define MAX_ERRNO 1000L
#endif
extern struct vcache *afs_globalVp;
+/* Handle interfacing with Linux's pagevec/lru facilities */
+
+#if defined(HAVE_LINUX_LRU_CACHE_ADD_FILE) || defined(HAVE_LINUX_LRU_CACHE_ADD)
+
+/*
+ * Linux's lru_cache_add_file (or lru_cache_add on newer kernels) provides a
+ * simplified LRU interface without needing a pagevec
+ */
+struct afs_lru_pages {
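+    /* No state is needed here; ISO C requires at least one struct member */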
+ char unused;
+};
+
+static inline void
+afs_lru_cache_init(struct afs_lru_pages *alrupages)
+{
+ return;
+}
+
+static inline void
+afs_lru_cache_add(struct afs_lru_pages *alrupages, struct page *page)
+{
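+    /* lru_cache_add{,_file} takes its own reference on the page */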
+# if defined(HAVE_LINUX_LRU_CACHE_ADD)
+ lru_cache_add(page);
+# elif defined(HAVE_LINUX_LRU_CACHE_ADD_FILE)
+ lru_cache_add_file(page);
+# else
+# error need a kernel function to add a page to the kernel lru cache
+# endif
+}
+
+static inline void
+afs_lru_cache_finalize(struct afs_lru_pages *alrupages)
+{
+ return;
+}
+#else
+
+/* Without lru_cache_add{,_file}, Linux's lru interfaces require a pagevec */
+struct afs_lru_pages {
+ struct pagevec lrupv;
+};
+
+static inline void
+afs_lru_cache_init(struct afs_lru_pages *alrupages)
+{
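+    /* Older kernels' pagevec_init() takes an extra 'cold' argument */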
+# if defined(PAGEVEC_INIT_COLD_ARG)
+ pagevec_init(&alrupages->lrupv, 0);
+# else
+ pagevec_init(&alrupages->lrupv);
+# endif
+}
+
+# ifndef HAVE_LINUX_PAGEVEC_LRU_ADD_FILE
+# define __pagevec_lru_add_file __pagevec_lru_add
+# endif
+
+static inline void
+afs_lru_cache_add(struct afs_lru_pages *alrupages, struct page *page)
+{
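+    /*
+     * pagevec_add() does not take its own reference on the page, so take one
+     * here; it is released when the pagevec is drained onto the lru.
+     */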
+ get_page(page);
+ if (!pagevec_add(&alrupages->lrupv, page))
+ __pagevec_lru_add_file(&alrupages->lrupv);
+}
+
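+/* Flush any pages still pending in the pagevec onto the lru */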
+static inline void
+afs_lru_cache_finalize(struct afs_lru_pages *alrupages)
+{
+ if (pagevec_count(&alrupages->lrupv))
+ __pagevec_lru_add_file(&alrupages->lrupv);
+}
+#endif /* !HAVE_LINUX_LRU_CACHE_ADD_FILE && !HAVE_LINUX_LRU_CACHE_ADD */
+
/* This function converts a positive error code from AFS into a negative
* code suitable for passing into the Linux VFS layer. It checks that the
 * error code is within the permissible bounds for the ERR_PTR mechanism.
code = afs_CreateReq(&treq, credp);
if (code == 0) {
- code = afs_VerifyVCache2(avc, treq);
+ code = afs_VerifyVCache(avc, treq);
afs_DestroyReq(treq);
}
/* update the cache entry */
tagain:
- code = afs_convert_code(afs_VerifyVCache2(avc, treq));
+ code = afs_convert_code(afs_VerifyVCache(avc, treq));
if (code)
goto out;
extern int afs_xioctl(struct inode *ip, struct file *fp, unsigned int com,
unsigned long arg);
-#if defined(HAVE_UNLOCKED_IOCTL) || defined(HAVE_COMPAT_IOCTL)
static long afs_unlocked_xioctl(struct file *fp, unsigned int com,
unsigned long arg) {
return afs_xioctl(FILE_INODE(fp), fp, com, arg);
}
-#endif
static int
int code;
AFS_GLOCK();
- afs_Trace3(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vcp,
- ICL_TYPE_POINTER, vmap->vm_start, ICL_TYPE_INT32,
- vmap->vm_end - vmap->vm_start);
+ afs_Trace4(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vcp,
+ ICL_TYPE_POINTER, vmap->vm_start, ICL_TYPE_LONG,
+ vmap->vm_end - vmap->vm_start, ICL_TYPE_LONG, 0);
/* get a validated vcache entry */
code = afs_linux_VerifyVCache(vcp, NULL);
#else
.readdir = afs_linux_readdir,
#endif
-#ifdef HAVE_UNLOCKED_IOCTL
.unlocked_ioctl = afs_unlocked_xioctl,
-#else
- .ioctl = afs_xioctl,
-#endif
-#ifdef HAVE_COMPAT_IOCTL
.compat_ioctl = afs_unlocked_xioctl,
-#endif
.open = afs_linux_open,
.release = afs_linux_release,
.llseek = default_llseek,
.read = afs_linux_read,
.write = afs_linux_write,
#endif
-#ifdef HAVE_UNLOCKED_IOCTL
.unlocked_ioctl = afs_unlocked_xioctl,
-#else
- .ioctl = afs_xioctl,
-#endif
-#ifdef HAVE_COMPAT_IOCTL
.compat_ioctl = afs_unlocked_xioctl,
-#endif
.mmap = afs_linux_mmap,
.open = afs_linux_open,
.flush = afs_linux_flush,
*/
static int
afs_linux_read_cache(struct file *cachefp, struct page *page,
- int chunk, struct pagevec *lrupv,
+ int chunk, struct afs_lru_pages *alrupages,
struct afs_pagecopy_task *task) {
loff_t offset = page_offset(page);
struct inode *cacheinode = cachefp->f_dentry->d_inode;
if (code == 0) {
cachepage = newpage;
newpage = NULL;
-
- get_page(cachepage);
- if (!pagevec_add(lrupv, cachepage))
- __pagevec_lru_add_file(lrupv);
-
+ afs_lru_cache_add(alrupages, cachepage);
} else {
put_page(newpage);
newpage = NULL;
struct file *cacheFp = NULL;
int code;
int dcLocked = 0;
- struct pagevec lrupv;
+ struct afs_lru_pages lrupages;
/* Not a UFS cache, don't do anything */
if (cacheDiskType != AFS_FCACHE_TYPE_UFS)
/* XXX - I suspect we should be locking the inodes before we use them! */
AFS_GUNLOCK();
cacheFp = afs_linux_raw_open(&tdc->f.inode);
- osi_Assert(cacheFp);
+ if (cacheFp == NULL) {
+ /* Problem getting the inode */
+ AFS_GLOCK();
+ goto out;
+ }
if (!cacheFp->f_dentry->d_inode->i_mapping->a_ops->readpage) {
cachefs_noreadpage = 1;
AFS_GLOCK();
goto out;
}
-#if defined(PAGEVEC_INIT_COLD_ARG)
-    pagevec_init(&lrupv, 0);
-#else
-    pagevec_init(&lrupv);
-#endif
-    code = afs_linux_read_cache(cacheFp, pp, tdc->f.chunk, &lrupv, NULL);
-    if (pagevec_count(&lrupv))
-	__pagevec_lru_add_file(&lrupv);
+    afs_lru_cache_init(&lrupages);
+
+    code = afs_linux_read_cache(cacheFp, pp, tdc->f.chunk, &lrupages, NULL);
+
+    afs_lru_cache_finalize(&lrupages);
filp_close(cacheFp, NULL);
AFS_GLOCK();
return 1;
out:
+ if (cacheFp != NULL) {
+ filp_close(cacheFp, NULL);
+ }
ReleaseWriteLock(&avc->lock);
ReleaseReadLock(&tdc->lock);
afs_PutDCache(tdc);
struct iovec* iovecp;
struct nocache_read_request *ancr;
struct page *pp;
- struct pagevec lrupv;
+ struct afs_lru_pages lrupages;
afs_int32 code = 0;
cred_t *credp;
ancr->offset = auio->uio_offset;
ancr->length = auio->uio_resid;
-#if defined(PAGEVEC_INIT_COLD_ARG)
- pagevec_init(&lrupv, 0);
-#else
- pagevec_init(&lrupv);
-#endif
+ afs_lru_cache_init(&lrupages);
for(page_ix = 0; page_ix < num_pages; ++page_ix) {
lock_page(pp);
}
- /* increment page refcount--our original design assumed
- * that locking it would effectively pin it; protect
- * ourselves from the possiblity that this assumption is
- * is faulty, at low cost (provided we do not fail to
- * do the corresponding decref on the other side) */
- get_page(pp);
-
/* save the page for background map */
iovecp[page_ix].iov_base = (void*) pp;
/* and put it on the LRU cache */
- if (!pagevec_add(&lrupv, pp))
- __pagevec_lru_add_file(&lrupv);
+ afs_lru_cache_add(&lrupages, pp);
}
}
/* If there were useful pages in the page list, make sure all pages
* are in the LRU cache, then schedule the read */
if(page_count) {
- if (pagevec_count(&lrupv))
- __pagevec_lru_add_file(&lrupv);
+ afs_lru_cache_finalize(&lrupages);
credp = crref();
code = afs_ReadNoCache(avc, ancr, credp);
crfree(credp);
case LARGE_FILES_BYPASS_CACHE:
if (i_size_read(ip) > cache_bypass_threshold)
return 1;
- /* fall through */
+ AFS_FALLTHROUGH;
default:
return 0;
}
int code;
unsigned int page_idx;
loff_t offset;
- struct pagevec lrupv;
+ struct afs_lru_pages lrupages;
struct afs_pagecopy_task *task;
if (afs_linux_bypass_check(inode))
task = afs_pagecopy_init_task();
tdc = NULL;
-#if defined(PAGEVEC_INIT_COLD_ARG)
- pagevec_init(&lrupv, 0);
-#else
- pagevec_init(&lrupv);
-#endif
+
+ afs_lru_cache_init(&lrupages);
+
for (page_idx = 0; page_idx < num_pages; page_idx++) {
struct page *page = list_entry(page_list->prev, struct page, lru);
list_del(&page->lru);
afs_PutDCache(tdc);
AFS_GUNLOCK();
tdc = NULL;
- if (cacheFp)
+ if (cacheFp) {
filp_close(cacheFp, NULL);
+ cacheFp = NULL;
+ }
}
if (!tdc) {
AFS_GUNLOCK();
if (tdc) {
cacheFp = afs_linux_raw_open(&tdc->f.inode);
- osi_Assert(cacheFp);
+ if (cacheFp == NULL) {
+ /* Problem getting the inode */
+ goto out;
+ }
if (!cacheFp->f_dentry->d_inode->i_mapping->a_ops->readpage) {
cachefs_noreadpage = 1;
goto out;
if (tdc && !add_to_page_cache(page, mapping, page->index,
GFP_KERNEL)) {
- get_page(page);
- if (!pagevec_add(&lrupv, page))
- __pagevec_lru_add_file(&lrupv);
+ afs_lru_cache_add(&lrupages, page);
/* Note that add_to_page_cache() locked 'page'.
* afs_linux_read_cache() is guaranteed to handle unlocking it. */
- afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupv, task);
+ afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupages, task);
}
put_page(page);
}
- if (pagevec_count(&lrupv))
- __pagevec_lru_add_file(&lrupv);
+ afs_lru_cache_finalize(&lrupages);
out:
- if (tdc)
+ if (cacheFp)
filp_close(cacheFp, NULL);
afs_pagecopy_put_task(task);