#include "afsincludes.h"
#include "afs/afs_stats.h"
#include <linux/mm.h>
+#include <linux/buffer_head.h>
#ifdef HAVE_MM_INLINE_H
#include <linux/mm_inline.h>
#endif
#include <linux/pagemap.h>
#include <linux/writeback.h>
-#if defined(HAVE_LINUX_LRU_CACHE_ADD_FILE)
+#if defined(HAVE_LINUX_FOLIO_ADD_LRU) || defined(HAVE_LINUX_LRU_CACHE_ADD_FILE)
# include <linux/swap.h>
#else
# include <linux/pagevec.h>
# define D_SPLICE_ALIAS_RACE
#endif
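+/*
+ * Prefer the iterate_shared() file operation when the kernel provides it;
+ * later kernels drop the plain iterate() operation entirely.
+ */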
+#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE_SHARED)
+# define USE_FOP_ITERATE 1
+#elif defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE) && !defined(FMODE_KABI_ITERATE)
/* Workaround for RH 7.5, which introduced the file operation iterate() but
 * requires each file->f_mode to be marked with FMODE_KABI_ITERATE. OpenAFS
 * will instead continue to use the file operation readdir() in this case.
*/
-#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE) && !defined(FMODE_KABI_ITERATE)
-#define USE_FOP_ITERATE 1
+# define USE_FOP_ITERATE 1
#else
-#undef USE_FOP_ITERATE
+# undef USE_FOP_ITERATE
#endif
/* Kernels from before 2.6.19 may not be able to return errors from
/* Handle interfacing with Linux's pagevec/lru facilities */
-#if defined(HAVE_LINUX_LRU_CACHE_ADD_FILE) || defined(HAVE_LINUX_LRU_CACHE_ADD)
+#if defined(HAVE_LINUX_FOLIO_ADD_LRU) || \
+ defined(HAVE_LINUX_LRU_CACHE_ADD_FILE) || defined(HAVE_LINUX_LRU_CACHE_ADD)
/*
* Linux's lru_cache_add_file provides a simplified LRU interface without
static inline void
afs_lru_cache_add(struct afs_lru_pages *alrupages, struct page *page)
{
-# if defined(HAVE_LINUX_LRU_CACHE_ADD)
+# if defined(HAVE_LINUX_FOLIO_ADD_LRU)
+ struct folio *folio = page_folio(page);
+ folio_add_lru(folio);
+# elif defined(HAVE_LINUX_LRU_CACHE_ADD)
lru_cache_add(page);
# elif defined(HAVE_LINUX_LRU_CACHE_ADD_FILE)
lru_cache_add_file(page);
}
-#endif /* !HAVE_LINUX_LRU_ADD_FILE */
+#endif /* HAVE_LINUX_FOLIO_ADD_LRU || HAVE_LINUX_LRU_CACHE_ADD_FILE || HAVE_LINUX_LRU_CACHE_ADD */
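+/*
+ * Insert a page into the page cache and onto the LRU. Kernels that provide
+ * add_to_page_cache_lru() do both in a single call; otherwise fall back to
+ * add_to_page_cache() followed by afs_lru_cache_add().
+ */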
+static inline int
+afs_add_to_page_cache_lru(struct afs_lru_pages *alrupages, struct page *page,
+ struct address_space *mapping,
+ pgoff_t index, gfp_t gfp)
+{
+#if defined(HAVE_LINUX_ADD_TO_PAGE_CACHE_LRU)
+ return add_to_page_cache_lru(page, mapping, index, gfp);
+#else
+ int code;
+ code = add_to_page_cache(page, mapping, index, gfp);
+ if (code == 0) {
+ afs_lru_cache_add(alrupages, page);
+ }
+ return code;
+#endif
+}
+
/* This function converts a positive error code from AFS into a negative
* code suitable for passing into the Linux VFS layer. It checks that the
 * error code is within the permissible bounds for the ERR_PTR mechanism.
}
#endif
-extern int BlobScan(struct dcache * afile, afs_int32 ablob, afs_int32 *ablobOut);
-
/* This is a complete rewrite of afs_readdir, since we can make use of
* filldir instead of afs_readdir_move. Note that changes to vcache/dcache
* handling and use of bulkstats will need to be reflected here as well.
int code;
int offset;
afs_int32 dirpos;
- struct DirEntry *de;
+ struct DirEntryFlex *de;
struct DirBuffer entry;
ino_t ino;
int len;
goto unlock_out;
}
- de = (struct DirEntry *)entry.data;
+ de = entry.data;
ino = afs_calc_inum (avc->f.fid.Cell, avc->f.fid.Fid.Volume,
ntohl(de->fid.vnode));
len = strlen(de->name);
afid.Fid.Unique = ntohl(de->fid.vunique);
if ((avc->f.states & CForeign) == 0 && (ntohl(de->fid.vnode) & 1)) {
type = DT_DIR;
- } else if ((tvc = afs_FindVCache(&afid, 0, 0))) {
+ } else if ((tvc = afs_FindVCache(&afid, 0))) {
if (tvc->mvstat != AFS_MVSTAT_FILE) {
type = DT_DIR;
} else if (((tvc->f.states) & (CStatd | CTruth))) {
}
-/* in afs_pioctl.c */
-extern int afs_xioctl(struct inode *ip, struct file *fp, unsigned int com,
- unsigned long arg);
-
static long afs_unlocked_xioctl(struct file *fp, unsigned int com,
unsigned long arg) {
return afs_xioctl(FILE_INODE(fp), fp, com, arg);
struct file_operations afs_dir_fops = {
.read = generic_read_dir,
-#if defined(USE_FOP_ITERATE)
+#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE_SHARED)
+ .iterate_shared = afs_linux_readdir,
+#elif defined(USE_FOP_ITERATE)
.iterate = afs_linux_readdir,
#else
.readdir = afs_linux_readdir,
# else
.splice_write = generic_file_splice_write,
# endif
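+    /* Linux 6.5 removed generic_file_splice_read() in favor of
+     * filemap_splice_read(). */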
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(6,5,0)
+ .splice_read = filemap_splice_read,
+# else
.splice_read = generic_file_splice_read,
+# endif
#endif
.release = afs_linux_release,
.fsync = afs_linux_fsync,
afs_d_alias_lock(ip);
-#if defined(D_ALIAS_IS_HLIST)
-# if defined(HLIST_ITERATOR_NO_NODE)
- hlist_for_each_entry(cur, &ip->i_dentry, d_alias) {
-# else
- hlist_for_each_entry(cur, p, &ip->i_dentry, d_alias) {
-# endif
-#else
- list_for_each_entry_reverse(cur, &ip->i_dentry, d_alias) {
-#endif
-
+ afs_d_alias_foreach_reverse(cur, ip, p) {
if (!vcp->target_link || cur == vcp->target_link) {
ret = cur;
break;
ip->i_uid = afs_make_kuid(vp->va_uid);
ip->i_gid = afs_make_kgid(vp->va_gid);
i_size_write(ip, vp->va_size);
- ip->i_atime.tv_sec = vp->va_atime.tv_sec;
- ip->i_atime.tv_nsec = 0;
- ip->i_mtime.tv_sec = vp->va_mtime.tv_sec;
+ afs_inode_set_atime(ip, vp->va_atime.tv_sec, 0);
/* Set the mtime nanoseconds to the sysname generation number.
* This convinces NFS clients that all directories have changed
* any time the sysname list changes.
*/
- ip->i_mtime.tv_nsec = afs_sysnamegen;
- ip->i_ctime.tv_sec = vp->va_ctime.tv_sec;
- ip->i_ctime.tv_nsec = 0;
+ afs_inode_set_mtime(ip, vp->va_mtime.tv_sec, afs_sysnamegen);
+ afs_inode_set_ctime(ip, vp->va_ctime.tv_sec, 0);
}
/* afs_notify_change
* Linux version of setattr call. What to change is in the iattr struct.
* We need to set bits in both the Linux inode as well as the vcache.
*/
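+/*
+ * Newer kernels pass an extra first argument to the setattr inode
+ * operation: a struct mnt_idmap (or, on somewhat older kernels, a struct
+ * user_namespace) describing the idmapped mount.
+ */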
+#if defined(IOP_TAKES_MNT_IDMAP)
+static int
+afs_notify_change(struct mnt_idmap *idmap, struct dentry *dp, struct iattr *iattrp)
+#elif defined(IOP_TAKES_USER_NAMESPACE)
+static int
+afs_notify_change(struct user_namespace *mnt_userns, struct dentry *dp, struct iattr *iattrp)
+#else
static int
afs_notify_change(struct dentry *dp, struct iattr *iattrp)
+#endif
{
struct vattr *vattr = NULL;
cred_t *credp = crref();
return afs_convert_code(code);
}
-#if defined(IOP_GETATTR_TAKES_PATH_STRUCT)
+#if defined(IOP_TAKES_MNT_IDMAP)
+static int
+afs_linux_getattr(struct mnt_idmap *idmap, const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int sync_mode)
+{
+ int err = afs_linux_revalidate(path->dentry);
+ if (!err) {
+# if defined(GENERIC_FILLATTR_TAKES_REQUEST_MASK)
+ generic_fillattr(afs_mnt_idmap, request_mask, path->dentry->d_inode, stat);
+# else
+ generic_fillattr(afs_mnt_idmap, path->dentry->d_inode, stat);
+# endif
+ }
+ return err;
+}
+#elif defined(IOP_TAKES_USER_NAMESPACE)
+static int
+afs_linux_getattr(struct user_namespace *mnt_userns, const struct path *path, struct kstat *stat,
+ u32 request_mask, unsigned int sync_mode)
+{
+ int err = afs_linux_revalidate(path->dentry);
+ if (!err) {
+ generic_fillattr(afs_ns, path->dentry->d_inode, stat);
+ }
+ return err;
+}
+#elif defined(IOP_GETATTR_TAKES_PATH_STRUCT)
static int
afs_linux_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int sync_mode)
{
if (code == ENOENT && fatal_signal_pending(current)) {
return EINTR;
}
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
+# error fatal_signal_pending not available, but it should be
#endif
return code;
}
#else
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
static DEFINE_MUTEX(dentry_race_sem);
-# else
-static DECLARE_MUTEX(dentry_race_sem);
-# endif
static inline void
dentry_race_lock(void)
*
* name is in kernel space at this point.
*/
+
+#if defined(IOP_TAKES_MNT_IDMAP)
+static int
+afs_linux_create(struct mnt_idmap *idmap, struct inode *dip,
+ struct dentry *dp, umode_t mode, bool excl)
+#elif defined(IOP_TAKES_USER_NAMESPACE)
+static int
+afs_linux_create(struct user_namespace *mnt_userns, struct inode *dip,
+ struct dentry *dp, umode_t mode, bool excl)
+#elif defined(IOP_CREATE_TAKES_BOOL)
static int
-#if defined(IOP_CREATE_TAKES_BOOL)
afs_linux_create(struct inode *dip, struct dentry *dp, umode_t mode,
bool excl)
#elif defined(IOP_CREATE_TAKES_UMODE_T)
+static int
afs_linux_create(struct inode *dip, struct dentry *dp, umode_t mode,
struct nameidata *nd)
#elif defined(IOP_CREATE_TAKES_NAMEIDATA)
+static int
afs_linux_create(struct inode *dip, struct dentry *dp, int mode,
struct nameidata *nd)
#else
+static int
afs_linux_create(struct inode *dip, struct dentry *dp, int mode)
#endif
{
}
+#if defined(IOP_TAKES_MNT_IDMAP)
+static int
+afs_linux_symlink(struct mnt_idmap *idmap, struct inode *dip,
+ struct dentry *dp, const char *target)
+#elif defined(IOP_TAKES_USER_NAMESPACE)
+static int
+afs_linux_symlink(struct user_namespace *mnt_userns, struct inode *dip,
+ struct dentry *dp, const char *target)
+#else
static int
afs_linux_symlink(struct inode *dip, struct dentry *dp, const char *target)
+#endif
{
int code;
cred_t *credp = crref();
return afs_convert_code(code);
}
+#if defined(IOP_TAKES_MNT_IDMAP)
+static int
+afs_linux_mkdir(struct mnt_idmap *idmap, struct inode *dip,
+ struct dentry *dp, umode_t mode)
+#elif defined(IOP_TAKES_USER_NAMESPACE)
+static int
+afs_linux_mkdir(struct user_namespace *mnt_userns, struct inode *dip,
+ struct dentry *dp, umode_t mode)
+#elif defined(IOP_MKDIR_TAKES_UMODE_T)
static int
-#if defined(IOP_MKDIR_TAKES_UMODE_T)
afs_linux_mkdir(struct inode *dip, struct dentry *dp, umode_t mode)
#else
+static int
afs_linux_mkdir(struct inode *dip, struct dentry *dp, int mode)
#endif
{
}
+#if defined(IOP_TAKES_MNT_IDMAP)
+static int
+afs_linux_rename(struct mnt_idmap *idmap,
+ struct inode *oldip, struct dentry *olddp,
+ struct inode *newip, struct dentry *newdp,
+ unsigned int flags)
+#elif defined(IOP_TAKES_USER_NAMESPACE)
+static int
+afs_linux_rename(struct user_namespace *mnt_userns,
+ struct inode *oldip, struct dentry *olddp,
+ struct inode *newip, struct dentry *newdp,
+ unsigned int flags)
+#elif defined(HAVE_LINUX_INODE_OPERATIONS_RENAME_TAKES_FLAGS)
+static int
+afs_linux_rename(struct inode *oldip, struct dentry *olddp,
+ struct inode *newip, struct dentry *newdp,
+ unsigned int flags)
+#else
static int
afs_linux_rename(struct inode *oldip, struct dentry *olddp,
- struct inode *newip, struct dentry *newdp
-#ifdef HAVE_LINUX_INODE_OPERATIONS_RENAME_TAKES_FLAGS
- , unsigned int flags
+ struct inode *newip, struct dentry *newdp)
#endif
- )
{
int code;
cred_t *credp = crref();
const char *newname = newdp->d_name.name;
struct dentry *rehash = NULL;
-#ifdef HAVE_LINUX_INODE_OPERATIONS_RENAME_TAKES_FLAGS
+#if defined(HAVE_LINUX_INODE_OPERATIONS_RENAME_TAKES_FLAGS) || \
+ defined(IOP_TAKES_MNT_IDMAP) || defined(IOP_TAKES_USER_NAMESPACE)
if (flags)
return -EINVAL; /* no support for new flags yet */
#endif
#endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
+/*
+ * Call the mapping function that reads data for a given page.
+ * Note: When we return, it is expected that the page is unlocked. It is the
+ * responsibility of the called function (e.g. ->readpage) to unlock the given
+ * page, even when an error occurs.
+ */
+static int
+mapping_read_page(struct address_space *mapping, struct page *page)
+{
+#if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_READ_FOLIO)
+ return mapping->a_ops->read_folio(NULL, page_folio(page));
+#else
+ return mapping->a_ops->readpage(NULL, page);
+#endif
+}
+
/* Populate a page by filling it from the cache file pointed at by cachefp
* (which contains indicated chunk)
 * If task is NULL, the page copy occurs synchronously, and the routine
goto out;
}
- code = add_to_page_cache(newpage, cachemapping,
- pageindex, GFP_KERNEL);
+ code = afs_add_to_page_cache_lru(alrupages, newpage, cachemapping,
+ pageindex, GFP_KERNEL);
if (code == 0) {
cachepage = newpage;
newpage = NULL;
- afs_lru_cache_add(alrupages, cachepage);
} else {
put_page(newpage);
newpage = NULL;
if (!PageUptodate(cachepage)) {
ClearPageError(cachepage);
- /* Note that ->readpage always handles unlocking the given page, even
- * when an error is returned. */
- code = cachemapping->a_ops->readpage(NULL, cachepage);
+ /* Note that mapping_read_page always handles unlocking the given page,
+ * even when an error is returned. */
+ code = mapping_read_page(cachemapping, cachepage);
if (!code && !task) {
wait_on_page_locked(cachepage);
}
return code;
}
+/*
+ * Return true if the file has a mapping that can read pages
+ */
+static int inline
+file_can_read_pages(struct file *fp)
+{
+#if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_READ_FOLIO)
+ if (fp->f_dentry->d_inode->i_mapping->a_ops->read_folio != NULL)
+ return 1;
+#else
+ if (fp->f_dentry->d_inode->i_mapping->a_ops->readpage != NULL)
+ return 1;
+#endif
+ return 0;
+}
+
static int inline
afs_linux_readpage_fastpath(struct file *fp, struct page *pp, int *codep)
{
AFS_GLOCK();
goto out;
}
- if (!cacheFp->f_dentry->d_inode->i_mapping->a_ops->readpage) {
+
+ if (!file_can_read_pages(cacheFp)) {
cachefs_noreadpage = 1;
AFS_GLOCK();
goto out;
}
+#if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_READAHEAD)
+/*
+ * Bypass the cache while performing a readahead.
+ * See the comments for afs_linux_readahead for the semantics
+ * of 'rac'.
+ */
+static void
+afs_linux_bypass_readahead(struct readahead_control *rac)
+{
+ struct file *fp = rac->file;
+ unsigned num_pages = readahead_count(rac);
+ afs_int32 page_ix;
+ afs_offs_t offset;
+ struct iovec* iovecp;
+ struct nocache_read_request *ancr;
+ struct page *pp;
+ afs_int32 code = 0;
+
+ cred_t *credp;
+ struct inode *ip = FILE_INODE(fp);
+ struct vcache *avc = VTOAFS(ip);
+ afs_int32 base_index = 0;
+ afs_int32 page_count = 0;
+ afs_int32 isize;
+
+ ancr = afs_alloc_ncr(num_pages);
+ if (ancr == NULL)
+ goto done;
+
+ iovecp = ancr->auio->uio_iov;
+
+ for (page_ix = 0; page_ix < num_pages; ++page_ix) {
+ pp = readahead_page(rac);
+ if (pp == NULL)
+ break;
+
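+        /* Skip any page that lies beyond end-of-file. */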
+ isize = (i_size_read(fp->f_mapping->host) - 1) >> PAGE_SHIFT;
+ if (pp->index > isize) {
+ if (PageLocked(pp))
+ unlock_page(pp);
+ put_page(pp);
+ continue;
+ }
+
+ if (page_ix == 0) {
+ offset = page_offset(pp);
+ ancr->offset = ancr->auio->uio_offset = offset;
+ base_index = pp->index;
+ }
+ iovecp[page_ix].iov_len = PAGE_SIZE;
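+        /* Pages must be contiguous from the starting offset; drop any page
+         * that is out of sequence and shrink the request accordingly. */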
+ if (base_index != pp->index) {
+ if (PageLocked(pp))
+ unlock_page(pp);
+ put_page(pp);
+ iovecp[page_ix].iov_base = NULL;
+ base_index++;
+ ancr->length -= PAGE_SIZE;
+ continue;
+ }
+ base_index++;
+ page_count++;
+ /* save the page for background map */
+ iovecp[page_ix].iov_base = pp;
+ }
+
+ /* If there were useful pages in the page list, schedule
+ * the read */
+ if (page_count > 0) {
+ credp = crref();
+ /* The background thread frees the ancr */
+ code = afs_ReadNoCache(avc, ancr, credp);
+ crfree(credp);
+ } else {
+ /* If there is nothing for the background thread to handle,
+ * it won't be freeing the things that we never gave it */
+ afs_free_ncr(&ancr);
+ }
+ /* we do not flush, release, or unmap pages--that will be
+ * done for us by the background thread as each page comes in
+ * from the fileserver */
+
+ done:
+ /* The vfs layer will unlock/put any of the pages in the rac that were not
+ * processed */
+ return;
+}
+#else /* STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_READAHEAD */
static int
afs_linux_bypass_readpages(struct file *fp, struct address_space *mapping,
struct list_head *page_list, unsigned num_pages)
{
afs_int32 page_ix;
- struct uio *auio;
afs_offs_t offset;
struct iovec* iovecp;
struct nocache_read_request *ancr;
afs_int32 page_count = 0;
afs_int32 isize;
- /* background thread must free: iovecp, auio, ancr */
- iovecp = osi_Alloc(num_pages * sizeof(struct iovec));
-
- auio = osi_Alloc(sizeof(struct uio));
- auio->uio_iov = iovecp;
- auio->uio_iovcnt = num_pages;
- auio->uio_flag = UIO_READ;
- auio->uio_seg = AFS_UIOSYS;
- auio->uio_resid = num_pages * PAGE_SIZE;
-
- ancr = osi_Alloc(sizeof(struct nocache_read_request));
- ancr->auio = auio;
- ancr->offset = auio->uio_offset;
- ancr->length = auio->uio_resid;
+ ancr = afs_alloc_ncr(num_pages);
+ if (ancr == NULL)
+ return afs_convert_code(ENOMEM);
+ iovecp = ancr->auio->uio_iov;
afs_lru_cache_init(&lrupages);
if(pp->index > isize) {
if(PageLocked(pp))
unlock_page(pp);
+ put_page(pp);
continue;
}
if(page_ix == 0) {
offset = page_offset(pp);
- ancr->offset = auio->uio_offset = offset;
+ ancr->offset = ancr->auio->uio_offset = offset;
base_index = pp->index;
}
iovecp[page_ix].iov_len = PAGE_SIZE;
if(page_count) {
afs_lru_cache_finalize(&lrupages);
credp = crref();
+ /* background thread frees the ancr */
code = afs_ReadNoCache(avc, ancr, credp);
crfree(credp);
} else {
/* If there is nothing for the background thread to handle,
* it won't be freeing the things that we never gave it */
- osi_Free(iovecp, num_pages * sizeof(struct iovec));
- osi_Free(auio, sizeof(struct uio));
- osi_Free(ancr, sizeof(struct nocache_read_request));
+ afs_free_ncr(&ancr);
}
/* we do not flush, release, or unmap pages--that will be
* done for us by the background thread as each page comes in
* from the fileserver */
return afs_convert_code(code);
}
-
+#endif /* STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_READAHEAD */
static int
afs_linux_bypass_readpage(struct file *fp, struct page *pp)
ClearPageError(pp);
/* receiver frees */
- auio = osi_Alloc(sizeof(struct uio));
- iovecp = osi_Alloc(sizeof(struct iovec));
+ ancr = afs_alloc_ncr(1);
+ if (ancr == NULL) {
+ SetPageError(pp);
+ return afs_convert_code(ENOMEM);
+ }
+ /*
+     * afs_alloc_ncr() has already allocated auio->uio_iov; pass the existing
+     * iovec to setup_uio() so it is reused rather than replaced.
+ */
+ auio = ancr->auio;
+ iovecp = auio->uio_iov;
/* address can be NULL, because we overwrite it with 'pp', below */
setup_uio(auio, iovecp, NULL, page_offset(pp),
get_page(pp); /* see above */
auio->uio_iov->iov_base = (void*) pp;
/* the background thread will free this */
- ancr = osi_Alloc(sizeof(struct nocache_read_request));
- ancr->auio = auio;
ancr->offset = page_offset(pp);
ancr->length = PAGE_SIZE;
return code;
}
+#if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_READ_FOLIO)
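+/*
+ * Wrapper for kernels whose address_space operations use read_folio()
+ * instead of readpage(); OpenAFS still works on single pages, so hand the
+ * folio's page to afs_linux_readpage().
+ */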
+static int
+afs_linux_read_folio(struct file *fp, struct folio *folio)
+{
+ struct page *pp = &folio->page;
+
+ return afs_linux_readpage(fp, pp);
+}
+#endif
+
+/*
+ * Find the dcache for the chunk containing the given offset and open its
+ * cache file, updating the adc and acacheFp parameters.
+ * Returns:
+ *  0 - success
+ * -1 - problem getting the cache inode, or no mapping read function
+ */
+static int
+get_dcache_readahead(struct dcache **adc, struct file **acacheFp,
+ struct vcache *avc, loff_t offset)
+{
+ struct dcache *tdc = *adc;
+ struct file *cacheFp = *acacheFp;
+ int code;
+
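+    /* If the dcache we already hold covers a different chunk than the one
+     * containing 'offset', release it along with its open cache file. */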
+ if (tdc != NULL && tdc->f.chunk != AFS_CHUNK(offset)) {
+ AFS_GLOCK();
+ ReleaseReadLock(&tdc->lock);
+ afs_PutDCache(tdc);
+ AFS_GUNLOCK();
+ tdc = NULL;
+ if (cacheFp != NULL) {
+ filp_close(cacheFp, NULL);
+ cacheFp = NULL;
+ }
+ }
+
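+    /* Look up the dcache for this chunk; only use it if it is fresh and is
+     * not currently being fetched. */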
+ if (tdc == NULL) {
+ AFS_GLOCK();
+ tdc = afs_FindDCache(avc, offset);
+ if (tdc != NULL) {
+ ObtainReadLock(&tdc->lock);
+ if (!afs_IsDCacheFresh(tdc, avc) ||
+ (tdc->dflags & DFFetching) != 0) {
+ ReleaseReadLock(&tdc->lock);
+ afs_PutDCache(tdc);
+ tdc = NULL;
+ }
+ }
+ AFS_GUNLOCK();
+ if (tdc != NULL) {
+ cacheFp = afs_linux_raw_open(&tdc->f.inode);
+ if (cacheFp == NULL) {
+ /* Problem getting the inode */
+ code = -1;
+ goto out;
+ }
+ if (!file_can_read_pages(cacheFp)) {
+ cachefs_noreadpage = 1;
+ /* No mapping function */
+ code = -1;
+ goto out;
+ }
+ }
+ }
+ code = 0;
+
+ out:
+ if (code != 0) {
+ if (cacheFp != NULL) {
+ filp_close(cacheFp, NULL);
+ cacheFp = NULL;
+ }
+ if (tdc != NULL) {
+ AFS_GLOCK();
+ ReleaseReadLock(&tdc->lock);
+ afs_PutDCache(tdc);
+ AFS_GUNLOCK();
+ tdc = NULL;
+ }
+ }
+ *adc = tdc;
+ *acacheFp = cacheFp;
+ return code;
+}
+
+#if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_READAHEAD)
+/*
+ * Readahead reads a number of pages for a particular file. We use
+ * this to optimise the reading by limiting how often we have to look up,
+ * lock and open vcaches and dcaches.
+ *
+ * Upon return, the vfs layer handles unlocking and putting any pages in the
+ * rac that we did not process here.
+ *
+ * Note: any errors detected during readahead are ignored at this stage by the
+ * vfs. We just need to unlock/put the page and return. Errors will be detected
+ * later in the vfs processing.
+ */
+static void
+afs_linux_readahead(struct readahead_control *rac)
+{
+ struct page *page;
+ struct address_space *mapping = rac->mapping;
+ struct inode *inode = mapping->host;
+ struct vcache *avc = VTOAFS(inode);
+ struct dcache *tdc;
+ struct file *cacheFp = NULL;
+ int code;
+ loff_t offset;
+ struct afs_lru_pages lrupages;
+ struct afs_pagecopy_task *task;
+
+ if (afs_linux_bypass_check(inode)) {
+ afs_linux_bypass_readahead(rac);
+ return;
+ }
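+    /* A memory cache has no backing cache files to read pages from. */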
+ if (cacheDiskType == AFS_FCACHE_TYPE_MEM)
+ return;
+
+    /* No readpage/read_folio method (e.g. tmpfs), skip */
+ if (cachefs_noreadpage)
+ return;
+
+ AFS_GLOCK();
+ code = afs_linux_VerifyVCache(avc, NULL);
+ if (code != 0) {
+ AFS_GUNLOCK();
+ return;
+ }
+
+ ObtainWriteLock(&avc->lock, 912);
+ AFS_GUNLOCK();
+
+ task = afs_pagecopy_init_task();
+
+ tdc = NULL;
+
+ afs_lru_cache_init(&lrupages);
+
+ while ((page = readahead_page(rac)) != NULL) {
+ offset = page_offset(page);
+
+ code = get_dcache_readahead(&tdc, &cacheFp, avc, offset);
+ if (code != 0) {
+ if (PageLocked(page)) {
+ unlock_page(page);
+ }
+ put_page(page);
+ goto done;
+ }
+
+ if (tdc != NULL) {
+ /* afs_linux_read_cache will unlock the page */
+ afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupages, task);
+ } else if (PageLocked(page)) {
+ unlock_page(page);
+ }
+ put_page(page);
+ }
+
+ done:
+ afs_lru_cache_finalize(&lrupages);
+
+ if (cacheFp != NULL)
+ filp_close(cacheFp, NULL);
+
+ afs_pagecopy_put_task(task);
+
+ AFS_GLOCK();
+ if (tdc != NULL) {
+ ReleaseReadLock(&tdc->lock);
+ afs_PutDCache(tdc);
+ }
+
+ ReleaseWriteLock(&avc->lock);
+ AFS_GUNLOCK();
+ return;
+}
+#else /* STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_READAHEAD */
/* Readpages reads a number of pages for a particular file. We use
 * this to optimise the reading by limiting how often we have to look up,
 * lock and open vcaches and dcaches.
*/
-
static int
afs_linux_readpages(struct file *fp, struct address_space *mapping,
struct list_head *page_list, unsigned int num_pages)
list_del(&page->lru);
offset = page_offset(page);
- if (tdc && tdc->f.chunk != AFS_CHUNK(offset)) {
- AFS_GLOCK();
- ReleaseReadLock(&tdc->lock);
- afs_PutDCache(tdc);
- AFS_GUNLOCK();
- tdc = NULL;
- if (cacheFp) {
- filp_close(cacheFp, NULL);
- cacheFp = NULL;
- }
+ code = get_dcache_readahead(&tdc, &cacheFp, avc, offset);
+ if (code != 0) {
+ put_page(page);
+ goto out;
}
- if (!tdc) {
- AFS_GLOCK();
- if ((tdc = afs_FindDCache(avc, offset))) {
- ObtainReadLock(&tdc->lock);
- if (!afs_IsDCacheFresh(tdc, avc) ||
- (tdc->dflags & DFFetching)) {
- ReleaseReadLock(&tdc->lock);
- afs_PutDCache(tdc);
- tdc = NULL;
- }
- }
- AFS_GUNLOCK();
- if (tdc) {
- cacheFp = afs_linux_raw_open(&tdc->f.inode);
- if (cacheFp == NULL) {
- /* Problem getting the inode */
- goto out;
- }
- if (!cacheFp->f_dentry->d_inode->i_mapping->a_ops->readpage) {
- cachefs_noreadpage = 1;
- goto out;
- }
- }
- }
-
- if (tdc && !add_to_page_cache(page, mapping, page->index,
- GFP_KERNEL)) {
- afs_lru_cache_add(&lrupages, page);
-
- /* Note that add_to_page_cache() locked 'page'.
+ if (tdc && !afs_add_to_page_cache_lru(&lrupages, page, mapping, page->index,
+ GFP_KERNEL)) {
+ /* Note that afs_add_to_page_cache_lru() locks the 'page'.
* afs_linux_read_cache() is guaranteed to handle unlocking it. */
afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupages, task);
}
AFS_GUNLOCK();
return 0;
}
+#endif /* STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_READAHEAD */
/* Prepare an AFS vcache for writeback. Should be called with the vcache
* locked */
/* afs_linux_permission
* Check access rights - returns error if can't check or permission denied.
*/
+
+#if defined(IOP_TAKES_MNT_IDMAP)
+static int
+afs_linux_permission(struct mnt_idmap *idmap, struct inode *ip, int mode)
+#elif defined(IOP_TAKES_USER_NAMESPACE)
+static int
+afs_linux_permission(struct user_namespace *mnt_userns, struct inode *ip, int mode)
+#elif defined(IOP_PERMISSION_TAKES_FLAGS)
static int
-#if defined(IOP_PERMISSION_TAKES_FLAGS)
afs_linux_permission(struct inode *ip, int mode, unsigned int flags)
#elif defined(IOP_PERMISSION_TAKES_NAMEIDATA)
+static int
afs_linux_permission(struct inode *ip, int mode, struct nameidata *nd)
#else
+static int
afs_linux_permission(struct inode *ip, int mode)
#endif
{
unsigned to)
{
- /* http://kerneltrap.org/node/4941 details the expected behaviour of
- * prepare_write. Essentially, if the page exists within the file,
- * and is not being fully written, then we should populate it.
+ /*
+     * Linux's Documentation/filesystems/vfs.rst (formerly vfs.txt) details
+     * the expected behaviour of prepare_write (before 2.6.28) and
+     * write_begin (2.6.28 and later).
+ * Essentially, if the page exists within the file, and is not being fully
+ * written, then we should populate it.
*/
if (!PageUptodate(page)) {
SetPageChecked(page);
/* Is the page readable, if it's wronly, we don't care, because we're
* not actually going to read from it ... */
- } else if ((file->f_flags && O_ACCMODE) != O_WRONLY) {
+ } else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
/* We don't care if fillpage fails, because if it does the page
* won't be marked as up to date
*/
return code;
}
+# if defined(HAVE_LINUX_GRAB_CACHE_PAGE_WRITE_BEGIN_NOFLAGS)
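+/*
+ * Variant for kernels where grab_cache_page_write_begin() and the
+ * ->write_begin() operation no longer take a 'flags' argument.
+ */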
+static int
+afs_linux_write_begin(struct file *file, struct address_space *mapping,
+ loff_t pos, unsigned len,
+ struct page **pagep, void **fsdata)
+{
+ struct page *page;
+ pgoff_t index = pos >> PAGE_SHIFT;
+ unsigned int from = pos & (PAGE_SIZE - 1);
+ int code;
+
+ page = grab_cache_page_write_begin(mapping, index);
+ if (!page) {
+ return -ENOMEM;
+ }
+
+ *pagep = page;
+
+ code = afs_linux_prepare_write(file, page, from, from + len);
+ if (code) {
+ unlock_page(page);
+ put_page(page);
+ }
+
+ return code;
+}
+# else
static int
afs_linux_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
return code;
}
-#endif
+# endif /* HAVE_LINUX_GRAB_CACHE_PAGE_WRITE_BEGIN_NOFLAGS */
+#endif /* STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_WRITE_BEGIN */
#ifndef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
static void *
};
static struct address_space_operations afs_file_aops = {
+#if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_READ_FOLIO)
+ .read_folio = afs_linux_read_folio,
+#else
.readpage = afs_linux_readpage,
+#endif
+#if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_READAHEAD)
+ .readahead = afs_linux_readahead,
+#else
.readpages = afs_linux_readpages,
+#endif
.writepage = afs_linux_writepage,
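+    /* Newer kernels replace ->set_page_dirty with ->dirty_folio;
+     * block_dirty_folio is the folio-based counterpart of
+     * __set_page_dirty_buffers (both declared in linux/buffer_head.h). */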
+#if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_DIRTY_FOLIO) && \
+ defined(HAVE_LINUX_BLOCK_DIRTY_FOLIO)
+ .dirty_folio = block_dirty_folio,
+#else
+ .set_page_dirty = __set_page_dirty_buffers,
+#endif
#if defined (STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_WRITE_BEGIN)
.write_begin = afs_linux_write_begin,
.write_end = afs_linux_write_end,
unlock_page(page);
return code;
}
+#if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_READ_FOLIO)
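+/* read_folio flavour of the symlink filler, for kernels without ->readpage. */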
+static int
+afs_symlink_filler_folio(struct file *file, struct folio *folio)
+{
+ struct page *page = &folio->page;
+ return afs_symlink_filler(file, page);
+}
+#endif
+
static struct address_space_operations afs_symlink_aops = {
+#if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_READ_FOLIO)
+ .read_folio = afs_symlink_filler_folio
+#else
.readpage = afs_symlink_filler
+#endif
};
#endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
.put_link = afs_linux_put_link,
#endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
.setattr = afs_notify_change,
+ .getattr = afs_linux_getattr,
};
void