/*
* Copyright 2000, International Business Machines Corporation and others.
* All Rights Reserved.
- *
+ *
* This software has been released under the terms of the IBM Public
* License. For details, see the LICENSE file in the top-level source
* directory or online at http://www.openafs.org/dl/license10.html
* So far the only truly scary part is that Linux relies on the inode cache
* to be up to date. Don't you dare break a callback and expect an fstat
* to give you meaningful information. This appears to be fixed in the 2.1
- * development kernels. As it is we can fix this now by intercepting the
+ * development kernels. As it is we can fix this now by intercepting the
* stat calls.
*/
#include "osi_compat.h"
#include "osi_pagecopy.h"
-#ifndef HAVE_LINUX_PAGEVEC_LRU_ADD_FILE
-#define __pagevec_lru_add_file __pagevec_lru_add
-#endif
-
#ifndef MAX_ERRNO
#define MAX_ERRNO 1000L
#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)
+/* Enable our workaround for a race with d_splice_alias. The race was fixed in
+ * 2.6.34, so don't do it after that point. */
+# define D_SPLICE_ALIAS_RACE
+#endif
+
+/* Workaround for RH 7.5, which introduced the iterate() file operation but
+ * requires each file->f_mode to be marked with FMODE_KABI_ITERATE. Instead,
+ * OpenAFS continues to use the readdir() file operation in this case.
+ */
+#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE) && !defined(FMODE_KABI_ITERATE)
+#define USE_FOP_ITERATE 1
+#else
+#undef USE_FOP_ITERATE
+#endif
+
int cachefs_noreadpage = 0;
extern struct backing_dev_info *afs_backing_dev_info;
extern struct vcache *afs_globalVp;
+/* Helpers for interfacing with Linux's pagevec/LRU facilities */
+
+struct afs_lru_pages {
+ struct pagevec lrupv;
+};
+
+static inline void
+afs_lru_cache_init(struct afs_lru_pages *alrupages)
+{
+#if defined(PAGEVEC_INIT_COLD_ARG)
+ pagevec_init(&alrupages->lrupv, 0);
+#else
+ pagevec_init(&alrupages->lrupv);
+#endif
+}
+
+#ifndef HAVE_LINUX_PAGEVEC_LRU_ADD_FILE
+# define __pagevec_lru_add_file __pagevec_lru_add
+#endif
+
+static inline void
+afs_lru_cache_add(struct afs_lru_pages *alrupages, struct page *page)
+{
+ get_page(page);
+ if (!pagevec_add(&alrupages->lrupv, page))
+ __pagevec_lru_add_file(&alrupages->lrupv);
+}
+
+static inline void
+afs_lru_cache_finalize(struct afs_lru_pages *alrupages)
+{
+ if (pagevec_count(&alrupages->lrupv))
+ __pagevec_lru_add_file(&alrupages->lrupv);
+}
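+
+/* Typical usage of the helpers above (see the readpage paths below):
+ *
+ *     struct afs_lru_pages lrupages;
+ *
+ *     afs_lru_cache_init(&lrupages);
+ *     ...
+ *     afs_lru_cache_add(&lrupages, page);    // each page to go on the LRU
+ *     ...
+ *     afs_lru_cache_finalize(&lrupages);     // flush any partial batch
+ */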
+
/* This function converts a positive error code from AFS into a negative
* code suitable for passing into the Linux VFS layer. It checks that the
 * error code is within the permissible bounds for the ERR_PTR mechanism.
}
#endif
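+
+/* In sketch form, the conversion amounts to:
+ *
+ *     if (code >= 0 && code <= MAX_ERRNO)
+ *         return -code;
+ *     return -EIO;
+ */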
-extern int BlobScan(struct dcache * afile, afs_int32 ablob);
+extern int BlobScan(struct dcache * afile, afs_int32 ablob, afs_int32 *ablobOut);
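+/* BlobScan now reports errors via its return value and hands the found blob
+ * position back in *ablobOut; a 0 return with *ablobOut == 0 means
+ * end-of-directory. */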
/* This is a complete rewrite of afs_readdir, since we can make use of
* filldir instead of afs_readdir_move. Note that changes to vcache/dcache
* handling and use of bulkstats will need to be reflected here as well.
*/
static int
-#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
+#if defined(USE_FOP_ITERATE)
afs_linux_readdir(struct file *fp, struct dir_context *ctx)
#else
afs_linux_readdir(struct file *fp, void *dirbuf, filldir_t filldir)
struct dcache *tdc;
int code;
int offset;
- int dirpos;
+ afs_int32 dirpos;
struct DirEntry *de;
struct DirBuffer entry;
ino_t ino;
tdc = afs_GetDCache(avc, (afs_size_t) 0, treq, &origOffset, &tlen, 1);
len = tlen;
if (!tdc) {
- code = -ENOENT;
+ code = -EIO;
goto out;
}
ObtainWriteLock(&avc->lock, 811);
*/
while ((avc->f.states & CStatd)
&& (tdc->dflags & DFFetching)
- && hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
+ && afs_IsDCacheFresh(tdc, avc)) {
ReleaseReadLock(&tdc->lock);
ReleaseWriteLock(&avc->lock);
afs_osi_Sleep(&tdc->validPos);
ObtainReadLock(&tdc->lock);
}
if (!(avc->f.states & CStatd)
- || !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
+ || !afs_IsDCacheFresh(tdc, avc)) {
ReleaseReadLock(&tdc->lock);
ReleaseWriteLock(&avc->lock);
afs_PutDCache(tdc);
* takes an offset in units of blobs, rather than bytes.
*/
code = 0;
-#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
+#if defined(USE_FOP_ITERATE)
offset = ctx->pos;
#else
offset = (int) fp->f_pos;
#endif
while (1) {
- dirpos = BlobScan(tdc, offset);
- if (!dirpos)
- break;
+ dirpos = 0;
+ code = BlobScan(tdc, offset, &dirpos);
+ if (code == 0 && dirpos == 0) {
+ /* We've reached EOF of the dir blob, so we can stop looking for
+ * entries. */
+ break;
+ }
- code = afs_dir_GetVerifiedBlob(tdc, dirpos, &entry);
+ if (code == 0) {
+ code = afs_dir_GetVerifiedBlob(tdc, dirpos, &entry);
+ }
if (code) {
if (!(avc->f.states & CCorrupt)) {
struct cell *tc = afs_GetCellStale(avc->f.fid.Cell, READ_LOCK);
- afs_warn("Corrupt directory (%d.%d.%d.%d [%s] @%lx, pos %d)",
+ afs_warn("afs: Corrupt directory (%d.%d.%d.%d [%s] @%lx, pos %d)\n",
avc->f.fid.Cell, avc->f.fid.Fid.Volume,
avc->f.fid.Fid.Vnode, avc->f.fid.Fid.Unique,
tc ? tc->cellName : "",
UpgradeSToWLock(&avc->lock, 814);
avc->f.states |= CCorrupt;
}
- code = -ENOENT;
+ code = -EIO;
goto unlock_out;
}
if ((avc->f.states & CForeign) == 0 && (ntohl(de->fid.vnode) & 1)) {
type = DT_DIR;
} else if ((tvc = afs_FindVCache(&afid, 0, 0))) {
- if (tvc->mvstat) {
+ if (tvc->mvstat != AFS_MVSTAT_FILE) {
type = DT_DIR;
} else if (((tvc->f.states) & (CStatd | CTruth))) {
/* CTruth will be set if the object has
/* clean up from afs_FindVCache */
afs_PutVCache(tvc);
}
- /*
+ /*
* If this is NFS readdirplus, then the filler is going to
* call getattr on this inode, which will deadlock if we're
* holding the GLOCK.
*/
AFS_GUNLOCK();
-#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
+#if defined(USE_FOP_ITERATE)
/* dir_emit returns a bool - true when it succeeds.
 * Invert the result to fit with how we check "code" */
code = !dir_emit(ctx, de->name, len, ino, type);
code = 0;
unlock_out:
-#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
+#if defined(USE_FOP_ITERATE)
ctx->pos = (loff_t) offset;
#else
fp->f_pos = (loff_t) offset;
int code;
AFS_GLOCK();
- afs_Trace3(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vcp,
- ICL_TYPE_POINTER, vmap->vm_start, ICL_TYPE_INT32,
- vmap->vm_end - vmap->vm_start);
+ afs_Trace4(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vcp,
+ ICL_TYPE_POINTER, vmap->vm_start, ICL_TYPE_LONG,
+ vmap->vm_end - vmap->vm_start, ICL_TYPE_LONG, 0);
/* get a validated vcache entry */
code = afs_linux_VerifyVCache(vcp, NULL);
cred_t *credp = crref();
#if defined(FOP_FSYNC_TAKES_RANGE)
- mutex_lock(&ip->i_mutex);
+ afs_linux_lock_inode(ip);
#endif
AFS_GLOCK();
code = afs_fsync(VTOAFS(ip), credp);
AFS_GUNLOCK();
#if defined(FOP_FSYNC_TAKES_RANGE)
- mutex_unlock(&ip->i_mutex);
+ afs_linux_unlock_inode(ip);
#endif
crfree(credp);
return afs_convert_code(code);
struct vcache *vcp = VTOAFS(FILE_INODE(fp));
cred_t *credp = crref();
struct AFS_FLOCK flock;
-
+
/* Convert to a lock format afs_lockctl understands. */
memset(&flock, 0, sizeof(flock));
flock.l_type = flp->fl_type;
code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
AFS_GUNLOCK();
- if ((code == 0 || flp->fl_type == F_UNLCK) &&
+ if ((code == 0 || flp->fl_type == F_UNLCK) &&
(cmd == F_SETLK || cmd == F_SETLKW)) {
code = afs_posix_lock_file(fp, flp);
if (code && flp->fl_type != F_UNLCK) {
return 0;
}
}
-
+
/* Convert flock back to Linux's file_lock */
flp->fl_type = flock.l_type;
flp->fl_pid = flock.l_pid;
code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
AFS_GUNLOCK();
- if ((code == 0 || flp->fl_type == F_UNLCK) &&
+ if ((code == 0 || flp->fl_type == F_UNLCK) &&
(cmd == F_SETLK || cmd == F_SETLKW)) {
flp->fl_flags &=~ FL_SLEEP;
code = flock_lock_file_wait(fp, flp);
struct file_operations afs_dir_fops = {
.read = generic_read_dir,
-#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
+#if defined(USE_FOP_ITERATE)
.iterate = afs_linux_readdir,
#else
.readdir = afs_linux_readdir,
#ifdef STRUCT_FILE_OPERATIONS_HAS_READ_ITER
.read_iter = afs_linux_read_iter,
.write_iter = afs_linux_write_iter,
+# if !defined(HAVE_LINUX___VFS_WRITE) && !defined(HAVE_LINUX_KERNEL_WRITE)
.read = new_sync_read,
.write = new_sync_write,
+# endif
#elif defined(HAVE_LINUX_GENERIC_FILE_AIO_READ)
.aio_read = afs_linux_aio_read,
.aio_write = afs_linux_aio_write,
#if defined(STRUCT_FILE_OPERATIONS_HAS_SENDFILE)
.sendfile = generic_file_sendfile,
#endif
-#if defined(STRUCT_FILE_OPERATIONS_HAS_SPLICE)
+#if defined(STRUCT_FILE_OPERATIONS_HAS_SPLICE) && !defined(HAVE_LINUX_DEFAULT_FILE_SPLICE_READ)
# if defined(HAVE_LINUX_ITER_FILE_SPLICE_WRITE)
.splice_write = iter_file_splice_write,
# else
d_prune_aliases(ip);
-# ifdef HAVE_DCACHE_LOCK
- spin_lock(&dcache_lock);
-# else
- spin_lock(&ip->i_lock);
-# endif
+ afs_d_alias_lock(ip);
#if defined(D_ALIAS_IS_HLIST)
# if defined(HLIST_ITERATOR_NO_NODE)
vcp->target_link = ret;
-# ifdef HAVE_DCACHE_LOCK
if (ret) {
- dget_locked(ret);
+ afs_linux_dget(ret);
}
- spin_unlock(&dcache_lock);
-# else
- if (ret) {
- dget(ret);
- }
- spin_unlock(&ip->i_lock);
-# endif
+ afs_d_alias_unlock(ip);
return ret;
}
cred_t *credp;
int code;
- if (afs_shuttingdown)
+ if (afs_shuttingdown != AFS_RUNNING)
return EIO;
AFS_GLOCK();
* changes in afs_getattr that don't get replicated here!
*/
if (vcp->f.states & CStatd &&
- (!afs_fakestat_enable || vcp->mvstat != 1) &&
+ (!afs_fakestat_enable || vcp->mvstat != AFS_MVSTAT_MTPT) &&
!afs_nfsexporter &&
(vType(vcp) == VDIR || vType(vcp) == VLNK)) {
code = afs_CopyOutAttrs(vcp, vattr);
vattrp->va_size = iattrp->ia_size;
if (iattrp->ia_valid & ATTR_ATIME) {
vattrp->va_atime.tv_sec = iattrp->ia_atime.tv_sec;
- vattrp->va_atime.tv_usec = 0;
+ vattrp->va_atime.tv_nsec = 0;
}
if (iattrp->ia_valid & ATTR_MTIME) {
vattrp->va_mtime.tv_sec = iattrp->ia_mtime.tv_sec;
- vattrp->va_mtime.tv_usec = 0;
+ vattrp->va_mtime.tv_nsec = 0;
}
if (iattrp->ia_valid & ATTR_CTIME) {
vattrp->va_ctime.tv_sec = iattrp->ia_ctime.tv_sec;
- vattrp->va_ctime.tv_usec = 0;
+ vattrp->va_ctime.tv_nsec = 0;
}
}
return afs_convert_code(code);
}
+#if defined(IOP_GETATTR_TAKES_PATH_STRUCT)
+static int
+afs_linux_getattr(const struct path *path, struct kstat *stat, u32 request_mask, unsigned int sync_mode)
+{
+ int err = afs_linux_revalidate(path->dentry);
+ if (!err) {
+ generic_fillattr(path->dentry->d_inode, stat);
+ }
+ return err;
+}
+#else
static int
afs_linux_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
int err = afs_linux_revalidate(dentry);
if (!err) {
generic_fillattr(dentry->d_inode, stat);
+ }
+ return err;
}
- return err;
-}
+#endif
static afs_uint32
parent_vcache_dv(struct inode *inode, cred_t *credp)
* us. The fake entry is the one with the useful DataVersion.
*/
pvcp = VTOAFS(inode);
- if (pvcp->mvstat == 1 && afs_fakestat_enable) {
+ if (pvcp->mvstat == AFS_MVSTAT_MTPT && afs_fakestat_enable) {
struct vrequest treq;
struct afs_fakestat_state fakestate;
return hgetlo(pvcp->f.m.DataVersion);
}
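+
+/* afs_lookup can return ENOENT when the underlying call was cut short by a
+ * fatal signal; report EINTR for that case instead, so an interrupted lookup
+ * is not mistaken for a genuinely missing entry. */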
+static inline int
+filter_enoent(int code)
+{
+#ifdef HAVE_LINUX_FATAL_SIGNAL_PENDING
+ if (code == ENOENT && fatal_signal_pending(current)) {
+ return EINTR;
+ }
+#endif
+ return code;
+}
+
+#ifndef D_SPLICE_ALIAS_RACE
+
+static inline void dentry_race_lock(void) {}
+static inline void dentry_race_unlock(void) {}
+
+#else
+
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+static DEFINE_MUTEX(dentry_race_sem);
+# else
+static DECLARE_MUTEX(dentry_race_sem);
+# endif
+
+static inline void
+dentry_race_lock(void)
+{
+ mutex_lock(&dentry_race_sem);
+}
+static inline void
+dentry_race_unlock(void)
+{
+ mutex_unlock(&dentry_race_sem);
+}
+
+/* Leave some trace that this code is enabled; otherwise it's pretty hard to
+ * tell. */
+static __attribute__((used)) const char dentry_race_marker[] = "d_splice_alias race workaround enabled";
+
+static int
+check_dentry_race(struct dentry *dp)
+{
+ int raced = 0;
+ if (!dp->d_inode) {
+ /* In Linux, before commit 4919c5e45a91b5db5a41695fe0357fbdff0d5767,
+ * d_splice_alias can momentarily hash a dentry before it's fully
+ * populated. This only happens for a moment, since it's unhashed again
+ * right after (in d_move), but this can make the dentry be found by
+ * __d_lookup, and then given to us.
+ *
+ * So check if the dentry is unhashed; if it is, then the dentry is not
+ * valid. We take dentry_race_lock() to ensure that d_splice_alias is
+ * no longer running. Locking d_lock is required to check the dentry's
+ * flags, so lock that, too.
+ */
+ dentry_race_lock();
+ spin_lock(&dp->d_lock);
+ if (d_unhashed(dp)) {
+ raced = 1;
+ }
+ spin_unlock(&dp->d_lock);
+ dentry_race_unlock();
+ }
+ return raced;
+}
+#endif /* D_SPLICE_ALIAS_RACE */
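+
+/* Usage sketch: afs_linux_dentry_revalidate() calls check_dentry_race() on
+ * entry and treats a raced dentry as invalid, while the lookup path wraps its
+ * d_splice_alias() call in dentry_race_lock()/dentry_race_unlock() so the two
+ * sides cannot observe each other mid-splice. */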
+
/* Validate a dentry. Return 1 if unchanged, 0 if VFS layer should re-evaluate.
* In kernels 2.2.10 and above, we are passed an additional flags var which
* may have either the LOOKUP_FOLLOW OR LOOKUP_DIRECTORY set in which case
- * we are advised to follow the entry if it is a link or to make sure that
+ * we are advised to follow the entry if it is a link or to make sure that
* it is a directory. But since the kernel itself checks these possibilities
* later on, we shouldn't have to do it until later. Perhaps in the future..
*
struct dentry *parent;
int valid;
struct afs_fakestat_state fakestate;
- int locked = 0;
int force_drop = 0;
afs_uint32 parent_dv;
return -ECHILD;
#endif
+#ifdef D_SPLICE_ALIAS_RACE
+ if (check_dentry_race(dp)) {
+ valid = 0;
+ return valid;
+ }
+#endif
+
+ AFS_GLOCK();
afs_InitFakeStat(&fakestate);
if (dp->d_inode) {
if (vcp == afs_globalVp)
goto good_dentry;
- parent = dget_parent(dp);
- pvcp = VTOAFS(parent->d_inode);
+ if (vcp->mvstat == AFS_MVSTAT_MTPT) {
+ if (vcp->mvid.target_root && (vcp->f.states & CMValid)) {
+ int tryEvalOnly = 0;
+ int code = 0;
+ struct vrequest *treq = NULL;
- if ((vcp->mvstat == 1) || (vcp->mvstat == 2) ||
- (pvcp->mvstat == 1 && afs_fakestat_enable)) { /* need to lock */
- credp = crref();
- AFS_GLOCK();
- locked = 1;
- }
+ credp = crref();
- if (locked) {
- if (vcp->mvstat == 1) { /* mount point */
- if (vcp->mvid && (vcp->f.states & CMValid)) {
- int tryEvalOnly = 0;
- int code = 0;
- struct vrequest *treq = NULL;
-
- code = afs_CreateReq(&treq, credp);
- if (code) {
- dput(parent);
- goto bad_dentry;
- }
- if ((strcmp(dp->d_name.name, ".directory") == 0)) {
- tryEvalOnly = 1;
- }
- if (tryEvalOnly)
- code = afs_TryEvalFakeStat(&vcp, &fakestate, treq);
- else
- code = afs_EvalFakeStat(&vcp, &fakestate, treq);
- afs_DestroyReq(treq);
- if ((tryEvalOnly && vcp->mvstat == 1) || code) {
- /* a mount point, not yet replaced by its directory */
- dput(parent);
- goto bad_dentry;
- }
+ code = afs_CreateReq(&treq, credp);
+ if (code) {
+ goto bad_dentry;
+ }
+ if ((strcmp(dp->d_name.name, ".directory") == 0)) {
+ tryEvalOnly = 1;
+ }
+ if (tryEvalOnly)
+ code = afs_TryEvalFakeStat(&vcp, &fakestate, treq);
+ else
+ code = afs_EvalFakeStat(&vcp, &fakestate, treq);
+ afs_DestroyReq(treq);
+ if ((tryEvalOnly && vcp->mvstat == AFS_MVSTAT_MTPT) || code) {
+ /* a mount point, not yet replaced by its directory */
+ goto bad_dentry;
}
}
+ } else if (vcp->mvstat == AFS_MVSTAT_ROOT && *dp->d_name.name != '/') {
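+ /* mvid is a union discriminated by mvstat: target_root for a mount
+ * point, parent for a volume root, silly_name for a sillyrenamed
+ * file. */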
+ osi_Assert(vcp->mvid.parent != NULL);
}
-#ifdef notdef
- /* If the last looker changes, we should make sure the current
- * looker still has permission to examine this file. This would
- * always require a crref() which would be "slow".
- */
- if (vcp->last_looker != treq.uid) {
- if (!afs_AccessOK(vcp, (vType(vcp) == VREG) ? PRSFS_READ : PRSFS_LOOKUP, &treq, CHECK_MODE_BITS)) {
- dput(parent);
- goto bad_dentry;
- }
-
- vcp->last_looker = treq.uid;
- }
-#endif
-
+ parent = dget_parent(dp);
+ pvcp = VTOAFS(parent->d_inode);
parent_dv = parent_vcache_dv(parent->d_inode, credp);
/* If the parent's DataVersion has changed or the vnode
* isn't enough since the vnode may have been renamed.
*/
- if ((!locked) && (parent_dv > dp->d_time || !(vcp->f.states & CStatd)) ) {
- credp = crref();
- AFS_GLOCK();
- locked = 1;
- }
-
- if (locked && (parent_dv > dp->d_time || !(vcp->f.states & CStatd))) {
+ if (parent_dv > dp->d_time || !(vcp->f.states & CStatd)) {
struct vattr *vattr = NULL;
int code;
int lookup_good;
+ if (credp == NULL) {
+ credp = crref();
+ }
code = afs_lookup(pvcp, (char *)dp->d_name.name, &tvc, credp);
+ code = filter_enoent(code);
if (code) {
/* We couldn't perform the lookup, so we're not okay. */
/* unlikely--the vcache entry hasn't changed */
dput(parent);
+
} else {
-#ifdef notyet
- /* If this code is ever enabled, we should use dget_parent to handle
- * getting the parent, and dput() to dispose of it. See above for an
- * example ... */
- pvcp = VTOAFS(dp->d_parent->d_inode);
- if (hgetlo(pvcp->f.m.DataVersion) > dp->d_time)
- goto bad_dentry;
-#endif
- /* No change in parent's DataVersion so this negative
- * lookup is still valid. BUT, if a server is down a
- * negative lookup can result so there should be a
- * liftime as well. For now, always expire.
- */
+ /* 'dp' represents a cached negative lookup. */
+
+ parent = dget_parent(dp);
+ pvcp = VTOAFS(parent->d_inode);
+ parent_dv = parent_vcache_dv(parent->d_inode, credp);
- goto bad_dentry;
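+ /* A negative entry stays valid only while its parent directory is
+ * unchanged; dynroot directories can gain entries at any time, so
+ * negatives there always expire. */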
+ if (parent_dv > dp->d_time || !(pvcp->f.states & CStatd)
+ || afs_IsDynroot(pvcp)) {
+ dput(parent);
+ goto bad_dentry;
+ }
+
+ dput(parent);
}
good_dentry:
valid = 1;
+ goto done;
+
+ bad_dentry:
+ valid = 0;
+#ifndef D_INVALIDATE_IS_VOID
+ /* When d_invalidate was converted to void (in v3.18), it also started
+ * being called automatically from revalidate, and automatically
+ * handled:
+ * - shrink_dcache_parent
+ * - automatic detach of submounts
+ * - d_drop
+ * Therefore, after that point, OpenAFS revalidate logic no longer needs
+ * to do any of those things itself for invalid dentry structs. We only need
+ * to tell VFS it's invalid (by returning 0), and VFS will handle the rest.
+ */
+ if (have_submounts(dp))
+ valid = 1;
+#endif
done:
/* Clean up */
if (tvc)
afs_PutVCache(tvc);
- afs_PutFakeStat(&fakestate); /* from here on vcp may be no longer valid */
- if (locked) {
- /* we hold the global lock if we evaluated a mount point */
- AFS_GUNLOCK();
- }
+ afs_PutFakeStat(&fakestate);
+ AFS_GUNLOCK();
if (credp)
crfree(credp);
+#ifndef D_INVALIDATE_IS_VOID
if (!valid) {
/*
* If we had a negative lookup for the name we want to forcibly
} else
d_invalidate(dp);
}
-
+#endif
return valid;
- bad_dentry:
- if (have_submounts(dp))
- valid = 1;
- else
- valid = 0;
- goto done;
}
static void
afs_dentry_iput(struct dentry *dp, struct inode *ip)
{
struct vcache *vcp = VTOAFS(ip);
+ int haveGlock = ISAFS_GLOCK();
+
+ if (!haveGlock) {
+ AFS_GLOCK();
+ }
- AFS_GLOCK();
if (!AFS_IS_DISCONNECTED || (vcp->f.states & CUnlinked)) {
(void) afs_InactiveVCache(vcp, NULL);
}
- AFS_GUNLOCK();
+
+ if (!haveGlock) {
+ AFS_GUNLOCK();
+ }
+
afs_linux_clear_nfsfs_renamed(dp);
iput(ip);
{
struct dentry *target;
- /* avoid symlink resolution limits when resolving; we cannot contribute to
- * an infinite symlink loop */
+ /*
+ * Avoid symlink resolution limits when resolving; we cannot contribute to
+ * an infinite symlink loop.
+ *
+ * On newer kernels the field has moved to the private nameidata structure
+ * so we can't adjust it here. This may cause ELOOP when using a path with
+ * 40 or more directories that are not already in the dentry cache.
+ */
+#if defined(STRUCT_TASK_STRUCT_HAS_TOTAL_LINK_COUNT)
current->total_link_count--;
+#endif
target = canonical_dentry(path->dentry->d_inode);
int code;
AFS_GLOCK();
+
code = afs_lookup(VTOAFS(dip), (char *)comp, &vcp, credp);
-
- if (!code) {
+ code = filter_enoent(code);
+ if (code == ENOENT) {
+ /* It's ok for the file to not be found. That's noted by the caller by
+ * seeing that the dp->d_inode field is NULL (set by d_splice_alias or
+ * d_add, below). */
+ code = 0;
+ osi_Assert(vcp == NULL);
+ }
+ if (code) {
+ AFS_GUNLOCK();
+ goto done;
+ }
+
+ if (vcp) {
struct vattr *vattr = NULL;
struct vcache *parent_vc = VTOAFS(dip);
d_prune_aliases(ip);
#ifdef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
- ip->i_flags |= S_AUTOMOUNT;
+ /* Only needed if this is a volume root */
+ if (vcp->mvstat == AFS_MVSTAT_ROOT)
+ ip->i_flags |= S_AUTOMOUNT;
#endif
}
/*
igrab(ip);
#endif
+ dentry_race_lock();
newdp = d_splice_alias(ip, dp);
+ dentry_race_unlock();
done:
crfree(credp);
- /* It's ok for the file to not be found. That's noted by the caller by
- * seeing that the dp->d_inode field is NULL.
- */
- if (!code || code == ENOENT) {
- /*
- * d_splice_alias can return an error (EIO) if there is an existing
- * connected directory alias for this dentry.
- */
- if (!IS_ERR(newdp)) {
- iput(ip);
- return newdp;
- } else {
- d_add(dp, ip);
- /*
- * Depending on the kernel version, d_splice_alias may or may
- * not drop the inode reference on error. If it didn't, do it
- * here.
- */
+ if (IS_ERR(newdp)) {
+ /* d_splice_alias can return an error (EIO) if there is an existing
+ * connected directory alias for this dentry. Add our dentry manually
+ * if this happens. */
+ d_add(dp, ip);
+
#if defined(D_SPLICE_ALIAS_LEAK_ON_ERROR)
- iput(ip);
+ /* Depending on the kernel version, d_splice_alias may or may not drop
+ * the inode reference on error. If it didn't, do it here. */
+ iput(ip);
#endif
- return NULL;
- }
- } else {
+ return NULL;
+ }
+
+ if (code) {
if (ip)
iput(ip);
return ERR_PTR(afs_convert_code(code));
}
+
+ iput(ip);
+ return newdp;
}
static int
VTOAFS(dir), (char *)__dp->d_name.name,
credp);
if (!code) {
- tvc->mvid = (void *) __name;
+ tvc->mvid.silly_name = __name;
crhold(credp);
if (tvc->uncred) {
crfree(tvc->uncred);
tvc->uncred = credp;
tvc->f.states |= CUnlinked;
afs_linux_set_nfsfs_renamed(dentry);
+
+ __dp->d_time = 0; /* force to revalidate */
+ d_move(dentry, __dp);
} else {
osi_FreeSmallSpace(__name);
}
AFS_GUNLOCK();
- if (!code) {
- __dp->d_time = hgetlo(VTOAFS(dir)->f.m.DataVersion);
- d_move(dentry, __dp);
- }
dput(__dp);
return code;
#if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
dp->d_op = &afs_dentry_operations;
#endif
- dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
+ dp->d_time = parent_vcache_dv(dip, credp);
d_instantiate(dp, ip);
}
afs_DestroyAttr(vattr);
static int
afs_linux_rename(struct inode *oldip, struct dentry *olddp,
- struct inode *newip, struct dentry *newdp)
+ struct inode *newip, struct dentry *newdp
+#ifdef HAVE_LINUX_INODE_OPERATIONS_RENAME_TAKES_FLAGS
+ , unsigned int flags
+#endif
+ )
{
int code;
cred_t *credp = crref();
const char *newname = newdp->d_name.name;
struct dentry *rehash = NULL;
+#ifdef HAVE_LINUX_INODE_OPERATIONS_RENAME_TAKES_FLAGS
+ if (flags)
+ return -EINVAL; /* no support for new flags yet */
+#endif
+
/* Prevent any new references during rename operation. */
if (!d_unhashed(newdp)) {
}
-/* afs_linux_ireadlink
+/* afs_linux_ireadlink
* Internal readlink which can return link contents to user or kernel space.
* Note that the buffer is NOT supposed to be null-terminated.
*/
}
#if !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
-/* afs_linux_readlink
+/* afs_linux_readlink
* Fill target (which is in user space) with contents of symlink.
*/
static int
/* afs_linux_follow_link
* a file system dependent link following routine.
*/
+#if defined(HAVE_LINUX_INODE_OPERATIONS_FOLLOW_LINK_NO_NAMEIDATA)
+static const char *afs_linux_follow_link(struct dentry *dentry, void **link_data)
+#else
static int afs_linux_follow_link(struct dentry *dentry, struct nameidata *nd)
+#endif
{
int code;
char *name;
name = kmalloc(PATH_MAX, GFP_NOFS);
if (!name) {
+#if defined(HAVE_LINUX_INODE_OPERATIONS_FOLLOW_LINK_NO_NAMEIDATA)
+ return ERR_PTR(-EIO);
+#else
return -EIO;
+#endif
}
AFS_GLOCK();
AFS_GUNLOCK();
if (code < 0) {
+#if defined(HAVE_LINUX_INODE_OPERATIONS_FOLLOW_LINK_NO_NAMEIDATA)
+ return ERR_PTR(code);
+#else
return code;
+#endif
}
name[code] = '\0';
+#if defined(HAVE_LINUX_INODE_OPERATIONS_FOLLOW_LINK_NO_NAMEIDATA)
+ return *link_data = name;
+#else
nd_set_link(nd, name);
return 0;
+#endif
}
+#if defined(HAVE_LINUX_INODE_OPERATIONS_PUT_LINK_NO_NAMEIDATA)
+static void
+afs_linux_put_link(struct inode *inode, void *link_data)
+{
+ char *name = link_data;
+
+ if (name && !IS_ERR(name))
+ kfree(name);
+}
+#else
static void
afs_linux_put_link(struct dentry *dentry, struct nameidata *nd)
{
if (name && !IS_ERR(name))
kfree(name);
}
+#endif /* HAVE_LINUX_INODE_OPERATIONS_PUT_LINK_NO_NAMEIDATA */
#endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
 * If task is NULL, the page copy occurs synchronously, and the routine
* returns with page still locked. If task is non-NULL, then page copies
* may occur in the background, and the page will be unlocked when it is
- * ready for use.
+ * ready for use. Note that if task is non-NULL and we encounter an error
+ * before we start the background copy, we MUST unlock 'page' before we return.
*/
static int
afs_linux_read_cache(struct file *cachefp, struct page *page,
- int chunk, struct pagevec *lrupv,
+ int chunk, struct afs_lru_pages *alrupages,
struct afs_pagecopy_task *task) {
loff_t offset = page_offset(page);
struct inode *cacheinode = cachefp->f_dentry->d_inode;
/* If we're trying to read a page that's past the end of the disk
* cache file, then just return a zeroed page */
if (AFS_CHUNKOFFSET(offset) >= i_size_read(cacheinode)) {
- zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(page, 0, PAGE_SIZE);
SetPageUptodate(page);
if (task)
unlock_page(page);
/* From our offset, we now need to work out which page in the disk
* file it corresponds to. This will be fun ... */
- pageindex = (offset - AFS_CHUNKTOBASE(chunk)) >> PAGE_CACHE_SHIFT;
+ pageindex = (offset - AFS_CHUNKTOBASE(chunk)) >> PAGE_SHIFT;
while (cachepage == NULL) {
cachepage = find_get_page(cachemapping, pageindex);
if (!cachepage) {
if (!newpage)
- newpage = page_cache_alloc_cold(cachemapping);
+ newpage = page_cache_alloc(cachemapping);
if (!newpage) {
code = -ENOMEM;
goto out;
if (code == 0) {
cachepage = newpage;
newpage = NULL;
-
- page_cache_get(cachepage);
- if (!pagevec_add(lrupv, cachepage))
- __pagevec_lru_add_file(lrupv);
-
+ afs_lru_cache_add(alrupages, cachepage);
} else {
- page_cache_release(newpage);
+ put_page(newpage);
newpage = NULL;
if (code != -EEXIST)
goto out;
if (!PageUptodate(cachepage)) {
ClearPageError(cachepage);
- code = cachemapping->a_ops->readpage(NULL, cachepage);
+ /* Note that ->readpage always handles unlocking the given page, even
+ * when an error is returned. */
+ code = cachemapping->a_ops->readpage(NULL, cachepage);
if (!code && !task) {
wait_on_page_locked(cachepage);
}
}
}
+ out:
if (code && task) {
unlock_page(page);
}
-out:
if (cachepage)
- page_cache_release(cachepage);
+ put_page(cachepage);
return code;
}
struct file *cacheFp = NULL;
int code;
int dcLocked = 0;
- struct pagevec lrupv;
+ struct afs_lru_pages lrupages;
/* Not a UFS cache, don't do anything */
if (cacheDiskType != AFS_FCACHE_TYPE_UFS)
ObtainReadLock(&tdc->lock);
/* Is the dcache we've been given currently up to date */
- if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
+ if (!afs_IsDCacheFresh(tdc, avc) ||
(tdc->dflags & DFFetching))
goto out;
/* XXX - I suspect we should be locking the inodes before we use them! */
AFS_GUNLOCK();
cacheFp = afs_linux_raw_open(&tdc->f.inode);
+ osi_Assert(cacheFp);
if (!cacheFp->f_dentry->d_inode->i_mapping->a_ops->readpage) {
cachefs_noreadpage = 1;
AFS_GLOCK();
goto out;
}
- pagevec_init(&lrupv, 0);
- code = afs_linux_read_cache(cacheFp, pp, tdc->f.chunk, &lrupv, NULL);
+ afs_lru_cache_init(&lrupages);
- if (pagevec_count(&lrupv))
- __pagevec_lru_add_file(&lrupv);
+ code = afs_linux_read_cache(cacheFp, pp, tdc->f.chunk, &lrupages, NULL);
+
+ afs_lru_cache_finalize(&lrupages);
filp_close(cacheFp, NULL);
AFS_GLOCK();
99999); /* not a possible code value */
code = afs_rdwr(avc, auio, UIO_READ, 0, credp);
-
+
afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, cnt, ICL_TYPE_INT32,
code);
if (tdc) {
if (!(tdc->mflags & DFNextStarted))
afs_PrefetchChunk(avc, tdc, credp, treq);
- afs_PutDCache(tdc);
+ afs_PutDCache(tdc);
}
ReleaseWriteLock(&avc->lock);
}
struct iovec* iovecp;
struct nocache_read_request *ancr;
struct page *pp;
- struct pagevec lrupv;
+ struct afs_lru_pages lrupages;
afs_int32 code = 0;
cred_t *credp;
ancr->offset = auio->uio_offset;
ancr->length = auio->uio_resid;
- pagevec_init(&lrupv, 0);
+ afs_lru_cache_init(&lrupages);
for(page_ix = 0; page_ix < num_pages; ++page_ix) {
/* If we allocate a page and don't remove it from page_list,
* the page cache gets upset. */
list_del(&pp->lru);
- isize = (i_size_read(fp->f_mapping->host) - 1) >> PAGE_CACHE_SHIFT;
+ isize = (i_size_read(fp->f_mapping->host) - 1) >> PAGE_SHIFT;
if(pp->index > isize) {
if(PageLocked(pp))
unlock_page(pp);
if(base_index != pp->index) {
if(PageLocked(pp))
unlock_page(pp);
- page_cache_release(pp);
+ put_page(pp);
iovecp[page_ix].iov_base = (void *) 0;
base_index++;
ancr->length -= PAGE_SIZE;
if(code) {
if(PageLocked(pp))
unlock_page(pp);
- page_cache_release(pp);
+ put_page(pp);
iovecp[page_ix].iov_base = (void *) 0;
} else {
page_count++;
lock_page(pp);
}
- /* increment page refcount--our original design assumed
- * that locking it would effectively pin it; protect
- * ourselves from the possiblity that this assumption is
- * is faulty, at low cost (provided we do not fail to
- * do the corresponding decref on the other side) */
- get_page(pp);
-
/* save the page for background map */
iovecp[page_ix].iov_base = (void*) pp;
/* and put it on the LRU cache */
- if (!pagevec_add(&lrupv, pp))
- __pagevec_lru_add_file(&lrupv);
+ afs_lru_cache_add(&lrupages, pp);
}
}
/* If there were useful pages in the page list, make sure all pages
* are in the LRU cache, then schedule the read */
if(page_count) {
- if (pagevec_count(&lrupv))
- __pagevec_lru_add_file(&lrupv);
+ afs_lru_cache_finalize(&lrupages);
credp = crref();
code = afs_ReadNoCache(avc, ancr, credp);
crfree(credp);
* it as up to date.
*/
if (page_offset(pp) >= i_size_read(fp->f_mapping->host)) {
- zero_user_segment(pp, 0, PAGE_CACHE_SIZE);
+ zero_user_segment(pp, 0, PAGE_SIZE);
SetPageUptodate(pp);
unlock_page(pp);
return 0;
case LARGE_FILES_BYPASS_CACHE:
if (i_size_read(ip) > cache_bypass_threshold)
return 1;
+ /* fall through */
default:
return 0;
}
int code;
unsigned int page_idx;
loff_t offset;
- struct pagevec lrupv;
+ struct afs_lru_pages lrupages;
struct afs_pagecopy_task *task;
if (afs_linux_bypass_check(inode))
task = afs_pagecopy_init_task();
tdc = NULL;
- pagevec_init(&lrupv, 0);
+
+ afs_lru_cache_init(&lrupages);
+
for (page_idx = 0; page_idx < num_pages; page_idx++) {
struct page *page = list_entry(page_list->prev, struct page, lru);
list_del(&page->lru);
AFS_GLOCK();
if ((tdc = afs_FindDCache(avc, offset))) {
ObtainReadLock(&tdc->lock);
- if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
+ if (!afs_IsDCacheFresh(tdc, avc) ||
(tdc->dflags & DFFetching)) {
ReleaseReadLock(&tdc->lock);
afs_PutDCache(tdc);
AFS_GUNLOCK();
if (tdc) {
cacheFp = afs_linux_raw_open(&tdc->f.inode);
+ osi_Assert(cacheFp);
if (!cacheFp->f_dentry->d_inode->i_mapping->a_ops->readpage) {
cachefs_noreadpage = 1;
goto out;
if (tdc && !add_to_page_cache(page, mapping, page->index,
GFP_KERNEL)) {
- page_cache_get(page);
- if (!pagevec_add(&lrupv, page))
- __pagevec_lru_add_file(&lrupv);
+ afs_lru_cache_add(&lrupages, page);
- afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupv, task);
+ /* Note that add_to_page_cache() locked 'page'.
+ * afs_linux_read_cache() is guaranteed to handle unlocking it. */
+ afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupages, task);
}
- page_cache_release(page);
+ put_page(page);
}
- if (pagevec_count(&lrupv))
- __pagevec_lru_add_file(&lrupv);
+ afs_lru_cache_finalize(&lrupages);
out:
if (tdc)
* locked */
static inline int
afs_linux_prepare_writeback(struct vcache *avc) {
- if (avc->f.states & CPageWrite) {
- return AOP_WRITEPAGE_ACTIVATE;
+ pid_t pid;
+ struct pagewriter *pw;
+
+ pid = MyPidxx2Pid(MyPidxx);
+ /* Prevent recursion into the writeback code */
+ spin_lock(&avc->pagewriter_lock);
+ list_for_each_entry(pw, &avc->pagewriters, link) {
+ if (pw->writer == pid) {
+ spin_unlock(&avc->pagewriter_lock);
+ return AOP_WRITEPAGE_ACTIVATE;
+ }
}
- avc->f.states |= CPageWrite;
+ spin_unlock(&avc->pagewriter_lock);
+
+ /* Add ourselves to writer list */
+ pw = osi_Alloc(sizeof(struct pagewriter));
+ pw->writer = pid;
+ spin_lock(&avc->pagewriter_lock);
+ list_add_tail(&pw->link, &avc->pagewriters);
+ spin_unlock(&avc->pagewriter_lock);
+
return 0;
}
static inline void
afs_linux_complete_writeback(struct vcache *avc) {
- avc->f.states &= ~CPageWrite;
+ struct pagewriter *pw, *store;
+ pid_t pid;
+ struct list_head tofree;
+
+ INIT_LIST_HEAD(&tofree);
+ pid = MyPidxx2Pid(MyPidxx);
+ /* Remove ourselves from writer list */
+ spin_lock(&avc->pagewriter_lock);
+ list_for_each_entry_safe(pw, store, &avc->pagewriters, link) {
+ if (pw->writer == pid) {
+ list_del(&pw->link);
+ /* osi_Free may sleep so we need to defer it */
+ list_add_tail(&pw->link, &tofree);
+ }
+ }
+ spin_unlock(&avc->pagewriter_lock);
+ list_for_each_entry_safe(pw, store, &tofree, link) {
+ list_del(&pw->link);
+ osi_Free(pw, sizeof(struct pagewriter));
+ }
}
/* Writeback a given page synchronously. Called with no AFS locks held */
struct inode *inode;
struct vcache *vcp;
cred_t *credp;
- unsigned int to = PAGE_CACHE_SIZE;
+ unsigned int to = PAGE_SIZE;
loff_t isize;
int code = 0;
int code1 = 0;
- page_cache_get(pp);
+ get_page(pp);
inode = mapping->host;
vcp = VTOAFS(inode);
done:
end_page_writeback(pp);
- page_cache_release(pp);
+ put_page(pp);
if (code1)
return code1;
/* Is the location we are writing to beyond the end of the file? */
if (pagebase >= isize ||
((from == 0) && (pagebase + to) >= isize)) {
- zero_user_segments(page, 0, from, to, PAGE_CACHE_SIZE);
+ zero_user_segments(page, 0, from, to, PAGE_SIZE);
SetPageChecked(page);
 /* Are we writing a full page */
- } else if (from == 0 && to == PAGE_CACHE_SIZE) {
+ } else if (from == 0 && to == PAGE_SIZE) {
SetPageChecked(page);
 /* Is the page readable? If it's write-only, we don't care, because we're
* not actually going to read from it ... */
struct page *page, void *fsdata)
{
int code;
- unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned int from = pos & (PAGE_SIZE - 1);
- code = afs_linux_commit_write(file, page, from, from + len);
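+ /* Commit only the bytes actually copied; a short copy from userspace can
+ * leave copied < len. */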
+ code = afs_linux_commit_write(file, page, from, from + copied);
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
return code;
}
struct page **pagep, void **fsdata)
{
struct page *page;
- pgoff_t index = pos >> PAGE_CACHE_SHIFT;
- unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
+ pgoff_t index = pos >> PAGE_SHIFT;
+ unsigned int from = pos & (PAGE_SIZE - 1);
int code;
page = grab_cache_page_write_begin(mapping, index, flags);
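+ /* grab_cache_page_write_begin() returns NULL on allocation failure */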
+ if (!page) {
+ return -ENOMEM;
+ }
+
*pagep = page;
code = afs_linux_prepare_write(file, page, from, from + len);
if (code) {
unlock_page(page);
- page_cache_release(page);
+ put_page(page);
}
return code;
static struct inode_operations afs_symlink_iops = {
#if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
.readlink = page_readlink,
-# if defined(HAVE_LINUX_PAGE_FOLLOW_LINK)
+# if defined(HAVE_LINUX_PAGE_GET_LINK)
+ .get_link = page_get_link,
+# elif defined(HAVE_LINUX_PAGE_FOLLOW_LINK)
.follow_link = page_follow_link,
# else
.follow_link = page_follow_link_light,
void
afs_fill_inode(struct inode *ip, struct vattr *vattr)
{
-
if (vattr)
vattr2inode(ip, vattr);
+#ifdef STRUCT_ADDRESS_SPACE_HAS_BACKING_DEV_INFO
ip->i_mapping->backing_dev_info = afs_backing_dev_info;
+#endif
/* Reset ops if symlink or directory. */
if (S_ISREG(ip->i_mode)) {
ip->i_op = &afs_file_iops;
} else if (S_ISLNK(ip->i_mode)) {
ip->i_op = &afs_symlink_iops;
+#if defined(HAVE_LINUX_INODE_NOHIGHMEM)
+ inode_nohighmem(ip);
+#endif
#if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
ip->i_data.a_ops = &afs_symlink_aops;
ip->i_mapping = &ip->i_data;