# define D_SPLICE_ALIAS_RACE
#endif
+/* Workaround for RH 7.5, which introduced the file operation iterate() but
+ * requires each file->f_mode to be marked with FMODE_KABI_ITERATE. Instead,
+ * OpenAFS will continue to use the file operation readdir() in this case.
+ */
+#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE) && !defined(FMODE_KABI_ITERATE)
+#define USE_FOP_ITERATE 1
+#else
+#undef USE_FOP_ITERATE
+#endif
+
int cachefs_noreadpage = 0;
extern struct backing_dev_info *afs_backing_dev_info;
* handling and use of bulkstats will need to be reflected here as well.
*/
static int
-#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
+#if defined(USE_FOP_ITERATE)
afs_linux_readdir(struct file *fp, struct dir_context *ctx)
#else
afs_linux_readdir(struct file *fp, void *dirbuf, filldir_t filldir)
* takes an offset in units of blobs, rather than bytes.
*/
code = 0;
-#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
+#if defined(USE_FOP_ITERATE)
offset = ctx->pos;
#else
offset = (int) fp->f_pos;
* holding the GLOCK.
*/
AFS_GUNLOCK();
-#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
+#if defined(USE_FOP_ITERATE)
/* dir_emit returns a bool - true when it succeeds.
* Inverse the result to fit with how we check "code" */
code = !dir_emit(ctx, de->name, len, ino, type);
code = 0;
unlock_out:
-#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
+#if defined(USE_FOP_ITERATE)
ctx->pos = (loff_t) offset;
#else
fp->f_pos = (loff_t) offset;
struct file_operations afs_dir_fops = {
.read = generic_read_dir,
-#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
+#if defined(USE_FOP_ITERATE)
.iterate = afs_linux_readdir,
#else
.readdir = afs_linux_readdir,
vcp->target_link = ret;
-# ifdef HAVE_DCACHE_LOCK
- if (ret) {
- afs_linux_dget(ret);
- }
- afs_d_alias_unlock(ip);
-# else
if (ret) {
afs_linux_dget(ret);
}
afs_d_alias_unlock(ip);
-# endif
return ret;
}
return hgetlo(pvcp->f.m.DataVersion);
}
-#ifdef D_SPLICE_ALIAS_RACE
+#ifndef D_SPLICE_ALIAS_RACE
+
+static inline void dentry_race_lock(void) {}
+static inline void dentry_race_unlock(void) {}
+
+#else
+
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+static DEFINE_MUTEX(dentry_race_sem);
+# else
+static DECLARE_MUTEX(dentry_race_sem);
+# endif
+
+static inline void
+dentry_race_lock(void)
+{
+ mutex_lock(&dentry_race_sem);
+}
+static inline void
+dentry_race_unlock(void)
+{
+ mutex_unlock(&dentry_race_sem);
+}
+
/* Leave some trace that this code is enabled; otherwise it's pretty hard to
* tell. */
static __attribute__((used)) const char dentry_race_marker[] = "d_splice_alias race workaround enabled";
{
int raced = 0;
if (!dp->d_inode) {
- struct dentry *parent = dget_parent(dp);
-
/* In Linux, before commit 4919c5e45a91b5db5a41695fe0357fbdff0d5767,
* d_splice_alias can momentarily hash a dentry before it's fully
* populated. This only happens for a moment, since it's unhashed again
* __d_lookup, and then given to us.
*
* So check if the dentry is unhashed; if it is, then the dentry is not
- * valid. We lock the parent inode to ensure that d_splice_alias is no
- * longer running (the inode mutex will be held during
- * afs_linux_lookup). Locking d_lock is required to check the dentry's
+ * valid. We lock dentry_race_lock() to ensure that d_splice_alias is
+ * no longer running. Locking d_lock is required to check the dentry's
* flags, so lock that, too.
*/
- afs_linux_lock_inode(parent->d_inode);
+ dentry_race_lock();
spin_lock(&dp->d_lock);
if (d_unhashed(dp)) {
raced = 1;
}
spin_unlock(&dp->d_lock);
- afs_linux_unlock_inode(parent->d_inode);
-
- dput(parent);
+ dentry_race_unlock();
}
return raced;
}
igrab(ip);
#endif
+ dentry_race_lock();
newdp = d_splice_alias(ip, dp);
+ dentry_race_unlock();
done:
crfree(credp);