#define MAX_ERRNO 1000L
#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)
+/* Enable our workaround for a race with d_splice_alias. The race was fixed in
+ * 2.6.34, so don't do it after that point. */
+# define D_SPLICE_ALIAS_RACE
+#endif
+
+/* Workaround for RH 7.5, which introduced the iterate() file operation but
+ * requires each file->f_mode to be marked with FMODE_KABI_ITERATE. Instead,
+ * OpenAFS will continue to use the readdir() file operation in this case.
+ */
+#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE) && !defined(FMODE_KABI_ITERATE)
+#define USE_FOP_ITERATE 1
+#else
+#undef USE_FOP_ITERATE
+#endif
+
int cachefs_noreadpage = 0;
extern struct backing_dev_info *afs_backing_dev_info;
* handling and use of bulkstats will need to be reflected here as well.
*/
static int
-#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
+#if defined(USE_FOP_ITERATE)
afs_linux_readdir(struct file *fp, struct dir_context *ctx)
#else
afs_linux_readdir(struct file *fp, void *dirbuf, filldir_t filldir)
*/
while ((avc->f.states & CStatd)
&& (tdc->dflags & DFFetching)
- && hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
+ && afs_IsDCacheFresh(tdc, avc)) {
ReleaseReadLock(&tdc->lock);
ReleaseWriteLock(&avc->lock);
afs_osi_Sleep(&tdc->validPos);
ObtainReadLock(&tdc->lock);
}
if (!(avc->f.states & CStatd)
- || !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
+ || !afs_IsDCacheFresh(tdc, avc)) {
ReleaseReadLock(&tdc->lock);
ReleaseWriteLock(&avc->lock);
afs_PutDCache(tdc);
* takes an offset in units of blobs, rather than bytes.
*/
code = 0;
-#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
+#if defined(USE_FOP_ITERATE)
offset = ctx->pos;
#else
offset = (int) fp->f_pos;
#endif
while (1) {
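+ /* dirpos is an output of BlobScan; clear it up front so an error return
+  * cannot leave a stale value behind to be mistaken for a valid blob. */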
+ dirpos = 0;
code = BlobScan(tdc, offset, &dirpos);
- if (code || !dirpos)
- break;
+ if (code == 0 && dirpos == 0) {
+ /* We've reached EOF of the dir blob, so we can stop looking for
+ * entries. */
+ break;
+ }
- code = afs_dir_GetVerifiedBlob(tdc, dirpos, &entry);
+ if (code == 0) {
+ code = afs_dir_GetVerifiedBlob(tdc, dirpos, &entry);
+ }
if (code) {
if (!(avc->f.states & CCorrupt)) {
struct cell *tc = afs_GetCellStale(avc->f.fid.Cell, READ_LOCK);
* holding the GLOCK.
*/
AFS_GUNLOCK();
-#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
+#if defined(USE_FOP_ITERATE)
/* dir_emit returns a bool - true when it succeeds.
* Invert the result to match how we check "code" */
code = !dir_emit(ctx, de->name, len, ino, type);
code = 0;
unlock_out:
-#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
+#if defined(USE_FOP_ITERATE)
ctx->pos = (loff_t) offset;
#else
fp->f_pos = (loff_t) offset;
struct file_operations afs_dir_fops = {
.read = generic_read_dir,
-#if defined(STRUCT_FILE_OPERATIONS_HAS_ITERATE)
+#if defined(USE_FOP_ITERATE)
.iterate = afs_linux_readdir,
#else
.readdir = afs_linux_readdir,
#ifdef STRUCT_FILE_OPERATIONS_HAS_READ_ITER
.read_iter = afs_linux_read_iter,
.write_iter = afs_linux_write_iter,
-# if !defined(HAVE_LINUX___VFS_READ)
+# if !defined(HAVE_LINUX___VFS_WRITE) && !defined(HAVE_LINUX_KERNEL_WRITE)
.read = new_sync_read,
.write = new_sync_write,
# endif
d_prune_aliases(ip);
-# ifdef HAVE_DCACHE_LOCK
- spin_lock(&dcache_lock);
-# else
- spin_lock(&ip->i_lock);
-# endif
+ afs_d_alias_lock(ip);
#if defined(D_ALIAS_IS_HLIST)
# if defined(HLIST_ITERATOR_NO_NODE)
vcp->target_link = ret;
-# ifdef HAVE_DCACHE_LOCK
- if (ret) {
- dget_locked(ret);
- }
- spin_unlock(&dcache_lock);
-# else
if (ret) {
- dget(ret);
+ afs_linux_dget(ret);
}
- spin_unlock(&ip->i_lock);
-# endif
+ afs_d_alias_unlock(ip);
return ret;
}
return hgetlo(pvcp->f.m.DataVersion);
}
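+/* Translate ENOENT to EINTR when a fatal signal is pending. A lookup that is
+ * interrupted partway through may report ENOENT even though the target could
+ * exist, so don't let callers treat that as an authoritative negative result
+ * (e.g. by caching a negative dentry). */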
+static inline int
+filter_enoent(int code)
+{
+#ifdef HAVE_LINUX_FATAL_SIGNAL_PENDING
+ if (code == ENOENT && fatal_signal_pending(current)) {
+ return EINTR;
+ }
+#endif
+ return code;
+}
+
+#ifndef D_SPLICE_ALIAS_RACE
+
+static inline void dentry_race_lock(void) {}
+static inline void dentry_race_unlock(void) {}
+
+#else
+
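+/* The kernel mutex API (and DEFINE_MUTEX) first appeared in Linux 2.6.16; on
+ * older kernels, fall back to a semaphore declared via DECLARE_MUTEX. */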
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+static DEFINE_MUTEX(dentry_race_sem);
+# else
+static DECLARE_MUTEX(dentry_race_sem);
+# endif
+
+static inline void
+dentry_race_lock(void)
+{
+ mutex_lock(&dentry_race_sem);
+}
+static inline void
+dentry_race_unlock(void)
+{
+ mutex_unlock(&dentry_race_sem);
+}
+
+/* Leave some trace that this code is enabled; otherwise it's pretty hard to
+ * tell. */
+static __attribute__((used)) const char dentry_race_marker[] = "d_splice_alias race workaround enabled";
+
+static int
+check_dentry_race(struct dentry *dp)
+{
+ int raced = 0;
+ if (!dp->d_inode) {
+ /* In Linux, before commit 4919c5e45a91b5db5a41695fe0357fbdff0d5767,
+ * d_splice_alias can momentarily hash a dentry before it's fully
+ * populated. This only happens for a moment, since it's unhashed again
+ * right after (in d_move), but this can make the dentry be found by
+ * __d_lookup, and then given to us.
+ *
+ * So check if the dentry is unhashed; if it is, then the dentry is not
+ * valid. We lock dentry_race_lock() to ensure that d_splice_alias is
+ * no longer running. Locking d_lock is required to check the dentry's
+ * flags, so lock that, too.
+ */
+ dentry_race_lock();
+ spin_lock(&dp->d_lock);
+ if (d_unhashed(dp)) {
+ raced = 1;
+ }
+ spin_unlock(&dp->d_lock);
+ dentry_race_unlock();
+ }
+ return raced;
+}
+#endif /* D_SPLICE_ALIAS_RACE */
+
/* Validate a dentry. Return 1 if unchanged, 0 if VFS layer should re-evaluate.
* In kernels 2.2.10 and above, we are passed an additional flags var which
* may have either the LOOKUP_FOLLOW or LOOKUP_DIRECTORY flag set, in which case
return -ECHILD;
#endif
+#ifdef D_SPLICE_ALIAS_RACE
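+ /* If we hit the d_splice_alias race, the dentry cannot be trusted; report
+  * it as invalid so the VFS discards it and retries the lookup. */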
+ if (check_dentry_race(dp)) {
+ valid = 0;
+ return valid;
+ }
+#endif
+
AFS_GLOCK();
afs_InitFakeStat(&fakestate);
credp = crref();
}
code = afs_lookup(pvcp, (char *)dp->d_name.name, &tvc, credp);
+ code = filter_enoent(code);
if (code) {
/* We couldn't perform the lookup, so we're not okay. */
AFS_GLOCK();
code = afs_lookup(VTOAFS(dip), (char *)comp, &vcp, credp);
+ code = filter_enoent(code);
if (code == ENOENT) {
/* It's ok for the file to not be found. That's noted by the caller by
* seeing that the dp->d_inode field is NULL (set by d_splice_alias or
igrab(ip);
#endif
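+ /* Hold dentry_race_lock() across d_splice_alias() so that
+  * check_dentry_race() can rule out the momentarily-hashed-but-unpopulated
+  * dentry; see the comments in check_dentry_race(). */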
+ dentry_race_lock();
newdp = d_splice_alias(ip, dp);
+ dentry_race_unlock();
done:
crfree(credp);
* If task is NULL, the page copy occurs synchronously, and the routine
* returns with page still locked. If task is non-NULL, then page copies
* may occur in the background, and the page will be unlocked when it is
- * ready for use.
+ * ready for use. Note that if task is non-NULL and we encounter an error
+ * before we start the background copy, we MUST unlock 'page' before we return.
*/
static int
afs_linux_read_cache(struct file *cachefp, struct page *page,
cachepage = find_get_page(cachemapping, pageindex);
if (!cachepage) {
if (!newpage)
- newpage = page_cache_alloc_cold(cachemapping);
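+ /* page_cache_alloc_cold() went away along with __GFP_COLD; the cold hint
+  * was only an allocator placement hint, so plain page_cache_alloc() is an
+  * equivalent replacement here. */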
+ newpage = page_cache_alloc(cachemapping);
if (!newpage) {
code = -ENOMEM;
goto out;
if (!PageUptodate(cachepage)) {
ClearPageError(cachepage);
- code = cachemapping->a_ops->readpage(NULL, cachepage);
+ /* Note that ->readpage always handles unlocking the given page, even
+ * when an error is returned. */
+ code = cachemapping->a_ops->readpage(NULL, cachepage);
if (!code && !task) {
wait_on_page_locked(cachepage);
}
}
}
+ out:
if (code && task) {
unlock_page(page);
}
-out:
if (cachepage)
put_page(cachepage);
ObtainReadLock(&tdc->lock);
/* Is the dcache we've been given currently up to date */
- if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
+ if (!afs_IsDCacheFresh(tdc, avc) ||
(tdc->dflags & DFFetching))
goto out;
/* XXX - I suspect we should be locking the inodes before we use them! */
AFS_GUNLOCK();
cacheFp = afs_linux_raw_open(&tdc->f.inode);
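+ /* If we cannot open our own cache file something is badly wrong, so treat
+  * it as a fatal error rather than dereferencing a NULL pointer below. */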
+ osi_Assert(cacheFp);
if (!cacheFp->f_dentry->d_inode->i_mapping->a_ops->readpage) {
cachefs_noreadpage = 1;
AFS_GLOCK();
goto out;
}
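+/* Newer kernels dropped the second ('cold') argument to pagevec_init();
+ * PAGEVEC_INIT_COLD_ARG is the configure-time probe for the old signature. */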
+#if defined(PAGEVEC_INIT_COLD_ARG)
pagevec_init(&lrupv, 0);
+#else
+ pagevec_init(&lrupv);
+#endif
code = afs_linux_read_cache(cacheFp, pp, tdc->f.chunk, &lrupv, NULL);
ancr->offset = auio->uio_offset;
ancr->length = auio->uio_resid;
+#if defined(PAGEVEC_INIT_COLD_ARG)
pagevec_init(&lrupv, 0);
+#else
+ pagevec_init(&lrupv);
+#endif
for(page_ix = 0; page_ix < num_pages; ++page_ix) {
task = afs_pagecopy_init_task();
tdc = NULL;
+#if defined(PAGEVEC_INIT_COLD_ARG)
pagevec_init(&lrupv, 0);
+#else
+ pagevec_init(&lrupv);
+#endif
for (page_idx = 0; page_idx < num_pages; page_idx++) {
struct page *page = list_entry(page_list->prev, struct page, lru);
list_del(&page->lru);
AFS_GLOCK();
if ((tdc = afs_FindDCache(avc, offset))) {
ObtainReadLock(&tdc->lock);
- if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) ||
+ if (!afs_IsDCacheFresh(tdc, avc) ||
(tdc->dflags & DFFetching)) {
ReleaseReadLock(&tdc->lock);
afs_PutDCache(tdc);
AFS_GUNLOCK();
if (tdc) {
cacheFp = afs_linux_raw_open(&tdc->f.inode);
+ osi_Assert(cacheFp);
if (!cacheFp->f_dentry->d_inode->i_mapping->a_ops->readpage) {
cachefs_noreadpage = 1;
goto out;
if (!pagevec_add(&lrupv, page))
__pagevec_lru_add_file(&lrupv);
+ /* Note that add_to_page_cache() locked 'page'.
+ * afs_linux_read_cache() is guaranteed to handle unlocking it. */
afs_linux_read_cache(cacheFp, page, tdc->f.chunk, &lrupv, task);
}
put_page(page);
int code;
page = grab_cache_page_write_begin(mapping, index, flags);
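+ /* grab_cache_page_write_begin() returns NULL on allocation failure; fail
+  * with -ENOMEM instead of dereferencing a NULL page below. */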
+ if (!page) {
+ return -ENOMEM;
+ }
+
*pagep = page;
code = afs_linux_prepare_write(file, page, from, from + len);