#include <linux/mm_inline.h>
#endif
#include <linux/pagemap.h>
-#include <linux/smp_lock.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
-#if defined(AFS_CACHE_BYPASS)
+#include <linux/aio.h>
#include "afs/lock.h"
#include "afs/afs_bypasscache.h"
-#endif
#include "osi_compat.h"
#include "osi_pagecopy.h"
-#ifdef pgoff2loff
-#define pageoff(pp) pgoff2loff((pp)->index)
-#else
-#define pageoff(pp) pp->offset
-#endif
-
-#ifndef HAVE_PAGEVEC_LRU_ADD_FILE
+#ifndef HAVE_LINUX_PAGEVEC_LRU_ADD_FILE
#define __pagevec_lru_add_file __pagevec_lru_add
#endif
#define MAX_ERRNO 1000L
#endif
-#define LockPage(pp) lock_page(pp)
-#define UnlockPage(pp) unlock_page(pp)
-extern struct backing_dev_info afs_backing_dev_info;
+extern struct backing_dev_info *afs_backing_dev_info;
extern struct vcache *afs_globalVp;
-extern int afs_notify_change(struct dentry *dp, struct iattr *iattrp);
-/* Some uses of BKL are perhaps not needed for bypass or memcache--
- * why don't we try it out? */
-extern struct afs_cacheOps afs_UfsCacheOps;
-
-static inline void
-afs_maybe_lock_kernel(void) {
- if(afs_cacheType == &afs_UfsCacheOps)
- lock_kernel();
-}
-
-static inline void
-afs_maybe_unlock_kernel(void) {
- if(afs_cacheType == &afs_UfsCacheOps)
- unlock_kernel();
-}
/* This function converts a positive error code from AFS into a negative
* code suitable for passing into the Linux VFS layer. It checks that the
return afs_convert_code(code);
}
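+/* The AIO entry points below come in two prototype flavours: older kernels
+ * pass a single user buffer and length, newer ones pass an iovec array.
+ * The configure-time LINUX_HAS_NONVECTOR_AIO test selects the right one. */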
+#ifdef HAVE_LINUX_GENERIC_FILE_AIO_READ
+# ifdef LINUX_HAS_NONVECTOR_AIO
+static ssize_t
+afs_linux_aio_read(struct kiocb *iocb, char __user *buf, size_t bufsize,
+ loff_t pos)
+# else
+static ssize_t
+afs_linux_aio_read(struct kiocb *iocb, const struct iovec *buf,
+ unsigned long bufsize, loff_t pos)
+# endif
+{
+ struct file *fp = iocb->ki_filp;
+ ssize_t code = 0;
+ struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
+
+ AFS_GLOCK();
+ afs_Trace4(afs_iclSetp, CM_TRACE_AIOREADOP, ICL_TYPE_POINTER, vcp,
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32,
+ (afs_int32)bufsize, ICL_TYPE_INT32, 99999);
+ code = afs_linux_VerifyVCache(vcp, NULL);
+
+ if (code == 0) {
+ /* Linux's FlushPages implementation doesn't ever use credp,
+ * so we optimise by not using it */
+ osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
+ AFS_GUNLOCK();
+ code = generic_file_aio_read(iocb, buf, bufsize, pos);
+ AFS_GLOCK();
+ }
+
+ afs_Trace4(afs_iclSetp, CM_TRACE_AIOREADOP, ICL_TYPE_POINTER, vcp,
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32,
+ (afs_int32)bufsize, ICL_TYPE_INT32, code);
+ AFS_GUNLOCK();
+ return code;
+}
+#else
static ssize_t
afs_linux_read(struct file *fp, char *buf, size_t count, loff_t * offp)
{
AFS_GUNLOCK();
return code;
}
+#endif
-/* Now we have integrated VM for writes as well as reads. generic_file_write
- * also takes care of re-positioning the pointer if file is open in append
+/* Now we have integrated VM for writes as well as reads. The generic write
+ * operations also take care of re-positioning the pointer if the file is open in append
* mode. Call fake open/close to ensure we do writes of core dumps.
*/
+#ifdef HAVE_LINUX_GENERIC_FILE_AIO_READ
+# ifdef LINUX_HAS_NONVECTOR_AIO
+static ssize_t
+afs_linux_aio_write(struct kiocb *iocb, const char __user *buf, size_t bufsize,
+ loff_t pos)
+# else
+static ssize_t
+afs_linux_aio_write(struct kiocb *iocb, const struct iovec *buf,
+ unsigned long bufsize, loff_t pos)
+# endif
+{
+ ssize_t code = 0;
+ struct vcache *vcp = VTOAFS(iocb->ki_filp->f_dentry->d_inode);
+ cred_t *credp;
+
+ AFS_GLOCK();
+
+ afs_Trace4(afs_iclSetp, CM_TRACE_AIOWRITEOP, ICL_TYPE_POINTER, vcp,
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32,
+ (afs_int32)bufsize, ICL_TYPE_INT32,
+ (iocb->ki_filp->f_flags & O_APPEND) ? 99998 : 99999);
+
+ code = afs_linux_VerifyVCache(vcp, &credp);
+
+ ObtainWriteLock(&vcp->lock, 529);
+ afs_FakeOpen(vcp);
+ ReleaseWriteLock(&vcp->lock);
+ if (code == 0) {
+ AFS_GUNLOCK();
+ code = generic_file_aio_write(iocb, buf, bufsize, pos);
+ AFS_GLOCK();
+ }
+
+ ObtainWriteLock(&vcp->lock, 530);
+
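+    /* If we are the last writer, afs_FakeClose() may need credentials to
+     * store dirty data back to the fileserver, so make sure we hold some. */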
+ if (vcp->execsOrWriters == 1 && !credp)
+ credp = crref();
+
+ afs_FakeClose(vcp, credp);
+ ReleaseWriteLock(&vcp->lock);
+
+ afs_Trace4(afs_iclSetp, CM_TRACE_AIOWRITEOP, ICL_TYPE_POINTER, vcp,
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(pos), ICL_TYPE_INT32,
+ (afs_int32)bufsize, ICL_TYPE_INT32, code);
+
+ if (credp)
+ crfree(credp);
+ AFS_GUNLOCK();
+ return code;
+}
+#else
static ssize_t
afs_linux_write(struct file *fp, const char *buf, size_t count, loff_t * offp)
{
AFS_GUNLOCK();
return code;
}
+#endif
extern int BlobScan(struct dcache * afile, afs_int32 ablob);
{
struct vcache *avc = VTOAFS(FILE_INODE(fp));
struct vrequest treq;
- register struct dcache *tdc;
+ struct dcache *tdc;
int code;
int offset;
int dirpos;
struct DirEntry *de;
+ struct DirBuffer entry;
ino_t ino;
int len;
afs_size_t origOffset, tlen;
cred_t *credp = crref();
struct afs_fakestat_state fakestat;
- afs_maybe_lock_kernel();
AFS_GLOCK();
AFS_STATCNT(afs_readdir);
code = -ENOENT;
goto out;
}
- ObtainSharedLock(&avc->lock, 810);
- UpgradeSToWLock(&avc->lock, 811);
+ ObtainWriteLock(&avc->lock, 811);
ObtainReadLock(&tdc->lock);
/*
* Make sure that the data in the cache is current. There are two
&& (tdc->dflags & DFFetching)
&& hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
ReleaseReadLock(&tdc->lock);
- ReleaseSharedLock(&avc->lock);
+ ReleaseWriteLock(&avc->lock);
afs_osi_Sleep(&tdc->validPos);
- ObtainSharedLock(&avc->lock, 812);
+ ObtainWriteLock(&avc->lock, 812);
ObtainReadLock(&tdc->lock);
}
if (!(avc->f.states & CStatd)
|| !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
ReleaseReadLock(&tdc->lock);
- ReleaseSharedLock(&avc->lock);
+ ReleaseWriteLock(&avc->lock);
afs_PutDCache(tdc);
goto tagain;
}
if (!dirpos)
break;
- de = afs_dir_GetBlob(tdc, dirpos);
- if (!de)
- break;
-
- ino = afs_calc_inum (avc->f.fid.Fid.Volume, ntohl(de->fid.vnode));
-
- if (de->name)
- len = strlen(de->name);
- else {
- printf("afs_linux_readdir: afs_dir_GetBlob failed, null name (inode %lx, dirpos %d)\n",
- (unsigned long)&tdc->f.inode, dirpos);
- DRelease(de, 0);
+ code = afs_dir_GetVerifiedBlob(tdc, dirpos, &entry);
+ if (code) {
+ afs_warn("Corrupt directory (inode %lx, dirpos %d)",
+ (unsigned long)&tdc->f.inode, dirpos);
ReleaseSharedLock(&avc->lock);
afs_PutDCache(tdc);
code = -ENOENT;
goto out;
- }
+ }
+
+ de = (struct DirEntry *)entry.data;
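+    /* Fold the cell into the inode calculation so identical volume/vnode
+     * pairs from different cells yield distinct inode numbers. */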
+ ino = afs_calc_inum (avc->f.fid.Cell, avc->f.fid.Fid.Volume,
+ ntohl(de->fid.vnode));
+ len = strlen(de->name);
/* filldir returns -EINVAL when the buffer is full. */
{
code = (*filldir) (dirbuf, de->name, len, offset, ino, type);
AFS_GLOCK();
}
- DRelease(de, 0);
+ DRelease(&entry, 0);
if (code)
break;
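+    /* A directory entry spans 1 + ((len + 16) >> 5) 32-byte blobs (one for
+     * the entry header, the rest for the name; cf. afs_dir_NameBlobs), so
+     * step the cursor past all of them. */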
offset = dirpos + 1 + ((len + 16) >> 5);
afs_PutFakeStat(&fakestat);
out1:
AFS_GUNLOCK();
- afs_maybe_unlock_kernel();
return code;
}
/* get a validated vcache entry */
code = afs_linux_VerifyVCache(vcp, NULL);
- /* Linux's Flushpage implementation doesn't use credp, so optimise
- * our code to not need to crref() it */
- osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
+ if (code == 0) {
+ /* Linux's Flushpage implementation doesn't use credp, so optimise
+ * our code to not need to crref() it */
+ osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
+ AFS_GUNLOCK();
+ code = generic_file_mmap(fp, vmap);
+ AFS_GLOCK();
+ if (!code)
+ vcp->f.states |= CMAPPED;
+ }
AFS_GUNLOCK();
- code = generic_file_mmap(fp, vmap);
- AFS_GLOCK();
- if (!code)
- vcp->f.states |= CMAPPED;
- AFS_GUNLOCK();
return code;
}
cred_t *credp = crref();
int code;
- afs_maybe_lock_kernel();
AFS_GLOCK();
code = afs_open(&vcp, fp->f_flags, credp);
AFS_GUNLOCK();
- afs_maybe_unlock_kernel();
crfree(credp);
return afs_convert_code(code);
cred_t *credp = crref();
int code = 0;
- afs_maybe_lock_kernel();
AFS_GLOCK();
code = afs_close(vcp, fp->f_flags, credp);
+ ObtainWriteLock(&vcp->lock, 807);
+ if (vcp->cred) {
+ crfree(vcp->cred);
+ vcp->cred = NULL;
+ }
+ ReleaseWriteLock(&vcp->lock);
AFS_GUNLOCK();
- afs_maybe_unlock_kernel();
crfree(credp);
return afs_convert_code(code);
}
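+/* The fsync file operation's prototype has changed over time: it has taken
+ * a dentry, then just a datasync flag, and most recently a byte range (on
+ * which kernels the VFS no longer holds i_mutex for us, so we take it). */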
static int
+#if defined(FOP_FSYNC_TAKES_DENTRY)
afs_linux_fsync(struct file *fp, struct dentry *dp, int datasync)
+#elif defined(FOP_FSYNC_TAKES_RANGE)
+afs_linux_fsync(struct file *fp, loff_t start, loff_t end, int datasync)
+#else
+afs_linux_fsync(struct file *fp, int datasync)
+#endif
{
int code;
struct inode *ip = FILE_INODE(fp);
cred_t *credp = crref();
- afs_maybe_lock_kernel();
+#if defined(FOP_FSYNC_TAKES_RANGE)
+ mutex_lock(&ip->i_mutex);
+#endif
AFS_GLOCK();
code = afs_fsync(VTOAFS(ip), credp);
AFS_GUNLOCK();
- afs_maybe_unlock_kernel();
+#if defined(FOP_FSYNC_TAKES_RANGE)
+ mutex_unlock(&ip->i_mutex);
+#endif
crfree(credp);
return afs_convert_code(code);
struct AFS_FLOCK flock;
/* Convert to a lock format afs_lockctl understands. */
- memset((char *)&flock, 0, sizeof(flock));
+ memset(&flock, 0, sizeof(flock));
flock.l_type = flp->fl_type;
flock.l_pid = flp->fl_pid;
flock.l_whence = 0;
flock.l_start = flp->fl_start;
- flock.l_len = flp->fl_end - flp->fl_start + 1;
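+    /* POSIX locks use l_len == 0 to mean "through end of file"; the kernel
+     * encodes the same thing as fl_end == OFFSET_MAX, so translate. */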
+ if (flp->fl_end == OFFSET_MAX)
+ flock.l_len = 0; /* Lock to end of file */
+ else
+ flock.l_len = flp->fl_end - flp->fl_start + 1;
/* Safe because there are no large files, yet */
#if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
#endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
AFS_GLOCK();
- code = afs_lockctl(vcp, &flock, cmd, credp);
+ if ((vcp->f.states & CRO)) {
+ if (flp->fl_type == F_WRLCK) {
+ code = -EBADF;
+ } else {
+ code = 0;
+ }
+ AFS_GUNLOCK();
+ crfree(credp);
+ return code;
+ }
+ code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
AFS_GUNLOCK();
if ((code == 0 || flp->fl_type == F_UNLCK) &&
flp->fl_type = flock.l_type;
flp->fl_pid = flock.l_pid;
flp->fl_start = flock.l_start;
- flp->fl_end = flock.l_start + flock.l_len - 1;
+ if (flock.l_len == 0)
+ flp->fl_end = OFFSET_MAX; /* Lock to end of file */
+ else
+ flp->fl_end = flock.l_start + flock.l_len - 1;
crfree(credp);
- return afs_convert_code(code);
+ return code;
}
#ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
cred_t *credp = crref();
struct AFS_FLOCK flock;
/* Convert to a lock format afs_lockctl understands. */
- memset((char *)&flock, 0, sizeof(flock));
+ memset(&flock, 0, sizeof(flock));
flock.l_type = flp->fl_type;
flock.l_pid = flp->fl_pid;
flock.l_whence = 0;
flock.l_start = 0;
- flock.l_len = OFFSET_MAX;
+ flock.l_len = 0;
/* Safe because there are no large files, yet */
#if defined(F_GETLK64) && (F_GETLK != F_GETLK64)
#endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
AFS_GLOCK();
- code = afs_lockctl(vcp, &flock, cmd, credp);
+ code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
AFS_GUNLOCK();
if ((code == 0 || flp->fl_type == F_UNLCK) &&
flp->fl_pid = flock.l_pid;
crfree(credp);
- return afs_convert_code(code);
+ return code;
}
#endif
struct vcache *vcp;
cred_t *credp;
int code;
-#if defined(AFS_CACHE_BYPASS)
- int bypasscache;
-#endif
+ int bypasscache = 0;
AFS_GLOCK();
code = afs_InitReq(&treq, credp);
if (code)
goto out;
-#if defined(AFS_CACHE_BYPASS)
- /* If caching is bypassed for this file, or globally, just return 0 */
- if(cache_bypass_strategy == ALWAYS_BYPASS_CACHE)
- bypasscache = 1;
- else {
- ObtainReadLock(&vcp->lock);
- if(vcp->cachingStates & FCSBypass)
- bypasscache = 1;
- ReleaseReadLock(&vcp->lock);
- }
- if(bypasscache) {
- /* future proof: don't rely on 0 return from afs_InitReq */
- code = 0; goto out;
- }
-#endif
+ /* If caching is bypassed for this file, or globally, just return 0 */
+ if (cache_bypass_strategy == ALWAYS_BYPASS_CACHE)
+ bypasscache = 1;
+ else {
+ ObtainReadLock(&vcp->lock);
+ if (vcp->cachingStates & FCSBypass)
+ bypasscache = 1;
+ ReleaseReadLock(&vcp->lock);
+ }
+ if (bypasscache) {
+ /* future proof: don't rely on 0 return from afs_InitReq */
+ code = 0;
+ goto out;
+ }
ObtainSharedLock(&vcp->lock, 535);
if ((vcp->execsOrWriters > 0) && (file_count(fp) == 1)) {
#endif
.open = afs_linux_open,
.release = afs_linux_release,
+ .llseek = default_llseek,
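+    /* Directories have no dirty page data of their own to flush, so a
+     * no-op fsync should suffice here. */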
+#ifdef HAVE_LINUX_NOOP_FSYNC
+ .fsync = noop_fsync,
+#else
+ .fsync = simple_sync_file,
+#endif
};
struct file_operations afs_file_fops = {
+#ifdef HAVE_LINUX_GENERIC_FILE_AIO_READ
+ .aio_read = afs_linux_aio_read,
+ .aio_write = afs_linux_aio_write,
+#else
.read = afs_linux_read,
.write = afs_linux_write,
-#ifdef GENERIC_FILE_AIO_READ
- .aio_read = generic_file_aio_read,
- .aio_write = generic_file_aio_write,
#endif
#ifdef HAVE_UNLOCKED_IOCTL
.unlocked_ioctl = afs_unlocked_xioctl,
#ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
.flock = afs_linux_flock,
#endif
+ .llseek = default_llseek,
};
+static struct dentry *
+canonical_dentry(struct inode *ip)
+{
+ struct vcache *vcp = VTOAFS(ip);
+ struct dentry *first = NULL, *ret = NULL, *cur;
+#if defined(D_ALIAS_IS_HLIST) && !defined(HLIST_ITERATOR_NO_NODE)
+ struct hlist_node *p;
+#endif
+
+ /* general strategy:
+ * if vcp->target_link is set, and can be found in ip->i_dentry, use that.
+ * otherwise, use the first dentry in ip->i_dentry.
+ * if ip->i_dentry is empty, use the 'dentry' argument we were given.
+ */
+ /* note that vcp->target_link specifies which dentry to use, but we have
+ * no reference held on that dentry. so, we cannot use or dereference
+ * vcp->target_link itself, since it may have been freed. instead, we only
+ * use it to compare to pointers in the ip->i_dentry list. */
+
+ d_prune_aliases(ip);
+
+# ifdef HAVE_DCACHE_LOCK
+ spin_lock(&dcache_lock);
+# else
+ spin_lock(&ip->i_lock);
+# endif
+
+#if defined(D_ALIAS_IS_HLIST)
+# if defined(HLIST_ITERATOR_NO_NODE)
+ hlist_for_each_entry(cur, &ip->i_dentry, d_alias) {
+# else
+ hlist_for_each_entry(cur, p, &ip->i_dentry, d_alias) {
+# endif
+#else
+ list_for_each_entry_reverse(cur, &ip->i_dentry, d_alias) {
+#endif
+
+ if (!vcp->target_link || cur == vcp->target_link) {
+ ret = cur;
+ break;
+ }
+
+ if (!first) {
+ first = cur;
+ }
+ }
+ if (!ret && first) {
+ ret = first;
+ }
+
+ vcp->target_link = ret;
+
+# ifdef HAVE_DCACHE_LOCK
+ if (ret) {
+ dget_locked(ret);
+ }
+ spin_unlock(&dcache_lock);
+# else
+ if (ret) {
+ dget(ret);
+ }
+ spin_unlock(&ip->i_lock);
+# endif
+
+ return ret;
+}
/**********************************************************************
* AFS Linux dentry operations
**********************************************************************/
-/* check_bad_parent() : Checks if this dentry's vcache is a root vcache
+/* fix_bad_parent() : called if this dentry's vcache is a root vcache
* that has its mvid (parent dir's fid) pointer set to the wrong directory
- * due to being mounted in multiple points at once. If so, check_bad_parent()
+ * due to being mounted in multiple points at once. fix_bad_parent()
* calls afs_lookup() to correct the vcache's mvid, as well as the volume's
* dotdotfid and mtpoint fid members.
* Parameters:
* dp - dentry to be checked.
+ * credp - credentials
+ * vcp, pvc - item's and parent's vcache pointer
* Return Values:
* None.
* Sideeffects:
*/
static inline void
-check_bad_parent(struct dentry *dp)
+fix_bad_parent(struct dentry *dp, cred_t *credp, struct vcache *vcp, struct vcache *pvc)
{
- cred_t *credp;
- struct vcache *vcp = VTOAFS(dp->d_inode), *avc = NULL;
- struct vcache *pvc = VTOAFS(dp->d_parent->d_inode);
-
- if (vcp->mvid->Fid.Volume != pvc->f.fid.Fid.Volume) { /* bad parent */
- credp = crref();
-
- /* force a lookup, so vcp->mvid is fixed up */
- afs_lookup(pvc, (char *)dp->d_name.name, &avc, credp);
- if (!avc || vcp != avc) { /* bad, very bad.. */
- afs_Trace4(afs_iclSetp, CM_TRACE_TMP_1S3L, ICL_TYPE_STRING,
- "check_bad_parent: bad pointer returned from afs_lookup origvc newvc dentry",
- ICL_TYPE_POINTER, vcp, ICL_TYPE_POINTER, avc,
- ICL_TYPE_POINTER, dp);
- }
- if (avc)
- AFS_RELE(AFSTOV(avc));
- crfree(credp);
+ struct vcache *avc = NULL;
+
+ /* force a lookup, so vcp->mvid is fixed up */
+ afs_lookup(pvc, (char *)dp->d_name.name, &avc, credp);
+ if (!avc || vcp != avc) { /* bad, very bad.. */
+ afs_Trace4(afs_iclSetp, CM_TRACE_TMP_1S3L, ICL_TYPE_STRING,
+ "check_bad_parent: bad pointer returned from afs_lookup origvc newvc dentry",
+ ICL_TYPE_POINTER, vcp, ICL_TYPE_POINTER, avc,
+ ICL_TYPE_POINTER, dp);
}
+ if (avc)
+ AFS_RELE(AFSTOV(avc));
return;
}
if (afs_shuttingdown)
return EIO;
- afs_maybe_lock_kernel();
AFS_GLOCK();
#ifdef notyet
/* Make this a fast path (no crref), since it's called so often. */
 if (vcp->f.states & CStatd) {
+ struct vcache *pvc = VTOAFS(dp->d_parent->d_inode);
- if (*dp->d_name.name != '/' && vcp->mvstat == 2) /* root vnode */
- check_bad_parent(dp); /* check and correct mvid */
-
- AFS_GUNLOCK();
- unlock_kernel();
+ if (*dp->d_name.name != '/' && vcp->mvstat == 2) { /* root vnode */
+ if (vcp->mvid->Fid.Volume != pvc->f.fid.Fid.Volume) { /* bad parent */
+ credp = crref();
+ AFS_GLOCK();
+ fix_bad_parent(dp, credp, vcp, pvc); /* check and correct mvid */
+ AFS_GUNLOCK();
+ crfree(credp);
+ }
+ }
return 0;
}
#endif
*/
if (vcp->f.states & CStatd &&
(!afs_fakestat_enable || vcp->mvstat != 1) &&
- !afs_nfsexporter) {
+ !afs_nfsexporter &&
+ (vType(vcp) == VDIR || vType(vcp) == VLNK)) {
code = afs_CopyOutAttrs(vcp, &vattr);
} else {
credp = crref();
code = afs_getattr(vcp, &vattr, credp);
crfree(credp);
}
+
if (!code)
afs_fill_inode(AFSTOV(vcp), &vattr);
AFS_GUNLOCK();
- afs_maybe_unlock_kernel();
return afs_convert_code(code);
}
+/* iattr2vattr
+ * Set iattr data into vattr. Assume vattr cleared before call.
+ */
+static void
+iattr2vattr(struct vattr *vattrp, struct iattr *iattrp)
+{
+ vattrp->va_mask = iattrp->ia_valid;
+ if (iattrp->ia_valid & ATTR_MODE)
+ vattrp->va_mode = iattrp->ia_mode;
+ if (iattrp->ia_valid & ATTR_UID)
+ vattrp->va_uid = iattrp->ia_uid;
+ if (iattrp->ia_valid & ATTR_GID)
+ vattrp->va_gid = iattrp->ia_gid;
+ if (iattrp->ia_valid & ATTR_SIZE)
+ vattrp->va_size = iattrp->ia_size;
+ if (iattrp->ia_valid & ATTR_ATIME) {
+ vattrp->va_atime.tv_sec = iattrp->ia_atime.tv_sec;
+ vattrp->va_atime.tv_usec = 0;
+ }
+ if (iattrp->ia_valid & ATTR_MTIME) {
+ vattrp->va_mtime.tv_sec = iattrp->ia_mtime.tv_sec;
+ vattrp->va_mtime.tv_usec = 0;
+ }
+ if (iattrp->ia_valid & ATTR_CTIME) {
+ vattrp->va_ctime.tv_sec = iattrp->ia_ctime.tv_sec;
+ vattrp->va_ctime.tv_usec = 0;
+ }
+}
+
+/* vattr2inode
+ * Rewrite the inode cache from the attr. Assumes all vattr fields are valid.
+ */
+void
+vattr2inode(struct inode *ip, struct vattr *vp)
+{
+ ip->i_ino = vp->va_nodeid;
+#ifdef HAVE_LINUX_SET_NLINK
+ set_nlink(ip, vp->va_nlink);
+#else
+ ip->i_nlink = vp->va_nlink;
+#endif
+ ip->i_blocks = vp->va_blocks;
+#ifdef STRUCT_INODE_HAS_I_BLKBITS
+ ip->i_blkbits = AFS_BLKBITS;
+#endif
+#ifdef STRUCT_INODE_HAS_I_BLKSIZE
+ ip->i_blksize = vp->va_blocksize;
+#endif
+ ip->i_rdev = vp->va_rdev;
+ ip->i_mode = vp->va_mode;
+ ip->i_uid = vp->va_uid;
+ ip->i_gid = vp->va_gid;
+ i_size_write(ip, vp->va_size);
+ ip->i_atime.tv_sec = vp->va_atime.tv_sec;
+ ip->i_atime.tv_nsec = 0;
+ ip->i_mtime.tv_sec = vp->va_mtime.tv_sec;
+ /* Set the mtime nanoseconds to the sysname generation number.
+ * This convinces NFS clients that all directories have changed
+ * any time the sysname list changes.
+ */
+ ip->i_mtime.tv_nsec = afs_sysnamegen;
+ ip->i_ctime.tv_sec = vp->va_ctime.tv_sec;
+ ip->i_ctime.tv_nsec = 0;
+}
+
+/* afs_notify_change
+ * Linux version of setattr call. What to change is in the iattr struct.
+ * We need to set bits in both the Linux inode as well as the vcache.
+ */
+static int
+afs_notify_change(struct dentry *dp, struct iattr *iattrp)
+{
+ struct vattr vattr;
+ cred_t *credp = crref();
+ struct inode *ip = dp->d_inode;
+ int code;
+
+ VATTR_NULL(&vattr);
+ iattr2vattr(&vattr, iattrp); /* Convert for AFS vnodeops call. */
+
+ AFS_GLOCK();
+ code = afs_setattr(VTOAFS(ip), &vattr, credp);
+ if (!code) {
+ afs_getattr(VTOAFS(ip), &vattr, credp);
+ vattr2inode(ip, &vattr);
+ }
+ AFS_GUNLOCK();
+ crfree(credp);
+ return afs_convert_code(code);
+}
+
static int
afs_linux_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
{
* we are advised to follow the entry if it is a link or to make sure that
* it is a directory. But since the kernel itself checks these possibilities
* later on, we shouldn't have to do it until later. Perhaps in the future..
+ *
+ * The code here assumes that on entry the global lock is not held
*/
static int
-#ifdef DOP_REVALIDATE_TAKES_NAMEIDATA
+#if defined(DOP_REVALIDATE_TAKES_UNSIGNED)
+afs_linux_dentry_revalidate(struct dentry *dp, unsigned int flags)
+#elif defined(DOP_REVALIDATE_TAKES_NAMEIDATA)
afs_linux_dentry_revalidate(struct dentry *dp, struct nameidata *nd)
#else
afs_linux_dentry_revalidate(struct dentry *dp, int flags)
struct vattr vattr;
cred_t *credp = NULL;
struct vcache *vcp, *pvcp, *tvc = NULL;
+ struct dentry *parent;
int valid;
struct afs_fakestat_state fakestate;
+ int locked = 0;
+
+#ifdef LOOKUP_RCU
+ /* We don't support RCU path walking */
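+    /* Returning -ECHILD tells the VFS to retry the lookup in ref-walk mode. */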
+# if defined(DOP_REVALIDATE_TAKES_UNSIGNED)
+ if (flags & LOOKUP_RCU)
+# else
+ if (nd->flags & LOOKUP_RCU)
+# endif
+ return -ECHILD;
+#endif
- afs_maybe_lock_kernel();
- AFS_GLOCK();
afs_InitFakeStat(&fakestate);
if (dp->d_inode) {
-
vcp = VTOAFS(dp->d_inode);
- pvcp = VTOAFS(dp->d_parent->d_inode); /* dget_parent()? */
if (vcp == afs_globalVp)
goto good_dentry;
- if (vcp->mvstat == 1) { /* mount point */
+ parent = dget_parent(dp);
+ pvcp = VTOAFS(parent->d_inode);
+
+ if ((vcp->mvstat == 1) || (vcp->mvstat == 2)) { /* need to lock */
+ credp = crref();
+ AFS_GLOCK();
+ locked = 1;
+ }
+
+ if (locked && vcp->mvstat == 1) { /* mount point */
if (vcp->mvid && (vcp->f.states & CMValid)) {
int tryEvalOnly = 0;
int code = 0;
struct vrequest treq;
- credp = crref();
code = afs_InitReq(&treq, credp);
if (
(strcmp(dp->d_name.name, ".directory") == 0)) {
code = afs_EvalFakeStat(&vcp, &fakestate, &treq);
if ((tryEvalOnly && vcp->mvstat == 1) || code) {
/* a mount point, not yet replaced by its directory */
+ dput(parent);
goto bad_dentry;
}
}
} else
- if (*dp->d_name.name != '/' && vcp->mvstat == 2) /* root vnode */
- check_bad_parent(dp); /* check and correct mvid */
+ if (locked && *dp->d_name.name != '/' && vcp->mvstat == 2) { /* root vnode */
+ if (vcp->mvid->Fid.Volume != pvcp->f.fid.Fid.Volume) { /* bad parent */
+ fix_bad_parent(dp, credp, vcp, pvcp); /* check and correct mvid */
+ }
+ }
#ifdef notdef
/* If the last looker changes, we should make sure the current
* always require a crref() which would be "slow".
*/
if (vcp->last_looker != treq.uid) {
- if (!afs_AccessOK(vcp, (vType(vcp) == VREG) ? PRSFS_READ : PRSFS_LOOKUP, &treq, CHECK_MODE_BITS))
+ if (!afs_AccessOK(vcp, (vType(vcp) == VREG) ? PRSFS_READ : PRSFS_LOOKUP, &treq, CHECK_MODE_BITS)) {
+ dput(parent);
goto bad_dentry;
+ }
vcp->last_looker = treq.uid;
}
#endif
+
/* If the parent's DataVersion has changed or the vnode
* is longer valid, we need to do a full lookup. VerifyVCache
* isn't enough since the vnode may have been renamed.
*/
- if (hgetlo(pvcp->f.m.DataVersion) > dp->d_time || !(vcp->f.states & CStatd)) {
-
+ if ((!locked) && (hgetlo(pvcp->f.m.DataVersion) > dp->d_time || !(vcp->f.states & CStatd)) ) {
credp = crref();
+ AFS_GLOCK();
+ locked = 1;
+ }
+
+ if (locked && (hgetlo(pvcp->f.m.DataVersion) > dp->d_time || !(vcp->f.states & CStatd))) {
afs_lookup(pvcp, (char *)dp->d_name.name, &tvc, credp);
- if (!tvc || tvc != vcp)
+ if (!tvc || tvc != vcp) {
+ dput(parent);
goto bad_dentry;
+ }
- if (afs_getattr(vcp, &vattr, credp))
+ if (afs_getattr(vcp, &vattr, credp)) {
+ dput(parent);
goto bad_dentry;
+ }
vattr2inode(AFSTOV(vcp), &vattr);
dp->d_time = hgetlo(pvcp->f.m.DataVersion);
/* should we always update the attributes at this point? */
/* unlikely--the vcache entry hasn't changed */
+ dput(parent);
} else {
#ifdef notyet
- pvcp = VTOAFS(dp->d_parent->d_inode); /* dget_parent()? */
+ /* If this code is ever enabled, we should use dget_parent to handle
+ * getting the parent, and dput() to dispose of it. See above for an
+ * example ... */
+ pvcp = VTOAFS(dp->d_parent->d_inode);
if (hgetlo(pvcp->f.m.DataVersion) > dp->d_time)
goto bad_dentry;
#endif
/* Clean up */
if (tvc)
afs_PutVCache(tvc);
- afs_PutFakeStat(&fakestate);
- AFS_GUNLOCK();
+ afs_PutFakeStat(&fakestate);	/* from here on vcp may no longer be valid */
+ if (locked) {
+ /* we hold the global lock if we evaluated a mount point */
+ AFS_GUNLOCK();
+ }
if (credp)
crfree(credp);
shrink_dcache_parent(dp);
d_drop(dp);
}
- afs_maybe_unlock_kernel();
return valid;
bad_dentry:
}
static int
+#if defined(DOP_D_DELETE_TAKES_CONST)
+afs_dentry_delete(const struct dentry *dp)
+#else
afs_dentry_delete(struct dentry *dp)
+#endif
{
if (dp->d_inode && (VTOAFS(dp->d_inode)->f.states & CUnlinked))
return 1; /* bad inode? */
return 0;
}
+#ifdef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
+static struct vfsmount *
+afs_dentry_automount(afs_linux_path_t *path)
+{
+ struct dentry *target;
+
+ /* avoid symlink resolution limits when resolving; we cannot contribute to
+ * an infinite symlink loop */
+ current->total_link_count--;
+
+ target = canonical_dentry(path->dentry->d_inode);
+
+ if (target == path->dentry) {
+ dput(target);
+ target = NULL;
+ }
+
+ if (target) {
+ dput(path->dentry);
+ path->dentry = target;
+
+ } else {
+ spin_lock(&path->dentry->d_lock);
+ path->dentry->d_flags &= ~DCACHE_NEED_AUTOMOUNT;
+ spin_unlock(&path->dentry->d_lock);
+ }
+
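+    /* No new vfsmount is needed: we either substituted the canonical dentry
+     * above or cleared the automount flag, so the VFS just proceeds with the
+     * dentry in 'path'. */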
+ return NULL;
+}
+#endif /* STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT */
+
struct dentry_operations afs_dentry_operations = {
.d_revalidate = afs_linux_dentry_revalidate,
.d_delete = afs_dentry_delete,
.d_iput = afs_dentry_iput,
+#ifdef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
+ .d_automount = afs_dentry_automount,
+#endif /* STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT */
};
/**********************************************************************
* name is in kernel space at this point.
*/
static int
-#ifdef IOP_CREATE_TAKES_NAMEIDATA
+#if defined(IOP_CREATE_TAKES_BOOL)
+afs_linux_create(struct inode *dip, struct dentry *dp, umode_t mode,
+ bool excl)
+#elif defined(IOP_CREATE_TAKES_UMODE_T)
+afs_linux_create(struct inode *dip, struct dentry *dp, umode_t mode,
+ struct nameidata *nd)
+#elif defined(IOP_CREATE_TAKES_NAMEIDATA)
afs_linux_create(struct inode *dip, struct dentry *dp, int mode,
struct nameidata *nd)
#else
vattr.va_mode = mode;
vattr.va_type = mode & S_IFMT;
- afs_maybe_lock_kernel();
AFS_GLOCK();
code = afs_create(VTOAFS(dip), (char *)name, &vattr, NONEXCL, mode,
&vcp, credp);
afs_getattr(vcp, &vattr, credp);
afs_fill_inode(ip, &vattr);
insert_inode_hash(ip);
+#if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
dp->d_op = &afs_dentry_operations;
+#endif
dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
d_instantiate(dp, ip);
}
AFS_GUNLOCK();
- afs_maybe_unlock_kernel();
crfree(credp);
return afs_convert_code(code);
}
/* afs_linux_lookup */
static struct dentry *
-#ifdef IOP_LOOKUP_TAKES_NAMEIDATA
+#if defined(IOP_LOOKUP_TAKES_UNSIGNED)
+afs_linux_lookup(struct inode *dip, struct dentry *dp,
+ unsigned flags)
+#elif defined(IOP_LOOKUP_TAKES_NAMEIDATA)
afs_linux_lookup(struct inode *dip, struct dentry *dp,
struct nameidata *nd)
#else
struct dentry *newdp = NULL;
int code;
- afs_maybe_lock_kernel();
AFS_GLOCK();
code = afs_lookup(VTOAFS(dip), (char *)comp, &vcp, credp);
if (vcp) {
struct vattr vattr;
+ struct vcache *parent_vc = VTOAFS(dip);
+
+ if (parent_vc == vcp) {
+ /* This is possible if the parent dir is a mountpoint to a volume,
+ * and the dir entry we looked up is a mountpoint to the same
+ * volume. Linux cannot cope with this, so return an error instead
+ * of risking a deadlock or panic. */
+ afs_PutVCache(vcp);
+ code = EDEADLK;
+ AFS_GUNLOCK();
+ goto done;
+ }
ip = AFSTOV(vcp);
afs_getattr(vcp, &vattr, credp);
if (hlist_unhashed(&ip->i_hash))
insert_inode_hash(ip);
}
+#if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
dp->d_op = &afs_dentry_operations;
+#endif
dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
AFS_GUNLOCK();
if (ip && S_ISDIR(ip->i_mode)) {
- struct dentry *alias;
-
- /* Try to invalidate an existing alias in favor of our new one */
- alias = d_find_alias(ip);
- /* But not if it's disconnected; then we want d_splice_alias below */
- if (alias && !(alias->d_flags & DCACHE_DISCONNECTED)) {
- if (d_invalidate(alias) == 0) {
- dput(alias);
- } else {
- iput(ip);
- unlock_kernel();
- crfree(credp);
- return alias;
- }
- }
+ d_prune_aliases(ip);
+
+#ifdef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
+ ip->i_flags |= S_AUTOMOUNT;
+#endif
}
newdp = d_splice_alias(ip, dp);
- afs_maybe_unlock_kernel();
+ done:
crfree(credp);
/* It's ok for the file to not be found. That's noted by the caller by
return afs_convert_code(code);
}
+/* We have to have a Linux specific sillyrename function, because we
+ * also have to keep the dcache up to date when we're doing a silly
+ * rename - so we don't want the generic vnodeops doing this behind our
+ * back.
+ */
+
+static int
+afs_linux_sillyrename(struct inode *dir, struct dentry *dentry,
+ cred_t *credp)
+{
+ struct vcache *tvc = VTOAFS(dentry->d_inode);
+ struct dentry *__dp = NULL;
+ char *__name = NULL;
+ int code;
+
+ if (afs_linux_nfsfs_renamed(dentry))
+ return EBUSY;
+
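+    /* Keep generating silly-rename names until we find one with no existing
+     * dentry in the parent directory. */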
+ do {
+ dput(__dp);
+
+ AFS_GLOCK();
+ if (__name)
+ osi_FreeSmallSpace(__name);
+ __name = afs_newname();
+ AFS_GUNLOCK();
+
+ __dp = lookup_one_len(__name, dentry->d_parent, strlen(__name));
+
+ if (IS_ERR(__dp)) {
+ osi_FreeSmallSpace(__name);
+ return EBUSY;
+ }
+ } while (__dp->d_inode != NULL);
+
+ AFS_GLOCK();
+ code = afs_rename(VTOAFS(dir), (char *)dentry->d_name.name,
+ VTOAFS(dir), (char *)__dp->d_name.name,
+ credp);
+ if (!code) {
+ tvc->mvid = (void *) __name;
+ crhold(credp);
+ if (tvc->uncred) {
+ crfree(tvc->uncred);
+ }
+ tvc->uncred = credp;
+ tvc->f.states |= CUnlinked;
+ afs_linux_set_nfsfs_renamed(dentry);
+ } else {
+ osi_FreeSmallSpace(__name);
+ }
+ AFS_GUNLOCK();
+
+ if (!code) {
+ __dp->d_time = hgetlo(VTOAFS(dir)->f.m.DataVersion);
+ d_move(dentry, __dp);
+ }
+ dput(__dp);
+
+ return code;
+}
+
+
static int
afs_linux_unlink(struct inode *dip, struct dentry *dp)
{
const char *name = dp->d_name.name;
struct vcache *tvc = VTOAFS(dp->d_inode);
- afs_maybe_lock_kernel();
if (VREFCOUNT(tvc) > 1 && tvc->opens > 0
&& !(tvc->f.states & CUnlinked)) {
- struct dentry *__dp;
- char *__name;
-
- __dp = NULL;
- __name = NULL;
- do {
- dput(__dp);
-
- AFS_GLOCK();
- if (__name)
- osi_FreeSmallSpace(__name);
- __name = afs_newname();
- AFS_GUNLOCK();
-
- __dp = lookup_one_len(__name, dp->d_parent, strlen(__name));
-
- if (IS_ERR(__dp))
- goto out;
- } while (__dp->d_inode != NULL);
+ code = afs_linux_sillyrename(dip, dp, credp);
+ } else {
AFS_GLOCK();
- code = afs_rename(VTOAFS(dip), (char *)dp->d_name.name, VTOAFS(dip), (char *)__dp->d_name.name, credp);
- if (!code) {
- tvc->mvid = (void *) __name;
- crhold(credp);
- if (tvc->uncred) {
- crfree(tvc->uncred);
- }
- tvc->uncred = credp;
- tvc->f.states |= CUnlinked;
- afs_linux_set_nfsfs_renamed(dp);
- } else {
- osi_FreeSmallSpace(__name);
- }
+ code = afs_remove(VTOAFS(dip), (char *)name, credp);
AFS_GUNLOCK();
-
- if (!code) {
- __dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
- d_move(dp, __dp);
- }
- dput(__dp);
-
- goto out;
+ if (!code)
+ d_drop(dp);
}
- AFS_GLOCK();
- code = afs_remove(VTOAFS(dip), (char *)name, credp);
- AFS_GUNLOCK();
- if (!code)
- d_drop(dp);
-out:
- afs_maybe_unlock_kernel();
crfree(credp);
return afs_convert_code(code);
}
}
static int
+#if defined(IOP_MKDIR_TAKES_UMODE_T)
+afs_linux_mkdir(struct inode *dip, struct dentry *dp, umode_t mode)
+#else
afs_linux_mkdir(struct inode *dip, struct dentry *dp, int mode)
+#endif
{
int code;
cred_t *credp = crref();
struct vattr vattr;
const char *name = dp->d_name.name;
- afs_maybe_lock_kernel();
VATTR_NULL(&vattr);
vattr.va_mask = ATTR_MODE;
vattr.va_mode = mode;
afs_getattr(tvcp, &vattr, credp);
afs_fill_inode(ip, &vattr);
+#if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
dp->d_op = &afs_dentry_operations;
+#endif
dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
d_instantiate(dp, ip);
}
AFS_GUNLOCK();
- afs_maybe_unlock_kernel();
crfree(credp);
return afs_convert_code(code);
}
struct dentry *rehash = NULL;
/* Prevent any new references during rename operation. */
- afs_maybe_lock_kernel();
if (!d_unhashed(newdp)) {
d_drop(newdp);
rehash = newdp;
}
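+    /* Where d_count is a plain int it is protected by d_lock rather than
+     * being atomic, so take the lock before reading it. */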
+#if defined(D_COUNT_INT)
+ spin_lock(&olddp->d_lock);
+ if (olddp->d_count > 1) {
+ spin_unlock(&olddp->d_lock);
+ shrink_dcache_parent(olddp);
+ } else
+ spin_unlock(&olddp->d_lock);
+#else
if (atomic_read(&olddp->d_count) > 1)
shrink_dcache_parent(olddp);
+#endif
AFS_GLOCK();
code = afs_rename(VTOAFS(oldip), (char *)oldname, VTOAFS(newip), (char *)newname, credp);
if (rehash)
d_rehash(rehash);
- afs_maybe_unlock_kernel();
-
crfree(credp);
return afs_convert_code(code);
}
{
int code;
cred_t *credp = crref();
- uio_t tuio;
+ struct uio tuio;
struct iovec iov;
setup_uio(&tuio, &iov, target, (afs_offs_t) 0, maxlen, UIO_READ, seg);
int code;
char *name;
- name = osi_Alloc(PATH_MAX);
+ name = kmalloc(PATH_MAX, GFP_NOFS);
if (!name) {
return -EIO;
}
AFS_GUNLOCK();
if (code < 0) {
- goto out;
+ kfree(name);
+ return code;
}
name[code] = '\0';
- code = vfs_follow_link(nd, name);
+ nd_set_link(nd, name);
+ return 0;
+}
-out:
- osi_Free(name, PATH_MAX);
+static void
+afs_linux_put_link(struct dentry *dentry, struct nameidata *nd)
+{
+ char *name = nd_get_link(nd);
- return code;
+ if (name && !IS_ERR(name))
+ kfree(name);
}
#endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
-#if defined(AFS_CACHE_BYPASS)
-#endif /* defined(AFS_CACHE_BYPASS */
-
/* Populate a page by filling it from the cache file pointed at by cachefp
* (which contains indicated chunk)
* If task is NULL, the page copy occurs syncronously, and the routine
int chunk, struct pagevec *lrupv,
struct afs_pagecopy_task *task) {
loff_t offset = page_offset(page);
+ struct inode *cacheinode = cachefp->f_dentry->d_inode;
struct page *newpage, *cachepage;
struct address_space *cachemapping;
int pageindex;
int code = 0;
- cachemapping = cachefp->f_dentry->d_inode->i_mapping;
+ cachemapping = cacheinode->i_mapping;
newpage = NULL;
cachepage = NULL;
+ /* If we're trying to read a page that's past the end of the disk
+ * cache file, then just return a zeroed page */
+ if (AFS_CHUNKOFFSET(offset) >= i_size_read(cacheinode)) {
+ zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+ SetPageUptodate(page);
+ if (task)
+ unlock_page(page);
+ return 0;
+ }
+
/* From our offset, we now need to work out which page in the disk
* file it corresponds to. This will be fun ... */
pageindex = (offset - AFS_CHUNKTOBASE(chunk)) >> PAGE_CACHE_SHIFT;
SetPageUptodate(page);
if (task)
- UnlockPage(page);
+ unlock_page(page);
} else if (task) {
afs_pagecopy_queue_page(task, cachepage, page);
} else {
}
if (code && task) {
- UnlockPage(page);
+ unlock_page(page);
}
out:
/* XXX - I suspect we should be locking the inodes before we use them! */
AFS_GUNLOCK();
- cacheFp = afs_linux_raw_open(&tdc->f.inode, NULL);
+ cacheFp = afs_linux_raw_open(&tdc->f.inode);
pagevec_init(&lrupv, 0);
code = afs_linux_read_cache(cacheFp, pp, tdc->f.chunk, &lrupv, NULL);
{
afs_int32 code;
char *address;
- uio_t *auio;
+ struct uio *auio;
struct iovec *iovecp;
struct inode *ip = FILE_INODE(fp);
afs_int32 cnt = page_count(pp);
address = kmap(pp);
ClearPageError(pp);
- auio = osi_Alloc(sizeof(uio_t));
- iovecp = osi_Alloc(sizeof(struct iovec));
+ auio = kmalloc(sizeof(struct uio), GFP_NOFS);
+ iovecp = kmalloc(sizeof(struct iovec), GFP_NOFS);
setup_uio(auio, iovecp, (char *)address, offset, PAGE_SIZE, UIO_READ,
AFS_UIOSYS);
- afs_maybe_lock_kernel();
AFS_GLOCK();
AFS_DISCON_LOCK();
afs_Trace4(afs_iclSetp, CM_TRACE_READPAGE, ICL_TYPE_POINTER, ip,
code);
AFS_DISCON_UNLOCK();
AFS_GUNLOCK();
- afs_maybe_unlock_kernel();
if (!code) {
/* XXX valid for no-cache also? Check last bits of files... :)
* Cognate code goes in afs_NoCacheFetchProc. */
kunmap(pp);
- osi_Free(auio, sizeof(uio_t));
- osi_Free(iovecp, sizeof(struct iovec));
+ kfree(auio);
+ kfree(iovecp);
crfree(credp);
return afs_convert_code(code);
}
-#if defined(AFS_CACHE_BYPASS)
-
static int
afs_linux_bypass_readpages(struct file *fp, struct address_space *mapping,
struct list_head *page_list, unsigned num_pages)
{
afs_int32 page_ix;
- uio_t *auio;
+ struct uio *auio;
afs_offs_t offset;
struct iovec* iovecp;
struct nocache_read_request *ancr;
- struct page *pp, *ppt;
+ struct page *pp;
struct pagevec lrupv;
afs_int32 code = 0;
/* background thread must free: iovecp, auio, ancr */
iovecp = osi_Alloc(num_pages * sizeof(struct iovec));
- auio = osi_Alloc(sizeof(uio_t));
+ auio = osi_Alloc(sizeof(struct uio));
auio->uio_iov = iovecp;
auio->uio_iovcnt = num_pages;
auio->uio_flag = UIO_READ;
isize = (i_size_read(fp->f_mapping->host) - 1) >> PAGE_CACHE_SHIFT;
if(pp->index > isize) {
if(PageLocked(pp))
- UnlockPage(pp);
+ unlock_page(pp);
continue;
}
if(page_ix == 0) {
offset = page_offset(pp);
- auio->uio_offset = offset;
+ ancr->offset = auio->uio_offset = offset;
base_index = pp->index;
}
iovecp[page_ix].iov_len = PAGE_SIZE;
code = add_to_page_cache(pp, mapping, pp->index, GFP_KERNEL);
if(base_index != pp->index) {
if(PageLocked(pp))
- UnlockPage(pp);
+ unlock_page(pp);
page_cache_release(pp);
iovecp[page_ix].iov_base = (void *) 0;
base_index++;
- continue;
+ ancr->length -= PAGE_SIZE;
+ continue;
}
base_index++;
if(code) {
if(PageLocked(pp))
- UnlockPage(pp);
+ unlock_page(pp);
page_cache_release(pp);
iovecp[page_ix].iov_base = (void *) 0;
} else {
page_count++;
if(!PageLocked(pp)) {
- LockPage(pp);
+ lock_page(pp);
}
+ /* increment page refcount--our original design assumed
+ * that locking it would effectively pin it; protect
+	     * ourselves from the possibility that this assumption is
+	     * faulty, at low cost (provided we do not fail to
+ * do the corresponding decref on the other side) */
+ get_page(pp);
+
/* save the page for background map */
iovecp[page_ix].iov_base = (void*) pp;
/* and put it on the LRU cache */
if (!pagevec_add(&lrupv, pp))
- __pagevec_lru_add(&lrupv);
+ __pagevec_lru_add_file(&lrupv);
}
}
/* If there were useful pages in the page list, make sure all pages
* are in the LRU cache, then schedule the read */
if(page_count) {
- pagevec_lru_add(&lrupv);
+ if (pagevec_count(&lrupv))
+ __pagevec_lru_add_file(&lrupv);
credp = crref();
code = afs_ReadNoCache(avc, ancr, credp);
crfree(credp);
/* If there is nothing for the background thread to handle,
* it won't be freeing the things that we never gave it */
osi_Free(iovecp, num_pages * sizeof(struct iovec));
- osi_Free(auio, sizeof(uio_t));
+ osi_Free(auio, sizeof(struct uio));
osi_Free(ancr, sizeof(struct nocache_read_request));
}
/* we do not flush, release, or unmap pages--that will be
* done for us by the background thread as each page comes in
* from the fileserver */
-out:
return afs_convert_code(code);
}
afs_linux_bypass_readpage(struct file *fp, struct page *pp)
{
cred_t *credp = NULL;
- uio_t *auio;
+ struct uio *auio;
struct iovec *iovecp;
struct nocache_read_request *ancr;
- afs_int32 isize;
+ int code;
+
+ /*
+ * Special case: if page is at or past end of file, just zero it and set
+ * it as up to date.
+ */
+ if (page_offset(pp) >= i_size_read(fp->f_mapping->host)) {
+ zero_user_segment(pp, 0, PAGE_CACHE_SIZE);
+ SetPageUptodate(pp);
+ unlock_page(pp);
+ return 0;
+ }
ClearPageError(pp);
/* receiver frees */
- auio = osi_Alloc(sizeof(uio_t));
+ auio = osi_Alloc(sizeof(struct uio));
iovecp = osi_Alloc(sizeof(struct iovec));
/* address can be NULL, because we overwrite it with 'pp', below */
PAGE_SIZE, UIO_READ, AFS_UIOSYS);
/* save the page for background map */
- /* XXX - Shouldn't we get a reference count here? */
+ get_page(pp); /* see above */
auio->uio_iov->iov_base = (void*) pp;
/* the background thread will free this */
ancr = osi_Alloc(sizeof(struct nocache_read_request));
ancr->auio = auio;
- ancr->offset = offset;
+ ancr->offset = page_offset(pp);
ancr->length = PAGE_SIZE;
credp = crref();
- afs_maybe_lock_kernel();
code = afs_ReadNoCache(VTOAFS(FILE_INODE(fp)), ancr, credp);
- afs_maybe_unlock_kernel();
crfree(credp);
return afs_convert_code(code);
static inline int
afs_linux_can_bypass(struct inode *ip) {
+
switch(cache_bypass_strategy) {
case NEVER_BYPASS_CACHE:
return 0;
case ALWAYS_BYPASS_CACHE:
return 1;
case LARGE_FILES_BYPASS_CACHE:
- if(i_size_read(ip) > cache_bypass_threshold)
+ if (i_size_read(ip) > cache_bypass_threshold)
return 1;
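+	/* fall through: files at or below the threshold use the cache */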
default:
+ return 0;
}
- return 0;
}
/* Check if a file is permitted to bypass the cache by policy, and modify
static inline int
afs_linux_bypass_check(struct inode *ip) {
- struct cred* credp;
+ cred_t* credp;
int bypass = afs_linux_can_bypass(ip);
credp = crref();
- trydo_cache_transition(VTOAFS(ip)), credp, bypass);
+ trydo_cache_transition(VTOAFS(ip), credp, bypass);
crfree(credp);
return bypass;
}
-#else
-static inline int
-afs_linux_bypass_check(struct inode *ip) {
- return 0;
-}
-static inline int
-afs_linux_bypass_readpage(struct file *fp, struct page *pp) {
- return 0;
-}
-static inline int
-afs_linux_bypass_readpages(struct file *fp, struct address_space *mapping,
- struct list_head *page_list, unsigned int num_pages) {
- return 0;
-}
-#endif
static int
afs_linux_readpage(struct file *fp, struct page *pp)
code = afs_linux_fillpage(fp, pp);
if (!code)
code = afs_linux_prefetch(fp, pp);
- UnlockPage(pp);
+ unlock_page(pp);
}
return code;
if (afs_linux_bypass_check(inode))
return afs_linux_bypass_readpages(fp, mapping, page_list, num_pages);
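+    /* A memory cache has no backing file to copy pages from, so decline
+     * readahead and let readpage fill each page on demand. */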
+ if (cacheDiskType == AFS_FCACHE_TYPE_MEM)
+ return 0;
+
AFS_GLOCK();
if ((code = afs_linux_VerifyVCache(avc, NULL))) {
AFS_GUNLOCK();
}
AFS_GUNLOCK();
if (tdc)
- cacheFp = afs_linux_raw_open(&tdc->f.inode, NULL);
+ cacheFp = afs_linux_raw_open(&tdc->f.inode);
}
if (tdc && !add_to_page_cache(page, mapping, page->index,
return 0;
}
+/* Prepare an AFS vcache for writeback. Should be called with the vcache
+ * locked */
+static inline int
+afs_linux_prepare_writeback(struct vcache *avc) {
+ if (avc->f.states & CPageWrite) {
+ return AOP_WRITEPAGE_ACTIVATE;
+ }
+ avc->f.states |= CPageWrite;
+ return 0;
+}
+
+static inline int
+afs_linux_dopartialwrite(struct vcache *avc, cred_t *credp) {
+ struct vrequest treq;
+ int code = 0;
+
+ if (!afs_InitReq(&treq, credp))
+ code = afs_DoPartialWrite(avc, &treq);
+
+ return afs_convert_code(code);
+}
+
+static inline void
+afs_linux_complete_writeback(struct vcache *avc) {
+ avc->f.states &= ~CPageWrite;
+}
+
+/* Write back a given page synchronously. Called with no AFS locks held */
static int
-afs_linux_writepage_sync(struct inode *ip, struct page *pp,
- unsigned long offset, unsigned int count)
+afs_linux_page_writeback(struct inode *ip, struct page *pp,
+ unsigned long offset, unsigned int count,
+ cred_t *credp)
{
struct vcache *vcp = VTOAFS(ip);
char *buffer;
afs_offs_t base;
int code = 0;
- cred_t *credp;
- uio_t tuio;
+ struct uio tuio;
struct iovec iovec;
int f_flags = 0;
buffer = kmap(pp) + offset;
base = page_offset(pp) + offset;
- credp = crref();
- afs_maybe_lock_kernel();
AFS_GLOCK();
afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
ICL_TYPE_INT32, 99999);
- ObtainWriteLock(&vcp->lock, 532);
- if (vcp->f.states & CPageWrite) {
- ReleaseWriteLock(&vcp->lock);
- AFS_GUNLOCK();
- afs_maybe_unlock_kernel();
- crfree(credp);
- kunmap(pp);
- return AOP_WRITEPAGE_ACTIVATE;
- }
- vcp->f.states |= CPageWrite;
- ReleaseWriteLock(&vcp->lock);
-
setup_uio(&tuio, &iovec, buffer, base, count, UIO_WRITE, AFS_UIOSYS);
code = afs_write(vcp, &tuio, f_flags, credp, 0);
i_size_write(ip, vcp->f.m.Length);
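+    /* i_blocks counts 512-byte units: round the length up to 1K blocks,
+     * then double to convert to 512-byte blocks. */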
ip->i_blocks = ((vcp->f.m.Length + 1023) >> 10) << 1;
- ObtainWriteLock(&vcp->lock, 533);
- if (!code) {
- struct vrequest treq;
-
- if (!afs_InitReq(&treq, credp))
- code = afs_DoPartialWrite(vcp, &treq);
- }
code = code ? afs_convert_code(code) : count - tuio.uio_resid;
- vcp->f.states &= ~CPageWrite;
- ReleaseWriteLock(&vcp->lock);
-
afs_Trace4(afs_iclSetp, CM_TRACE_UPDATEPAGE, ICL_TYPE_POINTER, vcp,
ICL_TYPE_POINTER, pp, ICL_TYPE_INT32, page_count(pp),
ICL_TYPE_INT32, code);
AFS_GUNLOCK();
- afs_maybe_unlock_kernel();
- crfree(credp);
kunmap(pp);
return code;
}
+static int
+afs_linux_writepage_sync(struct inode *ip, struct page *pp,
+ unsigned long offset, unsigned int count)
+{
+ int code;
+ int code1 = 0;
+ struct vcache *vcp = VTOAFS(ip);
+ cred_t *credp;
+
+ /* Catch recursive writeback. This occurs if the kernel decides
+ * writeback is required whilst we are writing to the cache, or
+ * flushing to the server. When we're running syncronously (as
+ * opposed to from writepage) we can't actually do anything about
+ * this case - as we can't return AOP_WRITEPAGE_ACTIVATE to write()
+ */
+ AFS_GLOCK();
+ ObtainWriteLock(&vcp->lock, 532);
+ afs_linux_prepare_writeback(vcp);
+ ReleaseWriteLock(&vcp->lock);
+ AFS_GUNLOCK();
+
+ credp = crref();
+ code = afs_linux_page_writeback(ip, pp, offset, count, credp);
+
+ AFS_GLOCK();
+ ObtainWriteLock(&vcp->lock, 533);
+ if (code > 0)
+ code1 = afs_linux_dopartialwrite(vcp, credp);
+ afs_linux_complete_writeback(vcp);
+ ReleaseWriteLock(&vcp->lock);
+ AFS_GUNLOCK();
+ crfree(credp);
+
+ if (code1)
+ return code1;
+
+ return code;
+}
static int
#ifdef AOP_WRITEPAGE_TAKES_WRITEBACK_CONTROL
{
struct address_space *mapping = pp->mapping;
struct inode *inode;
- unsigned long end_index;
- unsigned offset = PAGE_CACHE_SIZE;
- long status;
+ struct vcache *vcp;
+ cred_t *credp;
+ unsigned int to = PAGE_CACHE_SIZE;
+ loff_t isize;
+ int code = 0;
+ int code1 = 0;
if (PageReclaim(pp)) {
return AOP_WRITEPAGE_ACTIVATE;
+ /* XXX - Do we need to redirty the page here? */
}
- inode = (struct inode *)mapping->host;
- end_index = i_size_read(inode) >> PAGE_CACHE_SHIFT;
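+    /* Take an extra reference so the page cannot be freed once we unlock
+     * it below; dropped at 'done'. */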
+ page_cache_get(pp);
+
+ inode = mapping->host;
+ vcp = VTOAFS(inode);
+ isize = i_size_read(inode);
+
+ /* Don't defeat an earlier truncate */
+ if (page_offset(pp) > isize) {
+ set_page_writeback(pp);
+ unlock_page(pp);
+ goto done;
+ }
+
+ AFS_GLOCK();
+ ObtainWriteLock(&vcp->lock, 537);
+ code = afs_linux_prepare_writeback(vcp);
+ if (code == AOP_WRITEPAGE_ACTIVATE) {
+ /* WRITEPAGE_ACTIVATE is the only return value that permits us
+ * to return with the page still locked */
+ ReleaseWriteLock(&vcp->lock);
+ AFS_GUNLOCK();
+ return code;
+ }
+
+ /* Grab the creds structure currently held in the vnode, and
+ * get a reference to it, in case it goes away ... */
+ credp = vcp->cred;
+ if (credp)
+ crhold(credp);
+ else
+ credp = crref();
+ ReleaseWriteLock(&vcp->lock);
+ AFS_GUNLOCK();
+
+ set_page_writeback(pp);
- /* easy case */
- if (pp->index < end_index)
- goto do_it;
- /* things got complicated... */
- offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1);
- /* OK, are we completely out? */
- if (pp->index >= end_index + 1 || !offset)
- return -EIO;
- do_it:
- status = afs_linux_writepage_sync(inode, pp, 0, offset);
SetPageUptodate(pp);
- if ( status != AOP_WRITEPAGE_ACTIVATE )
- UnlockPage(pp);
- if (status == offset)
+
+ /* We can unlock the page here, because it's protected by the
+ * page_writeback flag. This should make us less vulnerable to
+ * deadlocking in afs_write and afs_DoPartialWrite
+ */
+ unlock_page(pp);
+
+ /* If this is the final page, then just write the number of bytes that
+ * are actually in it */
+ if ((isize - page_offset(pp)) < to )
+ to = isize - page_offset(pp);
+
+ code = afs_linux_page_writeback(inode, pp, 0, to, credp);
+
+ AFS_GLOCK();
+ ObtainWriteLock(&vcp->lock, 538);
+
+ /* As much as we might like to ignore a file server error here,
+ * and just try again when we close(), unfortunately StoreAllSegments
+ * will invalidate our chunks if the server returns a permanent error,
+ * so we need to at least try and get that error back to the user
+ */
+ if (code == to)
+ code1 = afs_linux_dopartialwrite(vcp, credp);
+
+ afs_linux_complete_writeback(vcp);
+ ReleaseWriteLock(&vcp->lock);
+ crfree(credp);
+ AFS_GUNLOCK();
+
+done:
+ end_page_writeback(pp);
+ page_cache_release(pp);
+
+ if (code1)
+ return code1;
+
+ if (code == to)
return 0;
- else
- return status;
+
+ return code;
}
/* afs_linux_permission
* Check access rights - returns error if can't check or permission denied.
*/
static int
-#ifdef IOP_PERMISSION_TAKES_NAMEIDATA
+#if defined(IOP_PERMISSION_TAKES_FLAGS)
+afs_linux_permission(struct inode *ip, int mode, unsigned int flags)
+#elif defined(IOP_PERMISSION_TAKES_NAMEIDATA)
afs_linux_permission(struct inode *ip, int mode, struct nameidata *nd)
#else
afs_linux_permission(struct inode *ip, int mode)
#endif
{
int code;
- cred_t *credp = crref();
+ cred_t *credp;
int tmp = 0;
+ /* Check for RCU path walking */
+#if defined(IOP_PERMISSION_TAKES_FLAGS)
+ if (flags & IPERM_FLAG_RCU)
+ return -ECHILD;
+#elif defined(MAY_NOT_BLOCK)
+ if (mode & MAY_NOT_BLOCK)
+ return -ECHILD;
+#endif
+
+ credp = crref();
AFS_GLOCK();
if (mode & MAY_EXEC)
tmp |= VEXEC;
return afs_convert_code(code);
}
-#if !defined(HAVE_WRITE_BEGIN)
static int
afs_linux_commit_write(struct file *file, struct page *page, unsigned offset,
unsigned to)
{
int code;
+ struct inode *inode = FILE_INODE(file);
+ loff_t pagebase = page_offset(page);
+
+ if (i_size_read(inode) < (pagebase + offset))
+ i_size_write(inode, pagebase + offset);
+
+ if (PageChecked(page)) {
+ SetPageUptodate(page);
+ ClearPageChecked(page);
+ }
- code = afs_linux_writepage_sync(file->f_dentry->d_inode, page,
- offset, to - offset);
+ code = afs_linux_writepage_sync(inode, page, offset, to - offset);
return code;
}
afs_linux_prepare_write(struct file *file, struct page *page, unsigned from,
unsigned to)
{
+
+ /* http://kerneltrap.org/node/4941 details the expected behaviour of
+ * prepare_write. Essentially, if the page exists within the file,
+ * and is not being fully written, then we should populate it.
+ */
+
+ if (!PageUptodate(page)) {
+ loff_t pagebase = page_offset(page);
+ loff_t isize = i_size_read(page->mapping->host);
+
+ /* Is the location we are writing to beyond the end of the file? */
+ if (pagebase >= isize ||
+ ((from == 0) && (pagebase + to) >= isize)) {
+ zero_user_segments(page, 0, from, to, PAGE_CACHE_SIZE);
+ SetPageChecked(page);
+	/* Are we writing a full page? */
+ } else if (from == 0 && to == PAGE_CACHE_SIZE) {
+ SetPageChecked(page);
+	/* Is the page readable? If the file is open write-only we don't care,
+	 * because we're not actually going to read from it ... */
+	} else if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
+ /* We don't care if fillpage fails, because if it does the page
+ * won't be marked as up to date
+ */
+ afs_linux_fillpage(file, page);
+ }
+ }
return 0;
}
-#else
+#if defined(STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_WRITE_BEGIN)
static int
afs_linux_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
int code;
- unsigned from = pos & (PAGE_CACHE_SIZE - 1);
+ unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
+
+ code = afs_linux_commit_write(file, page, from, from + len);
- code = afs_linux_writepage_sync(file->f_dentry->d_inode, page,
- from, copied);
unlock_page(page);
page_cache_release(page);
return code;
{
struct page *page;
pgoff_t index = pos >> PAGE_CACHE_SHIFT;
+ unsigned int from = pos & (PAGE_CACHE_SIZE - 1);
+ int code;
+
 page = grab_cache_page_write_begin(mapping, index, flags);
+    if (!page)
+	return -ENOMEM;
 *pagep = page;
- return 0;
+ code = afs_linux_prepare_write(file, page, from, from + len);
+ if (code) {
+ unlock_page(page);
+ page_cache_release(page);
+ }
+
+ return code;
}
#endif
+#ifndef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
+static void *
+afs_linux_dir_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+ struct dentry **dpp;
+ struct dentry *target;
+
+ if (current->total_link_count > 0) {
+ /* avoid symlink resolution limits when resolving; we cannot contribute to
+ * an infinite symlink loop */
+ /* only do this for follow_link when total_link_count is positive to be
+ * on the safe side; there is at least one code path in the Linux
+ * kernel where it seems like it may be possible to get here without
+ * total_link_count getting incremented. it is not clear on how that
+ * path is actually reached, but guard against it just to be safe */
+ current->total_link_count--;
+ }
+
+ target = canonical_dentry(dentry->d_inode);
+
+# ifdef STRUCT_NAMEIDATA_HAS_PATH
+ dpp = &nd->path.dentry;
+# else
+ dpp = &nd->dentry;
+# endif
+
+ dput(*dpp);
+
+ if (target) {
+ *dpp = target;
+ } else {
+ *dpp = dget(dentry);
+ }
+
+ nd->last_type = LAST_BIND;
+
+ return NULL;
+}
+#endif /* !STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT */
+
static struct inode_operations afs_file_iops = {
.permission = afs_linux_permission,
.readpage = afs_linux_readpage,
.readpages = afs_linux_readpages,
.writepage = afs_linux_writepage,
-#if defined (HAVE_WRITE_BEGIN)
+#if defined (STRUCT_ADDRESS_SPACE_OPERATIONS_HAS_WRITE_BEGIN)
.write_begin = afs_linux_write_begin,
.write_end = afs_linux_write_end,
#else
.rename = afs_linux_rename,
.getattr = afs_linux_getattr,
.permission = afs_linux_permission,
+#ifndef STRUCT_DENTRY_OPERATIONS_HAS_D_AUTOMOUNT
+ .follow_link = afs_linux_dir_follow_link,
+#endif
};
/* We really need a separate symlink set of ops, since do_follow_link()
char *p = (char *)kmap(page);
int code;
- afs_maybe_lock_kernel();
AFS_GLOCK();
code = afs_linux_ireadlink(ip, p, PAGE_SIZE, AFS_UIOSYS);
AFS_GUNLOCK();
if (code < 0)
goto fail;
p[code] = '\0'; /* null terminate? */
- afs_maybe_unlock_kernel();
SetPageUptodate(page);
kunmap(page);
- UnlockPage(page);
+ unlock_page(page);
return 0;
fail:
- afs_maybe_unlock_kernel();
-
SetPageError(page);
kunmap(page);
- UnlockPage(page);
+ unlock_page(page);
return code;
}
static struct inode_operations afs_symlink_iops = {
#if defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE)
.readlink = page_readlink,
-# if defined(HAVE_KERNEL_PAGE_FOLLOW_LINK)
+# if defined(HAVE_LINUX_PAGE_FOLLOW_LINK)
.follow_link = page_follow_link,
# else
.follow_link = page_follow_link_light,
#else /* !defined(USABLE_KERNEL_PAGE_SYMLINK_CACHE) */
.readlink = afs_linux_readlink,
.follow_link = afs_linux_follow_link,
+ .put_link = afs_linux_put_link,
#endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
.setattr = afs_notify_change,
};
if (vattr)
vattr2inode(ip, vattr);
- ip->i_mapping->backing_dev_info = &afs_backing_dev_info;
+ ip->i_mapping->backing_dev_info = afs_backing_dev_info;
/* Reset ops if symlink or directory. */
if (S_ISREG(ip->i_mode)) {
ip->i_op = &afs_file_iops;