#if defined(AFS_LINUX24_ENV)
#include "h/smp_lock.h"
#endif
-#if defined(AFS_CACHE_BYPASS)
#include "afs/lock.h"
#include "afs/afs_bypasscache.h"
-#endif
#ifdef pgoff2loff
#define pageoff(pp) pgoff2loff((pp)->index)
#endif
extern struct vcache *afs_globalVp;
-extern int afs_notify_change(struct dentry *dp, struct iattr *iattrp);
#if defined(AFS_LINUX24_ENV)
/* Some uses of BKL are perhaps not needed for bypass or memcache--
* why don't we try it out? */
{
ssize_t code = 0;
struct vcache *vcp = VTOAFS(fp->f_dentry->d_inode);
-#if defined(AFS_CACHE_BYPASS) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
afs_size_t isize, offindex;
#endif
code = afs_linux_VerifyVCache(vcp, NULL);
if (code == 0) {
-#if defined(AFS_CACHE_BYPASS) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
isize = (i_size_read(fp->f_mapping->host) - 1) >> PAGE_CACHE_SHIFT;
offindex = *offp >> PAGE_CACHE_SHIFT;
if(offindex > isize) {
afs_Trace4(afs_iclSetp, CM_TRACE_READOP, ICL_TYPE_POINTER, vcp,
ICL_TYPE_OFFSET, offp, ICL_TYPE_INT32, count, ICL_TYPE_INT32,
code);
-#if defined(AFS_CACHE_BYPASS) && LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
done:
#endif
AFS_GUNLOCK();
{
struct vcache *avc = VTOAFS(FILE_INODE(fp));
struct vrequest treq;
- register struct dcache *tdc;
+ struct dcache *tdc;
int code;
int offset;
int dirpos;
struct DirEntry *de;
+ struct DirBuffer entry;
ino_t ino;
int len;
afs_size_t origOffset, tlen;
if (!dirpos)
break;
- de = afs_dir_GetBlob(tdc, dirpos);
- if (!de)
+ code = afs_dir_GetBlob(tdc, dirpos, &entry);
+ if (code)
break;
+ de = (struct DirEntry *)entry.data;
- ino = afs_calc_inum (avc->f.fid.Fid.Volume, ntohl(de->fid.vnode));
+ ino = afs_calc_inum(avc->f.fid.Cell, avc->f.fid.Fid.Volume,
+ ntohl(de->fid.vnode));
if (de->name)
len = strlen(de->name);
else {
printf("afs_linux_readdir: afs_dir_GetBlob failed, null name (inode %lx, dirpos %d)\n",
(unsigned long)&tdc->f.inode, dirpos);
- DRelease(de, 0);
+ DRelease(&entry, 0);
ReleaseSharedLock(&avc->lock);
afs_PutDCache(tdc);
code = -ENOENT;
#else
code = (*filldir) (dirbuf, de->name, len, offset, ino);
#endif
- DRelease(de, 0);
+ DRelease(&entry, 0);
if (code)
break;
offset = dirpos + 1 + ((len + 16) >> 5);
struct vcache *vcp;
cred_t *credp;
int code;
-#if defined(AFS_CACHE_BYPASS)
int bypasscache;
-#endif
AFS_GLOCK();
code = afs_InitReq(&treq, credp);
if (code)
goto out;
-#if defined(AFS_CACHE_BYPASS)
/* If caching is bypassed for this file, or globally, just return 0 */
if(cache_bypass_strategy == ALWAYS_BYPASS_CACHE)
bypasscache = 1;
/* future proof: don't rely on 0 return from afs_InitReq */
code = 0; goto out;
}
-#endif
ObtainSharedLock(&vcp->lock, 535);
if ((vcp->execsOrWriters > 0) && (file_count(fp) == 1)) {
#endif
};
+/* canonical_dentry
+ * Return the canonical dentry alias for inode 'ip', or NULL if the
+ * inode currently has no aliases.  Prefers the alias cached in
+ * vcp->target_link; otherwise falls back to the first alias found in
+ * the walk.  On success the returned dentry carries a reference taken
+ * via dget_locked(); the caller is responsible for dput()ing it.
+ */
+static struct dentry *
+canonical_dentry(struct inode *ip)
+{
+    struct vcache *vcp = VTOAFS(ip);
+    struct dentry *first = NULL, *ret = NULL, *cur;
+    struct list_head *head, *prev, *tmp;
+
+    /* general strategy:
+     * if vcp->target_link is set, and can be found in ip->i_dentry, use that.
+     * otherwise, use the first dentry in ip->i_dentry.
+     * if ip->i_dentry is empty, use the 'dentry' argument we were given.
+     */
+    /* note that vcp->target_link specifies which dentry to use, but we have
+     * no reference held on that dentry. so, we cannot use or dereference
+     * vcp->target_link itself, since it may have been freed. instead, we only
+     * use it to compare to pointers in the ip->i_dentry list. */
+
+    /* drop unused aliases first so the walk below only sees live ones */
+    d_prune_aliases(ip);
+
+    /* dcache_lock protects the i_dentry alias list on these kernels */
+    spin_lock(&dcache_lock);
+
+    head = &ip->i_dentry;
+    prev = ip->i_dentry.prev;
+
+    /* walk the alias list from the tail, looking for target_link */
+    while (prev != head) {
+        tmp = prev;
+        prev = tmp->prev;
+        cur = list_entry(tmp, struct dentry, d_alias);
+
+        if (!vcp->target_link || cur == vcp->target_link) {
+            ret = cur;
+            break;
+        }
+
+        /* remember the first alias seen as a fallback choice */
+        if (!first) {
+            first = cur;
+        }
+    }
+    if (!ret && first) {
+        ret = first;
+    }
+
+    /* cache the chosen alias for the next call (compare-only use) */
+    vcp->target_link = ret;
+
+    if (ret) {
+        /* take the reference while still holding dcache_lock */
+        dget_locked(ret);
+    }
+    spin_unlock(&dcache_lock);
+
+    return ret;
+}
/**********************************************************************
* AFS Linux dentry operations
return afs_convert_code(code);
}
+/* iattr2vattr
+ * Set iattr data into vattr. Assume vattr cleared before call.
+ * Only fields flagged in iattrp->ia_valid are copied; va_mask is set
+ * to the ia_valid bits so afs_setattr() knows which fields are live.
+ */
+static void
+iattr2vattr(struct vattr *vattrp, struct iattr *iattrp)
+{
+    vattrp->va_mask = iattrp->ia_valid;
+    if (iattrp->ia_valid & ATTR_MODE)
+        vattrp->va_mode = iattrp->ia_mode;
+    if (iattrp->ia_valid & ATTR_UID)
+        vattrp->va_uid = iattrp->ia_uid;
+    if (iattrp->ia_valid & ATTR_GID)
+        vattrp->va_gid = iattrp->ia_gid;
+    if (iattrp->ia_valid & ATTR_SIZE)
+        vattrp->va_size = iattrp->ia_size;
+    /* NOTE(review): ia_atime/ia_mtime/ia_ctime are assigned straight to
+     * tv_sec, so this assumes the older kernel layout where they are
+     * plain time_t values; sub-second precision is discarded. */
+    if (iattrp->ia_valid & ATTR_ATIME) {
+        vattrp->va_atime.tv_sec = iattrp->ia_atime;
+        vattrp->va_atime.tv_usec = 0;
+    }
+    if (iattrp->ia_valid & ATTR_MTIME) {
+        vattrp->va_mtime.tv_sec = iattrp->ia_mtime;
+        vattrp->va_mtime.tv_usec = 0;
+    }
+    if (iattrp->ia_valid & ATTR_CTIME) {
+        vattrp->va_ctime.tv_sec = iattrp->ia_ctime;
+        vattrp->va_ctime.tv_usec = 0;
+    }
+}
+
+/* vattr2inode
+ * Rewrite the inode cache from the attr. Assumes all vattr fields are valid.
+ * Copies identity, link count, size, ownership, mode and timestamps from
+ * the AFS vattr into the Linux inode so the VFS sees current metadata.
+ */
+void
+vattr2inode(struct inode *ip, struct vattr *vp)
+{
+    ip->i_ino = vp->va_nodeid;
+    ip->i_nlink = vp->va_nlink;
+    ip->i_blocks = vp->va_blocks;
+#ifdef STRUCT_INODE_HAS_I_BLKBITS
+    /* block size fields only exist on some kernel versions */
+    ip->i_blkbits = AFS_BLKBITS;
+#endif
+#ifdef STRUCT_INODE_HAS_I_BLKSIZE
+    ip->i_blksize = vp->va_blocksize;
+#endif
+    ip->i_rdev = vp->va_rdev;
+    ip->i_mode = vp->va_mode;
+    ip->i_uid = vp->va_uid;
+    ip->i_gid = vp->va_gid;
+    /* i_size_write() keeps readers consistent on 32-bit SMP kernels */
+    i_size_write(ip, vp->va_size);
+    ip->i_atime = vp->va_atime.tv_sec;
+    ip->i_mtime = vp->va_mtime.tv_sec;
+    ip->i_ctime = vp->va_ctime.tv_sec;
+}
+
+/* afs_notify_change
+ * Linux version of setattr call. What to change is in the iattr struct.
+ * We need to set bits in both the Linux inode as well as the vcache.
+ *
+ * dp      - dentry whose inode is being changed
+ * iattrp  - attributes to apply (ia_valid selects which)
+ * returns - 0 on success, or a negative Linux errno via afs_convert_code()
+ */
+int
+afs_notify_change(struct dentry *dp, struct iattr *iattrp)
+{
+    struct vattr vattr;
+    cred_t *credp = crref();
+    struct inode *ip = dp->d_inode;
+    int code;
+
+    VATTR_NULL(&vattr);
+    iattr2vattr(&vattr, iattrp); /* Convert for AFS vnodeops call. */
+
+    AFS_GLOCK();
+    code = afs_setattr(VTOAFS(ip), &vattr, credp);
+    if (!code) {
+        /* re-fetch the attrs so the inode reflects what the server stored */
+        afs_getattr(VTOAFS(ip), &vattr, credp);
+        vattr2inode(ip, &vattr);
+    }
+    AFS_GUNLOCK();
+    crfree(credp);
+    return afs_convert_code(code);
+}
+
/* Validate a dentry. Return 1 if unchanged, 0 if VFS layer should re-evaluate.
* In kernels 2.2.10 and above, we are passed an additional flags var which
* may have either the LOOKUP_FOLLOW OR LOOKUP_DIRECTORY set in which case
if (vcp) {
struct vattr vattr;
+ struct vcache *parent_vc = VTOAFS(dip);
+
+ if (parent_vc == vcp) {
+ /* This is possible if the parent dir is a mountpoint to a volume,
+ * and the dir entry we looked up is a mountpoint to the same
+ * volume. Linux cannot cope with this, so return an error instead
+ * of risking a deadlock or panic. */
+ afs_PutVCache(vcp);
+ code = EDEADLK;
+ AFS_GUNLOCK();
+ goto done;
+ }
ip = AFSTOV(vcp);
afs_getattr(vcp, &vattr, credp);
#if defined(AFS_LINUX24_ENV)
if (ip && S_ISDIR(ip->i_mode)) {
+ int retry = 1;
struct dentry *alias;
- /* Try to invalidate an existing alias in favor of our new one */
- alias = d_find_alias(ip);
- if (alias) {
- if (d_invalidate(alias) == 0) {
- dput(alias);
- } else {
- iput(ip);
- crfree(credp);
- return alias;
+ while (retry) {
+ retry = 0;
+
+ /* Try to invalidate an existing alias in favor of our new one */
+ alias = d_find_alias(ip);
+ /* But not if it's disconnected; then we want d_splice_alias below */
+ if (alias) {
+ if (d_invalidate(alias) == 0) {
+ /* there may be more aliases; try again until we run out */
+ retry = 1;
+ }
}
+
+ dput(alias);
}
}
#endif
d_add(dp, ip);
+ done:
crfree(credp);
/* It's ok for the file to not be found. That's noted by the caller by
{
int code;
cred_t *credp = crref();
- uio_t tuio;
+ struct uio tuio;
struct iovec iov;
setup_uio(&tuio, &iov, target, (afs_offs_t) 0, maxlen, UIO_READ, seg);
#endif /* AFS_LINUX24_ENV */
#endif /* USABLE_KERNEL_PAGE_SYMLINK_CACHE */
+/* afs_linux_can_bypass
+ * Decide whether reads of this inode should bypass the AFS cache,
+ * based on the global cache_bypass_strategy setting.
+ * Returns 1 to bypass the cache, 0 to use it.
+ */
+static inline int
+afs_linux_can_bypass(struct inode *ip) {
+    switch(cache_bypass_strategy) {
+	case NEVER_BYPASS_CACHE:
+	    return 0;
+	case ALWAYS_BYPASS_CACHE:
+	    return 1;
+	case LARGE_FILES_BYPASS_CACHE:
+	    if(i_size_read(ip) > cache_bypass_threshold)
+		return 1;
+	    /* fall through: files at or below the threshold use the cache */
+	default:
+	    return 0;
+    }
+}
+
/* afs_linux_readpage
* all reads come through here. A strategy-like read call.
*/
ulong address = afs_linux_page_address(pp);
afs_offs_t offset = pageoff(pp);
#endif
-#if defined(AFS_CACHE_BYPASS)
afs_int32 bypasscache = 0; /* bypass for this read */
struct nocache_read_request *ancr;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
afs_int32 isize;
#endif
-#endif
- uio_t *auio;
+ struct uio *auio;
struct iovec *iovecp;
struct inode *ip = FILE_INODE(fp);
afs_int32 cnt = page_count(pp);
clear_bit(PG_error, &pp->flags);
#endif
/* if bypasscache, receiver frees, else we do */
- auio = osi_Alloc(sizeof(uio_t));
+ auio = osi_Alloc(sizeof(struct uio));
iovecp = osi_Alloc(sizeof(struct iovec));
setup_uio(auio, iovecp, (char *)address, offset, PAGE_SIZE, UIO_READ,
AFS_UIOSYS);
-#if defined(AFS_CACHE_BYPASS)
bypasscache = afs_linux_can_bypass(ip);
/* In the new incarnation of selective caching, a file's caching policy
goto done; /* skips release page, doing it in bg thread */
}
-#endif
#ifdef AFS_LINUX24_ENV
maybe_lock_kernel();
free_page(address);
#endif
-#if defined(AFS_CACHE_BYPASS)
/* do not call afs_GetDCache if cache is bypassed */
if(bypasscache)
goto done;
-#endif
/* free if not bypassing cache */
- osi_Free(auio, sizeof(uio_t));
+ osi_Free(auio, sizeof(struct uio));
osi_Free(iovecp, sizeof(struct iovec));
if (!code && AFS_CHUNKOFFSET(offset) == 0) {
AFS_GUNLOCK();
}
-#if defined(AFS_CACHE_BYPASS)
done:
-#endif
crfree(credp);
return afs_convert_code(code);
}
afs_offs_t base;
int code = 0;
cred_t *credp;
- uio_t tuio;
+ struct uio tuio;
struct iovec iovec;
int f_flags = 0;
u8 *page_addr = (u8 *) afs_linux_page_address(pp);
int code = 0;
cred_t *credp;
- uio_t tuio;
+ struct uio tuio;
struct iovec iovec;
set_bit(PG_locked, &pp->flags);
}
#endif
+/* afs_linux_dir_follow_link
+ * follow_link operation for AFS directory inodes: substitute the
+ * canonical alias for this inode into the nameidata, so lookups that
+ * reach the same inode via different paths converge on one dentry.
+ * Always returns 0.
+ */
+static int
+afs_linux_dir_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+    struct dentry **dpp;
+    struct dentry *target;
+
+    /* may be NULL if the inode currently has no usable alias */
+    target = canonical_dentry(dentry->d_inode);
+
+    dpp = &nd->dentry;
+
+    /* drop the walk's reference before substituting our own */
+    dput(*dpp);
+
+    if (target) {
+        /* canonical_dentry() already took a reference for us */
+        *dpp = target;
+    } else {
+        /* no alias found: keep the dentry we were given, with a new ref */
+        *dpp = dget(dentry);
+    }
+
+    /* tell the VFS we resolved to a bound dentry, not a path to re-walk */
+    nd->last_type = LAST_BIND;
+
+    return 0;
+}
static struct inode_operations afs_file_iops = {
#if defined(AFS_LINUX24_ENV)
.rename = afs_linux_rename,
.revalidate = afs_linux_revalidate,
.permission = afs_linux_permission,
+ .follow_link = afs_linux_dir_follow_link,
};
/* We really need a separate symlink set of ops, since do_follow_link()