/*
- * Copyright 2000, International Business Machines Corporation and others.
- * All Rights Reserved.
- *
- * This software has been released under the terms of the IBM Public
- * License. For details, see the LICENSE file in the top-level source
- * directory or online at http://www.openafs.org/dl/license10.html
+ * A large chunk of this file appears to be copied directly from
+ * sys/nfsclient/nfs_bio.c, which has the following license:
+ */
+/*
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
*/
-
/*
- * vnodeops structure and Digital Unix specific ops and support routines.
+ * Pursuant to a statement of U.C. Berkeley dated 1999-07-22, this license
+ * is amended to drop clause (3) above.
*/
-#include "../afs/param.h" /* Should be always first */
+#include <afsconfig.h>
+#include <afs/param.h>
+
-#include "../afs/sysincludes.h" /* Standard vendor system headers */
-#include "../afs/afsincludes.h" /* Afs-based standard headers */
-#include "../afs/afs_stats.h" /* statistics */
-#include <vm/vm.h>
+#include <afs/sysincludes.h> /* Standard vendor system headers */
+#include <afsincludes.h> /* Afs-based standard headers */
+#include <afs/afs_stats.h> /* statistics */
+#include <sys/malloc.h>
+#include <sys/namei.h>
+#include <sys/unistd.h>
+#if __FreeBSD_version >= 1000030
+#include <sys/rwlock.h>
+#endif
+#include <vm/vm_page.h>
+#include <vm/vm_object.h>
+#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
-#include <vm/vm_map.h>
-/* #include <vm/vm_ubc.h> */
-#include "../afs/afs_cbqueue.h"
-#include "../afs/nfsclient.h"
-#include "../afs/afs_osidnlc.h"
-
-
-extern int afs_lookup(), afs_create(), afs_noop(), afs_open(), afs_close();
-extern int afs_access(), afs_getattr(), afs_setattr(), afs_badop();
-extern int afs_fsync(), afs_seek(), afs_remove(), afs_link(), afs_rename();
-extern int afs_mkdir(), afs_rmdir(), afs_symlink(), afs_readdir();
-extern int afs_readlink(), afs_lockctl();
-extern int vn_pathconf_default(), seltrue();
-
-int mp_afs_lookup(), mp_afs_create(), mp_afs_open();
-int mp_afs_access(), mp_afs_getattr(), mp_afs_setattr(), mp_afs_ubcrdwr();
-int mp_afs_ubcrdwr(), mp_afs_mmap();
-int mp_afs_fsync(), mp_afs_seek(), mp_afs_remove(), mp_afs_link();
-int mp_afs_rename(), mp_afs_mkdir(), mp_afs_rmdir(), mp_afs_symlink();
-int mp_afs_readdir(), mp_afs_readlink(), mp_afs_abortop(), mp_afs_inactive();
-int mp_afs_reclaim(), mp_afs_bmap(), mp_afs_strategy(), mp_afs_print();
-int mp_afs_page_read(), mp_afs_page_write(), mp_afs_swap(), mp_afs_bread();
-int mp_afs_brelse(), mp_afs_lockctl(), mp_afs_syncdata(), mp_afs_close();
-int mp_afs_closex();
-
-#if 0
-/* AFS vnodeops */
-struct vnodeops Afs_vnodeops = {
- mp_afs_lookup,
- mp_afs_create,
- afs_noop, /* vn_mknod */
- mp_afs_open,
- mp_afs_close,
- mp_afs_access,
- mp_afs_getattr,
- mp_afs_setattr,
- mp_afs_ubcrdwr,
- mp_afs_ubcrdwr,
- afs_badop, /* vn_ioctl */
- seltrue, /* vn_select */
- mp_afs_mmap,
- mp_afs_fsync,
- mp_afs_seek,
- mp_afs_remove,
- mp_afs_link,
- mp_afs_rename,
- mp_afs_mkdir,
- mp_afs_rmdir,
- mp_afs_symlink,
- mp_afs_readdir,
- mp_afs_readlink,
- mp_afs_abortop,
- mp_afs_inactive,
- mp_afs_reclaim,
- mp_afs_bmap,
- mp_afs_strategy,
- mp_afs_print,
- mp_afs_page_read,
- mp_afs_page_write,
- mp_afs_swap,
- mp_afs_bread,
- mp_afs_brelse,
- mp_afs_lockctl,
- mp_afs_syncdata,
- afs_noop, /* Lock */
- afs_noop, /* unLock */
- afs_noop, /* get ext attrs */
- afs_noop, /* set ext attrs */
- afs_noop, /* del ext attrs */
- vn_pathconf_default,
-};
-struct vnodeops *afs_ops = &Afs_vnodeops;
-#endif /* 0 */
-
-/* vnode file operations, and our own */
-extern int vn_read();
-extern int vn_write();
-extern int vn_ioctl();
-extern int vn_select();
-extern int afs_closex();
-
-struct fileops afs_fileops = {
- vn_read,
- vn_write,
- vn_ioctl,
- vn_select,
- mp_afs_closex,
-};
+#include <sys/vmmeter.h>
+extern int afs_pbuf_freecnt;
+
+#define GETNAME() \
+ struct componentname *cnp = ap->a_cnp; \
+ char *name; \
+ name = malloc(cnp->cn_namelen+1, M_TEMP, M_WAITOK); \
+ memcpy(name, cnp->cn_nameptr, cnp->cn_namelen); \
+ name[cnp->cn_namelen] = '\0'
+
+#define DROPNAME() free(name, M_TEMP)
+
+#ifdef LINK_MAX
+# define AFS_LINK_MAX LINK_MAX
+#else
+# define AFS_LINK_MAX (32767)
+#endif
-#if 0
-mp_afs_lookup(adp, ndp)
- struct vcache *adp;
- struct nameidata *ndp;
-{
- int code;
- AFS_GLOCK();
- code = afs_lookup(adp, ndp);
- AFS_GUNLOCK();
- return code;
-}
+/*
+ * Here we define compatibility functions/macros for interfaces that
+ * have changed between different FreeBSD versions.
+ */
+static __inline void ma_vm_page_lock_queues(void) {};
+static __inline void ma_vm_page_unlock_queues(void) {};
+static __inline void ma_vm_page_lock(vm_page_t m) { vm_page_lock(m); };
+static __inline void ma_vm_page_unlock(vm_page_t m) { vm_page_unlock(m); };
+
+#if __FreeBSD_version >= 1000030
+#define AFS_VM_OBJECT_WLOCK(o) VM_OBJECT_WLOCK(o)
+#define AFS_VM_OBJECT_WUNLOCK(o) VM_OBJECT_WUNLOCK(o)
+#else
+#define AFS_VM_OBJECT_WLOCK(o) VM_OBJECT_LOCK(o)
+#define AFS_VM_OBJECT_WUNLOCK(o) VM_OBJECT_UNLOCK(o)
+#endif
-mp_afs_create(ndp, attrs)
- struct nameidata *ndp;
- struct vattr *attrs;
-{
- int code;
- AFS_GLOCK();
- code = afs_create(ndp, attrs);
- AFS_GUNLOCK();
- return code;
-}
+#ifdef VM_CNT_ADD
+# define AFS_VM_CNT_ADD(var, x) VM_CNT_ADD(var, x)
+# define AFS_VM_CNT_INC(var) VM_CNT_INC(var)
+#else
+# define AFS_VM_CNT_ADD(var, x) PCPU_ADD(cnt.var, x)
+# define AFS_VM_CNT_INC(var) PCPU_INC(cnt.var)
+#endif
-mp_afs_open(avcp, aflags, acred)
- struct vcache **avcp;
- afs_int32 aflags;
- struct AFS_UCRED *acred;
+/*
+ * Mostly copied from sys/ufs/ufs/ufs_vnops.c:ufs_pathconf().
+ * We should know the correct answers to these questions with
+ * respect to the AFS protocol (which may differ from the UFS
+ * values) but for the moment this will do.
+ */
+static int
+afs_vop_pathconf(struct vop_pathconf_args *ap)
{
- int code;
- AFS_GLOCK();
- code = afs_open(avcp, aflags, acred);
- AFS_GUNLOCK();
- return code;
+ int error;
+
+ error = 0;
+ switch (ap->a_name) {
+ case _PC_LINK_MAX:
+ *ap->a_retval = AFS_LINK_MAX;
+ break;
+ case _PC_NAME_MAX:
+ *ap->a_retval = NAME_MAX;
+ break;
+ case _PC_PATH_MAX:
+ *ap->a_retval = PATH_MAX;
+ break;
+ case _PC_PIPE_BUF:
+ *ap->a_retval = PIPE_BUF;
+ break;
+ case _PC_CHOWN_RESTRICTED:
+ *ap->a_retval = 1;
+ break;
+ case _PC_NO_TRUNC:
+ *ap->a_retval = 1;
+ break;
+#ifdef _PC_ACL_EXTENDED
+ case _PC_ACL_EXTENDED:
+ *ap->a_retval = 0;
+ break;
+ case _PC_ACL_PATH_MAX:
+ *ap->a_retval = 3;
+ break;
+#endif
+#ifdef _PC_MAC_PRESENT
+ case _PC_MAC_PRESENT:
+ *ap->a_retval = 0;
+ break;
+#endif
+#ifdef _PC_ASYNC_IO
+ case _PC_ASYNC_IO:
+ /* _PC_ASYNC_IO should have been handled by upper layers. */
+ KASSERT(0, ("_PC_ASYNC_IO should not get here"));
+ error = EINVAL;
+ break;
+ case _PC_PRIO_IO:
+ *ap->a_retval = 0;
+ break;
+ case _PC_SYNC_IO:
+ *ap->a_retval = 0;
+ break;
+#endif
+#ifdef _PC_ALLOC_SIZE_MIN
+ case _PC_ALLOC_SIZE_MIN:
+ *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_bsize;
+ break;
+#endif
+#ifdef _PC_FILESIZEBITS
+ case _PC_FILESIZEBITS:
+ *ap->a_retval = 32; /* XXX */
+ break;
+#endif
+#ifdef _PC_REC_INCR_XFER_SIZE
+ case _PC_REC_INCR_XFER_SIZE:
+ *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize;
+ break;
+ case _PC_REC_MAX_XFER_SIZE:
+ *ap->a_retval = -1; /* means ``unlimited'' */
+ break;
+ case _PC_REC_MIN_XFER_SIZE:
+ *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize;
+ break;
+ case _PC_REC_XFER_ALIGN:
+ *ap->a_retval = PAGE_SIZE;
+ break;
+#endif
+#ifdef _PC_SYMLINK_MAX
+ case _PC_SYMLINK_MAX:
+ *ap->a_retval = MAXPATHLEN;
+ break;
+#endif
+ default:
+ error = EINVAL;
+ break;
+ }
+ return (error);
}
-mp_afs_access(avc, amode, acred)
- struct vcache *avc;
- afs_int32 amode;
- struct AFS_UCRED *acred;
+static int
+afs_vop_lookup(ap)
+ struct vop_lookup_args /* {
+ * struct vnodeop_desc * a_desc;
+ * struct vnode *a_dvp;
+ * struct vnode **a_vpp;
+ * struct componentname *a_cnp;
+ * } */ *ap;
{
- int code;
- AFS_GLOCK();
- code = afs_access(avc, amode, acred);
- AFS_GUNLOCK();
- return code;
-}
+ int error;
+ struct vcache *vcp;
+ struct vnode *vp, *dvp;
+ int flags = ap->a_cnp->cn_flags;
-mp_afs_close(avc, flags, cred)
- struct vnode *avc;
- int flags;
- struct ucred *cred;
-{
- int code;
- AFS_GLOCK();
- code = afs_close(avc, flags, cred);
- AFS_GUNLOCK();
- return code;
-}
+ dvp = ap->a_dvp;
+ if (dvp->v_type != VDIR) {
+ return ENOTDIR;
+ }
-mp_afs_getattr(avc, attrs, acred)
- struct vcache *avc;
- struct vattr *attrs;
- struct AFS_UCRED *acred;
-{
- int code;
- AFS_GLOCK();
- code = afs_getattr(avc, attrs, acred);
- AFS_GUNLOCK();
- return code;
-}
+ if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
+ return EIO;
+
+ GETNAME();
+
+#if __FreeBSD_version < 1000021
+ cnp->cn_flags |= MPSAFE; /* steel */
+#endif
+
+ /*
+ * Locking rules:
+ *
+ * - 'dvp' is locked by our caller. We must return it locked, whether we
+ * return success or error.
+ *
+ * - If the lookup succeeds, 'vp' must be locked before we return.
+ *
+ * - If we lock multiple vnodes, parent vnodes must be locked before
+ * children vnodes.
+ *
+ * As a result, looking up the parent directory (if 'flags' has ISDOTDOT
+ * set) is a bit of a special case. In that case, we must unlock 'dvp'
+ * before performing the lookup, since the lookup operation may lock the
+ * target vnode, and the target vnode is the parent of 'dvp' (so we must
+ * lock 'dvp' after locking the target vnode).
+ */
+
+ if (flags & ISDOTDOT)
+ VOP_UNLOCK(dvp, 0);
-mp_afs_setattr(avc, attrs, acred)
- struct vcache *avc;
- struct vattr *attrs;
- struct AFS_UCRED *acred;
-{
- int code;
AFS_GLOCK();
- code = afs_setattr(avc, attrs, acred);
+ error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
AFS_GUNLOCK();
- return code;
+
+ if (error) {
+ if (flags & ISDOTDOT)
+ vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
+ if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)
+ && (flags & ISLASTCN) && error == ENOENT)
+ error = EJUSTRETURN;
+ if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
+ cnp->cn_flags |= SAVENAME;
+ DROPNAME();
+ *ap->a_vpp = 0;
+ return (error);
+ }
+ vp = AFSTOV(vcp); /* always get a node if no error */
+
+ if (flags & ISDOTDOT) {
+ /* Must lock 'vp' before 'dvp', since 'vp' is the parent vnode. */
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+ vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
+ } else if (vp == dvp) {
+ /* they're the same; afs_lookup() already ref'ed the leaf.
+ * It came in locked, so we don't need to ref OR lock it */
+ } else {
+ vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
+ }
+ *ap->a_vpp = vp;
+
+ if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
+ cnp->cn_flags |= SAVENAME;
+
+ DROPNAME();
+ return error;
}
-mp_afs_fsync(avc, fflags, acred, waitfor)
- struct vcache *avc;
- int fflags;
- struct AFS_UCRED *acred;
- int waitfor;
+static int
+afs_vop_create(ap)
+ struct vop_create_args /* {
+ * struct vnode *a_dvp;
+ * struct vnode **a_vpp;
+ * struct componentname *a_cnp;
+ * struct vattr *a_vap;
+ * } */ *ap;
{
- int code;
+ int error = 0;
+ struct vcache *vcp;
+ struct vnode *dvp = ap->a_dvp;
+ GETNAME();
+
AFS_GLOCK();
- code = afs_fsync(avc, fflags, acred, waitfor);
+ error =
+ afs_create(VTOAFS(dvp), name, ap->a_vap,
+ ap->a_vap->va_vaflags & VA_EXCLUSIVE ? EXCL : NONEXCL,
+ ap->a_vap->va_mode, &vcp, cnp->cn_cred);
AFS_GUNLOCK();
- return code;
+ if (error) {
+ DROPNAME();
+ return (error);
+ }
+
+ if (vcp) {
+ *ap->a_vpp = AFSTOV(vcp);
+ vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY);
+ } else
+ *ap->a_vpp = 0;
+
+ DROPNAME();
+ return error;
}
-mp_afs_remove(ndp)
- struct nameidata *ndp;
+static int
+afs_vop_mknod(ap)
+ struct vop_mknod_args /* {
+ * struct vnode *a_dvp;
+ * struct vnode **a_vpp;
+ * struct componentname *a_cnp;
+ * struct vattr *a_vap;
+ * } */ *ap;
{
- int code;
- AFS_GLOCK();
- code = afs_remove(ndp);
- AFS_GUNLOCK();
- return code;
+ return (ENODEV);
}
-mp_afs_link(avc, ndp)
- struct vcache *avc;
- struct nameidata *ndp;
+static int
+afs_vop_open(ap)
+ struct vop_open_args /* {
+ * struct vnode *a_vp;
+ * int a_mode;
+ * struct ucred *a_cred;
+ * struct thread *a_td;
+ * struct file *a_fp;
+ * } */ *ap;
{
- int code;
+ int error;
+ struct vcache *vc = VTOAFS(ap->a_vp);
+
AFS_GLOCK();
- code = afs_link(avc, ndp);
+ error = afs_open(&vc, ap->a_mode, ap->a_cred);
+#ifdef DIAGNOSTIC
+ if (AFSTOV(vc) != ap->a_vp)
+ panic("AFS open changed vnode!");
+#endif
AFS_GUNLOCK();
- return code;
+ vnode_create_vobject(ap->a_vp, vc->f.m.Length, ap->a_td);
+ osi_FlushPages(vc, ap->a_cred);
+ return error;
}
-mp_afs_rename(fndp, tndp)
- struct nameidata *fndp, *tndp;
+static int
+afs_vop_close(ap)
+ struct vop_close_args /* {
+ * struct vnode *a_vp;
+ * int a_fflag;
+ * struct ucred *a_cred;
+ * struct thread *a_td;
+ * } */ *ap;
{
- int code;
+ int code, iflag;
+ struct vnode *vp = ap->a_vp;
+ struct vcache *avc = VTOAFS(vp);
+
+ VI_LOCK(vp);
+ iflag = vp->v_iflag & VI_DOOMED;
+ VI_UNLOCK(vp);
+ if (iflag & VI_DOOMED) {
+ /* osi_FlushVCache (correctly) calls vgone() on recycled vnodes, we don't
+ * have an afs_close to process, in that case */
+ if (avc->opens != 0)
+ panic("afs_vop_close: doomed vnode %p has vcache %p with non-zero opens %d\n",
+ vp, avc, avc->opens);
+ return 0;
+ }
+
AFS_GLOCK();
- code = afs_rename(fndp, tndp);
+ if (ap->a_cred)
+ code = afs_close(avc, ap->a_fflag, ap->a_cred);
+ else
+ code = afs_close(avc, ap->a_fflag, afs_osi_credp);
+ osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
AFS_GUNLOCK();
return code;
}
-mp_afs_mkdir(ndp, attrs)
- struct nameidata *ndp;
- struct vattr *attrs;
+static int
+afs_vop_access(ap)
+ struct vop_access_args /* {
+ * struct vnode *a_vp;
+ * accmode_t a_accmode;
+ * struct ucred *a_cred;
+ * struct thread *a_td;
+ * } */ *ap;
{
int code;
AFS_GLOCK();
- code = afs_mkdir(ndp, attrs);
+ code = afs_access(VTOAFS(ap->a_vp), ap->a_accmode, ap->a_cred);
AFS_GUNLOCK();
return code;
}
-mp_afs_rmdir(ndp)
- struct nameidata *ndp;
+static int
+afs_vop_getattr(ap)
+ struct vop_getattr_args /* {
+ * struct vnode *a_vp;
+ * struct vattr *a_vap;
+ * struct ucred *a_cred;
+ * } */ *ap;
{
int code;
+
AFS_GLOCK();
- code = afs_rmdir(ndp);
+ code = afs_getattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);
AFS_GUNLOCK();
+
return code;
}
-mp_afs_symlink(ndp, attrs, atargetName)
- struct nameidata *ndp;
- struct vattr *attrs;
- register char *atargetName;
+static int
+afs_vop_setattr(ap)
+ struct vop_setattr_args /* {
+ * struct vnode *a_vp;
+ * struct vattr *a_vap;
+ * struct ucred *a_cred;
+ * } */ *ap;
{
int code;
AFS_GLOCK();
- code = afs_symlink(ndp, attrs, atargetName);
+ code = afs_setattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);
AFS_GUNLOCK();
return code;
}
-mp_afs_readdir(avc, auio, acred, eofp)
- struct vcache *avc;
- struct uio *auio;
- struct AFS_UCRED *acred;
- int *eofp;
+static int
+afs_vop_read(ap)
+ struct vop_read_args /* {
+ * struct vnode *a_vp;
+ * struct uio *a_uio;
+ * int a_ioflag;
+ * struct ucred *a_cred;
+ *
+ * } */ *ap;
{
int code;
+ struct vcache *avc = VTOAFS(ap->a_vp);
AFS_GLOCK();
- code = afs_readdir(avc, auio, acred, eofp);
+ osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
+ code = afs_read(avc, ap->a_uio, ap->a_cred, 0);
AFS_GUNLOCK();
return code;
}
-mp_afs_readlink(avc, auio, acred)
- struct vcache *avc;
- struct uio *auio;
- struct AFS_UCRED *acred;
+/* struct vop_getpages_args {
+ * struct vnode *a_vp;
+ * vm_page_t *a_m;
+ * int a_count;
+ * int *a_rbehind;
+ * int *a_rahead;
+ * };
+ */
+static int
+afs_vop_getpages(struct vop_getpages_args *ap)
{
int code;
+ int i, nextoff, size, toff, npages, count;
+ struct uio uio;
+ struct iovec iov;
+ struct buf *bp;
+ vm_offset_t kva;
+ vm_object_t object;
+ vm_page_t *pages;
+ struct vnode *vp;
+ struct vcache *avc;
+
+ memset(&uio, 0, sizeof(uio));
+ memset(&iov, 0, sizeof(iov));
+
+ vp = ap->a_vp;
+ avc = VTOAFS(vp);
+ pages = ap->a_m;
+#ifdef FBSD_VOP_GETPAGES_BUSIED
+ npages = ap->a_count;
+ if (ap->a_rbehind)
+ *ap->a_rbehind = 0;
+ if (ap->a_rahead)
+ *ap->a_rahead = 0;
+#else
+ npages = btoc(ap->a_count);
+#endif
+
+ if ((object = vp->v_object) == NULL) {
+ printf("afs_getpages: called with non-merged cache vnode??\n");
+ return VM_PAGER_ERROR;
+ }
+
+ /*
+ * If the requested page is partially valid, just return it and
+ * allow the pager to zero-out the blanks. Partially valid pages
+ * can only occur at the file EOF.
+ */
+ {
+#ifdef FBSD_VOP_GETPAGES_BUSIED
+ AFS_VM_OBJECT_WLOCK(object);
+ ma_vm_page_lock_queues();
+ if(pages[npages - 1]->valid != 0) {
+ if (--npages == 0) {
+ ma_vm_page_unlock_queues();
+ AFS_VM_OBJECT_WUNLOCK(object);
+ return (VM_PAGER_OK);
+ }
+ }
+#else
+ vm_page_t m = pages[ap->a_reqpage];
+ AFS_VM_OBJECT_WLOCK(object);
+ ma_vm_page_lock_queues();
+ if (m->valid != 0) {
+ /* handled by vm_fault now */
+ /* vm_page_zero_invalid(m, TRUE); */
+ for (i = 0; i < npages; ++i) {
+ if (i != ap->a_reqpage) {
+ ma_vm_page_lock(pages[i]);
+ vm_page_free(pages[i]);
+ ma_vm_page_unlock(pages[i]);
+ }
+ }
+ ma_vm_page_unlock_queues();
+ AFS_VM_OBJECT_WUNLOCK(object);
+ return (0);
+ }
+#endif
+ ma_vm_page_unlock_queues();
+ AFS_VM_OBJECT_WUNLOCK(object);
+ }
+ bp = getpbuf(&afs_pbuf_freecnt);
+
+ kva = (vm_offset_t) bp->b_data;
+ pmap_qenter(kva, pages, npages);
+ AFS_VM_CNT_INC(v_vnodein);
+ AFS_VM_CNT_ADD(v_vnodepgsin, npages);
+
+#ifdef FBSD_VOP_GETPAGES_BUSIED
+ count = ctob(npages);
+#else
+ count = ap->a_count;
+#endif
+ iov.iov_base = (caddr_t) kva;
+ iov.iov_len = count;
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
+ uio.uio_resid = count;
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_READ;
+ uio.uio_td = curthread;
+
AFS_GLOCK();
- code = afs_readlink(avc, auio, acred);
+ osi_FlushPages(avc, osi_curcred()); /* hold GLOCK, but not basic vnode lock */
+ code = afs_read(avc, &uio, osi_curcred(), 0);
AFS_GUNLOCK();
- return code;
+ pmap_qremove(kva, npages);
+
+ relpbuf(bp, &afs_pbuf_freecnt);
+
+ if (code && (uio.uio_resid == count)) {
+#ifndef FBSD_VOP_GETPAGES_BUSIED
+ AFS_VM_OBJECT_WLOCK(object);
+ ma_vm_page_lock_queues();
+ for (i = 0; i < npages; ++i) {
+ if (i != ap->a_reqpage)
+ vm_page_free(pages[i]);
+ }
+ ma_vm_page_unlock_queues();
+ AFS_VM_OBJECT_WUNLOCK(object);
+#endif
+ return VM_PAGER_ERROR;
+ }
+
+ size = count - uio.uio_resid;
+ AFS_VM_OBJECT_WLOCK(object);
+ ma_vm_page_lock_queues();
+ for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
+ vm_page_t m;
+ nextoff = toff + PAGE_SIZE;
+ m = pages[i];
+
+ /* XXX not in nfsclient? */
+ m->flags &= ~PG_ZERO;
+
+ if (nextoff <= size) {
+ /*
+ * Read operation filled an entire page
+ */
+ m->valid = VM_PAGE_BITS_ALL;
+ KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
+ } else if (size > toff) {
+ /*
+ * Read operation filled a partial page.
+ */
+ m->valid = 0;
+ vm_page_set_validclean(m, 0, size - toff);
+ KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
+ }
+
+#ifndef FBSD_VOP_GETPAGES_BUSIED
+ if (i != ap->a_reqpage) {
+#if __FreeBSD_version >= 1000042
+ vm_page_readahead_finish(m);
+#else
+ /*
+ * Whether or not to leave the page activated is up in
+ * the air, but we should put the page on a page queue
+ * somewhere (it already is in the object). Result:
+ * It appears that empirical results show that
+ * deactivating pages is best.
+ */
+
+ /*
+ * Just in case someone was asking for this page we
+ * now tell them that it is ok to use.
+ */
+ if (!code) {
+ if (m->oflags & VPO_WANTED) {
+ ma_vm_page_lock(m);
+ vm_page_activate(m);
+ ma_vm_page_unlock(m);
+ }
+ else {
+ ma_vm_page_lock(m);
+ vm_page_deactivate(m);
+ ma_vm_page_unlock(m);
+ }
+ vm_page_wakeup(m);
+ } else {
+ ma_vm_page_lock(m);
+ vm_page_free(m);
+ ma_vm_page_unlock(m);
+ }
+#endif /* __FreeBSD_version 1000042 */
+ }
+#endif /* ndef FBSD_VOP_GETPAGES_BUSIED */
+ }
+ ma_vm_page_unlock_queues();
+ AFS_VM_OBJECT_WUNLOCK(object);
+ return VM_PAGER_OK;
}
-mp_afs_lockctl(avc, af, flag, acred, clid, offset)
- struct vcache *avc;
- struct eflock *af;
- struct AFS_UCRED *acred;
- int flag;
- pid_t clid;
- off_t offset;
+static int
+afs_vop_write(ap)
+ struct vop_write_args /* {
+ * struct vnode *a_vp;
+ * struct uio *a_uio;
+ * int a_ioflag;
+ * struct ucred *a_cred;
+ * } */ *ap;
{
int code;
+ struct vcache *avc = VTOAFS(ap->a_vp);
AFS_GLOCK();
- code = afs_lockctl(avc, af, flag, acred, clid, offset);
+ osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
+ code =
+ afs_write(VTOAFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_cred, 0);
AFS_GUNLOCK();
return code;
}
-mp_afs_closex(afd)
- struct file *afd;
+/*-
+ * struct vop_putpages_args {
+ * struct vnode *a_vp;
+ * vm_page_t *a_m;
+ * int a_count;
+ * int a_sync;
+ * int *a_rtvals;
+ * vm_ooffset_t a_offset;
+ * };
+ */
+/*
+ * All of the pages passed to us in ap->a_m[] are already marked as busy,
+ * so there is no additional locking required to set their flags. -GAW
+ */
+static int
+afs_vop_putpages(struct vop_putpages_args *ap)
{
int code;
- AFS_GLOCK();
- code = afs_closex(afd);
- AFS_GUNLOCK();
- return code;
-}
-
-mp_afs_seek(avc, oldoff, newoff, cred)
+ int i, size, npages, sync;
+ struct uio uio;
+ struct iovec iov;
+ struct buf *bp;
+ vm_offset_t kva;
+ struct vnode *vp;
struct vcache *avc;
- off_t oldoff, newoff;
- struct ucred *cred;
-{
- if ((int) newoff < 0)
- return(EINVAL);
- else
- return(0);
-}
-mp_afs_abortop(ndp)
- struct nameidata *ndp;
-{
- return(0);
-}
+ memset(&uio, 0, sizeof(uio));
+ memset(&iov, 0, sizeof(iov));
+
+ vp = ap->a_vp;
+ avc = VTOAFS(vp);
+ /* Perhaps these two checks should just be KASSERTs instead... */
+ if (vp->v_object == NULL) {
+ printf("afs_putpages: called with non-merged cache vnode??\n");
+ return VM_PAGER_ERROR; /* XXX I think this is insufficient */
+ }
+ if (vType(avc) != VREG) {
+ printf("afs_putpages: not VREG");
+ return VM_PAGER_ERROR; /* XXX I think this is insufficient */
+ }
+ npages = btoc(ap->a_count);
+ for (i = 0; i < npages; i++)
+ ap->a_rtvals[i] = VM_PAGER_AGAIN;
+ bp = getpbuf(&afs_pbuf_freecnt);
+
+ kva = (vm_offset_t) bp->b_data;
+ pmap_qenter(kva, ap->a_m, npages);
+ AFS_VM_CNT_INC(v_vnodeout);
+ AFS_VM_CNT_ADD(v_vnodepgsout, ap->a_count);
+
+ iov.iov_base = (caddr_t) kva;
+ iov.iov_len = ap->a_count;
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
+ uio.uio_resid = ap->a_count;
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_WRITE;
+ uio.uio_td = curthread;
+ sync = IO_VMIO;
+ if (ap->a_sync & VM_PAGER_PUT_SYNC)
+ sync |= IO_SYNC;
+ /*if (ap->a_sync & VM_PAGER_PUT_INVAL)
+ * sync |= IO_INVAL; */
-mp_afs_inactive(avc, acred)
- register struct vcache *avc;
- struct AFS_UCRED *acred;
-{
AFS_GLOCK();
- afs_InactiveVCache(avc, acred);
+ code = afs_write(avc, &uio, sync, osi_curcred(), 0);
AFS_GUNLOCK();
-}
+ pmap_qremove(kva, npages);
+ relpbuf(bp, &afs_pbuf_freecnt);
-mp_afs_reclaim(avc)
- struct vcache *avc;
-{
- return(0);
+ if (!code) {
+ size = ap->a_count - uio.uio_resid;
+ for (i = 0; i < round_page(size) / PAGE_SIZE; i++) {
+ ap->a_rtvals[i] = VM_PAGER_OK;
+ vm_page_undirty(ap->a_m[i]);
+ }
+ }
+ return ap->a_rtvals[0];
}
-mp_afs_print(avc)
- struct vcache *avc;
+static int
+afs_vop_ioctl(ap)
+ struct vop_ioctl_args /* {
+ * struct vnode *a_vp;
+ * u_long a_command;
+ * void *a_data;
+ * int a_fflag;
+ * struct ucred *a_cred;
+ * struct thread *a_td;
+ * } */ *ap;
{
- return(0);
+ struct vcache *tvc = VTOAFS(ap->a_vp);
+ int error = 0;
+
+ /* in case we ever get in here... */
+
+ AFS_STATCNT(afs_ioctl);
+ if (((ap->a_command >> 8) & 0xff) == 'V') {
+ /* This is a VICEIOCTL call */
+ AFS_GLOCK();
+ error = HandleIoctl(tvc, ap->a_command, ap->a_data);
+ AFS_GUNLOCK();
+ return (error);
+ } else {
+ /* No-op call; just return. */
+ return (ENOTTY);
+ }
}
-mp_afs_page_read(avc, uio, acred)
- struct vcache *avc;
- struct uio *uio;
- struct ucred *acred;
+static int
+afs_vop_fsync(ap)
+ struct vop_fsync_args /* {
+ * struct vnode *a_vp;
+ * int a_waitfor;
+ * struct thread *td;
+ * } */ *ap;
{
int error;
- struct vrequest treq;
+ struct vnode *vp = ap->a_vp;
AFS_GLOCK();
- error = afs_rdwr(avc, uio, UIO_READ, 0, acred);
- afs_Trace3(afs_iclSetp, CM_TRACE_PAGE_READ, ICL_TYPE_POINTER, avc,
- ICL_TYPE_INT32, error, ICL_TYPE_INT32, avc->states);
- if (error) {
- error = EIO;
- } else if ((avc->states) == 0) {
- afs_InitReq(&treq, acred);
- ObtainWriteLock(&avc->lock,161);
- afs_Wire(avc, &treq);
- ReleaseWriteLock(&avc->lock);
- }
+ /*vflushbuf(vp, wait); */
+ error = afs_fsync(VTOAFS(vp), ap->a_td->td_ucred);
AFS_GUNLOCK();
- return(error);
+ return error;
}
-
-mp_afs_page_write(avc, uio, acred, pager, offset)
- struct vcache *avc;
- struct uio *uio;
- struct ucred *acred;
- memory_object_t pager;
- vm_offset_t offset;
+static int
+afs_vop_remove(ap)
+ struct vop_remove_args /* {
+ * struct vnode *a_dvp;
+ * struct vnode *a_vp;
+ * struct componentname *a_cnp;
+ * } */ *ap;
{
- int error;
+ int error = 0;
+ struct vnode *vp = ap->a_vp;
+ struct vnode *dvp = ap->a_dvp;
+ GETNAME();
AFS_GLOCK();
- error = afs_rdwr(avc, uio, UIO_WRITE, 0, acred);
- afs_Trace3(afs_iclSetp, CM_TRACE_PAGE_WRITE, ICL_TYPE_POINTER, avc,
- ICL_TYPE_INT32, error, ICL_TYPE_INT32, avc->states);
- if (error) {
- error = EIO;
- }
+ error = afs_remove(VTOAFS(dvp), name, cnp->cn_cred);
AFS_GUNLOCK();
- return(error);
+ cache_purge(vp);
+ DROPNAME();
+ return error;
}
-
-int DO_FLUSH=1;
-mp_afs_ubcrdwr(avc, uio, ioflag, cred)
- struct vcache *avc;
- struct uio *uio;
- int ioflag;
- struct ucred *cred;
+static int
+afs_vop_link(ap)
+ struct vop_link_args /* {
+ * struct vnode *a_vp;
+ * struct vnode *a_tdvp;
+ * struct componentname *a_cnp;
+ * } */ *ap;
{
- register afs_int32 code;
- register char *data;
- afs_int32 fileBase, size, cnt=0;
- afs_int32 pageBase;
- register afs_int32 tsize;
- register afs_int32 pageOffset;
- int eof;
- struct vrequest treq;
- int rw = uio->uio_rw;
- int rv, flags;
- int newpage=0;
- vm_page_t page;
- afs_int32 save_resid;
- struct dcache *tdc;
- int didFakeOpen=0;
- int counter=0;
-
- AFS_GLOCK();
- afs_InitReq(&treq, cred);
- if (AFS_NFSXLATORREQ(cred) && rw == UIO_READ) {
- if (!afs_AccessOK(avc, PRSFS_READ, &treq,
- CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
- AFS_GUNLOCK();
- return EACCES;
- }
+ int error = 0;
+ struct vnode *dvp = ap->a_tdvp;
+ struct vnode *vp = ap->a_vp;
+
+ GETNAME();
+ if (dvp->v_mount != vp->v_mount) {
+ error = EXDEV;
+ goto out;
}
- afs_Trace4(afs_iclSetp, CM_TRACE_VMRW, ICL_TYPE_POINTER, avc,
- ICL_TYPE_INT32, (rw==UIO_WRITE? 1 : 0),
- ICL_TYPE_LONG, uio->uio_offset,
- ICL_TYPE_LONG, uio->uio_resid);
- code = afs_VerifyVCache(avc, &treq);
- if (code) {
- code = afs_CheckCode(code, &treq, 35);
- AFS_GUNLOCK();
- return code;
+ if (vp->v_type == VDIR) {
+ error = EISDIR;
+ goto out;
}
- if (vType(avc) != VREG) {
- AFS_GUNLOCK();
- return EISDIR; /* can't read or write other things */
+ if ((error = vn_lock(vp, LK_CANRECURSE | LK_EXCLUSIVE)) != 0) {
+ goto out;
}
- afs_BozonLock(&avc->pvnLock, avc);
- osi_FlushPages(avc); /* hold bozon lock, but not basic vnode lock */
- ObtainWriteLock(&avc->lock,162);
- /* adjust parameters when appending files */
- if ((ioflag & IO_APPEND) && uio->uio_rw == UIO_WRITE)
- uio->uio_offset = avc->m.Length; /* write at EOF position */
- if (uio->uio_rw == UIO_WRITE) {
- avc->states |= CDirty;
- afs_FakeOpen(avc);
- didFakeOpen=1;
- /*
- * before starting any I/O, we must ensure that the file is big enough
- * to hold the results (since afs_putpage will be called to force
- * the I/O.
- */
- size = uio->afsio_resid + uio->afsio_offset; /* new file size */
- if (size > avc->m.Length) avc->m.Length = size; /* file grew */
- avc->m.Date = osi_Time(); /* Set file date (for ranlib) */
- if (uio->afsio_resid > PAGE_SIZE)
- cnt = uio->afsio_resid / PAGE_SIZE;
- save_resid = uio->afsio_resid;
- }
-
- while (1) {
- /*
- * compute the amount of data to move into this block,
- * based on uio->afsio_resid.
- */
- size = uio->afsio_resid; /* transfer size */
- fileBase = uio->afsio_offset; /* start file position */
- pageBase = fileBase & ~(PAGE_SIZE-1); /* file position of the page */
- pageOffset = fileBase & (PAGE_SIZE-1); /* start offset within page */
- tsize = PAGE_SIZE-pageOffset; /* amount left in this page */
- /*
- * we'll read tsize bytes,
- * but first must make sure tsize isn't too big
- */
- if (tsize > size) tsize = size; /* don't read past end of request */
- eof = 0; /* flag telling us if we hit the EOF on the read */
- if (uio->uio_rw == UIO_READ) { /* we're doing a read operation */
- /* don't read past EOF */
- if (tsize + fileBase > avc->m.Length) {
- tsize = avc->m.Length - fileBase;
- eof = 1; /* we did hit the EOF */
- if (tsize < 0) tsize = 0; /* better safe than sorry */
- }
- }
- if (tsize <= 0) break; /* nothing to transfer, we're done */
+ AFS_GLOCK();
+ error = afs_link(VTOAFS(vp), VTOAFS(dvp), name, cnp->cn_cred);
+ AFS_GUNLOCK();
+ if (dvp != vp)
+ VOP_UNLOCK(vp, 0);
+ out:
+ DROPNAME();
+ return error;
+}
- /* Purge dirty chunks of file if there are too many dirty chunks.
- * Inside the write loop, we only do this at a chunk boundary.
- * Clean up partial chunk if necessary at end of loop.
- */
- if (uio->uio_rw == UIO_WRITE && counter > 0
- && AFS_CHUNKOFFSET(fileBase) == 0) {
- code = afs_DoPartialWrite(avc, &treq);
- avc->states |= CDirty;
+static int
+afs_vop_rename(ap)
+ struct vop_rename_args /* {
+ * struct vnode *a_fdvp;
+ * struct vnode *a_fvp;
+ * struct componentname *a_fcnp;
+ * struct vnode *a_tdvp;
+ * struct vnode *a_tvp;
+ * struct componentname *a_tcnp;
+ * } */ *ap;
+{
+ int error = 0;
+ struct componentname *fcnp = ap->a_fcnp;
+ char *fname;
+ struct componentname *tcnp = ap->a_tcnp;
+ char *tname;
+ struct vnode *tvp = ap->a_tvp;
+ struct vnode *tdvp = ap->a_tdvp;
+ struct vnode *fvp = ap->a_fvp;
+ struct vnode *fdvp = ap->a_fdvp;
+
+ /*
+ * Check for cross-device rename.
+ */
+ if ((fvp->v_mount != tdvp->v_mount)
+ || (tvp && (fvp->v_mount != tvp->v_mount))) {
+ error = EXDEV;
+ abortit:
+ if (tdvp == tvp)
+ vrele(tdvp);
+ else
+ vput(tdvp);
+ if (tvp)
+ vput(tvp);
+ vrele(fdvp);
+ vrele(fvp);
+ return (error);
+ }
+ /*
+ * if fvp == tvp, we're just removing one name of a pair of
+ * directory entries for the same element. convert call into rename.
+ * (pinched from FreeBSD 4.4's ufs_rename())
+
+ */
+ if (fvp == tvp) {
+ if (fvp->v_type == VDIR) {
+ error = EINVAL;
+ goto abortit;
}
- if (code) {
- break;
+ /* Release destination completely. */
+ vput(tdvp);
+ vput(tvp);
+
+ /* Delete source. */
+ vrele(fdvp);
+ vrele(fvp);
+ fcnp->cn_flags &= ~MODMASK;
+ fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
+ if ((fcnp->cn_flags & SAVESTART) == 0)
+ panic("afs_rename: lost from startdir");
+ fcnp->cn_nameiop = DELETE;
+ VREF(fdvp);
+ error = relookup(fdvp, &fvp, fcnp);
+ if (error == 0)
+ vrele(fdvp);
+ if (fvp == NULL) {
+ return (ENOENT);
}
- flags = 0;
- ReleaseWriteLock(&avc->lock);
- AFS_GUNLOCK();
- code = ubc_lookup(((struct vnode *)avc)->v_object, pageBase,
- PAGE_SIZE, PAGE_SIZE, &page, &flags);
- AFS_GLOCK();
- ObtainWriteLock(&avc->lock,163);
-
- if (code) {
- break;
- }
- if (flags & B_NOCACHE) {
- /*
- No page found. We should not read the page in if
- 1. the write starts on a page edge (ie, pageoffset == 0)
- and either
- 1. we will fill the page (ie, size == PAGESIZE), or
- 2. we are writing past eof
- */
- if ((uio->uio_rw == UIO_WRITE) &&
- ((pageOffset == 0 && (size == PAGE_SIZE || fileBase >= avc->m.Length)))) {
- struct vnode *vp = (struct vnode *)avc;
- /* we're doing a write operation past eof; no need to read it */
- newpage = 1;
- AFS_GUNLOCK();
- ubc_page_zero(page, 0, PAGE_SIZE);
- ubc_page_release(page, B_DONE);
- AFS_GLOCK();
- } else {
- /* page wasn't cached, read it in. */
- struct buf *bp;
-
- AFS_GUNLOCK();
- bp = ubc_bufalloc(page, 1, PAGE_SIZE, 1, B_READ);
- AFS_GLOCK();
- bp->b_dev = 0;
- bp->b_vp = (struct vnode *)avc;
- bp->b_blkno = btodb(pageBase);
- ReleaseWriteLock(&avc->lock);
- code = afs_ustrategy(bp, cred); /* do the I/O */
- ObtainWriteLock(&avc->lock,164);
- AFS_GUNLOCK();
- ubc_sync_iodone(bp);
- AFS_GLOCK();
- if (code) {
- AFS_GUNLOCK();
- ubc_page_release(page, 0);
- AFS_GLOCK();
- break;
- }
- }
- }
- AFS_GUNLOCK();
- ubc_page_wait(page);
- data = (char *)page->pg_addr; /* DUX 4.0D */
- if (data == 0)
- data = (char *)PHYS_TO_KSEG(page->pg_phys_addr); /* DUX 4.0E */
- AFS_GLOCK();
- ReleaseWriteLock(&avc->lock); /* uiomove may page fault */
- AFS_GUNLOCK();
- code = uiomove(data+pageOffset, tsize, uio);
- ubc_unload(page, pageOffset, page_size);
- if (uio->uio_rw == UIO_WRITE) {
- vm_offset_t toffset;
-
- /* Mark the page dirty and release it to avoid a deadlock
- * in ubc_dirty_kluster when more than one process writes
- * this page at the same time. */
- toffset = page->pg_offset;
- flags |= B_DIRTY;
- ubc_page_release(page, flags);
-
- if (cnt > 10) {
- vm_page_t pl;
- int kpcnt;
- struct buf *bp;
-
- /* We released the page, so we can get a null page
- * list if another thread calls the strategy routine.
- */
- pl = ubc_dirty_kluster(((struct vnode *)avc)->v_object,
- NULL, toffset, 0, B_WANTED, FALSE, &kpcnt);
- if (pl) {
- bp = ubc_bufalloc(pl, 1, PAGE_SIZE, 1, B_WRITE);
- bp->b_dev = 0;
- bp->b_vp = (struct vnode *)avc;
- bp->b_blkno = btodb(pageBase);
- AFS_GLOCK();
- code = afs_ustrategy(bp, cred); /* do the I/O */
- AFS_GUNLOCK();
- ubc_sync_iodone(bp);
- if (code) {
- AFS_GLOCK();
- ObtainWriteLock(&avc->lock,415);
- break;
- }
- }
- }
- } else {
- ubc_page_release(page, flags);
- }
- AFS_GLOCK();
- ObtainWriteLock(&avc->lock,165);
- /*
- * If reading at a chunk boundary, start prefetch of next chunk.
- */
- if (uio->uio_rw == UIO_READ
- && (counter == 0 || AFS_CHUNKOFFSET(fileBase) == 0)) {
- tdc = afs_FindDCache(avc, fileBase);
- if (tdc) {
- if (!(tdc->flags & DFNextStarted))
- afs_PrefetchChunk(avc, tdc, cred, &treq);
- afs_PutDCache(tdc);
- }
- }
- counter++;
- if (code) break;
- }
- if (didFakeOpen)
- afs_FakeClose(avc, cred);
- if (uio->uio_rw == UIO_WRITE && code == 0 && (avc->states & CDirty)) {
- code = afs_DoPartialWrite(avc, &treq);
- }
- ReleaseWriteLock(&avc->lock);
- afs_BozonUnlock(&avc->pvnLock, avc);
- if (DO_FLUSH || (!newpage && (cnt < 10))) {
- AFS_GUNLOCK();
- ubc_flush_dirty(((struct vnode *)avc)->v_object, flags);
- AFS_GLOCK();
+ error = VOP_REMOVE(fdvp, fvp, fcnp);
+ if (fdvp == fvp)
+ vrele(fdvp);
+ else
+ vput(fdvp);
+ vput(fvp);
+ return (error);
}
+ if ((error = vn_lock(fvp, LK_EXCLUSIVE)) != 0)
+ goto abortit;
- ObtainSharedLock(&avc->lock, 409);
- if (!code) {
- if (avc->vc_error) {
- code = avc->vc_error;
- }
- }
- /* This is required since we may still have dirty pages after the write.
- * I could just let close do the right thing, but stat's before the close
- * return the wrong length.
- */
- if (code == EDQUOT || code == ENOSPC) {
- uio->uio_resid = save_resid;
- UpgradeSToWLock(&avc->lock, 410);
- osi_ReleaseVM(avc, cred);
- ConvertWToSLock(&avc->lock);
- }
- ReleaseSharedLock(&avc->lock);
+ fname = malloc(fcnp->cn_namelen + 1, M_TEMP, M_WAITOK);
+ memcpy(fname, fcnp->cn_nameptr, fcnp->cn_namelen);
+ fname[fcnp->cn_namelen] = '\0';
+ tname = malloc(tcnp->cn_namelen + 1, M_TEMP, M_WAITOK);
+ memcpy(tname, tcnp->cn_nameptr, tcnp->cn_namelen);
+ tname[tcnp->cn_namelen] = '\0';
- if (!code && (ioflag & IO_SYNC) && (uio->uio_rw == UIO_WRITE)
- && !AFS_NFSXLATORREQ(cred)) {
- code = afs_fsync(avc, 0, cred, 0);
- }
-out:
- code = afs_CheckCode(code, &treq, 36);
- AFS_GUNLOCK();
- return code;
-}
+ AFS_GLOCK();
+ /* XXX use "from" or "to" creds? NFS uses "to" creds */
+ error =
+ afs_rename(VTOAFS(fdvp), fname, VTOAFS(tdvp), tname, tcnp->cn_cred);
+ AFS_GUNLOCK();
-/*
- * Now for some bad news. Since we artificially hold on to vnodes by doing
- * and extra VNHOLD in afs_NewVCache(), there is no way for us to know
- * when we need to flush the pages when a program exits. Particularly
- * if it closes the file after mapping it R/W.
- *
- */
+ free(fname, M_TEMP);
+ free(tname, M_TEMP);
+ if (tdvp == tvp)
+ vrele(tdvp);
+ else
+ vput(tdvp);
+ if (tvp)
+ vput(tvp);
+ vrele(fdvp);
+ vput(fvp);
+ return error;
+}
-mp_afs_mmap(avc, offset, map, addrp, len, prot, maxprot, flags, cred)
- register struct vcache *avc;
- vm_offset_t offset;
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t len;
- vm_prot_t prot;
- vm_prot_t maxprot;
- int flags;
- struct ucred *cred;
+static int
+afs_vop_mkdir(ap)
+ struct vop_mkdir_args /* {
+ * struct vnode *a_dvp;
+ * struct vnode **a_vpp;
+ * struct componentname *a_cnp;
+ * struct vattr *a_vap;
+ * } */ *ap;
{
- struct vp_mmap_args args;
- register struct vp_mmap_args *ap = &args;
- struct vnode *vp = (struct vnode *)avc;
- int code;
- struct vrequest treq;
-#if !defined(DYNEL)
- extern kern_return_t u_vp_create();
+ struct vnode *dvp = ap->a_dvp;
+ struct vattr *vap = ap->a_vap;
+ int error = 0;
+ struct vcache *vcp;
+
+ GETNAME();
+#ifdef DIAGNOSTIC
+ if ((cnp->cn_flags & HASBUF) == 0)
+ panic("afs_vop_mkdir: no name");
#endif
-
- AFS_GLOCK();
- afs_InitReq(&treq, cred);
- code = afs_VerifyVCache(avc, &treq);
- if (code) {
- code = afs_CheckCode(code, &treq, 37);
- AFS_GUNLOCK();
- return code;
- }
- afs_BozonLock(&avc->pvnLock, avc);
- osi_FlushPages(avc); /* ensure old pages are gone */
- afs_BozonUnlock(&avc->pvnLock, avc);
- ObtainWriteLock(&avc->lock,166);
- avc->states |= CMAPPED;
- ReleaseWriteLock(&avc->lock);
- ap->a_offset = offset;
- ap->a_vaddr = addrp;
- ap->a_size = len;
- ap->a_prot = prot,
- ap->a_maxprot = maxprot;
- ap->a_flags = flags;
- AFS_GUNLOCK();
- code = u_vp_create(map, vp->v_object, (vm_offset_t) ap);
AFS_GLOCK();
- code = afs_CheckCode(code, &treq, 38);
+ error = afs_mkdir(VTOAFS(dvp), name, vap, &vcp, cnp->cn_cred);
AFS_GUNLOCK();
- return code;
+ if (error) {
+ DROPNAME();
+ return (error);
+ }
+ if (vcp) {
+ *ap->a_vpp = AFSTOV(vcp);
+ vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY);
+ } else
+ *ap->a_vpp = 0;
+ DROPNAME();
+ return error;
}
-
-int mp_afs_getpage(vop, offset, len, protp, pl, plsz, mape, addr, rw, cred)
- vm_ubc_object_t vop;
- vm_offset_t offset;
- vm_size_t len;
- vm_prot_t *protp;
- vm_page_t *pl;
- int plsz;
- vm_map_entry_t mape;
- vm_offset_t addr;
- int rw;
- struct ucred *cred;
+static int
+afs_vop_rmdir(ap)
+ struct vop_rmdir_args /* {
+ * struct vnode *a_dvp;
+ * struct vnode *a_vp;
+ * struct componentname *a_cnp;
+ * } */ *ap;
{
- register afs_int32 code;
- struct vrequest treq;
- int flags = 0;
- int i, pages = (len + PAGE_SIZE - 1) >> page_shift;
- vm_page_t *pagep;
- vm_offset_t off;
-
- struct vcache *avc = (struct vcache *)vop->vu_vp;
-
- /* first, obtain the proper lock for the VM system */
+ int error = 0;
+ struct vnode *dvp = ap->a_dvp;
+ GETNAME();
AFS_GLOCK();
- afs_InitReq(&treq, cred);
- code = afs_VerifyVCache(avc, &treq);
- if (code) {
- *pl = VM_PAGE_NULL;
- code = afs_CheckCode(code, &treq, 39); /* failed to get it */
- AFS_GUNLOCK();
- return code;
- }
-
- /* clean all dirty pages for this vnode */
+ error = afs_rmdir(VTOAFS(dvp), name, cnp->cn_cred);
AFS_GUNLOCK();
- ubc_flush_dirty(vop,0);
+ DROPNAME();
+ return error;
+}
+
+/* struct vop_symlink_args {
+ * struct vnode *a_dvp;
+ * struct vnode **a_vpp;
+ * struct componentname *a_cnp;
+ * struct vattr *a_vap;
+ * char *a_target;
+ * };
+ */
+static int
+afs_vop_symlink(struct vop_symlink_args *ap)
+{
+ struct vnode *dvp;
+ struct vnode *newvp;
+ struct vcache *vcp;
+ int error;
+
+ GETNAME();
AFS_GLOCK();
- afs_BozonLock(&avc->pvnLock, avc);
- ObtainWriteLock(&avc->lock,167);
- afs_Trace4(afs_iclSetp, CM_TRACE_PAGEIN, ICL_TYPE_POINTER, avc,
- ICL_TYPE_LONG, offset, ICL_TYPE_LONG, len,
- ICL_TYPE_INT32, (int) rw);
- for (i = 0; i < pages; i++) {
- pagep = &pl[i];
- off = offset + PAGE_SIZE * i;
- if (protp) protp[i] = 0;
- flags = 0;
- ReleaseWriteLock(&avc->lock);
- AFS_GUNLOCK();
- code = ubc_lookup(((struct vnode *)avc)->v_object, off,
- PAGE_SIZE, PAGE_SIZE, pagep, &flags);
- AFS_GLOCK();
- ObtainWriteLock(&avc->lock,168);
- if (code) {
- goto out;
- }
- if(flags & B_NOCACHE) { /* if (page) */
- if ((rw & B_WRITE) && (offset+len >= avc->m.Length)) {
- struct vnode *vp = (struct vnode *)avc;
- /* we're doing a write operation past eof; no need to read it */
- AFS_GUNLOCK();
- ubc_page_zero(*pagep, 0, PAGE_SIZE);
- ubc_page_release(*pagep, B_DONE);
- AFS_GLOCK();
- } else {
- /* page wasn't cached, read it in. */
- struct buf *bp;
-
- AFS_GUNLOCK();
- bp = ubc_bufalloc(*pagep, 1, PAGE_SIZE, 1, B_READ);
- AFS_GLOCK();
- bp->b_dev = 0;
- bp->b_vp = (struct vnode *)avc;
- bp->b_blkno = btodb(off);
- ReleaseWriteLock(&avc->lock);
- code = afs_ustrategy(bp, cred); /* do the I/O */
- ObtainWriteLock(&avc->lock,169);
- AFS_GUNLOCK();
- ubc_sync_iodone(bp);
- AFS_GLOCK();
- if (code) {
- AFS_GUNLOCK();
- ubc_page_release(pl[i], 0);
- AFS_GLOCK();
- goto out;
- }
- }
- }
- if ((rw & B_READ) == 0) {
- AFS_GUNLOCK();
- ubc_page_dirty(pl[i]);
- AFS_GLOCK();
- } else {
- if (protp && (flags & B_DIRTY) == 0) {
- protp[i] = VM_PROT_WRITE;
- }
+ dvp = ap->a_dvp;
+ newvp = NULL;
+
+ error =
+ afs_symlink(VTOAFS(dvp), name, ap->a_vap, ap->a_target, NULL,
+ cnp->cn_cred);
+ if (error == 0) {
+ error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
+ if (error == 0) {
+ newvp = AFSTOV(vcp);
+ vn_lock(newvp, LK_EXCLUSIVE | LK_RETRY);
}
}
-out:
- pl[i] = VM_PAGE_NULL;
- ReleaseWriteLock(&avc->lock);
- afs_BozonUnlock(&avc->pvnLock, avc);
- afs_Trace3(afs_iclSetp, CM_TRACE_PAGEINDONE, ICL_TYPE_INT32, code,
- ICL_TYPE_POINTER, *pagep, ICL_TYPE_INT32, flags);
- code = afs_CheckCode(code, &treq, 40);
AFS_GUNLOCK();
- return code;
+ DROPNAME();
+ *(ap->a_vpp) = newvp;
+ return error;
}
-
-int mp_afs_putpage(vop, pl, pcnt, flags, cred)
- vm_ubc_object_t vop;
- vm_page_t *pl;
- int pcnt;
- int flags;
- struct ucred *cred;
+static int
+afs_vop_readdir(ap)
+ struct vop_readdir_args /* {
+ * struct vnode *a_vp;
+ * struct uio *a_uio;
+ * struct ucred *a_cred;
+ * int *a_eofflag;
+ * u_long *a_cookies;
+ * int ncookies;
+ * } */ *ap;
{
- register afs_int32 code=0;
- struct vcache *avc = (struct vcache *)vop->vu_vp;
- struct vnode *vp = (struct vnode *)avc;
- int i;
-
+ int error;
+ off_t off;
+/* printf("readdir %x cookies %x ncookies %d\n", ap->a_vp, ap->a_cookies,
+ ap->a_ncookies); */
+ off = ap->a_uio->uio_offset;
AFS_GLOCK();
- afs_Trace4(afs_iclSetp, CM_TRACE_PAGEOUT, ICL_TYPE_POINTER, avc,
- ICL_TYPE_INT32, pcnt, ICL_TYPE_INT32, vp->v_flag,
- ICL_TYPE_INT32, flags);
- if (flags & B_UBC) {
- AFS_GUNLOCK();
- VN_LOCK(vp);
- if (vp->v_flag & VXLOCK) {
- VN_UNLOCK(vp);
- for (i = 0; i < pcnt; i++) {
- ubc_page_release(pl[i], B_DONE|B_DIRTY);
- pl[i] = VM_PAGE_NULL;
- }
- return(0);
- } else {
- VN_UNLOCK(vp);
+ error =
+ afs_readdir(VTOAFS(ap->a_vp), ap->a_uio, ap->a_cred, ap->a_eofflag);
+ AFS_GUNLOCK();
+ if (!error && ap->a_ncookies != NULL) {
+ struct uio *uio = ap->a_uio;
+ const struct dirent *dp, *dp_start, *dp_end;
+ int ncookies;
+ u_long *cookies, *cookiep;
+
+ if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
+ panic("afs_readdir: burned cookies");
+ dp = (const struct dirent *)
+ ((const char *)uio->uio_iov->iov_base - (uio->uio_offset - off));
+
+ dp_end = (const struct dirent *)uio->uio_iov->iov_base;
+ for (dp_start = dp, ncookies = 0; dp < dp_end;
+ dp = (const struct dirent *)((const char *)dp + dp->d_reclen))
+ ncookies++;
+
+ cookies = malloc(ncookies * sizeof(u_long), M_TEMP,
+ M_WAITOK);
+ for (dp = dp_start, cookiep = cookies; dp < dp_end;
+ dp = (const struct dirent *)((const char *)dp + dp->d_reclen)) {
+ off += dp->d_reclen;
+ *cookiep++ = off;
}
- AFS_GLOCK();
+ *ap->a_cookies = cookies;
+ *ap->a_ncookies = ncookies;
}
- /* first, obtain the proper lock for the VM system */
- afs_BozonLock(&avc->pvnLock, avc);
- ObtainWriteLock(&avc->lock,170);
- for (i = 0; i < pcnt; i++) {
- vm_page_t page = pl[i];
- struct buf *bp;
-
- /* write it out */
- AFS_GUNLOCK();
- bp = ubc_bufalloc(page, 1, PAGE_SIZE, 1, B_WRITE);
- AFS_GLOCK();
- bp->b_dev = 0;
- bp->b_vp = (struct vnode *)avc;
- bp->b_blkno = btodb(page->pg_offset);
- ReleaseWriteLock(&avc->lock);
- code = afs_ustrategy(bp, cred); /* do the I/O */
- ObtainWriteLock(&avc->lock,171);
- AFS_GUNLOCK();
- ubc_sync_iodone(bp);
- AFS_GLOCK();
- if (code) {
- goto done;
- } else {
- pl[i] = VM_PAGE_NULL;
- }
- }
-done:
- ReleaseWriteLock(&avc->lock);
- afs_BozonUnlock(&avc->pvnLock, avc);
- afs_Trace2(afs_iclSetp, CM_TRACE_PAGEOUTDONE, ICL_TYPE_INT32, code,
- ICL_TYPE_INT32, avc->m.Length);
- AFS_GUNLOCK();
- return code;
+ return error;
}
-
-int mp_afs_swap(avc, swapop, argp)
- struct vcache *avc;
- vp_swap_op_t swapop;
- vm_offset_t argp;
+static int
+afs_vop_readlink(ap)
+ struct vop_readlink_args /* {
+ * struct vnode *a_vp;
+ * struct uio *a_uio;
+ * struct ucred *a_cred;
+ * } */ *ap;
{
- return EIO;
+ int error;
+/* printf("readlink %x\n", ap->a_vp);*/
+ AFS_GLOCK();
+ error = afs_readlink(VTOAFS(ap->a_vp), ap->a_uio, ap->a_cred);
+ AFS_GUNLOCK();
+ return error;
}
-int mp_afs_syncdata(avc, flag, offset, length, cred)
- struct vcache *avc;
- int flag;
- vm_offset_t offset;
- vm_size_t length;
- struct ucred *cred;
+static int
+afs_vop_inactive(ap)
+ struct vop_inactive_args /* {
+ * struct vnode *a_vp;
+ * struct thread *td;
+ * } */ *ap;
{
- /* NFS V3 makes this call, ignore it. We'll sync the data in afs_fsync. */
- if (AFS_NFSXLATORREQ(cred))
- return 0;
- else
- return EINVAL;
-}
+ struct vnode *vp = ap->a_vp;
-/* a freelist of one */
-struct buf *afs_bread_freebp = 0;
+ AFS_GLOCK();
+ afs_InactiveVCache(VTOAFS(vp), 0); /* decrs ref counts */
+ AFS_GUNLOCK();
+ return 0;
+}
/*
- * Only rfs_read calls this, and it only looks at bp->b_un.b_addr.
- * Thus we can use fake bufs (ie not from the real buffer pool).
+ * struct vop_reclaim_args {
+ * struct vnode *a_vp;
+ * };
*/
-mp_afs_bread(vp, lbn, bpp, cred)
- struct ucred *cred;
- struct vnode *vp;
- daddr_t lbn;
- struct buf **bpp;
+static int
+afs_vop_reclaim(struct vop_reclaim_args *ap)
{
- int offset, fsbsize, error;
- struct buf *bp;
- struct iovec iov;
- struct uio uio;
+ int code, slept;
+ struct vnode *vp = ap->a_vp;
+ struct vcache *avc = VTOAFS(vp);
+ int haveGlock = ISAFS_GLOCK();
+
+ /*
+ * In other code paths, we acquire the vnode lock while afs_xvcache is
+ * already held (e.g. afs_PutVCache() -> vrele()). Here, we already have
+ * the vnode lock, and we need afs_xvcache. So drop the vnode lock in order
+ * to hold afs_xvcache.
+ */
+ VOP_UNLOCK(vp, 0);
+ if (!haveGlock)
AFS_GLOCK();
- AFS_STATCNT(afs_bread);
- fsbsize = vp->v_vfsp->vfs_bsize;
- offset = lbn * fsbsize;
- if (afs_bread_freebp) {
- bp = afs_bread_freebp;
- afs_bread_freebp = 0;
- } else {
- bp = (struct buf *) AFS_KALLOC(sizeof(*bp));
- bp->b_un.b_addr = (caddr_t) AFS_KALLOC(fsbsize);
- }
+ ObtainWriteLock(&afs_xvcache, 901);
- iov.iov_base = bp->b_un.b_addr;
- iov.iov_len = fsbsize;
- uio.afsio_iov = &iov;
- uio.afsio_iovcnt = 1;
- uio.afsio_seg = AFS_UIOSYS;
- uio.afsio_offset = offset;
- uio.afsio_resid = fsbsize;
- *bpp = 0;
- error = afs_read((struct vcache *)vp, &uio, cred, lbn, bpp, 0);
- if (error) {
- afs_bread_freebp = bp;
- AFS_GUNLOCK();
- return error;
- }
- if (*bpp) {
- afs_bread_freebp = bp;
- } else {
- *(struct buf **)&bp->b_vp = bp; /* mark as fake */
- *bpp = bp;
- }
- AFS_GUNLOCK();
- return 0;
-}
+ /*
+ * Note that we deliberately call VOP_LOCK() instead of vn_lock() here.
+ * vn_lock() will return an error for VI_DOOMED vnodes, but we know this
+ * vnode is already VI_DOOMED. We just want to lock it again, and skip the
+ * VI_DOOMED check.
+ */
+ VOP_LOCK(vp, LK_EXCLUSIVE);
+ code = afs_FlushVCache(avc, &slept);
-mp_afs_brelse(vp, bp)
-struct vnode *vp;
-struct buf *bp;
-{
- AFS_GLOCK();
- AFS_STATCNT(afs_brelse);
- if ((struct buf *)bp->b_vp != bp) { /* not fake */
- brelse(bp);
- } else if (afs_bread_freebp) {
- AFS_KFREE(bp->b_un.b_addr, vp->v_vfsp->vfs_bsize);
- AFS_KFREE(bp, sizeof(*bp));
- } else {
- afs_bread_freebp = bp;
- }
- AFS_GUNLOCK();
-}
+ if (avc->f.states & CVInit) {
+ avc->f.states &= ~CVInit;
+ afs_osi_Wakeup(&avc->f.states);
+ }
+ ReleaseWriteLock(&afs_xvcache);
+ if (!haveGlock)
+ AFS_GUNLOCK();
+
+ if (code) {
+ afs_warn("afs_vop_reclaim: afs_FlushVCache failed code %d vnode\n", code);
+ VOP_PRINT(vp);
+ panic("afs: afs_FlushVCache failed during reclaim");
+ }
+
+ vnode_destroy_vobject(vp);
+ vp->v_data = 0;
-mp_afs_bmap(avc, abn, anvp, anbn)
- register struct vcache *avc;
- afs_int32 abn, *anbn;
- struct vcache **anvp;
-{
- AFS_GLOCK();
- AFS_STATCNT(afs_bmap);
- if (anvp)
- *anvp = avc;
- if (anbn)
- *anbn = abn * (8192 / DEV_BSIZE); /* in 512 byte units */
- AFS_GUNLOCK();
return 0;
}
-
-/* real strategy */
-mp_afs_strategy (abp)
- register struct buf *abp;
+static int
+afs_vop_strategy(ap)
+ struct vop_strategy_args /* {
+ * struct buf *a_bp;
+ * } */ *ap;
{
- register afs_int32 code;
-
+ int error;
AFS_GLOCK();
- AFS_STATCNT(afs_strategy);
- code = afs_osi_MapStrategy(afs_ustrategy, abp);
+ error = afs_ustrategy(ap->a_bp, osi_curcred());
AFS_GUNLOCK();
- return code;
+ return error;
}
-
-mp_afs_refer(vm_ubc_object_t vop)
+static int
+afs_vop_print(ap)
+ struct vop_print_args /* {
+ * struct vnode *a_vp;
+ * } */ *ap;
{
- VREF(vop->vu_vp);
-}
-
-
-mp_afs_release(vm_ubc_object_t vop)
-{
- vrele(vop->vu_vp);
-}
-
-
-mp_afs_write_check(vm_ubc_object_t vop, vm_page_t pp)
-{
- return TRUE;
+ struct vnode *vp = ap->a_vp;
+ struct vcache *vc = VTOAFS(ap->a_vp);
+ int s = vc->f.states;
+
+ printf("vc %p vp %p tag %s, fid: %d.%d.%d.%d, opens %d, writers %d", vc, vp, vp->v_tag,
+ (int)vc->f.fid.Cell, (u_int) vc->f.fid.Fid.Volume,
+ (u_int) vc->f.fid.Fid.Vnode, (u_int) vc->f.fid.Fid.Unique, vc->opens,
+ vc->execsOrWriters);
+ printf("\n states%s%s%s%s%s", (s & CStatd) ? " statd" : "",
+ (s & CRO) ? " readonly" : "", (s & CDirty) ? " dirty" : "",
+ (s & CMAPPED) ? " mapped" : "",
+ (s & CVFlushed) ? " flush in progress" : "");
+ printf("\n");
+ return 0;
}
-
-
-struct vfs_ubcops afs_ubcops = {
- mp_afs_refer, /* refer vnode */
- mp_afs_release, /* release vnode */
- mp_afs_getpage, /* get page */
- mp_afs_putpage, /* put page */
- mp_afs_write_check, /* check writablity */
-};
-#endif /* 0 */
-
/*
- * Cover function for lookup name using OSF equivalent, namei()
- *
- * Note, the result vnode (ni_vp) in the namei data structure is remains
- * locked after return.
+ * Advisory record locking support (fcntl() POSIX style)
*/
-lookupname(namep, seg, follow, dvpp, cvpp)
- char *namep; /* path name */
- int seg; /* address space containing name */
- int follow; /* follow symbolic links */
- struct vnode **dvpp; /* result, containing parent vnode */
- struct vnode **cvpp; /* result, containing final component vnode */
+static int
+afs_vop_advlock(ap)
+ struct vop_advlock_args /* {
+ * struct vnode *a_vp;
+ * caddr_t a_id;
+ * int a_op;
+ * struct flock *a_fl;
+ * int a_flags;
+ * } */ *ap;
{
- /* Should I use free-bee in u-area? */
- struct nameidata *ndp = &u.u_nd;
- int error;
+ int error, a_op;
+ struct ucred cr = *osi_curcred();
- ndp->ni_nameiop = ((follow) ? (LOOKUP|FOLLOW) : (LOOKUP));
- ndp->ni_segflg = seg;
- ndp->ni_dirp = namep;
- error = namei(ndp);
- if (dvpp != (struct vnode **)0)
- *dvpp = ndp->ni_dvp;
- if (cvpp != (struct vnode **)0)
- *cvpp = ndp->ni_vp;
- return(error);
+ a_op = ap->a_op;
+ if (a_op == F_UNLCK) {
+ /*
+ * When a_fl->type is F_UNLCK, FreeBSD passes in an a_op of F_UNLCK.
+ * This is (confusingly) different than how you actually release a lock
+ * with fcntl(), which is done with an a_op of F_SETLK and an l_type of
+ * F_UNLCK. Pretend we were given an a_op of F_SETLK in this case,
+ * since this is what afs_lockctl expects.
+ */
+ a_op = F_SETLK;
+ }
+
+ AFS_GLOCK();
+ error =
+ afs_lockctl(VTOAFS(ap->a_vp),
+ ap->a_fl,
+ a_op, &cr,
+ (int)(intptr_t)ap->a_id); /* XXX: no longer unique! */
+ AFS_GUNLOCK();
+ return error;
}
+struct vop_vector afs_vnodeops = {
+ .vop_default = &default_vnodeops,
+ .vop_access = afs_vop_access,
+ .vop_advlock = afs_vop_advlock,
+ .vop_close = afs_vop_close,
+ .vop_create = afs_vop_create,
+ .vop_fsync = afs_vop_fsync,
+ .vop_getattr = afs_vop_getattr,
+ .vop_getpages = afs_vop_getpages,
+ .vop_inactive = afs_vop_inactive,
+ .vop_ioctl = afs_vop_ioctl,
+ .vop_link = afs_vop_link,
+ .vop_lookup = afs_vop_lookup,
+ .vop_mkdir = afs_vop_mkdir,
+ .vop_mknod = afs_vop_mknod,
+ .vop_open = afs_vop_open,
+ .vop_pathconf = afs_vop_pathconf,
+ .vop_print = afs_vop_print,
+ .vop_putpages = afs_vop_putpages,
+ .vop_read = afs_vop_read,
+ .vop_readdir = afs_vop_readdir,
+ .vop_readlink = afs_vop_readlink,
+ .vop_reclaim = afs_vop_reclaim,
+ .vop_remove = afs_vop_remove,
+ .vop_rename = afs_vop_rename,
+ .vop_rmdir = afs_vop_rmdir,
+ .vop_setattr = afs_vop_setattr,
+ .vop_strategy = afs_vop_strategy,
+ .vop_symlink = afs_vop_symlink,
+ .vop_write = afs_vop_write,
+};