#include <afsconfig.h>
#include <afs/param.h>
-RCSID
- ("$Header$");
#include <afs/sysincludes.h> /* Standard vendor system headers */
#include <afsincludes.h> /* Afs-based standard headers */
#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/unistd.h>
-#ifndef AFS_FBSD50_ENV
-#include <vm/vm_zone.h>
-#endif
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
static vop_strategy_t afs_vop_strategy;
static vop_symlink_t afs_vop_symlink;
static vop_write_t afs_vop_write;
+#if defined(AFS_FBSD70_ENV) && !defined(AFS_FBSD80_ENV)
+static vop_lock1_t afs_vop_lock;
+static vop_unlock_t afs_vop_unlock;
+static vop_islocked_t afs_vop_islocked;
+#endif
struct vop_vector afs_vnodeops = {
.vop_default = &default_vnodeops,
.vop_getpages = afs_vop_getpages,
.vop_inactive = afs_vop_inactive,
.vop_ioctl = afs_vop_ioctl,
+#if !defined(AFS_FBSD80_ENV)
+ /* removed at least temporarily (NFSv4 flux) */
.vop_lease = VOP_NULL,
+#endif
.vop_link = afs_vop_link,
.vop_lookup = afs_vop_lookup,
.vop_mkdir = afs_vop_mkdir,
.vop_strategy = afs_vop_strategy,
.vop_symlink = afs_vop_symlink,
.vop_write = afs_vop_write,
+#if defined(AFS_FBSD70_ENV) && !defined(AFS_FBSD80_ENV)
+ .vop_lock1 = afs_vop_lock,
+ .vop_unlock = afs_vop_unlock,
+ .vop_islocked = afs_vop_islocked,
+#endif
};
#else /* AFS_FBSD60_ENV */
int afs_vop_ioctl(struct vop_ioctl_args *);
static int afs_vop_pathconf(struct vop_pathconf_args *);
int afs_vop_poll(struct vop_poll_args *);
-#ifndef AFS_FBSD50_ENV
-int afs_vop_mmap(struct vop_mmap_args *);
-#endif
int afs_vop_fsync(struct vop_fsync_args *);
int afs_vop_remove(struct vop_remove_args *);
int afs_vop_link(struct vop_link_args *);
{&vop_access_desc, (vop_t *) afs_vop_access}, /* access */
{&vop_advlock_desc, (vop_t *) afs_vop_advlock}, /* advlock */
{&vop_bmap_desc, (vop_t *) afs_vop_bmap}, /* bmap */
-#ifndef AFS_FBSD50_ENV
- {&vop_bwrite_desc, (vop_t *) vop_stdbwrite},
-#endif
{&vop_close_desc, (vop_t *) afs_vop_close}, /* close */
{&vop_createvobject_desc, (vop_t *) vop_stdcreatevobject},
{&vop_destroyvobject_desc, (vop_t *) vop_stddestroyvobject},
{&vop_lookup_desc, (vop_t *) afs_vop_lookup}, /* lookup */
{&vop_mkdir_desc, (vop_t *) afs_vop_mkdir}, /* mkdir */
{&vop_mknod_desc, (vop_t *) afs_vop_mknod}, /* mknod */
-#ifndef AFS_FBSD50_ENV
- {&vop_mmap_desc, (vop_t *) afs_vop_mmap}, /* mmap */
-#endif
{&vop_open_desc, (vop_t *) afs_vop_open}, /* open */
{&vop_pathconf_desc, (vop_t *) afs_vop_pathconf}, /* pathconf */
{&vop_poll_desc, (vop_t *) afs_vop_poll}, /* select */
{&vop_write_desc, (vop_t *) afs_vop_write}, /* write */
{&vop_ioctl_desc, (vop_t *) afs_vop_ioctl}, /* XXX ioctl */
/*{ &vop_seek_desc, afs_vop_seek }, *//* seek */
+#if defined(AFS_FBSD70_ENV) && !defined(AFS_FBSD90_ENV)
+ {&vop_lock1_desc, (vop_t *) afs_vop_lock}, /* lock */
+ {&vop_unlock_desc, (vop_t *) afs_vop_unlock}, /* unlock */
+ {&vop_islocked_desc, (vop_t *) afs_vop_islocked}, /* islocked */
+#endif
{NULL, NULL}
};
struct vnodeopv_desc afs_vnodeop_opv_desc =
#define DROPNAME() FREE(name, M_TEMP)
-/* This is a bit of a cheat... */
-#ifdef AFS_FBSD50_ENV
-#define a_p a_td
+/*
+ * Here we define compatibility functions/macros for interfaces that
+ * have changed between different FreeBSD versions.
+ *
+ * From FreeBSD 9.0 on, individual pages are locked with
+ * vm_page_lock()/vm_page_unlock() and the global page-queue lock is
+ * gone; before that, vm_page_lock_queues()/vm_page_unlock_queues()
+ * protect the queues and per-page locking is a no-op.
+ */
+#if defined(AFS_FBSD90_ENV)
+static __inline void ma_vm_page_lock_queues(void) {}
+static __inline void ma_vm_page_unlock_queues(void) {}
+static __inline void ma_vm_page_lock(vm_page_t m) { vm_page_lock(m); }
+static __inline void ma_vm_page_unlock(vm_page_t m) { vm_page_unlock(m); }
+#else
+static __inline void ma_vm_page_lock_queues(void) { vm_page_lock_queues(); }
+static __inline void ma_vm_page_unlock_queues(void) { vm_page_unlock_queues(); }
+static __inline void ma_vm_page_lock(vm_page_t m) {}
+static __inline void ma_vm_page_unlock(vm_page_t m) {}
+#endif
+
+/*
+ * FreeBSD 8.0 dropped the thread argument from vn_lock(), VOP_LOCK()
+ * and VOP_UNLOCK(); these wrappers let callers pass it unconditionally.
+ */
+#if defined(AFS_FBSD80_ENV)
+#define ma_vn_lock(vp, flags, p) (vn_lock(vp, flags))
+#define MA_VOP_LOCK(vp, flags, p) (VOP_LOCK(vp, flags))
+#define MA_VOP_UNLOCK(vp, flags, p) (VOP_UNLOCK(vp, flags))
+#else
+#define ma_vn_lock(vp, flags, p) (vn_lock(vp, flags, p))
+#define MA_VOP_LOCK(vp, flags, p) (VOP_LOCK(vp, flags, p))
+#define MA_VOP_UNLOCK(vp, flags, p) (VOP_UNLOCK(vp, flags, p))
+#endif
+
+/*
+ * VM statistics counters: PCPU_INC()/PCPU_ADD() are available from
+ * FreeBSD 7.0; earlier releases use PCPU_LAZY_INC() and a plain add.
+ */
+#if defined(AFS_FBSD70_ENV)
+#define MA_PCPU_INC(c) PCPU_INC(c)
+#define MA_PCPU_ADD(c, n) PCPU_ADD(c, n)
+#else
+#define MA_PCPU_INC(c) PCPU_LAZY_INC(c)
+#define MA_PCPU_ADD(c, n) (c) += (n)
+#endif
+
+#ifdef AFS_FBSD70_ENV
+#ifndef AFS_FBSD80_ENV
+/* From kern_lock.c */
+#define COUNT(td, x) if ((td)) (td)->td_locks += (x)
+#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
+ LK_SHARE_NONZERO | LK_WAIT_NONZERO)
+
+/*
+ * Bump lkp's shared-hold count by incr and credit the thread's lock
+ * count, mirroring the static sharelock() in sys/kern/kern_lock.c.
+ * NOTE(review): no interlock is taken here; the only caller below
+ * brackets the call with critical_enter()/critical_exit() -- confirm
+ * that is sufficient against concurrent lockmgr users.
+ */
+static __inline void
+sharelock(struct thread *td, struct lock *lkp, int incr) {
+ lkp->lk_flags |= LK_SHARE_NONZERO;
+ lkp->lk_sharecount += incr;
+ COUNT(td, incr);
+}
+#endif
+
+/*
+ * Standard lock, unlock and islocked functions.
+ *
+ * vop_lock1 implementation: operate on the vnode's shared lock
+ * (v_vnlock) directly, forwarding the caller's flags and the
+ * file/line of the request for lockmgr diagnostics.
+ */
+int
+afs_vop_lock(ap)
+ struct vop_lock1_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ struct thread *a_td;
+ char *file;
+ int line;
+ } */ *ap;
+{
+ struct vnode *vp = ap->a_vp;
+ struct lock *lkp = vp->v_vnlock;
+
+#if 0 && defined(AFS_FBSD80_ENV) && !defined(UKERNEL)
+ /* Disabled debugging aid: log every lock attempt with a backtrace. */
+ afs_warn("afs_vop_lock: tid %d pid %d \"%s\"\n", curthread->td_tid,
+ curthread->td_proc->p_pid, curthread->td_name);
+ kdb_backtrace();
+#endif
+
+#ifdef AFS_FBSD80_ENV
+ return (_lockmgr_args(lkp, ap->a_flags, VI_MTX(vp),
+ LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
+ ap->a_file, ap->a_line));
+#else
+ return (_lockmgr(lkp, ap->a_flags, VI_MTX(vp), ap->a_td, ap->a_file, ap->a_line));
+#endif
+}
+
+/* See above.
+ *
+ * Release the vnode lock.  On 8.0+ the AFS global lock is dropped
+ * around lockmgr() to avoid sleeping with it held; on pre-8.0 a
+ * missing hold is papered over by faking a shared hold first (see
+ * the in-line comment below).
+ */
+int
+afs_vop_unlock(ap)
+ struct vop_unlock_args /* {
+ struct vnode *a_vp;
+ int a_flags;
+ struct thread *a_td;
+ } */ *ap;
+{
+ struct vnode *vp = ap->a_vp;
+ struct lock *lkp = vp->v_vnlock;
+
+#ifdef AFS_FBSD80_ENV
+ /* Declarations before statements (-Wdeclaration-after-statement). */
+ int code = 0;
+ int glocked = ISAFS_GLOCK();
+ u_int op = ((ap->a_flags) | LK_RELEASE) & LK_TYPE_MASK;
+
+ if (glocked)
+ AFS_GUNLOCK();
+ /* More than one lock-type bit set at once is a caller bug; bail out. */
+ if ((op & (op - 1)) != 0) {
+ afs_warn("afs_vop_unlock: Shit.\n");
+ goto done;
+ }
+ code = lockmgr(lkp, ap->a_flags | LK_RELEASE, VI_MTX(vp));
+ done:
+ if (glocked)
+ AFS_GLOCK();
+ return(code);
+#else
+ /* possibly in current code path where this
+ * forces trace, we should have had a (shared? not
+ * necessarily, see _lockmgr in kern_lock.c) lock
+ * and that's the real bug. but.
+ */
+ critical_enter();
+ if ((lkp->lk_exclusivecount == 0) &&
+ (!(lkp->lk_flags & LK_SHARE_NONZERO))) {
+ /* Lock not held at all: fake a shared hold so the release
+ * below does not panic lockmgr. */
+ sharelock(ap->a_td, lkp, 1);
+ }
+ critical_exit();
+ return (lockmgr(lkp, ap->a_flags | LK_RELEASE, VI_MTX(vp),
+ ap->a_td));
+#endif
+}
+
+/* See above.
+ * Report the lock state of the vnode's v_vnlock via lockstatus().
+ */
+int
+afs_vop_islocked(ap)
+ struct vop_islocked_args /* {
+ struct vnode *a_vp;
+ struct thread *a_td; (not in 80)
+ } */ *ap;
+{
+#ifdef AFS_FBSD80_ENV
+ /* lockstatus() lost its thread argument in FreeBSD 8.0. */
+ return (lockstatus(ap->a_vp->v_vnlock));
+#else
+ return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
#endif
+}
+#endif /* 70 */
/*
* Mosty copied from sys/ufs/ufs/ufs_vnops.c:ufs_pathconf().
int error;
struct vcache *vcp;
struct vnode *vp, *dvp;
- register int flags = ap->a_cnp->cn_flags;
+ int flags = ap->a_cnp->cn_flags;
int lockparent; /* 1 => lockparent flag is set */
int wantparent; /* 1 => wantparent or lockparent flag */
-#ifdef AFS_FBSD50_ENV
struct thread *p = ap->a_cnp->cn_thread;
-#else
- struct proc *p = ap->a_cnp->cn_proc;
+
+ dvp = ap->a_dvp;
+ if (dvp->v_type != VDIR) {
+#ifndef AFS_FBSD70_ENV
+ *ap->a_vpp = 0;
#endif
+ return ENOTDIR;
+ }
+
+ if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
+ return EIO;
+
GETNAME();
lockparent = flags & LOCKPARENT;
wantparent = flags & (LOCKPARENT | WANTPARENT);
- if (ap->a_dvp->v_type != VDIR) {
- *ap->a_vpp = 0;
- DROPNAME();
- return ENOTDIR;
- }
- dvp = ap->a_dvp;
+#ifdef AFS_FBSD80_ENV
+ cnp->cn_flags |= MPSAFE; /* steel */
+#endif
+
+#ifndef AFS_FBSD70_ENV
if (flags & ISDOTDOT)
VOP_UNLOCK(dvp, 0, p);
+#endif
+
AFS_GLOCK();
error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
AFS_GUNLOCK();
+
if (error) {
if (flags & ISDOTDOT)
- VOP_LOCK(dvp, LK_EXCLUSIVE | LK_RETRY, p);
+ MA_VOP_LOCK(dvp, LK_EXCLUSIVE | LK_RETRY, p);
if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)
&& (flags & ISLASTCN) && error == ENOENT)
error = EJUSTRETURN;
* we also always return the vnode locked. */
if (flags & ISDOTDOT) {
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ ma_vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
/* always return the child locked */
if (lockparent && (flags & ISLASTCN)
- && (error = vn_lock(dvp, LK_EXCLUSIVE, p))) {
+ && (error = ma_vn_lock(dvp, LK_EXCLUSIVE, p))) {
vput(vp);
DROPNAME();
return (error);
/* they're the same; afs_lookup() already ref'ed the leaf.
* It came in locked, so we don't need to ref OR lock it */
} else {
- if (!lockparent || !(flags & ISLASTCN))
- VOP_UNLOCK(dvp, 0, p); /* done with parent. */
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ if (!lockparent || !(flags & ISLASTCN)) {
+#ifndef AFS_FBSD70_ENV /* 6 too? */
+ MA_VOP_UNLOCK(dvp, 0, p); /* done with parent. */
+#endif
+ }
+ ma_vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
/* always return the child locked */
}
*ap->a_vpp = vp;
{
int error = 0;
struct vcache *vcp;
- register struct vnode *dvp = ap->a_dvp;
-#ifdef AFS_FBSD50_ENV
+ struct vnode *dvp = ap->a_dvp;
struct thread *p = ap->a_cnp->cn_thread;
-#else
- struct proc *p = ap->a_cnp->cn_proc;
-#endif
GETNAME();
AFS_GLOCK();
if (vcp) {
*ap->a_vpp = AFSTOV(vcp);
- vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY, p);
+ ma_vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY, p);
} else
*ap->a_vpp = 0;
* struct vnode *a_vp;
* int a_mode;
* struct ucred *a_cred;
- * struct proc *a_p;
+ * struct thread *a_td;
+ * struct file *a_fp;
* } */ *ap;
{
int error;
#endif
AFS_GUNLOCK();
#ifdef AFS_FBSD60_ENV
- vnode_create_vobject(ap->a_vp, vc->m.Length, ap->a_td);
+ vnode_create_vobject(ap->a_vp, vc->f.m.Length, ap->a_td);
#endif
osi_FlushPages(vc, ap->a_cred);
return error;
* struct vnode *a_vp;
* int a_fflag;
* struct ucred *a_cred;
- * struct proc *a_p;
+ * struct thread *a_td;
* } */ *ap;
{
int code;
afs_vop_access(ap)
struct vop_access_args /* {
* struct vnode *a_vp;
- * int a_mode;
+ * accmode_t a_accmode;
* struct ucred *a_cred;
- * struct proc *a_p;
+ * struct thread *a_td;
* } */ *ap;
{
int code;
AFS_GLOCK();
+#if defined(AFS_FBSD80_ENV)
+ code = afs_access(VTOAFS(ap->a_vp), ap->a_accmode, ap->a_cred);
+#else
code = afs_access(VTOAFS(ap->a_vp), ap->a_mode, ap->a_cred);
+#endif
AFS_GUNLOCK();
return code;
}
* struct vnode *a_vp;
* struct vattr *a_vap;
* struct ucred *a_cred;
- * struct proc *a_p;
* } */ *ap;
{
int code;
* struct vnode *a_vp;
* struct vattr *a_vap;
* struct ucred *a_cred;
- * struct proc *a_p;
* } */ *ap;
{
int code;
struct vnode *vp;
struct vcache *avc;
-#ifdef AFS_FBSD50_ENV
- GIANT_REQUIRED;
-#endif
vp = ap->a_vp;
avc = VTOAFS(vp);
if ((object = vp->v_object) == NULL) {
{
vm_page_t m = ap->a_m[ap->a_reqpage];
-#ifdef AFS_FBSD50_ENV
VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
-#endif
+ ma_vm_page_lock_queues();
if (m->valid != 0) {
/* handled by vm_fault now */
/* vm_page_zero_invalid(m, TRUE); */
for (i = 0; i < npages; ++i) {
- if (i != ap->a_reqpage)
+ if (i != ap->a_reqpage) {
+ ma_vm_page_lock(ap->a_m[i]);
vm_page_free(ap->a_m[i]);
+ ma_vm_page_unlock(ap->a_m[i]);
+ }
}
-#ifdef AFS_FBSD50_ENV
- vm_page_unlock_queues();
+ ma_vm_page_unlock_queues();
VM_OBJECT_UNLOCK(object);
-#endif
return (0);
}
-#ifdef AFS_FBSD50_ENV
- vm_page_unlock_queues();
+ ma_vm_page_unlock_queues();
VM_OBJECT_UNLOCK(object);
-#endif
}
bp = getpbuf(&afs_pbuf_freecnt);
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, ap->a_m, npages);
-#ifdef AFS_FBSD50_ENV
- cnt.v_vnodein++;
- cnt.v_vnodepgsin += npages;
-#endif
+ MA_PCPU_INC(cnt.v_vnodein);
+ MA_PCPU_ADD(cnt.v_vnodepgsin, npages);
iov.iov_base = (caddr_t) kva;
iov.iov_len = ap->a_count;
uio.uio_resid = ap->a_count;
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_rw = UIO_READ;
-#ifdef AFS_FBSD50_ENV
uio.uio_td = curthread;
-#else
- uio.uio_procp = curproc;
-#endif
AFS_GLOCK();
osi_FlushPages(avc, osi_curcred()); /* hold bozon lock, but not basic vnode lock */
relpbuf(bp, &afs_pbuf_freecnt);
if (code && (uio.uio_resid == ap->a_count)) {
-#ifdef AFS_FBSD50_ENV
VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
-#endif
+ ma_vm_page_lock_queues();
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage)
vm_page_free(ap->a_m[i]);
}
-#ifdef AFS_FBSD50_ENV
- vm_page_unlock_queues();
+ ma_vm_page_unlock_queues();
VM_OBJECT_UNLOCK(object);
-#endif
return VM_PAGER_ERROR;
}
size = ap->a_count - uio.uio_resid;
-#ifdef AFS_FBSD50_ENV
VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
-#endif
+ ma_vm_page_lock_queues();
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
nextoff = toff + PAGE_SIZE;
m = ap->a_m[i];
+ /* XXX not in nfsclient? */
m->flags &= ~PG_ZERO;
if (nextoff <= size) {
* Read operation filled an entire page
*/
m->valid = VM_PAGE_BITS_ALL;
+#ifndef AFS_FBSD80_ENV
vm_page_undirty(m);
+#else
+ KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
+#endif
} else if (size > toff) {
/*
* Read operation filled a partial page.
*/
m->valid = 0;
- vm_page_set_validclean(m, 0, size - toff);
- /* handled by vm_fault now */
- /* vm_page_zero_invalid(m, TRUE); */
+ vm_page_set_valid(m, 0, size - toff);
+#ifndef AFS_FBSD80_ENV
+ vm_page_undirty(m);
+#else
+ KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
+#endif
}
if (i != ap->a_reqpage) {
*/
if (!code) {
#if defined(AFS_FBSD70_ENV)
- if(0) /* XXXX fixme for 7.0 */
+ if (m->oflags & VPO_WANTED) {
#else
- if (m->flags & PG_WANTED)
+ if (m->flags & PG_WANTED) {
#endif
+ ma_vm_page_lock(m);
vm_page_activate(m);
- else
+ ma_vm_page_unlock(m);
+ }
+ else {
+ ma_vm_page_lock(m);
vm_page_deactivate(m);
+ ma_vm_page_unlock(m);
+ }
vm_page_wakeup(m);
} else {
+ ma_vm_page_lock(m);
vm_page_free(m);
+ ma_vm_page_unlock(m);
}
}
}
-#ifdef AFS_FBSD50_ENV
- vm_page_unlock_queues();
+ ma_vm_page_unlock_queues();
VM_OBJECT_UNLOCK(object);
-#endif
return 0;
}
struct vnode *vp;
struct vcache *avc;
-#ifdef AFS_FBSD50_ENV
- GIANT_REQUIRED;
-#endif
-
vp = ap->a_vp;
avc = VTOAFS(vp);
/* Perhaps these two checks should just be KASSERTs instead... */
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, ap->a_m, npages);
-#ifdef AFS_FBSD50_ENV
- cnt.v_vnodeout++;
- cnt.v_vnodepgsout += ap->a_count;
-#endif
+ MA_PCPU_INC(cnt.v_vnodeout);
+ MA_PCPU_ADD(cnt.v_vnodepgsout, ap->a_count);
iov.iov_base = (caddr_t) kva;
iov.iov_len = ap->a_count;
uio.uio_resid = ap->a_count;
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_rw = UIO_WRITE;
-#ifdef AFS_FBSD50_ENV
uio.uio_td = curthread;
-#else
- uio.uio_procp = curproc;
-#endif
sync = IO_VMIO;
if (ap->a_sync & VM_PAGER_PUT_SYNC)
sync |= IO_SYNC;
afs_vop_ioctl(ap)
struct vop_ioctl_args /* {
* struct vnode *a_vp;
- * int a_command;
- * caddr_t a_data;
+ * u_long a_command;
+ * void *a_data;
* int a_fflag;
* struct ucred *a_cred;
- * struct proc *a_p;
+ * struct thread *a_td;
* } */ *ap;
{
struct vcache *tvc = VTOAFS(ap->a_vp);
if (((ap->a_command >> 8) & 0xff) == 'V') {
/* This is a VICEIOCTL call */
AFS_GLOCK();
- error = HandleIoctl(tvc, NULL /*Not used */ ,
- ap->a_command, ap->a_data);
+ error = HandleIoctl(tvc, ap->a_command, ap->a_data);
AFS_GUNLOCK();
return (error);
} else {
* struct vnode *a_vp;
* int a_events;
* struct ucred *a_cred;
- * struct proc *a_p;
+ * struct thread *td;
* } */ *ap;
{
/*
* struct vnode *a_vp;
* int a_fflags;
* struct ucred *a_cred;
- * struct proc *a_p;
+ * struct thread *td;
* } */ *ap;
{
return (EINVAL);
afs_vop_fsync(ap)
struct vop_fsync_args /* {
* struct vnode *a_vp;
- * struct ucred *a_cred;
* int a_waitfor;
- * struct proc *a_p;
+ * struct thread *td;
* } */ *ap;
{
int error;
- register struct vnode *vp = ap->a_vp;
+ struct vnode *vp = ap->a_vp;
AFS_GLOCK();
/*vflushbuf(vp, wait); */
* } */ *ap;
{
int error = 0;
- register struct vnode *vp = ap->a_vp;
- register struct vnode *dvp = ap->a_dvp;
+ struct vnode *vp = ap->a_vp;
+ struct vnode *dvp = ap->a_dvp;
GETNAME();
AFS_GLOCK();
* } */ *ap;
{
int error = 0;
- register struct vnode *dvp = ap->a_tdvp;
- register struct vnode *vp = ap->a_vp;
-#ifdef AFS_FBSD50_ENV
+ struct vnode *dvp = ap->a_tdvp;
+ struct vnode *vp = ap->a_vp;
struct thread *p = ap->a_cnp->cn_thread;
-#else
- struct proc *p = ap->a_cnp->cn_proc;
-#endif
GETNAME();
if (dvp->v_mount != vp->v_mount) {
error = EISDIR;
goto out;
}
- if ((error = vn_lock(vp, LK_EXCLUSIVE, p)) != 0) {
+ if ((error = ma_vn_lock(vp, LK_EXCLUSIVE, p)) != 0) {
goto out;
}
AFS_GLOCK();
error = afs_link(VTOAFS(vp), VTOAFS(dvp), name, cnp->cn_cred);
AFS_GUNLOCK();
if (dvp != vp)
- VOP_UNLOCK(vp, 0, p);
+ MA_VOP_UNLOCK(vp, 0, p);
out:
DROPNAME();
return error;
struct componentname *tcnp = ap->a_tcnp;
char *tname;
struct vnode *tvp = ap->a_tvp;
- register struct vnode *tdvp = ap->a_tdvp;
+ struct vnode *tdvp = ap->a_tdvp;
struct vnode *fvp = ap->a_fvp;
- register struct vnode *fdvp = ap->a_fdvp;
-#ifdef AFS_FBSD50_ENV
+ struct vnode *fdvp = ap->a_fdvp;
struct thread *p = fcnp->cn_thread;
-#else
- struct proc *p = fcnp->cn_proc;
-#endif
/*
* Check for cross-device rename.
vput(fvp);
return (error);
}
- if ((error = vn_lock(fvp, LK_EXCLUSIVE, p)) != 0)
+ if ((error = ma_vn_lock(fvp, LK_EXCLUSIVE, p)) != 0)
goto abortit;
MALLOC(fname, char *, fcnp->cn_namelen + 1, M_TEMP, M_WAITOK);
* struct vattr *a_vap;
* } */ *ap;
{
- register struct vnode *dvp = ap->a_dvp;
- register struct vattr *vap = ap->a_vap;
+ struct vnode *dvp = ap->a_dvp;
+ struct vattr *vap = ap->a_vap;
int error = 0;
struct vcache *vcp;
-#ifdef AFS_FBSD50_ENV
struct thread *p = ap->a_cnp->cn_thread;
-#else
- struct proc *p = ap->a_cnp->cn_proc;
-#endif
GETNAME();
#ifdef DIAGNOSTIC
}
if (vcp) {
*ap->a_vpp = AFSTOV(vcp);
- vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY, p);
+ ma_vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY, p);
} else
*ap->a_vpp = 0;
DROPNAME();
* } */ *ap;
{
int error = 0;
- register struct vnode *dvp = ap->a_dvp;
+ struct vnode *dvp = ap->a_dvp;
GETNAME();
AFS_GLOCK();
error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
if (error == 0) {
newvp = AFSTOV(vcp);
-#ifdef AFS_FBSD50_ENV
- vn_lock(newvp, LK_EXCLUSIVE | LK_RETRY, cnp->cn_thread);
-#else
- vn_lock(newvp, LK_EXCLUSIVE | LK_RETRY, cnp->cn_proc);
-#endif
+ ma_vn_lock(newvp, LK_EXCLUSIVE | LK_RETRY, cnp->cn_thread);
}
}
AFS_GUNLOCK();
afs_vop_inactive(ap)
struct vop_inactive_args /* {
* struct vnode *a_vp;
- * struct proc *a_p;
+ * struct thread *td;
* } */ *ap;
{
- register struct vnode *vp = ap->a_vp;
+ struct vnode *vp = ap->a_vp;
if (prtactive && vp->v_usecount != 0)
vprint("afs_vop_inactive(): pushing active", vp);
AFS_GLOCK();
afs_InactiveVCache(VTOAFS(vp), 0); /* decrs ref counts */
AFS_GUNLOCK();
- VOP_UNLOCK(vp, 0, ap->a_p);
+#ifndef AFS_FBSD60_ENV
+ MA_VOP_UNLOCK(vp, 0, ap->a_td);
+#endif
return 0;
}
AFS_GLOCK();
if (!haveVlock)
ObtainWriteLock(&afs_xvcache, 901);
-#ifndef AFS_DISCON_ENV
- code = afs_FlushVCache(avc, &slept); /* tosses our stuff from vnode */
-#else
/* reclaim the vnode and the in-memory vcache, but keep the on-disk vcache */
- code = afs_FlushVS(avc);
-#endif
+ code = afs_FlushVCache(avc, &slept);
if (!haveVlock)
ReleaseWriteLock(&afs_xvcache);
if (!haveGlock)
* XXX Pretend it worked, to prevent panic on shutdown
* Garrett, please fix - Jim Rees
*/
- if (code)
- printf("afs_vop_reclaim: afs_FlushVCache failed code %d\n", code);
-#ifdef AFS_FBSD60_ENV
- else
- vnode_destroy_vobject(vp);
-#endif
+ if (code) {
+ printf("afs_vop_reclaim: afs_FlushVCache failed code %d vnode\n", code);
+ VOP_PRINT(vp);
+ }
+
+ /* basically, it must not fail */
+ vnode_destroy_vobject(vp);
+ vp->v_data = 0;
+
return 0;
}
{
int error;
AFS_GLOCK();
- error = afs_ustrategy(ap->a_bp);
+ error = afs_ustrategy(ap->a_bp, osi_curcred());
AFS_GUNLOCK();
return error;
}
* struct vnode *a_vp;
* } */ *ap;
{
- register struct vnode *vp = ap->a_vp;
- register struct vcache *vc = VTOAFS(ap->a_vp);
- int s = vc->states;
-
-#ifdef AFS_FBSD50_ENV
- printf("tag %s, fid: %d.%x.%x.%x, opens %d, writers %d", vp->v_tag,
- (int)vc->fid.Cell, (u_int) vc->fid.Fid.Volume,
- (u_int) vc->fid.Fid.Vnode, (u_int) vc->fid.Fid.Unique, vc->opens,
- vc->execsOrWriters);
-#else
- printf("tag %d, fid: %ld.%x.%x.%x, opens %d, writers %d", vp->v_tag,
- vc->fid.Cell, (u_int) vc->fid.Fid.Volume,
- (u_int) vc->fid.Fid.Vnode, (u_int) vc->fid.Fid.Unique, vc->opens,
+ struct vnode *vp = ap->a_vp;
+ struct vcache *vc = VTOAFS(ap->a_vp);
+ int s = vc->f.states;
+
+ printf("tag %s, fid: %d.%d.%d.%d, opens %d, writers %d", vp->v_tag,
+ (int)vc->f.fid.Cell, (u_int) vc->f.fid.Fid.Volume,
+ (u_int) vc->f.fid.Fid.Vnode, (u_int) vc->f.fid.Fid.Unique, vc->opens,
vc->execsOrWriters);
-#endif
printf("\n states%s%s%s%s%s", (s & CStatd) ? " statd" : "",
(s & CRO) ? " readonly" : "", (s & CDirty) ? " dirty" : "",
(s & CMAPPED) ? " mapped" : "",