#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/unistd.h>
-#ifndef AFS_FBSD50_ENV
-#include <vm/vm_zone.h>
+#if __FreeBSD_version >= 1000030
+#include <sys/rwlock.h>
#endif
#include <vm/vm_page.h>
#include <vm/vm_object.h>
static vop_mknod_t afs_vop_mknod;
static vop_open_t afs_vop_open;
static vop_pathconf_t afs_vop_pathconf;
-static vop_poll_t afs_vop_poll;
static vop_print_t afs_vop_print;
static vop_putpages_t afs_vop_putpages;
static vop_read_t afs_vop_read;
.vop_mknod = afs_vop_mknod,
.vop_open = afs_vop_open,
.vop_pathconf = afs_vop_pathconf,
- .vop_poll = afs_vop_poll,
.vop_print = afs_vop_print,
.vop_putpages = afs_vop_putpages,
.vop_read = afs_vop_read,
int afs_vop_putpages(struct vop_putpages_args *);
int afs_vop_ioctl(struct vop_ioctl_args *);
static int afs_vop_pathconf(struct vop_pathconf_args *);
-int afs_vop_poll(struct vop_poll_args *);
-#ifndef AFS_FBSD50_ENV
-int afs_vop_mmap(struct vop_mmap_args *);
-#endif
int afs_vop_fsync(struct vop_fsync_args *);
int afs_vop_remove(struct vop_remove_args *);
int afs_vop_link(struct vop_link_args *);
{&vop_access_desc, (vop_t *) afs_vop_access}, /* access */
{&vop_advlock_desc, (vop_t *) afs_vop_advlock}, /* advlock */
{&vop_bmap_desc, (vop_t *) afs_vop_bmap}, /* bmap */
-#ifndef AFS_FBSD50_ENV
- {&vop_bwrite_desc, (vop_t *) vop_stdbwrite},
-#endif
{&vop_close_desc, (vop_t *) afs_vop_close}, /* close */
{&vop_createvobject_desc, (vop_t *) vop_stdcreatevobject},
{&vop_destroyvobject_desc, (vop_t *) vop_stddestroyvobject},
{&vop_lookup_desc, (vop_t *) afs_vop_lookup}, /* lookup */
{&vop_mkdir_desc, (vop_t *) afs_vop_mkdir}, /* mkdir */
{&vop_mknod_desc, (vop_t *) afs_vop_mknod}, /* mknod */
-#ifndef AFS_FBSD50_ENV
- {&vop_mmap_desc, (vop_t *) afs_vop_mmap}, /* mmap */
-#endif
{&vop_open_desc, (vop_t *) afs_vop_open}, /* open */
{&vop_pathconf_desc, (vop_t *) afs_vop_pathconf}, /* pathconf */
- {&vop_poll_desc, (vop_t *) afs_vop_poll}, /* select */
+ {&vop_poll_desc, (vop_t *) vop_nopoll}, /* select */
{&vop_print_desc, (vop_t *) afs_vop_print}, /* print */
{&vop_read_desc, (vop_t *) afs_vop_read}, /* read */
{&vop_readdir_desc, (vop_t *) afs_vop_readdir}, /* readdir */
#define DROPNAME() FREE(name, M_TEMP)
-/* This is a bit of a cheat... */
-#ifdef AFS_FBSD50_ENV
-#define a_p a_td
+/*
+ * Here we define compatibility functions/macros for interfaces that
+ * have changed between different FreeBSD versions.
+ */
+#if defined(AFS_FBSD90_ENV)
+static __inline void ma_vm_page_lock_queues(void) {};
+static __inline void ma_vm_page_unlock_queues(void) {};
+static __inline void ma_vm_page_lock(vm_page_t m) { vm_page_lock(m); };
+static __inline void ma_vm_page_unlock(vm_page_t m) { vm_page_unlock(m); };
+#else
+static __inline void ma_vm_page_lock_queues(void) { vm_page_lock_queues(); };
+static __inline void ma_vm_page_unlock_queues(void) { vm_page_unlock_queues(); };
+static __inline void ma_vm_page_lock(vm_page_t m) {};
+static __inline void ma_vm_page_unlock(vm_page_t m) {};
#endif
#if defined(AFS_FBSD80_ENV)
#define MA_VOP_UNLOCK(vp, flags, p) (VOP_UNLOCK(vp, flags, p))
#endif
+#if defined(AFS_FBSD70_ENV)
+#define MA_PCPU_INC(c) PCPU_INC(c)
+#define MA_PCPU_ADD(c, n) PCPU_ADD(c, n)
+#else
+#define MA_PCPU_INC(c) PCPU_LAZY_INC(c)
+#define MA_PCPU_ADD(c, n) (c) += (n)
+#endif
+
+#if __FreeBSD_version >= 1000030
+#define AFS_VM_OBJECT_WLOCK(o) VM_OBJECT_WLOCK(o)
+#define AFS_VM_OBJECT_WUNLOCK(o) VM_OBJECT_WUNLOCK(o)
+#else
+#define AFS_VM_OBJECT_WLOCK(o) VM_OBJECT_LOCK(o)
+#define AFS_VM_OBJECT_WUNLOCK(o) VM_OBJECT_UNLOCK(o)
+#endif
+
#ifdef AFS_FBSD70_ENV
#ifndef AFS_FBSD80_ENV
/* From kern_lock.c */
int error;
struct vcache *vcp;
struct vnode *vp, *dvp;
- register int flags = ap->a_cnp->cn_flags;
+ int flags = ap->a_cnp->cn_flags;
int lockparent; /* 1 => lockparent flag is set */
int wantparent; /* 1 => wantparent or lockparent flag */
-#ifdef AFS_FBSD50_ENV
+#ifndef AFS_FBSD80_ENV
struct thread *p = ap->a_cnp->cn_thread;
-#else
- struct proc *p = ap->a_cnp->cn_proc;
#endif
dvp = ap->a_dvp;
lockparent = flags & LOCKPARENT;
wantparent = flags & (LOCKPARENT | WANTPARENT);
-#ifdef AFS_FBSD80_ENV
+#if __FreeBSD_version < 1000021
cnp->cn_flags |= MPSAFE; /* steel */
#endif
-#ifndef AFS_FBSD70_ENV
if (flags & ISDOTDOT)
- VOP_UNLOCK(dvp, 0, p);
-#endif
+ MA_VOP_UNLOCK(dvp, 0, p);
AFS_GLOCK();
error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
* we also always return the vnode locked. */
if (flags & ISDOTDOT) {
+ /* vp before dvp since we go root to leaf, and .. comes first */
ma_vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ ma_vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
/* always return the child locked */
if (lockparent && (flags & ISLASTCN)
&& (error = ma_vn_lock(dvp, LK_EXCLUSIVE, p))) {
MA_VOP_UNLOCK(dvp, 0, p); /* done with parent. */
#endif
}
- ma_vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ ma_vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, p);
/* always return the child locked */
}
*ap->a_vpp = vp;
{
int error = 0;
struct vcache *vcp;
- register struct vnode *dvp = ap->a_dvp;
-#ifdef AFS_FBSD50_ENV
+ struct vnode *dvp = ap->a_dvp;
+#ifndef AFS_FBSD80_ENV
struct thread *p = ap->a_cnp->cn_thread;
-#else
- struct proc *p = ap->a_cnp->cn_proc;
#endif
GETNAME();
* struct vnode *a_vp;
* int a_mode;
* struct ucred *a_cred;
- * struct proc *a_p;
+ * struct thread *a_td;
+ * struct file *a_fp;
* } */ *ap;
{
int error;
* struct vnode *a_vp;
* int a_fflag;
* struct ucred *a_cred;
- * struct proc *a_p;
+ * struct thread *a_td;
* } */ *ap;
{
- int code;
- struct vcache *avc = VTOAFS(ap->a_vp);
+ int code, iflag;
+ struct vnode *vp = ap->a_vp;
+ struct vcache *avc = VTOAFS(vp);
+
+#if defined(AFS_FBSD80_ENV)
+ VI_LOCK(vp);
+ iflag = vp->v_iflag & VI_DOOMED;
+ VI_UNLOCK(vp);
+ if (iflag & VI_DOOMED) {
+	/* osi_FlushVCache (correctly) calls vgone() on recycled vnodes, so we
+	 * don't have an afs_close to process in that case */
+ if (avc->opens != 0)
+ panic("afs_vop_close: doomed vnode %p has vcache %p with non-zero opens %d\n",
+ vp, avc, avc->opens);
+ return 0;
+ }
+#endif
+
AFS_GLOCK();
if (ap->a_cred)
code = afs_close(avc, ap->a_fflag, ap->a_cred);
else
code = afs_close(avc, ap->a_fflag, afs_osi_credp);
- osi_FlushPages(avc, ap->a_cred); /* hold bozon lock, but not basic vnode lock */
+ osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
AFS_GUNLOCK();
return code;
}
afs_vop_access(ap)
struct vop_access_args /* {
* struct vnode *a_vp;
- * int a_mode;
+ * accmode_t a_accmode;
* struct ucred *a_cred;
- * struct proc *a_p;
+ * struct thread *a_td;
* } */ *ap;
{
int code;
* struct vnode *a_vp;
* struct vattr *a_vap;
* struct ucred *a_cred;
- * struct proc *a_p;
* } */ *ap;
{
int code;
+
AFS_GLOCK();
code = afs_getattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);
AFS_GUNLOCK();
+
return code;
}
* struct vnode *a_vp;
* struct vattr *a_vap;
* struct ucred *a_cred;
- * struct proc *a_p;
* } */ *ap;
{
int code;
int code;
struct vcache *avc = VTOAFS(ap->a_vp);
AFS_GLOCK();
- osi_FlushPages(avc, ap->a_cred); /* hold bozon lock, but not basic vnode lock */
- code = afs_read(avc, ap->a_uio, ap->a_cred, 0, 0, 0);
+ osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
+ code = afs_read(avc, ap->a_uio, ap->a_cred, 0);
AFS_GUNLOCK();
return code;
}
* struct vnode *a_vp;
* vm_page_t *a_m;
* int a_count;
- * int a_reqpage;
- * vm_oofset_t a_offset;
+ * int *a_rbehind;
+ * int *a_rahead;
* };
*/
int
afs_vop_getpages(struct vop_getpages_args *ap)
{
int code;
- int i, nextoff, size, toff, npages;
+ int i, nextoff, size, toff, npages, count;
struct uio uio;
struct iovec iov;
struct buf *bp;
vm_offset_t kva;
vm_object_t object;
+ vm_page_t *pages;
struct vnode *vp;
struct vcache *avc;
-#ifdef AFS_FBSD50_ENV
- GIANT_REQUIRED;
-#endif
+ memset(&uio, 0, sizeof(uio));
+ memset(&iov, 0, sizeof(iov));
+
vp = ap->a_vp;
avc = VTOAFS(vp);
+ pages = ap->a_m;
+#ifdef FBSD_VOP_GETPAGES_BUSIED
+ npages = ap->a_count;
+ if (ap->a_rbehind)
+ *ap->a_rbehind = 0;
+ if (ap->a_rahead)
+ *ap->a_rahead = 0;
+#else
+ npages = btoc(ap->a_count);
+#endif
+
if ((object = vp->v_object) == NULL) {
printf("afs_getpages: called with non-merged cache vnode??\n");
return VM_PAGER_ERROR;
}
- npages = btoc(ap->a_count);
+
/*
* If the requested page is partially valid, just return it and
* allow the pager to zero-out the blanks. Partially valid pages
* can only occur at the file EOF.
*/
-
{
- vm_page_t m = ap->a_m[ap->a_reqpage];
-
-#ifdef AFS_FBSD50_ENV
- VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
-#endif
+#ifdef FBSD_VOP_GETPAGES_BUSIED
+ AFS_VM_OBJECT_WLOCK(object);
+ ma_vm_page_lock_queues();
+ if(pages[npages - 1]->valid != 0) {
+ if (--npages == 0) {
+ ma_vm_page_unlock_queues();
+ AFS_VM_OBJECT_WUNLOCK(object);
+ return (VM_PAGER_OK);
+ }
+ }
+#else
+ vm_page_t m = pages[ap->a_reqpage];
+ AFS_VM_OBJECT_WLOCK(object);
+ ma_vm_page_lock_queues();
if (m->valid != 0) {
/* handled by vm_fault now */
/* vm_page_zero_invalid(m, TRUE); */
for (i = 0; i < npages; ++i) {
- if (i != ap->a_reqpage)
- vm_page_free(ap->a_m[i]);
+ if (i != ap->a_reqpage) {
+ ma_vm_page_lock(pages[i]);
+ vm_page_free(pages[i]);
+ ma_vm_page_unlock(pages[i]);
+ }
}
-#ifdef AFS_FBSD50_ENV
- vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(object);
-#endif
+ ma_vm_page_unlock_queues();
+ AFS_VM_OBJECT_WUNLOCK(object);
return (0);
}
-#ifdef AFS_FBSD50_ENV
- vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(object);
#endif
+ ma_vm_page_unlock_queues();
+ AFS_VM_OBJECT_WUNLOCK(object);
}
bp = getpbuf(&afs_pbuf_freecnt);
kva = (vm_offset_t) bp->b_data;
- pmap_qenter(kva, ap->a_m, npages);
-#ifdef AFS_FBSD50_ENV
- cnt.v_vnodein++;
- cnt.v_vnodepgsin += npages;
-#endif
+ pmap_qenter(kva, pages, npages);
+ MA_PCPU_INC(cnt.v_vnodein);
+ MA_PCPU_ADD(cnt.v_vnodepgsin, npages);
+#ifdef FBSD_VOP_GETPAGES_BUSIED
+ count = ctob(npages);
+#else
+ count = ap->a_count;
+#endif
iov.iov_base = (caddr_t) kva;
- iov.iov_len = ap->a_count;
+ iov.iov_len = count;
uio.uio_iov = &iov;
uio.uio_iovcnt = 1;
- uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
- uio.uio_resid = ap->a_count;
+ uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
+ uio.uio_resid = count;
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_rw = UIO_READ;
-#ifdef AFS_FBSD50_ENV
uio.uio_td = curthread;
-#else
- uio.uio_procp = curproc;
-#endif
AFS_GLOCK();
- osi_FlushPages(avc, osi_curcred()); /* hold bozon lock, but not basic vnode lock */
- code = afs_read(avc, &uio, osi_curcred(), 0, 0, 0);
+ osi_FlushPages(avc, osi_curcred()); /* hold GLOCK, but not basic vnode lock */
+ code = afs_read(avc, &uio, osi_curcred(), 0);
AFS_GUNLOCK();
pmap_qremove(kva, npages);
relpbuf(bp, &afs_pbuf_freecnt);
- if (code && (uio.uio_resid == ap->a_count)) {
-#ifdef AFS_FBSD50_ENV
- VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
-#endif
+ if (code && (uio.uio_resid == count)) {
+#ifndef FBSD_VOP_GETPAGES_BUSIED
+ AFS_VM_OBJECT_WLOCK(object);
+ ma_vm_page_lock_queues();
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage)
- vm_page_free(ap->a_m[i]);
+ vm_page_free(pages[i]);
}
-#ifdef AFS_FBSD50_ENV
- vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(object);
+ ma_vm_page_unlock_queues();
+ AFS_VM_OBJECT_WUNLOCK(object);
#endif
return VM_PAGER_ERROR;
}
- size = ap->a_count - uio.uio_resid;
-#ifdef AFS_FBSD50_ENV
- VM_OBJECT_LOCK(object);
- vm_page_lock_queues();
-#endif
+ size = count - uio.uio_resid;
+ AFS_VM_OBJECT_WLOCK(object);
+ ma_vm_page_lock_queues();
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
nextoff = toff + PAGE_SIZE;
- m = ap->a_m[i];
+ m = pages[i];
+ /* XXX not in nfsclient? */
m->flags &= ~PG_ZERO;
if (nextoff <= size) {
* Read operation filled an entire page
*/
m->valid = VM_PAGE_BITS_ALL;
+#ifndef AFS_FBSD80_ENV
vm_page_undirty(m);
+#else
+ KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
+#endif
} else if (size > toff) {
/*
* Read operation filled a partial page.
*/
m->valid = 0;
vm_page_set_validclean(m, 0, size - toff);
- /* handled by vm_fault now */
- /* vm_page_zero_invalid(m, TRUE); */
+ KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
}
+#ifndef FBSD_VOP_GETPAGES_BUSIED
if (i != ap->a_reqpage) {
+#if __FreeBSD_version >= 1000042
+ vm_page_readahead_finish(m);
+#else
/*
* Whether or not to leave the page activated is up in
* the air, but we should put the page on a page queue
*/
if (!code) {
#if defined(AFS_FBSD70_ENV)
- if (m->oflags & VPO_WANTED)
+ if (m->oflags & VPO_WANTED) {
#else
- if (m->flags & PG_WANTED)
+ if (m->flags & PG_WANTED) {
#endif
+ ma_vm_page_lock(m);
vm_page_activate(m);
- else
+ ma_vm_page_unlock(m);
+ }
+ else {
+ ma_vm_page_lock(m);
vm_page_deactivate(m);
+ ma_vm_page_unlock(m);
+ }
vm_page_wakeup(m);
} else {
+ ma_vm_page_lock(m);
vm_page_free(m);
+ ma_vm_page_unlock(m);
}
+#endif /* __FreeBSD_version 1000042 */
}
+#endif /* ndef FBSD_VOP_GETPAGES_BUSIED */
}
-#ifdef AFS_FBSD50_ENV
- vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(object);
-#endif
- return 0;
+ ma_vm_page_unlock_queues();
+ AFS_VM_OBJECT_WUNLOCK(object);
+ return VM_PAGER_OK;
}
int
int code;
struct vcache *avc = VTOAFS(ap->a_vp);
AFS_GLOCK();
- osi_FlushPages(avc, ap->a_cred); /* hold bozon lock, but not basic vnode lock */
+ osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
code =
afs_write(VTOAFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_cred, 0);
AFS_GUNLOCK();
struct vnode *vp;
struct vcache *avc;
-#ifdef AFS_FBSD50_ENV
- GIANT_REQUIRED;
-#endif
+ memset(&uio, 0, sizeof(uio));
+ memset(&iov, 0, sizeof(iov));
vp = ap->a_vp;
avc = VTOAFS(vp);
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, ap->a_m, npages);
-#ifdef AFS_FBSD50_ENV
- cnt.v_vnodeout++;
- cnt.v_vnodepgsout += ap->a_count;
-#endif
+ MA_PCPU_INC(cnt.v_vnodeout);
+ MA_PCPU_ADD(cnt.v_vnodepgsout, ap->a_count);
iov.iov_base = (caddr_t) kva;
iov.iov_len = ap->a_count;
uio.uio_resid = ap->a_count;
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_rw = UIO_WRITE;
-#ifdef AFS_FBSD50_ENV
uio.uio_td = curthread;
-#else
- uio.uio_procp = curproc;
-#endif
sync = IO_VMIO;
if (ap->a_sync & VM_PAGER_PUT_SYNC)
sync |= IO_SYNC;
afs_vop_ioctl(ap)
struct vop_ioctl_args /* {
* struct vnode *a_vp;
- * int a_command;
- * caddr_t a_data;
+ * u_long a_command;
+ * void *a_data;
* int a_fflag;
* struct ucred *a_cred;
- * struct proc *a_p;
+ * struct thread *a_td;
* } */ *ap;
{
struct vcache *tvc = VTOAFS(ap->a_vp);
}
}
-/* ARGSUSED */
-int
-afs_vop_poll(ap)
- struct vop_poll_args /* {
- * struct vnode *a_vp;
- * int a_events;
- * struct ucred *a_cred;
- * struct proc *a_p;
- * } */ *ap;
-{
- /*
- * We should really check to see if I/O is possible.
- */
- return (1);
-}
-
-/*
- * Mmap a file
- *
- * NB Currently unsupported.
- */
-/* ARGSUSED */
-int
-afs_vop_mmap(ap)
- struct vop_mmap_args /* {
- * struct vnode *a_vp;
- * int a_fflags;
- * struct ucred *a_cred;
- * struct proc *a_p;
- * } */ *ap;
-{
- return (EINVAL);
-}
-
int
afs_vop_fsync(ap)
struct vop_fsync_args /* {
* struct vnode *a_vp;
- * struct ucred *a_cred;
* int a_waitfor;
- * struct proc *a_p;
+ * struct thread *td;
* } */ *ap;
{
int error;
- register struct vnode *vp = ap->a_vp;
+ struct vnode *vp = ap->a_vp;
AFS_GLOCK();
/*vflushbuf(vp, wait); */
* } */ *ap;
{
int error = 0;
- register struct vnode *vp = ap->a_vp;
- register struct vnode *dvp = ap->a_dvp;
+ struct vnode *vp = ap->a_vp;
+ struct vnode *dvp = ap->a_dvp;
GETNAME();
AFS_GLOCK();
* } */ *ap;
{
int error = 0;
- register struct vnode *dvp = ap->a_tdvp;
- register struct vnode *vp = ap->a_vp;
-#ifdef AFS_FBSD50_ENV
+ struct vnode *dvp = ap->a_tdvp;
+ struct vnode *vp = ap->a_vp;
+#ifndef AFS_FBSD80_ENV
struct thread *p = ap->a_cnp->cn_thread;
-#else
- struct proc *p = ap->a_cnp->cn_proc;
#endif
GETNAME();
error = EISDIR;
goto out;
}
- if ((error = ma_vn_lock(vp, LK_EXCLUSIVE, p)) != 0) {
+ if ((error = ma_vn_lock(vp, LK_CANRECURSE | LK_EXCLUSIVE, p)) != 0) {
goto out;
}
AFS_GLOCK();
struct componentname *tcnp = ap->a_tcnp;
char *tname;
struct vnode *tvp = ap->a_tvp;
- register struct vnode *tdvp = ap->a_tdvp;
+ struct vnode *tdvp = ap->a_tdvp;
struct vnode *fvp = ap->a_fvp;
- register struct vnode *fdvp = ap->a_fdvp;
-#ifdef AFS_FBSD50_ENV
+ struct vnode *fdvp = ap->a_fdvp;
+#ifndef AFS_FBSD80_ENV
struct thread *p = fcnp->cn_thread;
-#else
- struct proc *p = fcnp->cn_proc;
#endif
/*
* struct vattr *a_vap;
* } */ *ap;
{
- register struct vnode *dvp = ap->a_dvp;
- register struct vattr *vap = ap->a_vap;
+ struct vnode *dvp = ap->a_dvp;
+ struct vattr *vap = ap->a_vap;
int error = 0;
struct vcache *vcp;
-#ifdef AFS_FBSD50_ENV
+#ifndef AFS_FBSD80_ENV
struct thread *p = ap->a_cnp->cn_thread;
-#else
- struct proc *p = ap->a_cnp->cn_proc;
#endif
GETNAME();
* } */ *ap;
{
int error = 0;
- register struct vnode *dvp = ap->a_dvp;
+ struct vnode *dvp = ap->a_dvp;
GETNAME();
AFS_GLOCK();
newvp = NULL;
error =
- afs_symlink(VTOAFS(dvp), name, ap->a_vap, ap->a_target, cnp->cn_cred);
+ afs_symlink(VTOAFS(dvp), name, ap->a_vap, ap->a_target, NULL,
+ cnp->cn_cred);
if (error == 0) {
error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
if (error == 0) {
newvp = AFSTOV(vcp);
-#ifdef AFS_FBSD50_ENV
ma_vn_lock(newvp, LK_EXCLUSIVE | LK_RETRY, cnp->cn_thread);
-#else
- ma_vn_lock(newvp, LK_EXCLUSIVE | LK_RETRY, cnp->cn_proc);
-#endif
}
}
AFS_GUNLOCK();
return error;
}
-extern int prtactive;
-
int
afs_vop_inactive(ap)
struct vop_inactive_args /* {
* struct vnode *a_vp;
- * struct proc *a_p;
+ * struct thread *td;
* } */ *ap;
{
- register struct vnode *vp = ap->a_vp;
-
- if (prtactive && vp->v_usecount != 0)
- vprint("afs_vop_inactive(): pushing active", vp);
+ struct vnode *vp = ap->a_vp;
AFS_GLOCK();
afs_InactiveVCache(VTOAFS(vp), 0); /* decrs ref counts */
AFS_GUNLOCK();
-#ifndef AFS_FBSD80_ENV
- MA_VOP_UNLOCK(vp, 0, ap->a_p);
+#ifndef AFS_FBSD60_ENV
+ MA_VOP_UNLOCK(vp, 0, ap->a_td);
#endif
return 0;
}
AFS_GLOCK();
if (!haveVlock)
ObtainWriteLock(&afs_xvcache, 901);
-#ifndef AFS_DISCON_ENV
- code = afs_FlushVCache(avc, &slept); /* tosses our stuff from vnode */
-#else
/* reclaim the vnode and the in-memory vcache, but keep the on-disk vcache */
- code = afs_FlushVS(avc);
-#endif
+ code = afs_FlushVCache(avc, &slept);
+
+ if (avc->f.states & CVInit) {
+ avc->f.states &= ~CVInit;
+ afs_osi_Wakeup(&avc->f.states);
+ }
+
if (!haveVlock)
ReleaseWriteLock(&afs_xvcache);
if (!haveGlock)
AFS_GUNLOCK();
- /*
- * XXX Pretend it worked, to prevent panic on shutdown
- * Garrett, please fix - Jim Rees
- */
- if (code)
- printf("afs_vop_reclaim: afs_FlushVCache failed code %d\n", code);
+ if (code) {
+ afs_warn("afs_vop_reclaim: afs_FlushVCache failed code %d vnode\n", code);
+ VOP_PRINT(vp);
+ }
/* basically, it must not fail */
vnode_destroy_vobject(vp);
* struct vnode *a_vp;
* } */ *ap;
{
- register struct vnode *vp = ap->a_vp;
- register struct vcache *vc = VTOAFS(ap->a_vp);
+ struct vnode *vp = ap->a_vp;
+ struct vcache *vc = VTOAFS(ap->a_vp);
int s = vc->f.states;
- printf("tag %s, fid: %d.%d.%d.%d, opens %d, writers %d", vp->v_tag,
+ printf("vc %p vp %p tag %s, fid: %d.%d.%d.%d, opens %d, writers %d", vc, vp, vp->v_tag,
(int)vc->f.fid.Cell, (u_int) vc->f.fid.Fid.Volume,
(u_int) vc->f.fid.Fid.Vnode, (u_int) vc->f.fid.Fid.Unique, vc->opens,
vc->execsOrWriters);
* int a_flags;
* } */ *ap;
{
- int error;
+ int error, a_op;
struct ucred cr = *osi_curcred();
+ a_op = ap->a_op;
+ if (a_op == F_UNLCK) {
+ /*
+ * When a_fl->type is F_UNLCK, FreeBSD passes in an a_op of F_UNLCK.
+ * This is (confusingly) different than how you actually release a lock
+ * with fcntl(), which is done with an a_op of F_SETLK and an l_type of
+ * F_UNLCK. Pretend we were given an a_op of F_SETLK in this case,
+ * since this is what afs_lockctl expects.
+ */
+ a_op = F_SETLK;
+ }
+
AFS_GLOCK();
error =
- afs_lockctl(VTOAFS(ap->a_vp), ap->a_fl, ap->a_op, &cr, (int)ap->a_id);
+ afs_lockctl(VTOAFS(ap->a_vp),
+ ap->a_fl,
+ a_op, &cr,
+ (int)(intptr_t)ap->a_id); /* XXX: no longer unique! */
AFS_GUNLOCK();
return error;
}