#include <sys/malloc.h>
#include <sys/namei.h>
#include <sys/unistd.h>
+#if __FreeBSD_version >= 1000030
+#include <sys/rwlock.h>
+#endif
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
static vop_mknod_t afs_vop_mknod;
static vop_open_t afs_vop_open;
static vop_pathconf_t afs_vop_pathconf;
-static vop_poll_t afs_vop_poll;
static vop_print_t afs_vop_print;
static vop_putpages_t afs_vop_putpages;
static vop_read_t afs_vop_read;
.vop_mknod = afs_vop_mknod,
.vop_open = afs_vop_open,
.vop_pathconf = afs_vop_pathconf,
- .vop_poll = afs_vop_poll,
.vop_print = afs_vop_print,
.vop_putpages = afs_vop_putpages,
.vop_read = afs_vop_read,
int afs_vop_putpages(struct vop_putpages_args *);
int afs_vop_ioctl(struct vop_ioctl_args *);
static int afs_vop_pathconf(struct vop_pathconf_args *);
-int afs_vop_poll(struct vop_poll_args *);
int afs_vop_fsync(struct vop_fsync_args *);
int afs_vop_remove(struct vop_remove_args *);
int afs_vop_link(struct vop_link_args *);
{&vop_mknod_desc, (vop_t *) afs_vop_mknod}, /* mknod */
{&vop_open_desc, (vop_t *) afs_vop_open}, /* open */
{&vop_pathconf_desc, (vop_t *) afs_vop_pathconf}, /* pathconf */
- {&vop_poll_desc, (vop_t *) afs_vop_poll}, /* select */
+ {&vop_poll_desc, (vop_t *) vop_nopoll}, /* select */
{&vop_print_desc, (vop_t *) afs_vop_print}, /* print */
{&vop_read_desc, (vop_t *) afs_vop_read}, /* read */
{&vop_readdir_desc, (vop_t *) afs_vop_readdir}, /* readdir */
#define MA_PCPU_ADD(c, n) (c) += (n)
#endif
+#if __FreeBSD_version >= 1000030
+#define AFS_VM_OBJECT_WLOCK(o) VM_OBJECT_WLOCK(o)
+#define AFS_VM_OBJECT_WUNLOCK(o) VM_OBJECT_WUNLOCK(o)
+#else
+#define AFS_VM_OBJECT_WLOCK(o) VM_OBJECT_LOCK(o)
+#define AFS_VM_OBJECT_WUNLOCK(o) VM_OBJECT_UNLOCK(o)
+#endif
+
#ifdef AFS_FBSD70_ENV
#ifndef AFS_FBSD80_ENV
/* From kern_lock.c */
lockparent = flags & LOCKPARENT;
wantparent = flags & (LOCKPARENT | WANTPARENT);
-#ifdef AFS_FBSD80_ENV
+#if __FreeBSD_version < 1000021
cnp->cn_flags |= MPSAFE; /* steel */
#endif
-#ifndef AFS_FBSD70_ENV
if (flags & ISDOTDOT)
- VOP_UNLOCK(dvp, 0, p);
-#endif
+ MA_VOP_UNLOCK(dvp, 0, p);
AFS_GLOCK();
error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
* we also always return the vnode locked. */
if (flags & ISDOTDOT) {
- MA_VOP_UNLOCK(dvp, 0, p);
+ /* vp before dvp since we go root to leaf, and .. comes first */
ma_vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
ma_vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
/* always return the child locked */
code = afs_close(avc, ap->a_fflag, ap->a_cred);
else
code = afs_close(avc, ap->a_fflag, afs_osi_credp);
- osi_FlushPages(avc, ap->a_cred); /* hold bozon lock, but not basic vnode lock */
+ osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
AFS_GUNLOCK();
return code;
}
int code;
struct vcache *avc = VTOAFS(ap->a_vp);
AFS_GLOCK();
- osi_FlushPages(avc, ap->a_cred); /* hold bozon lock, but not basic vnode lock */
- code = afs_read(avc, ap->a_uio, ap->a_cred, 0, 0, 0);
+ osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
+ code = afs_read(avc, ap->a_uio, ap->a_cred, 0);
AFS_GUNLOCK();
return code;
}
struct vnode *vp;
struct vcache *avc;
+ memset(&uio, 0, sizeof(uio));
+ memset(&iov, 0, sizeof(iov));
+
vp = ap->a_vp;
avc = VTOAFS(vp);
if ((object = vp->v_object) == NULL) {
{
vm_page_t m = ap->a_m[ap->a_reqpage];
- VM_OBJECT_LOCK(object);
+ AFS_VM_OBJECT_WLOCK(object);
ma_vm_page_lock_queues();
if (m->valid != 0) {
/* handled by vm_fault now */
}
}
ma_vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(object);
+ AFS_VM_OBJECT_WUNLOCK(object);
return (0);
}
ma_vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(object);
+ AFS_VM_OBJECT_WUNLOCK(object);
}
bp = getpbuf(&afs_pbuf_freecnt);
uio.uio_td = curthread;
AFS_GLOCK();
- osi_FlushPages(avc, osi_curcred()); /* hold bozon lock, but not basic vnode lock */
- code = afs_read(avc, &uio, osi_curcred(), 0, 0, 0);
+ osi_FlushPages(avc, osi_curcred()); /* hold GLOCK, but not basic vnode lock */
+ code = afs_read(avc, &uio, osi_curcred(), 0);
AFS_GUNLOCK();
pmap_qremove(kva, npages);
relpbuf(bp, &afs_pbuf_freecnt);
if (code && (uio.uio_resid == ap->a_count)) {
- VM_OBJECT_LOCK(object);
+ AFS_VM_OBJECT_WLOCK(object);
ma_vm_page_lock_queues();
for (i = 0; i < npages; ++i) {
if (i != ap->a_reqpage)
vm_page_free(ap->a_m[i]);
}
ma_vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(object);
+ AFS_VM_OBJECT_WUNLOCK(object);
return VM_PAGER_ERROR;
}
size = ap->a_count - uio.uio_resid;
- VM_OBJECT_LOCK(object);
+ AFS_VM_OBJECT_WLOCK(object);
ma_vm_page_lock_queues();
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
vm_page_t m;
* Read operation filled a partial page.
*/
m->valid = 0;
- vm_page_set_valid(m, 0, size - toff);
-#ifndef AFS_FBSD80_ENV
- vm_page_undirty(m);
-#else
+ vm_page_set_validclean(m, 0, size - toff);
KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
-#endif
}
if (i != ap->a_reqpage) {
+#if __FreeBSD_version >= 1000042
+ vm_page_readahead_finish(m);
+#else
/*
* Whether or not to leave the page activated is up in
* the air, but we should put the page on a page queue
vm_page_free(m);
ma_vm_page_unlock(m);
}
+#endif /* __FreeBSD_version >= 1000042 */
}
}
ma_vm_page_unlock_queues();
- VM_OBJECT_UNLOCK(object);
+ AFS_VM_OBJECT_WUNLOCK(object);
return 0;
}
int code;
struct vcache *avc = VTOAFS(ap->a_vp);
AFS_GLOCK();
- osi_FlushPages(avc, ap->a_cred); /* hold bozon lock, but not basic vnode lock */
+ osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
code =
afs_write(VTOAFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_cred, 0);
AFS_GUNLOCK();
struct vnode *vp;
struct vcache *avc;
+ memset(&uio, 0, sizeof(uio));
+ memset(&iov, 0, sizeof(iov));
+
vp = ap->a_vp;
avc = VTOAFS(vp);
/* Perhaps these two checks should just be KASSERTs instead... */
}
}
-/* ARGSUSED */
-int
-afs_vop_poll(ap)
- struct vop_poll_args /* {
- * struct vnode *a_vp;
- * int a_events;
- * struct ucred *a_cred;
- * struct thread *td;
- * } */ *ap;
-{
- /*
- * We should really check to see if I/O is possible.
- */
- return (1);
-}
-
-/*
- * Mmap a file
- *
- * NB Currently unsupported.
- */
-/* ARGSUSED */
-int
-afs_vop_mmap(ap)
- struct vop_mmap_args /* {
- * struct vnode *a_vp;
- * int a_fflags;
- * struct ucred *a_cred;
- * struct thread *td;
- * } */ *ap;
-{
- return (EINVAL);
-}
-
int
afs_vop_fsync(ap)
struct vop_fsync_args /* {
newvp = NULL;
error =
- afs_symlink(VTOAFS(dvp), name, ap->a_vap, ap->a_target, cnp->cn_cred);
+ afs_symlink(VTOAFS(dvp), name, ap->a_vap, ap->a_target, NULL,
+ cnp->cn_cred);
if (error == 0) {
error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
if (error == 0) {
return error;
}
-extern int prtactive;
-
int
afs_vop_inactive(ap)
struct vop_inactive_args /* {
{
struct vnode *vp = ap->a_vp;
- if (prtactive && vp->v_usecount != 0)
- vprint("afs_vop_inactive(): pushing active", vp);
-
AFS_GLOCK();
afs_InactiveVCache(VTOAFS(vp), 0); /* decrs ref counts */
AFS_GUNLOCK();
ObtainWriteLock(&afs_xvcache, 901);
/* reclaim the vnode and the in-memory vcache, but keep the on-disk vcache */
code = afs_FlushVCache(avc, &slept);
+
+ if (avc->f.states & CVInit) {
+ avc->f.states &= ~CVInit;
+ afs_osi_Wakeup(&avc->f.states);
+ }
+
if (!haveVlock)
ReleaseWriteLock(&afs_xvcache);
if (!haveGlock)
AFS_GLOCK();
error =
- afs_lockctl(VTOAFS(ap->a_vp), ap->a_fl, ap->a_op, &cr, (int)ap->a_id);
+ afs_lockctl(VTOAFS(ap->a_vp),
+ ap->a_fl,
+ ap->a_op, &cr,
+ (int)(intptr_t)ap->a_id); /* XXX: no longer unique! */
AFS_GUNLOCK();
return error;
}