#include "afs/afs_stats.h" /* statistics */
#include <vm/vm_object.h>
#include <vm/vm_map.h>
-#include <limits.h>
-#include <float.h>
+#include <sys/limits.h>
+#if __FreeBSD_version >= 1000030
+#include <sys/rwlock.h>
+#endif
/*
* FreeBSD implementation notes:
* Most of these operations require us to frob vm_objects. Most
- * functions require that the object be locked (with VM_OBJECT_LOCK)
- * on entry and leave it locked on exit. In order to get the
- * vm_object itself we call VOP_GETVOBJECT on the vnode; the
- * locking protocol requires that we do so with the heavy vnode lock
- * held and the vnode interlock unlocked, and it returns the same
- * way.
+ * functions require that the object be locked (with VM_OBJECT_*LOCK)
+ * on entry and leave it locked on exit. The locking protocol
+ * requires that we access vp->v_object with the heavy vnode lock
+ * held and the vnode interlock unlocked.
*
* The locking protocol for vnodes is defined in
- * kern/vnode_if.src and sys/vnode.h; the locking is still a work in
- * progress, so some fields are (as of 5.1) still protected by Giant
- * rather than an explicit lock.
+ * kern/vnode_if.src and sys/vnode.h; unfortunately, it is not *quite*
+ * constant from version to version so to be properly correct we must
+ * check the VCS history of those files.
*/
-#ifdef AFS_FBSD60_ENV
-#define VOP_GETVOBJECT(vp, objp) (*(objp) = (vp)->v_object)
-#endif
-
#if defined(AFS_FBSD80_ENV)
-#define lock_vnode(v) vn_lock((v), LK_EXCLUSIVE | LK_RETRY)
-#define ilock_vnode(v) vn_lock((v), LK_INTERLOCK|LK_EXCLUSIVE|LK_RETRY);
+#define lock_vnode(v, f) vn_lock((v), (f))
+#define ilock_vnode(v) vn_lock((v), LK_INTERLOCK|LK_EXCLUSIVE|LK_RETRY)
#define unlock_vnode(v) VOP_UNLOCK((v), 0)
+#define islocked_vnode(v) VOP_ISLOCKED((v))
#else
-#define lock_vnode(v) vn_lock((v), LK_EXCLUSIVE | LK_RETRY, curthread)
-#define ilock_vnode(v) vn_lock((v), LK_INTERLOCK|LK_EXCLUSIVE|LK_RETRY, curthread);
+#define lock_vnode(v, f) vn_lock((v), (f), curthread)
+#define ilock_vnode(v) vn_lock((v), LK_INTERLOCK|LK_EXCLUSIVE|LK_RETRY, curthread)
#define unlock_vnode(v) VOP_UNLOCK((v), 0, curthread)
+#define islocked_vnode(v) VOP_ISLOCKED((v), curthread)
+#endif
+
+#if __FreeBSD_version >= 1000030
+#define AFS_VM_OBJECT_WLOCK(o) VM_OBJECT_WLOCK(o)
+#define AFS_VM_OBJECT_WUNLOCK(o) VM_OBJECT_WUNLOCK(o)
+#else
+#define AFS_VM_OBJECT_WLOCK(o) VM_OBJECT_LOCK(o)
+#define AFS_VM_OBJECT_WUNLOCK(o) VM_OBJECT_UNLOCK(o)
#endif
/* Try to discard pages, in order to recycle a vcache entry. */
int
osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
- struct vnode *vp = AFSTOV(avc);
-
- if (!VI_TRYLOCK(vp)) /* need interlock to check usecount */
- return EBUSY;
+ struct vnode *vp;
+ int code;
- if (vp->v_usecount > 0) {
- VI_UNLOCK(vp);
- return EBUSY;
- }
+ vp = AFSTOV(avc);
- /* XXX
- * The value of avc->opens here came to be, at some point,
- * typically -1. This was caused by incorrectly performing afs_close
- * processing on vnodes being recycled */
- if (avc->opens) {
- VI_UNLOCK(vp);
+ if (!VI_TRYLOCK(vp))
return EBUSY;
- }
-
- /* if a lock is held, give up */
- if (CheckLock(&avc->lock)) {
+ code = osi_fbsd_checkinuse(avc);
+ if (code) {
VI_UNLOCK(vp);
- return EBUSY;
+ return code;
}
if ((vp->v_iflag & VI_DOOMED) != 0) {
VI_UNLOCK(vp);
- return (0);
+ return 0;
}
/* must hold the vnode before calling vgone()
*/
do {
anyio = 0;
- if (VOP_GETVOBJECT(vp, &obj) == 0 && (obj->flags & OBJ_MIGHTBEDIRTY)) {
+
+ obj = vp->v_object;
+ if (obj != NULL && obj->flags & OBJ_MIGHTBEDIRTY) {
if (!vget(vp, LK_EXCLUSIVE | LK_RETRY, curthread)) {
- if (VOP_GETVOBJECT(vp, &obj) == 0) {
- VM_OBJECT_LOCK(obj);
+ obj = vp->v_object;
+ if (obj != NULL) {
+ AFS_VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(obj);
+ AFS_VM_OBJECT_WUNLOCK(obj);
anyio = 1;
}
vput(vp);
}
VI_UNLOCK(vp);
- islocked = VOP_ISLOCKED(vp);
+ islocked = islocked_vnode(vp);
if (islocked == LK_EXCLOTHER)
panic("Trying to Smush over someone else's lock");
else if (islocked == LK_SHARED) {
afs_warn("Trying to Smush with a shared lock");
- vn_lock(vp, LK_UPGRADE);
+ lock_vnode(vp, LK_UPGRADE);
} else if (!islocked)
- vn_lock(vp, LK_EXCLUSIVE);
+ lock_vnode(vp, LK_EXCLUSIVE);
if (vp->v_bufobj.bo_object != NULL) {
- VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
+ AFS_VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
/*
* Do we really want OBJPC_SYNC? OBJPC_INVAL would be
 * faster, if invalidation is really what we are being asked to do.
*/
vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
- VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
+ AFS_VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
}
tries = 5;
--tries;
}
if (islocked == LK_SHARED)
- vn_lock(vp, LK_DOWNGRADE);
+ lock_vnode(vp, LK_DOWNGRADE);
else if (!islocked)
- VOP_UNLOCK(vp, 0);
+ unlock_vnode(vp);
}
/* Purge VM for a file when its callback is revoked. */
vp = AFSTOV(avc);
ASSERT_VOP_LOCKED(vp, __func__);
- if (VOP_GETVOBJECT(vp, &obj) == 0) {
- VM_OBJECT_LOCK(obj);
+ obj = vp->v_object;
+ if (obj != NULL) {
+ AFS_VM_OBJECT_WLOCK(obj);
vm_object_page_remove(obj, 0, 0, FALSE);
- VM_OBJECT_UNLOCK(obj);
+ AFS_VM_OBJECT_WUNLOCK(obj);
}
osi_vinvalbuf(vp, 0, 0, 0);
}