osi_TryEvictVCache(struct vcache *avc, int *slept) {
struct vnode *vp = AFSTOV(avc);
- if (!VREFCOUNT_GT(avc,0)
- && avc->opens == 0 && (avc->f.states & CUnlinkedDel) == 0) {
- /*
- * vgone() reclaims the vnode, which calls afs_FlushVCache(),
- * then it puts the vnode on the free list.
- * If we don't do this we end up with a cleaned vnode that's
- * not on the free list.
- * XXX assume FreeBSD is the same for now.
- */
- /*
- * We only have one caller (afs_ShakeLooseVCaches), which already
- * holds the write lock. vgonel() sometimes calls VOP_CLOSE(),
- * so we must drop the write lock around our call to vgone().
- */
- ReleaseWriteLock(&afs_xvcache);
- AFS_GUNLOCK();
- *slept = 1;
-
-#if defined(AFS_FBSD80_ENV)
- /* vgone() is correct, but vgonel() panics if v_usecount is 0--
- * this is particularly confusing since vgonel() will trigger
- * vop_reclaim, in the call path of which we'll check v_usecount
- * and decide that the vnode is busy. Splat. */
- if (vrefcnt(vp) < 1)
- vref(vp);
-
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* !glocked */
-#endif
- vgone(vp);
-#if defined(AFS_FBSD80_ENV)
- VOP_UNLOCK(vp, 0);
-#endif
-
- AFS_GLOCK();
- ObtainWriteLock(&afs_xvcache, 340);
+ /*
+ * essentially all we want to do here is check that the
+ * vcache is not in use, then call vgone() (which will call
+ * inactive and reclaim as needed). This requires some
+ * kind of complicated locking, which we already need to implement
+ * for FlushVCache, so just call that routine here and check
+ * its return value for whether the vcache was evict-able.
+ */
+ if (osi_VM_FlushVCache(avc, slept) != 0)
+ return 0;
+ else
return 1;
- }
- return 0;
}
struct vcache *
#include <afsconfig.h>
#include "afs/param.h"
-#ifdef AFS_FBSD70_ENV
#include <sys/param.h>
#include <sys/vnode.h>
- void
- vgonel(struct vnode *vp, struct thread *td);
-#endif
#include "afs/sysincludes.h" /* Standard vendor system headers */
#if defined(AFS_FBSD80_ENV)
#define lock_vnode(v) vn_lock((v), LK_EXCLUSIVE | LK_RETRY)
+/* lock the vnode while also releasing its (already-held) interlock */
+#define ilock_vnode(v) vn_lock((v), LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY)
#define unlock_vnode(v) VOP_UNLOCK((v), 0)
#else
#define lock_vnode(v) vn_lock((v), LK_EXCLUSIVE | LK_RETRY, curthread)
+/* lock the vnode while also releasing its (already-held) interlock */
+#define ilock_vnode(v) vn_lock((v), LK_INTERLOCK | LK_EXCLUSIVE | LK_RETRY, curthread)
#define unlock_vnode(v) VOP_UNLOCK((v), 0, curthread)
#endif
* is not dropped and re-acquired for any platform. It may be that *slept is
* therefore obsolescent.
*
- * OSF/1 Locking: VN_LOCK has been called.
- * We do not lock the vnode here, but instead require that it be exclusive
- * locked by code calling osi_VM_StoreAllSegments directly, or scheduling it
- * from the bqueue - Matt
- * Maybe better to just call vnode_pager_setsize()?
*/
int
osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
struct vm_object *obj;
- struct vnode *vp;
- if (VREFCOUNT(avc) > 1) {
+ struct vnode *vp = AFSTOV(avc);
+
+ if (!VI_TRYLOCK(vp)) /* need interlock to check usecount */
+ return EBUSY;
+
+ if (vp->v_usecount > 0) {
+ VI_UNLOCK(vp);
return EBUSY;
}
* typically -1. This was caused by incorrectly performing afs_close
* processing on vnodes being recycled */
if (avc->opens) {
+ VI_UNLOCK(vp);
return EBUSY;
}
/* if a lock is held, give up */
if (CheckLock(&avc->lock)) {
+ VI_UNLOCK(vp);
return EBUSY;
}
- return(0);
+ if ((vp->v_iflag & VI_DOOMED) != 0) {
+ VI_UNLOCK(vp);
+ return (0);
+ }
+ /* must hold the vnode before calling vgone()
+ * This code largely copied from vfs_subr.c:vlrureclaim() */
+ vholdl(vp);
AFS_GUNLOCK();
- vp = AFSTOV(avc);
-#ifndef AFS_FBSD70_ENV
- lock_vnode(vp);
-#endif
- if (VOP_GETVOBJECT(vp, &obj) == 0) {
- VM_OBJECT_LOCK(obj);
- vm_object_page_remove(obj, 0, 0, FALSE);
-#if 1
- if (obj->ref_count == 0) {
- simple_lock(&vp->v_interlock);
- vgonel(vp, curthread);
- vp->v_tag = VT_AFS;
- SetAfsVnode(vp);
- }
-#endif
- VM_OBJECT_UNLOCK(obj);
- }
-#ifndef AFS_FBSD70_ENV
+ *slept = 1;
+ /* use the interlock while locking, so no one else can DOOM this */
+ ilock_vnode(vp);
+ vgone(vp);
unlock_vnode(vp);
-#endif
- AFS_GLOCK();
+ vdrop(vp);
+ AFS_GLOCK();
return 0;
}