#include "afs/sysincludes.h" /*Standard vendor system headers */
#include "afsincludes.h" /*AFS-based standard headers */
-int
-osi_TryEvictVCache(struct vcache *avc, int *slept) {
- if (!VREFCOUNT_GT(avc,0)
- && avc->opens == 0 && (avc->f.states & CUnlinkedDel) == 0) {
- /*
- * vgone() reclaims the vnode, which calls afs_FlushVCache(),
- * then it puts the vnode on the free list.
- * If we don't do this we end up with a cleaned vnode that's
- * not on the free list.
- * XXX assume FreeBSD is the same for now.
- */
- AFS_GUNLOCK();
-
#if defined(AFS_FBSD80_ENV)
- /* vgone() is correct, but v_usecount is assumed not
- * to be 0, and I suspect that currently our usage ensures that
- * in fact it will */
- if (vrefcnt(AFSTOV(tvc)) < 1) {
- vref(AFSTOV(tvc));
- }
- vn_lock(AFSTOV(tvc), LK_EXCLUSIVE | LK_RETRY); /* !glocked */
+#define ma_vn_lock(vp, flags, p) (vn_lock(vp, flags))
+#define MA_VOP_LOCK(vp, flags, p) (VOP_LOCK(vp, flags))
+#define MA_VOP_UNLOCK(vp, flags, p) (VOP_UNLOCK(vp, flags))
+#else
+#define ma_vn_lock(vp, flags, p) (vn_lock(vp, flags, p))
+#define MA_VOP_LOCK(vp, flags, p) (VOP_LOCK(vp, flags, p))
+#define MA_VOP_UNLOCK(vp, flags, p) (VOP_UNLOCK(vp, flags, p))
#endif
- vgone(AFSTOV(avc));
-#if defined(AFS_FBSD80_ENV)
- VOP_UNLOCK(AFSTOV(tvc), 0);
-#endif
+int
+osi_TryEvictVCache(struct vcache *avc, int *slept, int defersleep)
+{
+ struct vnode *vp;
+ int code;
+
+ vp = AFSTOV(avc);
- AFS_GLOCK();
+ if (!VI_TRYLOCK(vp))
+ return 0;
+ code = osi_fbsd_checkinuse(avc);
+ if (code != 0) {
+ VI_UNLOCK(vp);
+ return 0;
+ }
+
+ if ((vp->v_iflag & VI_DOOMED) != 0) {
+ VI_UNLOCK(vp);
return 1;
}
- return 0;
+
+    /* We must take a hold on the vnode before calling vgone(), so it
+     * cannot be recycled while we sleep waiting for the vnode lock.
+     * This code is largely copied from vfs_subr.c:vlrureclaim(). */
+ vholdl(vp);
+ AFS_GUNLOCK();
+ *slept = 1;
+    /* Pass the interlock into vn_lock() so no other thread can doom
+     * the vnode between our VI_DOOMED check above and vgone(). */
+ ma_vn_lock(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_RETRY, curthread);
+ vgone(vp);
+ MA_VOP_UNLOCK(vp, 0, curthread);
+ vdrop(vp);
+
+ AFS_GLOCK();
+ return 1;
}
struct vcache *
osi_NewVnode(void) {
struct vcache *tvc;
- tvc = (struct vcache *)afs_osi_Alloc(sizeof(struct vcache));
+ tvc = afs_osi_Alloc(sizeof(struct vcache));
tvc->v = NULL; /* important to clean this, or use memset 0 */
return tvc;
void
osi_AttachVnode(struct vcache *avc, int seq) {
struct vnode *vp;
+ struct thread *p = curthread;
ReleaseWriteLock(&afs_xvcache);
AFS_GUNLOCK();
#if defined(AFS_FBSD60_ENV)
if (getnewvnode(MOUNT_AFS, afs_globalVFS, &afs_vnodeops, &vp))
-#elif defined(AFS_FBSD50_ENV)
- if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
#else
- if (getnewvnode(VT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
+ if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
#endif
panic("afs getnewvnode"); /* can't happen */
#ifdef AFS_FBSD70_ENV
/* XXX verified on 80--TODO check on 7x */
if (!vp->v_mount) {
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* !glocked */
+ ma_vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); /* !glocked */
insmntque(vp, afs_globalVFS);
- VOP_UNLOCK(vp, 0);
+ MA_VOP_UNLOCK(vp, 0, p);
}
#endif
AFS_GLOCK();
* mutual exclusion (an Embryonic flag would suffice).
* -GAW */
afs_warn("afs_NewVCache: lost the race\n");
- return (avc);
+ return;
}
avc->v = vp;
avc->v->v_data = avc;