#include "afsincludes.h" /*AFS-based standard headers */
int
-osi_TryEvictVCache(struct vcache *avc, int *slept) {
- if (!VREFCOUNT_GT(avc,0)
- && avc->opens == 0 && (avc->f.states & CUnlinkedDel) == 0) {
- /*
- * vgone() reclaims the vnode, which calls afs_FlushVCache(),
- * then it puts the vnode on the free list.
- * If we don't do this we end up with a cleaned vnode that's
- * not on the free list.
- * XXX assume FreeBSD is the same for now.
- */
- AFS_GUNLOCK();
-
-#if defined(AFS_FBSD80_ENV)
- /* vgone() is correct, but v_usecount is assumed not
- * to be 0, and I suspect that currently our usage ensures that
- * in fact it will */
- if (vrefcnt(AFSTOV(tvc)) < 1) {
- vref(AFSTOV(tvc));
- }
- vn_lock(AFSTOV(tvc), LK_EXCLUSIVE | LK_RETRY); /* !glocked */
-#endif
-
- vgone(AFSTOV(avc));
-#if defined(AFS_FBSD80_ENV)
- VOP_UNLOCK(AFSTOV(tvc), 0);
-#endif
-
- AFS_GLOCK();
+osi_TryEvictVCache(struct vcache *avc, int *slept, int defersleep)
+{
+ struct vnode *vp;
+ int code;
+
+ vp = AFSTOV(avc);
+
+ if (!VI_TRYLOCK(vp))
+ return 0;
+ code = osi_fbsd_checkinuse(avc);
+ if (code != 0) {
+ VI_UNLOCK(vp);
+ return 0;
+ }
+
+ if ((vp->v_iflag & VI_DOOMED) != 0) {
+ VI_UNLOCK(vp);
return 1;
}
- return 0;
+
+ /* must hold the vnode before calling vgone()
+ * This code largely copied from vfs_subr.c:vlrureclaim() */
+ vholdl(vp);
+
+ ReleaseWriteLock(&afs_xvcache);
+ AFS_GUNLOCK();
+
+ *slept = 1;
+ /* use the interlock while locking, so no one else can DOOM this */
+ vn_lock(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_RETRY);
+ vgone(vp);
+ VOP_UNLOCK(vp, 0);
+ vdrop(vp);
+
+ AFS_GLOCK();
+ ObtainWriteLock(&afs_xvcache, 340);
+ return 1;
}
/*
 * Allocate a fresh, unattached vcache entry.
 *
 * The associated vnode is NOT allocated here; osi_AttachVnode() obtains it
 * later via getnewvnode().  Only the v pointer is cleared so that the
 * lost-the-race check in the attach path works; the rest of the structure
 * is left for the caller to initialize.
 *
 * \return the newly allocated vcache (afs_osi_Alloc is assumed to either
 *         succeed or panic — TODO confirm; the result is not NULL-checked)
 */
struct vcache *
osi_NewVnode(void)
{
    struct vcache *tvc;

    tvc = afs_osi_Alloc(sizeof(struct vcache));
    tvc->v = NULL; /* important to clean this, or use memset 0 */

    return tvc;
}
ReleaseWriteLock(&afs_xvcache);
AFS_GUNLOCK();
-#if defined(AFS_FBSD60_ENV)
if (getnewvnode(MOUNT_AFS, afs_globalVFS, &afs_vnodeops, &vp))
-#elif defined(AFS_FBSD50_ENV)
- if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
-#else
- if (getnewvnode(VT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
-#endif
panic("afs getnewvnode"); /* can't happen */
-#ifdef AFS_FBSD70_ENV
/* XXX verified on 80--TODO check on 7x */
if (!vp->v_mount) {
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* !glocked */
insmntque(vp, afs_globalVFS);
VOP_UNLOCK(vp, 0);
}
-#endif
AFS_GLOCK();
ObtainWriteLock(&afs_xvcache,339);
if (avc->v != NULL) {
* mutual exclusion (an Embryonic flag would suffice).
* -GAW */
afs_warn("afs_NewVCache: lost the race\n");
- return (avc);
+ return;
}
avc->v = vp;
avc->v->v_data = avc;