#include "afsincludes.h" /*AFS-based standard headers */
int
-osi_TryEvictVCache(struct vcache *avc, int *slept) {
- struct vnode *vp = AFSTOV(avc);
+osi_TryEvictVCache(struct vcache *avc, int *slept, int defersleep)
+{
+ struct vnode *vp;
+ int code;
+ int evicted = 0;
- if (!VREFCOUNT_GT(avc,0)
- && avc->opens == 0 && (avc->f.states & CUnlinkedDel) == 0) {
- /*
- * vgone() reclaims the vnode, which calls afs_FlushVCache(),
- * then it puts the vnode on the free list.
- * If we don't do this we end up with a cleaned vnode that's
- * not on the free list.
- * XXX assume FreeBSD is the same for now.
- */
+ vp = AFSTOV(avc);
+
+ if (!VI_TRYLOCK(vp))
+ return evicted;
+ code = osi_fbsd_checkinuse(avc);
+ if (code != 0) {
+ VI_UNLOCK(vp);
+ return evicted;
+ }
+
+ if ((vp->v_iflag & VI_DOOMED) != 0) {
+ VI_UNLOCK(vp);
+ evicted = 1;
+ return evicted;
+ }
+
+ vholdl(vp);
+
+ ReleaseWriteLock(&afs_xvcache);
+ AFS_GUNLOCK();
+
+ *slept = 1;
+
+ if (vn_lock(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT) == 0) {
/*
- * We only have one caller (afs_ShakeLooseVCaches), which already
- * holds the write lock. vgonel() sometimes calls VOP_CLOSE(),
- * so we must drop the write lock around our call to vgone().
+ * vrecycle() will vgone() only if its usecount is 0. If someone grabbed a
+ * new usecount ref just now, the vgone() will be skipped, and vrecycle
+ * will return 0.
*/
- ReleaseWriteLock(&afs_xvcache);
- AFS_GUNLOCK();
- *slept = 1;
-
-#if defined(AFS_FBSD80_ENV)
- /* vgone() is correct, but v_usecount is assumed not
- * to be 0, and I suspect that currently our usage ensures that
- * in fact it will */
- if (vrefcnt(vp) < 1) {
- vref(vp);
+ if (vrecycle(vp) != 0) {
+ evicted = 1;
}
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* !glocked */
-#endif
- vgone(vp);
-#if defined(AFS_FBSD80_ENV)
VOP_UNLOCK(vp, 0);
-#endif
-
- AFS_GLOCK();
- ObtainWriteLock(&afs_xvcache, 340);
- return 1;
}
- return 0;
+
+ vdrop(vp);
+
+ AFS_GLOCK();
+ ObtainWriteLock(&afs_xvcache, 340);
+
+ return evicted;
}
struct vcache *
-osi_NewVnode(void) {
+osi_NewVnode(void)
+{
struct vcache *tvc;
- tvc = (struct vcache *)afs_osi_Alloc(sizeof(struct vcache));
+ tvc = afs_osi_Alloc(sizeof(struct vcache));
tvc->v = NULL; /* important to clean this, or use memset 0 */
return tvc;
}
void
-osi_PrePopulateVCache(struct vcache *avc) {
+osi_PrePopulateVCache(struct vcache *avc)
+{
memset(avc, 0, sizeof(struct vcache));
}
/*
 * Allocate a fresh FreeBSD vnode and attach it to the vcache.
 * NOTE(review): this hunk still carries unresolved diff markers, and the
 * function body appears truncated — the `if (avc->v != NULL)` branch is
 * empty and no attach of vp to avc->v (nor a function-closing brace) is
 * visible before the next definition. Recover the missing tail from the
 * upstream file before building.
 */
void
-osi_AttachVnode(struct vcache *avc, int seq) {
+osi_AttachVnode(struct vcache *avc, int seq)
+{
    struct vnode *vp;
    /* getnewvnode/insmntque may sleep; drop the AFS locks first. */
    ReleaseWriteLock(&afs_xvcache);
    AFS_GUNLOCK();
-#if defined(AFS_FBSD60_ENV)
    if (getnewvnode(MOUNT_AFS, afs_globalVFS, &afs_vnodeops, &vp))
-#else
-    if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
-#endif
	panic("afs getnewvnode");	/* can't happen */
-#ifdef AFS_FBSD70_ENV
    /* XXX verified on 80--TODO check on 7x */
    if (!vp->v_mount) {
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* !glocked */
	insmntque(vp, afs_globalVFS);
	VOP_UNLOCK(vp, 0);
    }
-#endif
    AFS_GLOCK();
    ObtainWriteLock(&afs_xvcache,339);
    /* NOTE(review): body of this branch (race-lost handling) is missing
     * in this chunk — do not assume it is intentionally empty. */
    if (avc->v != NULL) {
    }
void
-osi_PostPopulateVCache(struct vcache *avc) {
+osi_PostPopulateVCache(struct vcache *avc)
+{
avc->v->v_mount = afs_globalVFS;
vSetType(avc, VREG);
}
+int
+osi_vnhold(struct vcache *avc)
+{
+ struct vnode *vp = AFSTOV(avc);
+
+ VI_LOCK(vp);
+ if ((vp->v_iflag & VI_DOOMED) != 0) {
+ VI_UNLOCK(vp);
+ return ENOENT;
+ }
+
+ vrefl(AFSTOV(avc));
+ VI_UNLOCK(vp);
+ return 0;
+}