#include "afs/sysincludes.h" /*Standard vendor system headers */
#include "afsincludes.h" /*AFS-based standard headers */
/*
 * The ma_vn_lock()/MA_VOP_LOCK()/MA_VOP_UNLOCK() compatibility macros
 * were removed: on the FreeBSD releases this file now targets,
 * vn_lock()/VOP_LOCK()/VOP_UNLOCK() no longer take a thread argument,
 * so the code below calls the native interfaces directly.
 */
int
osi_TryEvictVCache(struct vcache *avc, int *slept, int defersleep)
{
struct vnode *vp;
int code;
+ int evicted = 0;
vp = AFSTOV(avc);
if (!VI_TRYLOCK(vp))
- return 0;
+ return evicted;
code = osi_fbsd_checkinuse(avc);
if (code != 0) {
VI_UNLOCK(vp);
- return 0;
+ return evicted;
}
if ((vp->v_iflag & VI_DOOMED) != 0) {
VI_UNLOCK(vp);
- return 1;
+ evicted = 1;
+ return evicted;
}
- /* must hold the vnode before calling vgone()
- * This code largely copied from vfs_subr.c:vlrureclaim() */
vholdl(vp);
+
+ ReleaseWriteLock(&afs_xvcache);
AFS_GUNLOCK();
+
*slept = 1;
- /* use the interlock while locking, so no one else can DOOM this */
- ma_vn_lock(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_RETRY, curthread);
- vgone(vp);
- MA_VOP_UNLOCK(vp, 0, curthread);
+
+ if (vn_lock(vp, LK_INTERLOCK|LK_EXCLUSIVE|LK_NOWAIT) == 0) {
+ /*
+ * vrecycle() will vgone() only if its usecount is 0. If someone grabbed a
+ * new usecount ref just now, the vgone() will be skipped, and vrecycle
+ * will return 0.
+ */
+ if (vrecycle(vp) != 0) {
+ evicted = 1;
+ }
+
+ VOP_UNLOCK(vp, 0);
+ }
+
vdrop(vp);
AFS_GLOCK();
- return 1;
+ ObtainWriteLock(&afs_xvcache, 340);
+
+ return evicted;
}
struct vcache *
-osi_NewVnode(void) {
+osi_NewVnode(void)
+{
struct vcache *tvc;
tvc = afs_osi_Alloc(sizeof(struct vcache));
}
void
-osi_PrePopulateVCache(struct vcache *avc) {
+osi_PrePopulateVCache(struct vcache *avc)
+{
memset(avc, 0, sizeof(struct vcache));
}
/*
 * Attach a freshly allocated FreeBSD vnode to the given vcache.
 *
 * NOTE(review): this block still contains unresolved diff markers
 * ('-'/'+' prefixed lines) and appears truncated — the body of the
 * trailing "if (avc->v != NULL)" check and the function's closing code
 * (presumably the avc->v = vp attachment) are missing.  Reconcile
 * against the upstream version before building; left byte-identical
 * here because a safe reconstruction is not possible from this chunk.
 */
void
-osi_AttachVnode(struct vcache *avc, int seq) {
+osi_AttachVnode(struct vcache *avc, int seq)
+{
struct vnode *vp;
-#if !defined(AFS_FBSD80_ENV)
-struct thread *p = curthread;
-#endif
/* afs_xvcache and the GLOCK are dropped across getnewvnode(), which
 * may sleep. */
ReleaseWriteLock(&afs_xvcache);
AFS_GUNLOCK();
-#if defined(AFS_FBSD60_ENV)
if (getnewvnode(MOUNT_AFS, afs_globalVFS, &afs_vnodeops, &vp))
-#else
-if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
-#endif
panic("afs getnewvnode");	/* can't happen */
-#ifdef AFS_FBSD70_ENV
/* XXX verified on 80--TODO check on 7x */
/* Insert the vnode into the AFS mount's vnode list if getnewvnode()
 * did not already associate it with a mount. */
if (!vp->v_mount) {
-ma_vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); /* !glocked */
+vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* !glocked */
insmntque(vp, afs_globalVFS);
-MA_VOP_UNLOCK(vp, 0, p);
+VOP_UNLOCK(vp, 0);
}
-#endif
AFS_GLOCK();
ObtainWriteLock(&afs_xvcache,339);
if (avc->v != NULL) {
}
void
-osi_PostPopulateVCache(struct vcache *avc) {
+osi_PostPopulateVCache(struct vcache *avc)
+{
avc->v->v_mount = afs_globalVFS;
vSetType(avc, VREG);
}
+int
+osi_vnhold(struct vcache *avc)
+{
+ struct vnode *vp = AFSTOV(avc);
+
+ VI_LOCK(vp);
+ if ((vp->v_iflag & VI_DOOMED) != 0) {
+ VI_UNLOCK(vp);
+ return ENOENT;
+ }
+
+ vrefl(AFSTOV(avc));
+ VI_UNLOCK(vp);
+ return 0;
+}