#include "afs/sysincludes.h" /*Standard vendor system headers */
#include "afsincludes.h" /*AFS-based standard headers */
+#if defined(AFS_FBSD80_ENV)
+#define ma_vn_lock(vp, flags, p) (vn_lock(vp, flags))
+#define MA_VOP_LOCK(vp, flags, p) (VOP_LOCK(vp, flags))
+#define MA_VOP_UNLOCK(vp, flags, p) (VOP_UNLOCK(vp, flags))
+#else
+#define ma_vn_lock(vp, flags, p) (vn_lock(vp, flags, p))
+#define MA_VOP_LOCK(vp, flags, p) (VOP_LOCK(vp, flags, p))
+#define MA_VOP_UNLOCK(vp, flags, p) (VOP_UNLOCK(vp, flags, p))
+#endif
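+/*
+ * Compatibility shims: FreeBSD 8.0 dropped the struct thread argument
+ * from vn_lock(), VOP_LOCK() and VOP_UNLOCK().  Call sites keep passing
+ * a thread pointer, e.g.
+ *
+ *     ma_vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
+ *     MA_VOP_UNLOCK(vp, 0, curthread);
+ *
+ * and on 8.0 and later the macros simply discard it, so the same code
+ * builds against either kernel API.
+ */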
+
int
osi_TryEvictVCache(struct vcache *avc, int *slept) {
void
osi_AttachVnode(struct vcache *avc, int seq) {
struct vnode *vp;
+ struct thread *p = curthread;
ReleaseWriteLock(&afs_xvcache);
AFS_GUNLOCK();
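+ /*
+  * The FreeBSD vnode calls below can sleep, so the AFS global lock is
+  * dropped across them and retaken once the vnode is set up.
+  */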
#ifdef AFS_FBSD70_ENV
/* XXX verified on 80--TODO check on 7x */
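+ /*
+  * From FreeBSD 7 on, getnewvnode() does not attach the new vnode to a
+  * mount point; insmntque() has to do that and expects the vnode to be
+  * exclusively locked.  The thread argument passed to the ma_* wrappers
+  * is only used by the pre-8.0 locking API.
+  */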
if (!vp->v_mount) {
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* !glocked */
+ ma_vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p); /* !glocked */
insmntque(vp, afs_globalVFS);
- VOP_UNLOCK(vp, 0);
+ MA_VOP_UNLOCK(vp, 0, p);
}
#endif
AFS_GLOCK();
#endif
#if defined(AFS_FBSD80_ENV)
-#define lock_vnode(v) vn_lock((v), LK_EXCLUSIVE | LK_RETRY)
+#define lock_vnode(v, f) vn_lock((v), (f))
#define ilock_vnode(v) vn_lock((v), LK_INTERLOCK|LK_EXCLUSIVE|LK_RETRY);
#define unlock_vnode(v) VOP_UNLOCK((v), 0)
+#define islocked_vnode(v) VOP_ISLOCKED((v))
#else
-#define lock_vnode(v) vn_lock((v), LK_EXCLUSIVE | LK_RETRY, curthread)
+#define lock_vnode(v, f) vn_lock((v), (f), curthread)
#define ilock_vnode(v) vn_lock((v), LK_INTERLOCK|LK_EXCLUSIVE|LK_RETRY, curthread);
#define unlock_vnode(v) VOP_UNLOCK((v), 0, curthread)
+#define islocked_vnode(v) VOP_ISLOCKED((v), curthread)
#endif
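+/*
+ * lock_vnode() now takes the lock flags explicitly so callers can
+ * request LK_UPGRADE or LK_DOWNGRADE as well as LK_EXCLUSIVE, e.g.
+ *
+ *     lock_vnode(vp, LK_EXCLUSIVE | LK_RETRY);
+ *
+ * islocked_vnode() wraps VOP_ISLOCKED(), whose curthread argument was
+ * also removed in FreeBSD 8.0.
+ */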
/* Try to discard pages, in order to recycle a vcache entry.
}
VI_UNLOCK(vp);
- islocked = VOP_ISLOCKED(vp);
+ islocked = islocked_vnode(vp);
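+ /*
+  * Note the current lock state so it can be restored afterwards:
+  * LK_EXCLOTHER means another thread holds the lock exclusively,
+  * a shared lock is upgraded for the flush, and an unlocked vnode is
+  * locked exclusively for the duration.
+  */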
if (islocked == LK_EXCLOTHER)
panic("Trying to Smush over someone else's lock");
else if (islocked == LK_SHARED) {
afs_warn("Trying to Smush with a shared lock");
- vn_lock(vp, LK_UPGRADE);
+ lock_vnode(vp, LK_UPGRADE);
} else if (!islocked)
- vn_lock(vp, LK_EXCLUSIVE);
+ lock_vnode(vp, LK_EXCLUSIVE);
if (vp->v_bufobj.bo_object != NULL) {
VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
--tries;
}
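+ /* Put the vnode lock back the way it was found. */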
if (islocked == LK_SHARED)
- vn_lock(vp, LK_DOWNGRADE);
+ lock_vnode(vp, LK_DOWNGRADE);
else if (!islocked)
- VOP_UNLOCK(vp, 0);
+ unlock_vnode(vp);
}
/* Purge VM for a file when its callback is revoked.
/*
 * Read operation filled a partial page.
 */
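+ /*
+  * Only the first (size - toff) bytes of the page were filled by the
+  * read.  vm_page_set_validclean() marks just that range valid and
+  * clean and, unlike vm_page_set_valid(), also exists on pre-8.0
+  * kernels, so no AFS_FBSD80_ENV conditional is needed here.
+  */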
m->valid = 0;
- vm_page_set_valid(m, 0, size - toff);
-#ifndef AFS_FBSD80_ENV
- vm_page_undirty(m);
-#else
+ vm_page_set_validclean(m, 0, size - toff);
KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
-#endif
}
if (i != ap->a_reqpage) {