/*
 * Vnode lock/unlock wrappers.  FreeBSD 8.0 removed the thread argument
 * from vn_lock()/VOP_UNLOCK(); older (5.x+) releases still take
 * curthread, so hide the API difference behind these macros.
 */
#if defined(AFS_FBSD80_ENV)
#define lock_vnode(v)	vn_lock((v), LK_EXCLUSIVE | LK_RETRY)
#define unlock_vnode(v)	VOP_UNLOCK((v), 0)
#else
#define lock_vnode(v)	vn_lock((v), LK_EXCLUSIVE | LK_RETRY, curthread)
#define unlock_vnode(v)	VOP_UNLOCK((v), 0, curthread)
#endif
/* Try to discard pages, in order to recycle a vcache entry. */
{
    struct vm_object *obj;
    struct vnode *vp;

    /* Someone else still holds a reference; not safe to recycle. */
    if (VREFCOUNT(avc) > 1) {
	return EBUSY;
    }

    /* XXX
     * The value of avc->opens here came to be, at some point,
     * typically -1.  This was caused by incorrectly performing afs_close
     * processing on vnodes being recycled */
    if (avc->opens) {
	return EBUSY;
    }

    /* if a lock is held, give up */
    if (CheckLock(&avc->lock)) {
	return EBUSY;
    }

    return(0);
do {
    anyio = 0;
    if (VOP_GETVOBJECT(vp, &obj) == 0 && (obj->flags & OBJ_MIGHTBEDIRTY)) {
	/* Take a vnode reference/lock before touching its VM object;
	 * NOTE(review): assumes 5.x+ vget() signature (curthread) — the
	 * pre-5.0 curproc/LK_NOOBJ variant was dropped by the patch. */
	if (!vget(vp, LK_EXCLUSIVE | LK_RETRY, curthread)) {
	    if (VOP_GETVOBJECT(vp, &obj) == 0) {
		VM_OBJECT_LOCK(obj);
		vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
{
    struct vnode *vp;
    int tries, code;
    int islocked;

    SPLVAR;

    vp = AFSTOV(avc);

    /* A doomed vnode is already being reclaimed; nothing to do. */
    VI_LOCK(vp);
    if (vp->v_iflag & VI_DOOMED) {
	VI_UNLOCK(vp);
	USERPRI;
	return;
    }
    VI_UNLOCK(vp);

    /*
     * We need the vnode exclusively locked for the flush below.  If
     * someone else owns the lock we cannot proceed; if we hold it
     * shared, upgrade; if unlocked, take it — and undo whatever we did
     * on the way out.
     */
    islocked = VOP_ISLOCKED(vp);
    if (islocked == LK_EXCLOTHER)
	panic("Trying to Smush over someone else's lock");
    else if (islocked == LK_SHARED) {
	afs_warn("Trying to Smush with a shared lock");
	vn_lock(vp, LK_UPGRADE);
    } else if (!islocked)
	vn_lock(vp, LK_EXCLUSIVE);

    if (vp->v_bufobj.bo_object != NULL) {
	VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
	/*
	 * Do we really want OBJPC_SYNC? OBJPC_INVAL would be
	 * faster, if invalidation is really what we are being
	 * asked to do.  (It would make more sense, too, since
	 * otherwise this function is practically identical to
	 * osi_VM_StoreAllSegments().) -GAW
	 */
	/*
	 * Dunno.  We no longer resemble osi_VM_StoreAllSegments,
	 * though maybe that's wrong, now.  And OBJPC_SYNC is the
	 * common thing in 70 file systems, it seems.  Matt.
	 */
	vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
	VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
    }

    /* Flush and discard buffers; retry a bounded number of times. */
    tries = 5;
    code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
    while (code && (tries > 0)) {
	afs_warn("TryToSmush retrying vinvalbuf");
	code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
	--tries;
    }

    /* Restore the caller's original lock state. */
    if (islocked == LK_SHARED)
	vn_lock(vp, LK_DOWNGRADE);
    else if (!islocked)
	VOP_UNLOCK(vp, 0);
    USERPRI;
}
vm_object_page_remove(obj, 0, 0, FALSE);
	VM_OBJECT_UNLOCK(obj);
    }
    /* Discard any buffers still associated with the vnode. */
    osi_vinvalbuf(vp, 0, 0, 0);
}
/* Purge pages beyond end-of-file, when truncating a file.