* osi_VM_Truncate(avc, alen, acred)
*/
-#include "../afs/param.h" /* Should be always first */
-#include "../afs/sysincludes.h" /* Standard vendor system headers */
-#include "../afs/afsincludes.h" /* Afs-based standard headers */
-#include "../afs/afs_stats.h" /* statistics */
-/* #include <vm/vm_ubc.h> */
+#include <afsconfig.h>
+#include "afs/param.h"
+#ifdef AFS_FBSD70_ENV
+#include <sys/param.h>
+#include <sys/vnode.h>
+ void
+ vgonel(struct vnode *vp, struct thread *td);
+#endif
+
+
+#include "afs/sysincludes.h" /* Standard vendor system headers */
+#include "afsincludes.h" /* Afs-based standard headers */
+#include "afs/afs_stats.h" /* statistics */
+#include <vm/vm_object.h>
+#include <vm/vm_map.h>
#include <limits.h>
#include <float.h>
+/*
+ * FreeBSD implementation notes:
+ * Most of these operations require us to frob vm_objects. Most
+ * functions require that the object be locked (with VM_OBJECT_LOCK)
+ * on entry and leave it locked on exit. In order to get the
+ * vm_object itself we call VOP_GETVOBJECT on the vnode; the
+ * locking protocol requires that we do so with the heavy vnode lock
+ * held and the vnode interlock unlocked, and it returns the same
+ * way.
+ *
+ * The locking protocol for vnodes is defined in
+ * kern/vnode_if.src and sys/vnode.h; the locking is still a work in
+ * progress, so some fields are (as of 5.1) still protected by Giant
+ * rather than an explicit lock.
+ */
+
+#ifdef AFS_FBSD60_ENV
+#define VOP_GETVOBJECT(vp, objp) (*(objp) = (vp)->v_object)
+#endif
+
+#if defined(AFS_FBSD80_ENV)
+#define lock_vnode(v) vn_lock((v), LK_EXCLUSIVE | LK_RETRY)
+#define unlock_vnode(v) VOP_UNLOCK((v), 0)
+#elif defined(AFS_FBSD50_ENV)
+#define lock_vnode(v) vn_lock((v), LK_EXCLUSIVE | LK_RETRY, curthread)
+#define unlock_vnode(v) VOP_UNLOCK((v), 0, curthread)
+#else
+#define lock_vnode(v) vn_lock((v), LK_EXCLUSIVE | LK_RETRY, curproc)
+#define unlock_vnode(v) VOP_UNLOCK((v), 0, curproc)
+/* need splvm() protection? */
+#define VM_OBJECT_LOCK(o)
+#define VM_OBJECT_UNLOCK(o)
+#endif
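+
+/*
+ * Illustrative sketch only (kept as a comment): the usual shape of the
+ * vm_object dance described in the notes above, expressed with the
+ * compatibility macros defined here.  frob_object() is a stand-in for a
+ * real operation such as vm_object_page_clean() or vm_object_page_remove().
+ *
+ *     lock_vnode(vp);
+ *     if (VOP_GETVOBJECT(vp, &obj) == 0) {
+ *         VM_OBJECT_LOCK(obj);
+ *         frob_object(obj);
+ *         VM_OBJECT_UNLOCK(obj);
+ *     }
+ *     unlock_vnode(vp);
+ */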
+
/* Try to discard pages, in order to recycle a vcache entry.
*
* We also make some sanity checks: ref count, open count, held locks.
*
* Locking: afs_xvcache lock is held. If it is dropped and re-acquired,
*   *slept should be set to warn the caller.
*
* Formerly, afs_xvcache was dropped and re-acquired for Solaris, but now it
* is not dropped and re-acquired for any platform. It may be that *slept is
* therefore obsolescent.
*
* OSF/1 Locking: VN_LOCK has been called.
+ * We do not lock the vnode here, but instead require that it be exclusively
+ * locked by code calling osi_VM_StoreAllSegments directly, or scheduling it
+ * from the bqueue - Matt
+ * Maybe better to just call vnode_pager_setsize()?
*/
int
-osi_VM_FlushVCache(avc, slept)
- struct vcache *avc;
- int *slept;
+osi_VM_FlushVCache(struct vcache *avc, int *slept)
{
-#ifdef SECRETLY_OSF1
- if (avc->vrefCount > 1)
+ struct vm_object *obj;
+ struct vnode *vp;
+ if (VREFCOUNT(avc) > 1)
return EBUSY;
if (avc->opens)
return EBUSY;
/* if a lock is held, give up */
- if (CheckLock(&avc->lock) || afs_CheckBozonLock(&avc->pvnLock))
+ if (CheckLock(&avc->lock))
return EBUSY;
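+ /*
+  * Currently we always report success here; the page-discarding code
+  * below is kept but is not reached.
+  */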
+ return(0);
+
AFS_GUNLOCK();
- ubc_invalidate(((struct vnode *)avc)->v_object, 0, 0, B_INVAL);
+ vp = AFSTOV(avc);
+#ifndef AFS_FBSD70_ENV
+ lock_vnode(vp);
+#endif
+ if (VOP_GETVOBJECT(vp, &obj) == 0) {
+ VM_OBJECT_LOCK(obj);
+ vm_object_page_remove(obj, 0, 0, FALSE);
+#if 1
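+ /*
+  * If dropping the pages left the object with no references, reclaim
+  * the vnode and re-mark it as an AFS vnode.
+  */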
+ if (obj->ref_count == 0) {
+ simple_lock(&vp->v_interlock);
+ vgonel(vp, curthread);
+ vp->v_tag = VT_AFS;
+ SetAfsVnode(vp);
+ }
+#endif
+ VM_OBJECT_UNLOCK(obj);
+ }
+#ifndef AFS_FBSD70_ENV
+ unlock_vnode(vp);
+#endif
AFS_GLOCK();
-#endif /* SECRETLY_OSF1 */
return 0;
}
-/*
- * osi_ubc_flush_dirty_and_wait -- ensure all dirty pages cleaned
- *
- * Alpha OSF/1 doesn't make it easy to wait for all dirty pages to be cleaned.
- * NFS tries to do this by calling waitforio(), which waits for v_numoutput
- * to go to zero. But that isn't good enough, because afs_putpage() doesn't
- * increment v_numoutput until it has obtained the vcache entry lock. Suppose
- * that Process A, trying to flush a page, is waiting for that lock, and
- * Process B tries to close the file. Process B calls waitforio() which thinks
- * that everything is cool because v_numoutput is still zero. Process B then
- * proceeds to call afs_StoreAllSegments(). Finally when B is finished, A gets
- * to proceed and flush its page. But then it's too late because the file is
- * already closed.
- *
- * (I suspect that waitforio() is not adequate for NFS, just as it isn't
- * adequate for us. But that's not my problem.)
- *
- * The only way we can be sure that there are no more dirty pages is if there
- * are no more pages with pg_busy set. We look for them on the cleanpl.
- *
- * For some reason, ubc_flush_dirty() only looks at the dirtypl, not the
- * dirtywpl. I don't know why this is good enough, but I assume it is. By
- * the same token, I only look for busy pages on the cleanpl, not the cleanwpl.
- *
- * Called with the global lock NOT held.
- */
-void
-osi_ubc_flush_dirty_and_wait(vp, flags)
-struct vnode *vp;
-int flags; {
- int retry;
- vm_page_t pp;
- int first;
-
-#ifdef SECRETLY_OSF1
- do {
- struct vm_ubc_object* vop;
- vop = (struct vm_ubc_object*)(vp->v_object);
- ubc_flush_dirty(vop, flags);
-
- vm_object_lock(vop);
- if (vop->vu_dirtypl)
- /* shouldn't happen, but who knows */
- retry = 1;
- else {
- retry = 0;
- if (vop->vu_cleanpl) {
- for (first = 1, pp = vop->vu_cleanpl;
- first || pp != vop->vu_cleanpl;
- first = 0, pp = pp->pg_onext) {
- if (pp->pg_busy) {
- retry = 1;
- pp->pg_wait = 1;
- assert_wait_mesg((vm_offset_t)pp, FALSE, "pg_wait");
- vm_object_unlock(vop);
- thread_block();
- break;
- }
- }
- }
- if (retry) continue;
- }
- vm_object_unlock(vop);
- } while (retry);
-#endif /* SECRETLY_OSF1 */
-}
-
/* Try to store pages to cache, in order to store a file back to the server.
*
* Locking: the vcache entry's lock is held. It will usually be dropped and
* re-obtained.
*/
void
-osi_VM_StoreAllSegments(avc)
- struct vcache *avc;
+osi_VM_StoreAllSegments(struct vcache *avc)
{
-#ifdef SECRETLY_OSF1
+ struct vnode *vp;
+ struct vm_object *obj;
+ int anyio, tries;
+
ReleaseWriteLock(&avc->lock);
AFS_GUNLOCK();
- osi_ubc_flush_dirty_and_wait((struct vnode *)avc, 0);
+ tries = 5;
+ vp = AFSTOV(avc);
+
+ /*
+ * I don't understand this. Why not just call vm_object_page_clean()
+ * and be done with it? I particularly don't understand why we're calling
+ * vget() here. Is there some reason to believe that the vnode might
+ * be being recycled at this point? I don't think there's any need for
+ * this loop, either -- if we keep the vnode locked all the time,
+ * that and the object lock will prevent any new pages from appearing.
+ * The loop is what causes the race condition. -GAW
+ */
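+ /*
+  * Synchronously clean the object's dirty pages, making up to 'tries'
+  * passes while the object still looks dirty and the previous pass
+  * actually cleaned something.
+  */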
+ do {
+ anyio = 0;
+ if (VOP_GETVOBJECT(vp, &obj) == 0 && (obj->flags & OBJ_MIGHTBEDIRTY)) {
+#ifdef AFS_FBSD50_ENV
+ if (!vget(vp, LK_EXCLUSIVE | LK_RETRY, curthread)) {
+#else
+ if (!vget(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOOBJ, curproc)) {
+#endif
+ if (VOP_GETVOBJECT(vp, &obj) == 0) {
+ VM_OBJECT_LOCK(obj);
+ vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
+ VM_OBJECT_UNLOCK(obj);
+ anyio = 1;
+ }
+ vput(vp);
+ }
+ }
+ } while (anyio && (--tries > 0));
AFS_GLOCK();
- ObtainWriteLock(&avc->lock,94);
-#endif /* SECRETLY_OSF1 */
+ ObtainWriteLock(&avc->lock, 94);
}
/* Try to invalidate pages, for "fs flush" or "fs flushv"; or
 * try to free pages, when deleting a file.
 *
 * Locking: the vcache entry's lock is held. It may be dropped and
 * re-obtained.
 *
 * Since we drop and re-obtain the lock, we can't guarantee that there won't
 * be some pages around when we return, newly created by concurrent activity.
 */
void
-osi_VM_TryToSmush(avc, acred, sync)
- struct vcache *avc;
- struct AFS_UCRED *acred;
- int sync;
+osi_VM_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
{
-#ifdef SECRETLY_OSF1
- ReleaseWriteLock(&avc->lock);
- AFS_GUNLOCK();
- osi_ubc_flush_dirty_and_wait((struct vnode *)avc, 0);
- ubc_invalidate(((struct vnode *)avc)->v_object, 0, 0, B_INVAL);
- AFS_GLOCK();
- ObtainWriteLock(&avc->lock,59);
-#endif /* SECRETLY_OSF1 */
+ struct vnode *vp;
+ int tries, code;
+
+ SPLVAR;
+
+ vp = AFSTOV(avc);
+
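+ /* A doomed vnode is already being reclaimed; nothing to smush. */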
+ if (vp->v_iflag & VI_DOOMED) {
+ USERPRI;
+ return;
+ }
+
+ if (vp->v_bufobj.bo_object != NULL) {
+ VM_OBJECT_LOCK(vp->v_bufobj.bo_object);
+ /*
+ * Do we really want OBJPC_SYNC? OBJPC_INVAL would be
+ * faster, if invalidation is really what we are being
+ * asked to do. (It would make more sense, too, since
+ * otherwise this function is practically identical to
+ * osi_VM_StoreAllSegments().) -GAW
+ */
+
+ /*
+ * Dunno. We no longer resemble osi_VM_StoreAllSegments,
+ * though maybe that's wrong, now. And OBJPC_SYNC is the
+ * common thing in 70 file systems, it seems. Matt.
+ */
+
+ vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
+ VM_OBJECT_UNLOCK(vp->v_bufobj.bo_object);
+ }
+
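+ /*
+  * Write out and discard the buffers, retrying a few times if the
+  * flush fails (the wait is interruptible via PCATCH).
+  */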
+ tries = 5;
+ code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
+ while (code && (tries > 0)) {
+ code = osi_vinvalbuf(vp, V_SAVE, PCATCH, 0);
+ --tries;
+ }
+ USERPRI;
}
/* Purge VM for a file when its callback is revoked.
* Locking: No lock is held, not even the global lock.
*/
void
-osi_VM_FlushPages(avc, credp)
- struct vcache *avc;
- struct AFS_UCRED *credp;
+osi_VM_FlushPages(struct vcache *avc, afs_ucred_t *credp)
{
-#ifdef SECRETLY_OSF1
- ubc_flush_dirty(((struct vnode *)avc)->v_object, 0);
- ubc_invalidate(((struct vnode *)avc)->v_object, 0, 0, B_INVAL);
-#endif /* SECRETLY_OSF1 */
+ struct vnode *vp;
+ struct vm_object *obj;
+
+ vp = AFSTOV(avc);
+ ASSERT_VOP_LOCKED(vp, __func__);
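+ /* Drop every cached page so stale data is not served after the callback break. */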
+ if (VOP_GETVOBJECT(vp, &obj) == 0) {
+ VM_OBJECT_LOCK(obj);
+ vm_object_page_remove(obj, 0, 0, FALSE);
+ VM_OBJECT_UNLOCK(obj);
+ }
+ /*vinvalbuf(AFSTOV(avc),0, NOCRED, curproc, 0,0); */
}
/* Purge pages beyond end-of-file, when truncating a file.
 *
 * Locking: no lock is held, not even the global lock.
 * activeV is raised. This is supposed to block pageins, but at present
 * it only works on Solaris.
 */
void
-osi_VM_Truncate(avc, alen, acred)
- struct vcache *avc;
- int alen;
- struct AFS_UCRED *acred;
+osi_VM_Truncate(struct vcache *avc, int alen, afs_ucred_t *acred)
{
-#ifdef SECRETLY_OSF1
- ubc_invalidate(((struct vnode *)avc)->v_object, alen,
- MAXINT - alen, B_INVAL);
-#endif /* SECRETLY_OSF1 */
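+ /* Tell the VM layer the new length; cached pages beyond the new EOF are discarded. */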
+ vnode_pager_setsize(AFSTOV(avc), alen);
}