* therefore obsolescent.
*
* OSF/1 Locking: VN_LOCK has been called.
- * XXX - should FreeBSD have done this, too? Certainly looks like it.
+ * We do not lock the vnode here, but instead require that it be exclusively
+ * locked by code calling osi_VM_StoreAllSegments directly, or scheduling it
+ * from the bqueue - Matt
* Maybe better to just call vnode_pager_setsize()?
*/
int
*/
do {
anyio = 0;
-#ifdef AFS_FBSD80_ENV
- lock_vnode(vp);
-#endif
if (VOP_GETVOBJECT(vp, &obj) == 0 && (obj->flags & OBJ_MIGHTBEDIRTY)) {
-#ifdef AFS_FBSD80_ENV
- unlock_vnode(vp);
-#endif
#ifdef AFS_FBSD50_ENV
if (!vget(vp, LK_EXCLUSIVE | LK_RETRY, curthread)) {
#else
vput(vp);
}
}
-#ifdef AFS_FBSD80_ENV
- else
- unlock_vnode(vp);
-#endif
} while (anyio && (--tries > 0));
AFS_GLOCK();
ObtainWriteLock(&avc->lock, 94);
osi_VM_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
{
struct vnode *vp;
- struct vm_object *obj;
- int anyio, tries, code;
+ int tries, code;
SPLVAR;
if (vp->v_iflag & VI_DOOMED) {
USERPRI;
- return 0;
+ return;
}
if (vp->v_bufobj.bo_object != NULL) {
static vop_strategy_t afs_vop_strategy;
static vop_symlink_t afs_vop_symlink;
static vop_write_t afs_vop_write;
-#if defined(AFS_FBSD70_ENV) && !defined(AFS_FBSD90_ENV)
+#if defined(AFS_FBSD70_ENV) && !defined(AFS_FBSD80_ENV)
static vop_lock1_t afs_vop_lock;
static vop_unlock_t afs_vop_unlock;
static vop_islocked_t afs_vop_islocked;
.vop_strategy = afs_vop_strategy,
.vop_symlink = afs_vop_symlink,
.vop_write = afs_vop_write,
-#if defined(AFS_FBSD70_ENV) && !defined(AFS_FBSD90_ENV)
+#if defined(AFS_FBSD70_ENV) && !defined(AFS_FBSD80_ENV)
.vop_lock1 = afs_vop_lock,
.vop_unlock = afs_vop_unlock,
.vop_islocked = afs_vop_islocked,
*/
if (code)
printf("afs_vop_reclaim: afs_FlushVCache failed code %d\n", code);
-#ifdef AFS_FBSD60_ENV
- else {
- vnode_destroy_vobject(vp);
-#ifndef AFS_FBSD70_ENV
- vfs_hash_remove(vp);
-#endif
- vp->v_data = 0;
- }
-#endif
+
+ /* basically, it must not fail */
+ vnode_destroy_vobject(vp);
+ vp->v_data = 0;
+
return 0;
}
register struct vcache *vc = VTOAFS(ap->a_vp);
int s = vc->f.states;
-#ifdef AFS_FBSD50_ENV
- printf("tag %s, fid: %d.%x.%x.%x, opens %d, writers %d", vp->v_tag,
+ printf("tag %s, fid: %d.%d.%d.%d, opens %d, writers %d", vp->v_tag,
(int)vc->f.fid.Cell, (u_int) vc->f.fid.Fid.Volume,
(u_int) vc->f.fid.Fid.Vnode, (u_int) vc->f.fid.Fid.Unique, vc->opens,
vc->execsOrWriters);
-#else
- printf("tag %d, fid: %ld.%x.%x.%x, opens %d, writers %d", vp->v_tag,
- vc->f.fid.Cell, (u_int) vc->f.fid.Fid.Volume,
- (u_int) vc->f.fid.Fid.Vnode, (u_int) vc->f.fid.Fid.Unique, vc->opens,
- vc->execsOrWriters);
-#endif
printf("\n states%s%s%s%s%s", (s & CStatd) ? " statd" : "",
(s & CRO) ? " readonly" : "", (s & CDirty) ? " dirty" : "",
(s & CMAPPED) ? " mapped" : "",
vp = (struct vnode *)dp->d_inode;
#else
code = gop_lookupname_user(path, AFS_UIOUSER, follow, &vp);
+#if defined(AFS_FBSD80_ENV) /* XXX check on 7x */
+ VN_HOLD(vp);
+#endif /* AFS_FBSD80_ENV */
#endif /* AFS_LINUX22_ENV */
#endif /* AFS_AIX41_ENV */
AFS_GLOCK();
#ifdef AFS_LINUX22_ENV
dput(dp);
#else
+#if defined(AFS_FBSD80_ENV)
+ if (VOP_ISLOCKED(vp))
+ VOP_UNLOCK(vp, 0);
+#endif /* AFS_FBSD80_ENV */
AFS_RELE(vp); /* put vnode back */
#endif
}
/* none free, making one is better than a panic */
afs_stats_cmperf.vcacheXAllocs++; /* count in case we have a leak */
tvc = (struct vcache *)afs_osi_Alloc(sizeof(struct vcache));
-#if defined(AFS_DARWIN_ENV) && !defined(UKERNEL)
+#if (defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)) && !defined(UKERNEL)
tvc->v = NULL; /* important to clean this, or use memset 0 */
-#endif
+#endif /* (DARWIN || XBSD) && !UKERNEL */
#ifdef KERNEL_HAVE_PIN
pin((char *)tvc, sizeof(struct vcache)); /* XXX */
#endif
* XXX assume FreeBSD is the same for now.
*/
AFS_GUNLOCK();
+#if defined(AFS_FBSD80_ENV)
+    /* vgone() is correct, but it assumes v_usecount is nonzero;
+     * our current usage appears to guarantee that, and we take a
+     * reference below just in case the count has dropped to 0 */
+ if (vrefcnt(AFSTOV(tvc)) < 1) {
+ vref(AFSTOV(tvc));
+ }
+ vn_lock(AFSTOV(tvc), LK_EXCLUSIVE | LK_RETRY); /* !glocked */
+#endif
vgone(AFSTOV(tvc));
+#if defined(AFS_FBSD80_ENV)
+ VOP_UNLOCK(AFSTOV(tvc), 0);
+#endif
fv_slept = 0;
code = 0;
AFS_GLOCK();
if (getnewvnode(VT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
#endif
panic("afs getnewvnode"); /* can't happen */
+#ifdef AFS_FBSD70_ENV
+ /* XXX verified on 80--TODO check on 7x */
+ if (!vp->v_mount) {
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* !glocked */
+ insmntque(vp, afs_globalVFS);
+ VOP_UNLOCK(vp, 0);
+ }
+#endif
AFS_GLOCK();
ObtainWriteLock(&afs_xvcache,339);
if (tvc->v != NULL) {
tvc->v.v_next = gnodepnt->gn_vnode; /*Single vnode per gnode for us! */
gnodepnt->gn_vnode = &tvc->v;
#endif
-#ifdef AFS_FBSD70_ENV
-#ifndef AFS_FBSD80_ENV /* yup. they put it back. */
- insmntque(AFSTOV(tvc), afs_globalVFS);
-#endif
-#endif
#if defined(AFS_SGI_ENV)
VN_SET_DPAGES(&(tvc->v), (struct pfdat *)NULL);
osi_Assert((tvc->v.v_flag & VINACT) == 0);