int flags = ap->a_cnp->cn_flags;
int lockparent; /* 1 => lockparent flag is set */
int wantparent; /* 1 => wantparent or lockparent flag */
+#ifndef AFS_FBSD80_ENV
struct thread *p = ap->a_cnp->cn_thread;
+#endif
dvp = ap->a_dvp;
if (dvp->v_type != VDIR) {
lockparent = flags & LOCKPARENT;
wantparent = flags & (LOCKPARENT | WANTPARENT);
-#ifdef AFS_FBSD80_ENV
+#if __FreeBSD_version < 1000021
cnp->cn_flags |= MPSAFE; /* steel */
#endif
-#ifndef AFS_FBSD70_ENV
if (flags & ISDOTDOT)
- VOP_UNLOCK(dvp, 0, p);
-#endif
+ MA_VOP_UNLOCK(dvp, 0, p);
AFS_GLOCK();
error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
* we also always return the vnode locked. */
if (flags & ISDOTDOT) {
+ /* vp before dvp since we go root to leaf, and .. comes first */
ma_vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ ma_vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
/* always return the child locked */
if (lockparent && (flags & ISLASTCN)
&& (error = ma_vn_lock(dvp, LK_EXCLUSIVE, p))) {
MA_VOP_UNLOCK(dvp, 0, p); /* done with parent. */
#endif
}
- ma_vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+ ma_vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, p);
/* always return the child locked */
}
*ap->a_vpp = vp;
int error = 0;
struct vcache *vcp;
struct vnode *dvp = ap->a_dvp;
+#ifndef AFS_FBSD80_ENV
struct thread *p = ap->a_cnp->cn_thread;
+#endif
GETNAME();
AFS_GLOCK();
* struct thread *a_td;
* } */ *ap;
{
- int code;
- struct vcache *avc = VTOAFS(ap->a_vp);
+ int code, iflag;
+ struct vnode *vp = ap->a_vp;
+ struct vcache *avc = VTOAFS(vp);
+
+#if defined(AFS_FBSD80_ENV)
+ VI_LOCK(vp);
+ iflag = vp->v_iflag & VI_DOOMED;
+ VI_UNLOCK(vp);
+ if (iflag & VI_DOOMED) {
+ /* osi_FlushVCache (correctly) calls vgone() on recycled vnodes; in that
+ * case there is no afs_close to process */
+ if (avc->opens != 0)
+ panic("afs_vop_close: doomed vnode %p has vcache %p with non-zero opens %d\n",
+ vp, avc, avc->opens);
+ return 0;
+ }
+#endif
+
AFS_GLOCK();
if (ap->a_cred)
code = afs_close(avc, ap->a_fflag, ap->a_cred);
* } */ *ap;
{
int code;
+
AFS_GLOCK();
code = afs_getattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);
AFS_GUNLOCK();
+
return code;
}
struct vcache *avc = VTOAFS(ap->a_vp);
AFS_GLOCK();
osi_FlushPages(avc, ap->a_cred); /* hold bozon lock, but not basic vnode lock */
- code = afs_read(avc, ap->a_uio, ap->a_cred, 0, 0, 0);
+ code = afs_read(avc, ap->a_uio, ap->a_cred, 0);
AFS_GUNLOCK();
return code;
}
AFS_GLOCK();
osi_FlushPages(avc, osi_curcred()); /* hold bozon lock, but not basic vnode lock */
- code = afs_read(avc, &uio, osi_curcred(), 0, 0, 0);
+ code = afs_read(avc, &uio, osi_curcred(), 0);
AFS_GUNLOCK();
pmap_qremove(kva, npages);
* Read operation filled a partial page.
*/
m->valid = 0;
- vm_page_set_valid(m, 0, size - toff);
-#ifndef AFS_FBSD80_ENV
- vm_page_undirty(m);
-#else
+ vm_page_set_validclean(m, 0, size - toff);
KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
-#endif
}
if (i != ap->a_reqpage) {
int error = 0;
struct vnode *dvp = ap->a_tdvp;
struct vnode *vp = ap->a_vp;
+#ifndef AFS_FBSD80_ENV
struct thread *p = ap->a_cnp->cn_thread;
+#endif
GETNAME();
if (dvp->v_mount != vp->v_mount) {
error = EISDIR;
goto out;
}
- if ((error = ma_vn_lock(vp, LK_EXCLUSIVE, p)) != 0) {
+ if ((error = ma_vn_lock(vp, LK_CANRECURSE | LK_EXCLUSIVE, p)) != 0) {
goto out;
}
AFS_GLOCK();
struct vnode *tdvp = ap->a_tdvp;
struct vnode *fvp = ap->a_fvp;
struct vnode *fdvp = ap->a_fdvp;
+#ifndef AFS_FBSD80_ENV
struct thread *p = fcnp->cn_thread;
+#endif
/*
* Check for cross-device rename.
struct vattr *vap = ap->a_vap;
int error = 0;
struct vcache *vcp;
+#ifndef AFS_FBSD80_ENV
struct thread *p = ap->a_cnp->cn_thread;
+#endif
GETNAME();
#ifdef DIAGNOSTIC
return error;
}
-extern int prtactive;
-
int
afs_vop_inactive(ap)
struct vop_inactive_args /* {
{
struct vnode *vp = ap->a_vp;
- if (prtactive && vp->v_usecount != 0)
- vprint("afs_vop_inactive(): pushing active", vp);
-
AFS_GLOCK();
afs_InactiveVCache(VTOAFS(vp), 0); /* decrs ref counts */
AFS_GUNLOCK();
ObtainWriteLock(&afs_xvcache, 901);
/* reclaim the vnode and the in-memory vcache, but keep the on-disk vcache */
code = afs_FlushVCache(avc, &slept);
+
+ if (avc->f.states & CVInit) {
+ avc->f.states &= ~CVInit;
+ afs_osi_Wakeup(&avc->f.states);
+ }
+
if (!haveVlock)
ReleaseWriteLock(&afs_xvcache);
if (!haveGlock)
AFS_GUNLOCK();
- /*
- * XXX Pretend it worked, to prevent panic on shutdown
- * Garrett, please fix - Jim Rees
- */
if (code) {
- printf("afs_vop_reclaim: afs_FlushVCache failed code %d vnode\n", code);
+ afs_warn("afs_vop_reclaim: afs_FlushVCache failed code %d vnode\n", code);
VOP_PRINT(vp);
}
struct vcache *vc = VTOAFS(ap->a_vp);
int s = vc->f.states;
- printf("tag %s, fid: %d.%d.%d.%d, opens %d, writers %d", vp->v_tag,
+ printf("vc %p vp %p tag %s, fid: %d.%d.%d.%d, opens %d, writers %d", vc, vp, vp->v_tag,
(int)vc->f.fid.Cell, (u_int) vc->f.fid.Fid.Volume,
(u_int) vc->f.fid.Fid.Vnode, (u_int) vc->f.fid.Fid.Unique, vc->opens,
vc->execsOrWriters);
AFS_GLOCK();
error =
- afs_lockctl(VTOAFS(ap->a_vp), ap->a_fl, ap->a_op, &cr, (int)ap->a_id);
+ afs_lockctl(VTOAFS(ap->a_vp),
+ ap->a_fl,
+ ap->a_op, &cr,
+ (int)(intptr_t)ap->a_id); /* XXX: no longer unique! */
AFS_GUNLOCK();
return error;
}