#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
+#include <sys/vmmeter.h>
extern int afs_pbuf_freecnt;
#define GETNAME() \
struct componentname *cnp = ap->a_cnp; \
char *name; \
- MALLOC(name, char *, cnp->cn_namelen+1, M_TEMP, M_WAITOK); \
+ name = malloc(cnp->cn_namelen+1, M_TEMP, M_WAITOK); \
memcpy(name, cnp->cn_nameptr, cnp->cn_namelen); \
name[cnp->cn_namelen] = '\0'
-#define DROPNAME() FREE(name, M_TEMP)
+#define DROPNAME() free(name, M_TEMP)
+
+#ifdef LINK_MAX
+# define AFS_LINK_MAX LINK_MAX
+#else
+# define AFS_LINK_MAX (32767)
+#endif
/*
* Here we define compatibility functions/macros for interfaces that
static __inline void ma_vm_page_lock(vm_page_t m) { vm_page_lock(m); };
static __inline void ma_vm_page_unlock(vm_page_t m) { vm_page_unlock(m); };
-#define ma_vn_lock(vp, flags, p) (vn_lock(vp, flags))
-#define MA_VOP_LOCK(vp, flags, p) (VOP_LOCK(vp, flags))
-#define MA_VOP_UNLOCK(vp, flags, p) (VOP_UNLOCK(vp, flags))
-
-#define MA_PCPU_INC(c) PCPU_INC(c)
-#define MA_PCPU_ADD(c, n) PCPU_ADD(c, n)
-
#if __FreeBSD_version >= 1000030
#define AFS_VM_OBJECT_WLOCK(o) VM_OBJECT_WLOCK(o)
#define AFS_VM_OBJECT_WUNLOCK(o) VM_OBJECT_WUNLOCK(o)
#else
#define AFS_VM_OBJECT_WLOCK(o) VM_OBJECT_LOCK(o)
#define AFS_VM_OBJECT_WUNLOCK(o) VM_OBJECT_UNLOCK(o)
#endif
+#ifdef VM_CNT_ADD
+# define AFS_VM_CNT_ADD(var, x) VM_CNT_ADD(var, x)
+# define AFS_VM_CNT_INC(var) VM_CNT_INC(var)
+#else
+# define AFS_VM_CNT_ADD(var, x) PCPU_ADD(cnt.var, x)
+# define AFS_VM_CNT_INC(var) PCPU_INC(cnt.var)
+#endif
+
/*
* Mostly copied from sys/ufs/ufs/ufs_vnops.c:ufs_pathconf().
* We should know the correct answers to these questions with
error = 0;
switch (ap->a_name) {
case _PC_LINK_MAX:
- *ap->a_retval = LINK_MAX;
+ *ap->a_retval = AFS_LINK_MAX;
break;
case _PC_NAME_MAX:
*ap->a_retval = NAME_MAX;
struct vcache *vcp;
struct vnode *vp, *dvp;
int flags = ap->a_cnp->cn_flags;
- int lockparent; /* 1 => lockparent flag is set */
dvp = ap->a_dvp;
if (dvp->v_type != VDIR) {
GETNAME();
- lockparent = flags & LOCKPARENT;
-
#if __FreeBSD_version < 1000021
cnp->cn_flags |= MPSAFE; /* steel */
#endif
+ /*
+ * Locking rules:
+ *
+ * - 'dvp' is locked by our caller. We must return it locked, whether we
+ * return success or error.
+ *
+ * - If the lookup succeeds, 'vp' must be locked before we return.
+ *
+ * - If we lock multiple vnodes, parent vnodes must be locked before
+ * children vnodes.
+ *
+ * As a result, looking up the parent directory (if 'flags' has ISDOTDOT
+ * set) is a bit of a special case. In that case, we must unlock 'dvp'
+ * before performing the lookup, since the lookup operation may lock the
+ * target vnode, and the target vnode is the parent of 'dvp' (so we must
+ * lock 'dvp' after locking the target vnode).
+ */
+
if (flags & ISDOTDOT)
- MA_VOP_UNLOCK(dvp, 0, p);
+ VOP_UNLOCK(dvp, 0);
AFS_GLOCK();
error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
if (error) {
if (flags & ISDOTDOT)
- MA_VOP_LOCK(dvp, LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)
&& (flags & ISLASTCN) && error == ENOENT)
error = EJUSTRETURN;
}
vp = AFSTOV(vcp); /* always get a node if no error */
- /* The parent directory comes in locked. We unlock it on return
- * unless the caller wants it left locked.
- * we also always return the vnode locked. */
-
if (flags & ISDOTDOT) {
- /* vp before dvp since we go root to leaf, and .. comes first */
- ma_vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
- ma_vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
- /* always return the child locked */
- if (lockparent && (flags & ISLASTCN)
- && (error = ma_vn_lock(dvp, LK_EXCLUSIVE, p))) {
- vput(vp);
- DROPNAME();
- return (error);
- }
+ /* Must lock 'vp' before 'dvp', since 'vp' is the parent vnode. */
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
+ vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
} else if (vp == dvp) {
/* they're the same; afs_lookup() already ref'ed the leaf.
* It came in locked, so we don't need to ref OR lock it */
} else {
- ma_vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, p);
- /* always return the child locked */
+ vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
}
*ap->a_vpp = vp;
if (vcp) {
*ap->a_vpp = AFSTOV(vcp);
- ma_vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY);
} else
*ap->a_vpp = 0;
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, pages, npages);
- MA_PCPU_INC(cnt.v_vnodein);
- MA_PCPU_ADD(cnt.v_vnodepgsin, npages);
+ AFS_VM_CNT_INC(v_vnodein);
+ AFS_VM_CNT_ADD(v_vnodepgsin, npages);
#ifdef FBSD_VOP_GETPAGES_BUSIED
count = ctob(npages);
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, ap->a_m, npages);
- MA_PCPU_INC(cnt.v_vnodeout);
- MA_PCPU_ADD(cnt.v_vnodepgsout, ap->a_count);
+ AFS_VM_CNT_INC(v_vnodeout);
+ AFS_VM_CNT_ADD(v_vnodepgsout, ap->a_count);
iov.iov_base = (caddr_t) kva;
iov.iov_len = ap->a_count;
relpbuf(bp, &afs_pbuf_freecnt);
if (!code) {
+ AFS_VM_OBJECT_WLOCK(vp->v_object);
size = ap->a_count - uio.uio_resid;
for (i = 0; i < round_page(size) / PAGE_SIZE; i++) {
ap->a_rtvals[i] = VM_PAGER_OK;
vm_page_undirty(ap->a_m[i]);
}
+ AFS_VM_OBJECT_WUNLOCK(vp->v_object);
}
return ap->a_rtvals[0];
}
error = EISDIR;
goto out;
}
- if ((error = ma_vn_lock(vp, LK_CANRECURSE | LK_EXCLUSIVE, p)) != 0) {
+ if ((error = vn_lock(vp, LK_CANRECURSE | LK_EXCLUSIVE)) != 0) {
goto out;
}
AFS_GLOCK();
error = afs_link(VTOAFS(vp), VTOAFS(dvp), name, cnp->cn_cred);
AFS_GUNLOCK();
if (dvp != vp)
- MA_VOP_UNLOCK(vp, 0, p);
+ VOP_UNLOCK(vp, 0);
out:
DROPNAME();
return error;
vput(fvp);
return (error);
}
- if ((error = ma_vn_lock(fvp, LK_EXCLUSIVE, p)) != 0)
+ if ((error = vn_lock(fvp, LK_EXCLUSIVE)) != 0)
goto abortit;
- MALLOC(fname, char *, fcnp->cn_namelen + 1, M_TEMP, M_WAITOK);
+ fname = malloc(fcnp->cn_namelen + 1, M_TEMP, M_WAITOK);
memcpy(fname, fcnp->cn_nameptr, fcnp->cn_namelen);
fname[fcnp->cn_namelen] = '\0';
- MALLOC(tname, char *, tcnp->cn_namelen + 1, M_TEMP, M_WAITOK);
+ tname = malloc(tcnp->cn_namelen + 1, M_TEMP, M_WAITOK);
memcpy(tname, tcnp->cn_nameptr, tcnp->cn_namelen);
tname[tcnp->cn_namelen] = '\0';
afs_rename(VTOAFS(fdvp), fname, VTOAFS(tdvp), tname, tcnp->cn_cred);
AFS_GUNLOCK();
- FREE(fname, M_TEMP);
- FREE(tname, M_TEMP);
+ free(fname, M_TEMP);
+ free(tname, M_TEMP);
if (tdvp == tvp)
vrele(tdvp);
else
}
if (vcp) {
*ap->a_vpp = AFSTOV(vcp);
- ma_vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY, p);
+ vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY);
} else
*ap->a_vpp = 0;
DROPNAME();
error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
if (error == 0) {
newvp = AFSTOV(vcp);
- ma_vn_lock(newvp, LK_EXCLUSIVE | LK_RETRY, cnp->cn_thread);
+ vn_lock(newvp, LK_EXCLUSIVE | LK_RETRY);
}
}
AFS_GUNLOCK();
dp = (const struct dirent *)((const char *)dp + dp->d_reclen))
ncookies++;
- MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP,
+ cookies = malloc(ncookies * sizeof(u_long), M_TEMP,
M_WAITOK);
for (dp = dp_start, cookiep = cookies; dp < dp_end;
dp = (const struct dirent *)((const char *)dp + dp->d_reclen)) {
static int
afs_vop_reclaim(struct vop_reclaim_args *ap)
{
- /* copied from ../OBSD/osi_vnodeops.c:afs_nbsd_reclaim() */
int code, slept;
struct vnode *vp = ap->a_vp;
struct vcache *avc = VTOAFS(vp);
int haveGlock = ISAFS_GLOCK();
- int haveVlock = CheckLock(&afs_xvcache);
+
+ /*
+ * In other code paths, we acquire the vnode lock while afs_xvcache is
+ * already held (e.g. afs_PutVCache() -> vrele()). Here, we already have
+ * the vnode lock, and we need afs_xvcache. So drop the vnode lock in order
+ * to hold afs_xvcache.
+ */
+ VOP_UNLOCK(vp, 0);
if (!haveGlock)
AFS_GLOCK();
- if (!haveVlock)
- ObtainWriteLock(&afs_xvcache, 901);
- /* reclaim the vnode and the in-memory vcache, but keep the on-disk vcache */
+ ObtainWriteLock(&afs_xvcache, 901);
+
+ /*
+ * Note that we deliberately call VOP_LOCK() instead of vn_lock() here.
+ * vn_lock() will return an error for VI_DOOMED vnodes, but we know this
+ * vnode is already VI_DOOMED. We just want to lock it again, and skip the
+ * VI_DOOMED check.
+ */
+ VOP_LOCK(vp, LK_EXCLUSIVE);
+
code = afs_FlushVCache(avc, &slept);
if (avc->f.states & CVInit) {
afs_osi_Wakeup(&avc->f.states);
}
- if (!haveVlock)
- ReleaseWriteLock(&afs_xvcache);
+ ReleaseWriteLock(&afs_xvcache);
if (!haveGlock)
AFS_GUNLOCK();
if (code) {
afs_warn("afs_vop_reclaim: afs_FlushVCache failed code %d vnode\n", code);
VOP_PRINT(vp);
+ panic("afs: afs_FlushVCache failed during reclaim");
}
- /* basically, it must not fail */
vnode_destroy_vobject(vp);
vp->v_data = 0;