return code;
}
+/**
+ * Dummy pvn_vplist_dirty() page-out handler for non-writable (read-only
+ * volume) vnodes, which can never hold dirty pages; panics if ever invoked.
+ */
+static int
+afs_never_putapage(struct vnode *vp, struct page *pages, u_offset_t * offp,
+ size_t * lenp, int flags, afs_ucred_t *credp)
+{
+    struct vcache *avc = VTOAFS(vp);
+    osi_Assert((avc->f.states & CRO) != 0);	/* caller selects us only for CRO vnodes */
+    osi_Panic("Dirty pages while flushing a read-only volume vnode.");
+    return EIO; /* unreachable */
+}
+
int
afs_putpage(struct vnode *vp, offset_t off, u_int len, int flags,
afs_ucred_t *cred)
afs_offs_t endPos;
afs_int32 NPages = 0;
u_offset_t toff = off;
- int didWriteLock;
+ int didLock = 0;
AFS_STATCNT(afs_putpage);
if (vp->v_flag & VNOMAP) /* file doesn't allow mapping */
(afs_int32) vp, ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(off),
ICL_TYPE_INT32, (afs_int32) len, ICL_TYPE_LONG, (int)flags);
avc = VTOAFS(vp);
- ObtainSharedLock(&avc->lock, 247);
- didWriteLock = 0;
/* Get a list of modified (or whatever) pages */
if (len) {
+ ObtainSharedLock(&avc->lock, 247);
+ didLock = SHARED_LOCK;
endPos = (afs_offs_t) off + len; /* position we're supposed to write up to */
while ((afs_offs_t) toff < endPos
&& (afs_offs_t) toff < avc->f.m.Length) {
if (!pages || !pvn_getdirty(pages, flags))
tlen = PAGESIZE;
else {
- if (!didWriteLock) {
+ if (didLock == SHARED_LOCK) {
AFS_GLOCK();
- didWriteLock = 1;
+ didLock = WRITE_LOCK;
UpgradeSToWLock(&avc->lock, 671);
AFS_GUNLOCK();
}
AFS_GLOCK();
}
} else {
- if (!didWriteLock) {
- UpgradeSToWLock(&avc->lock, 670);
- didWriteLock = 1;
+ /*
+ * We normally arrive here due to a vm flush.
+ *
+ * If this vnode belongs to a writable volume, obtain a vcache lock
+ * then call pvn_vplist_dirty to free, invalidate, or to write out
+ * dirty pages with afs_putapage. The afs_putapage routine requires a
+ * vcache lock, so we obtain it here before any page locks are taken.
+ * This locking order is done to avoid deadlocking due to races with
+ * afs_getpage, which also takes vcache and page locks.
+ *
+ * If this vnode belongs to a non-writable volume, then it will not
+ * contain dirty pages, so we do not need to lock the vcache, since
+ * afs_putapage will not be called. Instead, forgo the vcache lock and
+ * call pvn_vplist_dirty to free or invalidate pages. Pass a dummy
+ * page out handler to pvn_vplist_dirty which we do not expect to be
+ * called. Panic if the dummy handler is called, since something went
+ * horribly wrong.
+ */
+ if ((avc->f.states & CRO) == 0) {
+ ObtainWriteLock(&avc->lock, 670);
+ didLock = WRITE_LOCK;
}
-
AFS_GUNLOCK();
- code = pvn_vplist_dirty(vp, toff, afs_putapage, flags, cred);
+ if ((avc->f.states & CRO) == 0)
+ code = pvn_vplist_dirty(vp, toff, afs_putapage, flags, cred);
+ else
+ code = pvn_vplist_dirty(vp, toff, afs_never_putapage, flags, cred);
AFS_GLOCK();
}
if (code && !avc->vc_error) {
- if (!didWriteLock) {
+ if (didLock == 0) {
+ ObtainWriteLock(&avc->lock, 668);
+ didLock = WRITE_LOCK;
+ } else if (didLock == SHARED_LOCK) {
UpgradeSToWLock(&avc->lock, 669);
- didWriteLock = 1;
+ didLock = WRITE_LOCK;
}
avc->vc_error = code;
}
- if (didWriteLock)
+ if (didLock == WRITE_LOCK)
ReleaseWriteLock(&avc->lock);
- else
+ else if (didLock == SHARED_LOCK)
ReleaseSharedLock(&avc->lock);
afs_Trace2(afs_iclSetp, CM_TRACE_PAGEOUTDONE, ICL_TYPE_LONG, code,
ICL_TYPE_LONG, NPages);
{
afs_int32 code;
afs_int32 code2;
+ afs_int32 code_checkcode = 0;
int counter;
afs_int32 mode, sflags;
char *data;
afs_size_t toff, tlen;
dcp = afs_GetDCache(avc, fileBase, &treq, &toff, &tlen, 2);
if (!dcp) {
- code = ENOENT;
+ code = EIO;
break;
}
}
}
if (!code && avc->vc_error) {
- code = avc->vc_error;
+ code = code_checkcode = avc->vc_error;
}
ReleaseWriteLock(&avc->lock);
if (!code) {
*/
if (code == 0 && extraResid > 0)
auio->uio_resid += extraResid;
- return afs_CheckCode(code, &treq, 46);
+ if (code_checkcode) {
+ return code_checkcode;
+ } else {
+ return afs_CheckCode(code, &treq, 46);
+ }
}
int
-afs_map(struct vnode *vp, offset_t off, struct as *as, caddr_t *addr, u_int len, u_char prot, u_char maxprot, u_int flags, afs_ucred_t *cred)
+afs_map(struct vnode *vp, offset_t off, struct as *as, caddr_t *addr, size_t len, u_char prot, u_char maxprot, u_int flags, afs_ucred_t *cred)
{
struct segvn_crargs crargs;
afs_int32 code;
(void)as_unmap(as, *addr, len); /* unmap old address space use */
/* setup the create parameter block for the call */
crargs.vp = AFSTOV(avc);
- crargs.offset = (u_int) off;
+ crargs.offset = (u_offset_t)off;
crargs.cred = cred;
crargs.type = flags & MAP_TYPE;
crargs.prot = prot;
afs_inactive(struct vcache *avc, afs_ucred_t *acred)
{
struct vnode *vp = AFSTOV(avc);
- if (afs_shuttingdown)
+ if (afs_shuttingdown != AFS_RUNNING)
return 0;
/*
* Solaris calls VOP_OPEN on exec, but doesn't call VOP_CLOSE when
* the executable exits. So we clean up the open count here.
*
- * Only do this for mvstat 0 vnodes: when using fakestat, we can't
- * lose the open count for volume roots (mvstat 2), even though they
+ * Only do this for AFS_MVSTAT_FILE vnodes: when using fakestat, we can't
+ * lose the open count for volume roots (AFS_MVSTAT_ROOT), even though they
* will get VOP_INACTIVE'd when released by afs_PutFakeStat().
*/
- if (avc->opens > 0 && avc->mvstat == 0 && !(avc->f.states & CCore))
+ if (avc->opens > 0 && avc->mvstat == AFS_MVSTAT_FILE && !(avc->f.states & CCore))
avc->opens = avc->execsOrWriters = 0;
#endif