{
struct vcache *avc = VTOAFS(FILE_INODE(fp));
struct vrequest treq;
- register struct dcache *tdc;
+ struct dcache *tdc;
int code;
int offset;
int dirpos;
/* get a validated vcache entry */
code = afs_linux_VerifyVCache(vcp, NULL);
- /* Linux's Flushpage implementation doesn't use credp, so optimise
- * our code to not need to crref() it */
- osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
+ if (code == 0) {
+ /* Linux's Flushpage implementation doesn't use credp, so optimise
+ * our code to not need to crref() it */
+ osi_FlushPages(vcp, NULL); /* ensure stale pages are gone */
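+ /* generic_file_mmap may sleep; do not hold the AFS global lock across it */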
+ AFS_GUNLOCK();
+ code = generic_file_mmap(fp, vmap);
+ AFS_GLOCK();
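+ /* note in the vcache that this file now has a memory mapping */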
+ if (!code)
+ vcp->f.states |= CMAPPED;
+ }
AFS_GUNLOCK();
- code = generic_file_mmap(fp, vmap);
- AFS_GLOCK();
- if (!code)
- vcp->f.states |= CMAPPED;
- AFS_GUNLOCK();
return code;
}
#endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
AFS_GLOCK();
- code = afs_lockctl(vcp, &flock, cmd, credp);
+ code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
AFS_GUNLOCK();
if ((code == 0 || flp->fl_type == F_UNLCK) &&
flp->fl_end = flock.l_start + flock.l_len - 1;
crfree(credp);
- return afs_convert_code(code);
+ return code;
}
#ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
#endif /* F_GETLK64 && F_GETLK != F_GETLK64 */
AFS_GLOCK();
- code = afs_lockctl(vcp, &flock, cmd, credp);
+ code = afs_convert_code(afs_lockctl(vcp, &flock, cmd, credp));
AFS_GUNLOCK();
if ((code == 0 || flp->fl_type == F_UNLCK) &&
flp->fl_pid = flock.l_pid;
crfree(credp);
- return afs_convert_code(code);
+ return code;
}
#endif
cred_t *credp;
int code;
#if defined(AFS_CACHE_BYPASS)
- int bypasscache;
+ int bypasscache = 0;
#endif
AFS_GLOCK();
if (code)
goto out;
#if defined(AFS_CACHE_BYPASS)
- /* If caching is bypassed for this file, or globally, just return 0 */
- if(cache_bypass_strategy == ALWAYS_BYPASS_CACHE)
- bypasscache = 1;
- else {
- ObtainReadLock(&vcp->lock);
- if(vcp->cachingStates & FCSBypass)
- bypasscache = 1;
- ReleaseReadLock(&vcp->lock);
- }
- if(bypasscache) {
- /* future proof: don't rely on 0 return from afs_InitReq */
- code = 0; goto out;
- }
+ /* If caching is bypassed for this file, or globally, just return 0 */
+ if (cache_bypass_strategy == ALWAYS_BYPASS_CACHE)
+ bypasscache = 1;
+ else {
+ ObtainReadLock(&vcp->lock);
+ if (vcp->cachingStates & FCSBypass)
+ bypasscache = 1;
+ ReleaseReadLock(&vcp->lock);
+ }
+ if (bypasscache) {
+ /* future proof: don't rely on 0 return from afs_InitReq */
+ code = 0;
+ goto out;
+ }
#endif
ObtainSharedLock(&vcp->lock, 535);
#endif
.open = afs_linux_open,
.release = afs_linux_release,
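+ /* newer kernels no longer fall back to default_llseek when .llseek is
+ * unset; set it explicitly to preserve seek behaviour */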
+ .llseek = default_llseek,
};
struct file_operations afs_file_fops = {
#ifdef STRUCT_FILE_OPERATIONS_HAS_FLOCK
.flock = afs_linux_flock,
#endif
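+ /* as above: set .llseek explicitly to keep default_llseek behaviour */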
+ .llseek = default_llseek,
};
* AFS Linux dentry operations
**********************************************************************/
-/* check_bad_parent() : Checks if this dentry's vcache is a root vcache
+/* fix_bad_parent() : called if this dentry's vcache is a root vcache
* that has its mvid (parent dir's fid) pointer set to the wrong directory
- * due to being mounted in multiple points at once. If so, check_bad_parent()
+ * due to being mounted in multiple points at once. fix_bad_parent()
* calls afs_lookup() to correct the vcache's mvid, as well as the volume's
* dotdotfid and mtpoint fid members.
* Parameters:
* dp - dentry to be checked.
+ * credp - credentials
+ * vcp, pvc - the dentry's and its parent's vcache pointers
* Return Values:
* None.
* Sideeffects:
*/
static inline void
-check_bad_parent(struct dentry *dp)
+fix_bad_parent(struct dentry *dp, cred_t *credp, struct vcache *vcp, struct vcache *pvc)
{
- cred_t *credp;
- struct dentry *parent;
- struct vcache *vcp, *pvc, *avc = NULL;
-
- vcp = VTOAFS(dp->d_inode);
- parent = dget_parent(dp);
- pvc = VTOAFS(parent->d_inode);
-
- if (vcp->mvid->Fid.Volume != pvc->f.fid.Fid.Volume) { /* bad parent */
- credp = crref();
-
- /* force a lookup, so vcp->mvid is fixed up */
- afs_lookup(pvc, (char *)dp->d_name.name, &avc, credp);
- if (!avc || vcp != avc) { /* bad, very bad.. */
- afs_Trace4(afs_iclSetp, CM_TRACE_TMP_1S3L, ICL_TYPE_STRING,
- "check_bad_parent: bad pointer returned from afs_lookup origvc newvc dentry",
- ICL_TYPE_POINTER, vcp, ICL_TYPE_POINTER, avc,
- ICL_TYPE_POINTER, dp);
- }
- if (avc)
- AFS_RELE(AFSTOV(avc));
- crfree(credp);
+ struct vcache *avc = NULL;
+
+ /* force a lookup, so vcp->mvid is fixed up */
+ afs_lookup(pvc, (char *)dp->d_name.name, &avc, credp);
+ if (!avc || vcp != avc) { /* bad, very bad.. */
+ afs_Trace4(afs_iclSetp, CM_TRACE_TMP_1S3L, ICL_TYPE_STRING,
+ "check_bad_parent: bad pointer returned from afs_lookup origvc newvc dentry",
+ ICL_TYPE_POINTER, vcp, ICL_TYPE_POINTER, avc,
+ ICL_TYPE_POINTER, dp);
}
-
- dput(parent);
+ if (avc)
+ AFS_RELE(AFSTOV(avc));
return;
}
#ifdef notyet
/* Make this a fast path (no crref), since it's called so often. */
- if (vcp->f.states & CStatd) {
-
- if (*dp->d_name.name != '/' && vcp->mvstat == 2) /* root vnode */
- check_bad_parent(dp); /* check and correct mvid */
- AFS_GUNLOCK();
+ if (vcp->f.states & CStatd) {
+ struct vcache *pvc = VTOAFS(dp->d_parent->d_inode);
+ if (*dp->d_name.name != '/' && vcp->mvstat == 2) { /* root vnode */
+ if (vcp->mvid->Fid.Volume != pvc->f.fid.Fid.Volume) { /* bad parent */
+ credp = crref();
+ AFS_GLOCK();
+ fix_bad_parent(dp, credp, vcp, pvc); /* check and correct mvid */
+ AFS_GUNLOCK();
+ crfree(credp);
+ }
+ }
return 0;
}
#endif
code = afs_getattr(vcp, &vattr, credp);
crfree(credp);
}
+
if (!code)
afs_fill_inode(AFSTOV(vcp), &vattr);
* we are advised to follow the entry if it is a link or to make sure that
* it is a directory. But since the kernel itself checks these possibilities
* later on, we shouldn't have to do it until later. Perhaps in the future..
+ *
+ * The code here assumes that on entry the global lock is not held
*/
static int
#ifdef DOP_REVALIDATE_TAKES_NAMEIDATA
struct dentry *parent;
int valid;
struct afs_fakestat_state fakestate;
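+ /* nonzero once we hold GLOCK (and a cred ref) for this revalidation */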
+ int locked = 0;
- AFS_GLOCK();
afs_InitFakeStat(&fakestate);
if (dp->d_inode) {
+ parent = dget_parent(dp);
+ pvcp = VTOAFS(parent->d_inode);
vcp = VTOAFS(dp->d_inode);
if (vcp == afs_globalVp)
goto good_dentry;
- if (vcp->mvstat == 1) { /* mount point */
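+ /* evaluating a mount point or fixing a root vnode's parent calls into
+ * AFS internals, so take a cred ref and the global lock first */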
+ if ((vcp->mvstat == 1) || (vcp->mvstat == 2)) { /* need to lock */
+ credp = crref();
+ AFS_GLOCK();
+ locked = 1;
+ }
+
+ if (locked && vcp->mvstat == 1) { /* mount point */
if (vcp->mvid && (vcp->f.states & CMValid)) {
int tryEvalOnly = 0;
int code = 0;
struct vrequest treq;
- credp = crref();
code = afs_InitReq(&treq, credp);
if (
(strcmp(dp->d_name.name, ".directory") == 0)) {
}
}
} else
- if (*dp->d_name.name != '/' && vcp->mvstat == 2) /* root vnode */
- check_bad_parent(dp); /* check and correct mvid */
+ if (locked && *dp->d_name.name != '/' && vcp->mvstat == 2) { /* root vnode */
+ if (vcp->mvid->Fid.Volume != pvcp->f.fid.Fid.Volume) { /* bad parent */
+ fix_bad_parent(dp, credp, vcp, pvcp); /* check and correct mvid */
+ }
+ }
#ifdef notdef
/* If the last looker changes, we should make sure the current
}
#endif
- parent = dget_parent(dp);
- pvcp = VTOAFS(parent->d_inode);
/* If the parent's DataVersion has changed or the vnode
* is no longer valid, we need to do a full lookup. VerifyVCache
* isn't enough since the vnode may have been renamed.
*/
- if (hgetlo(pvcp->f.m.DataVersion) > dp->d_time || !(vcp->f.states & CStatd)) {
-
+ if (!locked && (hgetlo(pvcp->f.m.DataVersion) > dp->d_time || !(vcp->f.states & CStatd))) {
credp = crref();
+ AFS_GLOCK();
+ locked = 1;
+ }
+
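+ /* re-check under GLOCK: the vcache may have changed while unlocked */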
+ if (locked && (hgetlo(pvcp->f.m.DataVersion) > dp->d_time || !(vcp->f.states & CStatd))) {
afs_lookup(pvcp, (char *)dp->d_name.name, &tvc, credp);
if (!tvc || tvc != vcp) {
dput(parent);
/* Clean up */
if (tvc)
afs_PutVCache(tvc);
- afs_PutFakeStat(&fakestate);
- AFS_GUNLOCK();
+ afs_PutFakeStat(&fakestate); /* from here on vcp may no longer be valid */
+ if (locked) {
+ /* we hold the global lock if we evaluated a mount point */
+ AFS_GUNLOCK();
+ }
if (credp)
crfree(credp);
page_cache_release(pp);
iovecp[page_ix].iov_base = (void *) 0;
base_index++;
- continue;
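+ /* this page will not be read; shrink the no-cache request to match */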
+ ancr->length -= PAGE_SIZE;
+ continue;
}
base_index++;
if(code) {
lock_page(pp);
}
+ /* increment page refcount--our original design assumed
+ * that locking it would effectively pin it; protect
+ * ourselves from the possibility that this assumption
+ * is faulty, at low cost (provided we do not fail to
+ * do the corresponding decref on the other side) */
+ get_page(pp);
+
/* save the page for background map */
iovecp[page_ix].iov_base = (void*) pp;
/* and put it on the LRU cache */
if (!pagevec_add(&lrupv, pp))
- __pagevec_lru_add(&lrupv);
+ __pagevec_lru_add_file(&lrupv);
}
}
/* If there were useful pages in the page list, make sure all pages
* are in the LRU cache, then schedule the read */
if(page_count) {
- pagevec_lru_add(&lrupv);
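+ /* flush any pages still batched in the pagevec onto the file LRU */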
+ if (pagevec_count(&lrupv))
+ __pagevec_lru_add_file(&lrupv);
credp = crref();
code = afs_ReadNoCache(avc, ancr, credp);
crfree(credp);
struct nocache_read_request *ancr;
int code;
+ /*
+ * Special case: if page is at or past end of file, just zero it and set
+ * it as up to date.
+ */
+ if (page_offset(pp) >= i_size_read(fp->f_mapping->host)) {
+ zero_user_segment(pp, 0, PAGE_CACHE_SIZE);
+ SetPageUptodate(pp);
+ unlock_page(pp);
+ return 0;
+ }
+
ClearPageError(pp);
/* receiver frees */
PAGE_SIZE, UIO_READ, AFS_UIOSYS);
/* save the page for background map */
- /* XXX - Shouldn't we get a reference count here? */
+ get_page(pp); /* see above */
auio->uio_iov->iov_base = (void*) pp;
/* the background thread will free this */
ancr = osi_Alloc(sizeof(struct nocache_read_request));