afs_int32 afs_bulkStatsDone;
static int bulkStatCounter = 0; /* counter for bulk stat seq. numbers */
+int afs_fakestat_enable = 0;	/* nonzero enables "fake stat" of mount points */
/* this would be faster if it did comparison as int32word, but would be
}
/* call under write lock, evaluate mvid field from a mt pt.
- * avc is the vnode of the mount point object.
- * advc is the vnode of the containing directory
+ * avc is the vnode of the mount point object; must be write-locked.
+ * advc is the vnode of the containing directory (optional; if NULL and
+ * EvalMountPoint succeeds, the caller must initialize (*avolpp)->dotdot)
* avolpp is where we return a pointer to the volume named by the mount pt, if success
* areq is the identity of the caller.
*
* to the new path.
*/
tvp->mtpoint = avc->fid; /* setup back pointer to mtpoint */
- tvp->dotdot = advc->fid;
+ if (advc) tvp->dotdot = advc->fid;
*avolpp = tvp;
return 0;
}
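+
+/* A caller-side sketch of the NULL-advc contract (illustrative only;
+ * this mirrors what afs_EvalFakeStat_int below actually does):
+ *
+ *	ObtainWriteLock(&tvc->lock, 599);
+ *	code = EvalMountPoint(tvc, NULL, &tvolp, areq);
+ *	ReleaseWriteLock(&tvc->lock);
+ *	if (!code && tvolp) {
+ *		tvolp->dotdot = tvc->fid;
+ *		tvolp->dotdot.Fid.Vnode = tvc->parentVnode;
+ *		tvolp->dotdot.Fid.Unique = tvc->parentUnique;
+ *	}
+ */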
+
+/*
+ * afs_InitFakeStat
+ *
+ * Must be called on an afs_fakestat_state object before calling
+ * afs_EvalFakeStat or afs_PutFakeStat. Calling afs_PutFakeStat
+ * without calling afs_EvalFakeStat is legal, as long as this
+ * function is called.
+ */
+
+void
+afs_InitFakeStat(state)
+ struct afs_fakestat_state *state;
+{
+ state->valid = 1;
+ state->did_eval = 0;
+ state->need_release = 0;
+}
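+
+/* Typical lifetime of an afs_fakestat_state in a vnode op, sketched
+ * with illustrative locals (error handling elided; afs_PutFakeStat is
+ * always safe once afs_InitFakeStat has run):
+ *
+ *	struct afs_fakestat_state fakestate;
+ *
+ *	afs_InitFakeStat(&fakestate);
+ *	code = afs_EvalFakeStat(&avc, &fakestate, &treq);
+ *	if (!code) {
+ *		... use avc, which may now be the volume root vcache ...
+ *	}
+ *	afs_PutFakeStat(&fakestate);
+ */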
+
+/*
+ * afs_EvalFakeStat_int
+ *
+ * The actual implementation of afs_EvalFakeStat and afs_TryEvalFakeStat,
+ * which is called by those wrapper functions.
+ *
+ * Only issues RPCs if canblock is non-zero.
+ */
+static int
+afs_EvalFakeStat_int(avcp, state, areq, canblock)
+ struct vcache **avcp;
+ struct afs_fakestat_state *state;
+ struct vrequest *areq;
+ int canblock;
+{
+ struct vcache *tvc, *root_vp;
+ struct volume *tvolp = NULL;
+ int code = 0;
+
+ osi_Assert(state->valid == 1);
+ osi_Assert(state->did_eval == 0);
+ state->did_eval = 1;
+ if (!afs_fakestat_enable)
+ return 0;
+ tvc = *avcp;
+ if (tvc->mvstat != 1)
+ return 0;
+
+ /* Is the call to VerifyVCache really necessary? */
+ code = afs_VerifyVCache(tvc, areq);
+ if (code)
+ goto done;
+ if (canblock) {
+ ObtainWriteLock(&tvc->lock, 599);
+ code = EvalMountPoint(tvc, NULL, &tvolp, areq);
+ ReleaseWriteLock(&tvc->lock);
+ if (code)
+ goto done;
+ if (tvolp) {
+ tvolp->dotdot = tvc->fid;
+ tvolp->dotdot.Fid.Vnode = tvc->parentVnode;
+ tvolp->dotdot.Fid.Unique = tvc->parentUnique;
+ }
+ }
+ if (tvc->mvid && (tvc->states & CMValid)) {
+ if (!canblock) {
+ afs_int32 retry;
+
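+ /* Cache-only lookup of the volume root. afs_FindVCache sets retry
+ * when the entry could not be held safely under afs_xvcache; in that
+ * case drop the reference and scan again. */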
+ do {
+ retry = 0;
+ ObtainWriteLock(&afs_xvcache, 597);
+ root_vp = afs_FindVCache(tvc->mvid, 0, 0, &retry, 0);
+ if (root_vp && retry) {
+ ReleaseWriteLock(&afs_xvcache);
+ afs_PutVCache(root_vp, 0);
+ }
+ } while (root_vp && retry);
+ ReleaseWriteLock(&afs_xvcache);
+ } else {
+ root_vp = afs_GetVCache(tvc->mvid, areq, NULL, NULL, WRITE_LOCK);
+ }
+ if (!root_vp) {
+ code = canblock ? ENOENT : 0;
+ goto done;
+ }
+ if (tvolp) {
+ /* Is this always kosher? Perhaps we should instead use
+ * NBObtainWriteLock to avoid potential deadlock.
+ */
+ ObtainWriteLock(&root_vp->lock, 598);
+ if (!root_vp->mvid)
+ root_vp->mvid = osi_AllocSmallSpace(sizeof(struct VenusFid));
+ *root_vp->mvid = tvolp->dotdot;
+ ReleaseWriteLock(&root_vp->lock);
+ }
+ state->need_release = 1;
+ state->root_vp = root_vp;
+ *avcp = root_vp;
+ code = 0;
+ } else {
+ code = canblock ? ENOENT : 0;
+ }
+
+done:
+ if (tvolp)
+ afs_PutVolume(tvolp, WRITE_LOCK);
+ return code;
+}
+
+/*
+ * afs_EvalFakeStat
+ *
+ * Automatically does the equivalent of EvalMountPoint for vcache entries
+ * which are mount points. Remembers enough state to properly release
+ * the volume root vcache when afs_PutFakeStat() is called.
+ *
+ * The state variable must be initialized by afs_InitFakeStat() beforehand.
+ *
+ * Returns 0 when everything succeeds and *avcp points to the vcache entry
+ * that should be used for the real vnode operation. Returns non-zero if
+ * something goes wrong and the error code should be returned to the user.
+ */
+int
+afs_EvalFakeStat(avcp, state, areq)
+ struct vcache **avcp;
+ struct afs_fakestat_state *state;
+ struct vrequest *areq;
+{
+ return afs_EvalFakeStat_int(avcp, state, areq, 1);
+}
+
+/*
+ * afs_TryEvalFakeStat
+ *
+ * Same as afs_EvalFakeStat, but tries not to talk to remote servers
+ * and only evaluates the mount point if all the data is already in
+ * local caches.
+ *
+ * Returns 0 if everything succeeds and *avcp points to a valid
+ * vcache entry (possibly evaluated).
+ */
+int
+afs_TryEvalFakeStat(avcp, state, areq)
+ struct vcache **avcp;
+ struct afs_fakestat_state *state;
+ struct vrequest *areq;
+{
+ return afs_EvalFakeStat_int(avcp, state, areq, 0);
+}
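+
+/* Illustrative use, mirroring the afs_lookup change below, where
+ * probe-heavy names (e.g. the MacOS Finder's .DS_Store) are answered
+ * from local caches rather than forcing mount point evaluation:
+ *
+ *	if (tryEvalOnly)
+ *		code = afs_TryEvalFakeStat(&adp, &fakestate, &treq);
+ *	else
+ *		code = afs_EvalFakeStat(&adp, &fakestate, &treq);
+ *	if (tryEvalOnly && adp->mvstat == 1)
+ *		code = ENOENT;
+ */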
+
+/*
+ * afs_PutFakeStat
+ *
+ * Perform any necessary cleanup at the end of a vnode op, given that
+ * afs_InitFakeStat was previously called with this state.
+ */
+void
+afs_PutFakeStat(state)
+ struct afs_fakestat_state *state;
+{
+ osi_Assert(state->valid == 1);
+ if (state->need_release)
+ afs_PutVCache(state->root_vp, 0);
+ state->valid = 0;
+}
afs_ENameOK(aname)
register char *aname; {
struct dcache *dcp; /* chunk containing the dir block */
char *statMemp; /* status memory block */
char *cbfMemp; /* callback and fid memory block */
- long temp; /* temp for holding chunk length, &c. */
+ afs_size_t temp; /* temp for holding chunk length, &c. */
struct AFSFid *fidsp; /* file IDs we're collecting */
struct AFSCallBack *cbsp; /* call back pointers */
struct AFSCallBack *tcbp; /* temp callback ptr */
long startTime; /* time we started the call,
* for callback expiration base
*/
- int statSeqNo; /* Valued of file size to detect races */
+ afs_size_t statSeqNo; /* value of file size used to detect races */
int code; /* error code */
long newIndex; /* new index in the dir */
struct DirEntry *dirEntryp; /* dir entry we are examining */
code = afs_VerifyVCache(adp, areqp);
if (code) goto done;
- dcp = afs_GetDCache(adp, 0, areqp, &temp, &temp, 1);
+ dcp = afs_GetDCache(adp, (afs_size_t) 0, areqp, &temp, &temp, 1);
if (!dcp) {
code = ENOENT;
goto done;
/* lock the directory cache entry */
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&dcp->lock);
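+ /* lock ordering: the vcache lock (adp) is taken before the dcache
+ * lock (dcp), and both are dropped, dcache first, before sleeping */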
/*
* Make sure that the data in the cache is current. There are two
* 2. The cache data is no longer valid
*/
while ((adp->states & CStatd)
- && (dcp->flags & DFFetching)
+ && (dcp->dflags & DFFetching)
&& hsame(adp->m.DataVersion, dcp->f.versionNo)) {
- dcp->flags |= DFWaiting;
+ afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
+ ICL_TYPE_STRING, __FILE__,
+ ICL_TYPE_INT32, __LINE__,
+ ICL_TYPE_POINTER, dcp,
+ ICL_TYPE_INT32, dcp->dflags);
+ ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
afs_osi_Sleep(&dcp->validPos);
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&dcp->lock);
}
if (!(adp->states & CStatd)
|| !hsame(adp->m.DataVersion, dcp->f.versionNo)) {
+ ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
afs_PutDCache(dcp);
goto tagain;
tvcp = afs_FindVCache(&tfid, 0, 0, &retry, 0 /* no stats | LRU */);
if (tvcp && retry) {
ReleaseWriteLock(&afs_xvcache);
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
}
} while (tvcp && retry);
if (!tvcp) { /* otherwise, create manually */
tvcp->m.Length = statSeqNo;
fidIndex++;
}
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
} /* if dir vnode has non-zero entry */
/* move to the next dir entry by adding in the # of entries
} /* while loop over all dir entries */
/* now release the dir lock and prepare to make the bulk RPC */
+ ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
/* release the chunk */
#ifdef RX_ENABLE_LOCKS
AFS_GUNLOCK();
#endif /* RX_ENABLE_LOCKS */
- code = RXAFS_InlineBulkStatus(tcp->id, &fidParm, &statParm,
- &cbParm, &volSync);
- if (code == RXGEN_OPCODE) {
+
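+ /* Prefer InlineBulkStatus; if this server has already failed it with
+ * RXGEN_OPCODE, we remember that in SNO_INLINEBULK and go straight to
+ * the plain BulkStatus RPC. */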
+ if (!(tcp->srvr->server->flags & SNO_INLINEBULK)) {
+ code = RXAFS_InlineBulkStatus(tcp->id, &fidParm, &statParm,
+ &cbParm, &volSync);
+ if (code == RXGEN_OPCODE) {
+ tcp->srvr->server->flags |= SNO_INLINEBULK;
+ inlinebulk = 0;
+ code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm,
+ &cbParm, &volSync);
+ } else
+ inlinebulk = 1;
+ } else {
+ inlinebulk = 0;
code = RXAFS_BulkStatus(tcp->id, &fidParm, &statParm, &cbParm,
&volSync);
- inlinebulk=0;
- } else {
- inlinebulk=1;
}
-
#ifdef RX_ENABLE_LOCKS
AFS_GLOCK();
#endif /* RX_ENABLE_LOCKS */
if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
flagIndex++;
ReleaseWriteLock(&tvcp->lock);
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
continue;
}
flagIndex++;
ReleaseWriteLock(&tvcp->lock);
ReleaseWriteLock(&afs_xcbhash);
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
continue;
}
ReleaseWriteLock(&tvcp->lock);
/* finally, we're done with the entry */
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
} /* for all files we got back */
/* finally return the pointer into the LRU queue */
- afs_PutVCache(lruvcp);
+ afs_PutVCache(lruvcp, 0);
done:
/* Be sure to turn off the CBulkFetching flags */
tvcp->states &= ~CBulkFetching;
}
if (tvcp != NULL) {
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
}
}
if ( volp )
afs_PutVolume(volp, READ_LOCK);
/* If we did the InlineBulk RPC pull out the return code */
- if (inlinebulk && (&statsp[0])->errorCode) {
- afs_Analyze(tcp, (&statsp[0])->errorCode, &adp->fid, areqp,
- AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK,
- (struct cell *)0);
- code = (&statsp[0])->errorCode;
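+ /* With InlineBulkStatus a server-side failure comes back in the first
+ * status entry's errorCode rather than as the RPC return value; with
+ * plain BulkStatus the call as a whole is treated as a success here. */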
+ if (inlinebulk) {
+ if ((&statsp[0])->errorCode) {
+ afs_Analyze(tcp, (&statsp[0])->errorCode, &adp->fid, areqp,
+ AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK,
+ (struct cell *)0);
+ code = (&statsp[0])->errorCode;
+ }
+ } else {
+ code = 0;
}
osi_FreeLargeSpace(statMemp);
osi_FreeLargeSpace(cbfMemp);
int no_read_access = 0;
struct sysname_info sysState; /* used only for @sys checking */
int dynrootRetry = 1;
+ struct afs_fakestat_state fakestate;
+ int tryEvalOnly = 0;
AFS_STATCNT(afs_lookup);
+ afs_InitFakeStat(&fakestate);
+
+ if (code = afs_InitReq(&treq, acred))
+ goto done;
+
#ifdef AFS_OSF_ENV
- ndp->ni_dvp = (struct vnode *)adp;
+ ndp->ni_dvp = AFSTOV(adp);
memcpy(aname, ndp->ni_ptr, ndp->ni_namelen);
aname[ndp->ni_namelen] = '\0';
#endif /* AFS_OSF_ENV */
- *avcp = (struct vcache *) 0; /* Since some callers don't initialize it */
+#if defined(AFS_DARWIN_ENV)
+ /* Workaround for MacOSX Finder, which tries to look for
+ * .DS_Store and Contents under every directory.
+ */
+ if (afs_fakestat_enable && adp->mvstat == 1) {
+ if (strcmp(aname, ".DS_Store") == 0)
+ tryEvalOnly = 1;
+ if (strcmp(aname, "Contents") == 0)
+ tryEvalOnly = 1;
+ }
+#endif
- if (code = afs_InitReq(&treq, acred)) {
+ if (tryEvalOnly)
+ code = afs_TryEvalFakeStat(&adp, &fakestate, &treq);
+ else
+ code = afs_EvalFakeStat(&adp, &fakestate, &treq);
+ if (tryEvalOnly && adp->mvstat == 1)
+ code = ENOENT;
+ if (code)
goto done;
- }
+
+ *avcp = (struct vcache *) 0; /* Since some callers don't initialize it */
/* come back to here if we encounter a non-existent object in a read-only
volume's directory */
#ifdef AFS_OSF_ENV
extern struct vcache *afs_globalVp;
if (adp == afs_globalVp) {
- struct vnode *rvp = (struct vnode *)adp;
+ struct vnode *rvp = AFSTOV(adp);
/*
ndp->ni_vp = rvp->v_vfsp->vfs_vnodecovered;
ndp->ni_dvp = ndp->ni_vp;
/* Check for read access as well. We need read access in order to
stat files, but not to stat subdirectories. */
- if (!afs_AccessOK(adp, PRSFS_READ, &treq, CHECK_MODE_BITS))
+ if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS))
no_read_access = 1;
/* special case lookup of ".". Can we check for it sooner in this code,
{
register struct dcache *tdc;
- afs_int32 dirOffset, dirLen;
+ afs_size_t dirOffset, dirLen;
ino_t theDir;
struct VenusFid tfid;
/* now we have to lookup the next fid */
- tdc = afs_GetDCache(adp, 0, &treq, &dirOffset, &dirLen, 1);
+ tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &dirOffset, &dirLen, 1);
if (!tdc) {
*avcp = (struct vcache *)0; /* redundant, but harmless */
code = EIO;
/* now we will just call dir package with appropriate inode.
Dirs are always fetched in their entirety for now */
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&tdc->lock);
/*
* Make sure that the data in the cache is current. There are two
* 2. The cache data is no longer valid
*/
while ((adp->states & CStatd)
- && (tdc->flags & DFFetching)
+ && (tdc->dflags & DFFetching)
&& hsame(adp->m.DataVersion, tdc->f.versionNo)) {
- tdc->flags |= DFWaiting;
+ ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&adp->lock);
afs_osi_Sleep(&tdc->validPos);
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&tdc->lock);
}
if (!(adp->states & CStatd)
|| !hsame(adp->m.DataVersion, tdc->f.versionNo)) {
+ ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&adp->lock);
afs_PutDCache(tdc);
goto redo;
}
tname = sysState.name;
+ ReleaseReadLock(&tdc->lock);
afs_PutDCache(tdc);
if (code == ENOENT && afs_IsDynroot(adp) && dynrootRetry) {
/* if the vcache isn't usable, release it */
if (tvc && !(tvc->states & CStatd)) {
- afs_PutVCache(tvc);
+ afs_PutVCache(tvc, 0);
tvc = (struct vcache *) 0;
}
} else {
if (!(flags & AFS_LOOKUP_NOEVAL))
/* don't eval mount points */
#endif /* UKERNEL && AFS_WEB_ENHANCEMENTS */
- if (tvc->mvstat == 1) {
- /* a mt point, possibly unevaluated */
- struct volume *tvolp;
+ if (!afs_fakestat_enable && tvc->mvstat == 1) {
+ /* a mt point, possibly unevaluated */
+ struct volume *tvolp;
ObtainWriteLock(&tvc->lock,133);
code = EvalMountPoint(tvc, adp, &tvolp, &treq);
if (!FidCmp(&(tvc->fid), &(adp->fid))) {
afs_PutVCache(*avcp, WRITE_LOCK);
*avcp = NULL;
+ afs_PutFakeStat(&fakestate);
return afs_CheckCode(EISDIR, &treq, 18);
}
}
/* So Linux inode cache is up to date. */
code = afs_VerifyVCache(tvc, &treq);
#else
+ afs_PutFakeStat(&fakestate);
return 0; /* can't have been any errors if hit and !code */
#endif
}
*avcp = (struct vcache *)0;
}
+ afs_PutFakeStat(&fakestate);
return code;
}