struct VenusFid tfid;
struct cell *tcell;
char *cpos, *volnamep = NULL;
- char *buf, *endptr;
+ char *endptr;
afs_int32 prefetch; /* 1=>None 2=>RO 3=>BK */
afs_int32 mtptCell, assocCell = 0, hac = 0;
afs_int32 samecell, roname, len;
WRITE_LOCK);
}
- /* Still not found. If we are looking for the RO, then perhaps the RW
- * doesn't exist? Try adding ".readonly" to volname and look for that.
- * Don't know why we do this. Would have still found it in above call - jpm.
- */
- if (!tvp && (prefetch == 2) && len < AFS_SMALLOCSIZ - 10) {
- buf = osi_AllocSmallSpace(len + 10);
-
- strcpy(buf, volnamep);
- afs_strcat(buf, ".readonly");
-
- tvp = afs_GetVolumeByName(buf, mtptCell, 1, areq, WRITE_LOCK);
-
- /* Try the associated linked cell if failed */
- if (!tvp && hac && areq->volumeError) {
- tvp = afs_GetVolumeByName(buf, assocCell, 1, areq, WRITE_LOCK);
- }
- osi_FreeSmallSpace(buf);
- }
/* done with volname */
if (cpos)
*cpos = ':';
afs_uint32 avnoid, auniq;
AFS_STATCNT(EvalMountPoint);
-#ifdef notdef
- if (avc->mvid.target_root && (avc->f.states & CMValid))
- return 0; /* done while racing */
-#endif
*avolpp = NULL;
code = afs_HandleLink(avc, areq);
if (code)
do {
retry = 0;
- ObtainWriteLock(&afs_xvcache, 597);
- root_vp = afs_FindVCache(tvc->mvid.target_root, &retry, IS_WLOCK);
+ ObtainReadLock(&afs_xvcache);
+ root_vp = afs_FindVCache(tvc->mvid.target_root, &retry, 0);
if (root_vp && retry) {
- ReleaseWriteLock(&afs_xvcache);
+ ReleaseReadLock(&afs_xvcache);
afs_PutVCache(root_vp);
}
} while (root_vp && retry);
- ReleaseWriteLock(&afs_xvcache);
+ ReleaseReadLock(&afs_xvcache);
} else {
- root_vp = afs_GetVCache(tvc->mvid.target_root, areq, NULL, NULL);
+ root_vp = afs_GetVCache(tvc->mvid.target_root, areq);
}
if (!root_vp) {
- code = canblock ? ENOENT : 0;
+ code = canblock ? EIO : 0;
goto done;
}
#ifdef AFS_DARWIN80_ENV
*avcp = root_vp;
code = 0;
} else {
- code = canblock ? ENOENT : 0;
+ code = canblock ? EIO : 0;
}
done:
return 0;
}
-extern int BlobScan(struct dcache * afile, afs_int32 ablob);
+extern int BlobScan(struct dcache * afile, afs_int32 ablob, afs_int32 *ablobOut);
/* called with an unlocked directory and directory cookie. Areqp
* describes who is making the call.
long startTime; /* time we started the call,
* for callback expiration base
*/
+#if defined(AFS_DARWIN_ENV)
int ftype[4] = {VNON, VREG, VDIR, VLNK}; /* verify type is as expected */
+#endif
afs_size_t statSeqNo = 0; /* Valued of file size to detect races */
int code; /* error code */
- long newIndex; /* new index in the dir */
+ afs_int32 newIndex; /* new index in the dir */
struct DirBuffer entry; /* Buffer for dir manipulation */
struct DirEntry *dirEntryp; /* dir entry we are examining */
int i;
struct VenusFid dotdot = {0, {0, 0, 0}};
int flagIndex = 0; /* First file with bulk fetch flag set */
struct rx_connection *rxconn;
+ int attempt_i;
XSTATS_DECLS;
dotdot.Cell = 0;
dotdot.Fid.Unique = 0;
dcp = afs_GetDCache(adp, (afs_size_t) 0, areqp, &temp, &temp, 1);
if (!dcp) {
- code = ENOENT;
+ code = EIO;
goto done2;
}
*/
while ((adp->f.states & CStatd)
&& (dcp->dflags & DFFetching)
- && hsame(adp->f.m.DataVersion, dcp->f.versionNo)) {
+ && afs_IsDCacheFresh(dcp, adp)) {
afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT, ICL_TYPE_STRING,
__FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER, dcp,
ICL_TYPE_INT32, dcp->dflags);
ObtainReadLock(&dcp->lock);
}
if (!(adp->f.states & CStatd)
- || !hsame(adp->f.m.DataVersion, dcp->f.versionNo)) {
+ || !afs_IsDCacheFresh(dcp, adp)) {
ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
afs_PutDCache(dcp);
/* now we have dir data in the cache, so scan the dir page */
fidIndex = 0;
flagIndex = 0;
- while (1) { /* Should probably have some constant bound */
+
+ /*
+ * Only examine at most the next 'nentries*4' entries to find dir entries
+ * to stat. This is an arbitrary limit that we set so we don't waste time
+ * scanning an entire dir that contains stat'd entries. For example, if a
+ * dir contains 10k entries, and all or almost all of them are stat'd, then
+ * we'll examine 10k entries for no benefit. For each entry, we run
+ * afs_FindVCache, and grab and release afs_xvcache; doing this e.g. 10k
+ * times can have significant impact if the client is under a lot of load.
+ */
+ for (attempt_i = 0; attempt_i < nentries * 4; attempt_i++) {
+
/* look for first safe entry to examine in the directory. BlobScan
* looks for a the 1st allocated dir after the dirCookie slot.
*/
- newIndex = BlobScan(dcp, (dirCookie >> 5));
- if (newIndex == 0)
+ code = BlobScan(dcp, (dirCookie >> 5), &newIndex);
+ if (code || newIndex == 0)
break;
/* remember the updated directory cookie */
tfid.Fid.Unique = ntohl(dirEntryp->fid.vunique);
do {
retry = 0;
- ObtainWriteLock(&afs_xvcache, 130);
- tvcp = afs_FindVCache(&tfid, &retry, IS_WLOCK /* no stats | LRU */ );
+ ObtainSharedLock(&afs_xvcache, 130);
+ tvcp = afs_FindVCache(&tfid, &retry, IS_SLOCK /* no stats | LRU */ );
if (tvcp && retry) {
- ReleaseWriteLock(&afs_xvcache);
+ ReleaseSharedLock(&afs_xvcache);
afs_PutVCache(tvcp);
}
} while (tvcp && retry);
if (!tvcp) { /* otherwise, create manually */
+ UpgradeSToWLock(&afs_xvcache, 129);
tvcp = afs_NewBulkVCache(&tfid, hostp, statSeqNo);
if (tvcp)
{
ReleaseWriteLock(&afs_xvcache);
}
} else {
- ReleaseWriteLock(&afs_xvcache);
+ ReleaseSharedLock(&afs_xvcache);
}
if (!tvcp)
{
if (temp <= 0)
break;
dirCookie += temp;
- } /* while loop over all dir entries */
+ } /* for loop over dir entries */
/* now release the dir lock and prepare to make the bulk RPC */
ReleaseReadLock(&dcp->lock);
retry = 1;
}
#else
- osi_vnhold(lruvcp, &retry);
+ if (osi_vnhold(lruvcp) != 0) {
+ retry = 1;
+ }
#endif
ReleaseReadLock(&afs_xvcache); /* could be read lock */
if (retry)
*/
if (!(tvcp->f.states & CBulkFetching)
|| (tvcp->f.m.Length != statSeqNo)
- || (ftype[(&statsp[i])->FileType] != vType(tvcp))) {
+#if defined(AFS_DARWIN_ENV)
+ || (ftype[(&statsp[i])->FileType] != vType(tvcp))
+#endif
+ ) {
flagIndex++;
ReleaseWriteLock(&tvcp->lock);
afs_PutVCache(tvcp);
tvcp->f.states |= CStatd;
afs_QueueCallback(tvcp, CBHash(3600), volp);
} else {
- tvcp->callback = 0;
- tvcp->f.states &= ~(CStatd | CUnique);
- afs_DequeueCallback(tvcp);
- if ((tvcp->f.states & CForeign) || (vType(tvcp) == VDIR))
- osi_dnlc_purgedp(tvcp); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(tvcp,
+ AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
+ CUnique);
}
#ifdef AFS_DARWIN80_ENV
/* reclaim->FlushVCache will need xcbhash */
code = afs_darwin_finalizevnode(tvcp, NULL, NULL, 0, 1);
if (code) {
/* It's gonna get recycled - shouldn't happen */
- tvcp->callback = 0;
- tvcp->f.states &= ~(CStatd | CUnique);
- afs_DequeueCallback(tvcp);
- if ((tvcp->f.states & CForeign) || (vType(tvcp) == VDIR))
- osi_dnlc_purgedp(tvcp); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(tvcp,
+ AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
+ CUnique);
} else
/* re-acquire the usecount that finalizevnode disposed of */
vnode_ref(AFSTOV(tvcp));
return code;
}
-/* was: (AFS_DEC_ENV) || defined(AFS_OSF30_ENV) || defined(AFS_NCR_ENV) */
#ifdef AFS_DARWIN80_ENV
int AFSDOBULK = 0;
-#else
-static int AFSDOBULK = 1;
#endif
+/*
+ * afs_ShouldTryBulkStat
+ *
+ * Decide whether it is worth attempting a bulk-stat prefetch of the
+ * entries in directory 'adp' during a lookup.
+ *
+ * Returns 1 if bulk stat should be attempted, 0 if it should be skipped.
+ *
+ * Bulk stat is skipped when any of the following hold (each check below
+ * states its own reason):
+ */
+static int
+afs_ShouldTryBulkStat(struct vcache *adp)
+{
+#ifdef AFS_DARWIN80_ENV
+    /* On Darwin, bulk stat is off unless the AFSDOBULK switch is set. */
+    if (!AFSDOBULK) {
+	return 0;
+    }
+#endif
+    if (AFS_IS_DISCONNECTED) {
+	/* We can't prefetch entries if we're offline. */
+	return 0;
+    }
+    if (adp->opens < 1) {
+	/* Don't bother prefetching entries if nobody is holding the dir open
+	 * while we're doing a lookup. */
+	return 0;
+    }
+    if ((adp->f.states & CForeign)) {
+	/* Don't bulkstat for dfs xlator dirs. */
+	return 0;
+    }
+    if (afs_IsDynroot(adp)) {
+	/* Don't prefetch dynroot entries; that's pointless, since we generate
+	 * those locally. */
+	return 0;
+    }
+    if (afs_InReadDir(adp)) {
+	/* Don't bulkstat if we're in the middle of servicing a readdir() in
+	 * the same process. */
+	return 0;
+    }
+    return 1;
+}
+
static_inline int
osi_lookup_isdot(const char *aname)
{
int dynrootRetry = 1;
struct afs_fakestat_state fakestate;
int tryEvalOnly = 0;
+
+ /* Don't allow ENOENT errors, except for a specific code path where
+ * 'enoent_prohibited' is cleared below. */
+ int enoent_prohibited = 1;
+
OSI_VC_CONVERT(adp);
AFS_STATCNT(afs_lookup);
/*printf("Code is %d\n", code);*/
if (tryEvalOnly && adp->mvstat == AFS_MVSTAT_MTPT)
- code = ENOENT;
+ code = ENODEV;
if (code)
goto done;
}
/* otherwise we have the fid here, so we use it */
/*printf("Getting vcache\n");*/
- tvc = afs_GetVCache(adp->mvid.parent, treq, NULL, NULL);
+ tvc = afs_GetVCache(adp->mvid.parent, treq);
afs_Trace3(afs_iclSetp, CM_TRACE_GETVCDOTDOT, ICL_TYPE_FID, adp->mvid.parent,
ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32, code);
*avcp = tvc;
- code = (tvc ? 0 : ENOENT);
+ code = (tvc ? 0 : EIO);
hit = 1;
if (tvc && !VREFCOUNT_GT(tvc, 0)) {
osi_Panic("TT1");
*/
if (osi_lookup_isdot(aname)) { /* special case */
ObtainReadLock(&afs_xvcache);
- osi_vnhold(adp, 0);
+ if (osi_vnhold(adp) != 0) {
+ ReleaseReadLock(&afs_xvcache);
+ code = EIO;
+ goto done;
+ }
ReleaseReadLock(&afs_xvcache);
#ifdef AFS_DARWIN80_ENV
vnode_get(AFSTOV(adp));
aname[0] == '.' && aname[1] == '.' && !aname[2]) {
ObtainReadLock(&afs_xvcache);
- osi_vnhold(afs_globalVp, 0);
+ if (osi_vnhold(afs_globalVp) != 0) {
+ ReleaseReadLock(&afs_xvcache);
+ code = EIO;
+ goto done;
+ }
ReleaseReadLock(&afs_xvcache);
#ifdef AFS_DARWIN80_ENV
vnode_get(AFSTOV(afs_globalVp));
tfid.Fid.Vnode = VNUM_FROM_TYPEID(VN_TYPE_MOUNT, cellidx << 2);
tfid.Fid.Unique = volid;
}
- *avcp = tvc = afs_GetVCache(&tfid, treq, NULL, NULL);
- code = (tvc ? 0 : ENOENT);
+ *avcp = tvc = afs_GetVCache(&tfid, treq);
+ code = (tvc ? 0 : EIO);
hit = 1;
goto done;
}
struct VenusFid tfid;
afs_GetDynrootMountFid(&tfid);
- *avcp = tvc = afs_GetVCache(&tfid, treq, NULL, NULL);
+ *avcp = tvc = afs_GetVCache(&tfid, treq);
code = 0;
hit = 1;
goto done;
if (!afs_InReadDir(adp)) {
while ((adp->f.states & CStatd)
&& (tdc->dflags & DFFetching)
- && hsame(adp->f.m.DataVersion, tdc->f.versionNo)) {
+ && afs_IsDCacheFresh(tdc, adp)) {
ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&adp->lock);
afs_osi_Sleep(&tdc->validPos);
ObtainReadLock(&tdc->lock);
}
if (!(adp->f.states & CStatd)
- || !hsame(adp->f.m.DataVersion, tdc->f.versionNo)) {
+ || !afs_IsDCacheFresh(tdc, adp)) {
ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&adp->lock);
afs_PutDCache(tdc);
ICL_TYPE_INT32, code);
if (code) {
- if (code != ENOENT) {
- /*printf("LOOKUP dirLookupOff -> %d\n", code);*/
+ if (code == ENOENT) {
+ /* The target name really doesn't exist (according to
+ * afs_dir_LookupOffset, anyway). */
+ enoent_prohibited = 0;
}
goto done;
}
/* prefetch some entries, if the dir is currently open. The variable
* dirCookie tells us where to start prefetching from.
*/
- if (!AFS_IS_DISCONNECTED &&
- AFSDOBULK && adp->opens > 0 && !(adp->f.states & CForeign)
- && !afs_IsDynroot(adp) && !afs_InReadDir(adp)) {
+ if (afs_ShouldTryBulkStat(adp)) {
afs_int32 retry;
/* if the entry is not in the cache, or is in the cache,
* but hasn't been statd, then do a bulk stat operation.
* the file has not yet been looked up.
*/
if (!tvc) {
- afs_int32 cached = 0;
if (!tfid.Fid.Unique && (adp->f.states & CForeign)) {
- tvc = afs_LookupVCache(&tfid, treq, &cached, adp, tname);
+ tvc = afs_LookupVCache(&tfid, treq, adp, tname);
}
if (!tvc && !bulkcode) { /* lookup failed or wasn't called */
- tvc = afs_GetVCache(&tfid, treq, &cached, NULL);
+ tvc = afs_GetVCache(&tfid, treq);
}
} /* if !tvc */
} /* sub-block just to reduce stack usage */
if (tvolp && (tvolp->states & VForeign)) {
/* XXXX tvolp has ref cnt on but not locked! XXX */
tvc =
- afs_GetRootVCache(tvc->mvid.target_root, treq, NULL, tvolp);
+ afs_GetRootVCache(tvc->mvid.target_root, treq, tvolp);
} else {
- tvc = afs_GetVCache(tvc->mvid.target_root, treq, NULL, NULL);
+ tvc = afs_GetVCache(tvc->mvid.target_root, treq);
}
afs_PutVCache(uvc); /* we're done with it */
if (!tvc) {
- code = ENOENT;
+ code = EIO;
if (tvolp) {
afs_PutVolume(tvolp, WRITE_LOCK);
}
}
} else {
afs_PutVCache(tvc);
- code = ENOENT;
+ code = ENODEV;
if (tvolp)
afs_PutVolume(tvolp, WRITE_LOCK);
goto done;
if (tv) {
if (tv->states & VRO) {
pass = 1; /* try this *once* */
- ObtainWriteLock(&afs_xcbhash, 495);
- afs_DequeueCallback(adp);
- /* re-stat to get later version */
- adp->f.states &= ~CStatd;
- ReleaseWriteLock(&afs_xcbhash);
- osi_dnlc_purgedp(adp);
+ /* re-stat to get later version */
+ afs_StaleVCache(adp);
afs_PutVolume(tv, READ_LOCK);
goto redo;
}
afs_PutVolume(tv, READ_LOCK);
}
}
- code = ENOENT;
+ code = EIO;
} else {
code = ENETDOWN;
}
*/
*avcp = NULL;
}
+ if (code == ENOENT && enoent_prohibited) {
+ /*
+ * We got an ENOENT error, but we didn't get it while looking up the
+ * dir entry in the relevant dir blob. That means we likely hit some
+ * other internal error; don't allow us to return ENOENT in this case,
+ * since some platforms cache ENOENT errors, and the target path name
+ * may actually exist.
+ */
+ code = EIO;
+ }
afs_PutFakeStat(&fakestate);
afs_DestroyReq(treq);