do {
retry = 0;
- ObtainWriteLock(&afs_xvcache, 597);
- root_vp = afs_FindVCache(tvc->mvid.target_root, &retry, IS_WLOCK);
+ ObtainReadLock(&afs_xvcache);
+ root_vp = afs_FindVCache(tvc->mvid.target_root, &retry, 0);
if (root_vp && retry) {
- ReleaseWriteLock(&afs_xvcache);
+ ReleaseReadLock(&afs_xvcache);
afs_PutVCache(root_vp);
}
} while (root_vp && retry);
- ReleaseWriteLock(&afs_xvcache);
+ ReleaseReadLock(&afs_xvcache);
} else {
root_vp = afs_GetVCache(tvc->mvid.target_root, areq);
}
struct VenusFid dotdot = {0, {0, 0, 0}};
int flagIndex = 0; /* First file with bulk fetch flag set */
struct rx_connection *rxconn;
+ int attempt_i;
XSTATS_DECLS;
dotdot.Cell = 0;
dotdot.Fid.Unique = 0;
/* now we have dir data in the cache, so scan the dir page */
fidIndex = 0;
flagIndex = 0;
- while (1) { /* Should probably have some constant bound */
+
+ /*
+ * Only examine at most the next 'nentries*4' entries to find dir entries
+ * to stat. This is an arbitrary limit that we set so we don't waste time
+ * scanning an entire dir that contains stat'd entries. For example, if a
+ * dir contains 10k entries, and all or almost all of them are stat'd, then
+ * we'll examine 10k entries for no benefit. For each entry, we run
+ * afs_FindVCache, and grab and release afs_xvcache; doing this e.g. 10k
+ * times can have significant impact if the client is under a lot of load.
+ */
+ for (attempt_i = 0; attempt_i < nentries * 4; attempt_i++) {
+
/* look for first safe entry to examine in the directory. BlobScan
* looks for a the 1st allocated dir after the dirCookie slot.
*/
tfid.Fid.Unique = ntohl(dirEntryp->fid.vunique);
do {
retry = 0;
- ObtainWriteLock(&afs_xvcache, 130);
- tvcp = afs_FindVCache(&tfid, &retry, IS_WLOCK /* no stats | LRU */ );
+ ObtainSharedLock(&afs_xvcache, 130);
+ tvcp = afs_FindVCache(&tfid, &retry, IS_SLOCK /* no stats | LRU */ );
if (tvcp && retry) {
- ReleaseWriteLock(&afs_xvcache);
+ ReleaseSharedLock(&afs_xvcache);
afs_PutVCache(tvcp);
}
} while (tvcp && retry);
if (!tvcp) { /* otherwise, create manually */
+ UpgradeSToWLock(&afs_xvcache, 129);
tvcp = afs_NewBulkVCache(&tfid, hostp, statSeqNo);
if (tvcp)
{
ReleaseWriteLock(&afs_xvcache);
}
} else {
- ReleaseWriteLock(&afs_xvcache);
+ ReleaseSharedLock(&afs_xvcache);
}
if (!tvcp)
{
if (temp <= 0)
break;
dirCookie += temp;
- } /* while loop over all dir entries */
+ } /* for loop over dir entries */
/* now release the dir lock and prepare to make the bulk RPC */
ReleaseReadLock(&dcp->lock);
return code;
}
-/* was: (AFS_DEC_ENV) || defined(AFS_OSF30_ENV) || defined(AFS_NCR_ENV) */
#ifdef AFS_DARWIN80_ENV
int AFSDOBULK = 0;
-#else
-static int AFSDOBULK = 1;
#endif
+/*
+ * afs_ShouldTryBulkStat
+ *
+ * Decide whether a bulk stat (prefetch of status info for directory
+ * entries) should be attempted for the given directory.
+ *
+ * \param adp  vcache of the directory being looked up in.
+ *
+ * \return 1 if bulk stat should be tried; 0 if any disqualifying
+ *         condition below holds.
+ */
+static int
+afs_ShouldTryBulkStat(struct vcache *adp)
+{
+#ifdef AFS_DARWIN80_ENV
+ if (!AFSDOBULK) {
+ /* Bulk stat globally disabled via the AFSDOBULK knob on Darwin. */
+ return 0;
+ }
+#endif
+ if (AFS_IS_DISCONNECTED) {
+ /* We can't prefetch entries if we're offline. */
+ return 0;
+ }
+ if (adp->opens < 1) {
+ /* Don't bother prefetching entries if nobody is holding the dir open
+ * while we're doing a lookup. */
+ return 0;
+ }
+ if ((adp->f.states & CForeign)) {
+ /* Don't bulkstat for dfs xlator dirs. */
+ return 0;
+ }
+ if (afs_IsDynroot(adp)) {
+ /* Don't prefetch dynroot entries; that's pointless, since we generate
+ * those locally. */
+ return 0;
+ }
+ if (afs_InReadDir(adp)) {
+ /* Don't bulkstat if we're in the middle of servicing a readdir() in
+ * the same process. */
+ return 0;
+ }
+ return 1;
+}
+
static_inline int
osi_lookup_isdot(const char *aname)
{
/* prefetch some entries, if the dir is currently open. The variable
* dirCookie tells us where to start prefetching from.
*/
- if (!AFS_IS_DISCONNECTED &&
- AFSDOBULK && adp->opens > 0 && !(adp->f.states & CForeign)
- && !afs_IsDynroot(adp) && !afs_InReadDir(adp)) {
+ if (afs_ShouldTryBulkStat(adp)) {
afs_int32 retry;
/* if the entry is not in the cache, or is in the cache,
* but hasn't been statd, then do a bulk stat operation.