/* lock the directory cache entry */
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&dcp->lock);
/*
 * Make sure that the data in the cache is current. There are two
 * cases we need to worry about:
 * 1. The cache data is being fetched by another process.
 * 2. The cache data is no longer valid
 */
while ((adp->states & CStatd)
- && (dcp->flags & DFFetching)
+ && (dcp->dflags & DFFetching)
&& hsame(adp->m.DataVersion, dcp->f.versionNo)) {
- dcp->flags |= DFWaiting;
+ afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
+ ICL_TYPE_STRING, __FILE__,
+ ICL_TYPE_INT32, __LINE__,
+ ICL_TYPE_POINTER, dcp,
+ ICL_TYPE_INT32, dcp->dflags);
+ ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
afs_osi_Sleep(&dcp->validPos);
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&dcp->lock);
}
if (!(adp->states & CStatd)
|| !hsame(adp->m.DataVersion, dcp->f.versionNo)) {
+ ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
afs_PutDCache(dcp);
goto tagain;
tvcp = afs_FindVCache(&tfid, 0, 0, &retry, 0 /* no stats | LRU */);
if (tvcp && retry) {
ReleaseWriteLock(&afs_xvcache);
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
}
} while (tvcp && retry);
if (!tvcp) { /* otherwise, create manually */
tvcp->m.Length = statSeqNo;
fidIndex++;
}
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
} /* if dir vnode has non-zero entry */
/* move to the next dir entry (advance by the # of entries just scanned) */
} /* while loop over all dir entries */
/* now release the dir lock and prepare to make the bulk RPC */
+ ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
/* release the chunk */
if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
flagIndex++;
ReleaseWriteLock(&tvcp->lock);
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
continue;
}
flagIndex++;
ReleaseWriteLock(&tvcp->lock);
ReleaseWriteLock(&afs_xcbhash);
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
continue;
}
ReleaseWriteLock(&tvcp->lock);
/* finally, we're done with the entry */
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
} /* for all files we got back */
/* finally return the pointer into the LRU queue */
- afs_PutVCache(lruvcp);
+ afs_PutVCache(lruvcp, 0);
done:
/* Be sure to turn off the CBulkFetching flags */
tvcp->states &= ~CBulkFetching;
}
if (tvcp != NULL) {
- afs_PutVCache(tvcp);
+ afs_PutVCache(tvcp, 0);
}
}
if ( volp )
AFS_STATCNT(afs_lookup);
#ifdef AFS_OSF_ENV
- ndp->ni_dvp = (struct vnode *)adp;
+ ndp->ni_dvp = AFSTOV(adp);
memcpy(aname, ndp->ni_ptr, ndp->ni_namelen);
aname[ndp->ni_namelen] = '\0';
#endif /* AFS_OSF_ENV */
#ifdef AFS_OSF_ENV
extern struct vcache *afs_globalVp;
if (adp == afs_globalVp) {
- struct vnode *rvp = (struct vnode *)adp;
+ struct vnode *rvp = AFSTOV(adp);
/* adp is the AFS global root: hand back the vnode covered by the AFS mount */
ndp->ni_vp = rvp->v_vfsp->vfs_vnodecovered;
ndp->ni_dvp = ndp->ni_vp;
/* Check for read access as well. We need read access in order to
stat files, but not to stat subdirectories. */
- if (!afs_AccessOK(adp, PRSFS_READ, &treq, CHECK_MODE_BITS))
+ if (!afs_AccessOK(adp, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS))
no_read_access = 1;
/* special case lookup of ".". Can we check for it sooner in this code? */
/* now we will just call dir package with appropriate inode.
Dirs are always fetched in their entirety for now */
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&tdc->lock);
/*
 * Make sure that the data in the cache is current. There are two
 * cases we need to worry about:
 * 1. The cache data is being fetched by another process.
 * 2. The cache data is no longer valid
 */
while ((adp->states & CStatd)
- && (tdc->flags & DFFetching)
+ && (tdc->dflags & DFFetching)
&& hsame(adp->m.DataVersion, tdc->f.versionNo)) {
- tdc->flags |= DFWaiting;
+ ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&adp->lock);
afs_osi_Sleep(&tdc->validPos);
ObtainReadLock(&adp->lock);
+ ObtainReadLock(&tdc->lock);
}
if (!(adp->states & CStatd)
|| !hsame(adp->m.DataVersion, tdc->f.versionNo)) {
+ ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&adp->lock);
afs_PutDCache(tdc);
goto redo;
}
tname = sysState.name;
+ ReleaseReadLock(&tdc->lock);
afs_PutDCache(tdc);
if (code == ENOENT && afs_IsDynroot(adp) && dynrootRetry) {
/* if the vcache isn't usable, release it */
if (tvc && !(tvc->states & CStatd)) {
- afs_PutVCache(tvc);
+ afs_PutVCache(tvc, 0);
tvc = (struct vcache *) 0;
}
} else {