* afs_WriteVCacheDiscon
* afs_SimpleVStat
* afs_ProcessFS
- * TellALittleWhiteLie
* afs_RemoteLookup
* afs_GetVCache
* afs_LookupVCache
/* Forward declarations */
static afs_int32 afs_QueueVCB(struct vcache *avc, int *slept);
+
+/*
+ * The PFlush algorithm makes use of the fact that Fid.Unique is not used in
+ * below hash algorithms. Change it if need be so that flushing algorithm
+ * doesn't move things from one hash chain to another.
+ */
+/* Hash a fid (Volume + Vnode) into the main vcache hash table.  Don't hash on
+ * the cell; our callback-breaking code sometimes fails to compute the cell
+ * correctly, and only scans one hash bucket.  Fid.Unique is deliberately not
+ * mixed in either (see the PFlush note above this function), so changing
+ * Unique never moves a vcache to a different chain. */
+int VCHash(struct VenusFid *fid)
+{
+ return opr_jhash_int2(fid->Fid.Volume, fid->Fid.Vnode, 0) &
+ opr_jhash_mask(VCSIZEBITS);
+}
+/* Hash a fid into the per-volume vcache hash table.  Only Fid.Volume is mixed
+ * in, so every vcache belonging to one volume lands in the same bucket; this
+ * speeds up volume-wide callback processing. */
+int VCHashV(struct VenusFid *fid)
+{
+ return opr_jhash_int(fid->Fid.Volume, 0) & opr_jhash_mask(VCSIZEBITS);
+}
+
/*!
* Generate an index into the hash table for a given Fid.
* \param fid
}
#endif
- if (avc->mvid)
- osi_FreeSmallSpace(avc->mvid);
- avc->mvid = (struct VenusFid *)0;
+ if (avc->mvid.target_root)
+ osi_FreeSmallSpace(avc->mvid.target_root);
+ avc->mvid.target_root = NULL;
if (avc->linkData) {
afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
avc->linkData = NULL;
AFSTOV(avc) = NULL; /* also drop the ptr to vnode */
}
#endif
-#ifdef AFS_SUN510_ENV
+
+#ifdef AFS_SUN511_ENV
+ if (avc->v) {
+ vn_free(avc->v);
+ avc->v = NULL;
+ }
+#elif defined(AFS_SUN510_ENV)
/* As we use private vnodes, cleanup is up to us */
vn_reinit(AFSTOV(avc));
#endif
afs_FreeAllAxs(&(avc->Access));
- ObtainWriteLock(&afs_xcbhash, 460);
- afs_DequeueCallback(avc); /* remove it from queued callbacks list */
- avc->f.states &= ~(CStatd | CUnique);
- ReleaseWriteLock(&afs_xcbhash);
- if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(avc); /* if it (could be) a directory */
- else
- osi_dnlc_purgevp(avc);
+ afs_StaleVCacheFlags(avc, AFS_STALEVC_FILENAME, CUnique);
/* By this point, the vcache has been removed from all global structures
* via which someone could try to use the vcache. It is okay to drop
* afs_xvcache at this point (if *slept is set). */
- if (!afs_shuttingdown)
+ if (afs_shuttingdown == AFS_RUNNING)
afs_QueueVCB(avc, slept);
/*
/* we can't keep trying to push back dirty data forever. Give up. */
afs_InvalidateAllSegments(avc); /* turns off dirty bit */
}
- avc->f.states &= ~CMAPPED; /* mainly used by SunOS 4.0.x */
+ avc->f.states &= ~CMAPPED;
avc->f.states &= ~CDirty; /* Turn it off */
if (avc->f.states & CUnlinked) {
if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
afs_ShakeLooseVCaches(afs_int32 anumber)
{
afs_int32 i, loop;
+ int evicted;
struct vcache *tvc;
struct afs_q *tq, *uq;
int fv_slept, defersleep = 0;
}
fv_slept = 0;
- if (osi_TryEvictVCache(tvc, &fv_slept, defersleep))
+ evicted = osi_TryEvictVCache(tvc, &fv_slept, defersleep);
+ if (evicted) {
anumber--;
+ }
if (fv_slept) {
if (loop++ > 100)
break;
+ if (!evicted) {
+ /*
+ * This vcache was busy and we slept while trying to evict it.
+ * Move this busy vcache to the head of the VLRU so vcaches
+ * following this busy vcache can be evicted during the retry.
+ */
+ QRemove(&tvc->vlruq);
+ QAdd(&VLRU, &tvc->vlruq);
+ }
goto retry; /* start over - may have raced. */
}
if (uq == &VLRU) {
struct server *serverp) {
afs_uint32 slot;
+ afs_hyper_t zero;
slot = avc->diskSlot;
osi_PrePopulateVCache(avc);
AFS_RWLOCK_INIT(&avc->lock, "vcache lock");
- avc->mvid = NULL;
+ memset(&avc->mvid, 0, sizeof(avc->mvid));
avc->linkData = NULL;
avc->cbExpires = 0;
avc->opens = 0;
hzero(avc->mapDV);
avc->f.truncPos = AFS_NOTRUNC; /* don't truncate until we need to */
- hzero(avc->f.m.DataVersion); /* in case we copy it into flushDV */
+ hzero(zero);
+ afs_SetDataVersion(avc, &zero); /* in case we copy it into flushDV */
avc->Access = NULL;
avc->callback = serverp; /* to minimize chance that clear
* request is lost */
ReleaseWriteLock(&avc->lock);
return 0;
}
- ObtainWriteLock(&afs_xcbhash, 461);
- avc->f.states &= ~(CStatd | CUnique);
- avc->callback = NULL;
- afs_DequeueCallback(avc);
- ReleaseWriteLock(&afs_xcbhash);
+ afs_StaleVCacheFlags(avc, AFS_STALEVC_FILENAME | AFS_STALEVC_CLEARCB,
+ CUnique);
ReleaseWriteLock(&avc->lock);
- /* since we've been called back, or the callback has expired,
- * it's possible that the contents of this directory, or this
- * file's name have changed, thus invalidating the dnlc contents.
- */
- if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(avc);
- else
- osi_dnlc_purgevp(avc);
-
/* fetch the status info */
- tvc = afs_GetVCache(&avc->f.fid, areq, NULL, avc);
+ tvc = afs_GetVCache(&avc->f.fid, areq);
if (!tvc)
- return ENOENT;
+ return EIO;
/* Put it back; caller has already incremented vrefCount */
afs_PutVCache(tvc);
return 0;
avc->f.m.Date = OutStatus.ClientModTime;
} else {
/* failure, set up to check with server next time */
- ObtainWriteLock(&afs_xcbhash, 462);
- afs_DequeueCallback(avc);
- avc->f.states &= ~(CStatd | CUnique); /* turn off stat valid flag */
- ReleaseWriteLock(&afs_xcbhash);
- if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(avc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(avc, 0, CUnique);
}
ConvertWToSLock(&avc->lock);
return code;
struct AFSFetchStatus *astat, struct vrequest *areq)
{
afs_size_t length;
+ afs_hyper_t newDV;
AFS_STATCNT(afs_ProcessFS);
#ifdef AFS_64BIT_CLIENT
avc->f.m.Length = length;
avc->f.m.Date = astat->ClientModTime;
}
- hset64(avc->f.m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
+ hset64(newDV, astat->dataVersionHigh, astat->DataVersion);
+ afs_SetDataVersion(avc, &newDV);
avc->f.m.Owner = astat->Owner;
avc->f.m.Mode = astat->UnixModeBits;
avc->f.m.Group = astat->Group;
* \param afid File ID.
* \param areq Ptr to associated vrequest structure, specifying the
* user whose authentication tokens will be used.
- * \param avc Caller may already have a vcache for this file, which is
- * already held.
*
* \note Environment:
* The cache entry is returned with an increased vrefCount field.
* locking directories in a constant order.
*
* \note NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
- *
- * \note Might have a vcache structure already, which must
- * already be held by the caller
*/
struct vcache *
-afs_GetVCache(struct VenusFid *afid, struct vrequest *areq,
- afs_int32 * cached, struct vcache *avc)
+afs_GetVCache(struct VenusFid *afid, struct vrequest *areq)
{
afs_int32 code, newvcache = 0;
AFS_STATCNT(afs_GetVCache);
- if (cached)
- *cached = 0; /* Init just in case */
-
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
loop:
#endif
#endif
}
if (tvc) {
- if (cached)
- *cached = 1;
osi_Assert((tvc->f.states & CVInit) == 0);
/* If we are in readdir, return the vnode even if not statd */
if ((tvc->f.states & CStatd) || afs_InReadDir(tvc)) {
#endif
#endif
- ObtainWriteLock(&afs_xcbhash, 464);
- tvc->f.states &= ~CUnique;
- tvc->callback = 0;
- afs_DequeueCallback(tvc);
- ReleaseWriteLock(&afs_xcbhash);
+ afs_StaleVCacheFlags(tvc, AFS_STALEVC_NODNLC | AFS_STALEVC_CLEARCB,
+ CUnique);
/* It is always appropriate to throw away all the access rights? */
afs_FreeAllAxs(&(tvc->Access));
tvc->f.states |= CBackup;
/* now copy ".." entry back out of volume structure, if necessary */
if (tvc->mvstat == AFS_MVSTAT_ROOT && tvp->dotdot.Fid.Volume != 0) {
- if (!tvc->mvid)
- tvc->mvid = (struct VenusFid *)
+ if (!tvc->mvid.parent)
+ tvc->mvid.parent = (struct VenusFid *)
osi_AllocSmallSpace(sizeof(struct VenusFid));
- *tvc->mvid = tvp->dotdot;
+ *tvc->mvid.parent = tvp->dotdot;
}
afs_PutVolume(tvp, READ_LOCK);
}
*
* \param afid
* \param areq
- * \param cached Is element cached? If NULL, don't answer.
* \param adp
* \param aname
*
*/
struct vcache *
afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
- afs_int32 * cached, struct vcache *adp, char *aname)
+ struct vcache *adp, char *aname)
{
afs_int32 code, now, newvcache = 0;
struct VenusFid nfid;
afs_int32 retry;
AFS_STATCNT(afs_GetVCache);
- if (cached)
- *cached = 0; /* Init just in case */
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
loop1:
ObtainReadLock(&tvc->lock);
if (tvc->f.states & CStatd) {
- if (cached) {
- *cached = 1;
- }
ReleaseReadLock(&tvc->lock);
return tvc;
}
tvc->f.states |= CBackup;
/* now copy ".." entry back out of volume structure, if necessary */
if (tvc->mvstat == AFS_MVSTAT_ROOT && tvp->dotdot.Fid.Volume != 0) {
- if (!tvc->mvid)
- tvc->mvid = (struct VenusFid *)
+ if (!tvc->mvid.parent)
+ tvc->mvid.parent = (struct VenusFid *)
osi_AllocSmallSpace(sizeof(struct VenusFid));
- *tvc->mvid = tvp->dotdot;
+ *tvc->mvid.parent = tvp->dotdot;
}
}
if (code) {
- ObtainWriteLock(&afs_xcbhash, 465);
- afs_DequeueCallback(tvc);
- tvc->f.states &= ~(CStatd | CUnique);
- ReleaseWriteLock(&afs_xcbhash);
- if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(tvc, 0, CUnique);
if (tvp)
afs_PutVolume(tvp, READ_LOCK);
ReleaseWriteLock(&tvc->lock);
tvc->f.states &= ~CBulkFetching;
afs_QueueCallback(tvc, CBHash(3600), tvp);
} else {
- tvc->callback = NULL;
- afs_DequeueCallback(tvc);
- tvc->f.states &= ~(CStatd | CUnique);
- if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(tvc,
+ AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
+ CUnique);
}
} else {
- afs_DequeueCallback(tvc);
- tvc->f.states &= ~CStatd;
- tvc->f.states &= ~CUnique;
- tvc->callback = NULL;
- if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(tvc,
+ AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
+ CUnique);
}
ReleaseWriteLock(&afs_xcbhash);
if (tvp)
struct vcache *
afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
- afs_int32 * cached, struct volume *tvolp)
+ struct volume *tvolp)
{
afs_int32 code = 0, i, newvcache = 0, haveStatus = 0;
afs_int32 getNewFid = 0;
newvcache = 1;
afs_stats_cmperf.vcacheMisses++;
} else {
- if (cached)
- *cached = 1;
afs_stats_cmperf.vcacheHits++;
#if defined(AFS_DARWIN80_ENV)
/* we already bumped the ref count in the for loop above */
tvc->mvstat = AFS_MVSTAT_ROOT;
}
if (tvc->mvstat == AFS_MVSTAT_ROOT && tvolp->dotdot.Fid.Volume != 0) {
- if (!tvc->mvid)
- tvc->mvid = (struct VenusFid *)
+ if (!tvc->mvid.parent)
+ tvc->mvid.parent = (struct VenusFid *)
osi_AllocSmallSpace(sizeof(struct VenusFid));
- *tvc->mvid = tvolp->dotdot;
+ *tvc->mvid.parent = tvolp->dotdot;
}
/* stat the file */
}
if (code) {
- ObtainWriteLock(&afs_xcbhash, 467);
- afs_DequeueCallback(tvc);
- tvc->callback = NULL;
- tvc->f.states &= ~(CStatd | CUnique);
- ReleaseWriteLock(&afs_xcbhash);
- if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(tvc, AFS_STALEVC_CLEARCB, CUnique);
ReleaseWriteLock(&tvc->lock);
afs_PutVCache(tvc);
return NULL;
afs_QueueCallback(tvc, CBHash(3600), tvolp);
}
} else {
- afs_DequeueCallback(tvc);
- tvc->callback = NULL;
- tvc->f.states &= ~(CStatd | CUnique);
- if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(tvc, AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
+ CUnique);
}
ReleaseWriteLock(&afs_xcbhash);
afs_ProcessFS(tvc, &OutStatus, areq);
avc->f.states &= ~CBulkFetching;
afs_QueueCallback(avc, CBHash(3600), volp);
} else {
- afs_DequeueCallback(avc);
- avc->callback = NULL;
- avc->f.states &= ~(CStatd | CUnique);
- if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(avc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(avc,
+ AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
+ CUnique);
}
} else {
- afs_DequeueCallback(avc);
- avc->callback = NULL;
- avc->f.states &= ~(CStatd | CUnique);
- if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(avc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(avc, AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
+ CUnique);
}
ReleaseWriteLock(&afs_xcbhash);
if (volp)
ReleaseSharedLock(&afs_xvcache);
ObtainWriteLock(&tvc->lock, 58);
- tvc->f.states &= ~CStatd;
- if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(tvc, AFS_STALEVC_NOCB, 0);
/* Is it always appropriate to throw away all the access rights? */
afs_FreeAllAxs(&(tvc->Access));
* necessary
*/
if (tvc->mvstat == AFS_MVSTAT_ROOT && tvp->dotdot.Fid.Volume != 0) {
- if (!tvc->mvid)
- tvc->mvid = (struct VenusFid *)
+ if (!tvc->mvid.parent)
+ tvc->mvid.parent = (struct VenusFid *)
osi_AllocSmallSpace(sizeof(struct VenusFid));
- *tvc->mvid = tvp->dotdot;
+ *tvc->mvid.parent = tvp->dotdot;
}
}
/* store the stat on the file */
tvc->f.states &= ~CBulkFetching;
afs_QueueCallback(tvc, CBHash(3600), tvp);
} else {
- afs_DequeueCallback(tvc);
- tvc->callback = NULL;
- tvc->f.states &= ~(CStatd | CUnique);
- if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(tvc, AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
+ CUnique);
}
ReleaseWriteLock(&afs_xcbhash);
if (tvp)
void
afs_ResetVCache(struct vcache *avc, afs_ucred_t *acred, afs_int32 skipdnlc)
{
- ObtainWriteLock(&afs_xcbhash, 456);
- afs_DequeueCallback(avc);
- avc->f.states &= ~(CStatd | CDirty); /* next reference will re-stat */
- ReleaseWriteLock(&afs_xcbhash);
+ afs_stalevc_flags_t flags = 0;
+ if (skipdnlc) {
+ flags |= AFS_STALEVC_NODNLC;
+ }
+
+ afs_StaleVCacheFlags(avc, flags, CDirty); /* next reference will re-stat */
/* now find the disk cache entries */
afs_TryToSmush(avc, acred, 1);
- if (!skipdnlc) {
- osi_dnlc_purgedp(avc);
- }
if (avc->linkData && !(avc->f.states & CCore)) {
afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
avc->linkData = NULL;
for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
tvc = QTOV(tq);
uq = QPrev(tq);
- if (tvc->mvid) {
- osi_FreeSmallSpace(tvc->mvid);
- tvc->mvid = (struct VenusFid *)0;
+ if (tvc->mvid.target_root) {
+ osi_FreeSmallSpace(tvc->mvid.target_root);
+ tvc->mvid.target_root = NULL;
}
#ifdef AFS_AIX_ENV
aix_gnode_rele(AFSTOV(tvc));
*/
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
- if (tvc->mvid) {
- osi_FreeSmallSpace(tvc->mvid);
- tvc->mvid = (struct VenusFid *)0;
+ if (tvc->mvid.target_root) {
+ osi_FreeSmallSpace(tvc->mvid.target_root);
+ tvc->mvid.target_root = NULL;
}
#ifdef AFS_AIX_ENV
if (tvc->v.v_gnode)
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
- tvc->f.states &= ~(CStatd|CUnique);
+ afs_StaleVCacheFlags(tvc, AFS_STALEVC_NODNLC | AFS_STALEVC_NOCB,
+ CUnique);
}
}
ReleaseWriteLock(&afs_xvcache);
}
+
+/**
+ * Mark a vcache as stale; our metadata for the relevant file may be out of
+ * date.
+ *
+ * @post Any subsequent access to this vcache will cause us to fetch the
+ * metadata for this vcache again.
+ */
+void
+afs_StaleVCacheFlags(struct vcache *avc, afs_stalevc_flags_t flags,
+ afs_uint32 cflags)
+{
+ int do_dnlc = 1;
+ int do_filename = 0;
+ int do_dequeue = 1;
+ int lock_cbhash = 1;
+
+ if ((flags & AFS_STALEVC_NODNLC)) {
+ do_dnlc = 0;
+ }
+ if ((flags & AFS_STALEVC_FILENAME)) {
+ do_filename = 1;
+ }
+ if ((flags & AFS_STALEVC_CBLOCKED)) {
+ lock_cbhash = 0;
+ }
+ if ((flags & AFS_STALEVC_NOCB)) {
+ do_dequeue = 0;
+ lock_cbhash = 0;
+ }
+
+ if (lock_cbhash) {
+ ObtainWriteLock(&afs_xcbhash, 486);
+ }
+ if (do_dequeue) {
+ afs_DequeueCallback(avc);
+ }
+
+ cflags |= CStatd;
+ avc->f.states &= ~cflags;
+
+ if (lock_cbhash) {
+ ReleaseWriteLock(&afs_xcbhash);
+ }
+
+ if ((flags & AFS_STALEVC_SKIP_DNLC_FOR_INIT_FLUSHED) &&
+ (avc->f.states & (CVInit | CVFlushed))) {
+ do_dnlc = 0;
+ }
+
+ if (flags & AFS_STALEVC_CLEARCB) {
+ avc->callback = NULL;
+ }
+
+ if (do_dnlc) {
+ if ((avc->f.fid.Fid.Vnode & 1) ||
+ AFSTOV(avc) == NULL || vType(avc) == VDIR ||
+ (avc->f.states & CForeign)) {
+ /* This vcache is (or could be) a directory. */
+ osi_dnlc_purgedp(avc);
+
+ } else if (do_filename) {
+ osi_dnlc_purgevp(avc);
+ }
+ }
+}
+
+/**
+ * Store a new data version in a vcache's cached metadata.
+ *
+ * Thin wrapper around hset(); presumably exists so all DataVersion updates
+ * funnel through a single point — confirm against callers.
+ *
+ * @param[in] avc   vcache whose f.m.DataVersion is updated
+ * @param[in] avers the new data version value to store
+ */
+void
+afs_SetDataVersion(struct vcache *avc, afs_hyper_t *avers)
+{
+ hset(avc->f.m.DataVersion, *avers);
+}