* afs_WriteVCacheDiscon
* afs_SimpleVStat
* afs_ProcessFS
- * TellALittleWhiteLie
* afs_RemoteLookup
* afs_GetVCache
* afs_LookupVCache
/* Forward declarations */
static afs_int32 afs_QueueVCB(struct vcache *avc, int *slept);
+
+/*
+ * The PFlush algorithm makes use of the fact that Fid.Unique is not used in
+ * below hash algorithms. Change it if need be so that flushing algorithm
+ * doesn't move things from one hash chain to another.
+ */
+/* Don't hash on the cell; our callback-breaking code sometimes fails to compute
+ * the cell correctly, and only scans one hash bucket. */
+int VCHash(struct VenusFid *fid)
+{
+ /* Mix Volume and Vnode only; Fid.Unique is deliberately excluded (see
+ * the PFlush note above) so flushing cannot move an entry between
+ * hash chains. Result is masked to the VCSIZEBITS-wide table. */
+ return opr_jhash_int2(fid->Fid.Volume, fid->Fid.Vnode, 0) &
+ opr_jhash_mask(VCSIZEBITS);
+}
+/* Hash only on volume to speed up volume callbacks. */
+int VCHashV(struct VenusFid *fid)
+{
+ /* Volume-only hash: every vcache of a volume lands in one bucket, so a
+ * volume-wide callback break scans a single chain. */
+ return opr_jhash_int(fid->Fid.Volume, 0) & opr_jhash_mask(VCSIZEBITS);
+}
+
/*!
* Generate an index into the hash table for a given Fid.
* \param fid
afs_int32 i, code;
struct vcache **uvc, *wvc;
+ /* NOTE: We must have nothing drop afs_xvcache until we have removed all
+ * possible references to this vcache. This means all hash tables, queues,
+ * DNLC, etc. */
+
*slept = 0;
AFS_STATCNT(afs_FlushVCache);
afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
ICL_TYPE_INT32, avc->f.states);
- code = osi_VM_FlushVCache(avc, slept);
+ code = osi_VM_FlushVCache(avc);
if (code)
goto bad;
/* remove entry from the volume hash table */
QRemove(&avc->vhashq);
- if (avc->mvid)
- osi_FreeSmallSpace(avc->mvid);
- avc->mvid = (struct VenusFid *)0;
+#if defined(AFS_LINUX26_ENV)
+ {
+ struct pagewriter *pw, *store;
+ struct list_head tofree;
+
+ INIT_LIST_HEAD(&tofree);
+ spin_lock(&avc->pagewriter_lock);
+ list_for_each_entry_safe(pw, store, &avc->pagewriters, link) {
+ list_del(&pw->link);
+ /* afs_osi_Free may sleep so we need to defer it */
+ list_add_tail(&pw->link, &tofree);
+ }
+ spin_unlock(&avc->pagewriter_lock);
+ list_for_each_entry_safe(pw, store, &tofree, link) {
+ list_del(&pw->link);
+ afs_osi_Free(pw, sizeof(struct pagewriter));
+ }
+ }
+#endif
+
+ if (avc->mvid.target_root)
+ osi_FreeSmallSpace(avc->mvid.target_root);
+ avc->mvid.target_root = NULL;
if (avc->linkData) {
afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
avc->linkData = NULL;
/* OK, there are no internal vrefCounts, so there shouldn't
* be any more refs here. */
if (avc->v) {
-#ifdef AFS_DARWIN80_ENV
+# ifdef AFS_DARWIN80_ENV
vnode_clearfsnode(AFSTOV(avc));
vnode_removefsref(AFSTOV(avc));
-#else
+# else
avc->v->v_data = NULL; /* remove from vnode */
-#endif
+# endif
AFSTOV(avc) = NULL; /* also drop the ptr to vnode */
}
#endif
-#ifdef AFS_SUN510_ENV
+
+#ifdef AFS_SUN511_ENV
+ if (avc->v) {
+ vn_free(avc->v);
+ avc->v = NULL;
+ }
+#elif defined(AFS_SUN510_ENV)
/* As we use private vnodes, cleanup is up to us */
vn_reinit(AFSTOV(avc));
#endif
afs_FreeAllAxs(&(avc->Access));
- ObtainWriteLock(&afs_xcbhash, 460);
- afs_DequeueCallback(avc); /* remove it from queued callbacks list */
- avc->f.states &= ~(CStatd | CUnique);
- ReleaseWriteLock(&afs_xcbhash);
- if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(avc); /* if it (could be) a directory */
- else
- osi_dnlc_purgevp(avc);
+ afs_StaleVCacheFlags(avc, AFS_STALEVC_FILENAME, CUnique);
+
+ /* By this point, the vcache has been removed from all global structures
+ * via which someone could try to use the vcache. It is okay to drop
+ * afs_xvcache at this point (if *slept is set). */
- if (!afs_shuttingdown)
+ if (afs_shuttingdown == AFS_RUNNING)
afs_QueueVCB(avc, slept);
/*
/* we can't keep trying to push back dirty data forever. Give up. */
afs_InvalidateAllSegments(avc); /* turns off dirty bit */
}
- avc->f.states &= ~CMAPPED; /* mainly used by SunOS 4.0.x */
+ avc->f.states &= ~CMAPPED;
avc->f.states &= ~CDirty; /* Turn it off */
if (avc->f.states & CUnlinked) {
if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
int tcount;
struct server *tsp;
int i;
- struct vrequest treq;
+ struct vrequest *treq = NULL;
struct afs_conn *tc;
int safety1, safety2, safety3;
XSTATS_DECLS;
if (AFS_IS_DISCONNECTED)
return ENETDOWN;
- if ((code = afs_InitReq(&treq, afs_osi_credp)))
+ if ((code = afs_CreateReq(&treq, afs_osi_credp)))
return code;
- treq.flags |= O_NONBLOCK;
+ treq->flags |= O_NONBLOCK;
tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
osi_Assert(tfids != NULL);
callBacks[0].CallBackType = CB_EXCLUSIVE;
for (safety3 = 0; safety3 < AFS_MAXHOSTS * 2; safety3++) {
tc = afs_ConnByHost(tsp, tsp->cell->fsport,
- tsp->cell->cellNum, &treq, 0,
+ tsp->cell->cellNum, treq, 0,
SHARED_LOCK, 0, &rxconn);
if (tc) {
XSTATS_START_TIME
} else
code = -1;
if (!afs_Analyze
- (tc, rxconn, code, 0, &treq,
+ (tc, rxconn, code, 0, treq,
AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS, SHARED_LOCK,
tsp->cell)) {
break;
if (lockit)
ReleaseWriteLock(&afs_xvcb);
afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
+ afs_DestroyReq(treq);
return 0;
}
/* printf("Reclaim list flush %lx failed: %d\n", (unsigned long) tvc, code); */
}
if (tvc->f.states & (CVInit
-#ifdef AFS_DARWIN80_ENV
+# ifdef AFS_DARWIN80_ENV
| CDeadVnode
-#endif
+# endif
)) {
tvc->f.states &= ~(CVInit
-#ifdef AFS_DARWIN80_ENV
+# ifdef AFS_DARWIN80_ENV
| CDeadVnode
-#endif
+# endif
);
afs_osi_Wakeup(&tvc->f.states);
}
/*
* The proper value for mvstat (for root fids) is setup by the caller.
*/
- avc->mvstat = 0;
+ avc->mvstat = AFS_MVSTAT_FILE;
if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
- avc->mvstat = 2;
+ avc->mvstat = AFS_MVSTAT_ROOT;
if (afs_globalVFS == 0)
osi_Panic("afs globalvfs");
afs_ShakeLooseVCaches(afs_int32 anumber)
{
afs_int32 i, loop;
+ int evicted;
struct vcache *tvc;
struct afs_q *tq, *uq;
int fv_slept, defersleep = 0;
}
fv_slept = 0;
- if (osi_TryEvictVCache(tvc, &fv_slept, defersleep))
+ evicted = osi_TryEvictVCache(tvc, &fv_slept, defersleep);
+ if (evicted) {
anumber--;
+ }
if (fv_slept) {
if (loop++ > 100)
break;
+ if (!evicted) {
+ /*
+ * This vcache was busy and we slept while trying to evict it.
+ * Move this busy vcache to the head of the VLRU so vcaches
+ * following this busy vcache can be evicted during the retry.
+ */
+ QRemove(&tvc->vlruq);
+ QAdd(&VLRU, &tvc->vlruq);
+ }
goto retry; /* start over - may have raced. */
}
if (uq == &VLRU) {
struct server *serverp) {
afs_uint32 slot;
+ afs_hyper_t zero;
slot = avc->diskSlot;
osi_PrePopulateVCache(avc);
AFS_RWLOCK_INIT(&avc->lock, "vcache lock");
- avc->mvid = NULL;
+ memset(&avc->mvid, 0, sizeof(avc->mvid));
avc->linkData = NULL;
avc->cbExpires = 0;
avc->opens = 0;
hzero(avc->mapDV);
avc->f.truncPos = AFS_NOTRUNC; /* don't truncate until we need to */
- hzero(avc->f.m.DataVersion); /* in case we copy it into flushDV */
+ hzero(zero);
+ afs_SetDataVersion(avc, &zero); /* in case we copy it into flushDV */
avc->Access = NULL;
avc->callback = serverp; /* to minimize chance that clear
* request is lost */
struct afs_conn *tc;
afs_int32 code;
afs_ucred_t *cred = NULL;
- struct vrequest treq, ureq;
+ struct vrequest *treq = NULL;
struct AFSVolSync tsync;
int didCore;
XSTATS_DECLS;
AFS_STATCNT(afs_FlushActiveVcaches);
+
+ code = afs_CreateReq(&treq, afs_osi_credp);
+ if (code) {
+ afs_warn("unable to alloc treq\n");
+ return;
+ }
+
ObtainReadLock(&afs_xvcache);
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
ReleaseReadLock(&afs_xvcache);
ObtainWriteLock(&tvc->lock, 51);
do {
- code = afs_InitReq(&treq, afs_osi_credp);
+ code = afs_InitReq(treq, afs_osi_credp);
if (code) {
code = -1;
break; /* shutting down: do not try to extend the lock */
}
- treq.flags |= O_NONBLOCK;
+ treq->flags |= O_NONBLOCK;
- tc = afs_Conn(&tvc->f.fid, &treq, SHARED_LOCK, &rxconn);
+ tc = afs_Conn(&tvc->f.fid, treq, SHARED_LOCK, &rxconn);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
RX_AFS_GUNLOCK();
} else
code = -1;
} while (afs_Analyze
- (tc, rxconn, code, &tvc->f.fid, &treq,
+ (tc, rxconn, code, &tvc->f.fid, treq,
AFS_STATS_FS_RPCIDX_EXTENDLOCK, SHARED_LOCK, NULL));
ReleaseWriteLock(&tvc->lock);
*/
osi_vnhold(tvc, 0);
ReleaseReadLock(&afs_xvcache);
-#ifdef AFS_BOZONLOCK_ENV
- afs_BozonLock(&tvc->pvnLock, tvc);
-#endif
#if defined(AFS_SGI_ENV)
/*
* That's because if we come in via the CUnlinkedDel bit state path we'll be have 0 refcnt
/* XXXX Find better place-holder for cred XXXX */
cred = (afs_ucred_t *)tvc->linkData;
tvc->linkData = NULL; /* XXX */
- code = afs_InitReq(&ureq, cred);
+ code = afs_InitReq(treq, cred);
afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
tvc->execsOrWriters);
if (!code) { /* avoid store when shutting down */
- code = afs_StoreOnLastReference(tvc, &ureq);
+ code = afs_StoreOnLastReference(tvc, treq);
}
ReleaseWriteLock(&tvc->lock);
-#ifdef AFS_BOZONLOCK_ENV
- afs_BozonUnlock(&tvc->pvnLock, tvc);
-#endif
hzero(tvc->flushDV);
osi_FlushText(tvc);
didCore = 1;
* Ignore errors
*/
ReleaseWriteLock(&tvc->lock);
-#ifdef AFS_BOZONLOCK_ENV
- afs_BozonUnlock(&tvc->pvnLock, tvc);
-#endif
#if defined(AFS_SGI_ENV)
AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
} else {
/* lost (or won, perhaps) the race condition */
ReleaseWriteLock(&tvc->lock);
-#ifdef AFS_BOZONLOCK_ENV
- afs_BozonUnlock(&tvc->pvnLock, tvc);
-#endif
}
#if defined(AFS_SGI_ENV)
AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
}
}
ReleaseReadLock(&afs_xvcache);
+ afs_DestroyReq(treq);
}
ReleaseWriteLock(&avc->lock);
return 0;
}
- ObtainWriteLock(&afs_xcbhash, 461);
- avc->f.states &= ~(CStatd | CUnique);
- avc->callback = NULL;
- afs_DequeueCallback(avc);
- ReleaseWriteLock(&afs_xcbhash);
+ afs_StaleVCacheFlags(avc, AFS_STALEVC_FILENAME | AFS_STALEVC_CLEARCB,
+ CUnique);
ReleaseWriteLock(&avc->lock);
- /* since we've been called back, or the callback has expired,
- * it's possible that the contents of this directory, or this
- * file's name have changed, thus invalidating the dnlc contents.
- */
- if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(avc);
- else
- osi_dnlc_purgevp(avc);
-
/* fetch the status info */
- tvc = afs_GetVCache(&avc->f.fid, areq, NULL, avc);
+ tvc = afs_GetVCache(&avc->f.fid, areq);
if (!tvc)
- return ENOENT;
+ return EIO;
/* Put it back; caller has already incremented vrefCount */
afs_PutVCache(tvc);
return 0;
} else if (vType(avc) == VLNK) {
avc->f.m.Mode |= S_IFLNK;
if ((avc->f.m.Mode & 0111) == 0)
- avc->mvstat = 1;
+ avc->mvstat = AFS_MVSTAT_MTPT;
}
if (avc->f.states & CForeign) {
struct axscache *ac;
avc->f.m.Date = OutStatus.ClientModTime;
} else {
/* failure, set up to check with server next time */
- ObtainWriteLock(&afs_xcbhash, 462);
- afs_DequeueCallback(avc);
- avc->f.states &= ~(CStatd | CUnique); /* turn off stat valid flag */
- ReleaseWriteLock(&afs_xcbhash);
- if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(avc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(avc, 0, CUnique);
}
ConvertWToSLock(&avc->lock);
return code;
if (astatus->Mask & AFS_SETMODE) {
avc->f.m.Mode = astatus->UnixModeBits;
-#if 0 /* XXX: Leaving this out, so it doesn't mess up the file type flag.*/
-
- if (vType(avc) == VREG) {
- avc->f.m.Mode |= S_IFREG;
- } else if (vType(avc) == VDIR) {
- avc->f.m.Mode |= S_IFDIR;
- } else if (vType(avc) == VLNK) {
- avc->f.m.Mode |= S_IFLNK;
- if ((avc->f.m.Mode & 0111) == 0)
- avc->mvstat = 1;
- }
-#endif
flags |= VDisconSetMode;
} /* if(astatus.Mask & AFS_SETMODE) */
struct AFSFetchStatus *astat, struct vrequest *areq)
{
afs_size_t length;
+ afs_hyper_t newDV;
AFS_STATCNT(afs_ProcessFS);
#ifdef AFS_64BIT_CLIENT
avc->f.m.Length = length;
avc->f.m.Date = astat->ClientModTime;
}
- hset64(avc->f.m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
+ hset64(newDV, astat->dataVersionHigh, astat->DataVersion);
+ afs_SetDataVersion(avc, &newDV);
avc->f.m.Owner = astat->Owner;
avc->f.m.Mode = astat->UnixModeBits;
avc->f.m.Group = astat->Group;
avc->f.m.Mode |= S_IFLNK;
}
if ((avc->f.m.Mode & 0111) == 0) {
- avc->mvstat = 1;
+ avc->mvstat = AFS_MVSTAT_MTPT;
}
}
avc->f.anyAccess = astat->AnonymousAccess;
* \param afid File ID.
* \param areq Ptr to associated vrequest structure, specifying the
* user whose authentication tokens will be used.
- * \param avc Caller may already have a vcache for this file, which is
- * already held.
*
* \note Environment:
* The cache entry is returned with an increased vrefCount field.
* locking directories in a constant order.
*
* \note NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
- *
- * \note Might have a vcache structure already, which must
- * already be held by the caller
*/
struct vcache *
-afs_GetVCache(struct VenusFid *afid, struct vrequest *areq,
- afs_int32 * cached, struct vcache *avc)
+afs_GetVCache(struct VenusFid *afid, struct vrequest *areq)
{
afs_int32 code, newvcache = 0;
AFS_STATCNT(afs_GetVCache);
- if (cached)
- *cached = 0; /* Init just in case */
-
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
loop:
#endif
#endif
}
if (tvc) {
- if (cached)
- *cached = 1;
osi_Assert((tvc->f.states & CVInit) == 0);
/* If we are in readdir, return the vnode even if not statd */
if ((tvc->f.states & CStatd) || afs_InReadDir(tvc)) {
/* Darwin 8.0 only has bufs in nfs, so we shouldn't have to worry about them.
What about ubc? */
#else
-#if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
+# if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
/*
* XXX - I really don't like this. Should try to understand better.
* It seems that sometimes, when we get called, we already hold the
struct vnode *vp = AFSTOV(tvc);
int iheldthelock;
-#if defined(AFS_DARWIN_ENV)
+# if defined(AFS_DARWIN_ENV)
iheldthelock = VOP_ISLOCKED(vp);
if (!iheldthelock)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, current_proc());
ObtainWriteLock(&tvc->lock, 954);
if (!iheldthelock)
VOP_UNLOCK(vp, LK_EXCLUSIVE, current_proc());
-#elif defined(AFS_FBSD80_ENV)
+# elif defined(AFS_FBSD_ENV)
iheldthelock = VOP_ISLOCKED(vp);
if (!iheldthelock) {
/* nosleep/sleep lock order reversal */
vinvalbuf(vp, V_SAVE, PINOD, 0); /* changed late in 8.0-CURRENT */
if (!iheldthelock)
VOP_UNLOCK(vp, 0);
-#elif defined(AFS_FBSD60_ENV)
- iheldthelock = VOP_ISLOCKED(vp, curthread);
- if (!iheldthelock)
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
- AFS_GUNLOCK();
- vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
- AFS_GLOCK();
- if (!iheldthelock)
- VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
-#elif defined(AFS_FBSD_ENV)
- iheldthelock = VOP_ISLOCKED(vp, curthread);
- if (!iheldthelock)
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
- vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
- if (!iheldthelock)
- VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
-#elif defined(AFS_OBSD_ENV)
+# elif defined(AFS_OBSD_ENV)
iheldthelock = VOP_ISLOCKED(vp, curproc);
if (!iheldthelock)
VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
uvm_vnp_uncache(vp);
if (!iheldthelock)
VOP_UNLOCK(vp, 0, curproc);
-#elif defined(AFS_NBSD40_ENV)
+# elif defined(AFS_NBSD40_ENV)
iheldthelock = VOP_ISLOCKED(vp);
if (!iheldthelock) {
VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
uvm_vnp_uncache(vp);
if (!iheldthelock)
VOP_UNLOCK(vp, 0);
-#endif
+# endif
}
-#endif
+# endif
#endif
- ObtainWriteLock(&afs_xcbhash, 464);
- tvc->f.states &= ~CUnique;
- tvc->callback = 0;
- afs_DequeueCallback(tvc);
- ReleaseWriteLock(&afs_xcbhash);
+ afs_StaleVCacheFlags(tvc, AFS_STALEVC_NODNLC | AFS_STALEVC_CLEARCB,
+ CUnique);
/* It is always appropriate to throw away all the access rights? */
afs_FreeAllAxs(&(tvc->Access));
tvc->f.states |= CForeign;
if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
&& (tvp->rootUnique == afid->Fid.Unique)) {
- tvc->mvstat = 2;
+ tvc->mvstat = AFS_MVSTAT_ROOT;
}
}
if (tvp->states & VRO)
if (tvp->states & VBackup)
tvc->f.states |= CBackup;
/* now copy ".." entry back out of volume structure, if necessary */
- if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
- if (!tvc->mvid)
- tvc->mvid = (struct VenusFid *)
+ if (tvc->mvstat == AFS_MVSTAT_ROOT && tvp->dotdot.Fid.Volume != 0) {
+ if (!tvc->mvid.parent)
+ tvc->mvid.parent = (struct VenusFid *)
osi_AllocSmallSpace(sizeof(struct VenusFid));
- *tvc->mvid = tvp->dotdot;
+ *tvc->mvid.parent = tvp->dotdot;
}
afs_PutVolume(tvp, READ_LOCK);
}
*
* \param afid
* \param areq
- * \param cached Is element cached? If NULL, don't answer.
* \param adp
* \param aname
*
*/
struct vcache *
afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
- afs_int32 * cached, struct vcache *adp, char *aname)
+ struct vcache *adp, char *aname)
{
afs_int32 code, now, newvcache = 0;
struct VenusFid nfid;
afs_int32 retry;
AFS_STATCNT(afs_GetVCache);
- if (cached)
- *cached = 0; /* Init just in case */
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
loop1:
ObtainReadLock(&tvc->lock);
if (tvc->f.states & CStatd) {
- if (cached) {
- *cached = 1;
- }
ReleaseReadLock(&tvc->lock);
return tvc;
}
tvc->f.states |= CForeign;
if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
&& (tvp->rootUnique == afid->Fid.Unique))
- tvc->mvstat = 2;
+ tvc->mvstat = AFS_MVSTAT_ROOT;
}
if (tvp->states & VRO)
tvc->f.states |= CRO;
if (tvp->states & VBackup)
tvc->f.states |= CBackup;
/* now copy ".." entry back out of volume structure, if necessary */
- if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
- if (!tvc->mvid)
- tvc->mvid = (struct VenusFid *)
+ if (tvc->mvstat == AFS_MVSTAT_ROOT && tvp->dotdot.Fid.Volume != 0) {
+ if (!tvc->mvid.parent)
+ tvc->mvid.parent = (struct VenusFid *)
osi_AllocSmallSpace(sizeof(struct VenusFid));
- *tvc->mvid = tvp->dotdot;
+ *tvc->mvid.parent = tvp->dotdot;
}
}
if (code) {
- ObtainWriteLock(&afs_xcbhash, 465);
- afs_DequeueCallback(tvc);
- tvc->f.states &= ~(CStatd | CUnique);
- ReleaseWriteLock(&afs_xcbhash);
- if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(tvc, 0, CUnique);
if (tvp)
afs_PutVolume(tvp, READ_LOCK);
ReleaseWriteLock(&tvc->lock);
tvc->f.states &= ~CBulkFetching;
afs_QueueCallback(tvc, CBHash(3600), tvp);
} else {
- tvc->callback = NULL;
- afs_DequeueCallback(tvc);
- tvc->f.states &= ~(CStatd | CUnique);
- if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(tvc,
+ AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
+ CUnique);
}
} else {
- afs_DequeueCallback(tvc);
- tvc->f.states &= ~CStatd;
- tvc->f.states &= ~CUnique;
- tvc->callback = NULL;
- if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(tvc,
+ AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
+ CUnique);
}
ReleaseWriteLock(&afs_xcbhash);
if (tvp)
struct vcache *
afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
- afs_int32 * cached, struct volume *tvolp)
+ struct volume *tvolp)
{
afs_int32 code = 0, i, newvcache = 0, haveStatus = 0;
afs_int32 getNewFid = 0;
newvcache = 1;
afs_stats_cmperf.vcacheMisses++;
} else {
- if (cached)
- *cached = 1;
afs_stats_cmperf.vcacheHits++;
#if defined(AFS_DARWIN80_ENV)
/* we already bumped the ref count in the for loop above */
/* now copy ".." entry back out of volume structure, if necessary */
if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
&& (tvolp->rootUnique == afid->Fid.Unique)) {
- tvc->mvstat = 2;
+ tvc->mvstat = AFS_MVSTAT_ROOT;
}
- if (tvc->mvstat == 2 && tvolp->dotdot.Fid.Volume != 0) {
- if (!tvc->mvid)
- tvc->mvid = (struct VenusFid *)
+ if (tvc->mvstat == AFS_MVSTAT_ROOT && tvolp->dotdot.Fid.Volume != 0) {
+ if (!tvc->mvid.parent)
+ tvc->mvid.parent = (struct VenusFid *)
osi_AllocSmallSpace(sizeof(struct VenusFid));
- *tvc->mvid = tvolp->dotdot;
+ *tvc->mvid.parent = tvolp->dotdot;
}
/* stat the file */
}
if (code) {
- ObtainWriteLock(&afs_xcbhash, 467);
- afs_DequeueCallback(tvc);
- tvc->callback = NULL;
- tvc->f.states &= ~(CStatd | CUnique);
- ReleaseWriteLock(&afs_xcbhash);
- if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(tvc, AFS_STALEVC_CLEARCB, CUnique);
ReleaseWriteLock(&tvc->lock);
afs_PutVCache(tvc);
return NULL;
afs_QueueCallback(tvc, CBHash(3600), tvolp);
}
} else {
- afs_DequeueCallback(tvc);
- tvc->callback = NULL;
- tvc->f.states &= ~(CStatd | CUnique);
- if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(tvc, AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
+ CUnique);
}
ReleaseWriteLock(&afs_xcbhash);
afs_ProcessFS(tvc, &OutStatus, areq);
avc->f.states &= ~CBulkFetching;
afs_QueueCallback(avc, CBHash(3600), volp);
} else {
- afs_DequeueCallback(avc);
- avc->callback = NULL;
- avc->f.states &= ~(CStatd | CUnique);
- if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(avc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(avc,
+ AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
+ CUnique);
}
} else {
- afs_DequeueCallback(avc);
- avc->callback = NULL;
- avc->f.states &= ~(CStatd | CUnique);
- if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(avc); /* if it (could be) a directory */
+ afs_StaleVCacheFlags(avc, AFS_STALEVC_CBLOCKED | AFS_STALEVC_CLEARCB,
+ CUnique);
}
ReleaseWriteLock(&afs_xcbhash);
if (volp)
return code;
}
-#if 0
-/*
- * afs_StuffVcache
- *
- * Description:
- * Stuff some information into the vcache for the given file.
- *
- * Parameters:
- * afid : File in question.
- * OutStatus : Fetch status on the file.
- * CallBack : Callback info.
- * tc : RPC connection involved.
- * areq : vrequest involved.
- *
- * Environment:
- * Nothing interesting.
- */
-void
-afs_StuffVcache(struct VenusFid *afid,
- struct AFSFetchStatus *OutStatus,
- struct AFSCallBack *CallBack, struct afs_conn *tc,
- struct vrequest *areq)
-{
- afs_int32 code, i, newvcache = 0;
- struct vcache *tvc;
- struct AFSVolSync tsync;
- struct volume *tvp;
- struct axscache *ac;
- afs_int32 retry;
-
- AFS_STATCNT(afs_StuffVcache);
-#ifdef IFS_VCACHECOUNT
- ifs_gvcachecall++;
-#endif
-
- loop:
- ObtainSharedLock(&afs_xvcache, 8);
-
- tvc = afs_FindVCache(afid, &retry, DO_VLRU| IS_SLOCK /* no stats */ );
- if (tvc && retry) {
-#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
- ReleaseSharedLock(&afs_xvcache);
- spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
- goto loop;
-#endif
- }
-
- if (!tvc) {
- /* no cache entry, better grab one */
- UpgradeSToWLock(&afs_xvcache, 25);
- tvc = afs_NewVCache(afid, NULL);
- newvcache = 1;
- ConvertWToSLock(&afs_xvcache);
- if (!tvc)
- {
- ReleaseSharedLock(&afs_xvcache);
- return NULL;
- }
- }
-
- ReleaseSharedLock(&afs_xvcache);
- ObtainWriteLock(&tvc->lock, 58);
-
- tvc->f.states &= ~CStatd;
- if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
-
- /* Is it always appropriate to throw away all the access rights? */
- afs_FreeAllAxs(&(tvc->Access));
-
- /*Copy useful per-volume info */
- tvp = afs_GetVolume(afid, areq, READ_LOCK);
- if (tvp) {
- if (newvcache && (tvp->states & VForeign))
- tvc->f.states |= CForeign;
- if (tvp->states & VRO)
- tvc->f.states |= CRO;
- if (tvp->states & VBackup)
- tvc->f.states |= CBackup;
- /*
- * Now, copy ".." entry back out of volume structure, if
- * necessary
- */
- if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
- if (!tvc->mvid)
- tvc->mvid = (struct VenusFid *)
- osi_AllocSmallSpace(sizeof(struct VenusFid));
- *tvc->mvid = tvp->dotdot;
- }
- }
- /* store the stat on the file */
- afs_RemoveVCB(afid);
- afs_ProcessFS(tvc, OutStatus, areq);
- tvc->callback = tc->srvr->server;
-
- /* we use osi_Time twice below. Ideally, we would use the time at which
- * the FetchStatus call began, instead, but we don't have it here. So we
- * make do with "now". In the CRO case, it doesn't really matter. In
- * the other case, we hope that the difference between "now" and when the
- * call actually began execution on the server won't be larger than the
- * padding which the server keeps. Subtract 1 second anyway, to be on
- * the safe side. Can't subtract more because we don't know how big
- * ExpirationTime is. Possible consistency problems may arise if the call
- * timeout period becomes longer than the server's expiration padding. */
- ObtainWriteLock(&afs_xcbhash, 470);
- if (CallBack->ExpirationTime != 0) {
- tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
- tvc->f.states |= CStatd;
- tvc->f.states &= ~CBulkFetching;
- afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
- } else if (tvc->f.states & CRO) {
- /* old-fashioned AFS 3.2 style */
- tvc->cbExpires = 3600 + osi_Time();
- /*XXX*/ tvc->f.states |= CStatd;
- tvc->f.states &= ~CBulkFetching;
- afs_QueueCallback(tvc, CBHash(3600), tvp);
- } else {
- afs_DequeueCallback(tvc);
- tvc->callback = NULL;
- tvc->f.states &= ~(CStatd | CUnique);
- if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
- osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
- }
- ReleaseWriteLock(&afs_xcbhash);
- if (tvp)
- afs_PutVolume(tvp, READ_LOCK);
-
- /* look in per-pag cache */
- if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
- ac->axess = OutStatus->CallerAccess; /* substitute pags */
- else /* not found, add a new one if possible */
- afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);
-
- ReleaseWriteLock(&tvc->lock);
- afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
- ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32,
- tvc->cbExpires, ICL_TYPE_INT32, tvc->cbExpires - osi_Time());
- /*
- * Release ref count... hope this guy stays around...
- */
- afs_PutVCache(tvc);
-} /*afs_StuffVcache */
-#endif
-
/*!
* Decrements the reference count on a cache entry.
*
void
afs_ResetVCache(struct vcache *avc, afs_ucred_t *acred, afs_int32 skipdnlc)
{
- ObtainWriteLock(&afs_xcbhash, 456);
- afs_DequeueCallback(avc);
- avc->f.states &= ~(CStatd | CDirty); /* next reference will re-stat */
- ReleaseWriteLock(&afs_xcbhash);
+ afs_stalevc_flags_t flags = 0;
+ if (skipdnlc) {
+ flags |= AFS_STALEVC_NODNLC;
+ }
+
+ afs_StaleVCacheFlags(avc, flags, CDirty); /* next reference will re-stat */
/* now find the disk cache entries */
afs_TryToSmush(avc, acred, 1);
- if (!skipdnlc) {
- osi_dnlc_purgedp(avc);
- }
if (avc->linkData && !(avc->f.states & CCore)) {
afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
avc->linkData = NULL;
/* should I have a read lock on the vnode here? */
if (tvc) {
#ifndef AFS_DARWIN80_ENV
-#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
+# if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
afs_int32 retry = 0;
osi_vnhold(tvc, &retry);
if (retry) {
spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
goto loop;
}
-#else
+# else
osi_vnhold(tvc, (int *)0); /* already held, above */
-#endif
+# endif
#endif
/*
* We obtained the xvcache lock above.
tvc->vc_rwlockid = OSI_NO_LOCKID;
initnsema(&tvc->vc_rwlock, 1,
makesname(name, "vrw", tvc->v.v_number));
-#ifndef AFS_SGI53_ENV
+# ifndef AFS_SGI53_ENV
initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
-#endif
-#ifndef AFS_SGI62_ENV
+# endif
+# ifndef AFS_SGI62_ENV
initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
-#endif /* AFS_SGI62_ENV */
+# endif /* AFS_SGI62_ENV */
}
#endif
QInit(&VLRU);
for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
tvc = QTOV(tq);
uq = QPrev(tq);
- if (tvc->mvid) {
- osi_FreeSmallSpace(tvc->mvid);
- tvc->mvid = (struct VenusFid *)0;
+ if (tvc->mvid.target_root) {
+ osi_FreeSmallSpace(tvc->mvid.target_root);
+ tvc->mvid.target_root = NULL;
}
#ifdef AFS_AIX_ENV
aix_gnode_rele(AFSTOV(tvc));
*/
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
- if (tvc->mvid) {
- osi_FreeSmallSpace(tvc->mvid);
- tvc->mvid = (struct VenusFid *)0;
+ if (tvc->mvid.target_root) {
+ osi_FreeSmallSpace(tvc->mvid.target_root);
+ tvc->mvid.target_root = NULL;
}
#ifdef AFS_AIX_ENV
if (tvc->v.v_gnode)
afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
-#ifdef AFS_AIX32_ENV
+# ifdef AFS_AIX32_ENV
if (tvc->segid) {
AFS_GUNLOCK();
vms_delete(tvc->segid);
crfree(tvc->credp);
tvc->credp = NULL;
}
-#endif
+# endif
#endif
#if defined(AFS_SUN5_ENV)
if (tvc->credp) {
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
- tvc->f.states &= ~(CStatd|CUnique);
+ afs_StaleVCacheFlags(tvc, AFS_STALEVC_NODNLC | AFS_STALEVC_NOCB,
+ CUnique);
}
}
ReleaseWriteLock(&afs_xvcache);
}
+
+/**
+ * Mark a vcache as stale; our metadata for the relevant file may be out of
+ * date.
+ *
+ * @param avc    The vcache to mark stale.
+ * @param flags  Bitmask of AFS_STALEVC_* flags controlling locking, callback
+ *               dequeueing, and DNLC purging behavior.
+ * @param cflags Additional vcache state bits to clear; CStatd is always
+ *               cleared in addition to these.
+ *
+ * @post Any subsequent access to this vcache will cause us to fetch the
+ * metadata for this vcache again.
+ */
+void
+afs_StaleVCacheFlags(struct vcache *avc, afs_stalevc_flags_t flags,
+ afs_uint32 cflags)
+{
+ int do_dnlc = 1;
+ int do_filename = 0;
+ int do_dequeue = 1;
+ int lock_cbhash = 1;
+
+ /* Decode the behavior flags into local booleans. */
+ if ((flags & AFS_STALEVC_NODNLC)) {
+ do_dnlc = 0;
+ }
+ if ((flags & AFS_STALEVC_FILENAME)) {
+ do_filename = 1;
+ }
+ if ((flags & AFS_STALEVC_CBLOCKED)) {
+ /* Skip taking afs_xcbhash; presumably the caller already holds it
+ * — confirm at call sites. */
+ lock_cbhash = 0;
+ }
+ if ((flags & AFS_STALEVC_NOCB)) {
+ /* Leave the callback queue alone entirely; no lock needed. */
+ do_dequeue = 0;
+ lock_cbhash = 0;
+ }
+
+ if (lock_cbhash) {
+ ObtainWriteLock(&afs_xcbhash, 486);
+ }
+ if (do_dequeue) {
+ afs_DequeueCallback(avc);
+ }
+
+ /* CStatd is always cleared, plus whatever extra bits the caller gave
+ * us; clearing CStatd is what forces a re-fetch of the metadata. */
+ cflags |= CStatd;
+ avc->f.states &= ~cflags;
+
+ if (lock_cbhash) {
+ ReleaseWriteLock(&afs_xcbhash);
+ }
+
+ /* Note: states are re-checked after the clear above, so CVInit/CVFlushed
+ * set by the caller (or surviving the clear) suppress the DNLC purge. */
+ if ((flags & AFS_STALEVC_SKIP_DNLC_FOR_INIT_FLUSHED) &&
+ (avc->f.states & (CVInit | CVFlushed))) {
+ do_dnlc = 0;
+ }
+
+ if (flags & AFS_STALEVC_CLEARCB) {
+ avc->callback = NULL;
+ }
+
+ if (do_dnlc) {
+ if ((avc->f.fid.Fid.Vnode & 1) ||
+ AFSTOV(avc) == NULL || vType(avc) == VDIR ||
+ (avc->f.states & CForeign)) {
+ /* This vcache is (or could be) a directory. */
+ osi_dnlc_purgedp(avc);
+
+ } else if (do_filename) {
+ /* Plain file: only purge its name when explicitly requested. */
+ osi_dnlc_purgevp(avc);
+ }
+ }
+}
+
+/* Set the cached data version for 'avc' to '*avers'. Centralizing the hset()
+ * here gives a single point through which all DataVersion updates flow. */
+void
+afs_SetDataVersion(struct vcache *avc, afs_hyper_t *avers)
+{
+ hset(avc->f.m.DataVersion, *avers);
+}