/* Exported variables */
afs_rwlock_t afs_xvcache; /*Lock: alloc new stat cache entries */
+afs_rwlock_t afs_xvreclaim; /*Lock: entries reclaimed, not on free list */
afs_lock_t afs_xvcb; /*Lock: fids on which there are callbacks */
#if !defined(AFS_LINUX22_ENV)
static struct vcache *freeVCList; /*Free list for stat cache entries */
+struct vcache *ReclaimedVCList; /*Reclaimed list for stat entries */
static struct vcache *Initial_freeVCList; /*Initial list for above */
#endif
struct afs_q VLRU; /*vcache LRU */
AFSTOV(avc) = NULL; /* also drop the ptr to vnode */
}
#endif
+#ifdef AFS_SUN510_ENV
+ /* As we use private vnodes, cleanup is up to us */
+ vn_reinit(AFSTOV(avc));
+#endif
afs_FreeAllAxs(&(avc->Access));
/* we can't really give back callbacks on RO files, since the
MReleaseWriteLock(&afs_xvcb);
}
+/*
+ * afs_FlushReclaimedVcaches
+ *
+ * Drain ReclaimedVCList: pop each reclaimed vcache off the list and hand
+ * it to afs_FlushVCache() so it can be fully freed.  Entries whose flush
+ * fails are collected on a private temporary list and put back onto
+ * ReclaimedVCList at the end, so they are retried on a later call rather
+ * than leaked.
+ *
+ * Locking: holds afs_xvreclaim (write) for the entire walk.
+ * NOTE(review): afs_FlushVCache may sleep (it reports this through
+ * fv_slept, which is ignored here) -- confirm callers tolerate sleeping
+ * while afs_xvreclaim is held.
+ *
+ * Compiled out on AFS_LINUX22_ENV builds, which do not use the reclaim
+ * list (see the #if around ReclaimedVCList's declaration).
+ */
+void
+afs_FlushReclaimedVcaches(void)
+{
+#if !defined(AFS_LINUX22_ENV)
+ struct vcache *tvc;
+ int code, fv_slept;
+ struct vcache *tmpReclaimedVCList = NULL; /* entries whose flush failed */
+
+ ObtainWriteLock(&afs_xvreclaim, 76);
+ while (ReclaimedVCList) {
+ tvc = ReclaimedVCList; /* take from free list */
+ ReclaimedVCList = tvc->nextfree;
+ tvc->nextfree = NULL; /* detach before flushing */
+ code = afs_FlushVCache(tvc, &fv_slept);
+ if (code) {
+ /* Ok, so, if we got code != 0, uh, wtf do we do? */
+ /* Probably, build a temporary list and then put all back when we
+ get to the end of the list */
+ /* This is actually really crappy, but we need to not leak these.
+ We probably need a way to be smarter about this. */
+ /* Failures go onto tmpReclaimedVCList, never back onto
+ ReclaimedVCList, so this loop is guaranteed to terminate. */
+ tvc->nextfree = tmpReclaimedVCList;
+ tmpReclaimedVCList = tvc;
+ printf("Reclaim list flush %lx failed: %d\n", (unsigned long) tvc, code);
+ }
+ }
+ /* The loop exits only when ReclaimedVCList is NULL, so restoring the
+ failed entries here cannot drop anything enqueued concurrently (the
+ write lock on afs_xvreclaim is still held). */
+ if (tmpReclaimedVCList)
+ ReclaimedVCList = tmpReclaimedVCList;
+
+ ReleaseWriteLock(&afs_xvreclaim);
+#endif
+}
+
/*
* afs_NewVCache
*
int code, fv_slept;
AFS_STATCNT(afs_NewVCache);
+
+ afs_FlushReclaimedVcaches();
+
#if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
#if defined(AFS_OSF30_ENV) || defined(AFS_LINUX22_ENV)
if (afs_vcount >= afs_maxvcount)
&& tvc->opens == 0 && (tvc->states & CUnlinkedDel) == 0) {
#if defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
#ifdef AFS_DARWIN80_ENV
- fv_slept=1;
- /* must release lock, since vnode_recycle will immediately
- reclaim if there are no other users */
- ReleaseWriteLock(&afs_xvcache);
- AFS_GUNLOCK();
- /* VREFCOUNT_GT only sees usecounts, not iocounts */
- /* so this may fail to actually recycle the vnode now */
- if (vnode_recycle(AFSTOV(tvc)))
- code=0;
- else
- code=EBUSY;
- AFS_GLOCK();
- ObtainWriteLock(&afs_xvcache, 336);
+ vnode_t tvp = AFSTOV(tvc);
+ /* VREFCOUNT_GT only sees usecounts, not iocounts */
+ /* so this may fail to actually recycle the vnode now */
+ /* must call vnode_get to avoid races. */
+ if (vnode_get(tvp) == 0) {
+ fv_slept=1;
+ /* must release lock, since vnode_put will immediately
+ reclaim if there are no other users */
+ ReleaseWriteLock(&afs_xvcache);
+ AFS_GUNLOCK();
+ vnode_recycle(tvp);
+ vnode_put(tvp);
+ AFS_GLOCK();
+ ObtainWriteLock(&afs_xvcache, 336);
+ }
+ /* we can't use the vnode_recycle return value to figure
+ * this out, since the iocount we have to hold makes it
+ * always "fail" */
+ if (AFSTOV(tvc) == tvp)
+ code = EBUSY;
+ else
+ code = 0;
#else
/*
* vgone() reclaims the vnode, which calls afs_FlushVCache(),
* request is lost */
i = VCHash(afid);
+ j = VCHashV(afid);
tvc->hnext = afs_vhashT[i];
afs_vhashT[i] = tvc;
- QAdd(&afs_vhashTV[i], &tvc->vhashq);
+ QAdd(&afs_vhashTV[j], &tvc->vhashq);
if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
refpanic("NewVCache VLRU inconsistent");
ObtainSharedLock(&afs_xvcache, 5);
- tvc = afs_FindVCache(afid, &retry, DO_STATS | DO_VLRU);
+ tvc = afs_FindVCache(afid, &retry, DO_STATS | DO_VLRU | IS_SLOCK);
if (tvc && retry) {
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
ReleaseSharedLock(&afs_xvcache);
#endif
ObtainSharedLock(&afs_xvcache, 6);
- tvc = afs_FindVCache(&nfid, &retry, DO_VLRU /* no xstats now */ );
+ tvc = afs_FindVCache(&nfid, &retry, DO_VLRU | IS_SLOCK/* no xstats now */ );
if (tvc && retry) {
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
ReleaseSharedLock(&afs_xvcache);
struct AFSCallBack CallBack;
struct AFSVolSync tsync;
int origCBs = 0;
+#ifdef AFS_OSF_ENV
+ int vg;
+#endif
start = osi_Time();
/* for the present (95.05.25) everything on the hash table is
* definitively NOT in the free list -- at least until afs_reclaim
* can be safely implemented */
- int vg;
AFS_GUNLOCK();
vg = vget(AFSTOV(tvc)); /* this bumps ref count */
AFS_GLOCK();
continue;
#endif /* AFS_OSF_ENV */
#ifdef AFS_DARWIN80_ENV
- int vg;
if (tvc->states & CDeadVnode) {
ReleaseSharedLock(&afs_xvcache);
afs_osi_Sleep(&tvc->states);
goto rootvc_loop;
}
- AFS_GUNLOCK();
- vg = vnode_get(AFSTOV(tvc)); /* this bumps ref count */
- AFS_GLOCK();
- if (vg)
+ if (vnode_get(AFSTOV(tvc))) /* this bumps ref count */
continue;
#endif
break;
getNewFid = 1;
ReleaseSharedLock(&afs_xvcache);
#ifdef AFS_DARWIN80_ENV
- if (tvc)
+ if (tvc) {
+ AFS_GUNLOCK();
vnode_put(AFSTOV(tvc));
+ AFS_GLOCK();
+ }
#endif
tvc = NULL;
goto newmtpt;
loop:
ObtainSharedLock(&afs_xvcache, 8);
- tvc = afs_FindVCache(afid, &retry, DO_VLRU /* no stats */ );
+ tvc = afs_FindVCache(afid, &retry, DO_VLRU| IS_SLOCK /* no stats */ );
if (tvc && retry) {
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
ReleaseSharedLock(&afs_xvcache);
#endif
} /*afs_PutVCache */
+
+/*
+ * findvc_sleep
+ *
+ * Helper for the FindVCache lookup loops: temporarily drop afs_xvcache
+ * in whatever mode the caller holds it -- shared (IS_SLOCK), write
+ * (IS_WLOCK), or read (neither flag set) -- sleep until avc->states is
+ * signalled, then reacquire afs_xvcache in the same mode.
+ *
+ * NOTE(review): the lock is dropped across afs_osi_Sleep, so the vcache
+ * may change (or be reclaimed) while we sleep; callers must revalidate
+ * avc (they jump back to their lookup loop) after this returns.
+ */
+static void findvc_sleep(struct vcache *avc, int flag) {
+ if (flag & IS_SLOCK) {
+ ReleaseSharedLock(&afs_xvcache);
+ } else {
+ if (flag & IS_WLOCK) {
+ ReleaseWriteLock(&afs_xvcache);
+ } else {
+ ReleaseReadLock(&afs_xvcache);
+ }
+ }
+ afs_osi_Sleep(&avc->states);
+ /* Reacquire in exactly the mode we released above. */
+ if (flag & IS_SLOCK) {
+ ObtainSharedLock(&afs_xvcache, 341);
+ } else {
+ if (flag & IS_WLOCK) {
+ ObtainWriteLock(&afs_xvcache, 343);
+ } else {
+ ObtainReadLock(&afs_xvcache);
+ }
+ }
+}
/*
* afs_FindVCache
*
register struct vcache *tvc;
afs_int32 i;
+#if defined( AFS_OSF_ENV)
+ int vg;
+#endif
AFS_STATCNT(afs_FindVCache);
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
if (FidMatches(afid, tvc)) {
if (tvc->states & CVInit) {
- int lock;
- lock = CheckLock(&afs_xvcache);
- if (lock > 0)
- ReleaseReadLock(&afs_xvcache);
- else
- ReleaseSharedLock(&afs_xvcache);
- afs_osi_Sleep(&tvc->states);
- if (lock > 0)
- ObtainReadLock(&afs_xvcache);
- else
- ObtainSharedLock(&afs_xvcache, 341);
+ findvc_sleep(tvc, flag);
goto findloop;
}
#ifdef AFS_OSF_ENV
/* Grab this vnode, possibly reactivating from the free list */
- int vg;
AFS_GUNLOCK();
vg = vget(AFSTOV(tvc));
AFS_GLOCK();
continue;
#endif /* AFS_OSF_ENV */
#ifdef AFS_DARWIN80_ENV
- int vg;
if (tvc->states & CDeadVnode) {
- int lock;
- lock = CheckLock(&afs_xvcache);
- if (lock > 0)
- ReleaseReadLock(&afs_xvcache);
- else
- ReleaseSharedLock(&afs_xvcache);
- afs_osi_Sleep(&tvc->states);
- if (lock > 0)
- ObtainReadLock(&afs_xvcache);
- else
- ObtainSharedLock(&afs_xvcache, 341);
+ findvc_sleep(tvc, flag);
goto findloop;
}
- AFS_GUNLOCK();
- vg = vnode_get(AFSTOV(tvc));
- AFS_GLOCK();
- if (vg)
+ if (vnode_get(AFSTOV(tvc)))
continue;
#endif
break;
afs_int32 i;
afs_int32 count = 0;
struct vcache *found_tvc = NULL;
+#ifdef AFS_OSF_ENV
+ int vg;
+#endif
AFS_STATCNT(afs_FindVCache);
}
#ifdef AFS_OSF_ENV
/* Grab this vnode, possibly reactivating from the free list */
- int vg;
AFS_GUNLOCK();
vg = vget(AFSTOV(tvc));
AFS_GLOCK();
}
#endif /* AFS_OSF_ENV */
#ifdef AFS_DARWIN80_ENV
- int vg;
if (tvc->states & CDeadVnode) {
ReleaseSharedLock(&afs_xvcache);
afs_osi_Sleep(&tvc->states);
goto loop;
}
- AFS_GUNLOCK();
- vg = vnode_get(AFSTOV(tvc));
- AFS_GLOCK();
- if (vg) {
+ if (vnode_get(AFSTOV(tvc))) {
/* This vnode no longer exists. */
continue;
}