vn_reinit(AFSTOV(avc));
#endif
afs_FreeAllAxs(&(avc->Access));
- afs_QueueVCB(avc);
+ if (!afs_shuttingdown)
+ afs_QueueVCB(avc);
ObtainWriteLock(&afs_xcbhash, 460);
afs_DequeueCallback(avc); /* remove it from queued callbacks list */
avc->f.states &= ~(CStatd | CUnique);
afs_stats_cmperf.CallBackFlushes++;
} else {
/* try allocating */
- tsp =
- (struct afs_cbr *)afs_osi_Alloc(AFS_NCBRS *
- sizeof(struct afs_cbr));
+ tsp = afs_osi_Alloc(AFS_NCBRS * sizeof(struct afs_cbr));
+ osi_Assert(tsp != NULL);
for (i = 0; i < AFS_NCBRS - 1; i++) {
tsp[i].next = &tsp[i + 1];
tsp[i].dynalloc = 0;
return 0;
}
+/*
+ * FlushAllVCBs: give up all queued callbacks on every reachable server.
+ *
+ * Fires RXAFS_GiveUpAllCallBacks in parallel (multi_Rx) across all of the
+ * supplied connections with the global lock dropped, then, for each
+ * connection whose RPC succeeded, frees every CBR chained on that server.
+ *
+ * NOTE(review): results[] is sized by nservers but indexed by connection
+ * (multi_i / i up to nconns) — presumably one connection per server address
+ * so nconns <= nservers; TODO confirm against afs_LoopServers' callback
+ * contract. The conns parameter is unused here but required by that
+ * callback signature.
+ */
+static void
+FlushAllVCBs(struct rx_connection **rxconns, int nconns, int nservers,
+	     struct afs_conn **conns, struct srvAddr **addrs)
+{
+    afs_int32 *results;
+    afs_int32 i;
+
+    results = afs_osi_Alloc(nservers * sizeof (afs_int32));
+    osi_Assert(results != NULL);
+
+    /* Drop the global lock while the RPCs are in flight. */
+    AFS_GUNLOCK();
+    multi_Rx(rxconns,nconns)
+    {
+        multi_RXAFS_GiveUpAllCallBacks();
+        results[multi_i] = multi_error;
+    } multi_End;
+    AFS_GLOCK();
+
+    /*
+     * Freeing the CBR will unlink it from the server's CBR list.
+     * Do it here, not in the multi_Rx loop, because a dynamic CBR will
+     * call into the memory management routines.
+     */
+    for ( i = 0 ; i < nconns ; i++ ) {
+        if (results[i] == 0) {
+            /* RPC succeeded: unchain all of this server's queued CBRs */
+            while (addrs[i]->server->cbrs)
+                afs_FreeCBR(addrs[i]->server->cbrs);
+        }
+    }
+    afs_osi_Free(results, nservers * sizeof(afs_int32));
+}
+
/*!
* Flush all queued callbacks to all servers.
*
struct afs_conn *tc;
int safety1, safety2, safety3;
XSTATS_DECLS;
+
+ if (AFS_IS_DISCONNECTED)
+ return ENETDOWN;
+
if ((code = afs_InitReq(&treq, afs_osi_credp)))
return code;
treq.flags |= O_NONBLOCK;
tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
+ osi_Assert(tfids != NULL);
if (lockit)
ObtainWriteLock(&afs_xvcb, 273);
+ /*
+ * Shutting down.
+ * First, attempt a multi across everything, all addresses
+ * for all servers we know of.
+ */
+
+ if (lockit == 2)
+ afs_LoopServers(2, NULL, 0, FlushAllVCBs, NULL);
+
ObtainReadLock(&afs_xserver);
for (i = 0; i < NSERVERS; i++) {
for (safety1 = 0, tsp = afs_servers[i];
afs_int32 i, loop;
struct vcache *tvc;
struct afs_q *tq, *uq;
- int fv_slept;
+ int fv_slept, defersleep = 0;
afs_int32 target = anumber;
i = 0;
}
fv_slept = 0;
- if (osi_TryEvictVCache(tvc, &fv_slept))
+ if (osi_TryEvictVCache(tvc, &fv_slept, defersleep))
anumber--;
if (fv_slept) {
i = 0;
continue; /* start over - may have raced. */
}
- if (tq == uq)
+ if (tq == uq) {
+ if (anumber && !defersleep) {
+ defersleep = 1;
+ tq = VLRU.prev;
+ continue;
+ }
break;
+ }
}
if (!afsd_dynamic_vcaches && anumber == target) {
afs_warn("afs_ShakeLooseVCaches: warning none freed, using %d of %d\n",
afs_FlushReclaimedVcaches();
#if defined(AFS_LINUX22_ENV)
- if(!afsd_dynamic_vcaches) {
+ if(!afsd_dynamic_vcaches && afs_vcount >= afs_maxvcount) {
afs_ShakeLooseVCaches(anumber);
if (afs_vcount >= afs_maxvcount) {
afs_warn("afs_NewVCache - none freed\n");
tvc = freeVCList; /* take from free list */
freeVCList = tvc->nextfree;
tvc->nextfree = NULL;
+ afs_vcount++; /* balanced by FlushVCache */
} /* end of if (!freeVCList) */
#endif /* AFS_LINUX22_ENV */
tc = afs_Conn(afid, areq, SHARED_LOCK);
if (tc) {
if (serverp)
- *serverp = tc->srvr->server;
+ *serverp = tc->parent->srvr->server;
start = osi_Time();
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
RX_AFS_GUNLOCK();
iheldthelock = VOP_ISLOCKED(vp, curthread);
if (!iheldthelock)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
+ AFS_GUNLOCK();
vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
+ AFS_GLOCK();
if (!iheldthelock)
VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
#elif defined(AFS_FBSD_ENV)
tc = afs_Conn(afid, areq, SHARED_LOCK);
avc->dchint = NULL; /* invalidate hints */
if (tc) {
- avc->callback = tc->srvr->server;
+ avc->callback = tc->parent->srvr->server;
start = osi_Time();
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
RX_AFS_GUNLOCK();
struct vcache *tvc;
afs_int32 i;
#ifdef AFS_DARWIN80_ENV
+ struct vcache *deadvc = NULL, *livevc = NULL;
vnode_t tvp;
#endif
i = VCHash(afid);
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
if (FidMatches(afid, tvc)) {
+#ifdef AFS_DARWIN80_ENV
+ if (flag & FIND_CDEAD) {
+ if (tvc->f.states & (CDeadVnode|CBulkFetching)) {
+ deadvc = tvc;
+ continue;
+ }
+ } else {
+ if (tvc->f.states & CDeadVnode)
+ if ((tvc->f.states & CBulkFetching) &&
+ !(flag & FIND_BULKDEAD))
+ continue;
+ }
+#endif
if (tvc->f.states & CVInit) {
findvc_sleep(tvc, flag);
goto findloop;
- }
-#ifdef AFS_DARWIN80_ENV
- if (tvc->f.states & CDeadVnode) {
- if (!(flag & FIND_CDEAD)) {
- findvc_sleep(tvc, flag);
- goto findloop;
- }
}
- tvp = AFSTOV(tvc);
- if (vnode_get(tvp))
- continue;
- if (vnode_ref(tvp)) {
- AFS_GUNLOCK();
- /* AFSTOV(tvc) may be NULL */
- vnode_put(tvp);
- AFS_GLOCK();
+#ifdef AFS_DARWIN80_ENV
+ if (tvc->f.states & CDeadVnode) {
+ findvc_sleep(tvc, flag);
+ goto findloop;
+ }
+ if (flag & FIND_CDEAD) {
+ livevc = tvc;
continue;
}
- if (tvc->f.states & (CBulkFetching|CDeadVnode)) {
- AFS_GUNLOCK();
- vnode_recycle(AFSTOV(tvc));
- AFS_GLOCK();
- }
#endif
break;
}
}
+#ifdef AFS_DARWIN80_ENV
+ if (flag & FIND_CDEAD) {
+ if (livevc && deadvc) {
+ /* discard deadvc */
+ AFS_GUNLOCK();
+ vnode_recycle(AFSTOV(deadvc));
+ vnode_put(AFSTOV(deadvc));
+ vnode_rele(AFSTOV(deadvc));
+ AFS_GLOCK();
+ deadvc = NULL;
+ }
+
+ /* return what's left */
+ tvc = livevc ? livevc : deadvc;
+ }
+#endif
/* should I have a read lock on the vnode here? */
if (tvc) {
if (retry)
*retry = 0;
-#if !defined(AFS_DARWIN80_ENV)
- osi_vnhold(tvc, retry); /* already held, above */
- if (retry && *retry)
- return 0;
-#endif
-#if defined(AFS_DARWIN_ENV) && !defined(AFS_DARWIN80_ENV)
+#if defined(AFS_DARWIN80_ENV)
+ tvp = AFSTOV(tvc);
+ if (vnode_get(tvp))
+ tvp = NULL;
+ if (tvp && vnode_ref(tvp)) {
+ AFS_GUNLOCK();
+ /* AFSTOV(tvc) may be NULL */
+ vnode_put(tvp);
+ AFS_GLOCK();
+ tvp = NULL;
+ }
+ if (tvp && (tvc->f.states & (CBulkFetching|CDeadVnode))) {
+ AFS_GUNLOCK();
+ vnode_recycle(AFSTOV(tvc));
+ AFS_GLOCK();
+ }
+ if (!tvp) {
+ tvc = NULL;
+ return tvc;
+ }
+#elif defined(AFS_DARWIN_ENV)
tvc->f.states |= CUBCinit;
AFS_GUNLOCK();
if (UBCINFOMISSING(AFSTOV(tvc)) ||
}
AFS_GLOCK();
tvc->f.states &= ~CUBCinit;
+#else
+ osi_vnhold(tvc, retry); /* already held, above */
+ if (retry && *retry)
+ return 0;
#endif
/*
* only move to front of vlru if we have proper vcache locking)
tvc = found_tvc;
/* should I have a read lock on the vnode here? */
if (tvc) {
+#ifndef AFS_DARWIN80_ENV
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
afs_int32 retry = 0;
osi_vnhold(tvc, &retry);
#else
osi_vnhold(tvc, (int *)0); /* already held, above */
#endif
+#endif
/*
* We obtained the xvcache lock above.
*/
#if !defined(AFS_LINUX22_ENV)
/* Allocate and thread the struct vcache entries */
- tvp = (struct vcache *)afs_osi_Alloc(astatSize * sizeof(struct vcache));
+ tvp = afs_osi_Alloc(astatSize * sizeof(struct vcache));
+ osi_Assert(tvp != NULL);
memset(tvp, 0, sizeof(struct vcache) * astatSize);
Initial_freeVCList = tvp;
ReleaseWriteLock(&afs_xvcache);
- afs_FlushVCBs(1);
+ afs_FlushVCBs(2);
}
/*!