static struct afs_slotlist *afs_freeSlotList = NULL;
/* Forward declarations */
-static afs_int32 afs_QueueVCB(struct vcache *avc);
+static afs_int32 afs_QueueVCB(struct vcache *avc, int *slept);
/*!
* Generate an index into the hash table for a given Fid.
for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
if (avc == wvc) {
*uvc = avc->hnext;
- avc->hnext = (struct vcache *)NULL;
+ avc->hnext = NULL;
break;
}
}
vn_reinit(AFSTOV(avc));
#endif
afs_FreeAllAxs(&(avc->Access));
- if (!afs_shuttingdown)
- afs_QueueVCB(avc);
ObtainWriteLock(&afs_xcbhash, 460);
afs_DequeueCallback(avc); /* remove it from queued callbacks list */
avc->f.states &= ~(CStatd | CUnique);
else
osi_dnlc_purgevp(avc);
+ if (!afs_shuttingdown)
+ afs_QueueVCB(avc, slept);
+
/*
* Next, keep track of which vnodes we've deleted for create's
* optimistic synchronization algorithm
struct afs_cbr *tsp;
int i;
- if (!afs_cbrSpace) {
- afs_osi_CancelWait(&AFS_WaitHandler); /* trigger FlushVCBs asap */
-
+ while (!afs_cbrSpace) {
if (afs_stats_cmperf.CallBackAlloced >= sizeof(afs_cbrHeads)/sizeof(afs_cbrHeads[0])) {
/* don't allocate more than 16 * AFS_NCBRS for now */
- tsp = (struct afs_cbr *)osi_AllocSmallSpace(sizeof(*tsp));
- tsp->dynalloc = 1;
- tsp->next = NULL;
+ afs_FlushVCBs(0);
afs_stats_cmperf.CallBackFlushes++;
} else {
/* try allocating */
osi_Assert(tsp != NULL);
for (i = 0; i < AFS_NCBRS - 1; i++) {
tsp[i].next = &tsp[i + 1];
- tsp[i].dynalloc = 0;
}
tsp[AFS_NCBRS - 1].next = 0;
- tsp[AFS_NCBRS - 1].dynalloc = 0;
- afs_cbrSpace = tsp->next;
+ afs_cbrSpace = tsp;
afs_cbrHeads[afs_stats_cmperf.CallBackAlloced] = tsp;
afs_stats_cmperf.CallBackAlloced++;
}
- } else {
- tsp = afs_cbrSpace;
- afs_cbrSpace = tsp->next;
}
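+ /* The loop above guarantees afs_cbrSpace is non-NULL here; pop the head. */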
+ tsp = afs_cbrSpace;
+ afs_cbrSpace = tsp->next;
return tsp;
}
if (asp->hash_next)
asp->hash_next->hash_pprev = asp->hash_pprev;
- if (asp->dynalloc) {
- osi_FreeSmallSpace(asp);
- } else {
- asp->next = afs_cbrSpace;
- afs_cbrSpace = asp;
- }
+ asp->next = afs_cbrSpace;
+ afs_cbrSpace = asp;
return 0;
}
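+/*!
+ * Server-loop callback, invoked via afs_LoopServers: for each connection
+ * whose give-up-callbacks call succeeded, free every callback return
+ * still queued for that server.
+ */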
static void
-FlushAllVCBs(struct rx_connection **rxconns, int nconns, int nservers,
- struct afs_conn **conns, struct srvAddr **addrs)
+FlushAllVCBs(int nconns, struct rx_connection **rxconns,
+ struct afs_conn **conns)
{
afs_int32 *results;
afs_int32 i;
- results = afs_osi_Alloc(nservers * sizeof (afs_int32));
+ results = afs_osi_Alloc(nconns * sizeof (afs_int32));
osi_Assert(results != NULL);
AFS_GUNLOCK();
for ( i = 0 ; i < nconns ; i++ ) {
if (results[i] == 0) {
/* Unchain all of them */
- while (addrs[i]->server->cbrs)
- afs_FreeCBR(addrs[i]->server->cbrs);
+ while (conns[i]->parent->srvr->server->cbrs)
+ afs_FreeCBR(conns[i]->parent->srvr->server->cbrs);
}
}
- afs_osi_Free(results, nservers * sizeof(afs_int32));
+ afs_osi_Free(results, nconns * sizeof(afs_int32));
}
/*!
int tcount;
struct server *tsp;
int i;
- struct vrequest treq;
+ struct vrequest *treq = NULL;
struct afs_conn *tc;
int safety1, safety2, safety3;
XSTATS_DECLS;
if (AFS_IS_DISCONNECTED)
return ENETDOWN;
- if ((code = afs_InitReq(&treq, afs_osi_credp)))
+ if ((code = afs_CreateReq(&treq, afs_osi_credp)))
return code;
- treq.flags |= O_NONBLOCK;
+ treq->flags |= O_NONBLOCK;
tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
osi_Assert(tfids != NULL);
*/
if (lockit == 2)
- afs_LoopServers(2, NULL, 0, FlushAllVCBs, NULL);
+ afs_LoopServers(AFS_LS_ALL, NULL, 0, FlushAllVCBs, NULL);
ObtainReadLock(&afs_xserver);
for (i = 0; i < NSERVERS; i++) {
tcount = 0; /* number found so far */
for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
+ struct rx_connection *rxconn;
/* if buffer is full, or we've queued all we're going
* to from this server, we should flush out the
* callbacks.
callBacks[0].CallBackType = CB_EXCLUSIVE;
for (safety3 = 0; safety3 < AFS_MAXHOSTS * 2; safety3++) {
tc = afs_ConnByHost(tsp, tsp->cell->fsport,
- tsp->cell->cellNum, &treq, 0,
- SHARED_LOCK);
+ tsp->cell->cellNum, treq, 0,
+ SHARED_LOCK, 0, &rxconn);
if (tc) {
XSTATS_START_TIME
(AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
RX_AFS_GUNLOCK();
code =
- RXAFS_GiveUpCallBacks(tc->id, &fidArray,
+ RXAFS_GiveUpCallBacks(rxconn, &fidArray,
&cbArray);
RX_AFS_GLOCK();
XSTATS_END_TIME;
} else
code = -1;
if (!afs_Analyze
- (tc, code, 0, &treq,
+ (tc, rxconn, code, 0, treq,
AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS, SHARED_LOCK,
tsp->cell)) {
break;
if (lockit)
ReleaseWriteLock(&afs_xvcb);
afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
+ afs_DestroyReq(treq);
return 0;
}
* Environment:
* Locks the xvcb lock.
* Called when the xvcache lock is already held.
+ * RACE: afs_xvcache may be dropped and reacquired
*
* \param avc vcache entry
+ * \param slept Set to 1 if we dropped afs_xvcache
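+ * (if so, the vcache hash chains may have changed while the lock was
+ * dropped, and callers iterating them should restart)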
* \return 1 if queued, 0 otherwise
*/
static afs_int32
-afs_QueueVCB(struct vcache *avc)
+afs_QueueVCB(struct vcache *avc, int *slept)
{
int queued = 0;
struct server *tsp;
struct afs_cbr *tcbp;
+ int reacquire = 0;
AFS_STATCNT(afs_QueueVCB);
/* The callback is really just a struct server ptr. */
tsp = (struct server *)(avc->callback);
+ if (!afs_cbrSpace) {
+ /* If we don't have CBR space, AllocCBR may block or hit the net for
+ * clearing up CBRs. Hitting the net may involve a fileserver
+ * needing to contact us, so we must drop xvcache so we don't block
+ * those requests from going through. */
+ reacquire = *slept = 1;
+ ReleaseWriteLock(&afs_xvcache);
+ }
+
/* we now have a pointer to the server, so we just allocate
* a queue entry and queue it.
*/
done:
/* now release locks and return */
ReleaseWriteLock(&afs_xvcb);
+
+ if (reacquire) {
+ /* make sure this is after dropping xvcb, for locking order */
+ ObtainWriteLock(&afs_xvcache, 279);
+ }
return queued;
}
struct vcache *tvc;
struct afs_q *tq, *uq;
int fv_slept, defersleep = 0;
+ int limit;
afs_int32 target = anumber;
- i = 0;
loop = 0;
+
+ retry:
+ i = 0;
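+ /* Bound each pass by the current vcache count; visiting more
+ * entries than that means the VLRU contains a cycle. */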
+ limit = afs_vcount;
for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
tvc = QTOV(tq);
uq = QPrev(tq);
if (tvc->f.states & CVFlushed) {
refpanic("CVFlushed on VLRU");
- /* In the other path, this was 2 * afs_cacheStats */
- } else if (!afsd_dynamic_vcaches && i++ > afs_maxvcount) {
- refpanic("Exceeded pool of AFS vnodes(VLRU cycle?)");
+ } else if (i++ > limit) {
+ afs_warn("afs_ShakeLooseVCaches: i %d limit %d afs_vcount %d afs_maxvcount %d\n",
+ (int)i, limit, (int)afs_vcount, (int)afs_maxvcount);
+ refpanic("Found too many AFS vnodes on VLRU (VLRU cycle?)");
} else if (QNext(uq) != tq) {
refpanic("VLRU inconsistent");
} else if (tvc->f.states & CVInit) {
if (fv_slept) {
if (loop++ > 100)
break;
- uq = VLRU.prev;
- i = 0;
- continue; /* start over - may have raced. */
+ goto retry; /* start over - may have raced. */
}
- if (tq == uq) {
+ if (uq == &VLRU) {
if (anumber && !defersleep) {
defersleep = 1;
- tq = VLRU.prev;
- continue;
+ goto retry;
}
break;
}
#endif
}
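+/*!
+ * Flush every entry in the vcache hash table. If flushing an entry
+ * drops afs_xvcache (slept is set), the hash chains may have changed,
+ * so restart the scan from the top.
+ */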
+void
+afs_FlushAllVCaches(void)
+{
+ int i;
+ struct vcache *tvc, *nvc;
+
+ ObtainWriteLock(&afs_xvcache, 867);
+
+ retry:
+ for (i = 0; i < VCSIZE; i++) {
+ for (tvc = afs_vhashT[i]; tvc; tvc = nvc) {
+ int slept;
+
+ nvc = tvc->hnext;
+ if (afs_FlushVCache(tvc, &slept)) {
+ afs_warn("Failed to flush vcache 0x%lx\n", (unsigned long)(uintptrsz)tvc);
+ }
+ if (slept) {
+ goto retry;
+ }
+ }
+ }
+
+ ReleaseWriteLock(&afs_xvcache);
+}
+
/*!
* This routine is responsible for allocating a new cache entry
* from the free list. It formats the cache entry and inserts it
struct afs_conn *tc;
afs_int32 code;
afs_ucred_t *cred = NULL;
- struct vrequest treq, ureq;
+ struct vrequest *treq = NULL;
struct AFSVolSync tsync;
int didCore;
XSTATS_DECLS;
AFS_STATCNT(afs_FlushActiveVcaches);
+
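+ /* Allocate a single request up front; it is reinitialized for each
+ * use below and destroyed on exit. */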
+ code = afs_CreateReq(&treq, afs_osi_credp);
+ if (code) {
+ afs_warn("unable to alloc treq\n");
+ return;
+ }
+
ObtainReadLock(&afs_xvcache);
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
tvc->flockCount)) panic("Dead vnode has core/unlinkedel/flock");
#endif
if (doflocks && tvc->flockCount != 0) {
+ struct rx_connection *rxconn;
/* if this entry has an flock, send a keep-alive call out */
osi_vnhold(tvc, 0);
ReleaseReadLock(&afs_xvcache);
ObtainWriteLock(&tvc->lock, 51);
do {
- afs_InitReq(&treq, afs_osi_credp);
- treq.flags |= O_NONBLOCK;
+ code = afs_InitReq(treq, afs_osi_credp);
+ if (code) {
+ code = -1;
+ break; /* shutting down: do not try to extend the lock */
+ }
+ treq->flags |= O_NONBLOCK;
- tc = afs_Conn(&tvc->f.fid, &treq, SHARED_LOCK);
+ tc = afs_Conn(&tvc->f.fid, treq, SHARED_LOCK, &rxconn);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
RX_AFS_GUNLOCK();
code =
- RXAFS_ExtendLock(tc->id,
+ RXAFS_ExtendLock(rxconn,
(struct AFSFid *)&tvc->f.fid.Fid,
&tsync);
RX_AFS_GLOCK();
} else
code = -1;
} while (afs_Analyze
- (tc, code, &tvc->f.fid, &treq,
+ (tc, rxconn, code, &tvc->f.fid, treq,
AFS_STATS_FS_RPCIDX_EXTENDLOCK, SHARED_LOCK, NULL));
ReleaseWriteLock(&tvc->lock);
/* XXXX Find better place-holder for cred XXXX */
cred = (afs_ucred_t *)tvc->linkData;
tvc->linkData = NULL; /* XXX */
- afs_InitReq(&ureq, cred);
+ code = afs_InitReq(treq, cred);
afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
tvc->execsOrWriters);
- code = afs_StoreOnLastReference(tvc, &ureq);
+ if (!code) { /* avoid store when shutting down */
+ code = afs_StoreOnLastReference(tvc, treq);
+ }
ReleaseWriteLock(&tvc->lock);
#ifdef AFS_BOZONLOCK_ENV
afs_BozonUnlock(&tvc->pvnLock, tvc);
}
}
ReleaseReadLock(&afs_xvcache);
+ afs_DestroyReq(treq);
}
struct afs_conn *tc;
struct AFSFetchStatus OutStatus;
struct AFSVolSync tsync;
+ struct rx_connection *rxconn;
XSTATS_DECLS;
AFS_STATCNT(afs_WriteVCache);
afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
do {
- tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK);
+ tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
RX_AFS_GUNLOCK();
code =
- RXAFS_StoreStatus(tc->id, (struct AFSFid *)&avc->f.fid.Fid,
+ RXAFS_StoreStatus(rxconn, (struct AFSFid *)&avc->f.fid.Fid,
astatus, &OutStatus, &tsync);
RX_AFS_GLOCK();
XSTATS_END_TIME;
} else
code = -1;
} while (afs_Analyze
- (tc, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
+ (tc, rxconn, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
SHARED_LOCK, NULL));
UpgradeSToWLock(&avc->lock, 20);
{
afs_int32 code;
struct afs_conn *tc;
+ struct rx_connection *rxconn;
struct AFSFetchStatus OutDirStatus;
XSTATS_DECLS;
if (!name)
name = ""; /* XXX */
do {
- tc = afs_Conn(afid, areq, SHARED_LOCK);
+ tc = afs_Conn(afid, areq, SHARED_LOCK, &rxconn);
if (tc) {
if (serverp)
*serverp = tc->parent->srvr->server;
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
RX_AFS_GUNLOCK();
code =
- RXAFS_Lookup(tc->id, (struct AFSFid *)&afid->Fid, name,
+ RXAFS_Lookup(rxconn, (struct AFSFid *)&afid->Fid, name,
(struct AFSFid *)&nfid->Fid, OutStatusp,
&OutDirStatus, CallBackp, tsyncp);
RX_AFS_GLOCK();
} else
code = -1;
} while (afs_Analyze
- (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
+ (tc, rxconn, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
NULL));
return code;
}
#ifdef AFS_DARWIN80_ENV
if (tvc->f.states & CDeadVnode) {
- if (!(tvc->f.states & CBulkFetching)) {
- ReleaseSharedLock(&afs_xvcache);
- afs_osi_Sleep(&tvc->f.states);
- goto rootvc_loop;
- }
+ ReleaseSharedLock(&afs_xvcache);
+ afs_osi_Sleep(&tvc->f.states);
+ goto rootvc_loop;
}
tvp = AFSTOV(tvc);
if (vnode_get(tvp)) /* this bumps ref count */
AFS_GLOCK();
continue;
}
- if (tvc->f.states & (CBulkFetching|CDeadVnode)) {
- AFS_GUNLOCK();
- vnode_recycle(AFSTOV(tvc));
- AFS_GLOCK();
- }
#endif
break;
}
afs_PutVolume(volp, READ_LOCK);
}
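+/*!
+ * Warn that a server handed us an invalid AFSFetchStatus, identifying
+ * the server by its IPv4 address in dotted-quad form.
+ */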
+void
+afs_BadFetchStatus(struct afs_conn *tc)
+{
+ int addr = ntohl(tc->parent->srvr->sa_ip);
+ afs_warn("afs: Invalid AFSFetchStatus from server %u.%u.%u.%u\n",
+ (addr >> 24) & 0xff, (addr >> 16) & 0xff, (addr >> 8) & 0xff,
+ (addr) & 0xff);
+ afs_warn("afs: This suggests the server may be sending bad data that "
+ "can lead to availability issues or data corruption. The "
+ "issue has been avoided for now, but it may not always be "
+ "detectable. Please upgrade the server if possible.\n");
+}
+
+/*!
+ * Check if a given AFSFetchStatus structure is sane.
+ *
+ * \param[in] tc The server from which we received the status
+ * \param[in] status The status we received
+ *
+ * \return whether the given structure is valid or not
+ * \retval 0 the structure is fine
+ * \retval nonzero the structure looks like garbage; act as if we received
+ * the returned error code from the server
+ */
+int
+afs_CheckFetchStatus(struct afs_conn *tc, struct AFSFetchStatus *status)
+{
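+ /* A sane status carries no inline error, interface version 1, a
+ * known file type, and a nonzero parent vnode and unique. */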
+ if (status->errorCode ||
+ status->InterfaceVersion != 1 ||
+ !(status->FileType > Invalid && status->FileType <= SymbolicLink) ||
+ status->ParentVnode == 0 || status->ParentUnique == 0) {
+
+ afs_warn("afs: FetchStatus ec %u iv %u ft %u pv %u pu %u\n",
+ (unsigned)status->errorCode, (unsigned)status->InterfaceVersion,
+ (unsigned)status->FileType, (unsigned)status->ParentVnode,
+ (unsigned)status->ParentUnique);
+ afs_BadFetchStatus(tc);
+
+ return VBUSY;
+ }
+ return 0;
+}
+
/*!
* Must be called with avc write-locked
* don't absolutely have to invalidate the hint unless the dv has
struct afs_conn *tc;
struct AFSCallBack CallBack;
struct AFSVolSync tsync;
+ struct rx_connection *rxconn;
XSTATS_DECLS;
do {
- tc = afs_Conn(afid, areq, SHARED_LOCK);
+ tc = afs_Conn(afid, areq, SHARED_LOCK, &rxconn);
avc->dchint = NULL; /* invalidate hints */
if (tc) {
avc->callback = tc->parent->srvr->server;
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
RX_AFS_GUNLOCK();
code =
- RXAFS_FetchStatus(tc->id, (struct AFSFid *)&afid->Fid, Outsp,
+ RXAFS_FetchStatus(rxconn, (struct AFSFid *)&afid->Fid, Outsp,
&CallBack, &tsync);
RX_AFS_GLOCK();
XSTATS_END_TIME;
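+ /* Guard against a server sending us garbage status data. */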
+ if (code == 0) {
+ code = afs_CheckFetchStatus(tc, Outsp);
+ }
+
} else
code = -1;
} while (afs_Analyze
- (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
+ (tc, rxconn, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
SHARED_LOCK, NULL));
if (!code) {
*
* \param avc Pointer to the cache entry to reset
* \param acred
+ * \param skipdnlc skip the dnlc purge for this vnode
*
* \note avc must be write locked on entry
+ *
+ * \note The caller should purge the dnlc when skipdnlc is set.
*/
void
-afs_ResetVCache(struct vcache *avc, afs_ucred_t *acred)
+afs_ResetVCache(struct vcache *avc, afs_ucred_t *acred, afs_int32 skipdnlc)
{
ObtainWriteLock(&afs_xcbhash, 456);
afs_DequeueCallback(avc);
ReleaseWriteLock(&afs_xcbhash);
/* now find the disk cache entries */
afs_TryToSmush(avc, acred, 1);
- osi_dnlc_purgedp(avc);
+ if (!skipdnlc) {
+ osi_dnlc_purgedp(avc);
+ }
if (avc->linkData && !(avc->f.states & CCore)) {
afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
avc->linkData = NULL;
static void
findvc_sleep(struct vcache *avc, int flag)
{
- int fstates = avc->f.states;
if (flag & IS_SLOCK) {
ReleaseSharedLock(&afs_xvcache);
} else {
ReleaseReadLock(&afs_xvcache);
}
}
- if (flag & FIND_CDEAD) {
- ObtainWriteLock(&afs_xvcache, 342);
- afs_FlushReclaimedVcaches();
- if (fstates == avc->f.states) {
- ReleaseWriteLock(&afs_xvcache);
- afs_osi_Sleep(&avc->f.states);
- } else
- ReleaseWriteLock(&afs_xvcache);
- } else
- afs_osi_Sleep(&avc->f.states);
+ afs_osi_Sleep(&avc->f.states);
if (flag & IS_SLOCK) {
ObtainSharedLock(&afs_xvcache, 341);
} else {
i = VCHash(afid);
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
if (FidMatches(afid, tvc)) {
-#ifdef AFS_DARWIN80_ENV
- if (flag & FIND_CDEAD) {
- if (tvc->f.states & (CDeadVnode|CBulkFetching)) {
- deadvc = tvc;
- continue;
- }
- } else {
- if (tvc->f.states & CDeadVnode)
- if ((tvc->f.states & CBulkFetching) &&
- !(flag & FIND_BULKDEAD))
- continue;
- }
-#endif
if (tvc->f.states & CVInit) {
findvc_sleep(tvc, flag);
goto findloop;
findvc_sleep(tvc, flag);
goto findloop;
}
- if (flag & FIND_CDEAD) {
- livevc = tvc;
- continue;
- }
#endif
break;
}
}
-#ifdef AFS_DARWIN80_ENV
- if (flag & FIND_CDEAD) {
- if (livevc && deadvc) {
- /* discard deadvc */
- AFS_GUNLOCK();
- vnode_recycle(AFSTOV(deadvc));
- vnode_put(AFSTOV(deadvc));
- vnode_rele(AFSTOV(deadvc));
- AFS_GLOCK();
- deadvc = NULL;
- }
-
- /* return what's left */
- tvc = livevc ? livevc : deadvc;
- }
-#endif
/* should I have a read lock on the vnode here? */
if (tvc) {
AFS_GLOCK();
tvp = NULL;
}
- if (tvp && (tvc->f.states & (CBulkFetching|CDeadVnode))) {
- AFS_GUNLOCK();
- vnode_recycle(AFSTOV(tvc));
- AFS_GLOCK();
- }
if (!tvp) {
tvc = NULL;
return tvc;
}
#ifdef AFS_DARWIN80_ENV
if (tvc->f.states & CDeadVnode) {
- if (!(tvc->f.states & CBulkFetching)) {
- ReleaseSharedLock(&afs_xvcache);
- afs_osi_Sleep(&tvc->f.states);
- goto loop;
- }
+ ReleaseSharedLock(&afs_xvcache);
+ afs_osi_Sleep(&tvc->f.states);
+ goto loop;
}
tvp = AFSTOV(tvc);
if (vnode_get(tvp)) {
AFS_GLOCK();
continue;
}
- if (tvc->f.states & (CBulkFetching|CDeadVnode)) {
- AFS_GUNLOCK();
- vnode_recycle(AFSTOV(tvc));
- AFS_GLOCK();
- }
#endif /* AFS_DARWIN80_ENV */
count++;
if (found_tvc) {
ObtainWriteLock(&afs_xvcache, 1002); /* XXX - should be a unique number */
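+ /* afs_QueueVCB may drop and retake afs_xvcache; if it slept,
+ * restart the hash walk. */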
+ retry:
/* Somehow, walk the set of vcaches, with each one coming out as tvc */
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
- if (afs_QueueVCB(tvc)) {
+ int slept = 0;
+ if (afs_QueueVCB(tvc, &slept)) {
tvc->callback = NULL;
nq++;
}
+ if (slept) {
+ goto retry;
+ }
}
}