afs_stats_cmperf.vcacheXAllocs--;
} else {
if (afs_norefpanic) {
- printf("flush vc refcnt < 1");
+ afs_warn("flush vc refcnt < 1");
afs_norefpanic++;
} else
osi_Panic("flush vc refcnt < 1");
We probably need a way to be smarter about this. */
tvc->nextfree = tmpReclaimedVCList;
tmpReclaimedVCList = tvc;
- printf("Reclaim list flush %lx failed: %d\n", (unsigned long) tvc, code);
+ /* printf("Reclaim list flush %lx failed: %d\n", (unsigned long) tvc, code); */
}
if (tvc->f.states & (CVInit
#ifdef AFS_DARWIN80_ENV
break;
}
if (!afsd_dynamic_vcaches && anumber == target) {
- printf("afs_ShakeLooseVCaches: warning none freed, using %d of %d\n",
+ afs_warn("afs_ShakeLooseVCaches: warning none freed, using %d of %d\n",
afs_vcount, afs_maxvcount);
}
} /* finished freeing up space */
if (!ip)
osi_Panic("afs_AllocVCache: no more inodes");
AFS_GLOCK();
-#if defined(STRUCT_SUPER_HAS_ALLOC_INODE)
+#if defined(STRUCT_SUPER_OPERATIONS_HAS_ALLOC_INODE)
tvc = VTOAFS(ip);
#else
tvc = afs_osi_Alloc(sizeof(struct vcache));
*
* \return The new vcache struct.
*/
-struct vcache *
-afs_NewVCache(struct VenusFid *afid, struct server *serverp)
+
+static_inline struct vcache *
+afs_NewVCache_int(struct VenusFid *afid, struct server *serverp, int seq)
{
struct vcache *tvc;
afs_int32 i, j;
if(!afsd_dynamic_vcaches) {
afs_ShakeLooseVCaches(anumber);
if (afs_vcount >= afs_maxvcount) {
- printf("afs_NewVCache - none freed\n");
+ afs_warn("afs_NewVCache - none freed\n");
return NULL;
}
}
#ifdef AFS_OBSD_ENV
ReleaseWriteLock(&afs_xvcache);
AFS_GUNLOCK();
- afs_nbsd_getnewvnode(tvc); /* includes one refcount */
+ afs_obsd_getnewvnode(tvc); /* includes one refcount */
AFS_GLOCK();
ObtainWriteLock(&afs_xvcache,337);
lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
#ifdef AFS_DARWIN_ENV
ReleaseWriteLock(&afs_xvcache);
AFS_GUNLOCK();
- afs_darwin_getnewvnode(tvc); /* includes one refcount */
+ afs_darwin_getnewvnode(tvc, seq ? 0 : 1); /* includes one refcount */
AFS_GLOCK();
ObtainWriteLock(&afs_xvcache,338);
#ifdef AFS_DARWIN80_ENV
#endif
panic("afs getnewvnode"); /* can't happen */
#ifdef AFS_FBSD70_ENV
- /* XXX verified on 80--TODO check on 7x */
- if (!vp->v_mount) {
- vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* !glocked */
- insmntque(vp, afs_globalVFS);
- VOP_UNLOCK(vp, 0);
- }
+ /* XXX verified on 80--TODO check on 7x */
+ if (!vp->v_mount) {
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* !glocked */
+ insmntque(vp, afs_globalVFS);
+ VOP_UNLOCK(vp, 0);
+ }
#endif
AFS_GLOCK();
ObtainWriteLock(&afs_xvcache,339);
* then there probably needs to be some sort of additional
* mutual exclusion (an Embryonic flag would suffice).
* -GAW */
- printf("afs_NewVCache: lost the race\n");
+ afs_warn("afs_NewVCache: lost the race\n");
return (tvc);
}
tvc->v = vp;
memset(&(tvc->callsort), 0, sizeof(struct afs_q));
tvc->slocks = NULL;
tvc->f.states &=~ CVInit;
+ if (seq) {
+ tvc->f.states |= CBulkFetching;
+ tvc->f.m.Length = seq;
+ }
afs_osi_Wakeup(&tvc->f.states);
return tvc;
-
} /*afs_NewVCache */
+/* afs_NewVCache: standard (non-bulk) vcache allocation entry point.
+ * Preserves the historical external interface; delegates to
+ * afs_NewVCache_int with seq == 0, so the new vcache is NOT marked
+ * CBulkFetching (the seq-handling branch in afs_NewVCache_int is
+ * skipped entirely). */
+struct vcache *
+afs_NewVCache(struct VenusFid *afid, struct server *serverp)
+{
+    return afs_NewVCache_int(afid, serverp, 0);
+}
+
+/* afs_NewBulkVCache: bulk-status variant of afs_NewVCache.
+ * Passes the caller's 'seq' through to afs_NewVCache_int; when seq is
+ * nonzero the new vcache gets CBulkFetching set and seq stored in
+ * f.m.Length so the bulk-status machinery can identify it later.
+ * NOTE(review): callers are assumed to pass a nonzero seq here —
+ * seq == 0 would behave exactly like afs_NewVCache; confirm at call
+ * sites. */
+struct vcache *
+afs_NewBulkVCache(struct VenusFid *afid, struct server *serverp, int seq)
+{
+    return afs_NewVCache_int(afid, serverp, seq);
+}
+
/*!
* ???
*
*
* \note Must be called with a shared lock on the vnode
*/
-int afs_WriteVCacheDiscon(register struct vcache *avc,
- register struct AFSStoreStatus *astatus,
- struct vattr *attrs)
+int
+afs_WriteVCacheDiscon(register struct vcache *avc,
+ register struct AFSStoreStatus *astatus,
+ struct vattr *attrs)
{
afs_int32 code = 0;
afs_int32 flags = 0;
}
if (astatus->Mask & AFS_SETOWNER) {
- printf("Not allowed yet. \n");
- /*avc->f.m.Owner = astatus->Owner;*/
+ /* printf("Not allowed yet. \n"); */
+ /*avc->f.m.Owner = astatus->Owner;*/
}
if (astatus->Mask & AFS_SETGROUP) {
- printf("Not allowed yet. \n");
- /*avc->f.m.Group = astatus->Group;*/
+ /* printf("Not allowed yet. \n"); */
+ /*avc->f.m.Group = astatus->Group;*/
}
if (astatus->Mask & AFS_SETMODE) {
if (AFS_IS_DISCONNECTED) {
/* Nothing to do otherwise...*/
code = ENETDOWN;
- printf("Network is down in afs_GetCache");
+ /* printf("Network is down in afs_GetCache"); */
} else
code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
origCBs = afs_allCBs; /* if anything changes, we don't have a cb */
if (AFS_IS_DISCONNECTED) {
- printf("Network is down in afs_LookupVcache\n");
+ /* printf("Network is down in afs_LookupVcache\n"); */
code = ENETDOWN;
} else
code =
goto rootvc_loop;
}
#ifdef AFS_DARWIN80_ENV
- if (tvc->f.states & CDeadVnode) {
- ReleaseSharedLock(&afs_xvcache);
- afs_osi_Sleep(&tvc->f.states);
- goto rootvc_loop;
- }
+ if (tvc->f.states & CDeadVnode) {
+ if (!(tvc->f.states & CBulkFetching)) {
+ ReleaseSharedLock(&afs_xvcache);
+ afs_osi_Sleep(&tvc->f.states);
+ goto rootvc_loop;
+ }
+ }
tvp = AFSTOV(tvc);
if (vnode_get(tvp)) /* this bumps ref count */
continue;
AFS_GLOCK();
continue;
}
+ if (tvc->f.states & (CBulkFetching|CDeadVnode)) {
+ AFS_GUNLOCK();
+ vnode_recycle(AFSTOV(tvc));
+ AFS_GLOCK();
+ }
#endif
break;
}
* \note The vcache must be write locked.
*/
void
-afs_UpdateStatus(struct vcache *avc,
- struct VenusFid *afid,
- struct vrequest *areq,
- struct AFSFetchStatus *Outsp,
- struct AFSCallBack *acb,
- afs_uint32 start)
+afs_UpdateStatus(struct vcache *avc, struct VenusFid *afid,
+ struct vrequest *areq, struct AFSFetchStatus *Outsp,
+ struct AFSCallBack *acb, afs_uint32 start)
{
struct volume *volp;
* \note avc must be write locked on entry
*/
void
-afs_ResetVCache(struct vcache *avc, afs_ucred_t *acred) {
+afs_ResetVCache(struct vcache *avc, afs_ucred_t *acred)
+{
ObtainWriteLock(&afs_xcbhash, 456);
afs_DequeueCallback(avc);
avc->f.states &= ~(CStatd | CDirty); /* next reference will re-stat */
*
* \return
*/
-static void findvc_sleep(struct vcache *avc, int flag) {
+static void
+findvc_sleep(struct vcache *avc, int flag)
+{
+ int fstates = avc->f.states;
if (flag & IS_SLOCK) {
ReleaseSharedLock(&afs_xvcache);
} else {
ReleaseReadLock(&afs_xvcache);
}
}
- afs_osi_Sleep(&avc->f.states);
+ if (flag & FIND_CDEAD) {
+ ObtainWriteLock(&afs_xvcache, 342);
+ afs_FlushReclaimedVcaches();
+ if (fstates == avc->f.states) {
+ ReleaseWriteLock(&afs_xvcache);
+ afs_osi_Sleep(&avc->f.states);
+ } else
+ ReleaseWriteLock(&afs_xvcache);
+ } else
+ afs_osi_Sleep(&avc->f.states);
if (flag & IS_SLOCK) {
ObtainSharedLock(&afs_xvcache, 341);
} else {
goto findloop;
}
#ifdef AFS_DARWIN80_ENV
- if (tvc->f.states & CDeadVnode) {
- findvc_sleep(tvc, flag);
- goto findloop;
- }
+ if (tvc->f.states & CDeadVnode) {
+ if (!(flag & FIND_CDEAD)) {
+ findvc_sleep(tvc, flag);
+ goto findloop;
+ }
+ }
tvp = AFSTOV(tvc);
if (vnode_get(tvp))
continue;
AFS_GLOCK();
continue;
}
+ if (tvc->f.states & (CBulkFetching|CDeadVnode)) {
+ AFS_GUNLOCK();
+ vnode_recycle(AFSTOV(tvc));
+ AFS_GLOCK();
+ }
#endif
break;
}
goto loop;
}
#ifdef AFS_DARWIN80_ENV
- if (tvc->f.states & CDeadVnode) {
- ReleaseSharedLock(&afs_xvcache);
- afs_osi_Sleep(&tvc->f.states);
- goto loop;
- }
+ if (tvc->f.states & CDeadVnode) {
+ if (!(tvc->f.states & CBulkFetching)) {
+ ReleaseSharedLock(&afs_xvcache);
+ afs_osi_Sleep(&tvc->f.states);
+ goto loop;
+ }
+ }
tvp = AFSTOV(tvc);
if (vnode_get(tvp)) {
/* This vnode no longer exists. */
AFS_GLOCK();
continue;
}
+ if (tvc->f.states & (CBulkFetching|CDeadVnode)) {
+ AFS_GUNLOCK();
+ vnode_recycle(AFSTOV(tvc));
+ AFS_GLOCK();
+ }
#endif /* AFS_DARWIN80_ENV */
count++;
if (found_tvc) {
}
void
-afs_DisconGiveUpCallbacks(void) {
+afs_DisconGiveUpCallbacks(void)
+{
int i;
struct vcache *tvc;
int nq=0;
*
*/
void
-afs_ClearAllStatdFlag(void) {
+afs_ClearAllStatdFlag(void)
+{
int i;
struct vcache *tvc;