#include "afs/afs_cbqueue.h"
#include "afs/afs_osidnlc.h"
-#if defined(AFS_LINUX22_ENV)
afs_int32 afs_maxvcount = 0; /* max number of vcache entries */
afs_int32 afs_vcount = 0; /* number of vcache in use now */
-#endif /* AFS_LINUX22_ENV */
#ifdef AFS_SGI_ENV
int afsvnumbers = 0;
afs_stats_cmperf.vcacheXAllocs--;
} else {
if (afs_norefpanic) {
- printf("flush vc refcnt < 1");
+ afs_warn("flush vc refcnt < 1");
afs_norefpanic++;
} else
osi_Panic("flush vc refcnt < 1");
tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
if (lockit)
- MObtainWriteLock(&afs_xvcb, 273);
+ ObtainWriteLock(&afs_xvcb, 273);
ObtainReadLock(&afs_xserver);
for (i = 0; i < NSERVERS; i++) {
for (safety1 = 0, tsp = afs_servers[i];
cbArray.AFSCBs_val = callBacks;
memset(&callBacks[0], 0, sizeof(callBacks[0]));
callBacks[0].CallBackType = CB_EXCLUSIVE;
- for (safety3 = 0; safety3 < MAXHOSTS * 2; safety3++) {
+ for (safety3 = 0; safety3 < AFS_MAXHOSTS * 2; safety3++) {
tc = afs_ConnByHost(tsp, tsp->cell->fsport,
tsp->cell->cellNum, &treq, 0,
SHARED_LOCK);
ReleaseReadLock(&afs_xserver);
if (lockit)
- MReleaseWriteLock(&afs_xvcb);
+ ReleaseWriteLock(&afs_xvcb);
afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
return 0;
}
AFS_STATCNT(afs_QueueVCB);
- MObtainWriteLock(&afs_xvcb, 274);
+ ObtainWriteLock(&afs_xvcb, 274);
/* we can't really give back callbacks on RO files, since the
* server only tracks them on a per-volume basis, and we don't
done:
/* now release locks and return */
- MReleaseWriteLock(&afs_xvcb);
+ ReleaseWriteLock(&afs_xvcb);
return queued;
}
struct afs_cbr *cbr, *ncbr;
AFS_STATCNT(afs_RemoveVCB);
- MObtainWriteLock(&afs_xvcb, 275);
+ ObtainWriteLock(&afs_xvcb, 275);
slot = afs_HashCBRFid(&afid->Fid);
ncbr = afs_cbrHashT[slot];
}
}
- MReleaseWriteLock(&afs_xvcb);
+ ReleaseWriteLock(&afs_xvcb);
}
void
We probably need a way to be smarter about this. */
tvc->nextfree = tmpReclaimedVCList;
tmpReclaimedVCList = tvc;
- printf("Reclaim list flush %lx failed: %d\n", (unsigned long) tvc, code);
+ /* printf("Reclaim list flush %lx failed: %d\n", (unsigned long) tvc, code); */
}
if (tvc->f.states & (CVInit
#ifdef AFS_DARWIN80_ENV
break;
}
if (!afsd_dynamic_vcaches && anumber == target) {
- printf("afs_ShakeLooseVCaches: warning none freed, using %d of %d\n",
+ afs_warn("afs_ShakeLooseVCaches: warning none freed, using %d of %d\n",
afs_vcount, afs_maxvcount);
}
} /* finished freeing up space */
if (!ip)
osi_Panic("afs_AllocVCache: no more inodes");
AFS_GLOCK();
-#if defined(STRUCT_SUPER_HAS_ALLOC_INODE)
+#if defined(STRUCT_SUPER_OPERATIONS_HAS_ALLOC_INODE)
tvc = VTOAFS(ip);
#else
tvc = afs_osi_Alloc(sizeof(struct vcache));
/* none free, making one is better than a panic */
afs_stats_cmperf.vcacheXAllocs++; /* count in case we have a leak */
tvc = (struct vcache *)afs_osi_Alloc(sizeof(struct vcache));
-#if defined(AFS_DARWIN_ENV) && !defined(UKERNEL)
+#if (defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)) && !defined(UKERNEL)
tvc->v = NULL; /* important to clean this, or use memset 0 */
-#endif
+#endif /* (DARWIN || XBSD) && !UKERNEL */
#ifdef KERNEL_HAVE_PIN
pin((char *)tvc, sizeof(struct vcache)); /* XXX */
#endif
*
* \return The new vcache struct.
*/
-struct vcache *
-afs_NewVCache(struct VenusFid *afid, struct server *serverp)
+
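+/* Common body for afs_NewVCache and afs_NewBulkVCache (below); a
+ * nonzero seq marks the entry as created for a bulk status fetch. */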
+static_inline struct vcache *
+afs_NewVCache_int(struct VenusFid *afid, struct server *serverp, int seq)
{
struct vcache *tvc;
afs_int32 i, j;
if(!afsd_dynamic_vcaches) {
afs_ShakeLooseVCaches(anumber);
if (afs_vcount >= afs_maxvcount) {
- printf("afs_NewVCache - none freed\n");
+ afs_warn("afs_NewVCache - none freed\n");
return NULL;
}
}
* XXX assume FreeBSD is the same for now.
*/
AFS_GUNLOCK();
+#if defined(AFS_FBSD80_ENV)
+ /* vgone() is correct, but it assumes v_usecount is not 0; our
+ * current usage does not guarantee that, so take a reference
+ * first if the count has dropped below 1 */
+ if (vrefcnt(AFSTOV(tvc)) < 1) {
+ vref(AFSTOV(tvc));
+ }
+ vn_lock(AFSTOV(tvc), LK_EXCLUSIVE | LK_RETRY); /* !glocked */
+#endif
vgone(AFSTOV(tvc));
+#if defined(AFS_FBSD80_ENV)
+ VOP_UNLOCK(AFSTOV(tvc), 0);
+#endif
fv_slept = 0;
code = 0;
AFS_GLOCK();
#ifdef AFS_OBSD_ENV
ReleaseWriteLock(&afs_xvcache);
AFS_GUNLOCK();
- afs_nbsd_getnewvnode(tvc); /* includes one refcount */
+ afs_obsd_getnewvnode(tvc); /* includes one refcount */
AFS_GLOCK();
ObtainWriteLock(&afs_xvcache,337);
lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
#ifdef AFS_DARWIN_ENV
ReleaseWriteLock(&afs_xvcache);
AFS_GUNLOCK();
- afs_darwin_getnewvnode(tvc); /* includes one refcount */
+ afs_darwin_getnewvnode(tvc, seq ? 0 : 1); /* includes one refcount */
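+ /* assumption: the second argument tells getnewvnode whether to set
+ * the vnode up completely now; bulk entries (seq != 0) defer that */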
AFS_GLOCK();
ObtainWriteLock(&afs_xvcache,338);
#ifdef AFS_DARWIN80_ENV
if (getnewvnode(VT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
#endif
panic("afs getnewvnode"); /* can't happen */
+#ifdef AFS_FBSD70_ENV
+ /* XXX verified on 80--TODO check on 7x */
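+ /* insmntque() ties the new vnode to our mount point, and FreeBSD
+ * wants the vnode locked across the call */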
+ if (!vp->v_mount) {
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* !glocked */
+ insmntque(vp, afs_globalVFS);
+ VOP_UNLOCK(vp, 0);
+ }
+#endif
AFS_GLOCK();
ObtainWriteLock(&afs_xvcache,339);
if (tvc->v != NULL) {
* then there probably needs to be some sort of additional
* mutual exclusion (an Embryonic flag would suffice).
* -GAW */
- printf("afs_NewVCache: lost the race\n");
+ afs_warn("afs_NewVCache: lost the race\n");
return (tvc);
}
tvc->v = vp;
#endif
vnode_pcache_init(&tvc->v);
#if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
- /* Above define is never true execpt in SGI test kernels. */
- init_bitlock(&(tvc->v.v_flag, VLOCK, "vnode", tvc->v.v_number);
+ /* Above define is never true except in SGI test kernels. */
+ init_bitlock(&(tvc->v.v_flag), VLOCK, "vnode", tvc->v.v_number);
#endif
#ifdef INTR_KTHREADS
- AFS_VN_INIT_BUF_LOCK(&(tvc->v));
+ AFS_VN_INIT_BUF_LOCK(&(tvc->v));
#endif
#else
SetAfsVnode(AFSTOV(tvc));
tvc->v.v_next = gnodepnt->gn_vnode; /*Single vnode per gnode for us! */
gnodepnt->gn_vnode = &tvc->v;
#endif
-#ifdef AFS_FBSD70_ENV
-#ifndef AFS_FBSD80_ENV /* yup. they put it back. */
- insmntque(AFSTOV(tvc), afs_globalVFS);
-#endif
-#endif
#if defined(AFS_SGI_ENV)
VN_SET_DPAGES(&(tvc->v), (struct pfdat *)NULL);
osi_Assert((tvc->v.v_flag & VINACT) == 0);
memset(&(tvc->callsort), 0, sizeof(struct afs_q));
tvc->slocks = NULL;
tvc->f.states &=~ CVInit;
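+ /* flag entries created for a bulk status fetch, and stash the
+ * bulkstat sequence number in f.m.Length until the real status
+ * comes back */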
+ if (seq) {
+ tvc->f.states |= CBulkFetching;
+ tvc->f.m.Length = seq;
+ }
afs_osi_Wakeup(&tvc->f.states);
return tvc;
-
-} /*afs_NewVCache */
+} /*afs_NewVCache_int */
+struct vcache *
+afs_NewVCache(struct VenusFid *afid, struct server *serverp)
+{
+ return afs_NewVCache_int(afid, serverp, 0);
+}
+
+struct vcache *
+afs_NewBulkVCache(struct VenusFid *afid, struct server *serverp, int seq)
+{
+ return afs_NewVCache_int(afid, serverp, seq);
+}
+
/*!
* ???
*
*
* \note Must be called with a shared lock on the vnode
*/
-int afs_WriteVCacheDiscon(register struct vcache *avc,
- register struct AFSStoreStatus *astatus,
- struct vattr *attrs)
+int
+afs_WriteVCacheDiscon(register struct vcache *avc,
+ register struct AFSStoreStatus *astatus,
+ struct vattr *attrs)
{
afs_int32 code = 0;
afs_int32 flags = 0;
}
if (astatus->Mask & AFS_SETOWNER) {
- printf("Not allowed yet. \n");
- /*avc->f.m.Owner = astatus->Owner;*/
+ /* printf("Not allowed yet. \n"); */
+ /*avc->f.m.Owner = astatus->Owner;*/
}
if (astatus->Mask & AFS_SETGROUP) {
- printf("Not allowed yet. \n");
- /*avc->f.m.Group = astatus->Group;*/
+ /* printf("Not allowed yet. \n"); */
+ /*avc->f.m.Group = astatus->Group;*/
}
if (astatus->Mask & AFS_SETMODE) {
if (glocked)
AFS_GLOCK();
}
- vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
+ vinvalbuf(vp, V_SAVE, PINOD, 0); /* changed late in 8.0-CURRENT */
if (!iheldthelock)
VOP_UNLOCK(vp, 0);
#elif defined(AFS_FBSD60_ENV)
if (AFS_IS_DISCONNECTED) {
/* Nothing to do otherwise...*/
code = ENETDOWN;
- printf("Network is down in afs_GetCache");
+ /* printf("Network is down in afs_GetCache"); */
} else
code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
origCBs = afs_allCBs; /* if anything changes, we don't have a cb */
if (AFS_IS_DISCONNECTED) {
- printf("Network is down in afs_LookupVcache\n");
+ /* printf("Network is down in afs_LookupVcache\n"); */
code = ENETDOWN;
} else
code =
goto rootvc_loop;
}
#ifdef AFS_DARWIN80_ENV
- if (tvc->f.states & CDeadVnode) {
- ReleaseSharedLock(&afs_xvcache);
- afs_osi_Sleep(&tvc->f.states);
- goto rootvc_loop;
- }
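+ /* dead vnodes belonging to a bulk fetch are still usable here;
+ * they are recycled below instead of being slept on */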
+ if (tvc->f.states & CDeadVnode) {
+ if (!(tvc->f.states & CBulkFetching)) {
+ ReleaseSharedLock(&afs_xvcache);
+ afs_osi_Sleep(&tvc->f.states);
+ goto rootvc_loop;
+ }
+ }
tvp = AFSTOV(tvc);
if (vnode_get(tvp)) /* this bumps ref count */
continue;
AFS_GLOCK();
continue;
}
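+ /* a vcache still marked bulk-fetching or dead has a vnode that
+ * was never fully initialized; have the system recycle it */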
+ if (tvc->f.states & (CBulkFetching|CDeadVnode)) {
+ AFS_GUNLOCK();
+ vnode_recycle(AFSTOV(tvc));
+ AFS_GLOCK();
+ }
#endif
break;
}
* \note The vcache must be write locked.
*/
void
-afs_UpdateStatus(struct vcache *avc,
- struct VenusFid *afid,
- struct vrequest *areq,
- struct AFSFetchStatus *Outsp,
- struct AFSCallBack *acb,
- afs_uint32 start)
+afs_UpdateStatus(struct vcache *avc, struct VenusFid *afid,
+ struct vrequest *areq, struct AFSFetchStatus *Outsp,
+ struct AFSCallBack *acb, afs_uint32 start)
{
struct volume *volp;
* \note avc must be write locked on entry
*/
void
-afs_ResetVCache(struct vcache *avc, afs_ucred_t *acred) {
+afs_ResetVCache(struct vcache *avc, afs_ucred_t *acred)
+{
ObtainWriteLock(&afs_xcbhash, 456);
afs_DequeueCallback(avc);
avc->f.states &= ~(CStatd | CDirty); /* next reference will re-stat */
*
* \return
*/
-static void findvc_sleep(struct vcache *avc, int flag) {
+static void
+findvc_sleep(struct vcache *avc, int flag)
+{
+ int fstates = avc->f.states;
if (flag & IS_SLOCK) {
ReleaseSharedLock(&afs_xvcache);
} else {
ReleaseReadLock(&afs_xvcache);
}
}
- afs_osi_Sleep(&avc->f.states);
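+ /* for FIND_CDEAD callers, flush the reclaim list first; if that
+ * changed the vcache state, the wakeup we would wait for may have
+ * already happened, so only sleep if the state is unchanged */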
+ if (flag & FIND_CDEAD) {
+ ObtainWriteLock(&afs_xvcache, 342);
+ afs_FlushReclaimedVcaches();
+ if (fstates == avc->f.states) {
+ ReleaseWriteLock(&afs_xvcache);
+ afs_osi_Sleep(&avc->f.states);
+ } else
+ ReleaseWriteLock(&afs_xvcache);
+ } else
+ afs_osi_Sleep(&avc->f.states);
if (flag & IS_SLOCK) {
ObtainSharedLock(&afs_xvcache, 341);
} else {
goto findloop;
}
#ifdef AFS_DARWIN80_ENV
- if (tvc->f.states & CDeadVnode) {
- findvc_sleep(tvc, flag);
- goto findloop;
- }
+ if (tvc->f.states & CDeadVnode) {
+ if (!(flag & FIND_CDEAD)) {
+ findvc_sleep(tvc, flag);
+ goto findloop;
+ }
+ }
tvp = AFSTOV(tvc);
if (vnode_get(tvp))
continue;
AFS_GLOCK();
continue;
}
+ if (tvc->f.states & (CBulkFetching|CDeadVnode)) {
+ AFS_GUNLOCK();
+ vnode_recycle(AFSTOV(tvc));
+ AFS_GLOCK();
+ }
#endif
break;
}
goto loop;
}
#ifdef AFS_DARWIN80_ENV
- if (tvc->f.states & CDeadVnode) {
- ReleaseSharedLock(&afs_xvcache);
- afs_osi_Sleep(&tvc->f.states);
- goto loop;
- }
+ if (tvc->f.states & CDeadVnode) {
+ if (!(tvc->f.states & CBulkFetching)) {
+ ReleaseSharedLock(&afs_xvcache);
+ afs_osi_Sleep(&tvc->f.states);
+ goto loop;
+ }
+ }
tvp = AFSTOV(tvc);
if (vnode_get(tvp)) {
/* This vnode no longer exists. */
AFS_GLOCK();
continue;
}
+ if (tvc->f.states & (CBulkFetching|CDeadVnode)) {
+ AFS_GUNLOCK();
+ vnode_recycle(AFSTOV(tvc));
+ AFS_GLOCK();
+ }
#endif /* AFS_DARWIN80_ENV */
count++;
if (found_tvc) {
}
void
-afs_DisconGiveUpCallbacks(void) {
+afs_DisconGiveUpCallbacks(void)
+{
int i;
struct vcache *tvc;
int nq=0;
*
*/
void
-afs_ClearAllStatdFlag(void) {
+afs_ClearAllStatdFlag(void)
+{
int i;
struct vcache *tvc;