afs_int32 vcachegen = 0;
unsigned int afs_paniconwarn = 0;
struct vcache *afs_vhashT[VCSIZE];
+static struct afs_cbr *afs_cbrHashT[CBRSIZE];
afs_int32 afs_bulkStatsLost;
int afs_norefpanic = 0;
/* Forward declarations */
static afs_int32 afs_QueueVCB(struct vcache *avc);
+/*
+ * afs_HashCBRFid
+ *
+ * Generate an index into the hash table for a given Fid.
+ */
+static int
+afs_HashCBRFid(struct AFSFid *fid) {
+ return (fid->Volume + fid->Vnode + fid->Unique) % CBRSIZE;
+}
+
+/*
+ * afs_InsertHashCBR
+ *
+ * Insert a CBR entry into the hash table.
+ * Must be called with afs_xvcb write-locked.
+ */
+static void
+afs_InsertHashCBR(struct afs_cbr *cbr) {
+ int slot = afs_HashCBRFid(&cbr->fid);
+
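+ /* Push onto the head of the chain; each entry's hash_pprev points at
+ * the link that references it, so afs_FreeCBR can unlink the entry
+ * without walking the chain. */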
+ cbr->hash_next = afs_cbrHashT[slot];
+ if (afs_cbrHashT[slot])
+ afs_cbrHashT[slot]->hash_pprev = &cbr->hash_next;
+
+ cbr->hash_pprev = &afs_cbrHashT[slot];
+ afs_cbrHashT[slot] = cbr;
+}
/*
* afs_FlushVCache
afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
avc->linkData = NULL;
}
-#if defined(AFS_OBSD_ENV)
+#if defined(AFS_XBSD_ENV)
/* OK, there are no internal vrefCounts, so there shouldn't
* be any more refs here. */
if (avc->v) {
/*
* afs_FreeCBR
*
- * Description: free a callback return structure.
+ * Description: free a callback return structure, removing it from all lists.
*
* Parameters:
* asp -- the address of the structure to free.
int
afs_FreeCBR(register struct afs_cbr *asp)
{
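+ /* Unlink from the owning server's CBR queue via the back-pointer. */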
+ *(asp->pprev) = asp->next;
+ if (asp->next)
+ asp->next->pprev = asp->pprev;
+
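+ /* Likewise unlink from the fid hash chain. */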
+ *(asp->hash_pprev) = asp->hash_next;
+ if (asp->hash_next)
+ asp->hash_next->hash_pprev = asp->hash_pprev;
+
asp->next = afs_cbrSpace;
afs_cbrSpace = asp;
return 0;
fidArray.AFSCBFids_val = (struct AFSFid *)tfids;
cbArray.AFSCBs_len = 1;
cbArray.AFSCBs_val = callBacks;
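+ /* Zero the callback entry so only the fields set below carry data. */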
+ memset(&callBacks[0], 0, sizeof(callBacks[0]));
callBacks[0].CallBackType = CB_EXCLUSIVE;
for (safety3 = 0; safety3 < MAXHOSTS * 2; safety3++) {
tc = afs_ConnByHost(tsp, tsp->cell->fsport,
*/
tcbrp = tsp->cbrs;
tfids[tcount++] = tcbrp->fid;
- tsp->cbrs = tcbrp->next;
+
+ /* Freeing the CBR will unlink it from the server's CBR list */
afs_FreeCBR(tcbrp);
} /* while loop for this one server */
if (safety2 > afs_cacheStats) {
static afs_int32
afs_QueueVCB(struct vcache *avc)
{
- register struct server *tsp;
- register struct afs_cbr *tcbp;
+ struct server *tsp;
+ struct afs_cbr *tcbp;
AFS_STATCNT(afs_QueueVCB);
/* The callback is really just a struct server ptr. */
MObtainWriteLock(&afs_xvcb, 274);
tcbp = afs_AllocCBR();
tcbp->fid = avc->fid.Fid;
+
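+ /* Insert at the head of the server's CBR queue, maintaining pprev so
+ * afs_FreeCBR can unlink it in O(1); then thread it onto the fid hash. */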
tcbp->next = tsp->cbrs;
+ if (tsp->cbrs)
+ tsp->cbrs->pprev = &tcbp->next;
+
tsp->cbrs = tcbp;
+ tcbp->pprev = &tsp->cbrs;
+
+ afs_InsertHashCBR(tcbp);
/* now release locks and return */
MReleaseWriteLock(&afs_xvcb);
* afs_RemoveVCB
*
* Description:
- * Remove a queued callback by looking through all the servers
- * to see if any have this callback queued.
+ * Remove all queued callbacks matching a given Fid.
*
* Parameters:
* afid: The fid we want cleansed of queued callbacks.
* entries locked.
*/
-int
+void
afs_RemoveVCB(struct VenusFid *afid)
{
- register int i;
- register struct server *tsp;
- register struct afs_cbr *tcbrp;
- struct afs_cbr **lcbrpp;
+ int slot;
+ struct afs_cbr *cbr, *ncbr;
AFS_STATCNT(afs_RemoveVCB);
MObtainWriteLock(&afs_xvcb, 275);
- ObtainReadLock(&afs_xserver);
- for (i = 0; i < NSERVERS; i++) {
- for (tsp = afs_servers[i]; tsp; tsp = tsp->next) {
- /* if cell is known, and is wrong, then skip this server */
- if (tsp->cell && tsp->cell->cellNum != afid->Cell)
- continue;
- /*
- * Otherwise, iterate through file IDs we're sending to the
- * server.
- */
- lcbrpp = &tsp->cbrs; /* first queued return callback */
- for (tcbrp = *lcbrpp; tcbrp;
- lcbrpp = &tcbrp->next, tcbrp = *lcbrpp) {
- if (afid->Fid.Volume == tcbrp->fid.Volume
- && afid->Fid.Unique == tcbrp->fid.Unique
- && afid->Fid.Vnode == tcbrp->fid.Vnode) {
- *lcbrpp = tcbrp->next; /* unthread from list */
- afs_FreeCBR(tcbrp);
- goto done;
- }
- }
+ slot = afs_HashCBRFid(&afid->Fid);
+ ncbr = afs_cbrHashT[slot];
+
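+ /* Save hash_next before each potential free: afs_FreeCBR unlinks the
+ * entry and returns it to the free pool. */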
+ while (ncbr) {
+ cbr = ncbr;
+ ncbr = cbr->hash_next;
+
+ if (afid->Fid.Volume == cbr->fid.Volume &&
+ afid->Fid.Vnode == cbr->fid.Vnode &&
+ afid->Fid.Unique == cbr->fid.Unique) {
+ afs_FreeCBR(cbr);
}
}
- done:
- ReleaseReadLock(&afs_xserver);
+
MReleaseWriteLock(&afs_xvcb);
- return 0;
}
-#ifdef AFS_LINUX22_ENV
+#if defined(AFS_LINUX22_ENV) && !defined(AFS_LINUX26_ENV)
static void
__shrink_dcache_parent(struct dentry *parent)
DUNLOCK();
#endif
}
-#endif /* AFS_LINUX22_ENV */
+#endif /* AFS_LINUX22_ENV && !AFS_LINUX26_ENV */
/*
* afs_NewVCache
continue; /* start over - may have raced. */
}
}
-#elif defined(AFS_FBSD50_ENV)
- if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
- && (tvc->states & CUnlinkedDel) == 0) {
- if (!(VOP_LOCK(&tvc->v, LK_EXCLUSIVE, curthread))) {
- if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
- && (tvc->states & CUnlinkedDel) == 0) {
- VREFCOUNT_DEC(tvc);
- AFS_GUNLOCK(); /* perhaps inline inactive for locking */
- VOP_INACTIVE(&tvc->v, curthread);
- AFS_GLOCK();
- } else {
- VOP_UNLOCK(&tvc->v, 0, curthread);
- }
- }
- }
-#elif defined(AFS_FBSD_ENV) && !defined(AFS_FBSD50_ENV)
- if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
- && (tvc->states & CUnlinkedDel) == 0) {
- if (!(VOP_LOCK(&tvc->v, LK_EXCLUSIVE, curproc))) {
- if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
- && (tvc->states & CUnlinkedDel) == 0) {
- VREFCOUNT_DEC(tvc);
- AFS_GUNLOCK(); /* perhaps inline inactive for locking */
- VOP_INACTIVE(&tvc->v, curproc);
- AFS_GLOCK();
- } else {
- VOP_UNLOCK(&tvc->v, 0, curproc);
- }
- }
- }
#elif defined(AFS_LINUX22_ENV)
- if (tvc != afs_globalVp && VREFCOUNT(tvc) && tvc->opens == 0)
+ if (tvc != afs_globalVp && VREFCOUNT(tvc) && tvc->opens == 0) {
+#if defined(AFS_LINUX26_ENV)
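+ /* d_prune_aliases() can block in dput(); drop the AFS global lock
+ * across the call. */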
+ AFS_GUNLOCK();
+ d_prune_aliases(AFSTOI(tvc));
+ AFS_GLOCK();
+#else
afs_TryFlushDcacheChildren(tvc);
#endif
+ }
+#endif
if (VREFCOUNT(tvc) == 0 && tvc->opens == 0
&& (tvc->states & CUnlinkedDel) == 0) {
-#ifdef AFS_OBSD_ENV
+#if defined(AFS_XBSD_ENV)
/*
* vgone() reclaims the vnode, which calls afs_FlushVCache(),
* then it puts the vnode on the free list.
* If we don't do this we end up with a cleaned vnode that's
* not on the free list.
+ * XXX assume FreeBSD is the same for now.
*/
vgone(AFSTOV(tvc));
code = fv_slept = 0;
vm_info_ptr = tvc->v.v_vm_info;
#endif /* AFS_MACH_ENV */
-#if defined(AFS_OBSD_ENV)
+#if defined(AFS_XBSD_ENV)
if (tvc->v)
panic("afs_NewVCache(): free vcache with vnode attached");
#endif
tvc->v.v_vm_info->pager = MEMORY_OBJECT_NULL;
#endif /* AFS_MACH_ENV */
#ifdef AFS_OBSD_ENV
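+ /* afs_nbsd_getnewvnode() may sleep allocating the vnode; drop the
+ * global lock around it. */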
+ AFS_GUNLOCK();
afs_nbsd_getnewvnode(tvc); /* includes one refcount */
+ AFS_GLOCK();
lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
#endif
+#ifdef AFS_FBSD_ENV
+ {
+ struct vnode *vp;
+
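+ /* getnewvnode() can sleep, so drop the global lock across the call. */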
+ AFS_GUNLOCK();
+#ifdef AFS_FBSD50_ENV
+ if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
+#else
+ if (getnewvnode(VT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
+#endif
+ panic("afs getnewvnode"); /* can't happen */
+ AFS_GLOCK();
+ if (tvc->v != NULL) {
+ /* I'd like to know if this ever happens...
+ We don't drop global for the rest of this function,
+ so if we do lose the race, the other thread should
+ have found the same vnode and finished initializing
+ the vcache entry. Is it conceivable that this vcache
+ entry could be recycled during this interval? If so,
+ then there probably needs to be some sort of additional
+ mutual exclusion (an Embryonic flag would suffice).
+ -GAW */
+ printf("afs_NewVCache: lost the race\n");
+ return (tvc);
+ }
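+ /* Cross-link the new vnode and the vcache entry. */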
+ tvc->v = vp;
+ tvc->v->v_data = tvc;
+ lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
+ }
+#endif
tvc->parentVnode = 0;
tvc->mvid = NULL;
tvc->linkData = NULL;
hzero(tvc->mapDV);
tvc->truncPos = AFS_NOTRUNC; /* don't truncate until we need to */
hzero(tvc->m.DataVersion); /* in case we copy it into flushDV */
+#if defined(AFS_LINUX22_ENV)
+{
+ struct inode *ip = AFSTOI(tvc);
+ struct address_space *mapping = &ip->i_data;
+
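+ /* 2.6 kernels provide inode_init_once(); older kernels need each
+ * field set up by hand. */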
+#if defined(AFS_LINUX26_ENV)
+ inode_init_once(ip);
+#else
+ sema_init(&ip->i_sem, 1);
+ INIT_LIST_HEAD(&ip->i_hash);
+ INIT_LIST_HEAD(&ip->i_dentry);
+#if defined(AFS_LINUX24_ENV)
+ sema_init(&ip->i_zombie, 1);
+ init_waitqueue_head(&ip->i_wait);
+ spin_lock_init(&ip->i_data.i_shared_lock);
+#ifdef STRUCT_ADDRESS_SPACE_HAS_PAGE_LOCK
+ spin_lock_init(&ip->i_data.page_lock);
+#endif
+ INIT_LIST_HEAD(&ip->i_data.clean_pages);
+ INIT_LIST_HEAD(&ip->i_data.dirty_pages);
+ INIT_LIST_HEAD(&ip->i_data.locked_pages);
+ INIT_LIST_HEAD(&ip->i_dirty_buffers);
+#ifdef STRUCT_INODE_HAS_I_DIRTY_DATA_BUFFERS
+ INIT_LIST_HEAD(&ip->i_dirty_data_buffers);
+#endif
+#ifdef STRUCT_INODE_HAS_I_DEVICES
+ INIT_LIST_HEAD(&ip->i_devices);
+#endif
+#ifdef STRUCT_INODE_HAS_I_TRUNCATE_SEM
+ init_rwsem(&ip->i_truncate_sem);
+#endif
+#ifdef STRUCT_INODE_HAS_I_ALLOC_SEM
+ init_rwsem(&ip->i_alloc_sem);
+#endif
+
+#else /* !AFS_LINUX24_ENV */
+ sema_init(&ip->i_atomic_write, 1);
+ init_waitqueue(&ip->i_wait);
+#endif /* AFS_LINUX24_ENV */
+#endif /* AFS_LINUX26_ENV */
+
+#if defined(AFS_LINUX24_ENV)
+ mapping->host = ip;
+ ip->i_mapping = mapping;
+#ifdef STRUCT_ADDRESS_SPACE_HAS_GFP_MASK
+ ip->i_data.gfp_mask = GFP_HIGHUSER;
+#endif
+#if defined(AFS_LINUX26_ENV)
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
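+ /* Point the mapping at AFS's own backing_dev_info (presumably defined
+ * elsewhere in the Linux osi layer) rather than the default. */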
+{
+ extern struct backing_dev_info afs_backing_dev_info;
+
+ mapping->backing_dev_info = &afs_backing_dev_info;
+}
+#endif /* AFS_LINUX26_ENV */
+#endif /* AFS_LINUX24_ENV */
+
+#if !defined(AFS_LINUX26_ENV)
+ if (afs_globalVFS)
+ ip->i_dev = afs_globalVFS->s_dev;
+#endif
+ ip->i_sb = afs_globalVFS;
+ put_inode_on_dummy_list(ip);
+}
+#endif /* AFS_LINUX22_ENV */
+
#ifdef AFS_OSF_ENV
/* Hold it for the LRU (should make count 2) */
VN_HOLD(AFSTOV(tvc));
#else /* AFS_OSF_ENV */
-#ifndef AFS_OBSD_ENV
+#if !defined(AFS_XBSD_ENV)
VREFCOUNT_SET(tvc, 1); /* us */
-#endif /* AFS_OBSD_ENV */
+#endif /* AFS_XBSD_ENV */
#endif /* AFS_OSF_ENV */
#ifdef AFS_AIX32_ENV
LOCK_INIT(&tvc->pvmlock, "vcache pvmlock");
tvc->v.v_freelist.tqe_prev = (struct vnode **)0xdeadb;
/*tvc->vrefCount++; */
#endif
-#ifdef AFS_FBSD_ENV
- lockinit(&tvc->rwlock, PINOD, "vcache rwlock", 0, 0);
- cache_purge(AFSTOV(tvc));
- tvc->v.v_data = tvc;
- tvc->v.v_tag = VT_AFS;
- tvc->v.v_usecount++; /* steal an extra ref for now so vfree never happens */
- /* This extra ref is dealt with above... */
-#endif
/*
* The proper value for mvstat (for root fids) is setup by the caller.
*/
vn_initlist((struct vnlist *)&tvc->v);
tvc->lastr = 0;
#endif /* AFS_SGI_ENV */
-#if defined(AFS_LINUX22_ENV)
- {
- struct inode *ip = AFSTOI(tvc);
- sema_init(&ip->i_sem, 1);
-#if defined(AFS_LINUX24_ENV)
- sema_init(&ip->i_zombie, 1);
- init_waitqueue_head(&ip->i_wait);
- spin_lock_init(&ip->i_data.i_shared_lock);
-#ifdef STRUCT_ADDRESS_SPACE_HAS_PAGE_LOCK
- spin_lock_init(&ip->i_data.page_lock);
-#endif
- INIT_LIST_HEAD(&ip->i_data.clean_pages);
- INIT_LIST_HEAD(&ip->i_data.dirty_pages);
- INIT_LIST_HEAD(&ip->i_data.locked_pages);
- INIT_LIST_HEAD(&ip->i_dirty_buffers);
-#ifdef STRUCT_INODE_HAS_I_DIRTY_DATA_BUFFERS
- INIT_LIST_HEAD(&ip->i_dirty_data_buffers);
-#endif
-#ifdef STRUCT_INODE_HAS_I_DEVICES
- INIT_LIST_HEAD(&ip->i_devices);
-#endif
- ip->i_data.host = (void *)ip;
-#ifdef STRUCT_ADDRESS_SPACE_HAS_GFP_MASK
- ip->i_data.gfp_mask = GFP_HIGHUSER;
-#endif
- ip->i_mapping = &ip->i_data;
-#ifdef STRUCT_INODE_HAS_I_TRUNCATE_SEM
- init_rwsem(&ip->i_truncate_sem);
-#endif
-#ifdef STRUCT_INODE_HAS_I_ALLOC_SEM
- init_rwsem(&ip->i_alloc_sem);
-#endif
-#else
- sema_init(&ip->i_atomic_write, 1);
- init_waitqueue(&ip->i_wait);
-#endif
- INIT_LIST_HEAD(&ip->i_hash);
- INIT_LIST_HEAD(&ip->i_dentry);
- if (afs_globalVFS) {
- ip->i_dev = afs_globalVFS->s_dev;
- ip->i_sb = afs_globalVFS;
- }
- }
-#endif
tvc->h1.dchint = 0;
osi_dnlc_purgedp(tvc); /* this may be overkill */
memset((char *)&(tvc->quick), 0, sizeof(struct vtodc));
uvm_vnp_uncache(AFSTOV(tvc));
VOP_UNLOCK(AFSTOV(tvc), 0, curproc);
#endif
+#ifdef AFS_FBSD_ENV
+ /*
+ * XXX - I really don't like this. Should try to understand better.
+ * It seems that sometimes, when we get called, we already hold the
+ * lock on the vnode (e.g., from afs_getattr via afs_VerifyVCache).
+ * We can't drop the vnode lock, because that could result in a race.
+ * Sometimes, though, we get here and don't hold the vnode lock.
+ * I hate code paths that sometimes hold locks and sometimes don't.
+ * In any event, the dodge we use here is to check whether the vnode
+ * is locked, and if it isn't, then we acquire and release it around the call
+ * to vinvalbuf; otherwise, we leave it alone.
+ */
+ {
+ struct vnode *vp;
+ int iheldthelock;
+
+ vp = AFSTOV(tvc);
+#ifdef AFS_FBSD50_ENV
+ iheldthelock = VOP_ISLOCKED(vp, curthread);
+ if (!iheldthelock)
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
+ vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
+ if (!iheldthelock)
+ VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
+#else
+ iheldthelock = VOP_ISLOCKED(vp, curproc);
+ if (!iheldthelock)
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
+ vinvalbuf(vp, V_SAVE, osi_curcred(), curproc, PINOD, 0);
+ if (!iheldthelock)
+ VOP_UNLOCK(vp, LK_EXCLUSIVE, curproc);
+#endif
+ }
+#endif
ObtainWriteLock(&afs_xcbhash, 464);
tvc->states &= ~CUnique;
* free vcache entries and all the vcache entries are active ones then we allocate
* an additional one - admittedly we almost never had that occur.
*/
-#if !defined(AFS_OSF_ENV)
- afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
-#endif
-#ifdef KERNEL_HAVE_PIN
- unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
-#endif
{
register struct afs_q *tq, *uq;
afs_cbrSpace = 0;
#if !defined(AFS_OSF_ENV)
+ afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
+#endif
+#ifdef KERNEL_HAVE_PIN
+ unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
+#endif
+#if !defined(AFS_OSF_ENV)
freeVCList = Initial_freeVCList = 0;
#endif
RWLOCK_INIT(&afs_xvcache, "afs_xvcache");