afs_int32 vcachegen = 0;
unsigned int afs_paniconwarn = 0;
struct vcache *afs_vhashT[VCSIZE];
+static struct afs_cbr *afs_cbrHashT[CBRSIZE];
afs_int32 afs_bulkStatsLost;
int afs_norefpanic = 0;
/* Forward declarations */
static afs_int32 afs_QueueVCB(struct vcache *avc);
+/*
+ * afs_HashCBRFid
+ *
+ * Generate an index into the hash table for a given Fid.
+ */
+static int
+afs_HashCBRFid(struct AFSFid *fid)
+{
+ return (fid->Volume + fid->Vnode + fid->Unique) % CBRSIZE;
+}
+
+/*
+ * afs_InsertHashCBR
+ *
+ * Insert a CBR entry into the hash table.
+ * Must be called with afs_xvcb write-locked.
+ */
+static void
+afs_InsertHashCBR(struct afs_cbr *cbr)
+{
+ int slot = afs_HashCBRFid(&cbr->fid);
+
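+ /* Push onto the head of the chain.  hash_pprev always points at the
+  * 'hash_next' field (or bucket head) that references this entry, so
+  * afs_FreeCBR() can unlink it without walking the chain. */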
+ cbr->hash_next = afs_cbrHashT[slot];
+ if (afs_cbrHashT[slot])
+ afs_cbrHashT[slot]->hash_pprev = &cbr->hash_next;
+
+ cbr->hash_pprev = &afs_cbrHashT[slot];
+ afs_cbrHashT[slot] = cbr;
+}
/*
* afs_FlushVCache
afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
avc->linkData = NULL;
}
-#if defined(AFS_OBSD_ENV)
+#if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
/* OK, there are no internal vrefCounts, so there shouldn't
* be any more refs here. */
if (avc->v) {
/* This should put it back on the vnode free list since usecount is 1 */
afs_vcount--;
vSetType(avc, VREG);
+#ifdef AFS_DARWIN80_ENV
+ if (vnode_isinuse(AFSTOV(avc), 0)) {
+#else
if (VREFCOUNT(avc) > 0) {
+#endif
VN_UNLOCK(AFSTOV(avc));
AFS_RELE(AFSTOV(avc));
} else {
/*
* afs_FreeCBR
*
- * Description: free a callback return structure.
+ * Description: free a callback return structure, unthreading it from the
+ *              owning server's CBR queue and from the fid hash table.
*
* Parameters:
* asp -- the address of the structure to free.
int
afs_FreeCBR(register struct afs_cbr *asp)
{
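+ /* unthread from the owning server's CBR queue via the stored back-pointer */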
+ *(asp->pprev) = asp->next;
+ if (asp->next)
+ asp->next->pprev = asp->pprev;
+
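+ /* unthread from the fid hash chain */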
+ *(asp->hash_pprev) = asp->hash_next;
+ if (asp->hash_next)
+ asp->hash_next->hash_pprev = asp->hash_pprev;
+
asp->next = afs_cbrSpace;
afs_cbrSpace = asp;
return 0;
struct vrequest treq;
struct conn *tc;
int safety1, safety2, safety3;
- XSTATS_DECLS if ((code = afs_InitReq(&treq, afs_osi_credp)))
+ XSTATS_DECLS;
+ if ((code = afs_InitReq(&treq, afs_osi_credp)))
return code;
treq.flags |= O_NONBLOCK;
tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
fidArray.AFSCBFids_val = (struct AFSFid *)tfids;
cbArray.AFSCBs_len = 1;
cbArray.AFSCBs_val = callBacks;
+ memset(&callBacks[0], 0, sizeof(callBacks[0]));
callBacks[0].CallBackType = CB_EXCLUSIVE;
for (safety3 = 0; safety3 < MAXHOSTS * 2; safety3++) {
tc = afs_ConnByHost(tsp, tsp->cell->fsport,
*/
tcbrp = tsp->cbrs;
tfids[tcount++] = tcbrp->fid;
- tsp->cbrs = tcbrp->next;
+
+ /* Freeing the CBR unlinks it from the server's CBR queue (and from
+  * the fid hash), so no explicit unthreading is needed here. */
afs_FreeCBR(tcbrp);
} /* while loop for this one server */
if (safety2 > afs_cacheStats) {
static afs_int32
afs_QueueVCB(struct vcache *avc)
{
- register struct server *tsp;
- register struct afs_cbr *tcbp;
+ struct server *tsp;
+ struct afs_cbr *tcbp;
AFS_STATCNT(afs_QueueVCB);
/* The callback is really just a struct server ptr. */
MObtainWriteLock(&afs_xvcb, 274);
tcbp = afs_AllocCBR();
tcbp->fid = avc->fid.Fid;
+
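+ /* thread onto the front of this server's CBR queue, keeping the
+  * back-pointer that lets afs_FreeCBR() unlink it in O(1) */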
tcbp->next = tsp->cbrs;
+ if (tsp->cbrs)
+ tsp->cbrs->pprev = &tcbp->next;
+
tsp->cbrs = tcbp;
+ tcbp->pprev = &tsp->cbrs;
+
+ afs_InsertHashCBR(tcbp);
/* now release locks and return */
MReleaseWriteLock(&afs_xvcb);
* afs_RemoveVCB
*
* Description:
- * Remove a queued callback by looking through all the servers
- * to see if any have this callback queued.
+ * Remove a queued callback for a given Fid.
*
* Parameters:
* afid: The fid we want cleansed of queued callbacks.
* entries locked.
*/
-int
+void
afs_RemoveVCB(struct VenusFid *afid)
{
- register int i;
- register struct server *tsp;
- register struct afs_cbr *tcbrp;
- struct afs_cbr **lcbrpp;
+ int slot;
+ struct afs_cbr *cbr, *ncbr;
AFS_STATCNT(afs_RemoveVCB);
MObtainWriteLock(&afs_xvcb, 275);
- ObtainReadLock(&afs_xserver);
- for (i = 0; i < NSERVERS; i++) {
- for (tsp = afs_servers[i]; tsp; tsp = tsp->next) {
- /* if cell is known, and is wrong, then skip this server */
- if (tsp->cell && tsp->cell->cellNum != afid->Cell)
- continue;
- /*
- * Otherwise, iterate through file IDs we're sending to the
- * server.
- */
- lcbrpp = &tsp->cbrs; /* first queued return callback */
- for (tcbrp = *lcbrpp; tcbrp;
- lcbrpp = &tcbrp->next, tcbrp = *lcbrpp) {
- if (afid->Fid.Volume == tcbrp->fid.Volume
- && afid->Fid.Unique == tcbrp->fid.Unique
- && afid->Fid.Vnode == tcbrp->fid.Vnode) {
- *lcbrpp = tcbrp->next; /* unthread from list */
- afs_FreeCBR(tcbrp);
- goto done;
- }
- }
- }
- }
- done:
- ReleaseReadLock(&afs_xserver);
- MReleaseWriteLock(&afs_xvcb);
- return 0;
-}
+ slot = afs_HashCBRFid(&afid->Fid);
+ ncbr = afs_cbrHashT[slot];
-#ifdef AFS_LINUX22_ENV
+ while (ncbr) {
+ cbr = ncbr;
+ ncbr = cbr->hash_next;
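+ /* advance before the check: afs_FreeCBR(cbr) below would invalidate
+  * cbr->hash_next */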
-static void
-__shrink_dcache_parent(struct dentry *parent)
-{
- struct dentry *this_parent = parent;
- struct list_head *next;
- int found = 0;
- LIST_HEAD(afs_dentry_unused);
-
- repeat:
- next = this_parent->d_subdirs.next;
- resume:
- while (next != &this_parent->d_subdirs) {
- struct list_head *tmp = next;
- struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
- next = tmp->next;
- if (!DCOUNT(dentry)) {
- list_del(&dentry->d_lru);
- list_add(&dentry->d_lru, afs_dentry_unused.prev);
- found++;
- }
- /*
- * Descend a level if the d_subdirs list is non-empty.
- */
- if (!list_empty(&dentry->d_subdirs)) {
- this_parent = dentry;
- goto repeat;
+ if (afid->Fid.Volume == cbr->fid.Volume &&
+ afid->Fid.Vnode == cbr->fid.Vnode &&
+ afid->Fid.Unique == cbr->fid.Unique) {
+ afs_FreeCBR(cbr);
}
}
- /*
- * All done at this level ... ascend and resume the search.
- */
- if (this_parent != parent) {
- next = this_parent->d_child.next;
- this_parent = this_parent->d_parent;
- goto resume;
- }
-
- for (;;) {
- struct dentry *dentry;
- struct list_head *tmp;
-
- tmp = afs_dentry_unused.prev;
- if (tmp == &afs_dentry_unused)
- break;
-#ifdef AFS_LINUX24_ENV
- list_del_init(tmp);
-#else
- list_del(tmp);
- INIT_LIST_HEAD(tmp);
-#endif /* AFS_LINUX24_ENV */
- dentry = list_entry(tmp, struct dentry, d_lru);
-
-#ifdef AFS_LINUX24_ENV
- /* Unused dentry with a count? */
- if (DCOUNT(dentry))
- BUG();
-#endif
- DGET(dentry);
-#ifdef AFS_LINUX24_ENV
- list_del_init(&dentry->d_hash); /* d_drop */
-#else
- list_del(&dentry->d_hash);
- INIT_LIST_HEAD(&dentry->d_hash);
-#endif /* AFS_LINUX24_ENV */
- DUNLOCK();
- dput(dentry);
- DLOCK();
- if (!--found)
- break;
- }
-}
-
-/* afs_TryFlushDcacheChildren -- Shakes loose vcache references held by
- * children of the dentry
- *
- * LOCKS -- Called with afs_xvcache write locked. Drops and reaquires
- * AFS_GLOCK, so it can call dput, which may call iput, but
- * keeps afs_xvcache exclusively.
- *
- * Tree traversal algorithm from fs/dcache.c: select_parent()
- */
-static void
-afs_TryFlushDcacheChildren(struct vcache *tvc)
-{
- struct inode *ip = AFSTOI(tvc);
- struct dentry *this_parent;
- struct list_head *next;
- struct list_head *cur;
- struct list_head *head = &ip->i_dentry;
- struct dentry *dentry;
-
- AFS_GUNLOCK();
- restart:
-#ifndef old_vcache_scheme
- DLOCK();
- cur = head;
- while ((cur = cur->next) != head) {
- dentry = list_entry(cur, struct dentry, d_alias);
-
- afs_Trace3(afs_iclSetp, CM_TRACE_TRYFLUSHDCACHECHILDREN,
- ICL_TYPE_POINTER, ip, ICL_TYPE_STRING,
- dentry->d_parent->d_name.name, ICL_TYPE_STRING,
- dentry->d_name.name);
-
- if (!list_empty(&dentry->d_hash) && !list_empty(&dentry->d_subdirs))
- __shrink_dcache_parent(dentry);
-
- if (!DCOUNT(dentry)) {
- DGET(dentry);
-#ifdef AFS_LINUX24_ENV
- list_del_init(&dentry->d_hash); /* d_drop */
-#else
- list_del(&dentry->d_hash);
- INIT_LIST_HEAD(&dentry->d_hash);
-#endif /* AFS_LINUX24_ENV */
- DUNLOCK();
- dput(dentry);
- goto restart;
- }
- }
- DUNLOCK();
- AFS_GLOCK();
-#else
- restart:
- DLOCK();
- cur = head;
- while ((cur = cur->next) != head) {
- dentry = list_entry(cur, struct dentry, d_alias);
-
- afs_Trace3(afs_iclSetp, CM_TRACE_TRYFLUSHDCACHECHILDREN,
- ICL_TYPE_POINTER, ip, ICL_TYPE_STRING,
- dentry->d_parent->d_name.name, ICL_TYPE_STRING,
- dentry->d_name.name);
-
- if (!DCOUNT(dentry)) {
- AFS_GUNLOCK();
- DGET(dentry);
- DUNLOCK();
- d_drop(dentry);
- dput(dentry);
- AFS_GLOCK();
- goto restart;
- }
- }
- DUNLOCK();
-#endif
+ MReleaseWriteLock(&afs_xvcb);
}
-#endif /* AFS_LINUX22_ENV */
/*
* afs_NewVCache
#ifdef AFS_AIX_ENV
struct gnode *gnodepnt;
#endif
-#ifdef AFS_MACH_ENV
- struct vm_info *vm_info_ptr;
-#endif /* AFS_MACH_ENV */
#ifdef AFS_OSF_ENV
struct vcache *nvc;
#endif /* AFS_OSF_ENV */
refpanic("Exceeded pool of AFS vnodes(VLRU cycle?)");
else if (QNext(uq) != tq)
refpanic("VLRU inconsistent");
+#ifdef AFS_DARWIN80_ENV
+ else if (!vnode_isinuse(AFSTOV(tvc), 0))
+#else
else if (VREFCOUNT(tvc) < 1)
+#endif
refpanic("refcnt 0 on VLRU");
- if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
+#ifdef AFS_DARWIN80_ENV
+ if (vnode_isinuse(AFSTOV(tvc), 0) &&
+#else
+ if (VREFCOUNT(tvc) == 1 &&
+#endif
+ tvc->opens == 0
&& (tvc->states & CUnlinkedDel) == 0) {
code = afs_FlushVCache(tvc, &fv_slept);
if (code == 0) {
if (tvc->states & CVFlushed) {
refpanic("CVFlushed on VLRU");
+#if 0
} else if (i++ > 2 * afs_cacheStats) { /* even allowing for a few xallocs... */
refpanic("Increase -stat parameter of afsd(VLRU cycle?)");
+#endif
} else if (QNext(uq) != tq) {
refpanic("VLRU inconsistent");
}
-#ifdef AFS_DARWIN_ENV
- if (tvc->opens == 0 && ((tvc->states & CUnlinkedDel) == 0)
- && VREFCOUNT(tvc) == 1 && UBCINFOEXISTS(&tvc->v)) {
- osi_VM_TryReclaim(tvc, &fv_slept);
- if (fv_slept) {
- uq = VLRU.prev;
- i = 0;
- continue; /* start over - may have raced. */
- }
- }
-#elif defined(AFS_FBSD50_ENV)
- if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
- && (tvc->states & CUnlinkedDel) == 0) {
- if (!(VOP_LOCK(&tvc->v, LK_EXCLUSIVE, curthread))) {
- if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
- && (tvc->states & CUnlinkedDel) == 0) {
- VREFCOUNT_DEC(tvc);
- AFS_GUNLOCK(); /* perhaps inline inactive for locking */
- VOP_INACTIVE(&tvc->v, curthread);
- AFS_GLOCK();
- } else {
- VOP_UNLOCK(&tvc->v, 0, curthread);
- }
- }
- }
-#elif defined(AFS_FBSD_ENV) && !defined(AFS_FBSD50_ENV)
- if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
- && (tvc->states & CUnlinkedDel) == 0) {
- if (!(VOP_LOCK(&tvc->v, LK_EXCLUSIVE, curproc))) {
- if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
- && (tvc->states & CUnlinkedDel) == 0) {
- VREFCOUNT_DEC(tvc);
- AFS_GUNLOCK(); /* perhaps inline inactive for locking */
- VOP_INACTIVE(&tvc->v, curproc);
- AFS_GLOCK();
- } else {
- VOP_UNLOCK(&tvc->v, 0, curproc);
+#if defined(AFS_LINUX22_ENV)
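+ /* Try to shake loose the dentries aliasing this inode so the vcache
+  * can be flushed; d_invalidate() returns -EBUSY if a dentry (e.g. a
+  * busy directory) is still in use. */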
+ if (tvc != afs_globalVp && VREFCOUNT(tvc) && tvc->opens == 0) {
+ struct dentry *dentry;
+ struct list_head *cur, *head = &(AFSTOI(tvc))->i_dentry;
+ AFS_FAST_HOLD(tvc);
+ AFS_GUNLOCK();
+
+restart:
+#if defined(AFS_LINUX24_ENV)
+ spin_lock(&dcache_lock);
+#endif
+ cur = head;
+ while ((cur = cur->next) != head) {
+ dentry = list_entry(cur, struct dentry, d_alias);
+
+ if (d_unhashed(dentry))
+ continue;
+
+ dget_locked(dentry);
+
+#if defined(AFS_LINUX24_ENV)
+ spin_unlock(&dcache_lock);
+#endif
+ if (d_invalidate(dentry) == -EBUSY) {
+ dput(dentry);
+ /* perhaps lock and try to continue? (use cur as head?) */
+ goto inuse;
}
- }
+ dput(dentry);
+ goto restart;
+ }
+#if defined(AFS_LINUX24_ENV)
+ spin_unlock(&dcache_lock);
+#endif
+ inuse:
+ AFS_GLOCK();
+ AFS_FAST_RELE(tvc);
}
-#elif defined(AFS_LINUX22_ENV)
- if (tvc != afs_globalVp && VREFCOUNT(tvc) && tvc->opens == 0)
- afs_TryFlushDcacheChildren(tvc);
#endif
- if (VREFCOUNT(tvc) == 0 && tvc->opens == 0
- && (tvc->states & CUnlinkedDel) == 0) {
-#ifdef AFS_OBSD_ENV
+#ifdef AFS_DARWIN80_ENV
+ if (!vnode_isinuse(AFSTOV(tvc), 0
+#else
+ if (((VREFCOUNT(tvc) == 0)
+#if defined(AFS_DARWIN_ENV) && !defined(UKERNEL)
+ || ((VREFCOUNT(tvc) == 1) &&
+ (UBCINFOEXISTS(AFSTOV(tvc))))
+#endif
+#endif
+ ) && tvc->opens == 0 && (tvc->states & CUnlinkedDel) == 0) {
+#if defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
/*
* vgone() reclaims the vnode, which calls afs_FlushVCache(),
* then it puts the vnode on the free list.
* If we don't do this we end up with a cleaned vnode that's
* not on the free list.
+ * XXX assume FreeBSD is the same for now.
*/
+ AFS_GUNLOCK();
vgone(AFSTOV(tvc));
+ AFS_GLOCK();
code = fv_slept = 0;
#else
code = afs_FlushVCache(tvc, &fv_slept);
if (!freeVCList) {
/* none free, making one is better than a panic */
afs_stats_cmperf.vcacheXAllocs++; /* count in case we have a leak */
+ if (afs_cacheStats == afs_stats_cmperf.vcacheXAllocs)
+     printf("would vlru cycle panic\n");
tvc = (struct vcache *)afs_osi_Alloc(sizeof(struct vcache));
+#if defined(AFS_DARWIN_ENV) && !defined(UKERNEL)
+ tvc->v = NULL; /* important to clear this, or memset the whole vcache to 0 */
+#endif
#ifdef KERNEL_HAVE_PIN
pin((char *)tvc, sizeof(struct vcache)); /* XXX */
#endif
-#ifdef AFS_MACH_ENV
- /* In case it still comes here we need to fill this */
- tvc->v.v_vm_info = VM_INFO_NULL;
- vm_info_init(tvc->v.v_vm_info);
- /* perhaps we should also do close_flush on non-NeXT mach systems;
- * who knows; we don't currently have the sources.
- */
-#endif /* AFS_MACH_ENV */
#if defined(AFS_SGI_ENV)
{
char name[METER_NAMSZ];
}
#endif /* AFS_OSF_ENV */
-#ifdef AFS_MACH_ENV
- vm_info_ptr = tvc->v.v_vm_info;
-#endif /* AFS_MACH_ENV */
-
-#if defined(AFS_OBSD_ENV)
+#if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
if (tvc->v)
panic("afs_NewVCache(): free vcache with vnode attached");
#endif
RWLOCK_INIT(&tvc->vlock, "vcache vlock");
#endif /* defined(AFS_SUN5_ENV) */
-#ifdef AFS_MACH_ENV
- tvc->v.v_vm_info = vm_info_ptr;
- tvc->v.v_vm_info->pager = MEMORY_OBJECT_NULL;
-#endif /* AFS_MACH_ENV */
#ifdef AFS_OBSD_ENV
AFS_GUNLOCK();
afs_nbsd_getnewvnode(tvc); /* includes one refcount */
AFS_GLOCK();
lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
#endif
+#ifdef AFS_DARWIN_ENV
+ AFS_GUNLOCK();
+ afs_darwin_getnewvnode(tvc); /* includes one refcount */
+ AFS_GLOCK();
+#ifdef AFS_DARWIN80_ENV
+ LOCKINIT(tvc->rwlock);
+#else
+ lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
+#endif
+#endif
+#ifdef AFS_FBSD_ENV
+ {
+ struct vnode *vp;
+
+ AFS_GUNLOCK();
+#if defined(AFS_FBSD60_ENV)
+ if (getnewvnode(MOUNT_AFS, afs_globalVFS, &afs_vnodeops, &vp))
+#elif defined(AFS_FBSD50_ENV)
+ if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
+#else
+ if (getnewvnode(VT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
+#endif
+ panic("afs getnewvnode"); /* can't happen */
+ AFS_GLOCK();
+ if (tvc->v != NULL) {
+ /* I'd like to know if this ever happens...
+ * We don't drop global for the rest of this function,
+ * so if we do lose the race, the other thread should
+ * have found the same vnode and finished initializing
+ * the vcache entry. Is it conceivable that this vcache
+ * entry could be recycled during this interval? If so,
+ * then there probably needs to be some sort of additional
+ * mutual exclusion (an Embryonic flag would suffice).
+ * -GAW */
+ printf("afs_NewVCache: lost the race\n");
+ return (tvc);
+ }
+ tvc->v = vp;
+ tvc->v->v_data = tvc;
+ lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
+ }
+#endif
tvc->parentVnode = 0;
tvc->mvid = NULL;
tvc->linkData = NULL;
hzero(tvc->mapDV);
tvc->truncPos = AFS_NOTRUNC; /* don't truncate until we need to */
hzero(tvc->m.DataVersion); /* in case we copy it into flushDV */
+#if defined(AFS_LINUX22_ENV)
+ {
+ struct inode *ip = AFSTOI(tvc);
+#if defined(AFS_LINUX24_ENV)
+ struct address_space *mapping = &ip->i_data;
+#endif
+
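+ /* These vcaches are not allocated through the VFS inode cache, so
+  * initialize by hand the inode fields the VFS would normally set up. */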
+#if defined(AFS_LINUX26_ENV)
+ inode_init_once(ip);
+#else
+ sema_init(&ip->i_sem, 1);
+ INIT_LIST_HEAD(&ip->i_hash);
+ INIT_LIST_HEAD(&ip->i_dentry);
+#if defined(AFS_LINUX24_ENV)
+ sema_init(&ip->i_zombie, 1);
+ init_waitqueue_head(&ip->i_wait);
+ spin_lock_init(&ip->i_data.i_shared_lock);
+#ifdef STRUCT_ADDRESS_SPACE_HAS_PAGE_LOCK
+ spin_lock_init(&ip->i_data.page_lock);
+#endif
+ INIT_LIST_HEAD(&ip->i_data.clean_pages);
+ INIT_LIST_HEAD(&ip->i_data.dirty_pages);
+ INIT_LIST_HEAD(&ip->i_data.locked_pages);
+ INIT_LIST_HEAD(&ip->i_dirty_buffers);
+#ifdef STRUCT_INODE_HAS_I_DIRTY_DATA_BUFFERS
+ INIT_LIST_HEAD(&ip->i_dirty_data_buffers);
+#endif
+#ifdef STRUCT_INODE_HAS_I_DEVICES
+ INIT_LIST_HEAD(&ip->i_devices);
+#endif
+#ifdef STRUCT_INODE_HAS_I_TRUNCATE_SEM
+ init_rwsem(&ip->i_truncate_sem);
+#endif
+#ifdef STRUCT_INODE_HAS_I_ALLOC_SEM
+ init_rwsem(&ip->i_alloc_sem);
+#endif
+
+#else /* AFS_LINUX22_ENV */
+ sema_init(&ip->i_atomic_write, 1);
+ init_waitqueue(&ip->i_wait);
+#endif
+#endif
+
+#if defined(AFS_LINUX24_ENV)
+ mapping->host = ip;
+ ip->i_mapping = mapping;
+#ifdef STRUCT_ADDRESS_SPACE_HAS_GFP_MASK
+ ip->i_data.gfp_mask = GFP_HIGHUSER;
+#endif
+#if defined(AFS_LINUX26_ENV)
+ mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
+ {
+ extern struct backing_dev_info afs_backing_dev_info;
+
+ mapping->backing_dev_info = &afs_backing_dev_info;
+ }
+#endif
+#endif
+
+#if !defined(AFS_LINUX26_ENV)
+ if (afs_globalVFS)
+ ip->i_dev = afs_globalVFS->s_dev;
+#else
+#ifdef STRUCT_INODE_HAS_I_SECURITY
+ ip->i_security = NULL;
+ if (security_inode_alloc(ip))
+ panic("Cannot allocate inode security");
+#endif
+#endif
+ ip->i_sb = afs_globalVFS;
+ put_inode_on_dummy_list(ip);
+#ifdef STRUCT_INODE_HAS_I_SB_LIST
+ list_add(&ip->i_sb_list, &ip->i_sb->s_inodes);
+#endif
+#if defined(STRUCT_INODE_HAS_INOTIFY_LOCK) || defined(STRUCT_INODE_HAS_INOTIFY_SEM)
+ INIT_LIST_HEAD(&ip->inotify_watches);
+#if defined(STRUCT_INODE_HAS_INOTIFY_SEM)
+ sema_init(&ip->inotify_sem, 1);
+#else
+ spin_lock_init(&ip->inotify_lock);
+#endif
+#endif
+ }
+#endif
+
#ifdef AFS_OSF_ENV
/* Hold it for the LRU (should make count 2) */
VN_HOLD(AFSTOV(tvc));
#else /* AFS_OSF_ENV */
-#ifndef AFS_OBSD_ENV
+#if !(defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV))
VREFCOUNT_SET(tvc, 1); /* us */
-#endif /* AFS_OBSD_ENV */
+#endif /* AFS_XBSD_ENV */
#endif /* AFS_OSF_ENV */
#ifdef AFS_AIX32_ENV
LOCK_INIT(&tvc->pvmlock, "vcache pvmlock");
tvc->vmh = tvc->segid = NULL;
tvc->credp = NULL;
#endif
-#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV) || defined(AFS_SUN5_ENV)
+#ifdef AFS_BOZONLOCK_ENV
#if defined(AFS_SUN5_ENV)
rw_init(&tvc->rwlock, "vcache rwlock", RW_DEFAULT, NULL);
#else
SetAfsVnode(AFSTOV(tvc));
#endif /* AFS_SGI64_ENV */
-#ifdef AFS_DARWIN_ENV
- tvc->v.v_ubcinfo = UBC_INFO_NULL;
- lockinit(&tvc->rwlock, PINOD, "vcache rwlock", 0, 0);
- cache_purge(AFSTOV(tvc));
- tvc->v.v_data = tvc;
- tvc->v.v_tag = VT_AFS;
- /* VLISTNONE(&tvc->v); */
- tvc->v.v_freelist.tqe_next = 0;
- tvc->v.v_freelist.tqe_prev = (struct vnode **)0xdeadb;
- /*tvc->vrefCount++; */
-#endif
-#ifdef AFS_FBSD_ENV
- lockinit(&tvc->rwlock, PINOD, "vcache rwlock", 0, 0);
- cache_purge(AFSTOV(tvc));
- tvc->v.v_data = tvc;
- tvc->v.v_tag = VT_AFS;
- tvc->v.v_usecount++; /* steal an extra ref for now so vfree never happens */
- /* This extra ref is dealt with above... */
-#endif
/*
* The proper value for mvstat (for root fids) is setup by the caller.
*/
tvc->v.v_next = gnodepnt->gn_vnode; /*Single vnode per gnode for us! */
gnodepnt->gn_vnode = &tvc->v;
#endif
-#ifdef AFS_DEC_ENV
- tvc->v.g_dev = ((struct mount *)afs_globalVFS->vfs_data)->m_dev;
-#endif
#if defined(AFS_DUX40_ENV)
insmntque(tvc, afs_globalVFS, &afs_ubcops);
#else
vn_initlist((struct vnlist *)&tvc->v);
tvc->lastr = 0;
#endif /* AFS_SGI_ENV */
-#if defined(AFS_LINUX22_ENV)
- {
- struct inode *ip = AFSTOI(tvc);
- sema_init(&ip->i_sem, 1);
-#if defined(AFS_LINUX24_ENV)
- sema_init(&ip->i_zombie, 1);
- init_waitqueue_head(&ip->i_wait);
- spin_lock_init(&ip->i_data.i_shared_lock);
-#ifdef STRUCT_ADDRESS_SPACE_HAS_PAGE_LOCK
- spin_lock_init(&ip->i_data.page_lock);
-#endif
- INIT_LIST_HEAD(&ip->i_data.clean_pages);
- INIT_LIST_HEAD(&ip->i_data.dirty_pages);
- INIT_LIST_HEAD(&ip->i_data.locked_pages);
- INIT_LIST_HEAD(&ip->i_dirty_buffers);
-#ifdef STRUCT_INODE_HAS_I_DIRTY_DATA_BUFFERS
- INIT_LIST_HEAD(&ip->i_dirty_data_buffers);
-#endif
-#ifdef STRUCT_INODE_HAS_I_DEVICES
- INIT_LIST_HEAD(&ip->i_devices);
-#endif
- ip->i_data.host = (void *)ip;
-#ifdef STRUCT_ADDRESS_SPACE_HAS_GFP_MASK
- ip->i_data.gfp_mask = GFP_HIGHUSER;
-#endif
- ip->i_mapping = &ip->i_data;
-#ifdef STRUCT_INODE_HAS_I_TRUNCATE_SEM
- init_rwsem(&ip->i_truncate_sem);
-#endif
-#ifdef STRUCT_INODE_HAS_I_ALLOC_SEM
- init_rwsem(&ip->i_alloc_sem);
-#endif
-#else
- sema_init(&ip->i_atomic_write, 1);
- init_waitqueue(&ip->i_wait);
-#endif
- INIT_LIST_HEAD(&ip->i_hash);
- INIT_LIST_HEAD(&ip->i_dentry);
- if (afs_globalVFS) {
- ip->i_dev = afs_globalVFS->s_dev;
- ip->i_sb = afs_globalVFS;
- }
- }
-#endif
tvc->h1.dchint = 0;
osi_dnlc_purgedp(tvc); /* this may be overkill */
memset((char *)&(tvc->quick), 0, sizeof(struct vtodc));
struct vrequest treq, ureq;
struct AFSVolSync tsync;
int didCore;
- XSTATS_DECLS AFS_STATCNT(afs_FlushActiveVcaches);
+ XSTATS_DECLS;
+ AFS_STATCNT(afs_FlushActiveVcaches);
ObtainReadLock(&afs_xvcache);
for (i = 0; i < VCSIZE; i++) {
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
*/
osi_vnhold(tvc, 0);
ReleaseReadLock(&afs_xvcache);
-#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
+#ifdef AFS_BOZONLOCK_ENV
afs_BozonLock(&tvc->pvnLock, tvc);
#endif
#if defined(AFS_SGI_ENV)
tvc->execsOrWriters);
code = afs_StoreOnLastReference(tvc, &ureq);
ReleaseWriteLock(&tvc->lock);
-#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
+#ifdef AFS_BOZONLOCK_ENV
afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
hzero(tvc->flushDV);
* Ignore errors
*/
ReleaseWriteLock(&tvc->lock);
-#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
+#ifdef AFS_BOZONLOCK_ENV
afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
#if defined(AFS_SGI_ENV)
} else {
/* lost (or won, perhaps) the race condition */
ReleaseWriteLock(&tvc->lock);
-#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
+#ifdef AFS_BOZONLOCK_ENV
afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
}
ObtainReadLock(&afs_xvcache);
AFS_FAST_RELE(tvc);
if (didCore) {
-#ifdef AFS_GFS_ENV
- VREFCOUNT_DEC(tvc);
-#else
AFS_RELE(AFSTOV(tvc));
-#endif
/* Matches write code setting CCore flag */
crfree(cred);
}
}
-#ifdef AFS_DARWIN_ENV
- if (VREFCOUNT(tvc) == 1 && UBCINFOEXISTS(&tvc->v)) {
- if (tvc->opens)
- panic("flushactive open, hasubc, but refcnt 1");
- osi_VM_TryReclaim(tvc, 0);
- }
-#endif
}
}
ReleaseReadLock(&afs_xvcache);
struct conn *tc;
struct AFSFetchStatus OutStatus;
struct AFSVolSync tsync;
- XSTATS_DECLS AFS_STATCNT(afs_WriteVCache);
+ XSTATS_DECLS;
+ AFS_STATCNT(afs_WriteVCache);
afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
#ifdef AFS_LINUX22_ENV
vcache2inode(avc); /* Set the inode attr cache */
#endif
-#ifdef AFS_DARWIN_ENV
- osi_VM_Setup(avc, 1);
-#endif
} /*afs_ProcessFS */
afs_uint32 start;
register struct conn *tc;
struct AFSFetchStatus OutDirStatus;
- XSTATS_DECLS if (!name)
- name = ""; /* XXX */
+ XSTATS_DECLS;
+ if (!name)
+ name = ""; /* XXX */
do {
tc = afs_Conn(afid, areq, SHARED_LOCK);
if (tc) {
vcache2inode(tvc);
#endif
ReleaseWriteLock(&tvc->lock);
-#ifdef AFS_DARWIN_ENV
- osi_VM_Setup(tvc, 0);
-#endif
return tvc;
}
#if defined(AFS_OSF_ENV)
return tvc;
}
#endif /* AFS_OSF_ENV */
-#ifdef AFS_OBSD_ENV
- VOP_LOCK(AFSTOV(tvc), LK_EXCLUSIVE | LK_RETRY, curproc);
- uvm_vnp_uncache(AFSTOV(tvc));
- VOP_UNLOCK(AFSTOV(tvc), 0, curproc);
+#if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
+ /*
+ * XXX - I really don't like this. Should try to understand better.
+ * It seems that sometimes, when we get called, we already hold the
+ * lock on the vnode (e.g., from afs_getattr via afs_VerifyVCache).
+ * We can't drop the vnode lock, because that could result in a race.
+ * Sometimes, though, we get here and don't hold the vnode lock.
+ * I hate code paths that sometimes hold locks and sometimes don't.
+ * In any event, the dodge we use here is to check whether the vnode
+ * is locked, and if it isn't, then we acquire and drop it around the call
+ * to vinvalbuf; otherwise, we leave it alone.
+ */
+ {
+ struct vnode *vp = AFSTOV(tvc);
+ int iheldthelock;
+
+#if defined(AFS_DARWIN_ENV)
+ iheldthelock = VOP_ISLOCKED(vp);
+ if (!iheldthelock)
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, current_proc());
+ /* this is messy. we can call fsync which will try to reobtain this */
+ if (VTOAFS(vp) == tvc)
+ ReleaseWriteLock(&tvc->lock);
+ if (UBCINFOEXISTS(vp)) {
+ vinvalbuf(vp, V_SAVE, &afs_osi_cred, current_proc(), PINOD, 0);
+ }
+ if (VTOAFS(vp) == tvc)
+ ObtainWriteLock(&tvc->lock, 954);
+ if (!iheldthelock)
+ VOP_UNLOCK(vp, LK_EXCLUSIVE, current_proc());
+#elif defined(AFS_FBSD60_ENV)
+ iheldthelock = VOP_ISLOCKED(vp, curthread);
+ if (!iheldthelock)
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
+ vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
+ if (!iheldthelock)
+ VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
+#elif defined(AFS_FBSD50_ENV)
+ iheldthelock = VOP_ISLOCKED(vp, curthread);
+ if (!iheldthelock)
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
+ vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
+ if (!iheldthelock)
+ VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
+#elif defined(AFS_FBSD40_ENV)
+ iheldthelock = VOP_ISLOCKED(vp, curproc);
+ if (!iheldthelock)
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
+ vinvalbuf(vp, V_SAVE, osi_curcred(), curproc, PINOD, 0);
+ if (!iheldthelock)
+ VOP_UNLOCK(vp, LK_EXCLUSIVE, curproc);
+#elif defined(AFS_OBSD_ENV)
+ iheldthelock = VOP_ISLOCKED(vp, curproc);
+ if (!iheldthelock)
+ VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
+ uvm_vnp_uncache(vp);
+ if (!iheldthelock)
+ VOP_UNLOCK(vp, 0, curproc);
+#endif
+ }
#endif
ObtainWriteLock(&afs_xcbhash, 464);
struct AFSCallBack CallBack;
struct AFSVolSync tsync;
struct volume *volp;
- XSTATS_DECLS
+ XSTATS_DECLS;
do {
tc = afs_Conn(afid, areq, SHARED_LOCK);
avc->quick.stamp = 0;
if (retry && *retry)
return 0;
#endif
+#ifdef AFS_DARWIN_ENV
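+ /* re-establish the vnode's UBC info if it is missing or was reclaimed */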
+ tvc->states |= CUBCinit;
+ AFS_GUNLOCK();
+ if (UBCINFOMISSING(AFSTOV(tvc)) ||
+ UBCINFORECLAIMED(AFSTOV(tvc))) {
+ ubc_info_init(AFSTOV(tvc));
+ }
+ AFS_GLOCK();
+ tvc->states &= ~CUBCinit;
+#endif
/*
* only move to front of vlru if we have proper vcache locking
*/
if (tvc && (tvc->states & CStatd))
vcache2inode(tvc); /* mainly to reset i_nlink */
#endif
-#ifdef AFS_DARWIN_ENV
- if (tvc)
- osi_VM_Setup(tvc, 0);
-#endif
return tvc;
} /*afs_FindVCache */
LOCK_INIT(&afs_xvcb, "afs_xvcb");
#if !defined(AFS_OSF_ENV)
+#ifdef AFS_LINUX26_ENV
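+ /* Allocate the vcaches one at a time and chain them via nextfree;
+  * a single contiguous allocation of astatSize entries may be too
+  * large to satisfy. */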
+ printf("old style would have needed %d contiguous bytes\n", astatSize *
+ sizeof(struct vcache));
+ Initial_freeVCList = freeVCList = tvp = (struct vcache *)
+ afs_osi_Alloc(sizeof(struct vcache));
+ for (i = 0; i < astatSize; i++) {
+ tvp->nextfree = (struct vcache *) afs_osi_Alloc(sizeof(struct vcache));
+ tvp = tvp->nextfree;
+ }
+ tvp->nextfree = NULL;
+#else
/* Allocate and thread the struct vcache entries */
tvp = (struct vcache *)afs_osi_Alloc(astatSize * sizeof(struct vcache));
memset((char *)tvp, 0, sizeof(struct vcache) * astatSize);
pin((char *)tvp, astatSize * sizeof(struct vcache)); /* XXX */
#endif
#endif
-
+#endif
#if defined(AFS_SGI_ENV)
for (i = 0; i < astatSize; i++) {
* free vcache entries and all the vcache entries are active ones then we allocate
* an additional one - admittedly we almost never had that occur.
*/
-#if !defined(AFS_OSF_ENV)
- afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
-#endif
-#ifdef KERNEL_HAVE_PIN
- unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
-#endif
{
register struct afs_q *tq, *uq;
}
afs_cbrSpace = 0;
+#ifdef AFS_LINUX26_ENV
+ {
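+ /* release the individually allocated vcaches on the per-entry free list */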
+ struct vcache *tvp = Initial_freeVCList;
+ while (tvp) {
+ struct vcache *next = tvp->nextfree;
+
+ afs_osi_Free(tvp, sizeof(struct vcache));
+ tvp = next;
+ }
+ }
+#else
+#ifdef KERNEL_HAVE_PIN
+ unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
+#endif
+#if !defined(AFS_OSF_ENV)
+ afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
+#endif
+#endif
+
#if !defined(AFS_OSF_ENV)
freeVCList = Initial_freeVCList = 0;
#endif