#ifdef AFS_OSF_ENV
extern struct mount *afs_globalVFS;
extern struct vnodeops Afs_vnodeops;
-#elif defined(AFS_DARWIN_ENV)
+#elif defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
extern struct mount *afs_globalVFS;
#else
extern struct vfs *afs_globalVFS;
ICL_TYPE_INT32, avc->states);
#ifdef AFS_OSF_ENV
AFS_GUNLOCK();
- VN_LOCK((struct vnode *)avc);
+ VN_LOCK(AFSTOV(avc));
AFS_GLOCK();
#endif
afs_vcount--;
vSetType(avc, VREG);
if (VREFCOUNT(avc) > 0) {
- VN_UNLOCK((struct vnode *)avc);
- AFS_RELE((struct vnode *)avc);
+ VN_UNLOCK(AFSTOV(avc));
+ AFS_RELE(AFSTOV(avc));
} else {
if (afs_norefpanic) {
printf ("flush vc refcnt < 1");
afs_norefpanic++;
(void) vgone(avc, VX_NOSLEEP, (struct vnodeops *) 0);
AFS_GLOCK();
- VN_UNLOCK((struct vnode *)avc);
+ VN_UNLOCK(AFSTOV(avc));
}
else osi_Panic ("flush vc refcnt < 1");
}
bad:
#ifdef AFS_OSF_ENV
- VN_UNLOCK((struct vnode *)avc);
+ VN_UNLOCK(AFSTOV(avc));
#endif
return code;
SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_GiveUpCallBacks(tc->id, &fidArray,
&cbArray);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
}
else code = -1;
return 0;
}
+#ifdef AFS_LINUX22_ENV
+/* afs_TryFlushDcacheChildren -- Shakes loose vcache references held by
+ * children of the dentry
+ *
+ * LOCKS -- Called with afs_xvcache write locked. Drops and reacquires
+ * AFS_GLOCK, so it can call dput, which may call iput, but
+ * keeps afs_xvcache exclusively.
+ *
+ * Tree traversal algorithm from fs/dcache.c: select_parent()
+ */
+static void afs_TryFlushDcacheChildren(struct dentry *parent)
+{
+ struct dentry *this_parent = parent;
+ struct list_head *next;
+
+ repeat:
+ next = this_parent->d_subdirs.next;
+ resume:
+ DLOCK();
+ while (next != &this_parent->d_subdirs) {
+ struct list_head *tmp = next;
+ struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
+
+ next = tmp->next;
+ if (!DCOUNT(dentry) && !dentry->d_inode) {
+ DGET(dentry);
+ AFS_GUNLOCK();
+ DUNLOCK();
+ d_drop(dentry);
+ dput(dentry);
+ AFS_GLOCK();
+ goto repeat;
+ }
+ /*
+ * Descend a level if the d_subdirs list is non-empty.
+ */
+ if (!list_empty(&dentry->d_subdirs)) {
+ this_parent = dentry;
+ goto repeat;
+ }
+ }
+ DUNLOCK();
+
+ /*
+ * All done at this level ... ascend and resume the search.
+ */
+ if (this_parent != parent) {
+ next = this_parent->d_child.next;
+ this_parent = this_parent->d_parent;
+ goto resume;
+ }
+}
+#endif /* AFS_LINUX22_ENV */
/*
* afs_RemoveVCB
} /*afs_RemoveVCB*/
-
/*
* afs_NewVCache
*
#ifdef AFS_LINUX22_ENV
if (!freeVCList) {
/* Free some if possible. */
- struct afs_q *tq, *uq;
- int i; char *panicstr;
- int vmax = 2 * afs_cacheStats;
- int vn = VCACHE_FREE;
-
- AFS_GUNLOCK();
- shrink_dcache_sb(afs_globalVFS);
- AFS_GLOCK();
-
- i = 0;
- for(tq = VLRU.prev; tq != &VLRU && vn > 0; tq = uq) {
+ struct afs_q *tq, *uq;
+ int i; char *panicstr;
+ int vmax = 2 * afs_cacheStats;
+ int vn = VCACHE_FREE;
+
+ i = 0;
+ for(tq = VLRU.prev; tq != &VLRU && vn > 0; tq = uq) {
tvc = QTOV(tq);
uq = QPrev(tq);
if (tvc->states & CVFlushed)
- refpanic ("CVFlushed on VLRU");
+ refpanic ("CVFlushed on VLRU");
else if (i++ > vmax)
- refpanic ("Exceeded pool of AFS vnodes(VLRU cycle?)");
+ refpanic ("Exceeded pool of AFS vnodes(VLRU cycle?)");
else if (QNext(uq) != tq)
- refpanic ("VLRU inconsistent");
-
+ refpanic ("VLRU inconsistent");
+
if (tvc == afs_globalVp)
continue;
-
+
if ( VREFCOUNT(tvc) && tvc->opens == 0 ) {
- struct inode *ip = (struct inode*)tvc;
+ struct inode *ip = AFSTOI(tvc);
if (list_empty(&ip->i_dentry)) {
vn --;
}
struct list_head *head = &ip->i_dentry;
int all = 1;
restart:
-#if defined(AFS_LINUX24_ENV)
- spin_lock(&dcache_lock);
-#endif
+ DLOCK();
cur = head;
while ((cur = cur->next) != head) {
struct dentry *dentry = list_entry(cur, struct dentry, d_alias);
-#if defined(AFS_LINUX24_ENV)
- if (!atomic_read(&dentry->d_count)) {
-#else
- if (!dentry->d_count) {
-#endif
+ if (DCOUNT(dentry)) {
+ afs_TryFlushDcacheChildren(dentry);
+ }
+
+ if (!DCOUNT(dentry)) {
AFS_GUNLOCK();
-#if defined(AFS_LINUX24_ENV)
- dget_locked(dentry);
- spin_unlock(&dcache_lock);
-#else
- dget(dentry);
-#endif
+ DGET(dentry);
+ DUNLOCK();
d_drop(dentry);
dput(dentry);
AFS_GLOCK();
all = 0;
}
}
-#if defined(AFS_LINUX24_ENV)
- spin_unlock(&dcache_lock);
-#endif
+ DUNLOCK();
if (all) vn --;
}
}
if (tq == uq) break;
- }
+ }
}
#endif /* AFS_LINUX22_ENV */
#ifdef AFS_OSF_ENV
}
}
#endif
+#if defined(AFS_FBSD_ENV)
+ if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
+ && (tvc->states & CUnlinkedDel) == 0) {
+ if (!(VOP_LOCK(&tvc->v, LK_EXCLUSIVE, curproc))) {
+ if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
+ && (tvc->states & CUnlinkedDel) == 0) {
+ VREFCOUNT_DEC(tvc);
+ AFS_GUNLOCK(); /* perhaps inline inactive for locking */
+ VOP_INACTIVE(&tvc->v, curproc);
+ AFS_GLOCK();
+ } else {
+ VOP_UNLOCK(&tvc->v, 0, curproc);
+ }
+ }
+ }
+#endif
if (VREFCOUNT(tvc) == 0 && tvc->opens == 0
&& (tvc->states & CUnlinkedDel) == 0) {
code = afs_FlushVCache(tvc, &fv_slept);
hzero(tvc->m.DataVersion); /* in case we copy it into flushDV */
#ifdef AFS_OSF_ENV
/* Hold it for the LRU (should make count 2) */
- VN_HOLD((struct vnode *)tvc);
+ VN_HOLD(AFSTOV(tvc));
#else /* AFS_OSF_ENV */
VREFCOUNT_SET(tvc, 1); /* us */
#endif /* AFS_OSF_ENV */
AFS_VN_INIT_BUF_LOCK(&(tvc->v));
#endif
#else
- SetAfsVnode((struct vnode *)tvc);
+ SetAfsVnode(AFSTOV(tvc));
#endif /* AFS_SGI64_ENV */
#ifdef AFS_DARWIN_ENV
tvc->v.v_ubcinfo = UBC_INFO_NULL;
lockinit(&tvc->rwlock, PINOD, "vcache rwlock", 0, 0);
- cache_purge((struct vnode *)tvc);
+ cache_purge(AFSTOV(tvc));
tvc->v.v_data=tvc;
tvc->v.v_tag=VT_AFS;
/* VLISTNONE(&tvc->v); */
tvc->v.v_freelist.tqe_prev=(struct vnode **)0xdeadb;
/*tvc->vrefCount++;*/
#endif
+#ifdef AFS_FBSD_ENV
+ lockinit(&tvc->rwlock, PINOD, "vcache rwlock", 0, 0);
+ cache_purge(AFSTOV(tvc));
+ tvc->v.v_data=tvc;
+ tvc->v.v_tag=VT_AFS;
+ tvc->v.v_usecount++; /* steal an extra ref for now so vfree never happens */
+ /* This extra ref is dealt with above... */
+#endif
/*
* The proper value for mvstat (for root fids) is setup by the caller.
*/
#endif /* AFS_SGI_ENV */
#if defined(AFS_LINUX22_ENV)
{
- struct inode *ip = (struct inode*)tvc;
+ struct inode *ip = AFSTOI(tvc);
sema_init(&ip->i_sem, 1);
#if defined(AFS_LINUX24_ENV)
sema_init(&ip->i_zombie, 1);
INIT_LIST_HEAD(&ip->i_data.dirty_pages);
INIT_LIST_HEAD(&ip->i_data.locked_pages);
INIT_LIST_HEAD(&ip->i_dirty_buffers);
+#ifdef STRUCT_INODE_HAS_I_DIRTY_DATA_BUFFERS
+ INIT_LIST_HEAD(&ip->i_dirty_data_buffers);
+#endif
+#ifdef STRUCT_INODE_HAS_I_DEVICES
+ INIT_LIST_HEAD(&ip->i_devices);
+#endif
ip->i_data.host = (void*) ip;
ip->i_mapping = &ip->i_data;
#ifdef STRUCT_INODE_HAS_I_TRUNCATE_SEM
tc = afs_Conn(&tvc->fid, &treq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code =
RXAFS_ExtendLock(tc->id,
(struct AFSFid *) &tvc->fid.Fid,
&tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
}
else code = -1;
#ifdef AFS_GFS_ENV
VREFCOUNT_DEC(tvc);
#else
- AFS_RELE((struct vnode *)tvc);
+ AFS_RELE(AFSTOV(tvc));
#endif
/* Matches write code setting CCore flag */
crfree(cred);
tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_StoreStatus(tc->id,
(struct AFSFid *) &avc->fid.Fid,
astatus, &OutStatus, &tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
}
else code = -1;
if (serverp) *serverp = tc->srvr->server;
start = osi_Time();
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_Lookup(tc->id, (struct AFSFid *) &afid->Fid, name,
(struct AFSFid *) &nfid->Fid,
OutStatusp, &OutDirStatus, CallBackp, tsyncp);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
} else
code = -1;
* can be safely implemented */
int vg;
AFS_GUNLOCK();
- vg = vget((struct vnode *)tvc); /* this bumps ref count */
+ vg = vget(AFSTOV(tvc)); /* this bumps ref count */
AFS_GLOCK();
if (vg)
continue;
avc->callback = tc->srvr->server;
start = osi_Time();
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_FetchStatus(tc->id,
(struct AFSFid *) &afid->Fid,
Outsp, &CallBack, &tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
/* Grab this vnode, possibly reactivating from the free list */
int vg;
AFS_GUNLOCK();
- vg = vget((struct vnode *)tvc);
+ vg = vget(AFSTOV(tvc));
AFS_GLOCK();
if (vg)
continue;
/* Grab this vnode, possibly reactivating from the free list */
int vg;
AFS_GUNLOCK();
- vg = vget((struct vnode *)tvc);
+ vg = vget(AFSTOV(tvc));
AFS_GLOCK();
if (vg) {
/* This vnode no longer exists. */
/* Duplicates */
#ifdef AFS_OSF_ENV
/* Drop our reference counts. */
- vrele((struct vnode *)tvc);
- vrele((struct vnode *)found_tvc);
+ vrele(AFSTOV(tvc));
+ vrele(AFSTOV(found_tvc));
#endif
afs_duplicate_nfs_fids++;
ReleaseSharedLock(&afs_xvcache);
tvc->mvid = (struct VenusFid*)0;
}
#ifdef AFS_AIX_ENV
- aix_gnode_rele((struct vnode *)tvc);
+ aix_gnode_rele(AFSTOV(tvc));
#endif
if (tvc->linkData) {
afs_osi_Free(tvc->linkData, strlen(tvc->linkData)+1);