* shutdown_vcache
*
*/
-#include "../afs/param.h" /*Should be always first*/
+#include <afsconfig.h>
+#include "../afs/param.h"
+
+RCSID("$Header$");
+
#include "../afs/sysincludes.h" /*Standard vendor system headers*/
#include "../afs/afsincludes.h" /*AFS-based standard headers*/
#include "../afs/afs_stats.h"
#ifdef AFS_OSF_ENV
extern struct mount *afs_globalVFS;
extern struct vnodeops Afs_vnodeops;
+#elif defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
+extern struct mount *afs_globalVFS;
#else
extern struct vfs *afs_globalVFS;
#endif /* AFS_OSF_ENV */
ICL_TYPE_INT32, avc->states);
#ifdef AFS_OSF_ENV
AFS_GUNLOCK();
- VN_LOCK((struct vnode *)avc);
+ VN_LOCK(AFSTOV(avc));
AFS_GLOCK();
#endif
/* This should put it back on the vnode free list since usecount is 1 */
afs_vcount--;
vSetType(avc, VREG);
- if (avc->vrefCount > 0) {
- VN_UNLOCK((struct vnode *)avc);
- AFS_RELE((struct vnode *)avc);
+ if (VREFCOUNT(avc) > 0) {
+ VN_UNLOCK(AFSTOV(avc));
+ AFS_RELE(AFSTOV(avc));
} else {
if (afs_norefpanic) {
printf ("flush vc refcnt < 1");
afs_norefpanic++;
(void) vgone(avc, VX_NOSLEEP, (struct vnodeops *) 0);
AFS_GLOCK();
- VN_UNLOCK((struct vnode *)avc);
+ VN_UNLOCK(AFSTOV(avc));
}
else osi_Panic ("flush vc refcnt < 1");
}
bad:
#ifdef AFS_OSF_ENV
- VN_UNLOCK((struct vnode *)avc);
+ VN_UNLOCK(AFSTOV(avc));
#endif
return code;
AFS_STATCNT(afs_inactive);
if (avc->states & CDirty) {
/* we can't keep trying to push back dirty data forever. Give up. */
- afs_InvalidateAllSegments(avc, 1/*set lock*/); /* turns off dirty bit */
+ afs_InvalidateAllSegments(avc); /* turns off dirty bit */
}
avc->states &= ~CMAPPED; /* mainly used by SunOS 4.0.x */
avc->states &= ~CDirty; /* Turn it off */
SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_GiveUpCallBacks(tc->id, &fidArray,
&cbArray);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
}
else code = -1;
return 0;
}
+#ifdef AFS_LINUX22_ENV
+/* afs_TryFlushDcacheChildren -- Shakes loose vcache references held by
+ * children of the dentry
+ *
+ * LOCKS -- Called with afs_xvcache write locked. Drops and reacquires
+ * AFS_GLOCK, so it can call dput, which may call iput, but
+ * keeps afs_xvcache exclusively.
+ *
+ * Tree traversal algorithm from fs/dcache.c: select_parent()
+ */
+static void afs_TryFlushDcacheChildren(struct dentry *parent)
+{
+ struct dentry *this_parent = parent;
+ struct list_head *next;
+
+ repeat:
+ next = this_parent->d_subdirs.next;
+ resume:
+ DLOCK();
+ while (next != &this_parent->d_subdirs) {
+ struct list_head *tmp = next;
+ struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
+
+ next = tmp->next;
+ if (!DCOUNT(dentry) && !dentry->d_inode) {
+ DGET(dentry);
+ AFS_GUNLOCK();
+ DUNLOCK();
+ d_drop(dentry);
+ dput(dentry);
+ AFS_GLOCK();
+ goto repeat;
+ }
+ /*
+ * Descend a level if the d_subdirs list is non-empty.
+ */
+ if (!list_empty(&dentry->d_subdirs)) {
+ this_parent = dentry;
+ goto repeat;
+ }
+ }
+ DUNLOCK();
+
+ /*
+ * All done at this level ... ascend and resume the search.
+ */
+ if (this_parent != parent) {
+ next = this_parent->d_child.next;
+ this_parent = this_parent->d_parent;
+ goto resume;
+ }
+}
+#endif /* AFS_LINUX22_ENV */
/*
* afs_RemoveVCB
} /*afs_RemoveVCB*/
-
/*
* afs_NewVCache
*
#ifdef AFS_LINUX22_ENV
if (!freeVCList) {
/* Free some if possible. */
- struct afs_q *tq, *uq;
- int i; char *panicstr;
- int vmax = 2 * afs_cacheStats;
- int vn = VCACHE_FREE;
-
- i = 0;
- for(tq = VLRU.prev; tq != &VLRU && vn > 0; tq = uq) {
+ struct afs_q *tq, *uq;
+ int i; char *panicstr;
+ int vmax = 2 * afs_cacheStats;
+ int vn = VCACHE_FREE;
+
+ i = 0;
+ for(tq = VLRU.prev; tq != &VLRU && vn > 0; tq = uq) {
tvc = QTOV(tq);
uq = QPrev(tq);
if (tvc->states & CVFlushed)
- refpanic ("CVFlushed on VLRU");
+ refpanic ("CVFlushed on VLRU");
else if (i++ > vmax)
- refpanic ("Exceeded pool of AFS vnodes(VLRU cycle?)");
+ refpanic ("Exceeded pool of AFS vnodes(VLRU cycle?)");
else if (QNext(uq) != tq)
- refpanic ("VLRU inconsistent");
-
+ refpanic ("VLRU inconsistent");
+
if (tvc == afs_globalVp)
continue;
-
- if ( tvc->vrefCount && tvc->opens == 0 ) {
- struct inode *ip = (struct inode*)tvc;
+
+ if ( VREFCOUNT(tvc) && tvc->opens == 0 ) {
+ struct inode *ip = AFSTOI(tvc);
if (list_empty(&ip->i_dentry)) {
vn --;
}
struct list_head *head = &ip->i_dentry;
int all = 1;
restart:
-#if defined(AFS_LINUX24_ENV)
- spin_lock(&dcache_lock);
-#endif
+ DLOCK();
cur = head;
while ((cur = cur->next) != head) {
struct dentry *dentry = list_entry(cur, struct dentry, d_alias);
-#if defined(AFS_LINUX24_ENV)
- if (!atomic_read(&dentry->d_count)) {
-#else
- if (!dentry->d_count) {
-#endif
+ if (DCOUNT(dentry)) {
+ afs_TryFlushDcacheChildren(dentry);
+ }
+
+ if (!DCOUNT(dentry)) {
AFS_GUNLOCK();
-#if defined(AFS_LINUX24_ENV)
- dget_locked(dentry);
- spin_unlock(&dcache_lock);
-#else
- dget(dentry);
-#endif
+ DGET(dentry);
+ DUNLOCK();
d_drop(dentry);
dput(dentry);
AFS_GLOCK();
all = 0;
}
}
-#if defined(AFS_LINUX24_ENV)
- spin_unlock(&dcache_lock);
-#endif
+ DUNLOCK();
if (all) vn --;
}
}
if (tq == uq) break;
- }
+ }
}
#endif /* AFS_LINUX22_ENV */
#ifdef AFS_OSF_ENV
refpanic ("Exceeded pool of AFS vnodes(VLRU cycle?)");
else if (QNext(uq) != tq)
refpanic ("VLRU inconsistent");
- else if (tvc->vrefCount < 1)
+ else if (VREFCOUNT(tvc) < 1)
refpanic ("refcnt 0 on VLRU");
- if ( tvc->vrefCount == 1 && tvc->opens == 0
+ if ( VREFCOUNT(tvc) == 1 && tvc->opens == 0
&& (tvc->states & CUnlinkedDel) == 0) {
code = afs_FlushVCache(tvc, &fv_slept);
if (code == 0) {
else if (QNext(uq) != tq)
refpanic("VLRU inconsistent");
- if (tvc->vrefCount == 0 && tvc->opens == 0
+#ifdef AFS_DARWIN_ENV
+ if (tvc->opens == 0 && ((tvc->states & CUnlinkedDel) == 0) &&
+ VREFCOUNT(tvc) == 1 && UBCINFOEXISTS(&tvc->v)) {
+ osi_VM_TryReclaim(tvc, &fv_slept);
+ if (fv_slept) {
+ uq = VLRU.prev;
+ i = 0;
+ continue; /* start over - may have raced. */
+ }
+ }
+#endif
+#if defined(AFS_FBSD_ENV)
+ if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
+ && (tvc->states & CUnlinkedDel) == 0) {
+ if (!(VOP_LOCK(&tvc->v, LK_EXCLUSIVE, curproc))) {
+ if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
+ && (tvc->states & CUnlinkedDel) == 0) {
+ VREFCOUNT_DEC(tvc);
+ AFS_GUNLOCK(); /* perhaps inline inactive for locking */
+ VOP_INACTIVE(&tvc->v, curproc);
+ AFS_GLOCK();
+ } else {
+ VOP_UNLOCK(&tvc->v, 0, curproc);
+ }
+ }
+ }
+#endif
+ if (VREFCOUNT(tvc) == 0 && tvc->opens == 0
&& (tvc->states & CUnlinkedDel) == 0) {
code = afs_FlushVCache(tvc, &fv_slept);
if (code == 0) {
#endif /* AFS_MACH_ENV */
#if defined(AFS_SGI_ENV)
{ char name[METER_NAMSZ];
- bzero(tvc, sizeof(struct vcache));
+ memset(tvc, 0, sizeof(struct vcache));
tvc->v.v_number = ++afsvnumbers;
tvc->vc_rwlockid = OSI_NO_LOCKID;
initnsema(&tvc->vc_rwlock, 1, makesname(name, "vrw", tvc->v.v_number));
#endif /* AFS_MACH_ENV */
#if !defined(AFS_SGI_ENV) && !defined(AFS_OSF_ENV)
- bzero((char *)tvc, sizeof(struct vcache));
+ memset((char *)tvc, 0, sizeof(struct vcache));
#else
tvc->uncred = 0;
#endif
hzero(tvc->m.DataVersion); /* in case we copy it into flushDV */
#ifdef AFS_OSF_ENV
/* Hold it for the LRU (should make count 2) */
- VN_HOLD((struct vnode *)tvc);
+ VN_HOLD(AFSTOV(tvc));
#else /* AFS_OSF_ENV */
- tvc->vrefCount = 1; /* us */
+ VREFCOUNT_SET(tvc, 1); /* us */
#endif /* AFS_OSF_ENV */
#ifdef AFS_AIX32_ENV
LOCK_INIT(&tvc->pvmlock, "vcache pvmlock");
#ifdef AFS_AIX_ENV
/* Don't forget to free the gnode space */
tvc->v.v_gnode = gnodepnt = (struct gnode *) osi_AllocSmallSpace(sizeof(struct gnode));
- bzero((char *)gnodepnt, sizeof(struct gnode));
+ memset((char *)gnodepnt, 0, sizeof(struct gnode));
#endif
#ifdef AFS_SGI64_ENV
- bzero((void*)&(tvc->vc_bhv_desc), sizeof(tvc->vc_bhv_desc));
+ memset((void*)&(tvc->vc_bhv_desc), 0, sizeof(tvc->vc_bhv_desc));
bhv_desc_init(&(tvc->vc_bhv_desc), tvc, tvc, &Afs_vnodeops);
#ifdef AFS_SGI65_ENV
vn_bhv_head_init(&(tvc->v.v_bh), "afsvp");
AFS_VN_INIT_BUF_LOCK(&(tvc->v));
#endif
#else
- SetAfsVnode((struct vnode *)tvc);
+ SetAfsVnode(AFSTOV(tvc));
#endif /* AFS_SGI64_ENV */
+#ifdef AFS_DARWIN_ENV
+ tvc->v.v_ubcinfo = UBC_INFO_NULL;
+ lockinit(&tvc->rwlock, PINOD, "vcache rwlock", 0, 0);
+ cache_purge(AFSTOV(tvc));
+ tvc->v.v_data=tvc;
+ tvc->v.v_tag=VT_AFS;
+ /* VLISTNONE(&tvc->v); */
+ tvc->v.v_freelist.tqe_next=0;
+ tvc->v.v_freelist.tqe_prev=(struct vnode **)0xdeadb;
+ /*tvc->vrefCount++;*/
+#endif
+#ifdef AFS_FBSD_ENV
+ lockinit(&tvc->rwlock, PINOD, "vcache rwlock", 0, 0);
+ cache_purge(AFSTOV(tvc));
+ tvc->v.v_data=tvc;
+ tvc->v.v_tag=VT_AFS;
+ tvc->v.v_usecount++; /* steal an extra ref for now so vfree never happens */
+ /* This extra ref is dealt with above... */
+#endif
/*
* The proper value for mvstat (for root fids) is setup by the caller.
*/
#endif /* AFS_SGI_ENV */
#if defined(AFS_LINUX22_ENV)
{
- struct inode *ip = (struct inode*)tvc;
+ struct inode *ip = AFSTOI(tvc);
sema_init(&ip->i_sem, 1);
#if defined(AFS_LINUX24_ENV)
sema_init(&ip->i_zombie, 1);
init_waitqueue_head(&ip->i_wait);
spin_lock_init(&ip->i_data.i_shared_lock);
- INIT_LIST_HEAD(&ip->i_data.pages);
+#ifdef STRUCT_ADDRESS_SPACE_HAS_PAGE_LOCK
+ spin_lock_init(&ip->i_data.page_lock);
+#endif
+ INIT_LIST_HEAD(&ip->i_data.clean_pages);
+ INIT_LIST_HEAD(&ip->i_data.dirty_pages);
+ INIT_LIST_HEAD(&ip->i_data.locked_pages);
+ INIT_LIST_HEAD(&ip->i_dirty_buffers);
+#ifdef STRUCT_INODE_HAS_I_DIRTY_DATA_BUFFERS
+ INIT_LIST_HEAD(&ip->i_dirty_data_buffers);
+#endif
+#ifdef STRUCT_INODE_HAS_I_DEVICES
+ INIT_LIST_HEAD(&ip->i_devices);
+#endif
ip->i_data.host = (void*) ip;
ip->i_mapping = &ip->i_data;
+#ifdef STRUCT_INODE_HAS_I_TRUNCATE_SEM
+ init_rwsem(&ip->i_truncate_sem);
+#endif
#else
sema_init(&ip->i_atomic_write, 1);
init_waitqueue(&ip->i_wait);
#endif
tvc->h1.dchint = 0;
osi_dnlc_purgedp(tvc); /* this may be overkill */
- bzero((char *)&(tvc->quick),sizeof(struct vtodc));
- bzero((char *)&(tvc->callsort),sizeof(struct afs_q));
+ memset((char *)&(tvc->quick), 0, sizeof(struct vtodc));
+ memset((char *)&(tvc->callsort), 0, sizeof(struct afs_q));
tvc->slocks = (struct SimpleLocks *)0;
i = VCHash(afid);
tc = afs_Conn(&tvc->fid, &treq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code =
RXAFS_ExtendLock(tc->id,
(struct AFSFid *) &tvc->fid.Fid,
&tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
}
else code = -1;
/*
* That's because if we come in via the CUnlinkedDel bit state path we'll be have 0 refcnt
*/
- osi_Assert(tvc->vrefCount > 0);
+ osi_Assert(VREFCOUNT(tvc) > 0);
AFS_RWLOCK((vnode_t *)tvc, VRWLOCK_WRITE);
#endif
ObtainWriteLock(&tvc->lock,52);
AFS_FAST_RELE(tvc);
if (didCore) {
#ifdef AFS_GFS_ENV
- tvc->vrefCount--;
+ VREFCOUNT_DEC(tvc);
#else
- AFS_RELE((struct vnode *)tvc);
+ AFS_RELE(AFSTOV(tvc));
#endif
/* Matches write code setting CCore flag */
crfree(cred);
}
}
+#ifdef AFS_DARWIN_ENV
+ if (VREFCOUNT(tvc) == 1 && UBCINFOEXISTS(&tvc->v)) {
+ if (tvc->opens) panic("flushactive open, hasubc, but refcnt 1");
+ osi_VM_TryReclaim(tvc,0);
+ }
+#endif
}
}
ReleaseReadLock(&afs_xvcache);
struct vrequest *areq;
{ /*afs_SimpleVStat*/
+ afs_size_t length;
AFS_STATCNT(afs_SimpleVStat);
#ifdef AFS_SGI_ENV
#endif
{
+#ifdef AFS_64BIT_CLIENT
+ FillInt64(length, astat->Length_hi, astat->Length);
+#else /* AFS_64BIT_CLIENT */
+ length = astat->Length;
+#endif /* AFS_64BIT_CLIENT */
#if defined(AFS_SGI_ENV)
osi_Assert((valusema(&avc->vc_rwlock) <= 0) &&
(OSI_GET_LOCKID() == avc->vc_rwlockid));
- if (astat->Length < avc->m.Length) {
+ if (length < avc->m.Length) {
vnode_t *vp = (vnode_t *)avc;
osi_Assert(WriteLocked(&avc->lock));
ReleaseWriteLock(&avc->lock);
AFS_GUNLOCK();
- PTOSSVP(vp, (off_t)astat->Length, (off_t)MAXLONG);
+ PTOSSVP(vp, (off_t)length, (off_t)MAXLONG);
AFS_GLOCK();
ObtainWriteLock(&avc->lock,67);
}
/* if writing the file, don't fetch over this value */
afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT,
ICL_TYPE_POINTER, avc,
- ICL_TYPE_INT32, avc->m.Length,
- ICL_TYPE_INT32, astat->Length);
- avc->m.Length = astat->Length;
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
+ avc->m.Length = length;
avc->m.Date = astat->ClientModTime;
}
avc->m.Owner = astat->Owner;
AFS_STATCNT(afs_WriteVCache);
afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
- ICL_TYPE_INT32, avc->m.Length);
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
do {
tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_StoreStatus(tc->id,
(struct AFSFid *) &avc->fid.Fid,
astatus, &OutStatus, &tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
}
else code = -1;
{ /*afs_ProcessFS*/
register int i;
+ afs_size_t length;
AFS_STATCNT(afs_ProcessFS);
+#ifdef AFS_64BIT_CLIENT
+ FillInt64(length, astat->Length_hi, astat->Length);
+#else /* AFS_64BIT_CLIENT */
+ length = astat->Length;
+#endif /* AFS_64BIT_CLIENT */
/* WARNING: afs_DoBulkStat uses the Length field to store a sequence
* number for each bulk status request. Under no circumstances
* should afs_DoBulkStat store a sequence number if the new
* values.
*/
afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
- ICL_TYPE_INT32, avc->m.Length,
- ICL_TYPE_INT32, astat->Length);
- avc->m.Length = astat->Length;
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
+ avc->m.Length = length;
avc->m.Date = astat->ClientModTime;
}
hset64(avc->m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
if (serverp) *serverp = tc->srvr->server;
start = osi_Time();
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_Lookup(tc->id, (struct AFSFid *) &afid->Fid, name,
(struct AFSFid *) &nfid->Fid,
OutStatusp, &OutDirStatus, CallBackp, tsyncp);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
} else
code = -1;
vcache2inode(tvc);
#endif
ReleaseWriteLock(&tvc->lock);
+#ifdef AFS_DARWIN_ENV
+ osi_VM_Setup(tvc);
+#endif
return tvc;
}
/* stat the file */
afs_RemoveVCB(afid);
{
- struct AFSFetchStatus OutStatus;
- code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
+ struct AFSFetchStatus OutStatus;
+
+ if (afs_DynrootNewVnode(tvc, &OutStatus)) {
+ afs_ProcessFS(tvc, &OutStatus, areq);
+ tvc->states |= CStatd | CUnique;
+ code = 0;
+ } else {
+ code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
+ }
}
if (code) {
}
ReleaseWriteLock(&tvc->lock);
+#ifdef AFS_DARWIN_ENV
+ osi_VM_Setup(avc);
+#endif
return tvc;
} /*afs_GetVCache*/
afs_ProcessFS(tvc, &OutStatus, areq);
ReleaseWriteLock(&tvc->lock);
+#ifdef AFS_DARWIN_ENV
+ osi_VM_Setup(tvc);
+#endif
return tvc;
}
* can be safely implemented */
int vg;
AFS_GUNLOCK();
- vg = vget((struct vnode *)tvc); /* this bumps ref count */
+ vg = vget(AFSTOV(tvc)); /* this bumps ref count */
AFS_GLOCK();
if (vg)
continue;
avc->callback = tc->srvr->server;
start = osi_Time();
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
-#ifdef RX_ENABLE_LOCKS
- AFS_GUNLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GUNLOCK();
code = RXAFS_FetchStatus(tc->id,
(struct AFSFid *) &afid->Fid,
Outsp, &CallBack, &tsync);
-#ifdef RX_ENABLE_LOCKS
- AFS_GLOCK();
-#endif /* RX_ENABLE_LOCKS */
+ RX_AFS_GLOCK();
XSTATS_END_TIME;
return code;
}
+#if 0
/*
* afs_StuffVcache
*
*/
afs_PutVCache(tvc, WRITE_LOCK);
} /*afs_StuffVcache*/
+#endif
/*
* afs_PutVCache
/* Grab this vnode, possibly reactivating from the free list */
int vg;
AFS_GUNLOCK();
- vg = vget((struct vnode *)tvc);
+ vg = vget(AFSTOV(tvc));
AFS_GLOCK();
if (vg)
continue;
if (tvc && (tvc->states & CStatd))
vcache2inode(tvc); /* mainly to reset i_nlink */
#endif
+#ifdef AFS_DARWIN_ENV
+ if (tvc)
+ osi_VM_Setup(tvc);
+#endif
return tvc;
} /*afs_FindVCache*/
/* Grab this vnode, possibly reactivating from the free list */
int vg;
AFS_GUNLOCK();
- vg = vget((struct vnode *)tvc);
+ vg = vget(AFSTOV(tvc));
AFS_GLOCK();
if (vg) {
/* This vnode no longer exists. */
/* Duplicates */
#ifdef AFS_OSF_ENV
/* Drop our reference counts. */
- vrele((struct vnode *)tvc);
- vrele((struct vnode *)found_tvc);
+ vrele(AFSTOV(tvc));
+ vrele(AFSTOV(found_tvc));
#endif
afs_duplicate_nfs_fids++;
ReleaseSharedLock(&afs_xvcache);
#if !defined(AFS_OSF_ENV)
/* Allocate and thread the struct vcache entries */
tvp = (struct vcache *) afs_osi_Alloc(astatSize * sizeof(struct vcache));
- bzero((char *)tvp, sizeof(struct vcache)*astatSize);
+ memset((char *)tvp, 0, sizeof(struct vcache)*astatSize);
Initial_freeVCList = tvp;
freeVCList = &(tvp[0]);
tvc->mvid = (struct VenusFid*)0;
}
#ifdef AFS_AIX_ENV
- aix_gnode_rele((struct vnode *)tvc);
+ aix_gnode_rele(AFSTOV(tvc));
#endif
if (tvc->linkData) {
afs_osi_Free(tvc->linkData, strlen(tvc->linkData)+1);
vms_delete(tvc->segid);
AFS_GLOCK();
tvc->segid = tvc->vmh = NULL;
- if (tvc->vrefCount) osi_Panic("flushVcache: vm race");
+ if (VREFCOUNT(tvc)) osi_Panic("flushVcache: vm race");
}
if (tvc->credp) {
crfree(tvc->credp);