register struct vcache *tvp = 0;
AFS_STATCNT(afs_root);
- if (afs_globalVp && (afs_globalVp->states & CStatd)) {
+ if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
tvp = afs_globalVp;
} else {
struct ucred *credp;
* avc was "VN_HELD" and "crheld" when CCore was set in
* afs_FakeClose
*/
- if (avc->states & CCore) {
- avc->states &= ~CCore;
+ if (avc->f.states & CCore) {
+ avc->f.states &= ~CCore;
avc->opens--;
avc->execsOrWriters--;
AFS_RELE(AFSTOV(avc));
}
if (flags & FNSHARE)
- tvp->states |= CNSHARE;
+ tvp->f.states |= CNSHARE;
if (!error) {
*vinfop = cred; /* fp->f_vinfo is like fp->f_cred in suns */
if (vp->v_count == 0)
osi_Panic("afs_rele: zero v_count");
if (--(vp->v_count) == 0) {
- if (vcp->states & CPageHog) {
+ if (vcp->f.states & CPageHog) {
vmPageHog--;
- vcp->states &= ~CPageHog;
+ vcp->f.states &= ~CPageHog;
}
error = afs_inactive(vp, 0);
}
AFS_STATCNT(afs_gn_close);
if (flags & FNSHARE) {
- tvp->states &= ~CNSHARE;
+ tvp->f.states &= ~CNSHARE;
afs_osi_Wakeup(&tvp->opens);
}
#endif
osi_FlushPages(vcp, cred); /* XXX ensure old pages are gone XXX */
ObtainWriteLock(&vcp->lock, 401);
- vcp->states |= CMAPPED; /* flag cleared at afs_inactive */
+ vcp->f.states |= CMAPPED; /* flag cleared at afs_inactive */
/*
* We map the segment into our address space using the handle returned by vm_create.
*/
if (!vcp->segid) {
- afs_uint32 tlen = vcp->m.Length;
+ afs_uint32 tlen = vcp->f.m.Length;
#ifdef AFS_64BIT_CLIENT
- if (vcp->m.Length > afs_vmMappingEnd)
+ if (vcp->f.m.Length > afs_vmMappingEnd)
tlen = afs_vmMappingEnd;
#endif
/* Consider V_INTRSEG too for interrupts */
if (ubuf->afsio_offset < afs_vmMappingEnd) {
#endif /* AFS_64BIT_CLIENT */
ObtainWriteLock(&vcp->lock, 240);
- vcp->states |= CDirty; /* Set the dirty bit */
+ vcp->f.states |= CDirty; /* Set the dirty bit */
afs_FakeOpen(vcp);
ReleaseWriteLock(&vcp->lock);
#ifdef AFS_64BIT_CLIENT
return 0;
ObtainReadLock(&vcp->lock);
- fileSize = vcp->m.Length;
+ fileSize = vcp->f.m.Length;
if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
uiop->afsio_offset = fileSize;
}
#endif /* AFS_64BIT_CLIENT */
if (!vcp->segid) {
- afs_uint32 tlen = vcp->m.Length;
+ afs_uint32 tlen = vcp->f.m.Length;
#ifdef AFS_64BIT_CLIENT
- if (vcp->m.Length > afs_vmMappingEnd)
+ if (vcp->f.m.Length > afs_vmMappingEnd)
tlen = afs_vmMappingEnd;
#endif
/* Consider V_INTRSEG too for interrupts */
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrSize));
ReleaseReadLock(&vcp->lock);
ObtainWriteLock(&vcp->lock, 400);
- vcp->m.Date = osi_Time(); /* Set file date (for ranlib) */
+ vcp->f.m.Date = osi_Time(); /* Set file date (for ranlib) */
/* extend file */
/* un-protect last page. */
- last_page = vcp->m.Length / PAGESIZE;
+ last_page = vcp->f.m.Length / PAGESIZE;
#ifdef AFS_64BIT_CLIENT
- if (vcp->m.Length > afs_vmMappingEnd)
+ if (vcp->f.m.Length > afs_vmMappingEnd)
last_page = afs_vmMappingEnd / PAGESIZE;
#endif
vm_protectp(vcp->segid, last_page, 1, FILEKEY);
if (xfrSize + xfrOffset > fileSize) {
- vcp->m.Length = xfrSize + xfrOffset;
+ vcp->f.m.Length = xfrSize + xfrOffset;
}
- if ((!(vcp->states & CPageHog)) && (xfrSize >= MIN_PAGE_HOG_SIZE)) {
+ if ((!(vcp->f.states & CPageHog)) && (xfrSize >= MIN_PAGE_HOG_SIZE)) {
vmPageHog++;
- vcp->states |= CPageHog;
+ vcp->f.states |= CPageHog;
}
ReleaseWriteLock(&vcp->lock);
*/
if (counter > 0 && code == 0 && xfrOffset == offset) {
ObtainWriteLock(&vcp->lock, 403);
- if (xfrOffset > vcp->m.Length)
- vcp->m.Length = xfrOffset;
+ if (xfrOffset > vcp->f.m.Length)
+ vcp->f.m.Length = xfrOffset;
code = afs_DoPartialWrite(vcp, &treq);
- vcp->states |= CDirty;
+ vcp->f.states |= CDirty;
ReleaseWriteLock(&vcp->lock);
if (code) {
goto fail;
}
ObtainWriteLock(&vcp->lock, 242);
- if (code == 0 && (vcp->states & CDirty)) {
+ if (code == 0 && (vcp->f.states & CDirty)) {
code = afs_DoPartialWrite(vcp, &treq);
}
vm_protectp(vcp->segid, last_page, 1, RDONLY);
return 0;
ObtainReadLock(&vcp->lock);
- fileSize = vcp->m.Length;
+ fileSize = vcp->f.m.Length;
if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
uiop->afsio_offset = fileSize;
}
ReleaseReadLock(&vcp->lock);
if (rw == UIO_WRITE) {
ObtainWriteLock(&vcp->lock, 400);
- vcp->m.Date = osi_Time(); /* Set file date (for ranlib) */
+ vcp->f.m.Date = osi_Time(); /* Set file date (for ranlib) */
/* extend file */
if (xfrSize + xfrOffset > fileSize)
- vcp->m.Length = xfrSize + xfrOffset;
+ vcp->f.m.Length = xfrSize + xfrOffset;
ReleaseWriteLock(&vcp->lock);
}
afs_Trace3(afs_iclSetp, CM_TRACE_DIRECTRDWR, ICL_TYPE_POINTER, vp,
if (AFS_CHUNKBASE(uiop->afsio_offset) != AFS_CHUNKBASE(xfrOffset)) {
ObtainWriteLock(&vcp->lock, 402);
code = afs_DoPartialWrite(vcp, &treq);
- vcp->states |= CDirty;
+ vcp->f.states |= CDirty;
ReleaseWriteLock(&vcp->lock);
}
}
AFS_GLOCK();
AFS_STATCNT(afs_root);
if (mdata == NULL && afs_globalVp
- && (afs_globalVp->states & CStatd)) {
+ && (afs_globalVp->f.states & CStatd)) {
tvp = afs_globalVp;
error = 0;
#ifdef AFS_DARWIN80_ENV
size = ubc_getsize(vp);
ubc_sync_range(vp, 0, size, UBC_INVALIDATE);
/* XXX what about when not CStatd */
- if (avc->states & CStatd && size != avc->m.Length)
- ubc_setsize(vp, avc->m.Length);
+ if (avc->f.states & CStatd && size != avc->f.m.Length)
+ ubc_setsize(vp, avc->f.m.Length);
#else
if (UBCINFOEXISTS(vp)) {
size = ubc_getsize(vp);
if (kret != 1) /* Should be KERN_SUCCESS */
printf("VMFlushPages: invalidate failed (error = %d)\n", kret);
/* XXX what about when not CStatd */
- if (avc->states & CStatd && size != avc->m.Length)
+ if (avc->f.states & CStatd && size != avc->f.m.Length)
if (UBCISVALID(vp))
- ubc_setsize(vp, avc->m.Length);
+ ubc_setsize(vp, avc->f.m.Length);
}
#endif
}
struct vnode *vp = AFSTOV(avc);
#ifndef AFS_DARWIN80_ENV
- if (UBCISVALID(vp) && ((avc->states & CStatd) || force)) {
+ if (UBCISVALID(vp) && ((avc->f.states & CStatd) || force)) {
if (!UBCINFOEXISTS(vp)) {
osi_vnhold(avc, 0);
- avc->states |= CUBCinit;
+ avc->f.states |= CUBCinit;
AFS_GUNLOCK();
if ((error = ubc_info_init(vp))) {
AFS_GLOCK();
- avc->states &= ~CUBCinit;
+ avc->f.states &= ~CUBCinit;
AFS_RELE(vp);
return error;
}
AFS_GLOCK();
- avc->states &= ~CUBCinit;
+ avc->f.states &= ~CUBCinit;
AFS_RELE(vp);
}
if (UBCINFOEXISTS(vp) && UBCISVALID(vp)) {
- ubc_setsize(vp, avc->m.Length);
+ ubc_setsize(vp, avc->f.m.Length);
}
}
#endif
struct vcache *tvc = VTOAFS(vp);
#ifndef AFS_DARWIN80_ENV
- tvc->states |= CUBCinit;
+ tvc->f.states |= CUBCinit;
#endif
#ifdef AFS_DARWIN80_ENV
- osi_Assert((tvc->states & CVInit) == 0);
- if (tvc->states & CDeadVnode)
+ osi_Assert((tvc->f.states & CVInit) == 0);
+ if (tvc->f.states & CDeadVnode)
osi_Assert(!vnode_isinuse(vp, 1));
#endif
if (haveGlock) AFS_GUNLOCK();
if (haveGlock) AFS_GLOCK();
#ifndef AFS_DARWIN80_ENV
- tvc->states &= ~CUBCinit;
+ tvc->f.states &= ~CUBCinit;
#endif
}
int
(*ap->a_vpp)->v_vfsp = dvp->v_vfsp;
vn_lock(*ap->a_vpp, LK_EXCLUSIVE | LK_RETRY, p);
if (UBCINFOMISSING(*ap->a_vpp) || UBCINFORECLAIMED(*ap->a_vpp)) {
- vcp->states |= CUBCinit;
+ vcp->f.states |= CUBCinit;
ubc_info_init(*ap->a_vpp);
- vcp->states &= ~CUBCinit;
+ vcp->f.states &= ~CUBCinit;
}
#endif
} else
code = afs_CheckCode(code, &treq, 56);
goto out;
}
- if (afs_fakestat_enable && tvc->mvstat && !(tvc->states & CStatd)) {
+ if (afs_fakestat_enable && tvc->mvstat && !(tvc->f.states & CStatd)) {
code = 0;
goto out;
}
#endif
if (code == 1 && vnode_vtype(ap->a_vp) == VREG &&
ap->a_action & KAUTH_VNODE_EXECUTE &&
- (tvc->m.Mode & 0100) != 0100) {
+ (tvc->f.m.Mode & 0100) != 0100) {
code = 0;
}
if (code) {
code = afs_read(tvc, uio, cred, 0, 0, 0);
if (code == 0) {
ObtainWriteLock(&tvc->lock, 2);
- tvc->states |= CMAPPED;
+ tvc->f.states |= CMAPPED;
ReleaseWriteLock(&tvc->lock);
}
AFS_GUNLOCK();
UPL_ABORT_FREE_ON_EMPTY);
return (EINVAL);
}
- if (f_offset >= tvc->m.Length) {
+ if (f_offset >= tvc->f.m.Length) {
if (!nocommit)
OSI_UPL_ABORT_RANGE(pl, pl_offset, size,
UPL_ABORT_FREE_ON_EMPTY);
/* size will always be a multiple of PAGE_SIZE */
/* pageout isn't supposed to extend files */
- if (f_offset + size > tvc->m.Length)
- iosize = tvc->m.Length - f_offset;
+ if (f_offset + size > tvc->f.m.Length)
+ iosize = tvc->f.m.Length - f_offset;
else
iosize = size;
* contents past end of the file before
* releasing it in the VM page cache
*/
- if ((f_offset < tvc->m.Length) && (f_offset + size) > tvc->m.Length) {
- size_t io = tvc->m.Length - f_offset;
+ if ((f_offset < tvc->f.m.Length) && (f_offset + size) > tvc->f.m.Length) {
+ size_t io = tvc->f.m.Length - f_offset;
memset((caddr_t) (ioaddr + pl_offset + io), 0, size - io);
}
#ifdef AFS_DARWIN80_ENV
struct vcache *tvc = VTOAFS(vp);
- if (!(tvc->states & CUnlinked)) {
+ if (!(tvc->f.states & CUnlinked)) {
ubc_setsize(vp, (off_t)0);
vnode_recycle(vp);
}
* run mv as the user, thus:
*/
printf("su %d -c /bin/mv /afs/.:mount/%d:%d:%d:%d/%s /afs/.:mount/%d:%d:%d:%d/%s\n",
- (cn_cred(tcnp))->cr_uid, fvc->fid.Cell, fvc->fid.Fid.Volume,
- fvc->fid.Fid.Vnode, fvc->fid.Fid.Unique, fname,
- tvc->fid.Cell, tvc->fid.Fid.Volume, tvc->fid.Fid.Vnode,
- tvc->fid.Fid.Unique, tname);
+ (cn_cred(tcnp))->cr_uid, fvc->f.fid.Cell, fvc->f.fid.Fid.Volume,
+ fvc->f.fid.Fid.Vnode, fvc->f.fid.Fid.Unique, fname,
+ tvc->f.fid.Cell, tvc->f.fid.Fid.Volume, tvc->f.fid.Fid.Vnode,
+ tvc->f.fid.Fid.Unique, tname);
}
#endif
#ifdef AFS_DARWIN80_ENV
#endif
if (tvc) {
#ifdef AFS_DARWIN80_ENV
- int unlinked = tvc->states & CUnlinked;
+ int unlinked = tvc->f.states & CUnlinked;
#endif
AFS_GLOCK();
afs_InactiveVCache(tvc, 0); /* decrs ref counts */
tvc->v->v_data = NULL; /* remove from vnode */
#endif
AFSTOV(tvc) = NULL; /* also drop the ptr to vnode */
- tvc->states |= CVInit; /* also CDeadVnode? */
+ tvc->f.states |= CVInit; /* also CDeadVnode? */
tvc->nextfree = ReclaimedVCList;
ReclaimedVCList = tvc;
ReleaseWriteLock(&afs_xvreclaim);
} else {
error = afs_FlushVCache(tvc, &sl); /* toss our stuff from vnode */
- if (tvc->states & (CVInit
+ if (tvc->f.states & (CVInit
#ifdef AFS_DARWIN80_ENV
| CDeadVnode
#endif
)) {
- tvc->states &= ~(CVInit
+ tvc->f.states &= ~(CVInit
#ifdef AFS_DARWIN80_ENV
| CDeadVnode
#endif
);
- afs_osi_Wakeup(&tvc->states);
+ afs_osi_Wakeup(&tvc->f.states);
}
if (!error && vnode_fsnode(vp))
panic("afs_reclaim: vnode not cleaned");
{
register struct vnode *vp = ap->a_vp;
register struct vcache *vc = VTOAFS(ap->a_vp);
- int s = vc->states;
+ int s = vc->f.states;
printf("tag %d, fid: %ld.%x.%x.%x, opens %d, writers %d", vp->v_tag,
- vc->fid.Cell, vc->fid.Fid.Volume, vc->fid.Fid.Vnode,
- vc->fid.Fid.Unique, vc->opens, vc->execsOrWriters);
+ vc->f.fid.Cell, vc->f.fid.Fid.Volume, vc->f.fid.Fid.Vnode,
+ vc->f.fid.Fid.Unique, vc->opens, vc->execsOrWriters);
printf("\n states%s%s%s%s%s", (s & CStatd) ? " statd" : "",
(s & CRO) ? " readonly" : "", (s & CDirty) ? " dirty" : "",
(s & CMAPPED) ? " mapped" : "",
#if 0
AFS_GLOCK();
ObtainWriteLock(&avc->lock,342);
- if (avc->states & CStatd) {
- par.vnfs_vtype = avc->m.Type;
+ if (avc->f.states & CStatd) {
+ par.vnfs_vtype = avc->f.m.Type;
par.vnfs_vops = afs_vnodeop_p;
- par.vnfs_filesize = avc->m.Length;
+ par.vnfs_filesize = avc->f.m.Length;
if (!ac->cnp)
par.vnfs_flags = VNFS_NOCACHE;
dead = 0;
#if 0
if (dead) {
vnode_recycle(vp); /* terminate as soon as iocount drops */
- avc->states |= CDeadVnode;
+ avc->f.states |= CDeadVnode;
} else if (!ac->markroot && !ac->cnp) {
/* the caller doesn't know anything about this vnode. if markroot
should have been set and wasn't, bad things may happen, so encourage
}
#else
vnode_recycle(vp); /* terminate as soon as iocount drops */
- avc->states |= CDeadVnode;
+ avc->f.states |= CDeadVnode;
#endif
}
return error;
AFS_GLOCK();
ObtainWriteLock(&avc->lock,325);
ovp = AFSTOV(avc);
- if (!(avc->states & CDeadVnode) && vnode_vtype(ovp) != VNON) {
+ if (!(avc->f.states & CDeadVnode) && vnode_vtype(ovp) != VNON) {
AFS_GUNLOCK();
#if 0 /* unsupported */
if (dvp && cnp)
AFS_GUNLOCK();
return 0;
}
- if ((avc->states & CDeadVnode) && vnode_vtype(ovp) != VNON)
+ if ((avc->f.states & CDeadVnode) && vnode_vtype(ovp) != VNON)
panic("vcache %p should not be CDeadVnode", avc);
AFS_GUNLOCK();
memset(&par, 0, sizeof(struct vnode_fsparam));
par.vnfs_mp = afs_globalVFS;
- par.vnfs_vtype = avc->m.Type;
+ par.vnfs_vtype = avc->f.m.Type;
par.vnfs_vops = afs_vnodeop_p;
- par.vnfs_filesize = avc->m.Length;
+ par.vnfs_filesize = avc->f.m.Length;
par.vnfs_fsnode = avc;
par.vnfs_dvp = dvp;
if (cnp && (cnp->cn_flags & ISDOTDOT) == 0)
error = vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &par, &nvp);
if (!error) {
vnode_addfsref(nvp);
- if ((avc->states & CDeadVnode) && vnode_vtype(ovp) != VNON)
+ if ((avc->f.states & CDeadVnode) && vnode_vtype(ovp) != VNON)
printf("vcache %p should not be CDeadVnode", avc);
if (avc->v == ovp) {
- if (!(avc->states & CVInit)) {
+ if (!(avc->f.states & CVInit)) {
vnode_clearfsnode(ovp);
vnode_removefsref(ovp);
}
}
avc->v = nvp;
- avc->states &=~ CDeadVnode;
+ avc->f.states &=~ CDeadVnode;
}
vnode_put(ovp);
vnode_rele(ovp);
AFS_GLOCK();
ReleaseWriteLock(&avc->lock);
if (!error)
- afs_osi_Wakeup(&avc->states);
+ afs_osi_Wakeup(&avc->f.states);
AFS_GUNLOCK();
return error;
}
AFS_GLOCK();
AFS_STATCNT(afs_root);
crhold(cr);
- if (afs_globalVp && (afs_globalVp->states & CStatd)) {
+ if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
tvp = afs_globalVp;
error = 0;
} else {
#endif
AFS_GUNLOCK();
#ifdef AFS_FBSD60_ENV
- vnode_create_vobject(ap->a_vp, vc->m.Length, ap->a_td);
+ vnode_create_vobject(ap->a_vp, vc->f.m.Length, ap->a_td);
#endif
osi_FlushPages(vc, ap->a_cred);
return error;
{
register struct vnode *vp = ap->a_vp;
register struct vcache *vc = VTOAFS(ap->a_vp);
- int s = vc->states;
+ int s = vc->f.states;
#ifdef AFS_FBSD50_ENV
printf("tag %s, fid: %d.%x.%x.%x, opens %d, writers %d", vp->v_tag,
- (int)vc->fid.Cell, (u_int) vc->fid.Fid.Volume,
- (u_int) vc->fid.Fid.Vnode, (u_int) vc->fid.Fid.Unique, vc->opens,
+ (int)vc->f.fid.Cell, (u_int) vc->f.fid.Fid.Volume,
+ (u_int) vc->f.fid.Fid.Vnode, (u_int) vc->f.fid.Fid.Unique, vc->opens,
vc->execsOrWriters);
#else
printf("tag %d, fid: %ld.%x.%x.%x, opens %d, writers %d", vp->v_tag,
- vc->fid.Cell, (u_int) vc->fid.Fid.Volume,
- (u_int) vc->fid.Fid.Vnode, (u_int) vc->fid.Fid.Unique, vc->opens,
+ vc->f.fid.Cell, (u_int) vc->f.fid.Fid.Volume,
+ (u_int) vc->f.fid.Fid.Vnode, (u_int) vc->f.fid.Fid.Unique, vc->opens,
vc->execsOrWriters);
#endif
printf("\n states%s%s%s%s%s", (s & CStatd) ? " statd" : "",
AFS_GLOCK();
AFS_STATCNT(afs_root);
- if (afs_globalVp && (afs_globalVp->states & CStatd)) {
+ if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
tvp = afs_globalVp;
} else {
if (afs_globalVp) {
AFS_GLOCK();
qprintf(" Len %d DV %d Date %d Own %d Grp %d Mode 0%o Lnk %d\n",
- avc->m.Length, avc->m.DataVersion, avc->m.Date, avc->m.Owner,
- avc->m.Group, avc->m.Mode, avc->m.LinkCount);
+ avc->f.m.Length, avc->f.m.DataVersion, avc->f.m.Date, avc->f.m.Owner,
+ avc->f.m.Group, avc->f.m.Mode, avc->f.m.LinkCount);
qprintf(" flushDV %d mapDV %d truncpos 0x%x cb 0x%x cbE 0x%x\n",
- avc->flushDV, avc->mapDV, avc->truncPos, avc->callback,
+ avc->flushDV, avc->mapDV, avc->f.truncPos, avc->callback,
avc->cbExpires);
qprintf(" opens %d ex/wr %d flckcnt %d state 0x%x ", avc->opens,
- avc->execsOrWriters, avc->flockCount, avc->states);
- printflags(avc->states, tab_vcache);
+ avc->execsOrWriters, avc->flockCount, avc->f.states);
+ printflags(avc->f.states, tab_vcache);
qprintf("\n");
#ifdef AFS_SGI64_ENV
qprintf(" mapcnt %llu, mvstat %d anyAcc 0x%x Access 0x%x\n",
- avc->mapcnt, avc->mvstat, avc->anyAccess, avc->Access);
+ avc->mapcnt, avc->mvstat, avc->f.anyAccess, avc->Access);
qprintf(" mvid 0x%x &lock 0x%x cred 0x%x\n", avc->mvid, &avc->lock,
avc->cred);
qprintf(" rwlock 0x%x (%d) id %llu trips %d\n", &avc->vc_rwlock,
valusema(&avc->vc_rwlock), avc->vc_rwlockid, avc->vc_locktrips);
#else
qprintf(" mapcnt %d mvstat %d anyAcc 0x%x Access 0x%x\n", avc->mapcnt,
- avc->mvstat, avc->anyAccess, avc->Access);
+ avc->mvstat, avc->f.anyAccess, avc->Access);
qprintf(" mvid 0x%x &lock 0x%x cred 0x%x\n", avc->mvid, &avc->lock,
avc->cred);
qprintf(" rwlock 0x%x (%d) id %d trips %d\n", &avc->vc_rwlock,
for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
tvc = QTOV(tq);
uq = QPrev(tq);
- nodeid = tvc->fid.Fid.Vnode + (tvc->fid.Fid.Volume << 16);
+ nodeid = tvc->f.fid.Fid.Vnode + (tvc->f.fid.Fid.Volume << 16);
nodeid &= 0x7fffffff;
qprintf("avp 0x%x type %s cnt %d pg %d map %d nodeid %d(0x%x)\n", tvc,
tab_vtypes[((vnode_t *) tvc)->v_type],
OSI_VFS_CONVERT(afsp);
AFS_STATCNT(afs_root);
- if (afs_globalVp && (afs_globalVp->states & CStatd)) {
+ if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
tvp = afs_globalVp;
} else {
if (afs_globalVp) {
AFS_GUNLOCK();
if (flags & SYNC_CLOSE) {
- PFLUSHINVALVP(vp, (off_t) 0, (off_t) tvc->m.Length);
+ PFLUSHINVALVP(vp, (off_t) 0, (off_t) tvc->f.m.Length);
}
#ifdef AFS_SGI61_ENV
else if (flags & SYNC_PDFLUSH) {
if ((flags & SYNC_DELWRI) && AFS_VN_DIRTY(vp)) {
#ifdef AFS_SGI61_ENV
- PFLUSHVP(vp, (off_t) tvc->m.Length,
+ PFLUSHVP(vp, (off_t) tvc->f.m.Length,
(flags & SYNC_WAIT) ? 0 : B_ASYNC, error);
#else /* AFS_SGI61_ENV */
if (flags & SYNC_WAIT)
/* push all and wait */
- PFLUSHVP(vp, (off_t) tvc->m.Length, (off_t) 0, error);
+ PFLUSHVP(vp, (off_t) tvc->f.m.Length, (off_t) 0, error);
else if (flags & SYNC_BDFLUSH) {
/* push oldest */
error = pdflush(vp, B_ASYNC);
} else {
/* push all but don't wait */
- PFLUSHVP(vp, (off_t) tvc->m.Length, (off_t) B_ASYNC, error);
+ PFLUSHVP(vp, (off_t) tvc->f.m.Length, (off_t) B_ASYNC, error);
}
#endif /* AFS_SGI61_ENV */
}
osi_VM_FSyncInval(struct vcache *avc)
{
AFS_GUNLOCK();
- PFLUSHINVALVP((vnode_t *) avc, (off_t) 0, (off_t) avc->m.Length);
+ PFLUSHINVALVP((vnode_t *) avc, (off_t) 0, (off_t) avc->f.m.Length);
AFS_GLOCK();
}
pdflush(AFSTOV(avc), 0);
}
- PFLUSHVP(AFSTOV(avc), (off_t) avc->m.Length, (off_t) 0, error);
+ PFLUSHVP(AFSTOV(avc), (off_t) avc->f.m.Length, (off_t) 0, error);
AFS_GLOCK();
if (error) {
/*
* does what we want (we don't use this normally since
* it also unhashes pages ..)
*/
- PINVALFREE((vnode_t *) avc, avc->m.Length);
+ PINVALFREE((vnode_t *) avc, avc->f.m.Length);
}
ObtainWriteLock(&avc->lock, 121);
- if (error && avc->m.LinkCount)
+ if (error && avc->f.m.LinkCount)
cmn_err(CE_WARN,
"AFS:Failed to push back pages for vnode 0x%x error %d (from afs_StoreOnLastReference)",
avc, error);
{
vnode_t *vp = (vnode_t *) avc;
- remapf(vp, /*avc->m.Length */ 0, 0);
+ remapf(vp, /*avc->f.m.Length */ 0, 0);
- /* Used to grab locks and recheck avc->m.DataVersion and
+ /* Used to grab locks and recheck avc->f.m.DataVersion and
* avc->execsOrWriters here, but we have to drop locks before calling
* ptossvp() anyway, so why bother.
*/
return EISDIR;
if (ioflag & IO_APPEND)
- uiop->uio_offset = avc->m.Length;
+ uiop->uio_offset = avc->f.m.Length;
#ifdef AFS_SGI64_ENV
#ifdef AFS_SGI65_ENV
if (!(ioflag & IO_ISLOCKED))
if (rw == UIO_WRITE) {
ObtainWriteLock(&avc->lock, 330);
- avc->states |= CDirty;
+ avc->f.states |= CDirty;
ReleaseWriteLock(&avc->lock);
}
/*
* read/paging in a normal file
*/
- rem = avc->m.Length - uio->uio_offset;
+ rem = avc->f.m.Length - uio->uio_offset;
if (rem <= 0)
/* EOF */
break;
ObtainWriteLock(&avc->lock, 90);
error = afs_DoPartialWrite(avc, &treq);
if (error == 0)
- avc->states |= CDirty;
+ avc->f.states |= CDirty;
ReleaseWriteLock(&avc->lock);
AFS_GUNLOCK();
if (error)
else
bp = chunkread(vp, bmv, 1, cr);
- avc->m.Date = osi_Time(); /* Set file date (for ranlib) */
+ avc->f.m.Date = osi_Time(); /* Set file date (for ranlib) */
}
if (bp->b_flags & B_ERROR) {
/*
* Make sure it is at least as high as the last byte we just wrote
* into the buffer.
*/
- if (avc->m.Length < uio->uio_offset) {
+ if (avc->f.m.Length < uio->uio_offset) {
AFS_GLOCK();
ObtainWriteLock(&avc->lock, 235);
- avc->m.Length = uio->uio_offset;
+ avc->f.m.Length = uio->uio_offset;
ReleaseWriteLock(&avc->lock);
AFS_GUNLOCK();
}
* explanation
*/
if (error) {
- if (avc->m.LinkCount == 0)
+ if (avc->f.m.LinkCount == 0)
cmn_err(CE_WARN,
"AFS: Process pid %d write error %d writing to unlinked file.",
OSI_GET_CURRENT_PID(), error);
}
}
} while (!error && uio->uio_resid > 0);
- afs_chkpgoob(&avc->v, btoc(avc->m.Length));
+ afs_chkpgoob(&avc->v, btoc(avc->f.m.Length));
AFS_GLOCK();
- if (rw == UIO_WRITE && error == 0 && (avc->states & CDirty)) {
+ if (rw == UIO_WRITE && error == 0 && (avc->f.states & CDirty)) {
ObtainWriteLock(&avc->lock, 405);
error = afs_DoPartialWrite(avc, &treq);
ReleaseWriteLock(&avc->lock);
bmv->bn = BTOBBT(offset - off);
bmv->offset = bmv->bn;
bmv->pboff = off;
- rem = avc->m.Length - offset;
+ rem = avc->f.m.Length - offset;
if (rem <= 0)
cnt = 0; /* EOF */
else
*/
ObtainReadLock(&avc->lock);
if (bp->b_flags & B_READ) {
- if (BBTOB(bp->b_blkno) >= avc->m.Length) {
+ if (BBTOB(bp->b_blkno) >= avc->f.m.Length) {
/* we are responsible for zero'ing the page */
caddr_t c;
c = bp_mapin(bp);
ReleaseReadLock(&avc->lock);
return;
}
- } else if ((avc->states & CWritingUFS) && (bp->b_flags & B_DELWRI)) {
+ } else if ((avc->f.states & CWritingUFS) && (bp->b_flags & B_DELWRI)) {
bp->b_ref = 3;
ReleaseReadLock(&avc->lock);
iodone(bp);
/* on last mapping push back and remove our reference */
osi_Assert(avc->execsOrWriters > 0);
osi_Assert(avc->opens > 0);
- if (avc->m.LinkCount == 0) {
+ if (avc->f.m.LinkCount == 0) {
ObtainWriteLock(&avc->lock, 238);
AFS_GUNLOCK();
PTOSSVP(vp, (off_t) 0, (off_t) MAXLONG);
if (code == VNOVNODE)
code = 0;
if (code) {
- afs_StoreWarn(code, avc->fid.Fid.Volume, /* /dev/console */
+ afs_StoreWarn(code, avc->f.fid.Fid.Volume, /* /dev/console */
1);
}
code = afs_CheckCode(code, &treq, 52);
AFS_RWLOCK(vp, VRWLOCK_WRITE);
AFS_GUNLOCK();
error =
- fs_map_subr(vp, (off_t) avc->m.Length, (u_int) avc->m.Mode, off, prp,
+ fs_map_subr(vp, (off_t) avc->f.m.Length, (u_int) avc->f.m.Mode, off, prp,
*addrp, len, prot, maxprot, flags, cr);
AFS_GLOCK();
AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
return VN_INACTIVE_CACHE;
}
- if (avc->states & CUnlinked) {
+ if (avc->f.states & CUnlinked) {
if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
- avc->states |= CUnlinkedDel;
+ avc->f.states |= CUnlinkedDel;
ReleaseWriteLock(&avc->lock);
AFS_RWUNLOCK(vp, VRWLOCK_WRITE);
} else {
}
return VN_INACTIVE_CACHE;
}
- if ((avc->states & CDirty) || (avc->execsOrWriters > 0)) {
+ if ((avc->f.states & CDirty) || (avc->execsOrWriters > 0)) {
/* File either already has dirty chunks (CDirty) or was mapped at
* time in its life with the potential for being written into.
* Note that afs_close defers storebacks if the vnode's ref count
if (mapcnt) {
cmn_err(CE_WARN,
"AFS: Failed to store FID (%x:%lu.%lu.%lu) in VOP_INACTIVE, error = %d\n",
- (int)(avc->fid.Cell) & 0xffffffff,
- avc->fid.Fid.Volume, avc->fid.Fid.Vnode,
- avc->fid.Fid.Unique, code);
+ (int)(avc->f.fid.Cell) & 0xffffffff,
+ avc->f.fid.Fid.Volume, avc->f.fid.Fid.Vnode,
+ avc->f.fid.Fid.Unique, code);
}
afs_InvalidateAllSegments(avc);
}
}
#endif
- osi_Assert((avc->states & (CCore | CMAPPED)) == 0);
+ osi_Assert((avc->f.states & (CCore | CMAPPED)) == 0);
if (avc->cred) {
crfree(avc->cred);
* If someone unlinked a file and this is the last hurrah -
* nuke all the pages.
*/
- if (avc->m.LinkCount == 0) {
+ if (avc->f.m.LinkCount == 0) {
AFS_GUNLOCK();
PTOSSVP(vp, (off_t) 0, (off_t) MAXLONG);
AFS_GLOCK();
}
#ifndef AFS_SGI65_ENV
osi_Assert(avc->mapcnt == 0);
- afs_chkpgoob(&avc->v, btoc(avc->m.Length));
+ afs_chkpgoob(&avc->v, btoc(avc->f.m.Length));
- avc->states &= ~CDirty; /* Give up on store-backs */
- if (avc->states & CUnlinked) {
+ avc->f.states &= ~CDirty; /* Give up on store-backs */
+ if (avc->f.states & CUnlinked) {
if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
- avc->states |= CUnlinkedDel;
+ avc->f.states |= CUnlinkedDel;
} else {
afs_remunlink(avc, 1); /* ignore any return code */
}
osi_Assert(sizeof(fid_t) >= sizeof(afs_fid2_t));
afid->af_len = sizeof(afs_fid2_t) - sizeof(afid->af_len);
- tcell = afs_GetCell(avc->fid.Cell, READ_LOCK);
+ tcell = afs_GetCell(avc->f.fid.Cell, READ_LOCK);
afid->af_cell = tcell->cellIndex & 0xffff;
afs_PutCell(tcell, READ_LOCK);
- afid->af_volid = avc->fid.Fid.Volume;
- afid->af_vno = avc->fid.Fid.Vnode;
- afid->af_uniq = avc->fid.Fid.Unique;
+ afid->af_volid = avc->f.fid.Fid.Volume;
+ afid->af_vno = avc->f.fid.Fid.Vnode;
+ afid->af_uniq = avc->f.fid.Fid.Unique;
return 0;
}
#ifdef OSI_EXPORT_DEBUG
printk("afs: encode_fh(0x%08x/%d/%d.%d)\n",
- tvc->fid.Cell, tvc->fid.Fid.Volume,
- tvc->fid.Fid.Vnode, tvc->fid.Fid.Unique);
+ tvc->f.fid.Cell, tvc->f.fid.Fid.Volume,
+ tvc->f.fid.Fid.Vnode, tvc->f.fid.Fid.Unique);
#endif
- if (afs_IsDynrootAnyFid(&tvc->fid)) {
- vntype = VNUM_TO_VNTYPE(tvc->fid.Fid.Vnode);
+ if (afs_IsDynrootAnyFid(&tvc->f.fid)) {
+ vntype = VNUM_TO_VNTYPE(tvc->f.fid.Fid.Vnode);
switch (vntype) {
case 0:
/* encode as a normal filehandle */
case VN_TYPE_CELL:
case VN_TYPE_ALIAS:
AFS_GLOCK();
- tc = afs_GetCellByIndex(VNUM_TO_CIDX(tvc->fid.Fid.Vnode),
+ tc = afs_GetCellByIndex(VNUM_TO_CIDX(tvc->f.fid.Fid.Vnode),
READ_LOCK);
if (!tc) {
AFS_GUNLOCK();
afs_PutCell(tc, READ_LOCK);
AFS_GUNLOCK();
if (vntype == VN_TYPE_MOUNT) {
- fh[4] = htonl(tvc->fid.Fid.Unique);
+ fh[4] = htonl(tvc->f.fid.Fid.Unique);
*max_len = 5;
return AFSFH_DYN_MOUNT;
}
*max_len = 4;
if (vntype == VN_TYPE_CELL) {
- return AFSFH_DYN_RO_CELL | VNUM_TO_RW(tvc->fid.Fid.Vnode);
+ return AFSFH_DYN_RO_CELL | VNUM_TO_RW(tvc->f.fid.Fid.Vnode);
} else {
- return AFSFH_DYN_RO_LINK | VNUM_TO_RW(tvc->fid.Fid.Vnode);
+ return AFSFH_DYN_RO_LINK | VNUM_TO_RW(tvc->f.fid.Fid.Vnode);
}
case VN_TYPE_SYMLINK:
if (*max_len < 7) {
/* not big enough for a migratable filehandle */
/* always encode in network order */
- fh[0] = htonl(tvc->fid.Cell);
- fh[1] = htonl(tvc->fid.Fid.Volume);
- fh[2] = htonl(tvc->fid.Fid.Vnode);
- fh[3] = htonl(tvc->fid.Fid.Unique);
+ fh[0] = htonl(tvc->f.fid.Cell);
+ fh[1] = htonl(tvc->f.fid.Fid.Volume);
+ fh[2] = htonl(tvc->f.fid.Fid.Vnode);
+ fh[3] = htonl(tvc->f.fid.Fid.Unique);
*max_len = 4;
return AFSFH_NET_VENUSFID;
}
AFS_GLOCK();
- tc = afs_GetCell(tvc->fid.Cell, READ_LOCK);
+ tc = afs_GetCell(tvc->f.fid.Cell, READ_LOCK);
if (!tc) {
AFS_GUNLOCK();
return 255;
afs_PutCell(tc, READ_LOCK);
AFS_GUNLOCK();
/* always encode in network order */
- fh[4] = htonl(tvc->fid.Fid.Volume);
- fh[5] = htonl(tvc->fid.Fid.Vnode);
- fh[6] = htonl(tvc->fid.Fid.Unique);
+ fh[4] = htonl(tvc->f.fid.Fid.Volume);
+ fh[5] = htonl(tvc->f.fid.Fid.Vnode);
+ fh[6] = htonl(tvc->f.fid.Fid.Unique);
*max_len = 7;
return AFSFH_NET_CELLFID;
int code;
redo:
- if (!(adp->states & CStatd)) {
+ if (!(adp->f.states & CStatd)) {
if ((code = afs_VerifyVCache2(adp, areq))) {
#ifdef OSI_EXPORT_DEBUG
printk("afs: update_dir_parent(0x%08x/%d/%d.%d): VerifyVCache2: %d\n",
- adp->fid.Cell, adp->fid.Fid.Volume,
- adp->fid.Fid.Vnode, adp->fid.Fid.Unique, code);
+ adp->f.fid.Cell, adp->f.fid.Fid.Volume,
+ adp->f.fid.Fid.Vnode, adp->f.fid.Fid.Unique, code);
#endif
return code;
}
if (!tdc) {
#ifdef OSI_EXPORT_DEBUG
printk("afs: update_dir_parent(0x%08x/%d/%d.%d): no dcache\n",
- adp->fid.Cell, adp->fid.Fid.Volume,
- adp->fid.Fid.Vnode, adp->fid.Fid.Unique);
+ adp->f.fid.Cell, adp->f.fid.Fid.Volume,
+ adp->f.fid.Fid.Vnode, adp->f.fid.Fid.Unique);
#endif
return EIO;
}
* 1. The cache data is being fetched by another process.
* 2. The cache data is no longer valid
*/
- while ((adp->states & CStatd)
+ while ((adp->f.states & CStatd)
&& (tdc->dflags & DFFetching)
- && hsame(adp->m.DataVersion, tdc->f.versionNo)) {
+ && hsame(adp->f.m.DataVersion, tdc->f.versionNo)) {
ReleaseReadLock(&tdc->lock);
ReleaseSharedLock(&adp->lock);
afs_osi_Sleep(&tdc->validPos);
ObtainSharedLock(&adp->lock, 802);
ObtainReadLock(&tdc->lock);
}
- if (!(adp->states & CStatd)
- || !hsame(adp->m.DataVersion, tdc->f.versionNo)) {
+ if (!(adp->f.states & CStatd)
+ || !hsame(adp->f.m.DataVersion, tdc->f.versionNo)) {
ReleaseReadLock(&tdc->lock);
ReleaseSharedLock(&adp->lock);
afs_PutDCache(tdc);
#ifdef OSI_EXPORT_DEBUG
printk("afs: update_dir_parent(0x%08x/%d/%d.%d): dir changed; retrying\n",
- adp->fid.Cell, adp->fid.Fid.Volume,
- adp->fid.Fid.Vnode, adp->fid.Fid.Unique);
+ adp->f.fid.Cell, adp->f.fid.Fid.Volume,
+ adp->f.fid.Fid.Vnode, adp->f.fid.Fid.Unique);
#endif
goto redo;
}
if (!code) {
UpgradeSToWLock(&adp->lock, 803);
- adp->parentVnode = tfid.Fid.Vnode;
- adp->parentUnique = tfid.Fid.Unique;
+ adp->f.parent.vnode = tfid.Fid.Vnode;
+ adp->f.parent.unique = tfid.Fid.Unique;
}
#ifdef OSI_EXPORT_DEBUG
if (code) {
printk("afs: update_dir_parent(0x%08x/%d/%d.%d): afs_dir_Lookup: %d\n",
- adp->fid.Cell, adp->fid.Fid.Volume,
- adp->fid.Fid.Vnode, adp->fid.Fid.Unique, code);
+ adp->f.fid.Cell, adp->f.fid.Fid.Volume,
+ adp->f.fid.Fid.Vnode, adp->f.fid.Fid.Unique, code);
} else {
printk("afs: update_dir_parent(0x%08x/%d/%d.%d) => %d.%d\n",
- adp->fid.Cell, adp->fid.Fid.Volume,
- adp->fid.Fid.Vnode, adp->fid.Fid.Unique,
- adp->parentVnode, adp->parentUnique);
+ adp->f.fid.Cell, adp->f.fid.Fid.Volume,
+ adp->f.fid.Fid.Vnode, adp->f.fid.Fid.Unique,
+ adp->f.parent.vnode, adp->f.parent.unique);
}
#endif
ReleaseSharedLock(&adp->lock);
return 0;
/* Figure out what FID to look for */
- tvp = afs_GetVolume(&(*vcpp)->fid, 0, READ_LOCK);
+ tvp = afs_GetVolume(&(*vcpp)->f.fid, 0, READ_LOCK);
if (!tvp) {
#ifdef OSI_EXPORT_DEBUG
printk("afs: UnEvalFakeStat(0x%08x/%d/%d.%d): no volume\n",
- (*vcpp)->fid.Cell, (*vcpp)->fid.Fid.Volume,
- (*vcpp)->fid.Fid.Vnode, (*vcpp)->fid.Fid.Unique);
+ (*vcpp)->f.fid.Cell, (*vcpp)->f.fid.Fid.Volume,
+ (*vcpp)->f.fid.Fid.Vnode, (*vcpp)->f.fid.Fid.Unique);
#endif
return ENOENT;
}
if (!tvc) {
#ifdef OSI_EXPORT_DEBUG
printk("afs: UnEvalFakeStat(0x%08x/%d/%d.%d): GetVCache(0x%08x/%d/%d.%d) failed\n",
- (*vcpp)->fid.Cell, (*vcpp)->fid.Fid.Volume,
- (*vcpp)->fid.Fid.Vnode, (*vcpp)->fid.Fid.Unique,
+ (*vcpp)->f.fid.Cell, (*vcpp)->f.fid.Fid.Volume,
+ (*vcpp)->f.fid.Fid.Vnode, (*vcpp)->f.fid.Fid.Unique,
tfid.Cell, tfid.Fid.Volume,
tfid.Fid.Vnode, tfid.Fid.Unique);
#endif
* at parentVnode on directories, except for VIOCGETVCXSTATUS.
* So, if this fails, we don't really care very much.
*/
- if (vType(vcp) == VDIR && vcp->mvstat != 2 && !vcp->parentVnode)
+ if (vType(vcp) == VDIR && vcp->mvstat != 2 && !vcp->f.parent.vnode)
update_dir_parent(&treq, vcp);
/*
#ifdef OSI_EXPORT_DEBUG
printk("afs: get_name(%s, 0x%08x/%d/%d.%d): this is the dynmount dir\n",
parent->d_name.name ? (char *)parent->d_name.name : "?",
- vcp->fid.Cell, vcp->fid.Fid.Volume,
- vcp->fid.Fid.Vnode, vcp->fid.Fid.Unique);
+ vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
+ vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique);
#endif
- data.fid = vcp->fid;
+ data.fid = vcp->f.fid;
if (VTOAFS(parent->d_inode) == afs_globalVp)
strcpy(name, AFS_DYNROOT_MOUNTNAME);
else
/* Figure out what FID to look for */
if (vcp->mvstat == 2) { /* volume root */
- tvp = afs_GetVolume(&vcp->fid, 0, READ_LOCK);
+ tvp = afs_GetVolume(&vcp->f.fid, 0, READ_LOCK);
if (!tvp) {
#ifdef OSI_EXPORT_DEBUG
printk("afs: get_name(%s, 0x%08x/%d/%d.%d): no volume for root\n",
parent->d_name.name ? (char *)parent->d_name.name : "?",
- vcp->fid.Cell, vcp->fid.Fid.Volume,
- vcp->fid.Fid.Vnode, vcp->fid.Fid.Unique);
+ vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
+ vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique);
#endif
code = ENOENT;
goto done;
data.fid = tvp->mtpoint;
afs_PutVolume(tvp, READ_LOCK);
} else {
- data.fid = vcp->fid;
+ data.fid = vcp->f.fid;
}
vcp = VTOAFS(parent->d_inode);
parent->d_name.name ? (char *)parent->d_name.name : "?",
data.fid.Cell, data.fid.Fid.Volume,
data.fid.Fid.Vnode, data.fid.Fid.Unique,
- vcp->fid.Cell, vcp->fid.Fid.Volume,
- vcp->fid.Fid.Vnode, vcp->fid.Fid.Unique);
+ vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
+ vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique);
#endif
code = afs_InitReq(&treq, credp);
if (code)
goto done;
- if (vcp->fid.Cell != data.fid.Cell ||
- vcp->fid.Fid.Volume != data.fid.Fid.Volume) {
+ if (vcp->f.fid.Cell != data.fid.Cell ||
+ vcp->f.fid.Fid.Volume != data.fid.Fid.Volume) {
/* parent is not the expected cell and volume; thus it
* cannot possibly contain the fid we are looking for */
#ifdef OSI_EXPORT_DEBUG
parent->d_name.name ? (char *)parent->d_name.name : "?",
data.fid.Cell, data.fid.Fid.Volume,
data.fid.Fid.Vnode, data.fid.Fid.Unique,
- vcp->fid.Cell, vcp->fid.Fid.Volume);
+ vcp->f.fid.Cell, vcp->f.fid.Fid.Volume);
#endif
code = ENOENT;
goto done;
redo:
- if (!(vcp->states & CStatd)) {
+ if (!(vcp->f.states & CStatd)) {
if ((code = afs_VerifyVCache2(vcp, &treq))) {
#ifdef OSI_EXPORT_DEBUG
printk("afs: get_name(%s, 0x%08x/%d/%d.%d): VerifyVCache2(0x%08x/%d/%d.%d): %d\n",
parent->d_name.name ? (char *)parent->d_name.name : "?",
data.fid.Cell, data.fid.Fid.Volume,
data.fid.Fid.Vnode, data.fid.Fid.Unique,
- vcp->fid.Cell, vcp->fid.Fid.Volume,
- vcp->fid.Fid.Vnode, vcp->fid.Fid.Unique, code);
+ vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
+ vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique, code);
#endif
goto done;
}
parent->d_name.name ? (char *)parent->d_name.name : "?",
data.fid.Cell, data.fid.Fid.Volume,
data.fid.Fid.Vnode, data.fid.Fid.Unique,
- vcp->fid.Cell, vcp->fid.Fid.Volume,
- vcp->fid.Fid.Vnode, vcp->fid.Fid.Unique, code);
+ vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
+ vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique, code);
#endif
code = EIO;
goto done;
* 1. The cache data is being fetched by another process.
* 2. The cache data is no longer valid
*/
- while ((vcp->states & CStatd)
+ while ((vcp->f.states & CStatd)
&& (tdc->dflags & DFFetching)
- && hsame(vcp->m.DataVersion, tdc->f.versionNo)) {
+ && hsame(vcp->f.m.DataVersion, tdc->f.versionNo)) {
ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&vcp->lock);
afs_osi_Sleep(&tdc->validPos);
ObtainReadLock(&vcp->lock);
ObtainReadLock(&tdc->lock);
}
- if (!(vcp->states & CStatd)
- || !hsame(vcp->m.DataVersion, tdc->f.versionNo)) {
+ if (!(vcp->f.states & CStatd)
+ || !hsame(vcp->f.m.DataVersion, tdc->f.versionNo)) {
ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&vcp->lock);
afs_PutDCache(tdc);
parent->d_name.name ? (char *)parent->d_name.name : "?",
data.fid.Cell, data.fid.Fid.Volume,
data.fid.Fid.Vnode, data.fid.Fid.Unique,
- vcp->fid.Cell, vcp->fid.Fid.Volume,
- vcp->fid.Fid.Vnode, vcp->fid.Fid.Unique);
+ vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
+ vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique);
#endif
goto redo;
}
parent->d_name.name ? (char *)parent->d_name.name : "?",
data.fid.Cell, data.fid.Fid.Volume,
data.fid.Fid.Vnode, data.fid.Fid.Unique,
- vcp->fid.Cell, vcp->fid.Fid.Volume,
- vcp->fid.Fid.Vnode, vcp->fid.Fid.Unique, code);
+ vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
+ vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique, code);
#endif
}
if (afs_IsDynrootMount(vcp)) {
/* the dynmount directory; parent is always the AFS root */
- tfid = afs_globalVp->fid;
+ tfid = afs_globalVp->f.fid;
} else if (afs_IsDynrootAny(vcp) &&
- VNUM_TO_VNTYPE(vcp->fid.Fid.Vnode) == VN_TYPE_MOUNT) {
+ VNUM_TO_VNTYPE(vcp->f.fid.Fid.Vnode) == VN_TYPE_MOUNT) {
/* a mount point in the dynmount directory */
afs_GetDynrootMountFid(&tfid);
ReleaseReadLock(&vcp->lock);
} else {
ReleaseReadLock(&vcp->lock);
- tcell = afs_GetCell(vcp->fid.Cell, READ_LOCK);
+ tcell = afs_GetCell(vcp->f.fid.Cell, READ_LOCK);
if (!tcell) {
#ifdef OSI_EXPORT_DEBUG
printk("afs: get_parent(0x%08x/%d/%d.%d): no cell\n",
- vcp->fid.Cell, vcp->fid.Fid.Volume,
- vcp->fid.Fid.Vnode, vcp->fid.Fid.Unique);
+ vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
+ vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique);
#endif
dp = ERR_PTR(-ENOENT);
goto done;
afs_GetDynrootMountFid(&tfid);
tfid.Fid.Vnode = VNUM_FROM_TYPEID(VN_TYPE_MOUNT, cellidx << 2);
- tfid.Fid.Unique = vcp->fid.Fid.Volume;
+ tfid.Fid.Unique = vcp->f.fid.Fid.Volume;
}
} else {
/* any other vnode */
- if (vType(vcp) == VDIR && !vcp->parentVnode && vcp->mvstat != 1) {
+ if (vType(vcp) == VDIR && !vcp->f.parent.vnode && vcp->mvstat != 1) {
code = afs_InitReq(&treq, credp);
if (code) {
#ifdef OSI_EXPORT_DEBUG
printk("afs: get_parent(0x%08x/%d/%d.%d): InitReq: %d\n",
- vcp->fid.Cell, vcp->fid.Fid.Volume,
- vcp->fid.Fid.Vnode, vcp->fid.Fid.Unique, code);
+ vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
+ vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique, code);
#endif
dp = ERR_PTR(-ENOENT);
goto done;
if (code) {
#ifdef OSI_EXPORT_DEBUG
printk("afs: get_parent(0x%08x/%d/%d.%d): update_dir_parent: %d\n",
- vcp->fid.Cell, vcp->fid.Fid.Volume,
- vcp->fid.Fid.Vnode, vcp->fid.Fid.Unique, code);
+ vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
+ vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique, code);
#endif
dp = ERR_PTR(-ENOENT);
goto done;
}
}
- tfid.Cell = vcp->fid.Cell;
- tfid.Fid.Volume = vcp->fid.Fid.Volume;
- tfid.Fid.Vnode = vcp->parentVnode;
- tfid.Fid.Unique = vcp->parentUnique;
+ tfid.Cell = vcp->f.fid.Cell;
+ tfid.Fid.Volume = vcp->f.fid.Fid.Volume;
+ tfid.Fid.Vnode = vcp->f.parent.vnode;
+ tfid.Fid.Unique = vcp->f.parent.unique;
}
#ifdef OSI_EXPORT_DEBUG
printk("afs: get_parent(0x%08x/%d/%d.%d): => 0x%08x/%d/%d.%d\n",
- vcp->fid.Cell, vcp->fid.Fid.Volume,
- vcp->fid.Fid.Vnode, vcp->fid.Fid.Unique,
+ vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
+ vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique,
tfid.Cell, tfid.Fid.Volume, tfid.Fid.Vnode, tfid.Fid.Unique);
#endif
if (!dp) {
#ifdef OSI_EXPORT_DEBUG
printk("afs: get_parent(0x%08x/%d/%d.%d): no dentry\n",
- vcp->fid.Cell, vcp->fid.Fid.Volume,
- vcp->fid.Fid.Vnode, vcp->fid.Fid.Unique);
+ vcp->f.fid.Cell, vcp->f.fid.Fid.Volume,
+ vcp->f.fid.Fid.Vnode, vcp->f.fid.Fid.Unique);
#endif
dp = ERR_PTR(-ENOENT);
}
register struct vcache *tvp = 0;
AFS_STATCNT(afs_root);
- if (afs_globalVp && (afs_globalVp->states & CStatd)) {
+ if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
tvp = afs_globalVp;
} else {
cred_t *credp = crref();
{
struct inode *ip = AFSTOV(avc);
- if (!avc->states & CPageWrite)
- avc->states |= CPageWrite;
+ if (!(avc->f.states & CPageWrite))
+ avc->f.states |= CPageWrite;
else
return; /* someone already writing */
AFS_GLOCK();
ObtainWriteLock(&avc->lock, 121);
#endif
- avc->states &= ~CPageWrite;
+ avc->f.states &= ~CPageWrite;
}
/* Purge VM for a file when its callback is revoked.
* 1. The cache data is being fetched by another process.
* 2. The cache data is no longer valid
*/
- while ((avc->states & CStatd)
+ while ((avc->f.states & CStatd)
&& (tdc->dflags & DFFetching)
- && hsame(avc->m.DataVersion, tdc->f.versionNo)) {
+ && hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
ReleaseReadLock(&tdc->lock);
ReleaseSharedLock(&avc->lock);
afs_osi_Sleep(&tdc->validPos);
ObtainSharedLock(&avc->lock, 812);
ObtainReadLock(&tdc->lock);
}
- if (!(avc->states & CStatd)
- || !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
+ if (!(avc->f.states & CStatd)
+ || !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
ReleaseReadLock(&tdc->lock);
ReleaseSharedLock(&avc->lock);
afs_PutDCache(tdc);
/* Set the readdir-in-progress flag, and downgrade the lock
* to shared so others will be able to acquire a read lock.
*/
- avc->states |= CReadDir;
+ avc->f.states |= CReadDir;
avc->dcreaddir = tdc;
avc->readdir_pid = MyPidxx;
ConvertWToSLock(&avc->lock);
if (!de)
break;
- ino = afs_calc_inum (avc->fid.Fid.Volume, ntohl(de->fid.vnode));
+ ino = afs_calc_inum (avc->f.fid.Fid.Volume, ntohl(de->fid.vnode));
if (de->name)
len = strlen(de->name);
struct VenusFid afid;
struct vcache *tvc;
int vtype;
- afid.Cell = avc->fid.Cell;
- afid.Fid.Volume = avc->fid.Fid.Volume;
+ afid.Cell = avc->f.fid.Cell;
+ afid.Fid.Volume = avc->f.fid.Fid.Volume;
afid.Fid.Vnode = ntohl(de->fid.vnode);
afid.Fid.Unique = ntohl(de->fid.vunique);
- if ((avc->states & CForeign) == 0 && (ntohl(de->fid.vnode) & 1)) {
+ if ((avc->f.states & CForeign) == 0 && (ntohl(de->fid.vnode) & 1)) {
type = DT_DIR;
} else if ((tvc = afs_FindVCache(&afid, 0, 0))) {
if (tvc->mvstat) {
type = DT_DIR;
- } else if (((tvc->states) & (CStatd | CTruth))) {
+ } else if (((tvc->f.states) & (CStatd | CTruth))) {
/* CTruth will be set if the object has
*ever* been statd */
vtype = vType(tvc);
ReleaseReadLock(&tdc->lock);
afs_PutDCache(tdc);
UpgradeSToWLock(&avc->lock, 813);
- avc->states &= ~CReadDir;
+ avc->f.states &= ~CReadDir;
avc->dcreaddir = 0;
avc->readdir_pid = 0;
ReleaseSharedLock(&avc->lock);
code = generic_file_mmap(fp, vmap);
AFS_GLOCK();
if (!code)
- vcp->states |= CMAPPED;
+ vcp->f.states |= CMAPPED;
out:
AFS_GUNLOCK();
struct vcache *vcp = VTOAFS(dp->d_inode), *avc = NULL;
struct vcache *pvc = VTOAFS(dp->d_parent->d_inode);
- if (vcp->mvid->Fid.Volume != pvc->fid.Fid.Volume) { /* bad parent */
+ if (vcp->mvid->Fid.Volume != pvc->f.fid.Fid.Volume) { /* bad parent */
credp = crref();
/* force a lookup, so vcp->mvid is fixed up */
#ifdef notyet
/* Make this a fast path (no crref), since it's called so often. */
- if (vcp->states & CStatd) {
+ if (vcp->f.states & CStatd) {
if (*dp->d_name.name != '/' && vcp->mvstat == 2) /* root vnode */
check_bad_parent(dp); /* check and correct mvid */
goto good_dentry;
if (vcp->mvstat == 1) { /* mount point */
- if (vcp->mvid && (vcp->states & CMValid)) {
+ if (vcp->mvid && (vcp->f.states & CMValid)) {
int tryEvalOnly = 0;
int code = 0;
struct vrequest treq;
* isn't enough since the vnode may have been renamed.
*/
- if (hgetlo(pvcp->m.DataVersion) > dp->d_time || !(vcp->states & CStatd)) {
+ if (hgetlo(pvcp->f.m.DataVersion) > dp->d_time || !(vcp->f.states & CStatd)) {
credp = crref();
afs_lookup(pvcp, dp->d_name.name, &tvc, credp);
goto bad_dentry;
vattr2inode(AFSTOV(vcp), &vattr);
- dp->d_time = hgetlo(pvcp->m.DataVersion);
+ dp->d_time = hgetlo(pvcp->f.m.DataVersion);
}
/* should we always update the attributes at this point? */
} else {
#ifdef notyet
pvcp = VTOAFS(dp->d_parent->d_inode); /* dget_parent()? */
- if (hgetlo(pvcp->m.DataVersion) > dp->d_time)
+ if (hgetlo(pvcp->f.m.DataVersion) > dp->d_time)
goto bad_dentry;
#endif
struct vcache *vcp = VTOAFS(ip);
AFS_GLOCK();
- if (!AFS_IS_DISCONNECTED || (vcp->states & CUnlinked)) {
+ if (!AFS_IS_DISCONNECTED || (vcp->f.states & CUnlinked)) {
(void) afs_InactiveVCache(vcp, NULL);
}
AFS_GUNLOCK();
static int
afs_dentry_delete(struct dentry *dp)
{
- if (dp->d_inode && (VTOAFS(dp->d_inode)->states & CUnlinked))
+ if (dp->d_inode && (VTOAFS(dp->d_inode)->f.states & CUnlinked))
return 1; /* bad inode? */
return 0;
afs_fill_inode(ip, &vattr);
insert_inode_hash(ip);
dp->d_op = &afs_dentry_operations;
- dp->d_time = hgetlo(VTOAFS(dip)->m.DataVersion);
+ dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
d_instantiate(dp, ip);
}
AFS_GUNLOCK();
insert_inode_hash(ip);
}
dp->d_op = &afs_dentry_operations;
- dp->d_time = hgetlo(VTOAFS(dip)->m.DataVersion);
+ dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
AFS_GUNLOCK();
#if defined(AFS_LINUX24_ENV)
maybe_lock_kernel();
#endif
if (VREFCOUNT(tvc) > 1 && tvc->opens > 0
- && !(tvc->states & CUnlinked)) {
+ && !(tvc->f.states & CUnlinked)) {
struct dentry *__dp;
char *__name;
crfree(tvc->uncred);
}
tvc->uncred = credp;
- tvc->states |= CUnlinked;
+ tvc->f.states |= CUnlinked;
#ifdef DCACHE_NFSFS_RENAMED
#ifdef AFS_LINUX26_ENV
spin_lock(&dp->d_lock);
AFS_GUNLOCK();
if (!code) {
- __dp->d_time = hgetlo(VTOAFS(dip)->m.DataVersion);
+ __dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
d_move(dp, __dp);
}
dput(__dp);
afs_fill_inode(ip, &vattr);
dp->d_op = &afs_dentry_operations;
- dp->d_time = hgetlo(VTOAFS(dip)->m.DataVersion);
+ dp->d_time = hgetlo(VTOAFS(dip)->f.m.DataVersion);
d_instantiate(dp, ip);
}
AFS_GUNLOCK();
ICL_TYPE_INT32, 99999);
ObtainReadLock(&vcp->lock);
- if (vcp->states & CPageWrite) {
+ if (vcp->f.states & CPageWrite) {
ReleaseReadLock(&vcp->lock);
AFS_GUNLOCK();
maybe_unlock_kernel();
code = afs_write(vcp, &tuio, f_flags, credp, 0);
- i_size_write(ip, vcp->m.Length);
- ip->i_blocks = ((vcp->m.Length + 1023) >> 10) << 1;
+ i_size_write(ip, vcp->f.m.Length);
+ ip->i_blocks = ((vcp->f.m.Length + 1023) >> 10) << 1;
if (!code) {
struct vrequest treq;
code = afs_write(vcp, &tuio, fp->f_flags, credp, 0);
- i_size_write(ip, vcp->m.Length);
- ip->i_blocks = ((vcp->m.Length + 1023) >> 10) << 1;
+ i_size_write(ip, vcp->f.m.Length);
+ ip->i_blocks = ((vcp->f.m.Length + 1023) >> 10) << 1;
if (!code) {
struct vrequest treq;
ObtainWriteLock(&vcp->lock, 533);
- vcp->m.Date = osi_Time(); /* set modification time */
+ vcp->f.m.Date = osi_Time(); /* set modification time */
if (!afs_InitReq(&treq, credp))
code = afs_DoPartialWrite(vcp, &treq);
ReleaseWriteLock(&vcp->lock);
AFS_GLOCK();
AFS_STATCNT(afs_root);
- if (afs_globalVp && (afs_globalVp->states & CStatd)) {
+ if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
tvp = afs_globalVp;
} else {
if (afs_globalVp) {
if (afs_NFSRootOnly && (avc == afs_globalVp))
rootvp = 1;
if (!afs_NFSRootOnly || rootvp) {
- tcell = afs_GetCell(avc->fid.Cell, READ_LOCK);
- Sfid.Volume = avc->fid.Fid.Volume;
- fidp->fid_reserved = avc->fid.Fid.Vnode;
+ tcell = afs_GetCell(avc->f.fid.Cell, READ_LOCK);
+ Sfid.Volume = avc->f.fid.Fid.Volume;
+ fidp->fid_reserved = avc->f.fid.Fid.Vnode;
Sfid.CellAndUnique =
- ((tcell->cellIndex << 24) + (avc->fid.Fid.Unique & 0xffffff));
+ ((tcell->cellIndex << 24) + (avc->f.fid.Fid.Unique & 0xffffff));
afs_PutCell(tcell, READ_LOCK);
- if (avc->fid.Fid.Vnode > 0xffff)
+ if (avc->f.fid.Fid.Vnode > 0xffff)
afs_fid_vnodeoverflow++;
- if (avc->fid.Fid.Unique > 0xffffff)
+ if (avc->f.fid.Fid.Unique > 0xffffff)
afs_fid_uniqueoverflow++;
} else {
fidp->fid_reserved = AFS_XLATOR_MAGIC;
AFS_GLOCK();
error = afs_rdwr(avc, uio, UIO_READ, 0, acred);
afs_Trace3(afs_iclSetp, CM_TRACE_PAGE_READ, ICL_TYPE_POINTER, avc,
- ICL_TYPE_INT32, error, ICL_TYPE_INT32, avc->states);
+ ICL_TYPE_INT32, error, ICL_TYPE_INT32, avc->f.states);
if (error) {
error = EIO;
- } else if ((avc->states) == 0) {
+ } else if ((avc->f.states) == 0) {
afs_InitReq(&treq, acred);
ObtainWriteLock(&avc->lock, 161);
afs_Wire(avc, &treq);
AFS_GLOCK();
error = afs_rdwr(avc, uio, UIO_WRITE, 0, acred);
afs_Trace3(afs_iclSetp, CM_TRACE_PAGE_WRITE, ICL_TYPE_POINTER, avc,
- ICL_TYPE_INT32, error, ICL_TYPE_INT32, avc->states);
+ ICL_TYPE_INT32, error, ICL_TYPE_INT32, avc->f.states);
if (error) {
error = EIO;
}
ObtainWriteLock(&avc->lock, 162);
/* adjust parameters when appending files */
if ((ioflag & IO_APPEND) && uio->uio_rw == UIO_WRITE)
- uio->uio_offset = avc->m.Length; /* write at EOF position */
+ uio->uio_offset = avc->f.m.Length; /* write at EOF position */
if (uio->uio_rw == UIO_WRITE) {
- avc->states |= CDirty;
+ avc->f.states |= CDirty;
afs_FakeOpen(avc);
didFakeOpen = 1;
/*
* the I/O.
*/
size = uio->afsio_resid + uio->afsio_offset; /* new file size */
- if (size > avc->m.Length)
- avc->m.Length = size; /* file grew */
- avc->m.Date = osi_Time(); /* Set file date (for ranlib) */
+ if (size > avc->f.m.Length)
+ avc->f.m.Length = size; /* file grew */
+ avc->f.m.Date = osi_Time(); /* Set file date (for ranlib) */
if (uio->afsio_resid > PAGE_SIZE)
cnt = uio->afsio_resid / PAGE_SIZE;
save_resid = uio->afsio_resid;
eof = 0; /* flag telling us if we hit the EOF on the read */
if (uio->uio_rw == UIO_READ) { /* we're doing a read operation */
/* don't read past EOF */
- if (tsize + fileBase > avc->m.Length) {
- tsize = avc->m.Length - fileBase;
+ if (tsize + fileBase > avc->f.m.Length) {
+ tsize = avc->f.m.Length - fileBase;
eof = 1; /* we did hit the EOF */
if (tsize < 0)
tsize = 0; /* better safe than sorry */
if (uio->uio_rw == UIO_WRITE && counter > 0
&& AFS_CHUNKOFFSET(fileBase) == 0) {
code = afs_DoPartialWrite(avc, &treq);
- avc->states |= CDirty;
+ avc->f.states |= CDirty;
}
if (code) {
if ((uio->uio_rw == UIO_WRITE)
&&
((pageOffset == 0
- && (size == PAGE_SIZE || fileBase >= avc->m.Length)))) {
+ && (size == PAGE_SIZE || fileBase >= avc->f.m.Length)))) {
struct vnode *vp = (struct vnode *)avc;
/* we're doing a write operation past eof; no need to read it */
newpage = 1;
}
if (didFakeOpen)
afs_FakeClose(avc, cred);
- if (uio->uio_rw == UIO_WRITE && code == 0 && (avc->states & CDirty)) {
+ if (uio->uio_rw == UIO_WRITE && code == 0 && (avc->f.states & CDirty)) {
code = afs_DoPartialWrite(avc, &treq);
}
ReleaseWriteLock(&avc->lock);
}
osi_FlushPages(avc); /* ensure old pages are gone */
ObtainWriteLock(&avc->lock, 166);
- avc->states |= CMAPPED;
+ avc->f.states |= CMAPPED;
ReleaseWriteLock(&avc->lock);
ap->a_offset = offset;
ap->a_vaddr = addrp;
goto out;
}
if (flags & B_NOCACHE) { /* if (page) */
- if ((rw & B_WRITE) && (offset + len >= avc->m.Length)) {
+ if ((rw & B_WRITE) && (offset + len >= avc->f.m.Length)) {
struct vnode *vp = (struct vnode *)avc;
/* we're doing a write operation past eof; no need to read it */
AFS_GUNLOCK();
done:
ReleaseWriteLock(&avc->lock);
afs_Trace2(afs_iclSetp, CM_TRACE_PAGEOUTDONE, ICL_TYPE_INT32, code,
- ICL_TYPE_INT32, avc->m.Length);
+ ICL_TYPE_INT32, avc->f.m.Length);
AFS_GUNLOCK();
return code;
}
struct vnode *vp = ap->a_vp;
struct vcache *vc = VTOAFS(ap->a_vp);
- printf("tag %d, fid: %d.%x.%x.%x, ", vp->v_tag, vc->fid.Cell,
- (int)vc->fid.Fid.Volume, (int)vc->fid.Fid.Vnode,
- (int)vc->fid.Fid.Unique);
+ printf("tag %d, fid: %d.%x.%x.%x, ", vp->v_tag, vc->f.fid.Cell,
+ (int)vc->f.fid.Fid.Volume, (int)vc->f.fid.Fid.Vnode,
+ (int)vc->f.fid.Fid.Unique);
lockmgr_printinfo(&vc->rwlock);
printf("\n");
return 0;
AFS_STATCNT(afs_root);
again:
- if (afs_globalVp && (afs_globalVp->states & CStatd)) {
+ if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
tvp = afs_globalVp;
} else {
if (MUTEX_HELD(&vp->v_lock)) {
ObtainReadLock(&tdc->lock);
/* Check to see whether the cache entry is still valid */
- if (!(avc->states & CStatd)
- || !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
+ if (!(avc->f.states & CStatd)
+ || !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&avc->lock);
afs_BozonUnlock(&avc->pvnLock, avc);
if (len) {
endPos = (afs_offs_t) off + len; /* position we're supposed to write up to */
while ((afs_offs_t) toff < endPos
- && (afs_offs_t) toff < avc->m.Length) {
+ && (afs_offs_t) toff < avc->f.m.Length) {
/* If not invalidating pages use page_lookup_nowait to avoid reclaiming
* them from the free list
*/
* XXX Find a kluster that fits in one block (or page). We also
* adjust the i/o if the file space is less than a while page. XXX
*/
- if (off + tlen > avc->m.Length) {
- tlen = avc->m.Length - off;
+ if (off + tlen > avc->f.m.Length) {
+ tlen = avc->f.m.Length - off;
}
/* can't call mapout with 0 length buffers (rmfree panics) */
if (((tlen >> 24) & 0xff) == 0xff) {
/* adjust parameters when appending files */
if ((ioflag & IO_APPEND) && arw == UIO_WRITE) {
#if defined(AFS_SUN56_ENV)
- auio->uio_loffset = avc->m.Length; /* write at EOF position */
+ auio->uio_loffset = avc->f.m.Length; /* write at EOF position */
#else
- auio->uio_offset = avc->m.Length; /* write at EOF position */
+ auio->uio_offset = avc->f.m.Length; /* write at EOF position */
#endif
}
if (auio->afsio_offset < 0 || (auio->afsio_offset + auio->uio_resid) < 0) {
* to hold the results (since afs_putpage will be called to force the I/O */
size = auio->afsio_resid + auio->afsio_offset; /* new file size */
appendLength = size;
- origLength = avc->m.Length;
- if (size > avc->m.Length) {
+ origLength = avc->f.m.Length;
+ if (size > avc->f.m.Length) {
afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
__FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
+ ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
ICL_HANDLE_OFFSET(size));
- avc->m.Length = size; /* file grew */
+ avc->f.m.Length = size; /* file grew */
}
- avc->states |= CDirty; /* Set the dirty bit */
- avc->m.Date = osi_Time(); /* Set file date (for ranlib) */
+ avc->f.states |= CDirty; /* Set the dirty bit */
+ avc->f.m.Date = osi_Time(); /* Set file date (for ranlib) */
} else {
mode = S_READ; /* map-in read-only */
- origLength = avc->m.Length;
+ origLength = avc->f.m.Length;
}
if (acred && AFS_NFSXLATORREQ(acred)) {
break; /* nothing to transfer, we're done */
}
if (arw == UIO_WRITE)
- avc->states |= CDirty; /* may have been cleared by DoPartialWrite */
+ avc->f.states |= CDirty; /* may have been cleared by DoPartialWrite */
/* Before dropping lock, hold the chunk (create it if necessary). This
* serves two purposes: (1) Ensure Cache Truncate Daemon doesn't try
AFS_GLOCK();
dcp_newpage = afs_FindDCache(avc, pageBase);
if (dcp_newpage
- && hsame(avc->m.DataVersion, dcp_newpage->f.versionNo)) {
+ && hsame(avc->f.m.DataVersion, dcp_newpage->f.versionNo)) {
ObtainWriteLock(&avc->lock, 251);
ObtainWriteLock(&avc->vlock, 576);
ObtainReadLock(&dcp_newpage->lock);
if ((avc->activeV == 0)
- && hsame(avc->m.DataVersion, dcp_newpage->f.versionNo)
+ && hsame(avc->f.m.DataVersion, dcp_newpage->f.versionNo)
&& !(dcp_newpage->dflags & (DFFetching))) {
AFS_GUNLOCK();
segmap_pagecreate(segkmap, raddr, rsize, 1);
if (didFakeOpen) {
afs_FakeClose(avc, acred);
}
- if (arw == UIO_WRITE && (avc->states & CDirty)) {
+ if (arw == UIO_WRITE && (avc->f.states & CDirty)) {
code2 = afs_DoPartialWrite(avc, &treq);
if (!code)
code = code2;
}
afs_BozonLock(&avc->pvnLock, avc);
osi_FlushPages(avc, cred); /* ensure old pages are gone */
- avc->states |= CMAPPED; /* flag cleared at afs_inactive */
+ avc->f.states |= CMAPPED; /* flag cleared at afs_inactive */
afs_BozonUnlock(&avc->pvnLock, avc);
AFS_GUNLOCK();
* lose the open count for volume roots (mvstat 2), even though they
* will get VOP_INACTIVE'd when released by afs_PutFakeStat().
*/
- if (avc->opens > 0 && avc->mvstat == 0 && !(avc->states & CCore))
+ if (avc->opens > 0 && avc->mvstat == 0 && !(avc->f.states & CCore))
avc->opens = avc->execsOrWriters = 0;
afs_InactiveVCache(avc, acred);
osi_FlushPages(register struct vcache *avc, struct AFS_UCRED *credp)
{
ObtainSharedLock(&avc->lock, 555);
- if ((hcmp((avc->m.DataVersion), (avc->mapDV)) <= 0)
+ if ((hcmp((avc->f.m.DataVersion), (avc->mapDV)) <= 0)
|| ((avc->execsOrWriters > 0) && afs_DirtyPages(avc))) {
ReleaseSharedLock(&avc->lock);
return;
}
UpgradeSToWLock(&avc->lock, 565);
- hset(avc->mapDV, avc->m.DataVersion);
+ hset(avc->mapDV, avc->f.m.DataVersion);
ReleaseWriteLock(&avc->lock);
return;
}
void
osi_FlushText_really(register struct vcache *vp)
{
- if (hcmp(vp->m.DataVersion, vp->flushDV) > 0) {
- hset(vp->flushDV, vp->m.DataVersion);
+ if (hcmp(vp->f.m.DataVersion, vp->flushDV) > 0) {
+ hset(vp->flushDV, vp->f.m.DataVersion);
}
return;
}
OSI_VFS_CONVERT(afsp);
AFS_STATCNT(afs_root);
- if (afs_globalVp && (afs_globalVp->states & CStatd)) {
+ if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
tvp = afs_globalVp;
} else {
if (afs_globalVp) {
{
AFS_STATCNT(afs_GetAccessBits);
/* see if anyuser has the required access bits */
- if ((arights & avc->anyAccess) == arights) {
+ if ((arights & avc->f.anyAccess) == arights) {
return arights;
}
}
}
- if (!(avc->states & CForeign)) {
+ if (!(avc->f.states & CForeign)) {
/* If there aren't any bits cached for this user (but the vnode
* _is_ cached, obviously), make sure this user has valid tokens
* before bothering with the RPC. */
struct unixuser *tu;
- tu = afs_FindUser(areq->uid, avc->fid.Cell, READ_LOCK);
+ tu = afs_FindUser(areq->uid, avc->f.fid.Cell, READ_LOCK);
if (!tu) {
- return (arights & avc->anyAccess);
+ return (arights & avc->f.anyAccess);
}
if ((tu->vid == UNDEFVID) || !(tu->states & UHasTokens)
|| (tu->states & UTokensBad)) {
afs_PutUser(tu, READ_LOCK);
- return (arights & avc->anyAccess);
+ return (arights & avc->f.anyAccess);
} else {
afs_PutUser(tu, READ_LOCK);
}
struct AFSFetchStatus OutStatus;
afs_int32 code;
- code = afs_FetchStatus(avc, &avc->fid, areq, &OutStatus);
+ code = afs_FetchStatus(avc, &avc->f.fid, areq, &OutStatus);
return (code ? 0 : OutStatus.CallerAccess & arights);
}
}
AFS_STATCNT(afs_AccessOK);
- if ((vType(avc) == VDIR) || (avc->states & CForeign)) {
+ if ((vType(avc) == VDIR) || (avc->f.states & CForeign)) {
/* rights are just those from acl */
if (afs_InReadDir(avc)) {
/* if we are already in readdir, then they may have read and
* rights for free. These rights will then be restricted by
* the access mask. */
dirBits = 0;
- if (avc->parentVnode) {
- dirFid.Cell = avc->fid.Cell;
- dirFid.Fid.Volume = avc->fid.Fid.Volume;
- dirFid.Fid.Vnode = avc->parentVnode;
- dirFid.Fid.Unique = avc->parentUnique;
+ if (avc->f.parent.vnode) {
+ dirFid.Cell = avc->f.fid.Cell;
+ dirFid.Fid.Volume = avc->f.fid.Fid.Volume;
+ dirFid.Fid.Vnode = avc->f.parent.vnode;
+ dirFid.Fid.Unique = avc->f.parent.unique;
/* Avoid this GetVCache call */
tvc = afs_GetVCache(&dirFid, areq, NULL, NULL);
if (tvc) {
* NFS translator and we don't know if it's a read or execute
* on the NFS client, but both need to read the data.
*/
- mask = (avc->m.Mode & 0700) >> 6; /* file restrictions to use */
+ mask = (avc->f.m.Mode & 0700) >> 6; /* file restrictions to use */
fileBits &= ~fileModeMap[mask];
if (check_mode_bits & CMB_ALLOW_EXEC_AS_READ) {
- if (avc->m.Mode & 0100)
+ if (avc->f.m.Mode & 0100)
fileBits |= PRSFS_READ;
}
}
AFS_STATCNT(afs_access);
afs_Trace3(afs_iclSetp, CM_TRACE_ACCESS, ICL_TYPE_POINTER, avc,
ICL_TYPE_INT32, amode, ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(avc->m.Length));
+ ICL_HANDLE_OFFSET(avc->f.m.Length));
afs_InitFakeStat(&fakestate);
if ((code = afs_InitReq(&treq, acred)))
return code;
}
/* if we're looking for write access and we have a read-only file system, report it */
- if ((amode & VWRITE) && (avc->states & CRO)) {
+ if ((amode & VWRITE) && (avc->f.states & CRO)) {
afs_PutFakeStat(&fakestate);
AFS_DISCON_UNLOCK();
return EROFS;
}
code = 1; /* Default from here on in is access ok. */
- if (avc->states & CForeign) {
+ if (avc->f.states & CForeign) {
/* In the dfs xlator the EXEC bit is mapped to LOOKUP */
if (amode & VEXEC)
code = afs_AccessOK(avc, PRSFS_LOOKUP, &treq, CHECK_MODE_BITS);
*/
if (!((amode & VREAD) && AFS_NFSXLATORREQ(acred)))
#endif
- if ((avc->m.Mode & 0100) == 0)
+ if ((avc->f.m.Mode & 0100) == 0)
code = 0;
- } else if (avc->m.Mode & 0100)
+ } else if (avc->f.m.Mode & 0100)
code = 1;
}
if (code && (amode & VWRITE)) {
** call returns failure. hence, we retry without any file
** mode bit checking */
if (!code && AFS_NFSXLATORREQ(acred)
- && avc->m.Owner == ANONYMOUSID)
+ && avc->f.m.Owner == ANONYMOUSID)
code =
afs_AccessOK(avc, PRSFS_WRITE, &treq,
DONT_CHECK_MODE_BITS);
fakedir = 1;
attrs->va_type = fakedir ? VDIR : vType(avc);
#if defined(AFS_SGI_ENV) || defined(AFS_AIX32_ENV) || defined(AFS_SUN5_ENV)
- attrs->va_mode = fakedir ? 0755 : (mode_t) (avc->m.Mode & 0xffff);
+ attrs->va_mode = fakedir ? 0755 : (mode_t) (avc->f.m.Mode & 0xffff);
#else
- attrs->va_mode = fakedir ? VDIR | 0755 : avc->m.Mode;
+ attrs->va_mode = fakedir ? VDIR | 0755 : avc->f.m.Mode;
#endif
- if (avc->m.Mode & (VSUID | VSGID)) {
+ if (avc->f.m.Mode & (VSUID | VSGID)) {
/* setuid or setgid, make sure we're allowed to run them from this cell */
- tcell = afs_GetCell(avc->fid.Cell, 0);
+ tcell = afs_GetCell(avc->f.fid.Cell, 0);
if (tcell && (tcell->states & CNoSUID))
attrs->va_mode &= ~(VSUID | VSGID);
}
}
}
#endif /* AFS_DARWIN_ENV */
- attrs->va_uid = fakedir ? 0 : avc->m.Owner;
- attrs->va_gid = fakedir ? 0 : avc->m.Group; /* yeah! */
+ attrs->va_uid = fakedir ? 0 : avc->f.m.Owner;
+ attrs->va_gid = fakedir ? 0 : avc->f.m.Group; /* yeah! */
#if defined(AFS_SUN56_ENV)
attrs->va_fsid = avc->v.v_vfsp->vfs_fsid.val[0];
#elif defined(AFS_OSF_ENV)
attrs->va_fsid = 1;
#endif
if (avc->mvstat == 2) {
- tvp = afs_GetVolume(&avc->fid, 0, READ_LOCK);
+ tvp = afs_GetVolume(&avc->f.fid, 0, READ_LOCK);
/* The mount point's vnode. */
if (tvp) {
attrs->va_nodeid =
afs_calc_inum (tvp->mtpoint.Fid.Volume,
tvp->mtpoint.Fid.Vnode);
- if (FidCmp(&afs_rootFid, &avc->fid) && !attrs->va_nodeid)
+ if (FidCmp(&afs_rootFid, &avc->f.fid) && !attrs->va_nodeid)
attrs->va_nodeid = 2;
afs_PutVolume(tvp, READ_LOCK);
} else
attrs->va_nodeid = 2;
} else
attrs->va_nodeid =
- afs_calc_inum (avc->fid.Fid.Volume,
- avc->fid.Fid.Vnode);
+ afs_calc_inum (avc->f.fid.Fid.Volume,
+ avc->f.fid.Fid.Vnode);
attrs->va_nodeid &= 0x7fffffff; /* Saber C hates negative inode #s! */
- attrs->va_nlink = fakedir ? 100 : avc->m.LinkCount;
- attrs->va_size = fakedir ? 4096 : avc->m.Length;
+ attrs->va_nlink = fakedir ? 100 : avc->f.m.LinkCount;
+ attrs->va_size = fakedir ? 4096 : avc->f.m.Length;
attrs->va_atime.tv_sec = attrs->va_mtime.tv_sec = attrs->va_ctime.tv_sec =
- fakedir ? 0 : (int)avc->m.Date;
+ fakedir ? 0 : (int)avc->f.m.Date;
/* set microseconds to be dataversion # so that we approximate NFS-style
* use of mtime as a dataversion #. We take it mod 512K because
* microseconds *must* be less than a million, and 512K is the biggest
attrs->va_atime.tv_nsec = attrs->va_mtime.tv_nsec =
attrs->va_ctime.tv_nsec = 0;
- attrs->va_gen = hgetlo(avc->m.DataVersion);
+ attrs->va_gen = hgetlo(avc->f.m.DataVersion);
#elif defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_AIX41_ENV) || defined(AFS_OBSD_ENV)
attrs->va_atime.tv_nsec = attrs->va_mtime.tv_nsec =
attrs->va_ctime.tv_nsec =
- (hgetlo(avc->m.DataVersion) & 0x7ffff) * 1000;
+ (hgetlo(avc->f.m.DataVersion) & 0x7ffff) * 1000;
#else
attrs->va_atime.tv_usec = attrs->va_mtime.tv_usec =
- attrs->va_ctime.tv_usec = (hgetlo(avc->m.DataVersion) & 0x7ffff);
+ attrs->va_ctime.tv_usec = (hgetlo(avc->f.m.DataVersion) & 0x7ffff);
#endif
#if defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV) || defined(AFS_OSF_ENV)
attrs->va_flags = 0;
AFS_STATCNT(afs_getattr);
afs_Trace2(afs_iclSetp, CM_TRACE_GETATTR, ICL_TYPE_POINTER, avc,
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
if (afs_fakestat_enable && avc->mvstat == 1) {
struct afs_fakestat_state fakestat;
}
#endif
#if defined(AFS_DARWIN_ENV) && !defined(AFS_DARWIN80_ENV)
- if (avc->states & CUBCinit) {
+ if (avc->f.states & CUBCinit) {
code = afs_CopyOutAttrs(avc, attrs);
return code;
}
if (afs_shuttingdown)
return EIO;
- if (!(avc->states & CStatd)) {
+ if (!(avc->f.states & CStatd)) {
if (!(code = afs_InitReq(&treq, acred))) {
code = afs_VerifyVCache2(avc, &treq);
inited = 1;
#endif
mask |= AFS_SETMODE;
as->UnixModeBits = av->va_mode & 0xffff;
- if (avc->states & CForeign) {
+ if (avc->f.states & CForeign) {
ObtainWriteLock(&avc->lock, 127);
afs_FreeAllAxs(&(avc->Access));
ReleaseWriteLock(&avc->lock);
return 0;
}
-/* We don't set CDirty bit in avc->states because setattr calls WriteVCache
+/* We don't set CDirty bit in avc->f.states because setattr calls WriteVCache
* synchronously, therefore, it's not needed.
*/
#if defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
afs_Trace4(afs_iclSetp, CM_TRACE_SETATTR, ICL_TYPE_POINTER, avc,
ICL_TYPE_INT32, attrs->va_mask, ICL_TYPE_OFFSET,
ICL_HANDLE_OFFSET(attrs->va_size), ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(avc->m.Length));
+ ICL_HANDLE_OFFSET(avc->f.m.Length));
#else
afs_Trace4(afs_iclSetp, CM_TRACE_SETATTR, ICL_TYPE_POINTER, avc,
ICL_TYPE_INT32, attrs->va_mode, ICL_TYPE_OFFSET,
ICL_HANDLE_OFFSET(attrs->va_size), ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(avc->m.Length));
+ ICL_HANDLE_OFFSET(avc->f.m.Length));
#endif
if ((code = afs_InitReq(&treq, acred)))
return code;
if (code)
goto done;
- if (avc->states & CRO) {
+ if (avc->f.states & CRO) {
code = EROFS;
goto done;
}
#endif
afs_size_t tsize = attrs->va_size;
ObtainWriteLock(&avc->lock, 128);
- avc->states |= CDirty;
+ avc->f.states |= CDirty;
- if (AFS_IS_DISCONNECTED && tsize >=avc->m.Length) {
+ if (AFS_IS_DISCONNECTED && tsize >=avc->f.m.Length) {
/* If we're growing the file, and we're disconnected, we need
* to make the relevant dcache chunks appear ourselves. */
code = afs_ExtendSegments(avc, tsize, &treq);
}
if (code == 0) {
- if (((avc->execsOrWriters <= 0) && (avc->states & CCreating) == 0)
+ if (((avc->execsOrWriters <= 0) && (avc->f.states & CCreating) == 0)
|| (avc->execsOrWriters == 1 && AFS_NFSXLATORREQ(acred))) {
/* Store files now if not disconnected. */
if (!AFS_IS_DISCONNECTED) {
code = afs_StoreAllSegments(avc, &treq, AFS_ASYNC);
if (!code)
- avc->states &= ~CDirty;
+ avc->f.states &= ~CDirty;
}
}
} else
- avc->states &= ~CDirty;
+ avc->f.states &= ~CDirty;
ReleaseWriteLock(&avc->lock);
hzero(avc->flushDV);
if (code) {
ObtainWriteLock(&afs_xcbhash, 487);
afs_DequeueCallback(avc);
- avc->states &= ~CStatd;
+ avc->f.states &= ~CStatd;
ReleaseWriteLock(&afs_xcbhash);
- if (avc->fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
+ if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
osi_dnlc_purgedp(avc);
/* error? erase any changes we made to vcache entry */
}
/** If the volume is read-only, return error without making an RPC to the
* fileserver
*/
- if (adp->states & CRO) {
+ if (adp->f.states & CRO) {
code = EROFS;
goto done;
}
* Make sure that the data in the cache is current. We may have
* received a callback while we were waiting for the write lock.
*/
- if (!(adp->states & CStatd)
- || (tdc && !hsame(adp->m.DataVersion, tdc->f.versionNo))) {
+ if (!(adp->f.states & CStatd)
+ || (tdc && !hsame(adp->f.m.DataVersion, tdc->f.versionNo))) {
ReleaseWriteLock(&adp->lock);
if (tdc) {
ReleaseSharedLock(&tdc->lock);
goto done;
}
/* found the file, so use it */
- newFid.Cell = adp->fid.Cell;
- newFid.Fid.Volume = adp->fid.Fid.Volume;
+ newFid.Cell = adp->f.fid.Cell;
+ newFid.Fid.Volume = adp->f.fid.Fid.Volume;
tvc = NULL;
if (newFid.Fid.Unique == 0) {
tvc = afs_LookupVCache(&newFid, &treq, NULL, adp, aname);
#endif
{
/* needed for write access check */
- tvc->parentVnode = adp->fid.Fid.Vnode;
- tvc->parentUnique = adp->fid.Fid.Unique;
+ tvc->f.parent.vnode = adp->f.fid.Fid.Vnode;
+ tvc->f.parent.unique = adp->f.fid.Fid.Unique;
/* need write mode for these guys */
if (!afs_AccessOK
(tvc, PRSFS_WRITE, &treq, CHECK_MODE_BITS)) {
#endif
attrs->va_size = len;
ObtainWriteLock(&tvc->lock, 136);
- tvc->states |= CCreating;
+ tvc->f.states |= CCreating;
ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SUN5_ENV) || defined(AFS_SGI_ENV)
#if defined(AFS_SGI64_ENV)
code = afs_setattr(tvc, attrs, acred);
#endif /* SUN5 || SGI */
ObtainWriteLock(&tvc->lock, 137);
- tvc->states &= ~CCreating;
+ tvc->f.states &= ~CCreating;
ReleaseWriteLock(&tvc->lock);
if (code) {
afs_PutVCache(tvc);
/* if we create the file, we don't do any access checks, since
* that's how O_CREAT is supposed to work */
- if (adp->states & CForeign) {
+ if (adp->f.states & CForeign) {
origCBs = afs_allCBs;
origZaps = afs_allZaps;
} else {
InStatus.UnixModeBits = attrs->va_mode & 0xffff; /* only care about protection bits */
do {
- tc = afs_Conn(&adp->fid, &treq, SHARED_LOCK);
+ tc = afs_Conn(&adp->f.fid, &treq, SHARED_LOCK);
if (tc) {
hostp = tc->srvr->server; /* remember for callback processing */
now = osi_Time();
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_CREATEFILE);
RX_AFS_GUNLOCK();
code =
- RXAFS_CreateFile(tc->id, (struct AFSFid *)&adp->fid.Fid,
+ RXAFS_CreateFile(tc->id, (struct AFSFid *)&adp->f.fid.Fid,
aname, &InStatus, (struct AFSFid *)
&newFid.Fid, &OutFidStatus, &OutDirStatus,
&CallBack, &tsync);
} else
code = -1;
} while (afs_Analyze
- (tc, code, &adp->fid, &treq, AFS_STATS_FS_RPCIDX_CREATEFILE,
+ (tc, code, &adp->f.fid, &treq, AFS_STATS_FS_RPCIDX_CREATEFILE,
SHARED_LOCK, NULL));
if ((code == EEXIST || code == UAEEXIST) &&
if (code < 0) {
ObtainWriteLock(&afs_xcbhash, 488);
afs_DequeueCallback(adp);
- adp->states &= ~CStatd;
+ adp->f.states &= ~CStatd;
ReleaseWriteLock(&afs_xcbhash);
osi_dnlc_purgedp(adp);
}
} else {
#if defined(AFS_DISCON_ENV)
/* Generate a fake FID for disconnected mode. */
- newFid.Cell = adp->fid.Cell;
- newFid.Fid.Volume = adp->fid.Fid.Volume;
+ newFid.Cell = adp->f.fid.Cell;
+ newFid.Fid.Volume = adp->f.fid.Fid.Volume;
afs_GenFakeFid(&newFid, VREG, 1);
#endif
} /* if (!AFS_IS_DISCON_RW) */
afs_PutDCache(tdc);
}
if (AFS_IS_DISCON_RW)
- adp->m.LinkCount++;
+ adp->f.m.LinkCount++;
- newFid.Cell = adp->fid.Cell;
- newFid.Fid.Volume = adp->fid.Fid.Volume;
+ newFid.Cell = adp->f.fid.Cell;
+ newFid.Fid.Volume = adp->f.fid.Fid.Volume;
ReleaseWriteLock(&adp->lock);
volp = afs_FindVolume(&newFid, READ_LOCK);
* would fail, since no call would be able to update the local vnode status after modifying
* a file on a file server. */
ObtainWriteLock(&afs_xvcache, 138);
- if (adp->states & CForeign)
+ if (adp->f.states & CForeign)
finalZaps = afs_allZaps; /* do this before calling newvcache */
else
finalZaps = afs_evenZaps; /* do this before calling newvcache */
ObtainWriteLock(&afs_xcbhash, 489);
finalCBs = afs_evenCBs;
/* add the callback in */
- if (adp->states & CForeign) {
- tvc->states |= CForeign;
+ if (adp->f.states & CForeign) {
+ tvc->f.states |= CForeign;
finalCBs = afs_allCBs;
}
if (origCBs == finalCBs && origZaps == finalZaps) {
- tvc->states |= CStatd; /* we've fake entire thing, so don't stat */
- tvc->states &= ~CBulkFetching;
+ tvc->f.states |= CStatd; /* we've fake entire thing, so don't stat */
+ tvc->f.states &= ~CBulkFetching;
if (!AFS_IS_DISCON_RW) {
tvc->cbExpires = CallBack.ExpirationTime;
afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), volp);
}
} else {
afs_DequeueCallback(tvc);
- tvc->states &= ~(CStatd | CUnique);
+ tvc->f.states &= ~(CStatd | CUnique);
tvc->callback = 0;
- if (tvc->fid.Fid.Vnode & 1 || (vType(tvc) == VDIR))
+ if (tvc->f.fid.Fid.Vnode & 1 || (vType(tvc) == VDIR))
osi_dnlc_purgedp(tvc);
}
ReleaseWriteLock(&afs_xcbhash);
afs_ProcessFS(tvc, &OutFidStatus, &treq);
}
- tvc->parentVnode = adp->fid.Fid.Vnode;
- tvc->parentUnique = adp->fid.Fid.Unique;
+ tvc->f.parent.vnode = adp->f.fid.Fid.Vnode;
+ tvc->f.parent.unique = adp->f.fid.Fid.Unique;
ReleaseWriteLock(&tvc->lock);
*avcp = tvc;
code = 0;
}
#ifdef AFS_OSF_ENV
if (!code && !strcmp(aname, "core"))
- tvc->states |= CCore1;
+ tvc->f.states |= CCore1;
#endif
afs_PutFakeStat(&fakestate);
hset64(avers, astat->dataVersionHigh, astat->DataVersion);
/* this *is* the version number, no matter what */
if (adc) {
- ok = (hsame(avc->m.DataVersion, adc->f.versionNo) && avc->callback
- && (avc->states & CStatd) && avc->cbExpires >= osi_Time());
+ ok = (hsame(avc->f.m.DataVersion, adc->f.versionNo) && avc->callback
+ && (avc->f.states & CStatd) && avc->cbExpires >= osi_Time());
} else {
ok = 0;
}
#endif
/* The bulk status code used the length as a sequence number. */
/* Don't update the vcache entry unless the stats are current. */
- if (avc->states & CStatd) {
- hset(avc->m.DataVersion, avers);
+ if (avc->f.states & CStatd) {
+ hset(avc->f.m.DataVersion, avers);
#ifdef AFS_64BIT_CLIENT
- FillInt64(avc->m.Length, astat->Length_hi, astat->Length);
+ FillInt64(avc->f.m.Length, astat->Length_hi, astat->Length);
#else /* AFS_64BIT_CLIENT */
- avc->m.Length = astat->Length;
+ avc->f.m.Length = astat->Length;
#endif /* AFS_64BIT_CLIENT */
- avc->m.Date = astat->ClientModTime;
+ avc->f.m.Date = astat->ClientModTime;
}
if (ok) {
/* we've been tracking things correctly */
ZapDCE(adc);
DZap(adc);
}
- if (avc->states & CStatd) {
+ if (avc->f.states & CStatd) {
osi_dnlc_purgedp(avc);
}
return 0;
/** If the volume is read-only, return error without making an RPC to the
* fileserver
*/
- if (adp->states & CRO) {
+ if (adp->f.states & CRO) {
code = EROFS;
goto done;
}
if (!AFS_IS_DISCON_RW) {
do {
- tc = afs_Conn(&adp->fid, &treq, SHARED_LOCK);
+ tc = afs_Conn(&adp->f.fid, &treq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_MAKEDIR);
now = osi_Time();
RX_AFS_GUNLOCK();
code =
RXAFS_MakeDir(tc->id,
- (struct AFSFid *)&adp->fid.Fid,
+ (struct AFSFid *)&adp->f.fid.Fid,
aname,
&InStatus,
(struct AFSFid *)&newFid.Fid,
} else
code = -1;
} while (afs_Analyze
- (tc, code, &adp->fid, &treq, AFS_STATS_FS_RPCIDX_MAKEDIR,
+ (tc, code, &adp->f.fid, &treq, AFS_STATS_FS_RPCIDX_MAKEDIR,
SHARED_LOCK, NULL));
if (code) {
if (code < 0) {
ObtainWriteLock(&afs_xcbhash, 490);
afs_DequeueCallback(adp);
- adp->states &= ~CStatd;
+ adp->f.states &= ~CStatd;
ReleaseWriteLock(&afs_xcbhash);
osi_dnlc_purgedp(adp);
}
/* We have the dir entry now, we can use it while disconnected. */
if (adp->mvid == NULL) {
/* If not mount point, generate a new fid. */
- newFid.Cell = adp->fid.Cell;
- newFid.Fid.Volume = adp->fid.Fid.Volume;
+ newFid.Cell = adp->f.fid.Cell;
+ newFid.Fid.Volume = adp->f.fid.Fid.Volume;
afs_GenFakeFid(&newFid, VDIR, 1);
}
/* XXX: If mount point???*/
if (AFS_IS_DISCON_RW)
/* We will have to settle with the local link count. */
- adp->m.LinkCount++;
+ adp->f.m.LinkCount++;
else
- adp->m.LinkCount = OutDirStatus.LinkCount;
- newFid.Cell = adp->fid.Cell;
- newFid.Fid.Volume = adp->fid.Fid.Volume;
+ adp->f.m.LinkCount = OutDirStatus.LinkCount;
+ newFid.Cell = adp->f.fid.Cell;
+ newFid.Fid.Volume = adp->f.fid.Fid.Volume;
ReleaseWriteLock(&adp->lock);
if (AFS_IS_DISCON_RW) {
#if defined(AFS_DISCON_ENV)
ObtainWriteLock(&afs_xdcache, 739);
code = afs_dir_MakeDir(new_dc,
- (afs_int32 *) &newFid.Fid,
- (afs_int32 *) &adp->fid.Fid);
+ (afs_int32 *) &newFid.Fid,
+ (afs_int32 *) &adp->f.fid.Fid);
ReleaseWriteLock(&afs_xdcache);
if (code)
printf("afs_mkdir: afs_dirMakeDir code = %u\n", code);
afs_PutDCache(new_dc);
ObtainWriteLock(&tvc->lock, 731);
- afs_DisconAddDirty(tvc, VDisconCreate, 1);
/* Update length in the vcache. */
- tvc->m.Length = new_dc->f.chunkBytes;
+ tvc->f.m.Length = new_dc->f.chunkBytes;
+
+ afs_DisconAddDirty(tvc, VDisconCreate, 1);
ReleaseWriteLock(&tvc->lock);
#endif /* #ifdef AFS_DISCON_ENV */
} else {
/** If the volume is read-only, return error without making an RPC to the
* fileserver
*/
- if (adp->states & CRO) {
+ if (adp->f.states & CRO) {
code = EROFS;
goto done;
}
ObtainWriteLock(&adp->lock, 154);
if (tdc)
ObtainSharedLock(&tdc->lock, 633);
- if (tdc && (adp->states & CForeign)) {
+ if (tdc && (adp->f.states & CForeign)) {
struct VenusFid unlinkFid;
unlinkFid.Fid.Vnode = 0;
if (code == 0) {
afs_int32 cached = 0;
- unlinkFid.Cell = adp->fid.Cell;
- unlinkFid.Fid.Volume = adp->fid.Fid.Volume;
+ unlinkFid.Cell = adp->f.fid.Cell;
+ unlinkFid.Fid.Volume = adp->f.fid.Fid.Volume;
if (unlinkFid.Fid.Unique == 0) {
tvc =
afs_LookupVCache(&unlinkFid, &treq, &cached, adp, aname);
if (!AFS_IS_DISCON_RW) {
/* Not disconnected, can connect to server. */
do {
- tc = afs_Conn(&adp->fid, &treq, SHARED_LOCK);
+ tc = afs_Conn(&adp->f.fid, &treq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_REMOVEDIR);
RX_AFS_GUNLOCK();
code =
RXAFS_RemoveDir(tc->id,
- (struct AFSFid *)&adp->fid.Fid,
+ (struct AFSFid *)&adp->f.fid.Fid,
aname,
&OutDirStatus,
&tsync);
} else
code = -1;
} while (afs_Analyze
- (tc, code, &adp->fid, &treq, AFS_STATS_FS_RPCIDX_REMOVEDIR,
+ (tc, code, &adp->f.fid, &treq, AFS_STATS_FS_RPCIDX_REMOVEDIR,
SHARED_LOCK, NULL));
if (code) {
if (code < 0) {
ObtainWriteLock(&afs_xcbhash, 491);
afs_DequeueCallback(adp);
- adp->states &= ~CStatd;
+ adp->f.states &= ~CStatd;
ReleaseWriteLock(&afs_xcbhash);
osi_dnlc_purgedp(adp);
}
}
/* here if rpc worked; update the in-core link count */
- adp->m.LinkCount = OutDirStatus.LinkCount;
+ adp->f.m.LinkCount = OutDirStatus.LinkCount;
} else {
#if defined(AFS_DISCON_ENV)
/* Find the vcache. */
struct VenusFid tfid;
- tfid.Cell = adp->fid.Cell;
- tfid.Fid.Volume = adp->fid.Fid.Volume;
+ tfid.Cell = adp->f.fid.Cell;
+ tfid.Fid.Volume = adp->f.fid.Fid.Volume;
code = afs_dir_Lookup(tdc, aname, &tfid.Fid);
ObtainSharedLock(&afs_xvcache, 764);
}
}
- if (tvc->m.LinkCount > 2) {
+ if (tvc->f.m.LinkCount > 2) {
/* This dir contains more than . and .., thus it can't be
* deleted.
*/
* If we were created locally, then we don't need to have a shadow
* directory (as there's no server state to remember)
*/
- if (!adp->shVnode && !(adp->ddirty_flags & VDisconCreate)) {
+ if (!adp->f.shadow.vnode && !(adp->f.ddirty_flags & VDisconCreate)) {
afs_MakeShadowDir(adp, tdc);
}
- adp->m.LinkCount--;
+ adp->f.m.LinkCount--;
#endif /* #ifdef AFS_DISCON_ENV */
} /* if (!AFS_IS_DISCON_RW) */
if (tvc) {
ObtainWriteLock(&tvc->lock, 155);
- tvc->states &= ~CUnique; /* For the dfs xlator */
+ tvc->f.states &= ~CUnique; /* For the dfs xlator */
#if AFS_DISCON_ENV
if (AFS_IS_DISCON_RW) {
- if (tvc->ddirty_flags & VDisconCreate) {
+ if (tvc->f.ddirty_flags & VDisconCreate) {
/* If we we were created whilst disconnected, removal doesn't
* need to get logged. Just go away gracefully */
afs_DisconRemoveDirty(tvc);
|| USE_SMALLFID(credp)
#endif
) {
- tcell = afs_GetCell(avc->fid.Cell, READ_LOCK);
- Sfid.Volume = avc->fid.Fid.Volume;
- Sfid.Vnode = avc->fid.Fid.Vnode;
+ tcell = afs_GetCell(avc->f.fid.Cell, READ_LOCK);
+ Sfid.Volume = avc->f.fid.Fid.Volume;
+ Sfid.Vnode = avc->f.fid.Fid.Vnode;
Sfid.CellAndUnique =
- ((tcell->cellIndex << 24) + (avc->fid.Fid.Unique & 0xffffff));
+ ((tcell->cellIndex << 24) + (avc->f.fid.Fid.Unique & 0xffffff));
afs_PutCell(tcell, READ_LOCK);
- if (avc->fid.Fid.Vnode > 0xffff)
+ if (avc->f.fid.Fid.Vnode > 0xffff)
afs_fid_vnodeoverflow++;
- if (avc->fid.Fid.Unique > 0xffffff)
+ if (avc->f.fid.Fid.Unique > 0xffffff)
afs_fid_uniqueoverflow++;
} else {
#if defined(AFS_SUN57_64BIT_ENV) || (defined(AFS_SGI61_ENV) && (_MIPS_SZPTR == 64))
if (avc->flockCount == 0) {
if (!AFS_IS_DISCONNECTED) {
do {
- tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
+ tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RELEASELOCK);
RX_AFS_GUNLOCK();
code = RXAFS_ReleaseLock(tc->id, (struct AFSFid *)
- &avc->fid.Fid, &tsync);
+ &avc->f.fid.Fid, &tsync);
RX_AFS_GLOCK();
XSTATS_END_TIME;
} else
code = -1;
} while (afs_Analyze
- (tc, code, &avc->fid, areq,
+ (tc, code, &avc->f.fid, areq,
AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK, NULL));
} else {
/*printf("Network is dooooooowwwwwwwnnnnnnn\n");*/
if (!code && avc->flockCount == 0) {
if (!AFS_IS_DISCONNECTED) {
do {
- tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
+ tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME
(AFS_STATS_FS_RPCIDX_RELEASELOCK);
code =
RXAFS_ReleaseLock(tc->id,
(struct AFSFid *)&avc->
- fid.Fid, &tsync);
+ f.fid.Fid, &tsync);
RX_AFS_GLOCK();
XSTATS_END_TIME;
} else
code = -1;
} while (afs_Analyze
- (tc, code, &avc->fid, areq,
+ (tc, code, &avc->f.fid, areq,
AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK,
NULL));
}
lockType = ((acom & LOCK_EX) ? LockWrite : LockRead);
if (!AFS_IS_DISCONNECTED) {
do {
- tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
+ tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SETLOCK);
RX_AFS_GUNLOCK();
code = RXAFS_SetLock(tc->id, (struct AFSFid *)
- &avc->fid.Fid, lockType,
+ &avc->f.fid.Fid, lockType,
&tsync);
RX_AFS_GLOCK();
XSTATS_END_TIME;
} else
code = -1;
} while (afs_Analyze
- (tc, code, &avc->fid, areq,
+ (tc, code, &avc->f.fid, areq,
AFS_STATS_FS_RPCIDX_SETLOCK, SHARED_LOCK,
NULL));
} else
return 0;
do {
- tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
+ tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
RX_AFS_GUNLOCK();
code =
- RXAFS_FetchStatus(tc->id, (struct AFSFid *)&avc->fid.Fid,
+ RXAFS_FetchStatus(tc->id, (struct AFSFid *)&avc->f.fid.Fid,
&OutStatus, &CallBack, &tsync);
RX_AFS_GLOCK();
XSTATS_END_TIME;
} else
code = -1;
} while (afs_Analyze
- (tc, code, &avc->fid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
+ (tc, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
SHARED_LOCK, NULL));
if (temp)
if (code)
goto done;
- if (avc->fid.Cell != adp->fid.Cell
- || avc->fid.Fid.Volume != adp->fid.Fid.Volume) {
+ if (avc->f.fid.Cell != adp->f.fid.Cell
+ || avc->f.fid.Fid.Volume != adp->f.fid.Fid.Volume) {
code = EXDEV;
goto done;
}
/** If the volume is read-only, return error without making an RPC to the
* fileserver
*/
- if (adp->states & CRO) {
+ if (adp->f.states & CRO) {
code = EROFS;
goto done;
}
tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &offset, &len, 1); /* test for error below */
ObtainWriteLock(&adp->lock, 145);
do {
- tc = afs_Conn(&adp->fid, &treq, SHARED_LOCK);
+ tc = afs_Conn(&adp->f.fid, &treq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_LINK);
RX_AFS_GUNLOCK();
code =
- RXAFS_Link(tc->id, (struct AFSFid *)&adp->fid.Fid, aname,
- (struct AFSFid *)&avc->fid.Fid, &OutFidStatus,
+ RXAFS_Link(tc->id, (struct AFSFid *)&adp->f.fid.Fid, aname,
+ (struct AFSFid *)&avc->f.fid.Fid, &OutFidStatus,
&OutDirStatus, &tsync);
RX_AFS_GLOCK();
XSTATS_END_TIME;
} else
code = -1;
} while (afs_Analyze
- (tc, code, &adp->fid, &treq, AFS_STATS_FS_RPCIDX_LINK,
+ (tc, code, &adp->f.fid, &treq, AFS_STATS_FS_RPCIDX_LINK,
SHARED_LOCK, NULL));
if (code) {
if (code < 0) {
ObtainWriteLock(&afs_xcbhash, 492);
afs_DequeueCallback(adp);
- adp->states &= ~CStatd;
+ adp->f.states &= ~CStatd;
ReleaseWriteLock(&afs_xcbhash);
osi_dnlc_purgedp(adp);
}
if (afs_LocalHero(adp, tdc, &OutDirStatus, 1)) {
/* we can do it locally */
ObtainWriteLock(&afs_xdcache, 290);
- code = afs_dir_Create(tdc, aname, &avc->fid.Fid);
+ code = afs_dir_Create(tdc, aname, &avc->f.fid.Fid);
ReleaseWriteLock(&afs_xdcache);
if (code) {
ZapDCE(tdc); /* surprise error -- invalid value */
ObtainWriteLock(&afs_xcbhash, 493);
afs_DequeueCallback(avc);
- avc->states &= ~CStatd; /* don't really know new link count */
+ avc->f.states &= ~CStatd; /* don't really know new link count */
ReleaseWriteLock(&afs_xcbhash);
- if (avc->fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
+ if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
osi_dnlc_purgedp(avc);
ReleaseWriteLock(&avc->lock);
code = 0;
AFS_STATCNT(EvalMountPoint);
#ifdef notdef
- if (avc->mvid && (avc->states & CMValid))
+ if (avc->mvid && (avc->f.states & CMValid))
return 0; /* done while racing */
#endif
*avolpp = NULL;
/* Determine which cell and volume the mointpoint goes to */
code = EvalMountData(avc->linkData[0], avc->linkData + 1,
- avc->states, avc->fid.Cell, avolpp, areq, 0, 0,
+ avc->f.states, avc->f.fid.Cell, avolpp, areq, 0, 0,
&avnoid);
if (code) return code;
avc->mvid->Fid.Volume = (*avolpp)->volume;
avc->mvid->Fid.Vnode = avnoid;
avc->mvid->Fid.Unique = 1;
- avc->states |= CMValid;
+ avc->f.states |= CMValid;
/* Used to: if the mount point is stored within a backup volume,
* then we should only update the parent pointer information if
*
* Next two lines used to be under this if:
*
- * if (!(avc->states & CBackup) || tvp->dotdot.Fid.Volume == 0)
+ * if (!(avc->f.states & CBackup) || tvp->dotdot.Fid.Volume == 0)
*
* Now: update mount point back pointer on every call, so that we handle
* multiple mount points better. This way, when du tries to go back
* cd'ing via a new path to a volume will reset the ".." pointer
* to the new path.
*/
- (*avolpp)->mtpoint = avc->fid; /* setup back pointer to mtpoint */
+ (*avolpp)->mtpoint = avc->f.fid; /* setup back pointer to mtpoint */
+
if (advc)
- (*avolpp)->dotdot = advc->fid;
+ (*avolpp)->dotdot = advc->f.fid;
return 0;
}
if (code)
goto done;
if (tvolp) {
- tvolp->dotdot = tvc->fid;
- tvolp->dotdot.Fid.Vnode = tvc->parentVnode;
- tvolp->dotdot.Fid.Unique = tvc->parentUnique;
+ tvolp->dotdot = tvc->f.fid;
+ tvolp->dotdot.Fid.Vnode = tvc->f.parent.vnode;
+ tvolp->dotdot.Fid.Unique = tvc->f.parent.unique;
}
}
- if (tvc->mvid && (tvc->states & CMValid)) {
+ if (tvc->mvid && (tvc->f.states & CMValid)) {
if (!canblock) {
afs_int32 retry;
goto done;
}
#ifdef AFS_DARWIN80_ENV
- root_vp->m.Type = VDIR;
+ root_vp->f.m.Type = VDIR;
AFS_GUNLOCK();
code = afs_darwin_finalizevnode(root_vp, NULL, NULL, 0);
AFS_GLOCK();
if (!afs_nfsexporter)
strcpy(bufp, (*sysnamelist)[0]);
else {
- au = afs_GetUser(areq->uid, adp->fid.Cell, 0);
+ au = afs_GetUser(areq->uid, adp->f.fid.Cell, 0);
if (au->exporter) {
error = EXP_SYSNAME(au->exporter, (char *)0, sysnamelist, num, 0);
if (error) {
*sysnamelist = afs_sysnamelist;
if (afs_nfsexporter) {
- au = afs_GetUser(areq->uid, avc->fid.Cell, 0);
+ au = afs_GetUser(areq->uid, avc->f.fid.Cell, 0);
if (au->exporter) {
error =
EXP_SYSNAME(au->exporter, (char *)0, sysnamelist, &num, 0);
* 1. The cache data is being fetched by another process.
* 2. The cache data is no longer valid
*/
- while ((adp->states & CStatd)
+ while ((adp->f.states & CStatd)
&& (dcp->dflags & DFFetching)
- && hsame(adp->m.DataVersion, dcp->f.versionNo)) {
+ && hsame(adp->f.m.DataVersion, dcp->f.versionNo)) {
afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT, ICL_TYPE_STRING,
__FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER, dcp,
ICL_TYPE_INT32, dcp->dflags);
ObtainReadLock(&adp->lock);
ObtainReadLock(&dcp->lock);
}
- if (!(adp->states & CStatd)
- || !hsame(adp->m.DataVersion, dcp->f.versionNo)) {
+ if (!(adp->f.states & CStatd)
+ || !hsame(adp->f.m.DataVersion, dcp->f.versionNo)) {
ReleaseReadLock(&dcp->lock);
ReleaseReadLock(&adp->lock);
afs_PutDCache(dcp);
* also make us skip "." and probably "..", unless it has
* disappeared from the cache since we did our namei call.
*/
- tfid.Cell = adp->fid.Cell;
- tfid.Fid.Volume = adp->fid.Fid.Volume;
+ tfid.Cell = adp->f.fid.Cell;
+ tfid.Fid.Volume = adp->f.fid.Fid.Volume;
tfid.Fid.Vnode = ntohl(dirEntryp->fid.vnode);
tfid.Fid.Unique = ntohl(dirEntryp->fid.vunique);
do {
}
#ifdef AFS_DARWIN80_ENV
- if (tvcp->states & CVInit) {
+ if (tvcp->f.states & CVInit) {
/* XXX don't have status yet, so creating the vnode is
not yet useful. we would get CDeadVnode set, and the
upcoming PutVCache will cause the vcache to be flushed &
* if the new length will be ignored when afs_ProcessFS is
* called with new stats. */
#ifdef AFS_SGI_ENV
- if (!(tvcp->states & (CStatd | CBulkFetching))
+ if (!(tvcp->f.states & (CStatd | CBulkFetching))
&& (tvcp->execsOrWriters <= 0)
&& !afs_DirtyPages(tvcp)
&& !AFS_VN_MAPPED((vnode_t *) tvcp))
#else
- if (!(tvcp->states & (CStatd | CBulkFetching))
+ if (!(tvcp->f.states & (CStatd | CBulkFetching))
&& (tvcp->execsOrWriters <= 0)
&& !afs_DirtyPages(tvcp))
#endif
*/
memcpy((char *)(fidsp + fidIndex), (char *)&tfid.Fid,
sizeof(*fidsp));
- tvcp->states |= CBulkFetching;
- tvcp->m.Length = statSeqNo;
+ tvcp->f.states |= CBulkFetching;
+ tvcp->f.m.Length = statSeqNo;
fidIndex++;
}
afs_PutVCache(tvcp);
/* start the timer; callback expirations are relative to this */
startTime = osi_Time();
- tcp = afs_Conn(&adp->fid, areqp, SHARED_LOCK);
+ tcp = afs_Conn(&adp->f.fid, areqp, SHARED_LOCK);
if (tcp) {
hostp = tcp->srvr->server;
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_BULKSTATUS);
} else
code = -1;
} while (afs_Analyze
- (tcp, code, &adp->fid, areqp, AFS_STATS_FS_RPCIDX_BULKSTATUS,
+ (tcp, code, &adp->f.fid, areqp, AFS_STATS_FS_RPCIDX_BULKSTATUS,
SHARED_LOCK, NULL));
/* now, if we didnt get the info, bail out. */
/* we need vol flags to create the entries properly */
dotdot.Fid.Volume = 0;
- volp = afs_GetVolume(&adp->fid, areqp, READ_LOCK);
+ volp = afs_GetVolume(&adp->f.fid, areqp, READ_LOCK);
if (volp) {
volStates = volp->states;
if (volp->dotdot.Fid.Volume != 0)
for (i = 0; i < fidIndex; i++) {
if ((&statsp[i])->errorCode)
continue;
- afid.Cell = adp->fid.Cell;
- afid.Fid.Volume = adp->fid.Fid.Volume;
+ afid.Cell = adp->f.fid.Cell;
+ afid.Fid.Volume = adp->f.fid.Fid.Volume;
afid.Fid.Vnode = fidsp[i].Vnode;
afid.Fid.Unique = fidsp[i].Unique;
do {
* and we may not have the latest status information for this
* file. Leave the entry alone.
*/
- if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
+ if (!(tvcp->f.states & CBulkFetching) || (tvcp->f.m.Length != statSeqNo)) {
flagIndex++;
ReleaseWriteLock(&tvcp->lock);
afs_PutVCache(tvcp);
/* We need to check the flags again. We may have missed
* something while we were waiting for a lock.
*/
- if (!(tvcp->states & CBulkFetching) || (tvcp->m.Length != statSeqNo)) {
+ if (!(tvcp->f.states & CBulkFetching) || (tvcp->f.m.Length != statSeqNo)) {
flagIndex++;
ReleaseWriteLock(&tvcp->lock);
ReleaseWriteLock(&afs_xcbhash);
* loaded, so we can tell if we use it before it gets
* recycled.
*/
- tvcp->states |= CBulkStat;
- tvcp->states &= ~CBulkFetching;
+ tvcp->f.states |= CBulkStat;
+ tvcp->f.states &= ~CBulkFetching;
flagIndex++;
afs_bulkStatsDone++;
/* merge in vol info */
if (volStates & VRO)
- tvcp->states |= CRO;
+ tvcp->f.states |= CRO;
if (volStates & VBackup)
- tvcp->states |= CBackup;
+ tvcp->f.states |= CBackup;
if (volStates & VForeign)
- tvcp->states |= CForeign;
+ tvcp->f.states |= CForeign;
/* merge in the callback info */
- tvcp->states |= CTruth;
+ tvcp->f.states |= CTruth;
/* get ptr to the callback we are interested in */
tcbp = cbsp + i;
if (tcbp->ExpirationTime != 0) {
tvcp->cbExpires = tcbp->ExpirationTime + startTime;
tvcp->callback = hostp;
- tvcp->states |= CStatd;
+ tvcp->f.states |= CStatd;
afs_QueueCallback(tvcp, CBHash(tcbp->ExpirationTime), volp);
- } else if (tvcp->states & CRO) {
+ } else if (tvcp->f.states & CRO) {
/* ordinary callback on a read-only volume -- AFS 3.2 style */
tvcp->cbExpires = 3600 + startTime;
tvcp->callback = hostp;
- tvcp->states |= CStatd;
+ tvcp->f.states |= CStatd;
afs_QueueCallback(tvcp, CBHash(3600), volp);
} else {
tvcp->callback = 0;
- tvcp->states &= ~(CStatd | CUnique);
+ tvcp->f.states &= ~(CStatd | CUnique);
afs_DequeueCallback(tvcp);
- if ((tvcp->states & CForeign) || (vType(tvcp) == VDIR))
+ if ((tvcp->f.states & CForeign) || (vType(tvcp) == VDIR))
osi_dnlc_purgedp(tvcp); /* if it (could be) a directory */
}
ReleaseWriteLock(&afs_xcbhash);
done:
/* Be sure to turn off the CBulkFetching flags */
for (i = flagIndex; i < fidIndex; i++) {
- afid.Cell = adp->fid.Cell;
- afid.Fid.Volume = adp->fid.Fid.Volume;
+ afid.Cell = adp->f.fid.Cell;
+ afid.Fid.Volume = adp->f.fid.Fid.Volume;
afid.Fid.Vnode = fidsp[i].Vnode;
afid.Fid.Unique = fidsp[i].Unique;
do {
tvcp = afs_FindVCache(&afid, &retry, 0 /* !stats&!lru */ );
ReleaseReadLock(&afs_xvcache);
} while (tvcp && retry);
- if (tvcp != NULL && (tvcp->states & CBulkFetching)
- && (tvcp->m.Length == statSeqNo)) {
- tvcp->states &= ~CBulkFetching;
+ if (tvcp != NULL && (tvcp->f.states & CBulkFetching)
+ && (tvcp->f.m.Length == statSeqNo)) {
+ tvcp->f.states &= ~CBulkFetching;
}
if (tvcp != NULL) {
afs_PutVCache(tvcp);
/* If we did the InlineBulk RPC pull out the return code */
if (inlinebulk) {
if ((&statsp[0])->errorCode) {
- afs_Analyze(tcp, (&statsp[0])->errorCode, &adp->fid, areqp,
+ afs_Analyze(tcp, (&statsp[0])->errorCode, &adp->f.fid, areqp,
AFS_STATS_FS_RPCIDX_BULKSTATUS, SHARED_LOCK, NULL);
code = (&statsp[0])->errorCode;
}
*avcp = NULL; /* Since some callers don't initialize it */
bulkcode = 0;
- if (!(adp->states & CStatd) && !afs_InReadDir(adp)) {
+ if (!(adp->f.states & CStatd) && !afs_InReadDir(adp)) {
if ((code = afs_VerifyVCache2(adp, &treq))) {
goto done;
}
* use that. This eliminates several possible deadlocks.
*/
if (!afs_InReadDir(adp)) {
- while ((adp->states & CStatd)
+ while ((adp->f.states & CStatd)
&& (tdc->dflags & DFFetching)
- && hsame(adp->m.DataVersion, tdc->f.versionNo)) {
+ && hsame(adp->f.m.DataVersion, tdc->f.versionNo)) {
ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&adp->lock);
afs_osi_Sleep(&tdc->validPos);
ObtainReadLock(&adp->lock);
ObtainReadLock(&tdc->lock);
}
- if (!(adp->states & CStatd)
- || !hsame(adp->m.DataVersion, tdc->f.versionNo)) {
+ if (!(adp->f.states & CStatd)
+ || !hsame(adp->f.m.DataVersion, tdc->f.versionNo)) {
ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&adp->lock);
afs_PutDCache(tdc);
}
/* new fid has same cell and volume */
- tfid.Cell = adp->fid.Cell;
- tfid.Fid.Volume = adp->fid.Fid.Volume;
+ tfid.Cell = adp->f.fid.Cell;
+ tfid.Fid.Volume = adp->f.fid.Fid.Volume;
afs_Trace4(afs_iclSetp, CM_TRACE_LOOKUP, ICL_TYPE_POINTER, adp,
ICL_TYPE_STRING, tname, ICL_TYPE_FID, &tfid,
ICL_TYPE_INT32, code);
* dirCookie tells us where to start prefetching from.
*/
if (!AFS_IS_DISCONNECTED &&
- AFSDOBULK && adp->opens > 0 && !(adp->states & CForeign)
+ AFSDOBULK && adp->opens > 0 && !(adp->f.states & CForeign)
&& !afs_IsDynroot(adp) && !afs_InReadDir(adp)) {
afs_int32 retry;
/* if the entry is not in the cache, or is in the cache,
ReleaseReadLock(&afs_xvcache);
} while (tvc && retry);
- if (!tvc || !(tvc->states & CStatd))
+ if (!tvc || !(tvc->f.states & CStatd))
bulkcode = afs_DoBulkStat(adp, dirCookie, &treq);
else
bulkcode = 0;
/* if the vcache isn't usable, release it */
- if (tvc && !(tvc->states & CStatd)) {
+ if (tvc && !(tvc->f.states & CStatd)) {
#ifndef AFS_FBSD80_ENV
afs_PutVCache(tvc);
#endif
*/
if (!tvc) {
afs_int32 cached = 0;
- if (!tfid.Fid.Unique && (adp->states & CForeign)) {
+ if (!tfid.Fid.Unique && (adp->f.states & CForeign)) {
tvc = afs_LookupVCache(&tfid, &treq, &cached, adp, tname);
}
if (!tvc && !bulkcode) { /* lookup failed or wasn't called */
} /* sub-block just to reduce stack usage */
if (tvc) {
- if (adp->states & CForeign)
- tvc->states |= CForeign;
- tvc->parentVnode = adp->fid.Fid.Vnode;
- tvc->parentUnique = adp->fid.Fid.Unique;
- tvc->states &= ~CBulkStat;
+ if (adp->f.states & CForeign)
+ tvc->f.states |= CForeign;
+ tvc->f.parent.vnode = adp->f.fid.Fid.Vnode;
+ tvc->f.parent.unique = adp->f.fid.Fid.Unique;
+ tvc->f.states &= ~CBulkStat;
if (afs_fakestat_enable == 2 && tvc->mvstat == 1) {
ObtainSharedLock(&tvc->lock, 680);
force_eval = 1;
ReleaseReadLock(&tvc->lock);
}
- if (tvc->mvstat == 1 && (tvc->states & CMValid) && tvc->mvid != NULL)
+ if (tvc->mvstat == 1 && (tvc->f.states & CMValid) && tvc->mvid != NULL)
force_eval = 1; /* This is now almost for free, get it correct */
#if defined(UKERNEL) && defined(AFS_WEB_ENHANCEMENTS)
}
/* next, we want to continue using the target of the mt point */
- if (tvc->mvid && (tvc->states & CMValid)) {
+ if (tvc->mvid && (tvc->f.states & CMValid)) {
struct vcache *uvc;
/* now lookup target, to set .. pointer */
afs_Trace2(afs_iclSetp, CM_TRACE_LOOKUP1,
ICL_TYPE_POINTER, tvc, ICL_TYPE_FID,
- &tvc->fid);
+ &tvc->f.fid);
uvc = tvc; /* remember for later */
if (tvolp && (tvolp->states & VForeign)) {
if (!AFS_IS_DISCONNECTED) {
if (pass == 0) {
struct volume *tv;
- tv = afs_GetVolume(&adp->fid, &treq, READ_LOCK);
+ tv = afs_GetVolume(&adp->f.fid, &treq, READ_LOCK);
if (tv) {
if (tv->states & VRO) {
pass = 1; /* try this *once* */
ObtainWriteLock(&afs_xcbhash, 495);
afs_DequeueCallback(adp);
/* re-stat to get later version */
- adp->states &= ~CStatd;
+ adp->f.states &= ~CStatd;
ReleaseWriteLock(&afs_xcbhash);
osi_dnlc_purgedp(adp);
afs_PutVolume(tv, READ_LOCK);
#ifdef AFS_OSF_ENV
/* Handle RENAME; only need to check rename "." */
if (opflag == RENAME && wantparent && *ndp->ni_next == 0) {
- if (!FidCmp(&(tvc->fid), &(adp->fid))) {
+ if (!FidCmp(&(tvc->f.fid), &(adp->f.fid))) {
afs_PutVCache(*avcp);
*avcp = NULL;
afs_PutFakeStat(&fakestate);
goto done;
} else {
if (!afs_AccessOK
- (tvc, ((tvc->states & CForeign) ? PRSFS_READ : PRSFS_LOOKUP),
+ (tvc, ((tvc->f.states & CForeign) ? PRSFS_READ : PRSFS_LOOKUP),
&treq, CHECK_MODE_BITS)) {
code = EACCES;
printf("afs_Open: no access for dir\n");
if (aflags & FTRUNC) {
/* this fixes touch */
ObtainWriteLock(&tvc->lock, 123);
- tvc->m.Date = osi_Time();
- tvc->states |= CDirty;
+ tvc->f.m.Date = osi_Time();
+ tvc->f.states |= CDirty;
ReleaseWriteLock(&tvc->lock);
}
ObtainReadLock(&tvc->lock);
afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_INT32,
totalLength, ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(avc->m.Length));
+ ICL_HANDLE_OFFSET(avc->f.m.Length));
error = 0;
transferLength = 0;
if (!noLock)
ObtainReadLock(&avc->lock);
#if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
- hset(avc->flushDV, avc->m.DataVersion);
+ hset(avc->flushDV, avc->f.m.DataVersion);
}
#endif
* Locks held:
* avc->lock(R)
*/
- if (filePos >= avc->m.Length) {
+ if (filePos >= avc->f.m.Length) {
if (len > AFS_ZEROS)
len = sizeof(afs_zeros); /* and in 0 buffer */
len = 0;
AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, tuiop, code);
}
- while (avc->m.Length > 0 && totalLength > 0) {
+ while (avc->f.m.Length > 0 && totalLength > 0) {
/* read all of the cached info */
- if (filePos >= avc->m.Length)
+ if (filePos >= avc->f.m.Length)
break; /* all done */
if (noLock) {
if (tdc) {
* 2 requests never return a null dcache entry, btw.
*/
if (!(tdc->dflags & DFFetching)
- && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
+ && !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
/* have cache entry, it is not coming in now,
* and we'll need new data */
tagain:
} else {
/* no longer fetching, verify data version
* (avoid new GetDCache call) */
- if (hsame(avc->m.DataVersion, tdc->f.versionNo)
+ if (hsame(avc->f.m.DataVersion, tdc->f.versionNo)
&& ((len = tdc->validPos - filePos) > 0)) {
offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
} else {
/* don't have current data, so get it below */
afs_Trace3(afs_iclSetp, CM_TRACE_VERSIONNO,
ICL_TYPE_INT64, ICL_HANDLE_OFFSET(filePos),
- ICL_TYPE_HYPER, &avc->m.DataVersion,
+ ICL_TYPE_HYPER, &avc->f.m.DataVersion,
ICL_TYPE_HYPER, &tdc->f.versionNo);
ReleaseReadLock(&tdc->lock);
afs_PutDCache(tdc);
len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
if (len > totalLength)
len = totalLength; /* and still within xfr request */
- tlen = avc->m.Length - offset; /* and still within file */
+ tlen = avc->f.m.Length - offset; /* and still within file */
if (len > tlen)
len = tlen;
if (len > AFS_ZEROS)
offset = AFS_CHUNKTOBASE(offset); /* base of next chunk */
ObtainReadLock(&adc->lock);
ObtainSharedLock(&adc->mflock, 662);
- if (offset < avc->m.Length && !(adc->mflags & DFNextStarted)
+ if (offset < avc->f.m.Length && !(adc->mflags & DFNextStarted)
&& !afs_BBusy()) {
struct brequest *bp;
afs_Trace4(afs_iclSetp, CM_TRACE_READ, ICL_TYPE_POINTER, avc,
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_INT32,
totalLength, ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(avc->m.Length));
+ ICL_HANDLE_OFFSET(avc->f.m.Length));
error = 0;
transferLength = 0;
if (!noLock)
ObtainReadLock(&avc->lock);
#if defined(AFS_TEXT_ENV) && !defined(AFS_VM_RDWR_ENV)
if (avc->flushDV.high == AFS_MAXDV && avc->flushDV.low == AFS_MAXDV) {
- hset(avc->flushDV, avc->m.DataVersion);
+ hset(avc->flushDV, avc->f.m.DataVersion);
}
#endif
- if (filePos >= avc->m.Length) {
+ if (filePos >= avc->f.m.Length) {
if (len > AFS_ZEROS)
len = sizeof(afs_zeros); /* and in 0 buffer */
len = 0;
AFS_UIOMOVE(afs_zeros, trimlen, UIO_READ, tuiop, code);
}
- while (avc->m.Length > 0 && totalLength > 0) {
+ while (avc->f.m.Length > 0 && totalLength > 0) {
/* read all of the cached info */
- if (filePos >= avc->m.Length)
+ if (filePos >= avc->f.m.Length)
break; /* all done */
if (noLock) {
if (tdc) {
* data already coming, we don't need to do this, obviously. Type
* 2 requests never return a null dcache entry, btw. */
if (!(tdc->dflags & DFFetching)
- && !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
+ && !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
/* have cache entry, it is not coming in now, and we'll need new data */
tagain:
if (trybusy && !afs_BBusy()) {
} else {
/* no longer fetching, verify data version (avoid new
* GetDCache call) */
- if (hsame(avc->m.DataVersion, tdc->f.versionNo)
+ if (hsame(avc->f.m.DataVersion, tdc->f.versionNo)
&& ((len = tdc->validPos - filePos) > 0)) {
offset = filePos - AFS_CHUNKTOBASE(tdc->f.chunk);
} else {
/* don't have current data, so get it below */
afs_Trace3(afs_iclSetp, CM_TRACE_VERSIONNO,
ICL_TYPE_INT64, ICL_HANDLE_OFFSET(filePos),
- ICL_TYPE_HYPER, &avc->m.DataVersion,
+ ICL_TYPE_HYPER, &avc->f.m.DataVersion,
ICL_TYPE_HYPER, &tdc->f.versionNo);
ReleaseReadLock(&tdc->lock);
afs_PutDCache(tdc);
len = AFS_CHUNKTOSIZE(tdc->f.chunk) - offset; /* bytes left in chunk addr space */
if (len > totalLength)
len = totalLength; /* and still within xfr request */
- tlen = avc->m.Length - offset; /* and still within file */
+ tlen = avc->f.m.Length - offset; /* and still within file */
if (len > tlen)
len = tlen;
if (len > AFS_ZEROS)
struct VenusFid tfid;
struct vcache *tvc;
int vtype;
- tfid.Cell = avc->fid.Cell;
- tfid.Fid.Volume = avc->fid.Fid.Volume;
+ tfid.Cell = avc->f.fid.Cell;
+ tfid.Fid.Volume = avc->f.fid.Fid.Volume;
tfid.Fid.Vnode = ntohl(ade->fid.vnode);
tfid.Fid.Unique = ntohl(ade->fid.vunique);
- if ((avc->states & CForeign) == 0 && (ntohl(ade->fid.vnode) & 1)) {
+ if ((avc->f.states & CForeign) == 0 && (ntohl(ade->fid.vnode) & 1)) {
return DT_DIR;
}
ObtainReadLock(&afs_xvcache);
if (tvc->mvstat) {
afs_PutVCache(tvc);
return DT_DIR;
- } else if (((tvc->states) & (CStatd | CTruth))) {
+ } else if (((tvc->f.states) & (CStatd | CTruth))) {
/* CTruth will be set if the object has
*ever* been statd */
vtype = vType(tvc);
{
int code = 0;
struct volume *tvp;
- afs_uint32 Volume = vc->fid.Fid.Volume;
+ afs_uint32 Volume = vc->f.fid.Fid.Volume;
afs_uint32 Vnode = de->fid.vnode;
#if defined(AFS_SUN56_ENV)
struct dirent64 *direntp;
/* This is the '.' entry; if we are a volume root, we need to
* ignore the directory and use the inum for the mount point.
*/
- if (!FidCmp(&afs_rootFid, &vc->fid)) {
+ if (!FidCmp(&afs_rootFid, &vc->f.fid)) {
Volume = 0;
Vnode = 2;
} else if (vc->mvstat == 2) {
- tvp = afs_GetVolume(&vc->fid, 0, READ_LOCK);
+ tvp = afs_GetVolume(&vc->f.fid, 0, READ_LOCK);
if (tvp) {
Volume = tvp->mtpoint.Fid.Volume;
Vnode = tvp->mtpoint.Fid.Vnode;
* because we might be a volume root (so our parent is in a
* different volume), or our parent might be a volume root
* (so we actually want the mount point) or BOTH! */
- if (!FidCmp(&afs_rootFid, &vc->fid)) {
+ if (!FidCmp(&afs_rootFid, &vc->f.fid)) {
/* We are the root of the AFS root, and thus our own parent */
Volume = 0;
Vnode = 2;
} else if (de->fid.vnode == 1 && de->fid.vunique == 1) {
/* XXX The above test is evil and probably breaks DFS */
/* Parent directory is a volume root; use the right inum */
- tvp = afs_GetVolume(&vc->fid, 0, READ_LOCK);
+ tvp = afs_GetVolume(&vc->f.fid, 0, READ_LOCK);
if (tvp) {
if (tvp->cell == afs_rootFid.Cell
&& tvp->volume == afs_rootFid.Fid.Volume) {
* 1. The cache data is being fetched by another process.
* 2. The cache data is no longer valid
*/
- while ((avc->states & CStatd)
+ while ((avc->f.states & CStatd)
&& (tdc->dflags & DFFetching)
- && hsame(avc->m.DataVersion, tdc->f.versionNo)) {
+ && hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT, ICL_TYPE_STRING,
__FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER, tdc,
ICL_TYPE_INT32, tdc->dflags);
ObtainReadLock(&avc->lock);
ObtainReadLock(&tdc->lock);
}
- if (!(avc->states & CStatd)
- || !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
+ if (!(avc->f.states & CStatd)
+ || !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&avc->lock);
afs_PutDCache(tdc);
/* something to hand over. */
#ifdef AFS_HPUX_ENV
sdirEntry->d_fileno =
- (avc->fid.Fid.Volume << 16) + ntohl(ode->fid.vnode);
+ (avc->f.fid.Fid.Volume << 16) + ntohl(ode->fid.vnode);
FIXUPSTUPIDINODE(sdirEntry->d_fileno);
sdirEntry->d_reclen = rlen = AFS_UIO_RESID(auio);
sdirEntry->d_namlen = o_slen;
if (len) {
#ifdef AFS_HPUX_ENV
sdirEntry->d_fileno =
- (avc->fid.Fid.Volume << 16) + ntohl(ode->fid.vnode);
+ (avc->f.fid.Fid.Volume << 16) + ntohl(ode->fid.vnode);
FIXUPSTUPIDINODE(sdirEntry->d_fileno);
sdirEntry->d_reclen = rlen = AFS_UIO_RESID(auio);
sdirEntry->d_namlen = o_slen;
if (len) {
#ifdef AFS_HPUX_ENV
sdirEntry->d_fileno =
- (avc->fid.Fid.Volume << 16) + ntohl(ode->fid.vnode);
+ (avc->f.fid.Fid.Volume << 16) + ntohl(ode->fid.vnode);
FIXUPSTUPIDINODE(sdirEntry->d_fileno);
sdirEntry->d_reclen = rlen = len;
sdirEntry->d_namlen = o_slen;
* 1. The cache data is being fetched by another process.
* 2. The cache data is no longer valid
*/
- while ((avc->states & CStatd)
+ while ((avc->f.states & CStatd)
&& (tdc->dflags & DFFetching)
- && hsame(avc->m.DataVersion, tdc->f.versionNo)) {
+ && hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT, ICL_TYPE_STRING,
__FILE__, ICL_TYPE_INT32, __LINE__, ICL_TYPE_POINTER, tdc,
ICL_TYPE_INT32, tdc->dflags);
ObtainReadLock(&avc->lock);
ObtainReadLock(&tdc->lock);
}
- if (!(avc->states & CStatd)
- || !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
+ if (!(avc->f.states & CStatd)
+ || !hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
ReleaseReadLock(&tdc->lock);
ReleaseReadLock(&avc->lock);
afs_PutDCache(tdc);
/* something to hand over. */
#if defined(AFS_HPUX_ENV) || defined(AFS_OSF_ENV)
sdirEntry->d_fileno =
- (avc->fid.Fid.Volume << 16) + ntohl(ode->fid.vnode);
+ (avc->f.fid.Fid.Volume << 16) + ntohl(ode->fid.vnode);
FIXUPSTUPIDINODE(sdirEntry->d_fileno);
sdirEntry->d_reclen = rlen = AFS_UIO_RESID(auio);
sdirEntry->d_namlen = o_slen;
if (len) {
#if defined(AFS_HPUX_ENV) || defined(AFS_OSF_ENV)
sdirEntry->d_fileno =
- (avc->fid.Fid.Volume << 16) + ntohl(ode->fid.vnode);
+ (avc->f.fid.Fid.Volume << 16) + ntohl(ode->fid.vnode);
FIXUPSTUPIDINODE(sdirEntry->d_fileno);
sdirEntry->d_reclen = rlen = AFS_UIO_RESID(auio);
sdirEntry->d_namlen = o_slen;
if (len) {
#if defined(AFS_HPUX_ENV) || defined(AFS_OSF_ENV)
sdirEntry->d_fileno =
- (avc->fid.Fid.Volume << 16) + ntohl(ode->fid.vnode);
+ (avc->f.fid.Fid.Volume << 16) + ntohl(ode->fid.vnode);
FIXUPSTUPIDINODE(sdirEntry->d_fileno);
sdirEntry->d_reclen = rlen = len;
sdirEntry->d_namlen = o_slen;
afs_size_t pos, offset, len;
AFS_STATCNT(FetchWholeEnchilada);
- if ((avc->states & CStatd) == 0)
+ if ((avc->f.states & CStatd) == 0)
return; /* don't know size */
for (nextChunk = 0; nextChunk < 1024; nextChunk++) { /* sanity check on N chunks */
pos = AFS_CHUNKTOBASE(nextChunk);
#if defined(AFS_OSF_ENV)
- if (pos >= avc->m.Length)
+ if (pos >= avc->f.m.Length)
break; /* all done */
#else /* AFS_OSF_ENV */
- if (pos >= avc->m.Length)
+ if (pos >= avc->f.m.Length)
return; /* all done */
#endif
tdc = afs_GetDCache(avc, pos, areq, &offset, &len, 0);
afs_PutDCache(tdc);
}
#if defined(AFS_OSF_ENV)
- avc->states |= CWired;
+ avc->f.states |= CWired;
#endif /* AFS_OSF_ENV */
}
*/
afs_IsWired(register struct vcache *avc)
{
- if (avc->states & CWired) {
+ if (avc->f.states & CWired) {
if (osi_Active(avc)) {
return 1;
}
- avc->states &= ~CWired;
+ avc->f.states &= ~CWired;
}
return 0;
}
XSTATS_DECLS;
if (!AFS_IS_DISCONNECTED) {
do {
- tc = afs_Conn(&adp->fid, treqp, SHARED_LOCK);
+ tc = afs_Conn(&adp->f.fid, treqp, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_REMOVEFILE);
RX_AFS_GUNLOCK();
code =
- RXAFS_RemoveFile(tc->id, (struct AFSFid *)&adp->fid.Fid,
+ RXAFS_RemoveFile(tc->id, (struct AFSFid *)&adp->f.fid.Fid,
aname, &OutDirStatus, &tsync);
RX_AFS_GLOCK();
XSTATS_END_TIME;
} else
code = -1;
} while (afs_Analyze
- (tc, code, &adp->fid, treqp, AFS_STATS_FS_RPCIDX_REMOVEFILE,
+ (tc, code, &adp->f.fid, treqp, AFS_STATS_FS_RPCIDX_REMOVEFILE,
SHARED_LOCK, NULL));
}
if (code < 0) {
ObtainWriteLock(&afs_xcbhash, 497);
afs_DequeueCallback(adp);
- adp->states &= ~CStatd;
+ adp->f.states &= ~CStatd;
ReleaseWriteLock(&afs_xcbhash);
osi_dnlc_purgedp(adp);
}
ObtainWriteLock(&tvc->lock, 141);
/* note that callback will be broken on the deleted file if there are
* still >0 links left to it, so we'll get the stat right */
- tvc->m.LinkCount--;
- tvc->states &= ~CUnique; /* For the dfs xlator */
- if (tvc->m.LinkCount == 0 && !osi_Active(tvc)) {
+ tvc->f.m.LinkCount--;
+ tvc->f.states &= ~CUnique; /* For the dfs xlator */
+ if (tvc->f.m.LinkCount == 0 && !osi_Active(tvc)) {
if (!AFS_NFSXLATORREQ(acred))
afs_TryToSmush(tvc, acred, 0);
}
/** If the volume is read-only, return error without making an RPC to the
* fileserver
*/
- if (adp->states & CRO) {
+ if (adp->f.states & CRO) {
#ifdef AFS_OSF_ENV
afs_PutVCache(tvc);
#endif
* Make sure that the data in the cache is current. We may have
* received a callback while we were waiting for the write lock.
*/
- if (!(adp->states & CStatd)
- || (tdc && !hsame(adp->m.DataVersion, tdc->f.versionNo))) {
+ if (!(adp->f.states & CStatd)
+ || (tdc && !hsame(adp->f.m.DataVersion, tdc->f.versionNo))) {
ReleaseWriteLock(&adp->lock);
if (tdc) {
ReleaseSharedLock(&tdc->lock);
if (code == 0) {
afs_int32 cached = 0;
- unlinkFid.Cell = adp->fid.Cell;
- unlinkFid.Fid.Volume = adp->fid.Fid.Volume;
+ unlinkFid.Cell = adp->f.fid.Cell;
+ unlinkFid.Fid.Volume = adp->f.fid.Fid.Volume;
if (unlinkFid.Fid.Unique == 0) {
tvc =
afs_LookupVCache(&unlinkFid, &treq, &cached, adp,
#if defined(AFS_DISCON_ENV)
if (AFS_IS_DISCON_RW) {
- if (!adp->shVnode && !(adp->ddirty_flags & VDisconCreate)) {
+ if (!adp->f.shadow.vnode && !(adp->f.ddirty_flags & VDisconCreate)) {
/* Make shadow copy of parent dir. */
afs_MakeShadowDir(adp, tdc);
}
/* If we were locally created, then we don't need to do very
* much beyond ensuring that we don't exist anymore */
- if (tvc->ddirty_flags & VDisconCreate) {
+ if (tvc->f.ddirty_flags & VDisconCreate) {
afs_DisconRemoveDirty(tvc);
} else {
/* Add removed file vcache to dirty list. */
afs_DisconAddDirty(tvc, VDisconRemove, 1);
}
- adp->m.LinkCount--;
+ adp->f.m.LinkCount--;
ReleaseWriteLock(&tvc->lock);
if (tdc)
ObtainSharedLock(&tdc->lock, 714);
#endif
#ifdef AFS_AIX_ENV
if (tvc && VREFCOUNT_GT(tvc, 2) && tvc->opens > 0
- && !(tvc->states & CUnlinked)) {
+ && !(tvc->f.states & CUnlinked)) {
#else
if (tvc && VREFCOUNT_GT(tvc, 1) && tvc->opens > 0
- && !(tvc->states & CUnlinked)) {
+ && !(tvc->f.states & CUnlinked)) {
#endif
char *unlname = afs_newname();
crfree(tvc->uncred);
}
tvc->uncred = acred;
- tvc->states |= CUnlinked;
+ tvc->f.states |= CUnlinked;
} else {
osi_FreeSmallSpace(unlname);
}
}
#endif
- if (avc->mvid && (doit || (avc->states & CUnlinkedDel))) {
+ if (avc->mvid && (doit || (avc->f.states & CUnlinkedDel))) {
if ((code = afs_InitReq(&treq, avc->uncred))) {
ReleaseWriteLock(&avc->lock);
} else {
/* We'll only try this once. If it fails, just release the vnode.
* Clear after doing hold so that NewVCache doesn't find us yet.
*/
- avc->states &= ~(CUnlinked | CUnlinkedDel);
+ avc->f.states &= ~(CUnlinked | CUnlinkedDel);
ReleaseWriteLock(&avc->lock);
- dirFid.Cell = avc->fid.Cell;
- dirFid.Fid.Volume = avc->fid.Fid.Volume;
- dirFid.Fid.Vnode = avc->parentVnode;
- dirFid.Fid.Unique = avc->parentUnique;
+ dirFid.Cell = avc->f.fid.Cell;
+ dirFid.Fid.Volume = avc->f.fid.Fid.Volume;
+ dirFid.Fid.Vnode = avc->f.parent.vnode;
+ dirFid.Fid.Unique = avc->f.parent.unique;
adp = afs_GetVCache(&dirFid, &treq, NULL, NULL);
if (adp) {
goto done;
/* lock in appropriate order, after some checks */
- if (aodp->fid.Cell != andp->fid.Cell
- || aodp->fid.Fid.Volume != andp->fid.Fid.Volume) {
+ if (aodp->f.fid.Cell != andp->f.fid.Cell
+ || aodp->f.fid.Fid.Volume != andp->f.fid.Fid.Volume) {
code = EXDEV;
goto done;
}
oneDir = 0;
code = 0;
- if (andp->fid.Fid.Vnode == aodp->fid.Fid.Vnode) {
+ if (andp->f.fid.Fid.Vnode == aodp->f.fid.Fid.Vnode) {
if (!strcmp(aname1, aname2)) {
/* Same directory and same name; this is a noop and just return success
* to save cycles and follow posix standards */
}
tdc2 = tdc1;
oneDir = 1; /* only one dude locked */
- } else if ((andp->states & CRO) || (aodp->states & CRO)) {
+ } else if ((andp->f.states & CRO) || (aodp->f.states & CRO)) {
code = EROFS;
goto done;
- } else if (andp->fid.Fid.Vnode < aodp->fid.Fid.Vnode) {
+ } else if (andp->f.fid.Fid.Vnode < aodp->f.fid.Fid.Vnode) {
ObtainWriteLock(&andp->lock, 148); /* lock smaller one first */
ObtainWriteLock(&aodp->lock, 149);
tdc2 = afs_FindDCache(andp, (afs_size_t) 0);
* received a callback while we were waiting for the write lock.
*/
if (tdc1) {
- if (!(aodp->states & CStatd)
- || !hsame(aodp->m.DataVersion, tdc1->f.versionNo)) {
+ if (!(aodp->f.states & CStatd)
+ || !hsame(aodp->f.m.DataVersion, tdc1->f.versionNo)) {
ReleaseWriteLock(&aodp->lock);
if (!oneDir) {
if (!AFS_IS_DISCON_RW) {
/* Connected. */
do {
- tc = afs_Conn(&aodp->fid, areq, SHARED_LOCK);
+ tc = afs_Conn(&aodp->f.fid, areq, SHARED_LOCK);
if (tc) {
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RENAME);
RX_AFS_GUNLOCK();
code =
RXAFS_Rename(tc->id,
- (struct AFSFid *)&aodp->fid.Fid,
+ (struct AFSFid *)&aodp->f.fid.Fid,
aname1,
- (struct AFSFid *)&andp->fid.Fid,
+ (struct AFSFid *)&andp->f.fid.Fid,
aname2,
&OutOldDirStatus,
&OutNewDirStatus,
code = -1;
} while (afs_Analyze
- (tc, code, &andp->fid, areq, AFS_STATS_FS_RPCIDX_RENAME,
+ (tc, code, &andp->f.fid, areq, AFS_STATS_FS_RPCIDX_RENAME,
SHARED_LOCK, NULL));
} else {
/* Disconnected. */
/* Seek moved file vcache. */
- fileFid.Cell = aodp->fid.Cell;
- fileFid.Fid.Volume = aodp->fid.Fid.Volume;
+ fileFid.Cell = aodp->f.fid.Cell;
+ fileFid.Fid.Volume = aodp->f.fid.Fid.Volume;
ObtainSharedLock(&afs_xvcache, 754);
tvc = afs_FindVCache(&fileFid, 0 , 1);
ReleaseSharedLock(&afs_xvcache);
if (tvc) {
/* XXX - We're locking this vcache whilst holding dcaches. Ooops */
ObtainWriteLock(&tvc->lock, 750);
- if (!(tvc->ddirty_flags & (VDisconRename|VDisconCreate))) {
+ if (!(tvc->f.ddirty_flags & (VDisconRename|VDisconCreate))) {
/* If the vnode was created locally, then we don't care
* about recording the rename - we'll do it automatically
* on replay. If we've already renamed, we've already stored
* the required information about where we came from.
*/
- if (!aodp->shVnode) {
+ if (!aodp->f.shadow.vnode) {
/* Make shadow copy of parent dir only. */
afs_MakeShadowDir(aodp, tdc1);
}
+ /* Save old parent dir fid so it will be searchable
+ * in the shadow dir.
+ */
+ tvc->f.oldParent.vnode = aodp->f.fid.Fid.Vnode;
+ tvc->f.oldParent.unique = aodp->f.fid.Fid.Unique;
+
afs_DisconAddDirty(tvc,
VDisconRename
| (oneDir ? VDisconRenameSameDir:0),
1);
-
- /* Save old parent dir fid so it will be searchable
- * in the shadow dir.
- */
- tvc->oldVnode = aodp->fid.Fid.Vnode;
- tvc->oldUnique = aodp->fid.Fid.Unique;
}
ReleaseWriteLock(&tvc->lock);
/* update dir link counts */
if (AFS_IS_DISCON_RW) {
if (!oneDir) {
- aodp->m.LinkCount--;
- andp->m.LinkCount++;
+ aodp->f.m.LinkCount--;
+ andp->f.m.LinkCount++;
}
/* If we're in the same directory, link count doesn't change */
} else {
- aodp->m.LinkCount = OutOldDirStatus.LinkCount;
+ aodp->f.m.LinkCount = OutOldDirStatus.LinkCount;
if (!oneDir)
- andp->m.LinkCount = OutNewDirStatus.LinkCount;
+ andp->f.m.LinkCount = OutNewDirStatus.LinkCount;
}
} else { /* operation failed (code != 0) */
ObtainWriteLock(&afs_xcbhash, 498);
afs_DequeueCallback(aodp);
afs_DequeueCallback(andp);
- andp->states &= ~CStatd;
- aodp->states &= ~CStatd;
+ andp->f.states &= ~CStatd;
+ aodp->f.states &= ~CStatd;
ReleaseWriteLock(&afs_xcbhash);
osi_dnlc_purgedp(andp);
osi_dnlc_purgedp(aodp);
}
ReleaseWriteLock(&aodp->lock);
- if (!oneDir)
+
+ if (!oneDir) {
ReleaseWriteLock(&andp->lock);
+ }
+
if (returnCode) {
code = returnCode;
goto done;
* entry */
if (unlinkFid.Fid.Vnode) {
- unlinkFid.Fid.Volume = aodp->fid.Fid.Volume;
- unlinkFid.Cell = aodp->fid.Cell;
+ unlinkFid.Fid.Volume = aodp->f.fid.Fid.Volume;
+ unlinkFid.Cell = aodp->f.fid.Cell;
tvc = NULL;
if (!unlinkFid.Fid.Unique) {
tvc = afs_LookupVCache(&unlinkFid, areq, NULL, aodp, aname1);
afs_BozonLock(&tvc->pvnLock, tvc); /* Since afs_TryToSmush will do a pvn_vptrunc */
#endif
ObtainWriteLock(&tvc->lock, 151);
- tvc->m.LinkCount--;
- tvc->states &= ~CUnique; /* For the dfs xlator */
- if (tvc->m.LinkCount == 0 && !osi_Active(tvc)) {
+ tvc->f.m.LinkCount--;
+ tvc->f.states &= ~CUnique; /* For the dfs xlator */
+ if (tvc->f.m.LinkCount == 0 && !osi_Active(tvc)) {
/* if this was last guy (probably) discard from cache.
* We have to be careful to not get rid of the stat
* information, since otherwise operations will start
/* now handle ".." invalidation */
if (!oneDir) {
- fileFid.Fid.Volume = aodp->fid.Fid.Volume;
- fileFid.Cell = aodp->fid.Cell;
+ fileFid.Fid.Volume = aodp->f.fid.Fid.Volume;
+ fileFid.Cell = aodp->f.fid.Cell;
if (!fileFid.Fid.Unique)
tvc = afs_LookupVCache(&fileFid, areq, NULL, andp, aname2);
else
/* If disconnected, we need to fix (not discard) the "..".*/
afs_dir_ChangeFid(tdc1,
"..",
- &aodp->fid.Fid.Vnode,
- &andp->fid.Fid.Vnode);
+ &aodp->f.fid.Fid.Vnode,
+ &andp->f.fid.Fid.Vnode);
#endif
} else {
ObtainWriteLock(&tdc1->lock, 648);
ReleaseWriteLock(&tvc->lock);
afs_PutVCache(tvc);
} else if (AFS_IS_DISCON_RW && tvc && (vType(tvc) == VREG)) {
- tvc->parentVnode = andp->fid.Fid.Vnode;
- tvc->parentUnique = andp->fid.Fid.Unique;
+ /* XXX - Should tvc not get locked here? */
+ tvc->f.parent.vnode = andp->f.fid.Fid.Vnode;
+ tvc->f.parent.unique = andp->f.fid.Fid.Unique;
} else if (tvc) {
/* True we shouldn't come here since tvc SHOULD be a dir, but we
* 'syntactically' need to unless we change the 'if' above...
* to it, go ahead and write protect the page. This way we will detect
* storing beyond EOF in the future
*/
- if (dbtob(abp->b_blkno) + abp->b_bcount > tvc->m.Length) {
+ if (dbtob(abp->b_blkno) + abp->b_bcount > tvc->f.m.Length) {
if ((abp->b_flags & B_PFSTORE) == 0) {
AFS_GUNLOCK();
vm_protectp(tvc->segid, dbtob(abp->b_blkno) / PAGESIZE,
* XXX It this really right? Ideally we should always write block size multiple
* and not any arbitrary size, right? XXX
*/
- len = MIN(len, tvc->m.Length - dbtob(abp->b_blkno));
+ len = MIN(len, tvc->f.m.Length - dbtob(abp->b_blkno));
#endif
#ifdef AFS_OSF_ENV
len =
MIN(abp->b_bcount,
- (VTOAFS(abp->b_vp))->m.Length - dbtob(abp->b_blkno));
+ (VTOAFS(abp->b_vp))->f.m.Length - dbtob(abp->b_blkno));
#endif /* AFS_OSF_ENV */
tuio.afsio_resid = len;
#if defined(AFS_XBSD_ENV)
}
len = strlen(aname);
- avc->m.Length = len;
+ avc->f.m.Length = len;
ObtainWriteLock(&tdc->lock, 720);
afs_AdjustSize(tdc, len);
/** If the volume is read-only, return error without making an RPC to the
* fileserver
*/
- if (adp->states & CRO) {
+ if (adp->f.states & CRO) {
code = EROFS;
goto done;
}
alen++; /* add in the null */
}
tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &offset, &len, 1);
- volp = afs_FindVolume(&adp->fid, READ_LOCK); /*parent is also in same vol */
+ volp = afs_FindVolume(&adp->f.fid, READ_LOCK); /*parent is also in same vol */
ObtainWriteLock(&adp->lock, 156);
if (tdc)
ObtainWriteLock(&tdc->lock, 636);
/* XXX Pay attention to afs_xvcache around the whole thing!! XXX */
if (!AFS_IS_DISCON_RW) {
do {
- tc = afs_Conn(&adp->fid, &treq, SHARED_LOCK);
+ tc = afs_Conn(&adp->f.fid, &treq, SHARED_LOCK);
if (tc) {
hostp = tc->srvr->server;
XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SYMLINK);
- if (adp->states & CForeign) {
+ if (adp->f.states & CForeign) {
now = osi_Time();
RX_AFS_GUNLOCK();
code =
RXAFS_DFSSymlink(tc->id,
- (struct AFSFid *)&adp->fid.Fid,
+ (struct AFSFid *)&adp->f.fid.Fid,
aname, atargetName, &InStatus,
(struct AFSFid *)&newFid.Fid,
&OutFidStatus, &OutDirStatus,
} else {
RX_AFS_GUNLOCK();
code =
- RXAFS_Symlink(tc->id, (struct AFSFid *)&adp->fid.Fid,
+ RXAFS_Symlink(tc->id, (struct AFSFid *)&adp->f.fid.Fid,
aname, atargetName, &InStatus,
(struct AFSFid *)&newFid.Fid,
&OutFidStatus, &OutDirStatus, &tsync);
} else
code = -1;
} while (afs_Analyze
- (tc, code, &adp->fid, &treq, AFS_STATS_FS_RPCIDX_SYMLINK,
+ (tc, code, &adp->f.fid, &treq, AFS_STATS_FS_RPCIDX_SYMLINK,
SHARED_LOCK, NULL));
} else {
#ifdef AFS_DISCON_ENV
- newFid.Cell = adp->fid.Cell;
- newFid.Fid.Volume = adp->fid.Fid.Volume;
+ newFid.Cell = adp->f.fid.Cell;
+ newFid.Fid.Volume = adp->f.fid.Fid.Volume;
afs_GenFakeFid(&newFid, VREG, 0);
#endif
}
if (code < 0) {
ObtainWriteLock(&afs_xcbhash, 499);
afs_DequeueCallback(adp);
- adp->states &= ~CStatd;
+ adp->f.states &= ~CStatd;
ReleaseWriteLock(&afs_xcbhash);
osi_dnlc_purgedp(adp);
}
ReleaseWriteLock(&tdc->lock);
afs_PutDCache(tdc);
}
- newFid.Cell = adp->fid.Cell;
- newFid.Fid.Volume = adp->fid.Fid.Volume;
+ newFid.Cell = adp->f.fid.Cell;
+ newFid.Fid.Volume = adp->f.fid.Fid.Volume;
ReleaseWriteLock(&adp->lock);
/* now we're done with parent dir, create the link's entry. Note that
}
ObtainWriteLock(&tvc->lock, 157);
ObtainWriteLock(&afs_xcbhash, 500);
- tvc->states |= CStatd; /* have valid info */
- tvc->states &= ~CBulkFetching;
+ tvc->f.states |= CStatd; /* have valid info */
+ tvc->f.states &= ~CBulkFetching;
- if (adp->states & CForeign) {
- tvc->states |= CForeign;
+ if (adp->f.states & CForeign) {
+ tvc->f.states |= CForeign;
/* We don't have to worry about losing the callback since we're doing it
* under the afs_xvcache lock actually, afs_NewVCache may drop the
* afs_xvcache lock, if it calls afs_FlushVCache */
afs_PutDCache(tdc);
return EFAULT;
}
- if (avc->m.Mode & 0111)
+ if (avc->f.m.Mode & 0111)
alen = len + 1; /* regular link */
else
alen = len; /* mt point */
tdc = afs_GetDCache(avc, (afs_size_t) 0, areq, &offset, &len, 0);
afs_Trace3(afs_iclSetp, CM_TRACE_UFSLINK, ICL_TYPE_POINTER, avc,
ICL_TYPE_POINTER, tdc, ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(avc->m.Length));
+ ICL_HANDLE_OFFSET(avc->f.m.Length));
if (!tdc) {
if (AFS_IS_DISCONNECTED)
return ENETDOWN;
afs_PutDCache(tdc);
return EFAULT;
}
- if (avc->m.Mode & 0111)
+ if (avc->f.m.Mode & 0111)
alen = len + 1; /* regular link */
else
alen = len; /* mt point */
* ourselves now. If we're called by the CCore clearer, the CCore
* flag will already be clear, so we don't have to worry about
* clearing it twice. */
- if (avc->states & CCore) {
- avc->states &= ~CCore;
+ if (avc->f.states & CCore) {
+ avc->f.states &= ~CCore;
#if defined(AFS_SGI_ENV)
osi_Assert(avc->opens > 0 && avc->execsOrWriters > 0);
#endif
afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(avc->m.Length));
+ ICL_HANDLE_OFFSET(avc->f.m.Length));
if (!noLock) {
afs_MaybeWakeupTruncateDaemon();
ObtainWriteLock(&avc->lock, 126);
* Since we are called via strategy, we need to trim the write to
* the actual size of the file
*/
- osi_Assert(filePos <= avc->m.Length);
- diff = avc->m.Length - filePos;
+ osi_Assert(filePos <= avc->f.m.Length);
+ diff = avc->f.m.Length - filePos;
AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
totalLength = AFS_UIO_RESID(auio);
}
#if defined(AFS_SUN56_ENV)
auio->uio_loffset = 0;
#endif
- filePos = avc->m.Length;
+ filePos = avc->f.m.Length;
AFS_UIO_SETOFFSET(auio, filePos);
}
#endif
* Note that we use startDate rather than calling osi_Time() here.
* This is to avoid counting lock-waiting time in file date (for ranlib).
*/
- avc->m.Date = startDate;
+ avc->f.m.Date = startDate;
#if defined(AFS_HPUX_ENV)
#if defined(AFS_HPUX101_ENV)
#else
afs_FakeOpen(avc);
#endif
- avc->states |= CDirty;
+ avc->f.states |= CDirty;
#ifndef AFS_DARWIN80_ENV
tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
#endif
filePos += len;
#if defined(AFS_SGI_ENV)
/* afs_xwrite handles setting m.Length */
- osi_Assert(filePos <= avc->m.Length);
+ osi_Assert(filePos <= avc->f.m.Length);
#else
- if (filePos > avc->m.Length) {
+ if (filePos > avc->f.m.Length) {
#if AFS_DISCON_ENV
if (AFS_IS_DISCON_RW)
afs_PopulateDCache(avc, filePos, &treq);
#endif
afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
__FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
+ ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
ICL_HANDLE_OFFSET(filePos));
- avc->m.Length = filePos;
+ avc->f.m.Length = filePos;
}
#endif
ReleaseWriteLock(&tdc->lock);
afs_Trace4(afs_iclSetp, CM_TRACE_WRITE, ICL_TYPE_POINTER, avc,
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(filePos), ICL_TYPE_OFFSET,
ICL_HANDLE_OFFSET(totalLength), ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(avc->m.Length));
+ ICL_HANDLE_OFFSET(avc->f.m.Length));
if (!noLock) {
afs_MaybeWakeupTruncateDaemon();
ObtainWriteLock(&avc->lock, 556);
* Since we are called via strategy, we need to trim the write to
* the actual size of the file
*/
- osi_Assert(filePos <= avc->m.Length);
- diff = avc->m.Length - filePos;
+ osi_Assert(filePos <= avc->f.m.Length);
+ diff = avc->f.m.Length - filePos;
AFS_UIO_SETRESID(auio, MIN(totalLength, diff));
totalLength = AFS_UIO_RESID(auio);
}
#if defined(AFS_SUN56_ENV)
auio->uio_loffset = 0;
#endif
- filePos = avc->m.Length;
- AFS_UIO_SETOFFSET(auio, avc->m.Length);
+ filePos = avc->f.m.Length;
+ AFS_UIO_SETOFFSET(auio, avc->f.m.Length);
}
#endif
/*
* Note that we use startDate rather than calling osi_Time() here.
* This is to avoid counting lock-waiting time in file date (for ranlib).
*/
- avc->m.Date = startDate;
+ avc->f.m.Date = startDate;
#if defined(AFS_HPUX_ENV)
#if defined(AFS_HPUX101_ENV)
#else
afs_FakeOpen(avc);
#endif
- avc->states |= CDirty;
+ avc->f.states |= CDirty;
#ifndef AFS_DARWIN80_ENV
tvec = (struct iovec *)osi_AllocSmallSpace(sizeof(struct iovec));
#endif
("\n\n\n*** Cache partition is full - decrease cachesize!!! ***\n\n\n");
#elif defined(AFS_SGI_ENV)
AFS_GUNLOCK();
- avc->states |= CWritingUFS;
+ avc->f.states |= CWritingUFS;
AFS_VOP_RWLOCK(tfile->vnode, VRWLOCK_WRITE);
AFS_VOP_WRITE(tfile->vnode, &tuio, IO_ISLOCKED, afs_osi_credp, code);
AFS_VOP_RWUNLOCK(tfile->vnode, VRWLOCK_WRITE);
- avc->states &= ~CWritingUFS;
+ avc->f.states &= ~CWritingUFS;
AFS_GLOCK();
#elif defined(AFS_OSF_ENV)
{
filePos += len;
#if defined(AFS_SGI_ENV)
/* afs_xwrite handles setting m.Length */
- osi_Assert(filePos <= avc->m.Length);
+ osi_Assert(filePos <= avc->f.m.Length);
#else
- if (filePos > avc->m.Length) {
+ if (filePos > avc->f.m.Length) {
# ifdef AFS_DISCON_ENV
if (AFS_IS_DISCON_RW)
afs_PopulateDCache(avc, filePos, &treq);
# endif
afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH, ICL_TYPE_STRING,
__FILE__, ICL_TYPE_LONG, __LINE__, ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_OFFSET,
+ ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
ICL_HANDLE_OFFSET(filePos));
- avc->m.Length = filePos;
+ avc->f.m.Length = filePos;
}
#endif
osi_UFSClose(tfile);
return 0; /* nothing to do */
/* otherwise, call afs_StoreDCache (later try to do this async, if possible) */
afs_Trace2(afs_iclSetp, CM_TRACE_PARTIALWRITE, ICL_TYPE_POINTER, avc,
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
#if defined(AFS_SUN5_ENV)
code = afs_StoreAllSegments(avc, areq, AFS_ASYNC | AFS_VMSYNC_INVAL);
HandleFlock(avc, LOCK_UN, &treq, OSI_GET_CURRENT_PID(), 1 /*onlymine */ );
#endif /* AFS_SGI65_ENV */
/* afs_chkpgoob will drop and re-acquire the global lock. */
- afs_chkpgoob(&avc->v, btoc(avc->m.Length));
+ afs_chkpgoob(&avc->v, btoc(avc->f.m.Length));
#elif defined(AFS_SUN5_ENV)
if (count > 1) {
/* The vfs layer may call this repeatedly with higher "count"; only on the last close (i.e. count = 1) we should actually proceed with the close. */
ReleaseWriteLock(&avc->lock);
}
#ifdef AFS_OSF_ENV
- if ((VREFCOUNT(avc) <= 2) && (avc->states & CUnlinked)) {
+ if ((VREFCOUNT(avc) <= 2) && (avc->f.states & CUnlinked)) {
afs_remunlink(avc, 1); /* ignore any return code */
}
#endif
struct afs_q *prev;
};
+#define QZero(e) ((e)->prev = (e)->next = NULL)
#define QInit(q) ((q)->prev = (q)->next = (q))
#define QAdd(q,e) ((e)->next = (q)->next, (e)->prev = (q), \
(q)->next->prev = (e), (q)->next = (e))
#define QTOC(e) QEntry(e, struct cell, lruq)
#define QTOVH(e) QEntry(e, struct vcache, vhashq)
+/*!
+ * List of free slot numbers
+ */
+struct afs_slotlist {
+ afs_uint32 slot;
+ struct afs_slotlist *next;
+};
+
struct vrequest {
afs_int32 uid; /* user id making the request */
afs_int32 busyCount; /* how many busies we've seen so far */
|| (a)->Fid.Volume != (b)->Fid.Volume \
|| (a)->Cell != (b)->Cell)
-#define FidMatches(afid,tvc) ((tvc)->fid.Fid.Vnode == (afid)->Fid.Vnode && \
- (tvc)->fid.Fid.Volume == (afid)->Fid.Volume && \
- (tvc)->fid.Cell == (afid)->Cell && \
- ( (tvc)->fid.Fid.Unique == (afid)->Fid.Unique || \
- (!(afid)->Fid.Unique && ((tvc)->states & CUnique))))
+#define FidMatches(afid,tvc) ((tvc)->f.fid.Fid.Vnode == (afid)->Fid.Vnode && \
+ (tvc)->f.fid.Fid.Volume == (afid)->Fid.Volume && \
+ (tvc)->f.fid.Cell == (afid)->Cell && \
+ ( (tvc)->f.fid.Fid.Unique == (afid)->Fid.Unique || \
+ (!(afid)->Fid.Unique && ((tvc)->f.states & CUnique))))
#define SRVADDR_MH 1
#define CBulkFetching 0x04000000 /* stats are being fetched by bulk stat */
#define CExtendedFile 0x08000000 /* extended file via ftruncate call. */
#define CVInit 0x10000000 /* being initialized */
+#define CMetaDirty 0x20000000 /* vnode meta-data needs to be flushed */
/* vcache vstate bits */
#define VRevokeWait 0x1
#define AFSTOV(V) (&(V)->v)
#endif
+struct afs_vnuniq {
+ afs_uint32 vnode;
+ afs_uint32 unique;
+};
+
+/* VCache elements which are kept on disk, and in the kernel */
+struct fvcache {
+ struct VenusFid fid;
+ struct mstat {
+ afs_size_t Length;
+ afs_hyper_t DataVersion;
+ afs_uint32 Date;
+ afs_uint32 Owner;
+ afs_uint32 Group;
+ afs_uint16 Mode; /* XXXX Should be afs_int32 XXXX */
+ afs_uint16 LinkCount;
+#ifdef AFS_DARWIN80_ENV
+ afs_uint16 Type;
+#else
+ /* vnode type is in v.v_type */
+#endif
+ } m;
+ struct afs_vnuniq parent;
+
+ /*! Truncate file to this position at the next store */
+ afs_size_t truncPos;
+
+ /*! System:AnyUser's access to this. */
+ afs_int32 anyAccess;
+
+ /*! state bits */
+ afs_uint32 states;
+
+#if defined(AFS_DISCON_ENV)
+ /*! Disconnected flags for this vcache element. */
+ afs_uint32 ddirty_flags;
+ /*! Shadow vnode + unique keep the shadow dir location. */
+ struct afs_vnuniq shadow;
+ /*! The old parent FID for renamed vnodes */
+ struct afs_vnuniq oldParent;
+#endif
+};
+
/* INVARIANTs: (vlruq.next != NULL) == (vlruq.prev != NULL)
* nextfree => !vlruq.next && ! vlruq.prev
* !(avc->nextfree) && !avc->vlruq.next => (FreeVCList == avc->nextfree)
struct afs_q dirtyq;
/*! Queue of vcaches with shadow entries. Lock with afs_disconDirtyLock */
struct afs_q shadowq;
- /*! Disconnected flags for this vcache element. */
- uint32_t ddirty_flags;
- /*! Shadow vnode + unique keep the shadow dir location. */
- afs_uint32 shVnode;
- afs_uint32 shUnique;
- /*! The old parent FID for renamed vnodes. */
- afs_uint32 oldVnode;
- afs_uint32 oldUnique;
-#endif
-
- struct VenusFid fid;
- struct mstat {
- afs_size_t Length;
- afs_hyper_t DataVersion;
- afs_uint32 Date;
- afs_uint32 Owner;
- afs_uint32 Group;
- afs_uint16 Mode; /* XXXX Should be afs_int32 XXXX */
- afs_uint16 LinkCount;
-#ifdef AFS_DARWIN80_ENV
- afs_uint16 Type;
-#else
- /* vnode type is in v.v_type */
+ /*! Queue of vcaches with dirty metadata. Locked by afs_xvcdirty */
+ struct afs_q metadirty;
+ /*! Vcache's slot number in the disk backup. Protected by tvc->lock */
+ afs_uint32 diskSlot;
#endif
- } m;
+ struct fvcache f;
afs_rwlock_t lock; /* The lock on the vcache contents. */
#if defined(AFS_SUN5_ENV)
/* Lock used to protect the activeV, multipage, and vstates fields.
#ifdef AFS_XBSD_ENV
struct lock rwlock;
#endif
- afs_int32 parentVnode; /* Parent dir, if a file. */
- afs_int32 parentUnique;
+
struct VenusFid *mvid; /* Either parent dir (if root) or root (if mt pt) */
char *linkData; /* Link data if a symlink. */
afs_hyper_t flushDV; /* data version last flushed from text */
afs_hyper_t mapDV; /* data version last flushed from map */
- afs_size_t truncPos; /* truncate file to this position at next store */
struct server *callback; /* The callback host, if any */
afs_uint32 cbExpires; /* time the callback expires */
struct afs_q callsort; /* queue in expiry order, sort of */
struct axscache *Access; /* a list of cached access bits */
- afs_int32 anyAccess; /* System:AnyUser's access to this. */
afs_int32 last_looker; /* pag/uid from last lookup here */
#if defined(AFS_SUN5_ENV)
afs_int32 activeV;
#else
off_t next_seq_blk_offset; /* accounted in blocks for Solaris & IRIX */
#endif
-#endif
+#endif
- afs_uint32 states; /* state bits */
#if defined(AFS_SUN5_ENV)
afs_uint32 vstates; /* vstate bits */
#endif /* defined(AFS_SUN5_ENV) */
#define afs_FakeClose(avc, acred) \
{ if (avc->execsOrWriters == 1) { \
/* we're the last writer, just use CCore flag */ \
- avc->states |= CCore; /* causes close to be called later */ \
+ avc->f.states |= CCore; /* causes close to be called later */ \
\
/* The cred and vnode holds will be released in afs_FlushActiveVcaches */ \
VN_HOLD(AFSTOV(avc)); /* So it won't disappear */ \
#define AFS_ZEROS 64 /* zero buffer */
-/*#define afs_DirtyPages(avc) (((avc)->states & CDirty) || osi_VMDirty_p((avc)))*/
-#define afs_DirtyPages(avc) ((avc)->states & CDirty)
+/*#define afs_DirtyPages(avc) (((avc)->f.states & CDirty) || osi_VMDirty_p((avc)))*/
+#define afs_DirtyPages(avc) ((avc)->f.states & CDirty)
-#define afs_InReadDir(avc) (((avc)->states & CReadDir) && (avc)->readdir_pid == MyPidxx)
+#define afs_InReadDir(avc) (((avc)->f.states & CReadDir) && (avc)->readdir_pid == MyPidxx)
/* The PFlush algorithm makes use of the fact that Fid.Unique is not used in
below hash algorithms. Change it if need be so that flushing algorithm
* expiration/breaking code */
#ifdef AFS_DARWIN_ENV
#define afs_VerifyVCache(avc, areq) \
- (((avc)->states & CStatd) ? (osi_VM_Setup(avc, 0), 0) : \
+ (((avc)->f.states & CStatd) ? (osi_VM_Setup(avc, 0), 0) : \
afs_VerifyVCache2((avc),areq))
#else
#define afs_VerifyVCache(avc, areq) \
- (((avc)->states & CStatd) ? 0 : afs_VerifyVCache2((avc),areq))
+ (((avc)->f.states & CStatd) ? 0 : afs_VerifyVCache2((avc),areq))
#endif
#define DO_STATS 1 /* bits used by FindVCache */
if(!avc)
return;
- if(avc->states & FCSBypass)
+ if(avc->f.states & FCSBypass)
osi_Panic("afs_TransitionToBypass: illegal transition to bypass--already FCSBypass\n");
if(aflags & TRANSChangeDesiredBit)
/* If we never cached this, just change state */
if(setDesire && (!avc->cachingStates & FCSBypass)) {
- avc->states |= FCSBypass;
+ avc->f.states |= FCSBypass;
goto done;
}
/* cg2v, try to store any chunks not written 20071204 */
afs_DequeueCallback(avc);
ReleaseWriteLock(&afs_xcbhash);
#endif
- avc->states &= ~(CStatd | CDirty); /* next reference will re-stat cache entry */
+ avc->f.states &= ~(CStatd | CDirty); /* next reference will re-stat cache entry */
/* now find the disk cache entries */
afs_TryToSmush(avc, acred, 1);
osi_dnlc_purgedp(avc);
- if (avc->linkData && !(avc->states & CCore)) {
+ if (avc->linkData && !(avc->f.states & CCore)) {
afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
avc->linkData = NULL;
}
if(!avc)
return;
- if(!avc->states & FCSBypass)
+ if(!(avc->f.states & FCSBypass))
osi_Panic("afs_TransitionToCaching: illegal transition to caching--already caching\n");
if(aflags & TRANSChangeDesiredBit)
/* Ok, we actually do need to flush */
ObtainWriteLock(&afs_xcbhash, 957);
afs_DequeueCallback(avc);
- avc->states &= ~(CStatd | CDirty); /* next reference will re-stat cache entry */
+ avc->f.states &= ~(CStatd | CDirty); /* next reference will re-stat cache entry */
ReleaseWriteLock(&afs_xcbhash);
/* now find the disk cache entries */
afs_TryToSmush(avc, acred, 1);
osi_dnlc_purgedp(avc);
- if (avc->linkData && !(avc->states & CCore)) {
+ if (avc->linkData && !(avc->f.states & CCore)) {
afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
avc->linkData = NULL;
}
* We do not do this for AFS file servers because they sometimes
* return large negative numbers as the transfer size.
*/
- if (avc->states & CForeign) {
+ if (avc->f.states & CForeign) {
moredata = length & 0x80000000;
length &= ~0x80000000;
} else {
tcallspec = (struct tlocal1 *) osi_Alloc(sizeof(struct tlocal1));
do {
- tc = afs_Conn(&avc->fid, areq, SHARED_LOCK /* ignored */);
+ tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK /* ignored */);
if (tc) {
avc->callback = tc->srvr->server;
i = osi_Time();
#ifdef AFS_64BIT_CLIENT
if(!afs_serverHasNo64Bit(tc)) {
code = StartRXAFS_FetchData64(tcall,
- (struct AFSFid *) &avc->fid.Fid,
+ (struct AFSFid *) &avc->f.fid.Fid,
auio->uio_offset,
bparms->length);
if (code == 0) {
if (!tcall)
tcall = rx_NewCall(tc->id);
code = StartRXAFS_FetchData(tcall,
- (struct AFSFid *) &avc->fid.Fid,
+ (struct AFSFid *) &avc->f.fid.Fid,
pos, bparms->length);
COND_RE_GLOCK(locked);
}
} /* afs_serverHasNo64Bit */
#else
code = StartRXAFS_FetchData(tcall,
- (struct AFSFid *) &avc->fid.Fid,
+ (struct AFSFid *) &avc->f.fid.Fid,
auio->uio_offset, bparms->length);
#endif
#endif
goto done;
}
- } while (afs_Analyze(tc, code, &avc->fid, areq,
+ } while (afs_Analyze(tc, code, &avc->f.fid, areq,
AFS_STATS_FS_RPCIDX_FETCHDATA,
SHARED_LOCK,0));
done:
#ifdef AFS_DISCON_ENV
{ "afs_discon_lock", (char *)&afs_discon_lock},
{ "afs_disconDirtyLock", (char *)&afs_disconDirtyLock},
+ { "afs_discon_vc_dirty", (char *)&afs_xvcdirty},
#endif
};
unsigned long lastCallBack_vnode;
* Copy out the located entry.
*/
a_result->addr = afs_data_pointer_to_int32(tvc);
- a_result->cell = tvc->fid.Cell;
- a_result->netFid.Volume = tvc->fid.Fid.Volume;
- a_result->netFid.Vnode = tvc->fid.Fid.Vnode;
- a_result->netFid.Unique = tvc->fid.Fid.Unique;
+ a_result->cell = tvc->f.fid.Cell;
+ a_result->netFid.Volume = tvc->f.fid.Fid.Volume;
+ a_result->netFid.Vnode = tvc->f.fid.Fid.Vnode;
+ a_result->netFid.Unique = tvc->f.fid.Fid.Unique;
a_result->lock.waitStates = tvc->lock.wait_states;
a_result->lock.exclLocked = tvc->lock.excl_locked;
a_result->lock.readersReading = tvc->lock.readers_reading;
a_result->lock.src_indicator = 0;
#endif /* AFS_OSF20_ENV */
#ifdef AFS_64BIT_CLIENT
- a_result->Length = (afs_int32) tvc->m.Length & 0xffffffff;
+ a_result->Length = (afs_int32) tvc->f.m.Length & 0xffffffff;
#else /* AFS_64BIT_CLIENT */
- a_result->Length = tvc->m.Length;
+ a_result->Length = tvc->f.m.Length;
#endif /* AFS_64BIT_CLIENT */
- a_result->DataVersion = hgetlo(tvc->m.DataVersion);
+ a_result->DataVersion = hgetlo(tvc->f.m.DataVersion);
a_result->callback = afs_data_pointer_to_int32(tvc->callback); /* XXXX Now a pointer; change it XXXX */
a_result->cbExpires = tvc->cbExpires;
- if (tvc->states & CVInit) {
+ if (tvc->f.states & CVInit) {
a_result->refCount = 1;
} else {
#ifdef AFS_DARWIN80_ENV
a_result->opens = tvc->opens;
a_result->writers = tvc->execsOrWriters;
a_result->mvstat = tvc->mvstat;
- a_result->states = tvc->states;
+ a_result->states = tvc->f.states;
code = 0;
/*
* Copy out the located entry.
*/
a_result->addr = afs_data_pointer_to_int32(tvc);
- a_result->cell = tvc->fid.Cell;
- a_result->netFid.Volume = tvc->fid.Fid.Volume;
- a_result->netFid.Vnode = tvc->fid.Fid.Vnode;
- a_result->netFid.Unique = tvc->fid.Fid.Unique;
+ a_result->cell = tvc->f.fid.Cell;
+ a_result->netFid.Volume = tvc->f.fid.Fid.Volume;
+ a_result->netFid.Vnode = tvc->f.fid.Fid.Vnode;
+ a_result->netFid.Unique = tvc->f.fid.Fid.Unique;
a_result->lock.waitStates = tvc->lock.wait_states;
a_result->lock.exclLocked = tvc->lock.excl_locked;
a_result->lock.readersReading = tvc->lock.readers_reading;
#endif /* AFS_OSF20_ENV */
#if !defined(AFS_64BIT_ENV)
a_result->Length.high = 0;
- a_result->Length.low = tvc->m.Length;
+ a_result->Length.low = tvc->f.m.Length;
#else
- a_result->Length = tvc->m.Length;
+ a_result->Length = tvc->f.m.Length;
#endif
- a_result->DataVersion = hgetlo(tvc->m.DataVersion);
+ a_result->DataVersion = hgetlo(tvc->f.m.DataVersion);
a_result->callback = afs_data_pointer_to_int32(tvc->callback); /* XXXX Now a pointer; change it XXXX */
a_result->cbExpires = tvc->cbExpires;
- if (tvc->states & CVInit) {
+ if (tvc->f.states & CVInit) {
a_result->refCount = 1;
} else {
#ifdef AFS_DARWIN80_ENV
a_result->opens = tvc->opens;
a_result->writers = tvc->execsOrWriters;
a_result->mvstat = tvc->mvstat;
- a_result->states = tvc->states;
+ a_result->states = tvc->f.states;
code = 0;
/*
for (tq = afs_vhashTV[i].prev; tq != &afs_vhashTV[i]; tq = uq) {
uq = QPrev(tq);
tvc = QTOVH(tq);
- if (tvc->fid.Fid.Volume == a_fid->Volume) {
+ if (tvc->f.fid.Fid.Volume == a_fid->Volume) {
tvc->callback = NULL;
if (!localFid.Cell)
- localFid.Cell = tvc->fid.Cell;
+ localFid.Cell = tvc->f.fid.Cell;
tvc->dchint = NULL; /* invalidate hints */
- if (tvc->states & CVInit) {
+ if (tvc->f.states & CVInit) {
ReleaseReadLock(&afs_xvcache);
- afs_osi_Sleep(&tvc->states);
+ afs_osi_Sleep(&tvc->f.states);
goto loop1;
}
#ifdef AFS_DARWIN80_ENV
- if (tvc->states & CDeadVnode) {
+ if (tvc->f.states & CDeadVnode) {
ReleaseReadLock(&afs_xvcache);
- afs_osi_Sleep(&tvc->states);
+ afs_osi_Sleep(&tvc->f.states);
goto loop1;
}
#endif
ReleaseReadLock(&afs_xvcache);
ObtainWriteLock(&afs_xcbhash, 449);
afs_DequeueCallback(tvc);
- tvc->states &= ~(CStatd | CUnique | CBulkFetching);
+ tvc->f.states &= ~(CStatd | CUnique | CBulkFetching);
afs_allCBs++;
- if (tvc->fid.Fid.Vnode & 1)
+ if (tvc->f.fid.Fid.Vnode & 1)
afs_oddCBs++;
else
afs_evenCBs++;
ReleaseWriteLock(&afs_xcbhash);
- if ((tvc->fid.Fid.Vnode & 1 || (vType(tvc) == VDIR)))
+ if ((tvc->f.fid.Fid.Vnode & 1 || (vType(tvc) == VDIR)))
osi_dnlc_purgedp(tvc);
afs_Trace3(afs_iclSetp, CM_TRACE_CALLBACK,
ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
- tvc->states, ICL_TYPE_INT32,
+ tvc->f.states, ICL_TYPE_INT32,
a_fid->Volume);
#ifdef AFS_DARWIN80_ENV
vnode_put(AFSTOV(tvc));
ObtainReadLock(&afs_xvcache);
uq = QPrev(tq);
AFS_FAST_RELE(tvc);
- } else if ((tvc->states & CMValid)
+ } else if ((tvc->f.states & CMValid)
&& (tvc->mvid->Fid.Volume == a_fid->Volume)) {
- tvc->states &= ~CMValid;
+ tvc->f.states &= ~CMValid;
if (!localFid.Cell)
localFid.Cell = tvc->mvid->Cell;
}
i = VCHash(&localFid);
for (tvc = afs_vhashT[i]; tvc; tvc = uvc) {
uvc = tvc->hnext;
- if (tvc->fid.Fid.Vnode == a_fid->Vnode
- && tvc->fid.Fid.Volume == a_fid->Volume
- && tvc->fid.Fid.Unique == a_fid->Unique) {
+ if (tvc->f.fid.Fid.Vnode == a_fid->Vnode
+ && tvc->f.fid.Fid.Volume == a_fid->Volume
+ && tvc->f.fid.Fid.Unique == a_fid->Unique) {
tvc->callback = NULL;
tvc->dchint = NULL; /* invalidate hints */
- if (tvc->states & CVInit) {
+ if (tvc->f.states & CVInit) {
ReleaseReadLock(&afs_xvcache);
- afs_osi_Sleep(&tvc->states);
+ afs_osi_Sleep(&tvc->f.states);
goto loop2;
}
#ifdef AFS_DARWIN80_ENV
- if (tvc->states & CDeadVnode) {
+ if (tvc->f.states & CDeadVnode) {
ReleaseReadLock(&afs_xvcache);
- afs_osi_Sleep(&tvc->states);
+ afs_osi_Sleep(&tvc->f.states);
goto loop2;
}
#endif
ReleaseReadLock(&afs_xvcache);
ObtainWriteLock(&afs_xcbhash, 450);
afs_DequeueCallback(tvc);
- tvc->states &= ~(CStatd | CUnique | CBulkFetching);
+ tvc->f.states &= ~(CStatd | CUnique | CBulkFetching);
ReleaseWriteLock(&afs_xcbhash);
- if ((tvc->fid.Fid.Vnode & 1 || (vType(tvc) == VDIR)))
+ if ((tvc->f.fid.Fid.Vnode & 1 || (vType(tvc) == VDIR)))
osi_dnlc_purgedp(tvc);
afs_Trace3(afs_iclSetp, CM_TRACE_CALLBACK,
ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
- tvc->states, ICL_TYPE_LONG, 0);
+ tvc->f.states, ICL_TYPE_LONG, 0);
#ifdef CBDEBUG
lastCallBack_vnode = afid->Vnode;
lastCallBack_dv = tvc->mstat.DataVersion.low;
ObtainWriteLock(&afs_xcbhash, 451);
afs_DequeueCallback(tvc);
tvc->callback = NULL;
- tvc->states &= ~(CStatd | CUnique | CBulkFetching);
+ tvc->f.states &= ~(CStatd | CUnique | CBulkFetching);
ReleaseWriteLock(&afs_xcbhash);
}
}
/* Get the volume, and if its callback expiration time is more than secs
* seconds into the future, update this vcache entry and requeue it below
*/
- if ((tvc->states & CRO)
- && (tvp = afs_FindVolume(&(tvc->fid), READ_LOCK))) {
+ if ((tvc->f.states & CRO)
+ && (tvp = afs_FindVolume(&(tvc->f.fid), READ_LOCK))) {
if (tvp->expireTime > now + secs) {
tvc->cbExpires = tvp->expireTime; /* XXX race here */
} else {
/* What about locking xvcache or vrefcount++ or
* write locking tvc? */
QRemove(tq);
- tvc->states &= ~(CStatd | CMValid | CUnique);
- if (!(tvc->states & (CVInit|CVFlushed)) &&
- (tvc->fid.Fid.Vnode & 1 ||
+ tvc->f.states &= ~(CStatd | CMValid | CUnique);
+ if (!(tvc->f.states & (CVInit|CVFlushed)) &&
+ (tvc->f.fid.Fid.Vnode & 1 ||
(vType(tvc) == VDIR)))
osi_dnlc_purgedp(tvc);
tvc->dchint = NULL; /*invalidate em */
* What about locking xvcache or vrefcount++ or write locking tvc?
*/
QRemove(tq);
- tvc->states &= ~(CStatd | CMValid | CUnique);
- if (!(tvc->states & (CVInit|CVFlushed)) &&
- (tvc->fid.Fid.Vnode & 1 || (vType(tvc) == VDIR)))
+ tvc->f.states &= ~(CStatd | CMValid | CUnique);
+ if (!(tvc->f.states & (CVInit|CVFlushed)) &&
+ (tvc->f.fid.Fid.Vnode & 1 || (vType(tvc) == VDIR)))
osi_dnlc_purgedp(tvc);
}
}
for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
tvc->callback = 0;
tvc->dchint = NULL; /* invalidate hints */
- tvc->states &= ~(CStatd);
+ tvc->f.states &= ~(CStatd);
if (QPrev(&(tvc->callsort)))
QRemove(&(tvc->callsort));
- if (!(tvc->states & (CVInit|CVFlushed)) &&
- ((tvc->fid.Fid.Vnode & 1) || (vType(tvc) == VDIR)))
+ if (!(tvc->f.states & (CVInit|CVFlushed)) &&
+ ((tvc->f.fid.Fid.Vnode & 1) || (vType(tvc) == VDIR)))
osi_dnlc_purgedp(tvc);
}
if (tvc->callback == srvp) {
tvc->callback = 0;
tvc->dchint = NULL; /* invalidate hints */
- tvc->states &= ~(CStatd);
- if (!(tvc->states & (CVInit|CVFlushed)) &&
- ((tvc->fid.Fid.Vnode & 1) || (vType(tvc) == VDIR))) {
+ tvc->f.states &= ~(CStatd);
+ if (!(tvc->f.states & (CVInit|CVFlushed)) &&
+ ((tvc->f.fid.Fid.Vnode & 1) || (vType(tvc) == VDIR))) {
osi_dnlc_purgedp(tvc);
}
afs_DequeueCallback(tvc);
if (vcp->v.v_gnode->gn_mwrcnt) {
afs_offs_t newlength =
(afs_offs_t) dbtob(bp->b_blkno) + bp->b_bcount;
- if (vcp->m.Length < newlength) {
+ if (vcp->f.m.Length < newlength) {
afs_Trace4(afs_iclSetp, CM_TRACE_SETLENGTH,
ICL_TYPE_STRING, __FILE__, ICL_TYPE_LONG,
__LINE__, ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(vcp->m.Length),
+ ICL_HANDLE_OFFSET(vcp->f.m.Length),
ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(newlength));
- vcp->m.Length = newlength;
+ vcp->f.m.Length = newlength;
}
}
ReleaseWriteLock(&vcp->lock);
return 1;
/* This should be replaced with some sort of user configurable function */
- if (avc->states & CRO) {
+ if (avc->f.states & CRO) {
return 2;
- } else if (avc->states & CBackup) {
+ } else if (avc->f.states & CBackup) {
return 1;
} else {
/* RW */
if (((phase & 1) == 0) && osi_Active(tvc))
skip = 1;
if (((phase & 1) == 1) && osi_Active(tvc)
- && (tvc->states & CDCLock)
+ && (tvc->f.states & CDCLock)
&& (chunkFlags & IFAnyPages))
skip = 1;
if (chunkFlags & IFDataMod)
MObtainWriteLock(&afs_xdcache, 333);
chunkFlags = afs_indexFlags[tdc->index];
if (tdc->refCount > 1 || (chunkFlags & IFDataMod)
- || (osi_Active(tvc) && (tvc->states & CDCLock)
+ || (osi_Active(tvc) && (tvc->f.states & CDCLock)
&& (chunkFlags & IFAnyPages))) {
skip = 1;
MReleaseWriteLock(&afs_xdcache);
register int i;
AFS_STATCNT(afs_TryToSmush);
afs_Trace2(afs_iclSetp, CM_TRACE_TRYTOSMUSH, ICL_TYPE_POINTER, avc,
- ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
+ ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
sync = 1; /* XX Temp testing XX */
#if defined(AFS_SUN5_ENV)
/*
* Get the hash chain containing all dce's for this fid
*/
- i = DVHash(&avc->fid);
+ i = DVHash(&avc->f.fid);
MObtainWriteLock(&afs_xdcache, 277);
for (index = afs_dvhashTbl[i]; index != NULLIDX; index = i) {
i = afs_dvnextTbl[index]; /* next pointer this hash table */
- if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
+ if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
int releaseTlock = 1;
tdc = afs_GetDSlot(index, NULL);
- if (!FidCmp(&tdc->f.fid, &avc->fid)) {
+ if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
if (sync) {
if ((afs_indexFlags[index] & IFDataMod) == 0
&& tdc->refCount == 1) {
afs_uint32 totalChunks = 0;
struct dcache *tdc;
- totalLength = avc->m.Length;
- if (avc->truncPos < totalLength)
- totalLength = avc->truncPos;
+ totalLength = avc->f.m.Length;
+ if (avc->f.truncPos < totalLength)
+ totalLength = avc->f.truncPos;
/* Length is 0, no chunk missing. */
if (totalLength == 0)
/* If we're a directory, we only ever have one chunk, regardless of
* the size of the dir.
*/
- if (avc->fid.Fid.Vnode & 1 || vType(avc) == VDIR)
+ if (avc->f.fid.Fid.Vnode & 1 || vType(avc) == VDIR)
totalChunks = 1;
/*
printf("Should have %d chunks for %u bytes\n",
totalChunks, (totalLength + 1));
*/
- i = DVHash(&avc->fid);
+ i = DVHash(&avc->f.fid);
MObtainWriteLock(&afs_xdcache, 1001);
for (index = afs_dvhashTbl[i]; index != NULLIDX; index = i) {
i = afs_dvnextTbl[index];
- if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
+ if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
tdc = afs_GetDSlot(index, NULL);
- if (!FidCmp(&tdc->f.fid, &avc->fid)) {
+ if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
totalChunks--;
}
ReleaseReadLock(&tdc->tlock);
* Hash on the [fid, chunk] and get the corresponding dcache index
* after write-locking the dcache.
*/
- i = DCHash(&avc->fid, chunk);
+ i = DCHash(&avc->f.fid, chunk);
MObtainWriteLock(&afs_xdcache, 278);
for (index = afs_dchashTbl[i]; index != NULLIDX;) {
- if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
+ if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
tdc = afs_GetDSlot(index, NULL);
ReleaseReadLock(&tdc->tlock);
- if (!FidCmp(&tdc->f.fid, &avc->fid) && chunk == tdc->f.chunk) {
+ if (!FidCmp(&tdc->f.fid, &avc->f.fid) && chunk == tdc->f.chunk) {
break; /* leaving refCount high for caller */
}
afs_PutDCache(tdc);
#endif /* AFS_NOSTATS */
afs_Trace4(afs_iclSetp, CM_TRACE_STOREPROC, ICL_TYPE_POINTER, avc,
- ICL_TYPE_FID, &(avc->fid), ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_INT32, alen);
+ ICL_TYPE_FID, &(avc->f.fid), ICL_TYPE_OFFSET,
+ ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_INT32, alen);
tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
while (alen > 0) {
tlen = (alen > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : alen);
}
}
afs_Trace4(afs_iclSetp, CM_TRACE_STOREPROC, ICL_TYPE_POINTER, avc,
- ICL_TYPE_FID, &(avc->fid), ICL_TYPE_OFFSET,
- ICL_HANDLE_OFFSET(avc->m.Length), ICL_TYPE_INT32, alen);
+ ICL_TYPE_FID, &(avc->f.fid), ICL_TYPE_OFFSET,
+ ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_INT32, alen);
osi_FreeLargeSpace(tbuffer);
return 0;
* We do not do this for AFS file servers because they sometimes
* return large negative numbers as the transfer size.
*/
- if (avc->states & CForeign) {
+ if (avc->f.states & CForeign) {
moredata = length & 0x80000000;
length &= ~0x80000000;
} else {
tdc->f.fid = *ashFid;
else
/* Use normal vcache's fid otherwise. */
- tdc->f.fid = avc->fid;
- if (avc->states & CRO)
+ tdc->f.fid = avc->f.fid;
+ if (avc->f.states & CRO)
tdc->f.states = DRO;
- else if (avc->states & CBackup)
+ else if (avc->f.states & CBackup)
tdc->f.states = DBackup;
else
tdc->f.states = DRW;
updateV2DC(int lockVc, struct vcache *v, struct dcache *d, int src)
{
if (!lockVc || 0 == NBObtainWriteLock(&v->lock, src)) {
- if (hsame(v->m.DataVersion, d->f.versionNo) && v->callback)
+ if (hsame(v->f.m.DataVersion, d->f.versionNo) && v->callback)
v->dchint = d;
if (lockVc)
ReleaseWriteLock(&v->lock);
* Determine the chunk number and offset within the chunk corresponding
* to the desired byte.
*/
- if (avc->fid.Fid.Vnode & 1) { /* if (vType(avc) == VDIR) */
+ if (avc->f.fid.Fid.Vnode & 1) { /* if (vType(avc) == VDIR) */
chunk = 0;
} else {
chunk = AFS_CHUNK(abyte);
dcLocked = (0 == NBObtainSharedLock(&tdc->lock, 601));
if (dcLocked && (tdc->index != NULLIDX)
- && !FidCmp(&tdc->f.fid, &avc->fid) && chunk == tdc->f.chunk
+ && !FidCmp(&tdc->f.fid, &avc->f.fid) && chunk == tdc->f.chunk
&& !(afs_indexFlags[tdc->index] & (IFFree | IFDiscarded))) {
/* got the right one. It might not be the right version, and it
* might be fetching, but it's the right dcache entry.
MReleaseReadLock(&afs_xdcache);
shortcut = 1;
- if (hsame(tdc->f.versionNo, avc->m.DataVersion)
+ if (hsame(tdc->f.versionNo, avc->f.m.DataVersion)
&& !(tdc->dflags & DFFetching)) {
afs_stats_cmperf.dcacheHits++;
* avc->lock(W) if !setLocks || slowPass
*/
- i = DCHash(&avc->fid, chunk);
+ i = DCHash(&avc->f.fid, chunk);
/* check to make sure our space is fine */
afs_MaybeWakeupTruncateDaemon();
MObtainWriteLock(&afs_xdcache, 280);
us = NULLIDX;
for (index = afs_dchashTbl[i]; index != NULLIDX;) {
- if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
+ if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
tdc = afs_GetDSlot(index, NULL);
ReleaseReadLock(&tdc->tlock);
/*
* avc->lock(W) if !setLocks || slowPass
* afs_xdcache(W)
*/
- if (!FidCmp(&tdc->f.fid, &avc->fid) && chunk == tdc->f.chunk) {
+ if (!FidCmp(&tdc->f.fid, &avc->f.fid) && chunk == tdc->f.chunk) {
/* Move it up in the beginning of the list */
if (afs_dchashTbl[i] != index) {
afs_dcnextTbl[us] = afs_dcnextTbl[index];
if (afs_discardDCList == NULLIDX && afs_freeDCList == NULLIDX) {
while (1) {
if (!setLocks)
- avc->states |= CDCLock;
+ avc->f.states |= CDCLock;
/* just need slots */
afs_GetDownD(5, (int *)0, afs_DCGetBucket(avc));
if (!setLocks)
- avc->states &= ~CDCLock;
+ avc->f.states &= ~CDCLock;
if (afs_discardDCList != NULLIDX
|| afs_freeDCList != NULLIDX)
break;
*/
afs_dcnextTbl[tdc->index] = afs_dchashTbl[i];
afs_dchashTbl[i] = tdc->index;
- i = DVHash(&avc->fid);
+ i = DVHash(&avc->f.fid);
afs_dvnextTbl[tdc->index] = afs_dvhashTbl[i];
afs_dvhashTbl[i] = tdc->index;
tdc->dflags = DFEntryMod;
afs_Trace4(afs_iclSetp, CM_TRACE_GETDCACHE2, ICL_TYPE_POINTER, avc,
ICL_TYPE_POINTER, tdc, ICL_TYPE_INT32,
hgetlo(tdc->f.versionNo), ICL_TYPE_INT32,
- hgetlo(avc->m.DataVersion));
+ hgetlo(avc->f.m.DataVersion));
/*
* Here we have the entry in tdc, with its refCount incremented.
* Note: we don't use the S-lock on avc; it costs concurrency when
ICL_TYPE_INT32, aflags, ICL_TYPE_OFFSET,
ICL_HANDLE_OFFSET(abyte), ICL_TYPE_OFFSET,
ICL_HANDLE_OFFSET(Position));
- if ((aflags & 4) && (hiszero(avc->m.DataVersion)))
+ if ((aflags & 4) && (hiszero(avc->f.m.DataVersion)))
doAdjustSize = 1;
- if ((AFS_CHUNKTOBASE(chunk) >= avc->m.Length) ||
+ if ((AFS_CHUNKTOBASE(chunk) >= avc->f.m.Length) ||
((aflags & 4) && (abyte == Position) && (tlen >= size)))
overWriteWholeChunk = 1;
if (doAdjustSize || overWriteWholeChunk) {
if (doAdjustSize)
adjustsize = 4096;
#endif /* AFS_SGI_ENV */
- if (AFS_CHUNKTOBASE(chunk) + adjustsize >= avc->m.Length &&
+ if (AFS_CHUNKTOBASE(chunk) + adjustsize >= avc->f.m.Length &&
#else /* defined(AFS_AIX32_ENV) || defined(AFS_SGI_ENV) */
#if defined(AFS_SUN5_ENV) || defined(AFS_OSF_ENV)
- if ((doAdjustSize || (AFS_CHUNKTOBASE(chunk) >= avc->m.Length)) &&
+ if ((doAdjustSize || (AFS_CHUNKTOBASE(chunk) >= avc->f.m.Length)) &&
#else
- if (AFS_CHUNKTOBASE(chunk) >= avc->m.Length &&
+ if (AFS_CHUNKTOBASE(chunk) >= avc->f.m.Length &&
#endif
#endif /* defined(AFS_AIX32_ENV) || defined(AFS_SGI_ENV) */
- !hsame(avc->m.DataVersion, tdc->f.versionNo))
+ !hsame(avc->f.m.DataVersion, tdc->f.versionNo))
doReallyAdjustSize = 1;
if (doReallyAdjustSize || overWriteWholeChunk) {
afs_CFileTruncate(file, 0);
afs_CFileClose(file);
afs_AdjustSize(tdc, 0);
- hset(tdc->f.versionNo, avc->m.DataVersion);
+ hset(tdc->f.versionNo, avc->f.m.DataVersion);
tdc->dflags |= DFEntryMod;
ConvertWToSLock(&tdc->lock);
* avc->lock(W) if !setLocks || slowPass
* tdc->lock(S)
*/
- if (!hsame(avc->m.DataVersion, tdc->f.versionNo) && !overWriteWholeChunk) {
+ if (!hsame(avc->f.m.DataVersion, tdc->f.versionNo) && !overWriteWholeChunk) {
/*
* Version number mismatch.
*/
* flush. Clearly, at least, we don't have to flush the file more
* often than it changes
*/
- if (hcmp(avc->flushDV, avc->m.DataVersion) < 0) {
+ if (hcmp(avc->flushDV, avc->f.m.DataVersion) < 0) {
/*
* By here, the cache entry is always write-locked. We can
* deadlock if we call osi_Flush with the cache entry locked...
*/
/* Watch for standard race condition around osi_FlushText */
- if (hsame(avc->m.DataVersion, tdc->f.versionNo)) {
+ if (hsame(avc->f.m.DataVersion, tdc->f.versionNo)) {
updateV2DC(setLocks, avc, tdc, 569); /* set hint */
afs_stats_cmperf.dcacheHits++;
ConvertWToSLock(&tdc->lock);
}
/* Do not fetch data beyond truncPos. */
- maxGoodLength = avc->m.Length;
- if (avc->truncPos < maxGoodLength)
- maxGoodLength = avc->truncPos;
+ maxGoodLength = avc->f.m.Length;
+ if (avc->f.truncPos < maxGoodLength)
+ maxGoodLength = avc->f.truncPos;
Position = AFS_CHUNKBASE(abyte);
if (vType(avc) == VDIR) {
- size = avc->m.Length;
+ size = avc->f.m.Length;
if (size > tdc->f.chunkBytes) {
/* pre-reserve space for file */
afs_AdjustSize(tdc, size);
#else
file = afs_CFileOpen(tdc->f.inode);
#endif
- afs_RemoveVCB(&avc->fid);
+ afs_RemoveVCB(&avc->f.fid);
tdc->f.states |= DWriting;
tdc->dflags |= DFFetching;
tdc->validPos = Position; /* which is AFS_CHUNKBASE(abyte) */
* Remember if we are doing the reading from a replicated volume,
* and how many times we've zipped around the fetch/analyze loop.
*/
- fromReplica = (avc->states & CRO) ? 1 : 0;
+ fromReplica = (avc->f.states & CRO) ? 1 : 0;
numFetchLoops = 0;
accP = &(afs_stats_cmfullperf.accessinf);
if (fromReplica)
#endif /* AFS_NOSTATS */
/* this is a cache miss */
afs_Trace4(afs_iclSetp, CM_TRACE_FETCHPROC, ICL_TYPE_POINTER, avc,
- ICL_TYPE_FID, &(avc->fid), ICL_TYPE_OFFSET,
+ ICL_TYPE_FID, &(avc->f.fid), ICL_TYPE_OFFSET,
ICL_HANDLE_OFFSET(Position), ICL_TYPE_INT32, size);
if (size)
* tdc->lock(W)
*/
- tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
+ tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK);
if (tc) {
afs_int32 length_hi, length, bytes;
#ifndef AFS_NOSTATS
RX_AFS_GUNLOCK();
code =
StartRXAFS_FetchData64(tcall,
- (struct AFSFid *)&avc->fid.
+ (struct AFSFid *)&avc->f.fid.
Fid, Position, tsize);
if (code != 0) {
RX_AFS_GLOCK();
tcall = rx_NewCall(tc->id);
code =
StartRXAFS_FetchData(tcall, (struct AFSFid *)
- &avc->fid.Fid, pos,
+ &avc->f.fid.Fid, pos,
size);
RX_AFS_GLOCK();
}
RX_AFS_GUNLOCK();
code =
StartRXAFS_FetchData(tcall,
- (struct AFSFid *)&avc->fid.Fid,
+ (struct AFSFid *)&avc->f.fid.Fid,
Position, size);
RX_AFS_GLOCK();
if (code == 0) {
if (!setLocks || slowPass) {
ObtainWriteLock(&afs_xcbhash, 453);
afs_DequeueCallback(avc);
- avc->states &= ~(CStatd | CUnique);
+ avc->f.states &= ~(CStatd | CUnique);
avc->callback = NULL;
ReleaseWriteLock(&afs_xcbhash);
- if (avc->fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
+ if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
osi_dnlc_purgedp(avc);
} else {
/* Something lost. Forget about performance, and go
}
} while (afs_Analyze
- (tc, code, &avc->fid, areq,
+ (tc, code, &avc->f.fid, areq,
AFS_STATS_FS_RPCIDX_FETCHDATA, SHARED_LOCK, NULL));
/*
if (!afs_IsDynroot(avc)) {
ObtainWriteLock(&afs_xcbhash, 454);
afs_DequeueCallback(avc);
- avc->states &= ~(CStatd | CUnique);
+ avc->f.states &= ~(CStatd | CUnique);
ReleaseWriteLock(&afs_xcbhash);
- if (avc->fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
+ if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
osi_dnlc_purgedp(avc);
/*
* Locks held:
/*
* See if this was a reference to a file in the local cell.
*/
- if (afs_IsPrimaryCellNum(avc->fid.Cell))
+ if (afs_IsPrimaryCellNum(avc->f.fid.Cell))
afs_stats_cmperf.dlocalAccesses++;
else
afs_stats_cmperf.dremoteAccesses++;
*/
afs_hyper_t currentDV, statusDV;
- hset(currentDV, avc->m.DataVersion);
+ hset(currentDV, avc->f.m.DataVersion);
if (setNewCallback && avc->callback != newCallback)
doVcacheUpdate = 1;
hset64(statusDV, tsmall->OutStatus.dataVersionHigh,
tsmall->OutStatus.DataVersion);
- if (setVcacheStatus && avc->m.Length != tsmall->OutStatus.Length)
+ if (setVcacheStatus && avc->f.m.Length != tsmall->OutStatus.Length)
doVcacheUpdate = 1;
if (setVcacheStatus && !hsame(currentDV, statusDV))
doVcacheUpdate = 1;
if (doVcacheUpdate) {
ObtainWriteLock(&avc->lock, 615);
- if (!hsame(avc->m.DataVersion, currentDV)) {
+ if (!hsame(avc->f.m.DataVersion, currentDV)) {
/* We lose. Someone will beat us to it. */
doVcacheUpdate = 0;
ReleaseWriteLock(&avc->lock);
* I think this is redundant now because this sort of thing
* is already being handled by the higher-level code.
*/
- if ((avc->states & CSafeStore) == 0) {
+ if ((avc->f.states & CSafeStore) == 0) {
tb->code = 0;
tb->flags |= BUVALID;
if (tb->flags & BUWAIT) {
tdc = afs_FindDCache(avc, filePos);
if (tdc) {
ObtainWriteLock(&tdc->lock, 658);
- if (!hsame(tdc->f.versionNo, avc->m.DataVersion)
+ if (!hsame(tdc->f.versionNo, avc->f.m.DataVersion)
|| (tdc->dflags & DFFetching)) {
ReleaseWriteLock(&tdc->lock);
afs_PutDCache(tdc);
afs_MaybeWakeupTruncateDaemon();
ObtainWriteLock(&avc->lock, 509);
}
- avc->states |= CDirty;
+ avc->f.states |= CDirty;
tdc = afs_GetDCache(avc, filePos, areq, &offset, &len, 4);
if (tdc)
ObtainWriteLock(&tdc->lock, 659);
if (vType(avc) != VDIR)
return ENOTDIR;
- if (avc->shVnode || avc->shUnique)
+ if (avc->f.shadow.vnode || avc->f.shadow.unique)
return EEXIST;
/* Generate a fid for the shadow dir. */
- shadow_fid.Cell = avc->fid.Cell;
- shadow_fid.Fid.Volume = avc->fid.Fid.Volume;
+ shadow_fid.Cell = avc->f.fid.Cell;
+ shadow_fid.Fid.Volume = avc->f.fid.Fid.Volume;
afs_GenShadowFid(&shadow_fid);
ObtainWriteLock(&afs_xdcache, 716);
ReleaseWriteLock(&afs_disconDirtyLock);
ReleaseWriteLock(&afs_xvcache);
- avc->shVnode = shadow_fid.Fid.Vnode;
- avc->shUnique = shadow_fid.Fid.Unique;
+ avc->f.shadow.vnode = shadow_fid.Fid.Vnode;
+ avc->f.shadow.unique = shadow_fid.Fid.Unique;
}
done:
struct dcache *tdc;
struct VenusFid shadow_fid;
- shadow_fid.Cell = avc->fid.Cell;
- shadow_fid.Fid.Volume = avc->fid.Fid.Volume;
- shadow_fid.Fid.Vnode = avc->shVnode;
- shadow_fid.Fid.Unique = avc->shUnique;
+ shadow_fid.Cell = avc->f.fid.Cell;
+ shadow_fid.Fid.Volume = avc->f.fid.Fid.Volume;
+ shadow_fid.Fid.Vnode = avc->f.shadow.vnode;
+ shadow_fid.Fid.Unique = avc->f.shadow.unique;
tdc = afs_FindDCacheByFid(&shadow_fid);
if (tdc) {
afs_DiscardDCache(tdc);
afs_PutDCache(tdc);
}
- avc->shVnode = avc->shUnique = 0;
+ avc->f.shadow.vnode = avc->f.shadow.unique = 0;
ObtainWriteLock(&afs_disconDirtyLock, 708);
QRemove(&avc->shadowq);
ReleaseWriteLock(&afs_disconDirtyLock);
* length we GetDCache for that chunk.
*/
- if (AFS_CHUNK(apos) == 0 || apos <= avc->m.Length)
+ if (AFS_CHUNK(apos) == 0 || apos <= avc->f.m.Length)
return;
- if (avc->m.Length == 0)
+ if (avc->f.m.Length == 0)
start = 0;
else
- start = AFS_CHUNK(avc->m.Length)+1;
+ start = AFS_CHUNK(avc->f.m.Length)+1;
end = AFS_CHUNK(apos);
#ifdef AFS_DISCON_ENV
#define dv_match(vc, fstat) \
- ((vc->m.DataVersion.low == fstat.DataVersion) && \
- (vc->m.DataVersion.high == fstat.dataVersionHigh))
+ ((vc->f.m.DataVersion.low == fstat.DataVersion) && \
+ (vc->f.m.DataVersion.high == fstat.dataVersionHigh))
/*! Circular queue of dirty vcaches */
struct afs_q afs_disconDirty;
*/
int afs_GenStoreStatus(struct vcache *avc, struct AFSStoreStatus *astat)
{
- if (!avc || !astat || !avc->ddirty_flags)
+ if (!avc || !astat || !avc->f.ddirty_flags)
return 0;
/* Clean up store stat. */
memset(astat, 0, sizeof(struct AFSStoreStatus));
- if (avc->ddirty_flags & VDisconSetTime) {
+ if (avc->f.ddirty_flags & VDisconSetTime) {
/* Update timestamp. */
- astat->ClientModTime = avc->m.Date;
+ astat->ClientModTime = avc->f.m.Date;
astat->Mask |= AFS_SETMODTIME;
}
- if (avc->ddirty_flags & VDisconSetMode) {
+ if (avc->f.ddirty_flags & VDisconSetMode) {
/* Copy the mode bits. */
- astat->UnixModeBits = avc->m.Mode;
+ astat->UnixModeBits = avc->f.m.Mode;
astat->Mask |= AFS_SETMODE;
}
{
struct dcache *tdc;
- afid->Cell = avc->fid.Cell;
- afid->Fid.Volume = avc->fid.Fid.Volume;
+ afid->Cell = avc->f.fid.Cell;
+ afid->Fid.Volume = avc->f.fid.Fid.Volume;
switch (vType(avc)) {
case VREG:
case VLNK:
/* Normal files have the dir fid embedded in the vcache. */
- afid->Fid.Vnode = avc->parentVnode;
- afid->Fid.Unique = avc->parentUnique;
+ afid->Fid.Vnode = avc->f.parent.vnode;
+ afid->Fid.Unique = avc->f.parent.unique;
break;
case VDIR:
/* If dir or parent dir created locally*/
- tdc = afs_FindDCacheByFid(&avc->fid);
+ tdc = afs_FindDCacheByFid(&avc->f.fid);
if (tdc) {
afid->Fid.Unique = 0;
/* Lookup each entry for the fid. It should be the first. */
/* For deleted files, get the shadow dir's tdc: */
/* Get the parent dir's vcache that contains the shadow fid. */
- parent_fid.Cell = avc->fid.Cell;
- parent_fid.Fid.Volume = avc->fid.Fid.Volume;
- if (avc->ddirty_flags & VDisconRename) {
+ parent_fid.Cell = avc->f.fid.Cell;
+ parent_fid.Fid.Volume = avc->f.fid.Fid.Volume;
+ if (avc->f.ddirty_flags & VDisconRename) {
/* For renames the old dir fid is needed. */
- parent_fid.Fid.Vnode = avc->oldVnode;
- parent_fid.Fid.Unique = avc->oldUnique;
+ parent_fid.Fid.Vnode = avc->f.oldParent.vnode;
+ parent_fid.Fid.Unique = avc->f.oldParent.unique;
} else {
parent_fid.Fid.Vnode = afid->Fid.Vnode;
parent_fid.Fid.Unique = afid->Fid.Unique;
return ENOENT;
}
- shadow_fid.Cell = parent_vc->fid.Cell;
- shadow_fid.Fid.Volume = parent_vc->fid.Fid.Volume;
- shadow_fid.Fid.Vnode = parent_vc->shVnode;
- shadow_fid.Fid.Unique = parent_vc->shUnique;
+ shadow_fid.Cell = parent_vc->f.fid.Cell;
+ shadow_fid.Fid.Volume = parent_vc->f.fid.Fid.Volume;
+ shadow_fid.Fid.Vnode = parent_vc->f.shadow.vnode;
+ shadow_fid.Fid.Unique = parent_vc->f.shadow.unique;
afs_PutVCache(parent_vc);
} /* if (deleted) */
if (tdc) {
- tnf.fid = &avc->fid;
+ tnf.fid = &avc->f.fid;
tnf.name_len = -1;
tnf.name = aname;
afs_dir_EnumerateDir(tdc, &get_vnode_name_hook, &tnf);
return 0;
/* Get this file's vcache. */
- tfid.Cell = v->vc->fid.Cell;
- tfid.Fid.Volume = v->vc->fid.Fid.Volume;
+ tfid.Cell = v->vc->f.fid.Cell;
+ tfid.Fid.Volume = v->vc->f.fid.Fid.Volume;
tfid.Fid.Vnode = vnode;
tfid.Fid.Unique = unique;
/* Count unfinished dirty children. */
if (tvc) {
ObtainReadLock(&tvc->lock);
- if (tvc->ddirty_flags || tvc->shVnode)
+ if (tvc->f.ddirty_flags || tvc->f.shadow.vnode)
v->count++;
ReleaseReadLock(&tvc->lock);
struct DirtyChildrenCount dcc;
struct VenusFid shadow_fid;
- if (!avc->shVnode)
+ if (!avc->f.shadow.vnode)
/* Empty dir. */
return 0;
- shadow_fid.Cell = avc->fid.Cell;
- shadow_fid.Fid.Volume = avc->fid.Fid.Volume;
- shadow_fid.Fid.Vnode = avc->shVnode;
- shadow_fid.Fid.Unique = avc->shUnique;
+ shadow_fid.Cell = avc->f.fid.Cell;
+ shadow_fid.Fid.Volume = avc->f.fid.Fid.Volume;
+ shadow_fid.Fid.Vnode = avc->f.shadow.vnode;
+ shadow_fid.Fid.Unique = avc->f.shadow.unique;
dcc.count = 0;
/* Change the fields. */
if (tvc) {
- tvc->parentVnode = afid->Fid.Vnode;
- tvc->parentUnique = afid->Fid.Unique;
+ tvc->f.parent.vnode = afid->Fid.Vnode;
+ tvc->f.parent.unique = afid->Fid.Unique;
afs_PutVCache(tvc);
}
goto end;
}
- if ((*adp)->ddirty_flags & VDisconCreate) {
+ if ((*adp)->f.ddirty_flags & VDisconCreate) {
printf("afs_GetParentVCache: deferring until parent exists\n");
code = EAGAIN;
goto end;
XSTATS_DECLS;
/* Get old dir vcache. */
- old_pdir_fid.Cell = avc->fid.Cell;
- old_pdir_fid.Fid.Volume = avc->fid.Fid.Volume;
- old_pdir_fid.Fid.Vnode = avc->oldVnode;
- old_pdir_fid.Fid.Unique = avc->oldUnique;
+ old_pdir_fid.Cell = avc->f.fid.Cell;
+ old_pdir_fid.Fid.Volume = avc->f.fid.Fid.Volume;
+ old_pdir_fid.Fid.Vnode = avc->f.oldParent.vnode;
+ old_pdir_fid.Fid.Unique = avc->f.oldParent.unique;
/* Get old name. */
old_name = (char *) afs_osi_Alloc(AFSNAMEMAX);
goto done;
}
- if (avc->ddirty_flags & VDisconRenameSameDir) {
+ if (avc->f.ddirty_flags & VDisconRenameSameDir) {
/* If we're in the same dir, don't do the lookups all over again,
* just copy fid and vcache from the old dir.
*/
/* Set status. */
InStatus.Mask = AFS_SETMODTIME | AFS_SETMODE | AFS_SETGROUP;
- InStatus.ClientModTime = avc->m.Date;
- InStatus.Owner = avc->m.Owner;
+ InStatus.ClientModTime = avc->f.m.Date;
+ InStatus.Owner = avc->f.m.Owner;
InStatus.Group = (afs_int32) acred->cr_gid;
/* Only care about protection bits. */
- InStatus.UnixModeBits = avc->m.Mode & 0xffff;
+ InStatus.UnixModeBits = avc->f.m.Mode & 0xffff;
do {
- tc = afs_Conn(&tdp->fid, areq, SHARED_LOCK);
+ tc = afs_Conn(&tdp->f.fid, areq, SHARED_LOCK);
if (tc) {
switch (vType(avc)) {
case VREG:
XSTATS_START_TIME(op);
RX_AFS_GUNLOCK();
code = RXAFS_CreateFile(tc->id,
- (struct AFSFid *)&tdp->fid.Fid,
+ (struct AFSFid *)&tdp->f.fid.Fid,
tname, &InStatus,
(struct AFSFid *) &newFid.Fid,
&OutFidStatus, &OutDirStatus,
op = AFS_STATS_FS_RPCIDX_MAKEDIR;
XSTATS_START_TIME(op);
RX_AFS_GUNLOCK();
- code = RXAFS_MakeDir(tc->id, (struct AFSFid *) &tdp->fid.Fid,
+ code = RXAFS_MakeDir(tc->id, (struct AFSFid *) &tdp->f.fid.Fid,
tname, &InStatus,
(struct AFSFid *) &newFid.Fid,
&OutFidStatus, &OutDirStatus,
XSTATS_START_TIME(op);
RX_AFS_GUNLOCK();
code = RXAFS_Symlink(tc->id,
- (struct AFSFid *) &tdp->fid.Fid,
+ (struct AFSFid *) &tdp->f.fid.Fid,
tname, ttargetName, &InStatus,
(struct AFSFid *) &newFid.Fid,
&OutFidStatus, &OutDirStatus, &tsync);
}
} else
code = -1;
- } while (afs_Analyze(tc, code, &tdp->fid, areq, op, SHARED_LOCK, NULL));
+ } while (afs_Analyze(tc, code, &tdp->f.fid, areq, op, SHARED_LOCK, NULL));
/* TODO: Handle errors. */
if (code) {
}
/* Th