RCSID("$Header$");
-#include <afs/sysincludes.h> /* Standard vendor system headers */
-#include <afs/afsincludes.h> /* Afs-based standard headers */
-#include <afs/afs_stats.h> /* statistics */
+#include <afs/sysincludes.h> /* Standard vendor system headers */
+#include <afsincludes.h> /* Afs-based standard headers */
+#include <afs/afs_stats.h> /* statistics */
#include <sys/malloc.h>
#include <sys/namei.h>
+#ifndef AFS_FBSD50_ENV
#include <vm/vm_zone.h>
+#endif
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
int afs_vop_putpages(struct vop_putpages_args *);
int afs_vop_ioctl(struct vop_ioctl_args *);
int afs_vop_poll(struct vop_poll_args *);
+#ifndef AFS_FBSD50_ENV
int afs_vop_mmap(struct vop_mmap_args *);
+#endif
int afs_vop_fsync(struct vop_fsync_args *);
int afs_vop_remove(struct vop_remove_args *);
int afs_vop_link(struct vop_link_args *);
{ &vop_access_desc, (vop_t *) afs_vop_access }, /* access */
{ &vop_advlock_desc, (vop_t *) afs_vop_advlock }, /* advlock */
{ &vop_bmap_desc, (vop_t *) afs_vop_bmap }, /* bmap */
+#ifndef AFS_FBSD50_ENV
{ &vop_bwrite_desc, (vop_t *) vop_stdbwrite },
+#endif
{ &vop_close_desc, (vop_t *) afs_vop_close }, /* close */
{ &vop_createvobject_desc, (vop_t *) vop_stdcreatevobject },
{ &vop_destroyvobject_desc, (vop_t *) vop_stddestroyvobject },
{ &vop_lookup_desc, (vop_t *) afs_vop_lookup }, /* lookup */
{ &vop_mkdir_desc, (vop_t *) afs_vop_mkdir }, /* mkdir */
{ &vop_mknod_desc, (vop_t *) afs_vop_mknod }, /* mknod */
+#ifndef AFS_FBSD50_ENV
{ &vop_mmap_desc, (vop_t *) afs_vop_mmap }, /* mmap */
+#endif
{ &vop_open_desc, (vop_t *) afs_vop_open }, /* open */
{ &vop_poll_desc, (vop_t *) afs_vop_poll }, /* select */
{ &vop_print_desc, (vop_t *) afs_vop_print }, /* print */
name[cnp->cn_namelen] = '\0'
#define DROPNAME() FREE(name, M_TEMP)
+
+/* A bit of a cheat: under AFS_FBSD50_ENV the vnodeop args carry a
+ * struct thread * named a_td where older releases had a struct proc *
+ * named a_p; alias the member name so shared code compiles on both. */
+#ifdef AFS_FBSD50_ENV
+#define a_p a_td
+#endif
int
afs_vop_lookup(ap)
-struct vop_lookup_args /* {
- struct vnodeop_desc * a_desc;
- struct vnode *a_dvp;
- struct vnode **a_vpp;
- struct componentname *a_cnp;
- } */ *ap;
+ struct vop_lookup_args /* {
+ struct vnodeop_desc * a_desc;
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ } */ *ap;
{
int error;
struct vcache *vcp;
register int flags = ap->a_cnp->cn_flags;
int lockparent; /* 1 => lockparent flag is set */
int wantparent; /* 1 => wantparent or lockparent flag */
- struct proc *p;
+#ifdef AFS_FBSD50_ENV
+ struct thread *p = ap->a_cnp->cn_thread;
+#else
+ struct proc *p = ap->a_cnp->cn_proc;
+#endif
GETNAME();
- p=cnp->cn_proc;
+
lockparent = flags & LOCKPARENT;
wantparent = flags & (LOCKPARENT|WANTPARENT);
if (flags & ISDOTDOT)
VOP_UNLOCK(dvp, 0, p);
AFS_GLOCK();
- error = afs_lookup((struct vcache *)dvp, name, &vcp, cnp->cn_cred);
+ error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
AFS_GUNLOCK();
if (error) {
if (flags & ISDOTDOT)
*ap->a_vpp = 0;
return (error);
}
- vp = (struct vnode *)vcp; /* always get a node if no error */
+ vp = AFSTOV(vcp); /* always get a node if no error */
/* The parent directory comes in locked. We unlock it on return
unless the caller wants it left locked.
int error = 0;
struct vcache *vcp;
register struct vnode *dvp = ap->a_dvp;
- struct proc *p;
+#ifdef AFS_FBSD50_ENV
+ struct thread *p = ap->a_cnp->cn_thread;
+#else
+ struct proc *p = ap->a_cnp->cn_proc;
+#endif
GETNAME();
- p=cnp->cn_proc;
AFS_GLOCK();
- error = afs_create((struct vcache *)dvp, name, ap->a_vap, ap->a_vap->va_vaflags & VA_EXCLUSIVE? EXCL : NONEXCL,
+ error = afs_create(VTOAFS(dvp), name, ap->a_vap, ap->a_vap->va_vaflags & VA_EXCLUSIVE? EXCL : NONEXCL,
ap->a_vap->va_mode, &vcp,
cnp->cn_cred);
AFS_GUNLOCK();
}
if (vcp) {
- *ap->a_vpp = (struct vnode *)vcp;
- vn_lock((struct vnode *)vcp, LK_EXCLUSIVE| LK_RETRY, p);
+ *ap->a_vpp = AFSTOV(vcp);
+ vn_lock(AFSTOV(vcp), LK_EXCLUSIVE| LK_RETRY, p);
}
else *ap->a_vpp = 0;
{
int error;
int bad;
- struct vcache *vc = (struct vcache *)ap->a_vp;
+ struct vcache *vc = VTOAFS(ap->a_vp);
bad=0;
AFS_GLOCK();
error = afs_open(&vc, ap->a_mode, ap->a_cred);
#ifdef DIAGNOSTIC
- if ((struct vnode *)vc != ap->a_vp)
+ if (AFSTOV(vc) != ap->a_vp)
panic("AFS open changed vnode!");
#endif
afs_BozonLock(&vc->pvnLock, vc);
- osi_FlushPages(vc);
+ osi_FlushPages(vc, ap->a_cred);
afs_BozonUnlock(&vc->pvnLock, vc);
AFS_GUNLOCK();
return error;
} */ *ap;
{
int code;
- struct vcache *avc=ap->a_vp;
+ struct vcache *avc = VTOAFS(ap->a_vp);
AFS_GLOCK();
if (ap->a_cred)
- code=afs_close(avc, ap->a_fflag, ap->a_cred, ap->a_p);
+ code = afs_close(avc, ap->a_fflag, ap->a_cred);
else
- code=afs_close(avc, ap->a_fflag, &afs_osi_cred, ap->a_p);
+ code = afs_close(avc, ap->a_fflag, &afs_osi_cred);
afs_BozonLock(&avc->pvnLock, avc);
- osi_FlushPages(avc); /* hold bozon lock, but not basic vnode lock */
+ osi_FlushPages(avc, ap->a_cred); /* hold bozon lock, but not basic vnode lock */
afs_BozonUnlock(&avc->pvnLock, avc);
AFS_GUNLOCK();
return code;
{
int code;
AFS_GLOCK();
- code=afs_access((struct vcache *)ap->a_vp, ap->a_mode, ap->a_cred);
+ code = afs_access(VTOAFS(ap->a_vp), ap->a_mode, ap->a_cred);
AFS_GUNLOCK();
return code;
}
{
int code;
AFS_GLOCK();
- code=afs_getattr((struct vcache *)ap->a_vp, ap->a_vap, ap->a_cred);
+ code = afs_getattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);
AFS_GUNLOCK();
return code;
}
{
int code;
AFS_GLOCK();
- code=afs_setattr((struct vcache *)ap->a_vp, ap->a_vap, ap->a_cred);
+ code = afs_setattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);
AFS_GUNLOCK();
return code;
}int
} */ *ap;
{
int code;
- struct vcache *avc=(struct vcache *)ap->a_vp;
+ struct vcache *avc=VTOAFS(ap->a_vp);
AFS_GLOCK();
afs_BozonLock(&avc->pvnLock, avc);
- osi_FlushPages(avc); /* hold bozon lock, but not basic vnode lock */
- code=afs_read(avc, ap->a_uio, ap->a_cred, 0, 0, 0);
+ osi_FlushPages(avc, ap->a_cred); /* hold bozon lock, but not basic vnode lock */
+ code = afs_read(avc, ap->a_uio, ap->a_cred, 0, 0, 0);
afs_BozonUnlock(&avc->pvnLock, avc);
AFS_GUNLOCK();
return code;
struct iovec iov;
struct buf *bp;
vm_offset_t kva;
- struct vcache *avc=(struct vcache *)ap->a_vp;
+ struct vcache *avc=VTOAFS(ap->a_vp);
if (avc->v.v_object == NULL) {
printf("afs_getpages: called with non-merged cache vnode??\n");
*/
{
- vm_page_t m = ap->a_m[ap->a_reqpage];
-
- if (m->valid != 0) {
- /* handled by vm_fault now */
- /* vm_page_zero_invalid(m, TRUE); */
- for (i = 0; i < npages; ++i) {
- if (i != ap->a_reqpage)
- vnode_pager_freepage(ap->a_m[i]);
- }
- return(0);
- }
+ vm_page_t m = ap->a_m[ap->a_reqpage];
+
+ if (m->valid != 0) {
+ /* handled by vm_fault now */
+ /* vm_page_zero_invalid(m, TRUE); */
+ for (i = 0; i < npages; ++i) {
+ if (i != ap->a_reqpage)
+ vm_page_free(ap->a_m[i]);
+ }
+ return(0);
+ }
}
bp = getpbuf(&afs_pbuf_freecnt);
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, ap->a_m, npages);
- iov.iov_base=(caddr_t)kva;
- iov.iov_len=ap->a_count;
- uio.uio_iov=&iov;
- uio.uio_iovcnt=1;
- uio.uio_offset=IDX_TO_OFF(ap->a_m[0]->pindex);
- uio.uio_resid=ap->a_count;
- uio.uio_segflg=UIO_SYSSPACE;
- uio.uio_rw=UIO_READ;
- uio.uio_procp=curproc;
+ iov.iov_base = (caddr_t)kva;
+ iov.iov_len = ap->a_count;
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
+ uio.uio_resid = ap->a_count;
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_READ;
+#ifdef AFS_FBSD50_ENV
+ uio.uio_td = curthread;
+#else
+ uio.uio_procp = curproc;
+#endif
AFS_GLOCK();
afs_BozonLock(&avc->pvnLock, avc);
- osi_FlushPages(avc); /* hold bozon lock, but not basic vnode lock */
- code=afs_read(avc, &uio, curproc->p_cred->pc_ucred, 0, 0, 0);
+ osi_FlushPages(avc, osi_curcred()); /* hold bozon lock, but not basic vnode lock */
+ code = afs_read(avc, &uio, osi_curcred(), 0, 0, 0);
afs_BozonUnlock(&avc->pvnLock, avc);
AFS_GUNLOCK();
pmap_qremove(kva, npages);
relpbuf(bp, &afs_pbuf_freecnt);
if (code && (uio.uio_resid == ap->a_count)) {
- for (i = 0; i < npages; ++i) {
- if (i != ap->a_reqpage)
- vnode_pager_freepage(ap->a_m[i]);
- }
- return VM_PAGER_ERROR;
+ for (i = 0; i < npages; ++i) {
+ if (i != ap->a_reqpage)
+ vm_page_free(ap->a_m[i]);
+ }
+ return VM_PAGER_ERROR;
}
size = ap->a_count - uio.uio_resid;
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
m->flags &= ~PG_ZERO;
if (nextoff <= size) {
- /*
- * Read operation filled an entire page
- */
- m->valid = VM_PAGE_BITS_ALL;
- vm_page_undirty(m);
+ /*
+ * Read operation filled an entire page
+ */
+ m->valid = VM_PAGE_BITS_ALL;
+ vm_page_undirty(m);
} else if (size > toff) {
- /*
- * Read operation filled a partial page.
- */
- m->valid = 0;
- vm_page_set_validclean(m, 0, size - toff);
- /* handled by vm_fault now */
- /* vm_page_zero_invalid(m, TRUE); */
+ /*
+ * Read operation filled a partial page.
+ */
+ m->valid = 0;
+ vm_page_set_validclean(m, 0, size - toff);
+ /* handled by vm_fault now */
+ /* vm_page_zero_invalid(m, TRUE); */
}
if (i != ap->a_reqpage) {
- /*
- * Whether or not to leave the page activated is up in
- * the air, but we should put the page on a page queue
- * somewhere (it already is in the object). Result:
- * It appears that emperical results show that
- * deactivating pages is best.
- */
-
- /*
- * Just in case someone was asking for this page we
- * now tell them that it is ok to use.
- */
- if (!code) {
- if (m->flags & PG_WANTED)
- vm_page_activate(m);
- else
- vm_page_deactivate(m);
- vm_page_wakeup(m);
- } else {
- vnode_pager_freepage(m);
- }
+ /*
+ * Whether or not to leave the page activated is up in
+ * the air, but we should put the page on a page queue
+ * somewhere (it already is in the object). Result:
+	 * It appears that empirical results show that
+ * deactivating pages is best.
+ */
+
+ /*
+ * Just in case someone was asking for this page we
+ * now tell them that it is ok to use.
+ */
+ if (!code) {
+ if (m->flags & PG_WANTED)
+ vm_page_activate(m);
+ else
+ vm_page_deactivate(m);
+ vm_page_wakeup(m);
+ } else {
+ vm_page_free(m);
+ }
}
}
return 0;
} */ *ap;
{
int code;
- struct vcache *avc=(struct vcache *)ap->a_vp;
+ struct vcache *avc = VTOAFS(ap->a_vp);
AFS_GLOCK();
afs_BozonLock(&avc->pvnLock, avc);
- osi_FlushPages(avc); /* hold bozon lock, but not basic vnode lock */
- code=afs_write((struct vcache *)ap->a_vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0);
+ osi_FlushPages(avc, ap->a_cred); /* hold bozon lock, but not basic vnode lock */
+ code = afs_write(VTOAFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_cred, 0);
afs_BozonUnlock(&avc->pvnLock, avc);
AFS_GUNLOCK();
return code;
struct iovec iov;
struct buf *bp;
vm_offset_t kva;
- struct vcache *avc=(struct vcache *)ap->a_vp;
+ struct vcache *avc=VTOAFS(ap->a_vp);
if (avc->v.v_object == NULL) {
printf("afs_putpages: called with non-merged cache vnode??\n");
bp = getpbuf(&afs_pbuf_freecnt);
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, ap->a_m, npages);
- iov.iov_base=(caddr_t)kva;
- iov.iov_len=ap->a_count;
- uio.uio_iov=&iov;
- uio.uio_iovcnt=1;
- uio.uio_offset=IDX_TO_OFF(ap->a_m[0]->pindex);
- uio.uio_resid=ap->a_count;
- uio.uio_segflg=UIO_SYSSPACE;
- uio.uio_rw=UIO_WRITE;
- uio.uio_procp=curproc;
- sync=IO_VMIO;
+ iov.iov_base = (caddr_t)kva;
+ iov.iov_len = ap->a_count;
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
+ uio.uio_resid = ap->a_count;
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_WRITE;
+#ifdef AFS_FBSD50_ENV
+ uio.uio_td = curthread;
+#else
+ uio.uio_procp = curproc;
+#endif
+ sync = IO_VMIO;
if (ap->a_sync & VM_PAGER_PUT_SYNC)
- sync|=IO_SYNC;
+ sync |= IO_SYNC;
/*if (ap->a_sync & VM_PAGER_PUT_INVAL)
- sync|=IO_INVAL;*/
+ sync |= IO_INVAL;*/
AFS_GLOCK();
afs_BozonLock(&avc->pvnLock, avc);
- code=afs_write(avc, &uio, sync, curproc->p_cred->pc_ucred, 0);
+ code = afs_write(avc, &uio, sync, osi_curcred(), 0);
afs_BozonUnlock(&avc->pvnLock, avc);
AFS_GUNLOCK();
pmap_qremove(kva, npages);
relpbuf(bp, &afs_pbuf_freecnt);
if (!code) {
- size = ap->a_count - uio.uio_resid;
- for (i = 0; i < round_page(size) / PAGE_SIZE; i++) {
- ap->a_rtvals[i]=VM_PAGER_OK;
- ap->a_m[i]->dirty=0;
- }
- return VM_PAGER_ERROR;
+ size = ap->a_count - uio.uio_resid;
+ for (i = 0; i < round_page(size) / PAGE_SIZE; i++) {
+ ap->a_rtvals[i]=VM_PAGER_OK;
+ ap->a_m[i]->dirty=0;
+ }
+ return VM_PAGER_ERROR;
}
return ap->a_rtvals[0];
}
+
int
afs_vop_ioctl(ap)
struct vop_ioctl_args /* {
struct proc *a_p;
} */ *ap;
{
- struct vcache *tvc = (struct vcache *)ap->a_vp;
- struct afs_ioctl data;
+ struct vcache *tvc = VTOAFS(ap->a_vp);
int error = 0;
/* in case we ever get in here... */
AFS_STATCNT(afs_ioctl);
if (((ap->a_command >> 8) & 0xff) == 'V') {
/* This is a VICEIOCTL call */
- AFS_GLOCK();
- error = HandleIoctl(tvc, (struct file *)0/*Not used*/,
+ AFS_GLOCK();
+ error = HandleIoctl(tvc, NULL /*Not used*/,
ap->a_command, ap->a_data);
- AFS_GUNLOCK();
+ AFS_GUNLOCK();
return(error);
} else {
/* No-op call; just return. */
struct proc *a_p;
} */ *ap;
{
- /*
- * We should really check to see if I/O is possible.
- */
- return (1);
+ /*
+ * We should really check to see if I/O is possible.
+ */
+ return (1);
}
/*
* Mmap a file
struct proc *a_p;
} */ *ap;
{
- int wait = ap->a_waitfor == MNT_WAIT;
int error;
register struct vnode *vp = ap->a_vp;
AFS_GLOCK();
/*vflushbuf(vp, wait);*/
if (ap->a_cred)
- error=afs_fsync((struct vcache *)vp, ap->a_cred);
+ error = afs_fsync(VTOAFS(vp), ap->a_cred);
else
- error=afs_fsync((struct vcache *)vp, &afs_osi_cred);
+ error = afs_fsync(VTOAFS(vp), &afs_osi_cred);
AFS_GUNLOCK();
return error;
}
GETNAME();
AFS_GLOCK();
- error = afs_remove((struct vcache *)dvp, name, cnp->cn_cred);
+ error = afs_remove(VTOAFS(dvp), name, cnp->cn_cred);
AFS_GUNLOCK();
cache_purge(vp);
DROPNAME();
int error = 0;
register struct vnode *dvp = ap->a_tdvp;
register struct vnode *vp = ap->a_vp;
- struct proc *p;
+#ifdef AFS_FBSD50_ENV
+ struct thread *p = ap->a_cnp->cn_thread;
+#else
+ struct proc *p = ap->a_cnp->cn_proc;
+#endif
GETNAME();
- p=cnp->cn_proc;
if (dvp->v_mount != vp->v_mount) {
error = EXDEV;
goto out;
error = EISDIR;
goto out;
}
- if (error = vn_lock(vp, LK_EXCLUSIVE, p)) {
+ if ((error = vn_lock(vp, LK_EXCLUSIVE, p)) != 0) {
goto out;
}
AFS_GLOCK();
- error = afs_link((struct vcache *)vp, (struct vcache *)dvp, name, cnp->cn_cred);
+ error = afs_link(VTOAFS(vp), VTOAFS(dvp), name, cnp->cn_cred);
AFS_GUNLOCK();
if (dvp != vp)
VOP_UNLOCK(vp,0, p);
register struct vnode *tdvp = ap->a_tdvp;
struct vnode *fvp = ap->a_fvp;
register struct vnode *fdvp = ap->a_fdvp;
- struct proc *p=fcnp->cn_proc;
+#ifdef AFS_FBSD50_ENV
+ struct thread *p = fcnp->cn_thread;
+#else
+ struct proc *p = fcnp->cn_proc;
+#endif
/*
* Check for cross-device rename.
vput(fvp);
return (error);
}
- if (error = vn_lock(fvp, LK_EXCLUSIVE, p))
+ if ((error = vn_lock(fvp, LK_EXCLUSIVE, p)) != 0)
goto abortit;
MALLOC(fname, char *, fcnp->cn_namelen+1, M_TEMP, M_WAITOK);
AFS_GLOCK();
/* XXX use "from" or "to" creds? NFS uses "to" creds */
- error = afs_rename((struct vcache *)fdvp, fname, (struct vcache *)tdvp, tname, tcnp->cn_cred);
+ error = afs_rename(VTOAFS(fdvp), fname, VTOAFS(tdvp), tname, tcnp->cn_cred);
AFS_GUNLOCK();
FREE(fname, M_TEMP);
register struct vattr *vap = ap->a_vap;
int error = 0;
struct vcache *vcp;
- struct proc *p;
+#ifdef AFS_FBSD50_ENV
+ struct thread *p = ap->a_cnp->cn_thread;
+#else
+ struct proc *p = ap->a_cnp->cn_proc;
+#endif
GETNAME();
- p=cnp->cn_proc;
#ifdef DIAGNOSTIC
if ((cnp->cn_flags & HASBUF) == 0)
panic("afs_vop_mkdir: no name");
#endif
AFS_GLOCK();
- error = afs_mkdir((struct vcache *)dvp, name, vap, &vcp, cnp->cn_cred);
+ error = afs_mkdir(VTOAFS(dvp), name, vap, &vcp, cnp->cn_cred);
AFS_GUNLOCK();
if (error) {
vput(dvp);
return(error);
}
if (vcp) {
- *ap->a_vpp = (struct vnode *)vcp;
- vn_lock((struct vnode *)vcp, LK_EXCLUSIVE|LK_RETRY, p);
+ *ap->a_vpp = AFSTOV(vcp);
+ vn_lock(AFSTOV(vcp), LK_EXCLUSIVE|LK_RETRY, p);
} else
*ap->a_vpp = 0;
DROPNAME();
} */ *ap;
{
int error = 0;
- register struct vnode *vp = ap->a_vp;
register struct vnode *dvp = ap->a_dvp;
GETNAME();
AFS_GLOCK();
- error = afs_rmdir((struct vcache *)dvp, name, cnp->cn_cred);
+ error = afs_rmdir(VTOAFS(dvp), name, cnp->cn_cred);
AFS_GUNLOCK();
DROPNAME();
return error;
GETNAME();
AFS_GLOCK();
- error = afs_symlink((struct vcache *)dvp, name, ap->a_vap, ap->a_target,
+ error = afs_symlink(VTOAFS(dvp), name, ap->a_vap, ap->a_target,
cnp->cn_cred);
AFS_GUNLOCK();
DROPNAME();
ap->a_ncookies); */
off=ap->a_uio->uio_offset;
AFS_GLOCK();
- error= afs_readdir((struct vcache *)ap->a_vp, ap->a_uio, ap->a_cred,
+ error= afs_readdir(VTOAFS(ap->a_vp), ap->a_uio, ap->a_cred,
ap->a_eofflag);
AFS_GUNLOCK();
if (!error && ap->a_ncookies != NULL) {
int error;
/* printf("readlink %x\n", ap->a_vp);*/
AFS_GLOCK();
- error= afs_readlink((struct vcache *)ap->a_vp, ap->a_uio, ap->a_cred);
+ error= afs_readlink(VTOAFS(ap->a_vp), ap->a_uio, ap->a_cred);
AFS_GUNLOCK();
return error;
}
vprint("afs_vop_inactive(): pushing active", vp);
AFS_GLOCK();
- afs_InactiveVCache((struct vcache *)vp, 0); /* decrs ref counts */
+ afs_InactiveVCache(VTOAFS(vp), 0); /* decrs ref counts */
AFS_GUNLOCK();
VOP_UNLOCK(vp, 0, ap->a_p);
return 0;
struct vnode *a_vp;
} */ *ap;
{
- int error;
- int sl;
+#ifdef AFS_DO_FLUSH_IN_RECLAIM
+ int error, sl;
+#endif
register struct vnode *vp = ap->a_vp;
cache_purge(vp); /* just in case... */
-#if 0
+#ifdef AFS_DO_FLUSH_IN_RECLAIM
AFS_GLOCK();
- error = afs_FlushVCache((struct vcache *)vp, &sl); /* tosses our stuff from vnode */
+ error = afs_FlushVCache(VTOAFS(vp), &sl); /* tosses our stuff from vnode */
AFS_GUNLOCK();
ubc_unlink(vp);
if (!error && vp->v_data)
panic("afs_reclaim: vnode not cleaned");
return error;
#else
- if (vp->v_usecount == 2) {
+ if (vp->v_usecount == 2) {
vprint("reclaim count==2", vp);
- } else if (vp->v_usecount == 1) {
+ } else if (vp->v_usecount == 1) {
vprint("reclaim count==1", vp);
- } else
+ } else
vprint("reclaim bad count", vp);
- return 0;
+ return 0;
#endif
}
struct vnode *a_vp;
} */ *ap;
{
- register struct vnode *vp = ap->a_vp;
- register struct vcache *avc = (struct vcache *)vp;
+ register struct vnode *vp = ap->a_vp;
+ register struct vcache *avc = VTOAFS(vp);
- if (vp->v_tag == VT_NON)
- return (ENOENT);
- return (lockmgr(&avc->rwlock, ap->a_flags, &vp->v_interlock,
- ap->a_p));
+#ifdef AFS_FBSD50_ENV
+ if (!strcmp(vp->v_tag, "none"))
+#else
+ if (vp->v_tag == VT_NON)
+#endif
+ return (ENOENT);
+ return (lockmgr(&avc->rwlock, ap->a_flags, &vp->v_interlock,
+ ap->a_p));
}
int
} */ *ap;
{
struct vnode *vp = ap->a_vp;
- struct vcache *avc = (struct vcache *)vp;
+ struct vcache *avc = VTOAFS(vp);
return (lockmgr(&avc->rwlock, ap->a_flags | LK_RELEASE,
&vp->v_interlock, ap->a_p));
int *a_runb;
} */ *ap;
{
- struct vcache *vcp;
- int error;
if (ap->a_bnp) {
*ap->a_bnp = ap->a_bn * (PAGE_SIZE / DEV_BSIZE);
}
if (ap->a_vpp) {
*ap->a_vpp = ap->a_vp;
}
- if (ap->a_runp != NULL)
- *ap->a_runp = 0;
- if (ap->a_runb != NULL)
- *ap->a_runb = 0;
+ if (ap->a_runp != NULL)
+ *ap->a_runp = 0;
+ if (ap->a_runb != NULL)
+ *ap->a_runb = 0;
return 0;
}
+
int
afs_vop_strategy(ap)
struct vop_strategy_args /* {
AFS_GUNLOCK();
return error;
}
+
int
afs_vop_print(ap)
struct vop_print_args /* {
} */ *ap;
{
register struct vnode *vp = ap->a_vp;
- register struct vcache *vc = (struct vcache *)ap->a_vp;
+ register struct vcache *vc = VTOAFS(ap->a_vp);
int s = vc->states;
+
+#ifdef AFS_FBSD50_ENV
+ printf("tag %s, fid: %d.%x.%x.%x, opens %d, writers %d", vp->v_tag, (int) vc->fid.Cell,
+ (u_int) vc->fid.Fid.Volume, (u_int) vc->fid.Fid.Vnode, (u_int) vc->fid.Fid.Unique,
+ vc->opens, vc->execsOrWriters);
+#else
printf("tag %d, fid: %ld.%x.%x.%x, opens %d, writers %d", vp->v_tag, vc->fid.Cell,
- vc->fid.Fid.Volume, vc->fid.Fid.Vnode, vc->fid.Fid.Unique, vc->opens,
- vc->execsOrWriters);
+ (u_int) vc->fid.Fid.Volume, (u_int) vc->fid.Fid.Vnode, (u_int) vc->fid.Fid.Unique,
+ vc->opens, vc->execsOrWriters);
+#endif
printf("\n states%s%s%s%s%s", (s&CStatd) ? " statd" : "", (s&CRO) ? " readonly" : "",(s&CDirty) ? " dirty" : "",(s&CMAPPED) ? " mapped" : "", (s&CVFlushed) ? " flush in progress" : "");
printf("\n");
return 0;
struct vnode *a_vp;
} */ *ap;
{
- struct vcache *vc = (struct vcache *)ap->a_vp;
+ struct vcache *vc = VTOAFS(ap->a_vp);
return lockstatus(&vc->rwlock, ap->a_p);
}
} */ *ap;
{
int error;
- struct proc *p=curproc;
- struct ucred cr;
- cr=*p->p_cred->pc_ucred;
+ struct ucred cr = *osi_curcred();
+
AFS_GLOCK();
- error= afs_lockctl((struct vcache *)ap->a_vp, ap->a_fl, ap->a_op, &cr,
+ error= afs_lockctl(VTOAFS(ap->a_vp), ap->a_fl, ap->a_op, &cr,
(int) ap->a_id);
AFS_GUNLOCK();
return error;
}
-