#include <afs/sysincludes.h> /* Standard vendor system headers */
#include <afsincludes.h> /* Afs-based standard headers */
#include <afs/afs_stats.h> /* statistics */
#include <sys/malloc.h>
#include <sys/namei.h>
#include <vm/vm_zone.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

extern int afs_pbuf_freecnt;
int afs_vop_lookup(struct vop_lookup_args *);
int afs_vop_create(struct vop_create_args *);
int afs_vop_mknod(struct vop_mknod_args *);
int afs_vop_open(struct vop_open_args *);
int afs_vop_close(struct vop_close_args *);
int afs_vop_access(struct vop_access_args *);
int afs_vop_getattr(struct vop_getattr_args *);
int afs_vop_setattr(struct vop_setattr_args *);
int afs_vop_read(struct vop_read_args *);
int afs_vop_write(struct vop_write_args *);
int afs_vop_getpages(struct vop_getpages_args *);
int afs_vop_putpages(struct vop_putpages_args *);
int afs_vop_ioctl(struct vop_ioctl_args *);
int afs_vop_poll(struct vop_poll_args *);
int afs_vop_mmap(struct vop_mmap_args *);
int afs_vop_fsync(struct vop_fsync_args *);
int afs_vop_remove(struct vop_remove_args *);
int afs_vop_link(struct vop_link_args *);
int afs_vop_rename(struct vop_rename_args *);
int afs_vop_mkdir(struct vop_mkdir_args *);
int afs_vop_rmdir(struct vop_rmdir_args *);
int afs_vop_symlink(struct vop_symlink_args *);
int afs_vop_readdir(struct vop_readdir_args *);
int afs_vop_readlink(struct vop_readlink_args *);
int afs_vop_inactive(struct vop_inactive_args *);
int afs_vop_reclaim(struct vop_reclaim_args *);
int afs_vop_lock(struct vop_lock_args *);
int afs_vop_unlock(struct vop_unlock_args *);
int afs_vop_bmap(struct vop_bmap_args *);
int afs_vop_strategy(struct vop_strategy_args *);
int afs_vop_print(struct vop_print_args *);
int afs_vop_islocked(struct vop_islocked_args *);
int afs_vop_advlock(struct vop_advlock_args *);
/* Global vfs data structures for AFS. */
vop_t **afs_vnodeop_p;
struct vnodeopv_entry_desc afs_vnodeop_entries[] = {
    { &vop_default_desc, (vop_t *) vop_eopnotsupp },
    { &vop_access_desc, (vop_t *) afs_vop_access },  /* access */
    { &vop_advlock_desc, (vop_t *) afs_vop_advlock },  /* advlock */
    { &vop_bmap_desc, (vop_t *) afs_vop_bmap },  /* bmap */
    { &vop_bwrite_desc, (vop_t *) vop_stdbwrite },
    { &vop_close_desc, (vop_t *) afs_vop_close },  /* close */
    { &vop_createvobject_desc, (vop_t *) vop_stdcreatevobject },
    { &vop_destroyvobject_desc, (vop_t *) vop_stddestroyvobject },
    { &vop_create_desc, (vop_t *) afs_vop_create },  /* create */
    { &vop_fsync_desc, (vop_t *) afs_vop_fsync },  /* fsync */
    { &vop_getattr_desc, (vop_t *) afs_vop_getattr },  /* getattr */
    { &vop_getpages_desc, (vop_t *) afs_vop_getpages },  /* getpages */
    { &vop_getvobject_desc, (vop_t *) vop_stdgetvobject },
    { &vop_putpages_desc, (vop_t *) afs_vop_putpages },  /* putpages */
    { &vop_inactive_desc, (vop_t *) afs_vop_inactive },  /* inactive */
    { &vop_islocked_desc, (vop_t *) afs_vop_islocked },  /* islocked */
    { &vop_lease_desc, (vop_t *) vop_null },
    { &vop_link_desc, (vop_t *) afs_vop_link },  /* link */
    { &vop_lock_desc, (vop_t *) afs_vop_lock },  /* lock */
    { &vop_lookup_desc, (vop_t *) afs_vop_lookup },  /* lookup */
    { &vop_mkdir_desc, (vop_t *) afs_vop_mkdir },  /* mkdir */
    { &vop_mknod_desc, (vop_t *) afs_vop_mknod },  /* mknod */
    { &vop_mmap_desc, (vop_t *) afs_vop_mmap },  /* mmap */
    { &vop_open_desc, (vop_t *) afs_vop_open },  /* open */
    { &vop_poll_desc, (vop_t *) afs_vop_poll },  /* select */
    { &vop_print_desc, (vop_t *) afs_vop_print },  /* print */
    { &vop_read_desc, (vop_t *) afs_vop_read },  /* read */
    { &vop_readdir_desc, (vop_t *) afs_vop_readdir },  /* readdir */
    { &vop_readlink_desc, (vop_t *) afs_vop_readlink },  /* readlink */
    { &vop_reclaim_desc, (vop_t *) afs_vop_reclaim },  /* reclaim */
    { &vop_remove_desc, (vop_t *) afs_vop_remove },  /* remove */
    { &vop_rename_desc, (vop_t *) afs_vop_rename },  /* rename */
    { &vop_rmdir_desc, (vop_t *) afs_vop_rmdir },  /* rmdir */
    { &vop_setattr_desc, (vop_t *) afs_vop_setattr },  /* setattr */
    { &vop_strategy_desc, (vop_t *) afs_vop_strategy },  /* strategy */
    { &vop_symlink_desc, (vop_t *) afs_vop_symlink },  /* symlink */
    { &vop_unlock_desc, (vop_t *) afs_vop_unlock },  /* unlock */
    { &vop_write_desc, (vop_t *) afs_vop_write },  /* write */
    { &vop_ioctl_desc, (vop_t *) afs_vop_ioctl },  /* XXX ioctl */
    /*{ &vop_seek_desc, afs_vop_seek },*/  /* seek */
    { NULL, NULL }
};
struct vnodeopv_desc afs_vnodeop_opv_desc =
    { &afs_vnodeop_p, afs_vnodeop_entries };
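/*
 * Registration sketch (assumed; the registration point is not part of this
 * excerpt): on FreeBSD the opv descriptor above is normally handed to the
 * kernel's vnode-operation vector machinery at module load time, e.g.
 *
 *     VNODEOP_SET(afs_vnodeop_opv_desc);
 */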
#define GETNAME() \
    struct componentname *cnp = ap->a_cnp; \
    char *name; \
    MALLOC(name, char *, cnp->cn_namelen+1, M_TEMP, M_WAITOK); \
    memcpy(name, cnp->cn_nameptr, cnp->cn_namelen); \
    name[cnp->cn_namelen] = '\0'

#define DROPNAME() FREE(name, M_TEMP)
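/*
 * Usage sketch (illustrative only; the surrounding handler boilerplate is
 * assumed, not shown here): a vop handler that receives a componentname in
 * ap->a_cnp copies out a NUL-terminated name, uses it, and frees it again:
 *
 *     GETNAME();
 *     error = afs_remove(VTOAFS(dvp), name, cnp->cn_cred);
 *     DROPNAME();
 */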
struct vop_lookup_args /* {
    struct vnodeop_desc * a_desc;
    struct vnode **a_vpp;
    struct componentname *a_cnp;

struct vnode *vp, *dvp;
register int flags = ap->a_cnp->cn_flags;
int lockparent;  /* 1 => lockparent flag is set */
int wantparent;  /* 1 => wantparent or lockparent flag */

lockparent = flags & LOCKPARENT;
wantparent = flags & (LOCKPARENT|WANTPARENT);

if (ap->a_dvp->v_type != VDIR) {

if (flags & ISDOTDOT)
    VOP_UNLOCK(dvp, 0, p);

error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);

if (flags & ISDOTDOT)
    VOP_LOCK(dvp, LK_EXCLUSIVE | LK_RETRY, p);
if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
    (flags & ISLASTCN) && error == ENOENT)
if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
    cnp->cn_flags |= SAVENAME;

vp = AFSTOV(vcp);  /* always get a node if no error */

/* The parent directory comes in locked.  We unlock it on return
 * unless the caller wants it left locked.
 * We also always return the vnode locked. */

if (flags & ISDOTDOT) {
    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
    /* always return the child locked */
    if (lockparent && (flags & ISLASTCN) &&
        (error = vn_lock(dvp, LK_EXCLUSIVE, p))) {

} else if (vp == dvp) {
    /* they're the same; afs_lookup() already ref'ed the leaf.
     * It came in locked, so we don't need to ref OR lock it */

if (!lockparent || !(flags & ISLASTCN))
    VOP_UNLOCK(dvp, 0, p);  /* done with parent. */
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
/* always return the child locked */

if ((cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) ||
    (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)))
    cnp->cn_flags |= SAVENAME;
struct vop_create_args /* {
    struct vnode **a_vpp;
    struct componentname *a_cnp;

register struct vnode *dvp = ap->a_dvp;

error = afs_create(VTOAFS(dvp), name, ap->a_vap, ap->a_vap->va_vaflags & VA_EXCLUSIVE ? EXCL : NONEXCL,
    ap->a_vap->va_mode, &vcp,

*ap->a_vpp = AFSTOV(vcp);
vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY, p);

struct vop_mknod_args /* {
    struct vnode **a_vpp;
    struct componentname *a_cnp;
static int validate_vops(struct vnode *vp, int after)

struct vnodeopv_entry_desc *this;
for (this=afs_vnodeop_entries; this->opve_op; this++) {
    if (vp->v_op[this->opve_op->vdesc_offset] != this->opve_impl) {
        printf("v_op %d ", after);

        printf("For oper %d (%s), func is %p, not %p",
            this->opve_op->vdesc_offset, this->opve_op->vdesc_name,
            vp->v_op[this->opve_op->vdesc_offset], this->opve_impl);
struct vop_open_args /* {
    struct ucred *a_cred;

struct vcache *vc = VTOAFS(ap->a_vp);

error = afs_open(&vc, ap->a_mode, ap->a_cred);

if (AFSTOV(vc) != ap->a_vp)
    panic("AFS open changed vnode!");

afs_BozonLock(&vc->pvnLock, vc);
osi_FlushPages(vc, ap->a_cred);
afs_BozonUnlock(&vc->pvnLock, vc);

struct vop_close_args /* {
    struct ucred *a_cred;
struct vcache *avc = VTOAFS(ap->a_vp);

code=afs_close(avc, ap->a_fflag, ap->a_cred, ap->a_p);

code=afs_close(avc, ap->a_fflag, &afs_osi_cred, ap->a_p);
afs_BozonLock(&avc->pvnLock, avc);
osi_FlushPages(avc, ap->a_cred);  /* hold bozon lock, but not basic vnode lock */
afs_BozonUnlock(&avc->pvnLock, avc);
struct vop_access_args /* {
    struct ucred *a_cred;

code=afs_access(VTOAFS(ap->a_vp), ap->a_mode, ap->a_cred);

struct vop_getattr_args /* {
    struct ucred *a_cred;

code=afs_getattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);

struct vop_setattr_args /* {
    struct ucred *a_cred;

code=afs_setattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);

struct vop_read_args /* {
    struct ucred *a_cred;

struct vcache *avc=VTOAFS(ap->a_vp);

afs_BozonLock(&avc->pvnLock, avc);
osi_FlushPages(avc, ap->a_cred);  /* hold bozon lock, but not basic vnode lock */
code=afs_read(avc, ap->a_uio, ap->a_cred, 0, 0, 0);
afs_BozonUnlock(&avc->pvnLock, avc);
struct vop_getpages_args /* {
    vm_ooffset_t a_offset;

int i, nextoff, size, toff, npages;
struct vcache *avc=VTOAFS(ap->a_vp);

if (avc->v.v_object == NULL) {
    printf("afs_getpages: called with non-merged cache vnode??\n");
    return VM_PAGER_ERROR;

npages=btoc(ap->a_count);

/*
 * If the requested page is partially valid, just return it and
 * allow the pager to zero-out the blanks.  Partially valid pages
 * can only occur at the file EOF.
 */

vm_page_t m = ap->a_m[ap->a_reqpage];

/* handled by vm_fault now */
/* vm_page_zero_invalid(m, TRUE); */
for (i = 0; i < npages; ++i) {
    if (i != ap->a_reqpage)
        vnode_pager_freepage(ap->a_m[i]);

bp = getpbuf(&afs_pbuf_freecnt);
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, ap->a_m, npages);
iov.iov_base=(caddr_t)kva;
iov.iov_len=ap->a_count;

uio.uio_offset=IDX_TO_OFF(ap->a_m[0]->pindex);
uio.uio_resid=ap->a_count;
uio.uio_segflg=UIO_SYSSPACE;

uio.uio_procp=curproc;

afs_BozonLock(&avc->pvnLock, avc);
osi_FlushPages(avc, curproc->p_cred->pc_ucred);  /* hold bozon lock, but not basic vnode lock */
code=afs_read(avc, &uio, curproc->p_cred->pc_ucred, 0, 0, 0);
afs_BozonUnlock(&avc->pvnLock, avc);

pmap_qremove(kva, npages);

relpbuf(bp, &afs_pbuf_freecnt);

if (code && (uio.uio_resid == ap->a_count)) {
    for (i = 0; i < npages; ++i) {
        if (i != ap->a_reqpage)
            vnode_pager_freepage(ap->a_m[i]);

    return VM_PAGER_ERROR;

size = ap->a_count - uio.uio_resid;
for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {

    nextoff = toff + PAGE_SIZE;

    m->flags &= ~PG_ZERO;

    if (nextoff <= size) {
        /* Read operation filled an entire page */
        m->valid = VM_PAGE_BITS_ALL;

    } else if (size > toff) {
        /* Read operation filled a partial page. */
        vm_page_set_validclean(m, 0, size - toff);
        /* handled by vm_fault now */
        /* vm_page_zero_invalid(m, TRUE); */

    if (i != ap->a_reqpage) {
        /*
         * Whether or not to leave the page activated is up in
         * the air, but we should put the page on a page queue
         * somewhere (it already is in the object).  Result:
         * It appears that empirical results show that
         * deactivating pages is best.
         */

        /*
         * Just in case someone was asking for this page we
         * now tell them that it is ok to use.
         */

        if (m->flags & PG_WANTED)

            vm_page_deactivate(m);

        vnode_pager_freepage(m);
struct vop_write_args /* {
    struct ucred *a_cred;

struct vcache *avc=VTOAFS(ap->a_vp);

afs_BozonLock(&avc->pvnLock, avc);
osi_FlushPages(avc, ap->a_cred);  /* hold bozon lock, but not basic vnode lock */
code=afs_write(VTOAFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_cred, 0);
afs_BozonUnlock(&avc->pvnLock, avc);
struct vop_putpages_args /* {
    vm_ooffset_t a_offset;

int i, size, npages, sync;
struct vcache *avc=VTOAFS(ap->a_vp);

if (avc->v.v_object == NULL) {
    printf("afs_putpages: called with non-merged cache vnode??\n");
    return VM_PAGER_ERROR;

if (vType(avc) != VREG) {
    printf("afs_putpages: not VREG");
    return VM_PAGER_ERROR;

npages=btoc(ap->a_count);
for (i=0; i < npages; i++) ap->a_rtvals[i]=VM_PAGER_AGAIN;
bp = getpbuf(&afs_pbuf_freecnt);
kva = (vm_offset_t) bp->b_data;
pmap_qenter(kva, ap->a_m, npages);
iov.iov_base=(caddr_t)kva;
iov.iov_len=ap->a_count;

uio.uio_offset=IDX_TO_OFF(ap->a_m[0]->pindex);
uio.uio_resid=ap->a_count;
uio.uio_segflg=UIO_SYSSPACE;
uio.uio_rw=UIO_WRITE;
uio.uio_procp=curproc;

if (ap->a_sync & VM_PAGER_PUT_SYNC)

/*if (ap->a_sync & VM_PAGER_PUT_INVAL)

afs_BozonLock(&avc->pvnLock, avc);
code=afs_write(avc, &uio, sync, curproc->p_cred->pc_ucred, 0);
afs_BozonUnlock(&avc->pvnLock, avc);

pmap_qremove(kva, npages);

relpbuf(bp, &afs_pbuf_freecnt);

size = ap->a_count - uio.uio_resid;
for (i = 0; i < round_page(size) / PAGE_SIZE; i++) {
    ap->a_rtvals[i]=VM_PAGER_OK;

return VM_PAGER_ERROR;

return ap->a_rtvals[0];
struct vop_ioctl_args /* {
    struct ucred *a_cred;

struct vcache *tvc = VTOAFS(ap->a_vp);
struct afs_ioctl data;

/* in case we ever get in here... */

AFS_STATCNT(afs_ioctl);
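/*
 * Illustrative note: FreeBSD ioctl command words encode the "group"
 * character in bits 8-15 (see the _IO()/_IOW() macros in <sys/ioccom.h>),
 * so the shift-and-mask below recovers the 'V' group that marks VICE/AFS
 * ioctls.
 */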
if (((ap->a_command >> 8) & 0xff) == 'V') {
    /* This is a VICEIOCTL call */

    error = HandleIoctl(tvc, (struct file *)0 /*Not used*/,
        ap->a_command, ap->a_data);

/* No-op call; just return. */
struct vop_poll_args /* {
    struct ucred *a_cred;

/* We should really check to see if I/O is possible. */

/* NB Currently unsupported. */

struct vop_mmap_args /* {
    struct ucred *a_cred;

struct vop_fsync_args /* {
    struct ucred *a_cred;

int wait = ap->a_waitfor == MNT_WAIT;

register struct vnode *vp = ap->a_vp;

/*vflushbuf(vp, wait);*/

error=afs_fsync(VTOAFS(vp), ap->a_cred);

error=afs_fsync(VTOAFS(vp), &afs_osi_cred);
struct vop_remove_args /* {
    struct componentname *a_cnp;

register struct vnode *vp = ap->a_vp;
register struct vnode *dvp = ap->a_dvp;

error = afs_remove(VTOAFS(dvp), name, cnp->cn_cred);

struct vop_link_args /* {
    struct vnode *a_tdvp;
    struct componentname *a_cnp;

register struct vnode *dvp = ap->a_tdvp;
register struct vnode *vp = ap->a_vp;

if (dvp->v_mount != vp->v_mount) {

if (vp->v_type == VDIR) {

if (error = vn_lock(vp, LK_EXCLUSIVE, p)) {

error = afs_link(VTOAFS(vp), VTOAFS(dvp), name, cnp->cn_cred);
struct vop_rename_args /* {
    struct vnode *a_fdvp;
    struct componentname *a_fcnp;
    struct vnode *a_tdvp;
    struct componentname *a_tcnp;

struct componentname *fcnp = ap->a_fcnp;
struct componentname *tcnp = ap->a_tcnp;
struct vnode *tvp = ap->a_tvp;
register struct vnode *tdvp = ap->a_tdvp;
struct vnode *fvp = ap->a_fvp;
register struct vnode *fdvp = ap->a_fdvp;
struct proc *p=fcnp->cn_proc;

/* Check for cross-device rename. */
if ((fvp->v_mount != tdvp->v_mount) ||
    (tvp && (fvp->v_mount != tvp->v_mount))) {
/*
 * If fvp == tvp, we're just removing one name of a pair of
 * directory entries for the same element; convert the call into a
 * remove.  (pinched from FreeBSD 4.4's ufs_rename())
 */
if (fvp->v_type == VDIR) {

/* Release destination completely. */

fcnp->cn_flags &= ~MODMASK;
fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
if ((fcnp->cn_flags & SAVESTART) == 0)
    panic("afs_rename: lost from startdir");
fcnp->cn_nameiop = DELETE;

error=relookup(fdvp, &fvp, fcnp);

error=VOP_REMOVE(fdvp, fvp, fcnp);

if (error = vn_lock(fvp, LK_EXCLUSIVE, p))

MALLOC(fname, char *, fcnp->cn_namelen+1, M_TEMP, M_WAITOK);
memcpy(fname, fcnp->cn_nameptr, fcnp->cn_namelen);
fname[fcnp->cn_namelen] = '\0';
MALLOC(tname, char *, tcnp->cn_namelen+1, M_TEMP, M_WAITOK);
memcpy(tname, tcnp->cn_nameptr, tcnp->cn_namelen);
tname[tcnp->cn_namelen] = '\0';

/* XXX use "from" or "to" creds? NFS uses "to" creds */
error = afs_rename(VTOAFS(fdvp), fname, VTOAFS(tdvp), tname, tcnp->cn_cred);
struct vop_mkdir_args /* {
    struct vnode **a_vpp;
    struct componentname *a_cnp;

register struct vnode *dvp = ap->a_dvp;
register struct vattr *vap = ap->a_vap;

if ((cnp->cn_flags & HASBUF) == 0)
    panic("afs_vop_mkdir: no name");

error = afs_mkdir(VTOAFS(dvp), name, vap, &vcp, cnp->cn_cred);

*ap->a_vpp = AFSTOV(vcp);
vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY, p);

struct vop_rmdir_args /* {
    struct componentname *a_cnp;

register struct vnode *vp = ap->a_vp;
register struct vnode *dvp = ap->a_dvp;

error = afs_rmdir(VTOAFS(dvp), name, cnp->cn_cred);

struct vop_symlink_args /* {
    struct vnode **a_vpp;
    struct componentname *a_cnp;

register struct vnode *dvp = ap->a_dvp;

/* NFS ignores a_vpp; so do we. */

error = afs_symlink(VTOAFS(dvp), name, ap->a_vap, ap->a_target,
struct vop_readdir_args /* {
    struct ucred *a_cred;

/* printf("readdir %x cookies %x ncookies %d\n", ap->a_vp, ap->a_cookies,

off=ap->a_uio->uio_offset;

error= afs_readdir(VTOAFS(ap->a_vp), ap->a_uio, ap->a_cred,

if (!error && ap->a_ncookies != NULL) {
    struct uio *uio = ap->a_uio;
    const struct dirent *dp, *dp_start, *dp_end;

    u_long *cookies, *cookiep;

    if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
        panic("afs_readdir: burned cookies");
    dp = (const struct dirent *)
        ((const char *)uio->uio_iov->iov_base - (uio->uio_offset - off));

    dp_end = (const struct dirent *) uio->uio_iov->iov_base;
    for (dp_start = dp, ncookies = 0;

        dp = (const struct dirent *)((const char *) dp + dp->d_reclen))

    MALLOC(cookies, u_long *, ncookies * sizeof(u_long),

    for (dp = dp_start, cookiep = cookies;

        dp = (const struct dirent *)((const char *) dp + dp->d_reclen)) {

    *ap->a_cookies = cookies;
    *ap->a_ncookies = ncookies;
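    /*
     * Note (descriptive, not in the original excerpt): the cookie array
     * gives the caller, typically the NFS server, a directory offset for
     * each entry just copied out, one cookie per struct dirent.
     */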
struct vop_readlink_args /* {
    struct ucred *a_cred;

/* printf("readlink %x\n", ap->a_vp); */

error= afs_readlink(VTOAFS(ap->a_vp), ap->a_uio, ap->a_cred);
extern int prtactive;

struct vop_inactive_args /* {

register struct vnode *vp = ap->a_vp;

if (prtactive && vp->v_usecount != 0)
    vprint("afs_vop_inactive(): pushing active", vp);

afs_InactiveVCache(VTOAFS(vp), 0);  /* decrs ref counts */

VOP_UNLOCK(vp, 0, ap->a_p);

struct vop_reclaim_args /* {

register struct vnode *vp = ap->a_vp;

cache_purge(vp);  /* just in case... */

error = afs_FlushVCache(VTOAFS(vp), &sl);  /* tosses our stuff from vnode */

if (!error && vp->v_data)
    panic("afs_reclaim: vnode not cleaned");

if (vp->v_usecount == 2) {
    vprint("reclaim count==2", vp);
} else if (vp->v_usecount == 1) {
    vprint("reclaim count==1", vp);

    vprint("reclaim bad count", vp);
struct vop_lock_args /* {

register struct vnode *vp = ap->a_vp;
register struct vcache *avc = VTOAFS(vp);

if (vp->v_tag == VT_NON)

return (lockmgr(&avc->rwlock, ap->a_flags, &vp->v_interlock,

struct vop_unlock_args /* {

struct vnode *vp = ap->a_vp;
struct vcache *avc = VTOAFS(vp);
return (lockmgr(&avc->rwlock, ap->a_flags | LK_RELEASE,
    &vp->v_interlock, ap->a_p));

struct vop_bmap_args /* {
    struct vnode **a_vpp;

*ap->a_bnp = ap->a_bn * (PAGE_SIZE / DEV_BSIZE);
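/*
 * Illustrative arithmetic (values assumed): with PAGE_SIZE 4096 and
 * DEV_BSIZE 512 the scale factor above is 8, so logical block 3 is
 * reported as "physical" block 24.  AFS has no underlying device blocks;
 * this presumably just satisfies callers that expect DEV_BSIZE units.
 */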
*ap->a_vpp = ap->a_vp;

if (ap->a_runp != NULL)

if (ap->a_runb != NULL)
afs_vop_strategy(ap)
struct vop_strategy_args /* {

error= afs_ustrategy(ap->a_bp);

struct vop_print_args /* {

register struct vnode *vp = ap->a_vp;
register struct vcache *vc = VTOAFS(ap->a_vp);

printf("tag %d, fid: %ld.%x.%x.%x, opens %d, writers %d", vp->v_tag, vc->fid.Cell,
    vc->fid.Fid.Volume, vc->fid.Fid.Vnode, vc->fid.Fid.Unique, vc->opens,
    vc->execsOrWriters);
printf("\n states%s%s%s%s%s", (s & CStatd) ? " statd" : "", (s & CRO) ? " readonly" : "",
    (s & CDirty) ? " dirty" : "", (s & CMAPPED) ? " mapped" : "",
    (s & CVFlushed) ? " flush in progress" : "");

afs_vop_islocked(ap)
struct vop_islocked_args /* {

struct vcache *vc = VTOAFS(ap->a_vp);
return lockstatus(&vc->rwlock, ap->a_p);

/*
 * Advisory record locking support (fcntl() POSIX style)
 */
struct vop_advlock_args /* {

struct proc *p=curproc;

cr=*p->p_cred->pc_ucred;

error= afs_lockctl(VTOAFS(ap->a_vp), ap->a_fl, ap->a_op, &cr,