2 * A large chunk of this file appears to be copied directly from
3 * sys/nfsclient/nfs_bio.c, which has the following license:
6 * Copyright (c) 1989, 1993
7 * The Regents of the University of California. All rights reserved.
9 * This code is derived from software contributed to Berkeley by
10 * Rick Macklem at The University of Guelph.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
43 * Pursuant to a statement of U.C. Berkeley dated 1999-07-22, this license
44 * is amended to drop clause (3) above.
47 #include <afsconfig.h>
48 #include <afs/param.h>
51 #include <afs/sysincludes.h> /* Standard vendor system headers */
52 #include <afsincludes.h> /* Afs-based standard headers */
53 #include <afs/afs_stats.h> /* statistics */
54 #include <sys/malloc.h>
55 #include <sys/namei.h>
56 #include <sys/unistd.h>
57 #if __FreeBSD_version >= 1000030
58 #include <sys/rwlock.h>
60 #include <vm/vm_page.h>
61 #include <vm/vm_object.h>
62 #include <vm/vm_pager.h>
63 #include <vm/vnode_pager.h>
64 extern int afs_pbuf_freecnt;
/* Body of the GETNAME() convenience macro: copies the componentname's
 * name bytes into a freshly MALLOC'd, NUL-terminated string "name".
 * Pair every GETNAME() with a DROPNAME() to release it.
 * NOTE(review): the "#define GETNAME()" opening line is elided in this
 * excerpt — confirm against the full source. */
67 struct componentname *cnp = ap->a_cnp; \
69 MALLOC(name, char *, cnp->cn_namelen+1, M_TEMP, M_WAITOK); \
70 memcpy(name, cnp->cn_nameptr, cnp->cn_namelen); \
71 name[cnp->cn_namelen] = '\0'
/* Frees the buffer allocated by GETNAME(). */
73 #define DROPNAME() FREE(name, M_TEMP)
76 * Here we define compatibility functions/macros for interfaces that
77 * have changed between different FreeBSD versions.
/* The page-queue lock shims are deliberate no-ops on this kernel vintage;
 * only the per-page lock/unlock wrappers do real work. */
79 static __inline void ma_vm_page_lock_queues(void) {};
80 static __inline void ma_vm_page_unlock_queues(void) {};
81 static __inline void ma_vm_page_lock(vm_page_t m) { vm_page_lock(m); };
82 static __inline void ma_vm_page_unlock(vm_page_t m) { vm_page_unlock(m); };
/* The trailing "p" (thread) argument is accepted for source compatibility
 * but ignored by the underlying lock interfaces. */
84 #define ma_vn_lock(vp, flags, p) (vn_lock(vp, flags))
85 #define MA_VOP_LOCK(vp, flags, p) (VOP_LOCK(vp, flags))
86 #define MA_VOP_UNLOCK(vp, flags, p) (VOP_UNLOCK(vp, flags))
88 #define MA_PCPU_INC(c) PCPU_INC(c)
89 #define MA_PCPU_ADD(c, n) PCPU_ADD(c, n)
/* VM object lock spelling changed at __FreeBSD_version 1000030. */
91 #if __FreeBSD_version >= 1000030
92 #define AFS_VM_OBJECT_WLOCK(o) VM_OBJECT_WLOCK(o)
93 #define AFS_VM_OBJECT_WUNLOCK(o) VM_OBJECT_WUNLOCK(o)
/* NOTE(review): the matching #else/#endif lines are elided in this excerpt;
 * the two definitions below are the pre-1000030 branch. */
95 #define AFS_VM_OBJECT_WLOCK(o) VM_OBJECT_LOCK(o)
96 #define AFS_VM_OBJECT_WUNLOCK(o) VM_OBJECT_UNLOCK(o)
100 * Mostly copied from sys/ufs/ufs/ufs_vnops.c:ufs_pathconf().
101 * We should know the correct answers to these questions with
102 * respect to the AFS protocol (which may differ from the UFS
103 * values) but for the moment this will do.
/* VOP_PATHCONF: answer pathconf(2)/fpathconf(2) queries by storing the
 * value for ap->a_name into *ap->a_retval.
 * NOTE(review): case labels, break statements and the return path between
 * the lines below are elided in this excerpt. */
106 afs_vop_pathconf(struct vop_pathconf_args *ap)
111 switch (ap->a_name) {
113 *ap->a_retval = LINK_MAX;
116 *ap->a_retval = NAME_MAX;
119 *ap->a_retval = PATH_MAX;
122 *ap->a_retval = PIPE_BUF;
124 case _PC_CHOWN_RESTRICTED:
130 #ifdef _PC_ACL_EXTENDED
131 case _PC_ACL_EXTENDED:
134 case _PC_ACL_PATH_MAX:
138 #ifdef _PC_MAC_PRESENT
139 case _PC_MAC_PRESENT:
145 /* _PC_ASYNC_IO should have been handled by upper layers. */
146 KASSERT(0, ("_PC_ASYNC_IO should not get here"));
156 #ifdef _PC_ALLOC_SIZE_MIN
157 case _PC_ALLOC_SIZE_MIN:
158 *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_bsize;
161 #ifdef _PC_FILESIZEBITS
162 case _PC_FILESIZEBITS:
163 *ap->a_retval = 32; /* XXX */
166 #ifdef _PC_REC_INCR_XFER_SIZE
167 case _PC_REC_INCR_XFER_SIZE:
168 *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize;
170 case _PC_REC_MAX_XFER_SIZE:
171 *ap->a_retval = -1; /* means ``unlimited'' */
173 case _PC_REC_MIN_XFER_SIZE:
174 *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize;
176 case _PC_REC_XFER_ALIGN:
177 *ap->a_retval = PAGE_SIZE;
180 #ifdef _PC_SYMLINK_MAX
181 case _PC_SYMLINK_MAX:
182 *ap->a_retval = MAXPATHLEN;
/* VOP_LOOKUP: resolve one pathname component in directory a_dvp via
 * afs_lookup(), returning the target vnode locked in *a_vpp and honoring
 * the LOCKPARENT/WANTPARENT/ISDOTDOT locking protocol.
 * NOTE(review): many intervening lines (declarations, error paths, returns)
 * are elided in this excerpt. */
194 struct vop_lookup_args /* {
195 * struct vnodeop_desc * a_desc;
196 * struct vnode *a_dvp;
197 * struct vnode **a_vpp;
198 * struct componentname *a_cnp;
203 struct vnode *vp, *dvp;
204 int flags = ap->a_cnp->cn_flags;
205 int lockparent; /* 1 => lockparent flag is set */
206 int wantparent; /* 1 => wantparent or lockparent flag */
/* Lookups only make sense in directories. */
209 if (dvp->v_type != VDIR) {
/* ".." at the filesystem root cannot be resolved here. */
213 if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
218 lockparent = flags & LOCKPARENT;
219 wantparent = flags & (LOCKPARENT | WANTPARENT);
221 #if __FreeBSD_version < 1000021
222 cnp->cn_flags |= MPSAFE; /* steel */
/* Drop the parent's lock around the ".." lookup to avoid deadlocking
 * against a root-to-leaf locker. */
225 if (flags & ISDOTDOT)
226 MA_VOP_UNLOCK(dvp, 0, p);
229 error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
233 if (flags & ISDOTDOT)
234 MA_VOP_LOCK(dvp, LK_EXCLUSIVE | LK_RETRY, p);
/* ENOENT on the last component of a CREATE/RENAME is expected. */
235 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)
236 && (flags & ISLASTCN) && error == ENOENT)
238 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
239 cnp->cn_flags |= SAVENAME;
244 vp = AFSTOV(vcp); /* always get a node if no error */
246 /* The parent directory comes in locked. We unlock it on return
247 * unless the caller wants it left locked.
248 * we also always return the vnode locked. */
250 if (flags & ISDOTDOT) {
251 /* vp before dvp since we go root to leaf, and .. comes first */
252 ma_vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
253 ma_vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
254 /* always return the child locked */
255 if (lockparent && (flags & ISLASTCN)
256 && (error = ma_vn_lock(dvp, LK_EXCLUSIVE, p))) {
261 } else if (vp == dvp) {
262 /* they're the same; afs_lookup() already ref'ed the leaf.
263 * It came in locked, so we don't need to ref OR lock it */
265 ma_vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, p);
266 /* always return the child locked */
270 if ((cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN))
271 || (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)))
272 cnp->cn_flags |= SAVENAME;
/* VOP_CREATE: create a regular file named by a_cnp in directory a_dvp via
 * afs_create(), returning the new vnode locked in *a_vpp. The VA_EXCLUSIVE
 * flag selects exclusive-create (EXCL) semantics.
 * NOTE(review): declarations and error handling between the lines below are
 * elided in this excerpt. */
280 struct vop_create_args /* {
281 * struct vnode *a_dvp;
282 * struct vnode **a_vpp;
283 * struct componentname *a_cnp;
284 * struct vattr *a_vap;
289 struct vnode *dvp = ap->a_dvp;
294 afs_create(VTOAFS(dvp), name, ap->a_vap,
295 ap->a_vap->va_vaflags & VA_EXCLUSIVE ? EXCL : NONEXCL,
296 ap->a_vap->va_mode, &vcp, cnp->cn_cred);
304 *ap->a_vpp = AFSTOV(vcp);
/* Return the new child vnode locked, per VOP_CREATE convention. */
305 ma_vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY, p);
/* VOP_MKNOD argument block. NOTE(review): the function body is entirely
 * elided in this excerpt — only the argument-structure comment remains. */
315 struct vop_mknod_args /* {
316 * struct vnode *a_dvp;
317 * struct vnode **a_vpp;
318 * struct componentname *a_cnp;
319 * struct vattr *a_vap;
/* VOP_OPEN: open the vcache via afs_open(), attach a VM object for the
 * mapped-file cache, and flush stale pages.
 * NOTE(review): some declarations/returns are elided in this excerpt. */
327 struct vop_open_args /* {
328 * struct vnode *a_vp;
330 * struct ucred *a_cred;
331 * struct thread *a_td;
336 struct vcache *vc = VTOAFS(ap->a_vp);
339 error = afs_open(&vc, ap->a_mode, ap->a_cred);
/* afs_open() must not hand back a different vnode. */
341 if (AFSTOV(vc) != ap->a_vp)
342 panic("AFS open changed vnode!");
345 vnode_create_vobject(ap->a_vp, vc->f.m.Length, ap->a_td);
346 osi_FlushPages(vc, ap->a_cred);
/* VOP_CLOSE: run afs_close() on the vcache and flush pages. A doomed
 * (recycled) vnode with no opens needs no afs_close processing.
 * NOTE(review): conditionals/returns between some lines are elided in this
 * excerpt. */
352 struct vop_close_args /* {
353 * struct vnode *a_vp;
355 * struct ucred *a_cred;
356 * struct thread *a_td;
360 struct vnode *vp = ap->a_vp;
361 struct vcache *avc = VTOAFS(vp);
364 iflag = vp->v_iflag & VI_DOOMED;
366 if (iflag & VI_DOOMED) {
367 /* osi_FlushVCache (correctly) calls vgone() on recycled vnodes, we don't
368 * have an afs_close to process, in that case */
/* A doomed vnode must not still have open references. */
370 panic("afs_vop_close: doomed vnode %p has vcache %p with non-zero opens %d\n",
371 vp, avc, avc->opens);
/* Use the caller's cred when available, else the default AFS cred. */
377 code = afs_close(avc, ap->a_fflag, ap->a_cred);
379 code = afs_close(avc, ap->a_fflag, afs_osi_credp);
380 osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
/* VOP_ACCESS: forward the permission check to afs_access(). */
387 struct vop_access_args /* {
388 * struct vnode *a_vp;
389 * accmode_t a_accmode;
390 * struct ucred *a_cred;
391 * struct thread *a_td;
396 code = afs_access(VTOAFS(ap->a_vp), ap->a_accmode, ap->a_cred);
/* VOP_GETATTR: fill *ap->a_vap from the vcache via afs_getattr(). */
403 struct vop_getattr_args /* {
404 * struct vnode *a_vp;
405 * struct vattr *a_vap;
406 * struct ucred *a_cred;
412 code = afs_getattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);
/* VOP_SETATTR: apply the attributes in *ap->a_vap via afs_setattr(). */
420 struct vop_setattr_args /* {
421 * struct vnode *a_vp;
422 * struct vattr *a_vap;
423 * struct ucred *a_cred;
428 code = afs_setattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);
/* VOP_READ: flush any stale mapped pages, then satisfy the uio via
 * afs_read(). */
435 struct vop_read_args /* {
436 * struct vnode *a_vp;
439 * struct ucred *a_cred;
444 struct vcache *avc = VTOAFS(ap->a_vp);
446 osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
447 code = afs_read(avc, ap->a_uio, ap->a_cred, 0);
452 /* struct vop_getpages_args {
453 * struct vnode *a_vp;
/* VOP_GETPAGES: page in a run of pages for a mapped AFS file. Maps the
 * pages into a pbuf KVA window, reads the data with afs_read() through a
 * UIO_SYSSPACE uio, then marks each page valid/partially-valid and (on the
 * non-busied variant) disposes of the non-requested readahead pages.
 * NOTE(review): numerous intervening lines (declarations, #else branches,
 * returns) are elided in this excerpt. */
461 afs_vop_getpages(struct vop_getpages_args *ap)
464 int i, nextoff, size, toff, npages, count;
474 memset(&uio, 0, sizeof(uio));
475 memset(&iov, 0, sizeof(iov));
/* Busied variant passes a page count directly; legacy passes bytes. */
480 #ifdef FBSD_VOP_GETPAGES_BUSIED
481 npages = ap->a_count;
487 npages = btoc(ap->a_count);
490 if ((object = vp->v_object) == NULL) {
491 printf("afs_getpages: called with non-merged cache vnode??\n");
492 return VM_PAGER_ERROR;
496 * If the requested page is partially valid, just return it and
497 * allow the pager to zero-out the blanks. Partially valid pages
498 * can only occur at the file EOF.
501 #ifdef FBSD_VOP_GETPAGES_BUSIED
502 AFS_VM_OBJECT_WLOCK(object);
503 ma_vm_page_lock_queues();
504 if(pages[npages - 1]->valid != 0) {
506 ma_vm_page_unlock_queues();
507 AFS_VM_OBJECT_WUNLOCK(object);
508 return (VM_PAGER_OK);
512 vm_page_t m = pages[ap->a_reqpage];
513 AFS_VM_OBJECT_WLOCK(object);
514 ma_vm_page_lock_queues();
516 /* handled by vm_fault now */
517 /* vm_page_zero_invalid(m, TRUE); */
/* Free every page except the one actually requested. */
518 for (i = 0; i < npages; ++i) {
519 if (i != ap->a_reqpage) {
520 ma_vm_page_lock(pages[i]);
521 vm_page_free(pages[i]);
522 ma_vm_page_unlock(pages[i]);
525 ma_vm_page_unlock_queues();
526 AFS_VM_OBJECT_WUNLOCK(object);
530 ma_vm_page_unlock_queues();
531 AFS_VM_OBJECT_WUNLOCK(object);
/* Borrow a pbuf and map the page run into its KVA for the uio read. */
533 bp = getpbuf(&afs_pbuf_freecnt);
535 kva = (vm_offset_t) bp->b_data;
536 pmap_qenter(kva, pages, npages);
537 MA_PCPU_INC(cnt.v_vnodein);
538 MA_PCPU_ADD(cnt.v_vnodepgsin, npages);
540 #ifdef FBSD_VOP_GETPAGES_BUSIED
541 count = ctob(npages);
545 iov.iov_base = (caddr_t) kva;
549 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
550 uio.uio_resid = count;
551 uio.uio_segflg = UIO_SYSSPACE;
552 uio.uio_rw = UIO_READ;
553 uio.uio_td = curthread;
556 osi_FlushPages(avc, osi_curcred()); /* hold GLOCK, but not basic vnode lock */
557 code = afs_read(avc, &uio, osi_curcred(), 0);
559 pmap_qremove(kva, npages);
561 relpbuf(bp, &afs_pbuf_freecnt);
/* Total failure: nothing was read; free the extra pages and bail. */
563 if (code && (uio.uio_resid == count)) {
564 #ifndef FBSD_VOP_GETPAGES_BUSIED
565 AFS_VM_OBJECT_WLOCK(object);
566 ma_vm_page_lock_queues();
567 for (i = 0; i < npages; ++i) {
568 if (i != ap->a_reqpage)
569 vm_page_free(pages[i]);
571 ma_vm_page_unlock_queues();
572 AFS_VM_OBJECT_WUNLOCK(object);
574 return VM_PAGER_ERROR;
/* Walk the pages, marking each fully or partially valid based on how
 * many bytes the read actually produced. */
577 size = count - uio.uio_resid;
578 AFS_VM_OBJECT_WLOCK(object);
579 ma_vm_page_lock_queues();
580 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
582 nextoff = toff + PAGE_SIZE;
585 /* XXX not in nfsclient? */
586 m->flags &= ~PG_ZERO;
588 if (nextoff <= size) {
590 * Read operation filled an entire page
592 m->valid = VM_PAGE_BITS_ALL;
593 KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
594 } else if (size > toff) {
596 * Read operation filled a partial page.
599 vm_page_set_validclean(m, 0, size - toff);
600 KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
603 #ifndef FBSD_VOP_GETPAGES_BUSIED
604 if (i != ap->a_reqpage) {
605 #if __FreeBSD_version >= 1000042
606 vm_page_readahead_finish(m);
609 * Whether or not to leave the page activated is up in
610 * the air, but we should put the page on a page queue
611 * somewhere (it already is in the object). Result:
612 * It appears that empirical results show that
613 * deactivating pages is best.
617 * Just in case someone was asking for this page we
618 * now tell them that it is ok to use.
621 if (m->oflags & VPO_WANTED) {
624 ma_vm_page_unlock(m);
628 vm_page_deactivate(m);
629 ma_vm_page_unlock(m);
635 ma_vm_page_unlock(m);
637 #endif /* __FreeBSD_version 1000042 */
639 #endif /* ndef FBSD_VOP_GETPAGES_BUSIED */
641 ma_vm_page_unlock_queues();
642 AFS_VM_OBJECT_WUNLOCK(object);
/* VOP_WRITE: flush any stale mapped pages, then push the uio through
 * afs_write(). */
648 struct vop_write_args /* {
649 * struct vnode *a_vp;
652 * struct ucred *a_cred;
656 struct vcache *avc = VTOAFS(ap->a_vp);
658 osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
660 afs_write(VTOAFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_cred, 0);
666 * struct vop_putpages_args {
667 * struct vnode *a_vp;
672 * vm_ooffset_t a_offset;
676 * All of the pages passed to us in ap->a_m[] are already marked as busy,
677 * so there is no additional locking required to set their flags. -GAW
/* VOP_PUTPAGES: write dirty mapped pages back to AFS. Maps the page run
 * into a pbuf KVA window, writes with afs_write() through a UIO_SYSSPACE
 * uio, then marks successfully-written pages clean and VM_PAGER_OK.
 * NOTE(review): a few intervening lines are elided in this excerpt. */
680 afs_vop_putpages(struct vop_putpages_args *ap)
683 int i, size, npages, sync;
691 memset(&uio, 0, sizeof(uio));
692 memset(&iov, 0, sizeof(iov));
696 /* Perhaps these two checks should just be KASSERTs instead... */
697 if (vp->v_object == NULL) {
698 printf("afs_putpages: called with non-merged cache vnode??\n");
699 return VM_PAGER_ERROR; /* XXX I think this is insufficient */
701 if (vType(avc) != VREG) {
702 printf("afs_putpages: not VREG");
703 return VM_PAGER_ERROR; /* XXX I think this is insufficient */
/* Default every page to AGAIN; successful ones are flipped to OK below. */
705 npages = btoc(ap->a_count);
706 for (i = 0; i < npages; i++)
707 ap->a_rtvals[i] = VM_PAGER_AGAIN;
708 bp = getpbuf(&afs_pbuf_freecnt);
710 kva = (vm_offset_t) bp->b_data;
711 pmap_qenter(kva, ap->a_m, npages);
712 MA_PCPU_INC(cnt.v_vnodeout);
713 MA_PCPU_ADD(cnt.v_vnodepgsout, ap->a_count);
715 iov.iov_base = (caddr_t) kva;
716 iov.iov_len = ap->a_count;
719 uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
720 uio.uio_resid = ap->a_count;
721 uio.uio_segflg = UIO_SYSSPACE;
722 uio.uio_rw = UIO_WRITE;
723 uio.uio_td = curthread;
725 if (ap->a_sync & VM_PAGER_PUT_SYNC)
727 /*if (ap->a_sync & VM_PAGER_PUT_INVAL)
728 * sync |= IO_INVAL; */
731 code = afs_write(avc, &uio, sync, osi_curcred(), 0);
734 pmap_qremove(kva, npages);
735 relpbuf(bp, &afs_pbuf_freecnt);
/* Mark every fully-written page clean and report VM_PAGER_OK for it. */
738 size = ap->a_count - uio.uio_resid;
739 for (i = 0; i < round_page(size) / PAGE_SIZE; i++) {
740 ap->a_rtvals[i] = VM_PAGER_OK;
741 vm_page_undirty(ap->a_m[i]);
744 return ap->a_rtvals[0];
/* VOP_IOCTL: dispatch VICEIOCTL ('V'-group) commands to HandleIoctl();
 * everything else is a no-op.
 * NOTE(review): some declarations/returns are elided in this excerpt. */
749 struct vop_ioctl_args /* {
750 * struct vnode *a_vp;
754 * struct ucred *a_cred;
755 * struct thread *a_td;
758 struct vcache *tvc = VTOAFS(ap->a_vp);
761 /* in case we ever get in here... */
763 AFS_STATCNT(afs_ioctl);
764 if (((ap->a_command >> 8) & 0xff) == 'V') {
765 /* This is a VICEIOCTL call */
767 error = HandleIoctl(tvc, ap->a_command, ap->a_data);
771 /* No-op call; just return. */
/* VOP_FSYNC: push dirty data for the vnode via afs_fsync(), using the
 * calling thread's credentials. */
778 struct vop_fsync_args /* {
779 * struct vnode *a_vp;
785 struct vnode *vp = ap->a_vp;
788 /*vflushbuf(vp, wait); */
789 error = afs_fsync(VTOAFS(vp), ap->a_td->td_ucred);
/* VOP_REMOVE: unlink the entry named by a_cnp from directory a_dvp via
 * afs_remove(). NOTE(review): GETNAME()/DROPNAME() and the return path are
 * elided in this excerpt. */
796 struct vop_remove_args /* {
797 * struct vnode *a_dvp;
798 * struct vnode *a_vp;
799 * struct componentname *a_cnp;
803 struct vnode *vp = ap->a_vp;
804 struct vnode *dvp = ap->a_dvp;
808 error = afs_remove(VTOAFS(dvp), name, cnp->cn_cred);
/* VOP_LINK: create a hard link to a_vp named by a_cnp in directory a_tdvp
 * via afs_link(). Rejects cross-mount links and links to directories.
 * NOTE(review): some error-path lines are elided in this excerpt. */
817 struct vop_link_args /* {
818 * struct vnode *a_vp;
819 * struct vnode *a_tdvp;
820 * struct componentname *a_cnp;
824 struct vnode *dvp = ap->a_tdvp;
825 struct vnode *vp = ap->a_vp;
/* Hard links cannot span mounts. */
828 if (dvp->v_mount != vp->v_mount) {
/* Hard links to directories are not permitted. */
832 if (vp->v_type == VDIR) {
836 if ((error = ma_vn_lock(vp, LK_CANRECURSE | LK_EXCLUSIVE, p)) != 0) {
840 error = afs_link(VTOAFS(vp), VTOAFS(dvp), name, cnp->cn_cred);
843 MA_VOP_UNLOCK(vp, 0, p);
/* VOP_RENAME: rename fdvp/fcnp to tdvp/tcnp via afs_rename(). Handles the
 * fvp == tvp case by converting the operation into a VOP_REMOVE of the
 * source name (borrowed from ufs_rename()).
 * NOTE(review): many intervening lines (error exits, vrele/vput cleanup)
 * are elided in this excerpt. */
851 struct vop_rename_args /* {
852 * struct vnode *a_fdvp;
853 * struct vnode *a_fvp;
854 * struct componentname *a_fcnp;
855 * struct vnode *a_tdvp;
856 * struct vnode *a_tvp;
857 * struct componentname *a_tcnp;
861 struct componentname *fcnp = ap->a_fcnp;
863 struct componentname *tcnp = ap->a_tcnp;
865 struct vnode *tvp = ap->a_tvp;
866 struct vnode *tdvp = ap->a_tdvp;
867 struct vnode *fvp = ap->a_fvp;
868 struct vnode *fdvp = ap->a_fdvp;
871 * Check for cross-device rename.
873 if ((fvp->v_mount != tdvp->v_mount)
874 || (tvp && (fvp->v_mount != tvp->v_mount))) {
888 * if fvp == tvp, we're just removing one name of a pair of
889 * directory entries for the same element. convert call into rename.
890 ( (pinched from FreeBSD 4.4's ufs_rename())
894 if (fvp->v_type == VDIR) {
899 /* Release destination completely. */
/* Re-lookup the source as a DELETE operation so VOP_REMOVE can run. */
906 fcnp->cn_flags &= ~MODMASK;
907 fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
908 if ((fcnp->cn_flags & SAVESTART) == 0)
909 panic("afs_rename: lost from startdir");
910 fcnp->cn_nameiop = DELETE;
912 error = relookup(fdvp, &fvp, fcnp);
919 error = VOP_REMOVE(fdvp, fvp, fcnp);
927 if ((error = ma_vn_lock(fvp, LK_EXCLUSIVE, p)) != 0)
/* Copy both component names into NUL-terminated heap strings. */
930 MALLOC(fname, char *, fcnp->cn_namelen + 1, M_TEMP, M_WAITOK);
931 memcpy(fname, fcnp->cn_nameptr, fcnp->cn_namelen);
932 fname[fcnp->cn_namelen] = '\0';
933 MALLOC(tname, char *, tcnp->cn_namelen + 1, M_TEMP, M_WAITOK);
934 memcpy(tname, tcnp->cn_nameptr, tcnp->cn_namelen);
935 tname[tcnp->cn_namelen] = '\0';
939 /* XXX use "from" or "to" creds? NFS uses "to" creds */
941 afs_rename(VTOAFS(fdvp), fname, VTOAFS(tdvp), tname, tcnp->cn_cred);
/* VOP_MKDIR: create a directory named by a_cnp in a_dvp via afs_mkdir(),
 * returning the new vnode locked in *a_vpp.
 * NOTE(review): some declarations/returns are elided in this excerpt. */
959 struct vop_mkdir_args /* {
960 * struct vnode *a_dvp;
961 * struct vnode **a_vpp;
962 * struct componentname *a_cnp;
963 * struct vattr *a_vap;
966 struct vnode *dvp = ap->a_dvp;
967 struct vattr *vap = ap->a_vap;
/* The name buffer must have been saved by the preceding lookup. */
973 if ((cnp->cn_flags & HASBUF) == 0)
974 panic("afs_vop_mkdir: no name");
977 error = afs_mkdir(VTOAFS(dvp), name, vap, &vcp, cnp->cn_cred);
984 *ap->a_vpp = AFSTOV(vcp);
985 ma_vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY, p);
/* VOP_RMDIR: remove the directory named by a_cnp from a_dvp via
 * afs_rmdir(). */
994 struct vop_rmdir_args /* {
995 * struct vnode *a_dvp;
996 * struct vnode *a_vp;
997 * struct componentname *a_cnp;
1001 struct vnode *dvp = ap->a_dvp;
1005 error = afs_rmdir(VTOAFS(dvp), name, cnp->cn_cred);
1011 /* struct vop_symlink_args {
1012 * struct vnode *a_dvp;
1013 * struct vnode **a_vpp;
1014 * struct componentname *a_cnp;
1015 * struct vattr *a_vap;
/* VOP_SYMLINK: create the symlink via afs_symlink(), then look the new
 * node up again to return it locked in *a_vpp.
 * NOTE(review): declarations and error handling between the lines below
 * are elided in this excerpt. */
1020 afs_vop_symlink(struct vop_symlink_args *ap)
1023 struct vnode *newvp;
1034 afs_symlink(VTOAFS(dvp), name, ap->a_vap, ap->a_target, NULL,
1037 error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
1039 newvp = AFSTOV(vcp);
1040 ma_vn_lock(newvp, LK_EXCLUSIVE | LK_RETRY, cnp->cn_thread);
1045 *(ap->a_vpp) = newvp;
/* VOP_READDIR: read directory entries via afs_readdir(); if the caller
 * asked for cookies, walk the dirents just written into the uio buffer and
 * synthesize a seek cookie (running byte offset) per entry.
 * NOTE(review): several lines (declarations, cookie stores, returns) are
 * elided in this excerpt. */
1051 struct vop_readdir_args /* {
1052 * struct vnode *a_vp;
1053 * struct uio *a_uio;
1054 * struct ucred *a_cred;
1056 * u_long *a_cookies;
1062 /* printf("readdir %x cookies %x ncookies %d\n", ap->a_vp, ap->a_cookies,
/* Remember the starting offset so cookie offsets can be reconstructed. */
1064 off = ap->a_uio->uio_offset;
1067 afs_readdir(VTOAFS(ap->a_vp), ap->a_uio, ap->a_cred, ap->a_eofflag);
1069 if (!error && ap->a_ncookies != NULL) {
1070 struct uio *uio = ap->a_uio;
1071 const struct dirent *dp, *dp_start, *dp_end;
1073 u_long *cookies, *cookiep;
/* Cookie generation only works for a single system-space iovec. */
1075 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
1076 panic("afs_readdir: burned cookies");
/* Rewind to the start of the data afs_readdir just produced. */
1077 dp = (const struct dirent *)
1078 ((const char *)uio->uio_iov->iov_base - (uio->uio_offset - off));
1080 dp_end = (const struct dirent *)uio->uio_iov->iov_base;
/* First pass: count the entries. */
1081 for (dp_start = dp, ncookies = 0; dp < dp_end;
1082 dp = (const struct dirent *)((const char *)dp + dp->d_reclen))
1085 MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP,
/* Second pass: record one offset cookie per entry. */
1087 for (dp = dp_start, cookiep = cookies; dp < dp_end;
1088 dp = (const struct dirent *)((const char *)dp + dp->d_reclen)) {
1089 off += dp->d_reclen;
1092 *ap->a_cookies = cookies;
1093 *ap->a_ncookies = ncookies;
/* VOP_READLINK: return the symlink target via afs_readlink(). */
1100 afs_vop_readlink(ap)
1101 struct vop_readlink_args /* {
1102 * struct vnode *a_vp;
1103 * struct uio *a_uio;
1104 * struct ucred *a_cred;
1108 /* printf("readlink %x\n", ap->a_vp);*/
1110 error = afs_readlink(VTOAFS(ap->a_vp), ap->a_uio, ap->a_cred);
/* VOP_INACTIVE: last reference dropped; let AFS release its hold on the
 * vcache. */
1116 afs_vop_inactive(ap)
1117 struct vop_inactive_args /* {
1118 * struct vnode *a_vp;
1119 * struct thread *td;
1122 struct vnode *vp = ap->a_vp;
1125 afs_InactiveVCache(VTOAFS(vp), 0); /* decrs ref counts */
1131 * struct vop_reclaim_args {
1132 * struct vnode *a_vp;
/* VOP_RECLAIM: detach and destroy the in-memory vcache for a vnode being
 * recycled. Takes afs_xvcache (and the GLOCK as needed — see the
 * haveGlock/haveVlock saves), flushes the vcache, wakes any thread waiting
 * on CVInit, and finally destroys the VM object.
 * NOTE(review): the lock acquire/release lines around the saved states are
 * partly elided in this excerpt. */
1136 afs_vop_reclaim(struct vop_reclaim_args *ap)
1138 /* copied from ../OBSD/osi_vnodeops.c:afs_nbsd_reclaim() */
1140 struct vnode *vp = ap->a_vp;
1141 struct vcache *avc = VTOAFS(vp);
/* Remember which locks the caller already holds so we can restore them. */
1142 int haveGlock = ISAFS_GLOCK();
1143 int haveVlock = CheckLock(&afs_xvcache);
1148 ObtainWriteLock(&afs_xvcache, 901);
1149 /* reclaim the vnode and the in-memory vcache, but keep the on-disk vcache */
1150 code = afs_FlushVCache(avc, &slept);
/* Wake anyone blocked waiting for this vcache's initialization. */
1152 if (avc->f.states & CVInit) {
1153 avc->f.states &= ~CVInit;
1154 afs_osi_Wakeup(&avc->f.states);
1158 ReleaseWriteLock(&afs_xvcache);
1163 afs_warn("afs_vop_reclaim: afs_FlushVCache failed code %d vnode\n", code);
1167 /* basically, it must not fail */
1168 vnode_destroy_vobject(vp);
/* VOP_STRATEGY: hand the buf I/O to afs_ustrategy() under the current
 * credentials. */
1175 afs_vop_strategy(ap)
1176 struct vop_strategy_args /* {
1182 error = afs_ustrategy(ap->a_bp, osi_curcred());
/* VOP_PRINT: dump the vcache's FID, open/writer counts and state flags to
 * the console for debugging. */
1189 struct vop_print_args /* {
1190 * struct vnode *a_vp;
1193 struct vnode *vp = ap->a_vp;
1194 struct vcache *vc = VTOAFS(ap->a_vp);
1195 int s = vc->f.states;
1197 printf("vc %p vp %p tag %s, fid: %d.%d.%d.%d, opens %d, writers %d", vc, vp, vp->v_tag,
1198 (int)vc->f.fid.Cell, (u_int) vc->f.fid.Fid.Volume,
1199 (u_int) vc->f.fid.Fid.Vnode, (u_int) vc->f.fid.Fid.Unique, vc->opens,
1200 vc->execsOrWriters);
1201 printf("\n states%s%s%s%s%s", (s & CStatd) ? " statd" : "",
1202 (s & CRO) ? " readonly" : "", (s & CDirty) ? " dirty" : "",
1203 (s & CMAPPED) ? " mapped" : "",
1204 (s & CVFlushed) ? " flush in progress" : "");
1210 * Advisory record locking support (fcntl() POSIX style)
/* VOP_ADVLOCK: translate the kernel's advisory-lock request into an
 * afs_lockctl() call, normalizing FreeBSD's a_op == F_UNLCK convention
 * into the F_SETLK-with-l_type-F_UNLCK form afs_lockctl expects.
 * NOTE(review): declarations and the a_op rewrite itself are elided in
 * this excerpt. */
1214 struct vop_advlock_args /* {
1215 * struct vnode *a_vp;
1218 * struct flock *a_fl;
/* Work on a private copy of the current credentials. */
1223 struct ucred cr = *osi_curcred();
1226 if (a_op == F_UNLCK) {
1228 * When a_fl->type is F_UNLCK, FreeBSD passes in an a_op of F_UNLCK.
1229 * This is (confusingly) different than how you actually release a lock
1230 * with fcntl(), which is done with an a_op of F_SETLK and an l_type of
1231 * F_UNLCK. Pretend we were given an a_op of F_SETLK in this case,
1232 * since this is what afs_lockctl expects.
1239 afs_lockctl(VTOAFS(ap->a_vp),
1242 (int)(intptr_t)ap->a_id); /* XXX: no longer unique! */
/* The AFS vnode-operations vector: every VOP_* the kernel may issue on an
 * AFS vnode maps to its afs_vop_* handler; anything unlisted falls back to
 * default_vnodeops. NOTE(review): the closing of this initializer lies
 * beyond this excerpt. */
1247 struct vop_vector afs_vnodeops = {
1248 .vop_default = &default_vnodeops,
1249 .vop_access = afs_vop_access,
1250 .vop_advlock = afs_vop_advlock,
1251 .vop_close = afs_vop_close,
1252 .vop_create = afs_vop_create,
1253 .vop_fsync = afs_vop_fsync,
1254 .vop_getattr = afs_vop_getattr,
1255 .vop_getpages = afs_vop_getpages,
1256 .vop_inactive = afs_vop_inactive,
1257 .vop_ioctl = afs_vop_ioctl,
1258 .vop_link = afs_vop_link,
1259 .vop_lookup = afs_vop_lookup,
1260 .vop_mkdir = afs_vop_mkdir,
1261 .vop_mknod = afs_vop_mknod,
1262 .vop_open = afs_vop_open,
1263 .vop_pathconf = afs_vop_pathconf,
1264 .vop_print = afs_vop_print,
1265 .vop_putpages = afs_vop_putpages,
1266 .vop_read = afs_vop_read,
1267 .vop_readdir = afs_vop_readdir,
1268 .vop_readlink = afs_vop_readlink,
1269 .vop_reclaim = afs_vop_reclaim,
1270 .vop_remove = afs_vop_remove,
1271 .vop_rename = afs_vop_rename,
1272 .vop_rmdir = afs_vop_rmdir,
1273 .vop_setattr = afs_vop_setattr,
1274 .vop_strategy = afs_vop_strategy,
1275 .vop_symlink = afs_vop_symlink,
1276 .vop_write = afs_vop_write,