2 * A large chunk of this file appears to be copied directly from
3 * sys/nfsclient/nfs_bio.c, which has the following license:
6 * Copyright (c) 1989, 1993
7 * The Regents of the University of California. All rights reserved.
9 * This code is derived from software contributed to Berkeley by
10 * Rick Macklem at The University of Guelph.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
43 * Pursuant to a statement of U.C. Berkeley dated 1999-07-22, this license
44 * is amended to drop clause (3) above.
47 #include <afsconfig.h>
48 #include <afs/param.h>
51 #include <afs/sysincludes.h> /* Standard vendor system headers */
52 #include <afsincludes.h> /* Afs-based standard headers */
53 #include <afs/afs_stats.h> /* statistics */
54 #include <sys/malloc.h>
55 #include <sys/namei.h>
56 #include <sys/unistd.h>
57 #if __FreeBSD_version >= 1000030
58 #include <sys/rwlock.h>
60 #include <vm/vm_page.h>
61 #include <vm/vm_object.h>
62 #include <vm/vm_pager.h>
63 #include <vm/vnode_pager.h>
64 extern int afs_pbuf_freecnt;
/*
 * Forward declarations for the AFS implementations of the FreeBSD vnode
 * operations; these are wired into the afs_vnodeops vector below.
 */
66 static vop_access_t afs_vop_access;
67 static vop_advlock_t afs_vop_advlock;
68 static vop_close_t afs_vop_close;
69 static vop_create_t afs_vop_create;
70 static vop_fsync_t afs_vop_fsync;
71 static vop_getattr_t afs_vop_getattr;
72 static vop_getpages_t afs_vop_getpages;
73 static vop_inactive_t afs_vop_inactive;
74 static vop_ioctl_t afs_vop_ioctl;
75 static vop_link_t afs_vop_link;
76 static vop_lookup_t afs_vop_lookup;
77 static vop_mkdir_t afs_vop_mkdir;
78 static vop_mknod_t afs_vop_mknod;
79 static vop_open_t afs_vop_open;
80 static vop_pathconf_t afs_vop_pathconf;
81 static vop_print_t afs_vop_print;
82 static vop_putpages_t afs_vop_putpages;
83 static vop_read_t afs_vop_read;
84 static vop_readdir_t afs_vop_readdir;
85 static vop_readlink_t afs_vop_readlink;
86 static vop_reclaim_t afs_vop_reclaim;
87 static vop_remove_t afs_vop_remove;
88 static vop_rename_t afs_vop_rename;
89 static vop_rmdir_t afs_vop_rmdir;
90 static vop_setattr_t afs_vop_setattr;
91 static vop_strategy_t afs_vop_strategy;
92 static vop_symlink_t afs_vop_symlink;
93 static vop_write_t afs_vop_write;
/*
 * Vnode operations vector registered for AFS vnodes.  Any operation not
 * listed here falls through to default_vnodeops via .vop_default.
 */
95 struct vop_vector afs_vnodeops = {
96 .vop_default = &default_vnodeops,
97 .vop_access = afs_vop_access,
98 .vop_advlock = afs_vop_advlock,
99 .vop_close = afs_vop_close,
100 .vop_create = afs_vop_create,
101 .vop_fsync = afs_vop_fsync,
102 .vop_getattr = afs_vop_getattr,
103 .vop_getpages = afs_vop_getpages,
104 .vop_inactive = afs_vop_inactive,
105 .vop_ioctl = afs_vop_ioctl,
106 .vop_link = afs_vop_link,
107 .vop_lookup = afs_vop_lookup,
108 .vop_mkdir = afs_vop_mkdir,
109 .vop_mknod = afs_vop_mknod,
110 .vop_open = afs_vop_open,
111 .vop_pathconf = afs_vop_pathconf,
112 .vop_print = afs_vop_print,
113 .vop_putpages = afs_vop_putpages,
114 .vop_read = afs_vop_read,
115 .vop_readdir = afs_vop_readdir,
116 .vop_readlink = afs_vop_readlink,
117 .vop_reclaim = afs_vop_reclaim,
118 .vop_remove = afs_vop_remove,
119 .vop_rename = afs_vop_rename,
120 .vop_rmdir = afs_vop_rmdir,
121 .vop_setattr = afs_vop_setattr,
122 .vop_strategy = afs_vop_strategy,
123 .vop_symlink = afs_vop_symlink,
124 .vop_write = afs_vop_write,
128 struct componentname *cnp = ap->a_cnp; \
130 MALLOC(name, char *, cnp->cn_namelen+1, M_TEMP, M_WAITOK); \
131 memcpy(name, cnp->cn_nameptr, cnp->cn_namelen); \
132 name[cnp->cn_namelen] = '\0'
/* DROPNAME frees the NUL-terminated name buffer allocated by GETNAME. */
134 #define DROPNAME() FREE(name, M_TEMP)
137 * Here we define compatibility functions/macros for interfaces that
138 * have changed between different FreeBSD versions.
/* No-op shims: this kernel branch uses per-page locks, not the old
 * global page-queue lock, so lock/unlock_queues do nothing here. */
140 static __inline void ma_vm_page_lock_queues(void) {};
141 static __inline void ma_vm_page_unlock_queues(void) {};
142 static __inline void ma_vm_page_lock(vm_page_t m) { vm_page_lock(m); };
143 static __inline void ma_vm_page_unlock(vm_page_t m) { vm_page_unlock(m); };
/* These wrappers drop the thread argument (p) that older FreeBSD
 * vn_lock/VOP_LOCK/VOP_UNLOCK interfaces required. */
145 #define ma_vn_lock(vp, flags, p) (vn_lock(vp, flags))
146 #define MA_VOP_LOCK(vp, flags, p) (VOP_LOCK(vp, flags))
147 #define MA_VOP_UNLOCK(vp, flags, p) (VOP_UNLOCK(vp, flags))
149 #define MA_PCPU_INC(c) PCPU_INC(c)
150 #define MA_PCPU_ADD(c, n) PCPU_ADD(c, n)
/* VM object lock naming: rwlock-style WLOCK on FreeBSD >= 10.0
 * (1000030), plain VM_OBJECT_LOCK on older kernels. */
152 #if __FreeBSD_version >= 1000030
153 #define AFS_VM_OBJECT_WLOCK(o) VM_OBJECT_WLOCK(o)
154 #define AFS_VM_OBJECT_WUNLOCK(o) VM_OBJECT_WUNLOCK(o)
156 #define AFS_VM_OBJECT_WLOCK(o) VM_OBJECT_LOCK(o)
157 #define AFS_VM_OBJECT_WUNLOCK(o) VM_OBJECT_UNLOCK(o)
161 * Mostly copied from sys/ufs/ufs/ufs_vnops.c:ufs_pathconf().
162 * We should know the correct answers to these questions with
163 * respect to the AFS protocol (which may differ from the UFS
164 * values) but for the moment this will do.
/* Answer pathconf(2)/fpathconf(2) queries (_PC_* names) for AFS vnodes.
 * NOTE(review): several case labels, breaks, and the default path are
 * not visible in this excerpt — confirm against upstream before edits. */
167 afs_vop_pathconf(struct vop_pathconf_args *ap)
172 switch (ap->a_name) {
174 *ap->a_retval = LINK_MAX;
177 *ap->a_retval = NAME_MAX;
180 *ap->a_retval = PATH_MAX;
183 *ap->a_retval = PIPE_BUF;
185 case _PC_CHOWN_RESTRICTED:
191 #ifdef _PC_ACL_EXTENDED
192 case _PC_ACL_EXTENDED:
195 case _PC_ACL_PATH_MAX:
199 #ifdef _PC_MAC_PRESENT
200 case _PC_MAC_PRESENT:
206 /* _PC_ASYNC_IO should have been handled by upper layers. */
207 KASSERT(0, ("_PC_ASYNC_IO should not get here"));
217 #ifdef _PC_ALLOC_SIZE_MIN
218 case _PC_ALLOC_SIZE_MIN:
219 *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_bsize;
222 #ifdef _PC_FILESIZEBITS
223 case _PC_FILESIZEBITS:
224 *ap->a_retval = 32; /* XXX */
227 #ifdef _PC_REC_INCR_XFER_SIZE
228 case _PC_REC_INCR_XFER_SIZE:
229 *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize;
231 case _PC_REC_MAX_XFER_SIZE:
232 *ap->a_retval = -1; /* means ``unlimited'' */
234 case _PC_REC_MIN_XFER_SIZE:
235 *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize;
237 case _PC_REC_XFER_ALIGN:
238 *ap->a_retval = PAGE_SIZE;
241 #ifdef _PC_SYMLINK_MAX
242 case _PC_SYMLINK_MAX:
243 *ap->a_retval = MAXPATHLEN;
/* Name lookup in an AFS directory.  Translates the VFS locking protocol
 * (dvp comes in locked; child returned locked; special ".." ordering)
 * onto afs_lookup().  NOTE(review): excerpt is elided — error paths and
 * some branches are not visible here. */
255 struct vop_lookup_args /* {
256 * struct vnodeop_desc * a_desc;
257 * struct vnode *a_dvp;
258 * struct vnode **a_vpp;
259 * struct componentname *a_cnp;
264 struct vnode *vp, *dvp;
265 int flags = ap->a_cnp->cn_flags;
266 int lockparent; /* 1 => lockparent flag is set */
267 int wantparent; /* 1 => wantparent or lockparent flag */
/* Lookups are only meaningful in directories. */
270 if (dvp->v_type != VDIR) {
/* ".." from the filesystem root would escape the mount. */
274 if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
279 lockparent = flags & LOCKPARENT;
280 wantparent = flags & (LOCKPARENT | WANTPARENT);
282 #if __FreeBSD_version < 1000021
283 cnp->cn_flags |= MPSAFE; /* steel */
/* For "..": drop the child-side lock before calling into AFS to avoid
 * violating the root-to-leaf lock ordering. */
286 if (flags & ISDOTDOT)
287 MA_VOP_UNLOCK(dvp, 0, p);
290 error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
294 if (flags & ISDOTDOT)
295 MA_VOP_LOCK(dvp, LK_EXCLUSIVE | LK_RETRY, p);
/* ENOENT on the last component of CREATE/RENAME is the expected
 * "target does not exist yet" case for the caller. */
296 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)
297 && (flags & ISLASTCN) && error == ENOENT)
299 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
300 cnp->cn_flags |= SAVENAME;
305 vp = AFSTOV(vcp); /* always get a node if no error */
307 /* The parent directory comes in locked. We unlock it on return
308 * unless the caller wants it left locked.
309 * we also always return the vnode locked. */
311 if (flags & ISDOTDOT) {
312 /* vp before dvp since we go root to leaf, and .. comes first */
313 ma_vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
314 ma_vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
315 /* always return the child locked */
316 if (lockparent && (flags & ISLASTCN)
317 && (error = ma_vn_lock(dvp, LK_EXCLUSIVE, p))) {
322 } else if (vp == dvp) {
323 /* they're the same; afs_lookup() already ref'ed the leaf.
324 * It came in locked, so we don't need to ref OR lock it */
326 ma_vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, p);
327 /* always return the child locked */
331 if ((cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN))
332 || (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)))
333 cnp->cn_flags |= SAVENAME;
/* Create a regular file via afs_create(); on success returns the new
 * vnode locked in *a_vpp.  VA_EXCLUSIVE maps to AFS EXCL semantics. */
341 struct vop_create_args /* {
342 * struct vnode *a_dvp;
343 * struct vnode **a_vpp;
344 * struct componentname *a_cnp;
345 * struct vattr *a_vap;
350 struct vnode *dvp = ap->a_dvp;
355 afs_create(VTOAFS(dvp), name, ap->a_vap,
356 ap->a_vap->va_vaflags & VA_EXCLUSIVE ? EXCL : NONEXCL,
357 ap->a_vap->va_mode, &vcp, cnp->cn_cred);
/* Hand the new vnode back locked, per the VOP_CREATE contract. */
365 *ap->a_vpp = AFSTOV(vcp);
366 ma_vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY, p);
/* VOP_MKNOD handler.  Body not visible in this excerpt; presumably AFS
 * does not support device nodes — TODO confirm against upstream. */
376 struct vop_mknod_args /* {
377 * struct vnode *a_dvp;
378 * struct vnode **a_vpp;
379 * struct componentname *a_cnp;
380 * struct vattr *a_vap;
/* Open an AFS vnode: call afs_open(), sanity-check that the vcache still
 * maps to the same vnode, create the backing VM object, and flush any
 * stale cached pages. */
388 struct vop_open_args /* {
389 * struct vnode *a_vp;
391 * struct ucred *a_cred;
392 * struct thread *a_td;
397 struct vcache *vc = VTOAFS(ap->a_vp);
400 error = afs_open(&vc, ap->a_mode, ap->a_cred);
/* afs_open() may swap the vcache; that must never change the vnode. */
402 if (AFSTOV(vc) != ap->a_vp)
403 panic("AFS open changed vnode!");
406 vnode_create_vobject(ap->a_vp, vc->f.m.Length, ap->a_td);
407 osi_FlushPages(vc, ap->a_cred);
/* Close an AFS vnode.  Doomed (recycled) vnodes are skipped — they have
 * no afs_close work left — after asserting their open count is zero. */
413 struct vop_close_args /* {
414 * struct vnode *a_vp;
416 * struct ucred *a_cred;
417 * struct thread *a_td;
421 struct vnode *vp = ap->a_vp;
422 struct vcache *avc = VTOAFS(vp);
425 iflag = vp->v_iflag & VI_DOOMED;
427 if (iflag & VI_DOOMED) {
428 /* osi_FlushVCache (correctly) calls vgone() on recycled vnodes, we don't
429 * have an afs_close to process, in that case */
431 panic("afs_vop_close: doomed vnode %p has vcache %p with non-zero opens %d\n",
432 vp, avc, avc->opens);
/* Two call sites: one with the caller's credentials, a fallback using
 * the AFS daemon credentials (branch condition elided in excerpt). */
438 code = afs_close(avc, ap->a_fflag, ap->a_cred);
440 code = afs_close(avc, ap->a_fflag, afs_osi_credp);
441 osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
/* Permission check: thin wrapper delegating to afs_access(). */
448 struct vop_access_args /* {
449 * struct vnode *a_vp;
450 * accmode_t a_accmode;
451 * struct ucred *a_cred;
452 * struct thread *a_td;
457 code = afs_access(VTOAFS(ap->a_vp), ap->a_accmode, ap->a_cred);
/* Fetch attributes: thin wrapper delegating to afs_getattr(). */
464 struct vop_getattr_args /* {
465 * struct vnode *a_vp;
466 * struct vattr *a_vap;
467 * struct ucred *a_cred;
473 code = afs_getattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);
/* Set attributes: thin wrapper delegating to afs_setattr(). */
481 struct vop_setattr_args /* {
482 * struct vnode *a_vp;
483 * struct vattr *a_vap;
484 * struct ucred *a_cred;
489 code = afs_setattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);
/* Read from an AFS file: flush stale cached pages first, then delegate
 * the uio transfer to afs_read(). */
496 struct vop_read_args /* {
497 * struct vnode *a_vp;
500 * struct ucred *a_cred;
505 struct vcache *avc = VTOAFS(ap->a_vp);
507 osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
508 code = afs_read(avc, ap->a_uio, ap->a_cred, 0);
513 /* struct vop_getpages_args {
514 * struct vnode *a_vp;
/* Pager "get pages" for AFS: map the busy pages into a pbuf KVA window,
 * read their contents via afs_read() with a SYSSPACE uio, then mark the
 * pages valid/clean.  Two code shapes coexist: the newer "busied" API
 * (FBSD_VOP_GETPAGES_BUSIED) and the older a_reqpage-based API.
 * NOTE(review): excerpt is elided — #else branches, closing braces and
 * some statements are missing; verify against upstream before edits. */
522 afs_vop_getpages(struct vop_getpages_args *ap)
525 int i, nextoff, size, toff, npages, count;
535 memset(&uio, 0, sizeof(uio));
536 memset(&iov, 0, sizeof(iov));
/* New API passes a page count directly; old API passes a byte count. */
541 #ifdef FBSD_VOP_GETPAGES_BUSIED
542 npages = ap->a_count;
548 npages = btoc(ap->a_count);
551 if ((object = vp->v_object) == NULL) {
552 printf("afs_getpages: called with non-merged cache vnode??\n");
553 return VM_PAGER_ERROR;
557 * If the requested page is partially valid, just return it and
558 * allow the pager to zero-out the blanks. Partially valid pages
559 * can only occur at the file EOF.
562 #ifdef FBSD_VOP_GETPAGES_BUSIED
563 AFS_VM_OBJECT_WLOCK(object);
564 ma_vm_page_lock_queues();
565 if(pages[npages - 1]->valid != 0) {
567 ma_vm_page_unlock_queues();
568 AFS_VM_OBJECT_WUNLOCK(object);
569 return (VM_PAGER_OK);
573 vm_page_t m = pages[ap->a_reqpage];
574 AFS_VM_OBJECT_WLOCK(object);
575 ma_vm_page_lock_queues();
577 /* handled by vm_fault now */
578 /* vm_page_zero_invalid(m, TRUE); */
/* Old API: requested page already valid — free all others and return. */
579 for (i = 0; i < npages; ++i) {
580 if (i != ap->a_reqpage) {
581 ma_vm_page_lock(pages[i]);
582 vm_page_free(pages[i]);
583 ma_vm_page_unlock(pages[i]);
586 ma_vm_page_unlock_queues();
587 AFS_VM_OBJECT_WUNLOCK(object);
591 ma_vm_page_unlock_queues();
592 AFS_VM_OBJECT_WUNLOCK(object);
/* Borrow a pager buffer and map all target pages contiguously so the
 * AFS read can treat them as one kernel-space buffer. */
594 bp = getpbuf(&afs_pbuf_freecnt);
596 kva = (vm_offset_t) bp->b_data;
597 pmap_qenter(kva, pages, npages);
598 MA_PCPU_INC(cnt.v_vnodein);
599 MA_PCPU_ADD(cnt.v_vnodepgsin, npages);
601 #ifdef FBSD_VOP_GETPAGES_BUSIED
602 count = ctob(npages);
606 iov.iov_base = (caddr_t) kva;
610 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
611 uio.uio_resid = count;
612 uio.uio_segflg = UIO_SYSSPACE;
613 uio.uio_rw = UIO_READ;
614 uio.uio_td = curthread;
617 osi_FlushPages(avc, osi_curcred()); /* hold GLOCK, but not basic vnode lock */
618 code = afs_read(avc, &uio, osi_curcred(), 0);
/* Unmap and return the pager buffer regardless of the read outcome. */
620 pmap_qremove(kva, npages);
622 relpbuf(bp, &afs_pbuf_freecnt);
/* Total failure: nothing was transferred at all. */
624 if (code && (uio.uio_resid == count)) {
625 #ifndef FBSD_VOP_GETPAGES_BUSIED
626 AFS_VM_OBJECT_WLOCK(object);
627 ma_vm_page_lock_queues();
628 for (i = 0; i < npages; ++i) {
629 if (i != ap->a_reqpage)
630 vm_page_free(pages[i]);
632 ma_vm_page_unlock_queues();
633 AFS_VM_OBJECT_WUNLOCK(object);
635 return VM_PAGER_ERROR;
/* Walk the pages and mark each fully- or partially-filled one valid. */
638 size = count - uio.uio_resid;
639 AFS_VM_OBJECT_WLOCK(object);
640 ma_vm_page_lock_queues();
641 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
643 nextoff = toff + PAGE_SIZE;
646 /* XXX not in nfsclient? */
647 m->flags &= ~PG_ZERO;
649 if (nextoff <= size) {
651 * Read operation filled an entire page
653 m->valid = VM_PAGE_BITS_ALL;
654 KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
655 } else if (size > toff) {
657 * Read operation filled a partial page.
660 vm_page_set_validclean(m, 0, size - toff);
661 KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
664 #ifndef FBSD_VOP_GETPAGES_BUSIED
665 if (i != ap->a_reqpage) {
666 #if __FreeBSD_version >= 1000042
667 vm_page_readahead_finish(m);
670 * Whether or not to leave the page activated is up in
671 * the air, but we should put the page on a page queue
672 * somewhere (it already is in the object). Result:
673 * It appears that empirical results show that
674 * deactivating pages is best.
678 * Just in case someone was asking for this page we
679 * now tell them that it is ok to use.
682 if (m->oflags & VPO_WANTED) {
685 ma_vm_page_unlock(m);
689 vm_page_deactivate(m);
690 ma_vm_page_unlock(m);
696 ma_vm_page_unlock(m);
698 #endif /* __FreeBSD_version 1000042 */
700 #endif /* ndef FBSD_VOP_GETPAGES_BUSIED */
702 ma_vm_page_unlock_queues();
703 AFS_VM_OBJECT_WUNLOCK(object);
/* Write to an AFS file: flush stale cached pages, then delegate the uio
 * transfer to afs_write(). */
709 struct vop_write_args /* {
710 * struct vnode *a_vp;
713 * struct ucred *a_cred;
717 struct vcache *avc = VTOAFS(ap->a_vp);
719 osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
721 afs_write(VTOAFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_cred, 0);
727 * struct vop_putpages_args {
728 * struct vnode *a_vp;
733 * vm_ooffset_t a_offset;
737 * All of the pages passed to us in ap->a_m[] are already marked as busy,
738 * so there is no additional locking required to set their flags. -GAW
/* Pager "put pages" for AFS (VREG vnodes only): map the busy pages into
 * a pbuf KVA window and push them through afs_write(); on success mark
 * the written pages clean and set their rtvals to VM_PAGER_OK. */
741 afs_vop_putpages(struct vop_putpages_args *ap)
744 int i, size, npages, sync;
752 memset(&uio, 0, sizeof(uio));
753 memset(&iov, 0, sizeof(iov));
757 /* Perhaps these two checks should just be KASSERTs instead... */
758 if (vp->v_object == NULL) {
759 printf("afs_putpages: called with non-merged cache vnode??\n");
760 return VM_PAGER_ERROR; /* XXX I think this is insufficient */
762 if (vType(avc) != VREG) {
763 printf("afs_putpages: not VREG");
764 return VM_PAGER_ERROR; /* XXX I think this is insufficient */
/* Default every page to AGAIN until the write confirms it was stored. */
766 npages = btoc(ap->a_count);
767 for (i = 0; i < npages; i++)
768 ap->a_rtvals[i] = VM_PAGER_AGAIN;
769 bp = getpbuf(&afs_pbuf_freecnt);
771 kva = (vm_offset_t) bp->b_data;
772 pmap_qenter(kva, ap->a_m, npages);
773 MA_PCPU_INC(cnt.v_vnodeout);
774 MA_PCPU_ADD(cnt.v_vnodepgsout, ap->a_count);
776 iov.iov_base = (caddr_t) kva;
777 iov.iov_len = ap->a_count;
780 uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
781 uio.uio_resid = ap->a_count;
782 uio.uio_segflg = UIO_SYSSPACE;
783 uio.uio_rw = UIO_WRITE;
784 uio.uio_td = curthread;
786 if (ap->a_sync & VM_PAGER_PUT_SYNC)
788 /*if (ap->a_sync & VM_PAGER_PUT_INVAL)
789 * sync |= IO_INVAL; */
792 code = afs_write(avc, &uio, sync, osi_curcred(), 0);
795 pmap_qremove(kva, npages);
796 relpbuf(bp, &afs_pbuf_freecnt);
/* Mark every fully-written page OK and clean. */
799 size = ap->a_count - uio.uio_resid;
800 for (i = 0; i < round_page(size) / PAGE_SIZE; i++) {
801 ap->a_rtvals[i] = VM_PAGER_OK;
802 vm_page_undirty(ap->a_m[i]);
805 return ap->a_rtvals[0];
/* ioctl handler: dispatch VICEIOCTL ('V' in the command group byte)
 * requests to HandleIoctl(); anything else is a no-op. */
810 struct vop_ioctl_args /* {
811 * struct vnode *a_vp;
815 * struct ucred *a_cred;
816 * struct thread *a_td;
819 struct vcache *tvc = VTOAFS(ap->a_vp);
822 /* in case we ever get in here... */
824 AFS_STATCNT(afs_ioctl);
825 if (((ap->a_command >> 8) & 0xff) == 'V') {
826 /* This is a VICEIOCTL call */
828 error = HandleIoctl(tvc, ap->a_command, ap->a_data);
832 /* No-op call; just return. */
/* fsync: flush the vnode's data via afs_fsync() using the calling
 * thread's credentials. */
839 struct vop_fsync_args /* {
840 * struct vnode *a_vp;
846 struct vnode *vp = ap->a_vp;
849 /*vflushbuf(vp, wait); */
850 error = afs_fsync(VTOAFS(vp), ap->a_td->td_ucred);
/* Remove a directory entry: delegate to afs_remove() with the component
 * name copied out by GETNAME (freed by DROPNAME, elided here). */
857 struct vop_remove_args /* {
858 * struct vnode *a_dvp;
859 * struct vnode *a_vp;
860 * struct componentname *a_cnp;
864 struct vnode *vp = ap->a_vp;
865 struct vnode *dvp = ap->a_dvp;
869 error = afs_remove(VTOAFS(dvp), name, cnp->cn_cred);
/* Hard link: reject cross-mount links and links to directories, lock the
 * source vnode, then delegate to afs_link(). */
878 struct vop_link_args /* {
879 * struct vnode *a_vp;
880 * struct vnode *a_tdvp;
881 * struct componentname *a_cnp;
885 struct vnode *dvp = ap->a_tdvp;
886 struct vnode *vp = ap->a_vp;
/* Hard links cannot span mounts. */
889 if (dvp->v_mount != vp->v_mount) {
/* Hard links to directories are not permitted. */
893 if (vp->v_type == VDIR) {
897 if ((error = ma_vn_lock(vp, LK_CANRECURSE | LK_EXCLUSIVE, p)) != 0) {
901 error = afs_link(VTOAFS(vp), VTOAFS(dvp), name, cnp->cn_cred);
904 MA_VOP_UNLOCK(vp, 0, p);
/* Rename: VFS-level checks (cross-device, fvp==tvp degenerates to a
 * remove, no directory renames here), then relookup the source and call
 * afs_rename() with copied-out name strings.  NOTE(review): excerpt is
 * elided — unlock/vrele cleanup paths are not visible. */
912 struct vop_rename_args /* {
913 * struct vnode *a_fdvp;
914 * struct vnode *a_fvp;
915 * struct componentname *a_fcnp;
916 * struct vnode *a_tdvp;
917 * struct vnode *a_tvp;
918 * struct componentname *a_tcnp;
922 struct componentname *fcnp = ap->a_fcnp;
924 struct componentname *tcnp = ap->a_tcnp;
926 struct vnode *tvp = ap->a_tvp;
927 struct vnode *tdvp = ap->a_tdvp;
928 struct vnode *fvp = ap->a_fvp;
929 struct vnode *fdvp = ap->a_fdvp;
932 * Check for cross-device rename.
934 if ((fvp->v_mount != tdvp->v_mount)
935 || (tvp && (fvp->v_mount != tvp->v_mount))) {
949 * if fvp == tvp, we're just removing one name of a pair of
950 * directory entries for the same element. convert call into rename.
951 ( (pinched from FreeBSD 4.4's ufs_rename())
955 if (fvp->v_type == VDIR) {
960 /* Release destination completely. */
/* Re-find the source entry in DELETE mode so VOP_REMOVE can drop it. */
967 fcnp->cn_flags &= ~MODMASK;
968 fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
969 if ((fcnp->cn_flags & SAVESTART) == 0)
970 panic("afs_rename: lost from startdir");
971 fcnp->cn_nameiop = DELETE;
973 error = relookup(fdvp, &fvp, fcnp);
980 error = VOP_REMOVE(fdvp, fvp, fcnp);
988 if ((error = ma_vn_lock(fvp, LK_EXCLUSIVE, p)) != 0)
/* Copy out NUL-terminated from/to component names for afs_rename(). */
991 MALLOC(fname, char *, fcnp->cn_namelen + 1, M_TEMP, M_WAITOK);
992 memcpy(fname, fcnp->cn_nameptr, fcnp->cn_namelen);
993 fname[fcnp->cn_namelen] = '\0';
994 MALLOC(tname, char *, tcnp->cn_namelen + 1, M_TEMP, M_WAITOK);
995 memcpy(tname, tcnp->cn_nameptr, tcnp->cn_namelen);
996 tname[tcnp->cn_namelen] = '\0';
1000 /* XXX use "from" or "to" creds? NFS uses "to" creds */
1002 afs_rename(VTOAFS(fdvp), fname, VTOAFS(tdvp), tname, tcnp->cn_cred);
1005 FREE(fname, M_TEMP);
1006 FREE(tname, M_TEMP);
/* Create a directory via afs_mkdir(); on success return the new vnode
 * locked in *a_vpp. */
1020 struct vop_mkdir_args /* {
1021 * struct vnode *a_dvp;
1022 * struct vnode **a_vpp;
1023 * struct componentname *a_cnp;
1024 * struct vattr *a_vap;
1027 struct vnode *dvp = ap->a_dvp;
1028 struct vattr *vap = ap->a_vap;
/* The namei layer must have saved the pathname buffer for us. */
1034 if ((cnp->cn_flags & HASBUF) == 0)
1035 panic("afs_vop_mkdir: no name");
1038 error = afs_mkdir(VTOAFS(dvp), name, vap, &vcp, cnp->cn_cred);
1045 *ap->a_vpp = AFSTOV(vcp);
1046 ma_vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY, p);
/* Remove a directory: delegate to afs_rmdir() with the copied-out name. */
1055 struct vop_rmdir_args /* {
1056 * struct vnode *a_dvp;
1057 * struct vnode *a_vp;
1058 * struct componentname *a_cnp;
1062 struct vnode *dvp = ap->a_dvp;
1066 error = afs_rmdir(VTOAFS(dvp), name, cnp->cn_cred);
1072 /* struct vop_symlink_args {
1073 * struct vnode *a_dvp;
1074 * struct vnode **a_vpp;
1075 * struct componentname *a_cnp;
1076 * struct vattr *a_vap;
/* Create a symlink via afs_symlink(), then look the new entry up again
 * to obtain the vnode to return locked in *a_vpp. */
1081 afs_vop_symlink(struct vop_symlink_args *ap)
1084 struct vnode *newvp;
1095 afs_symlink(VTOAFS(dvp), name, ap->a_vap, ap->a_target, NULL,
1098 error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
1100 newvp = AFSTOV(vcp);
1101 ma_vn_lock(newvp, LK_EXCLUSIVE | LK_RETRY, cnp->cn_thread);
1106 *(ap->a_vpp) = newvp;
/* Read directory entries via afs_readdir(); if the caller requested NFS
 * cookies, reconstruct one cookie per dirent by walking the entries just
 * written into the (single, SYSSPACE) uio iovec. */
1112 struct vop_readdir_args /* {
1113 * struct vnode *a_vp;
1114 * struct uio *a_uio;
1115 * struct ucred *a_cred;
1117 * u_long *a_cookies;
1123 /* printf("readdir %x cookies %x ncookies %d\n", ap->a_vp, ap->a_cookies,
/* Remember the starting offset so we can locate the entries afterwards. */
1125 off = ap->a_uio->uio_offset;
1128 afs_readdir(VTOAFS(ap->a_vp), ap->a_uio, ap->a_cred, ap->a_eofflag);
1130 if (!error && ap->a_ncookies != NULL) {
1131 struct uio *uio = ap->a_uio;
1132 const struct dirent *dp, *dp_start, *dp_end;
1134 u_long *cookies, *cookiep;
/* Cookie reconstruction only works on a single system-space iovec. */
1136 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
1137 panic("afs_readdir: burned cookies");
/* Back up from the current iov_base to where the entries begin. */
1138 dp = (const struct dirent *)
1139 ((const char *)uio->uio_iov->iov_base - (uio->uio_offset - off));
1141 dp_end = (const struct dirent *)uio->uio_iov->iov_base;
/* First pass: count the entries. */
1142 for (dp_start = dp, ncookies = 0; dp < dp_end;
1143 dp = (const struct dirent *)((const char *)dp + dp->d_reclen))
1146 MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP,
/* Second pass: emit one running-offset cookie per entry. */
1148 for (dp = dp_start, cookiep = cookies; dp < dp_end;
1149 dp = (const struct dirent *)((const char *)dp + dp->d_reclen)) {
1150 off += dp->d_reclen;
1153 *ap->a_cookies = cookies;
1154 *ap->a_ncookies = ncookies;
/* Read a symlink's target: thin wrapper delegating to afs_readlink(). */
1161 afs_vop_readlink(ap)
1162 struct vop_readlink_args /* {
1163 * struct vnode *a_vp;
1164 * struct uio *a_uio;
1165 * struct ucred *a_cred;
1169 /* printf("readlink %x\n", ap->a_vp);*/
1171 error = afs_readlink(VTOAFS(ap->a_vp), ap->a_uio, ap->a_cred);
/* Last reference dropped: let AFS release its hold on the vcache. */
1177 afs_vop_inactive(ap)
1178 struct vop_inactive_args /* {
1179 * struct vnode *a_vp;
1180 * struct thread *td;
1183 struct vnode *vp = ap->a_vp;
1186 afs_InactiveVCache(VTOAFS(vp), 0); /* decrs ref counts */
1192 * struct vop_reclaim_args {
1193 * struct vnode *a_vp;
/* Reclaim a vnode: flush the in-memory vcache under afs_xvcache (taking
 * GLOCK/xvcache only if not already held — see haveGlock/haveVlock),
 * wake any waiters stuck in CVInit, and destroy the VM object. */
1197 afs_vop_reclaim(struct vop_reclaim_args *ap)
1199 /* copied from ../OBSD/osi_vnodeops.c:afs_nbsd_reclaim() */
1201 struct vnode *vp = ap->a_vp;
1202 struct vcache *avc = VTOAFS(vp);
1203 int haveGlock = ISAFS_GLOCK();
1204 int haveVlock = CheckLock(&afs_xvcache);
1209 ObtainWriteLock(&afs_xvcache, 901);
1210 /* reclaim the vnode and the in-memory vcache, but keep the on-disk vcache */
1211 code = afs_FlushVCache(avc, &slept);
/* Anyone waiting for this vcache to finish initializing must be woken,
 * or they would sleep forever on a now-dead vcache. */
1213 if (avc->f.states & CVInit) {
1214 avc->f.states &= ~CVInit;
1215 afs_osi_Wakeup(&avc->f.states);
1219 ReleaseWriteLock(&afs_xvcache);
1224 afs_warn("afs_vop_reclaim: afs_FlushVCache failed code %d vnode\n", code);
1228 /* basically, it must not fail */
1229 vnode_destroy_vobject(vp);
/* Buffer I/O strategy: delegate the buf to afs_ustrategy() with the
 * current thread's credentials. */
1236 afs_vop_strategy(ap)
1237 struct vop_strategy_args /* {
1243 error = afs_ustrategy(ap->a_bp, osi_curcred());
/* Debug printer: dump the vcache's FID, open/writer counts, and a
 * selection of its state flags to the console. */
1250 struct vop_print_args /* {
1251 * struct vnode *a_vp;
1254 struct vnode *vp = ap->a_vp;
1255 struct vcache *vc = VTOAFS(ap->a_vp);
1256 int s = vc->f.states;
1258 printf("vc %p vp %p tag %s, fid: %d.%d.%d.%d, opens %d, writers %d", vc, vp, vp->v_tag,
1259 (int)vc->f.fid.Cell, (u_int) vc->f.fid.Fid.Volume,
1260 (u_int) vc->f.fid.Fid.Vnode, (u_int) vc->f.fid.Fid.Unique, vc->opens,
1261 vc->execsOrWriters);
1262 printf("\n states%s%s%s%s%s", (s & CStatd) ? " statd" : "",
1263 (s & CRO) ? " readonly" : "", (s & CDirty) ? " dirty" : "",
1264 (s & CMAPPED) ? " mapped" : "",
1265 (s & CVFlushed) ? " flush in progress" : "");
1271 * Advisory record locking support (fcntl() POSIX style)
1275 struct vop_advlock_args /* {
1276 * struct vnode *a_vp;
1279 * struct flock *a_fl;
/* Take a private copy of the current credentials for afs_lockctl. */
1284 struct ucred cr = *osi_curcred();
1287 if (a_op == F_UNLCK) {
1289 * When a_fl->type is F_UNLCK, FreeBSD passes in an a_op of F_UNLCK.
1290 * This is (confusingly) different than how you actually release a lock
1291 * with fcntl(), which is done with an a_op of F_SETLK and an l_type of
1292 * F_UNLCK. Pretend we were given an a_op of F_SETLK in this case,
1293 * since this is what afs_lockctl expects.
1300 afs_lockctl(VTOAFS(ap->a_vp),
1303 (int)(intptr_t)ap->a_id); /* XXX: no longer unique! */