2 * A large chunk of this file appears to be copied directly from
3 * sys/nfsclient/nfs_bio.c, which has the following license:
6 * Copyright (c) 1989, 1993
7 * The Regents of the University of California. All rights reserved.
9 * This code is derived from software contributed to Berkeley by
10 * Rick Macklem at The University of Guelph.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
43 * Pursuant to a statement of U.C. Berkeley dated 1999-07-22, this license
44 * is amended to drop clause (3) above.
47 #include <afsconfig.h>
48 #include <afs/param.h>
51 #include <afs/sysincludes.h> /* Standard vendor system headers */
52 #include <afsincludes.h> /* Afs-based standard headers */
53 #include <afs/afs_stats.h> /* statistics */
54 #include <sys/malloc.h>
55 #include <sys/namei.h>
56 #include <sys/unistd.h>
57 #if __FreeBSD_version >= 1000030
58 #include <sys/rwlock.h>
60 #include <vm/vm_page.h>
61 #include <vm/vm_object.h>
62 #include <vm/vm_pager.h>
63 #include <vm/vnode_pager.h>
64 extern int afs_pbuf_freecnt;
67 struct componentname *cnp = ap->a_cnp; \
69 MALLOC(name, char *, cnp->cn_namelen+1, M_TEMP, M_WAITOK); \
70 memcpy(name, cnp->cn_nameptr, cnp->cn_namelen); \
71 name[cnp->cn_namelen] = '\0'
73 #define DROPNAME() FREE(name, M_TEMP)
76 * Here we define compatibility functions/macros for interfaces that
77 * have changed between different FreeBSD versions.
/*
 * Compatibility shims for VM page/object locking interfaces that changed
 * between FreeBSD versions.  The page-queue wrappers are deliberate no-ops
 * in this configuration; per-page locking maps straight onto
 * vm_page_lock()/vm_page_unlock().
 */
79 static __inline void ma_vm_page_lock_queues(void) {};
80 static __inline void ma_vm_page_unlock_queues(void) {};
81 static __inline void ma_vm_page_lock(vm_page_t m) { vm_page_lock(m); };
82 static __inline void ma_vm_page_unlock(vm_page_t m) { vm_page_unlock(m); };
/* On FreeBSD >= 1000030 the VM object lock split into read/write variants;
 * map our wrapper onto the write-lock API there, and onto the old single
 * VM_OBJECT_LOCK otherwise. */
84 #if __FreeBSD_version >= 1000030
85 #define AFS_VM_OBJECT_WLOCK(o) VM_OBJECT_WLOCK(o)
86 #define AFS_VM_OBJECT_WUNLOCK(o) VM_OBJECT_WUNLOCK(o)
88 #define AFS_VM_OBJECT_WLOCK(o) VM_OBJECT_LOCK(o)
89 #define AFS_VM_OBJECT_WUNLOCK(o) VM_OBJECT_UNLOCK(o)
93 * Mostly copied from sys/ufs/ufs/ufs_vnops.c:ufs_pathconf().
94 * We should know the correct answers to these questions with
95 * respect to the AFS protocol (which may differ from the UFS
96 * values) but for the moment this will do.
/*
 * afs_vop_pathconf: answer pathconf(2)/fpathconf(2) queries for an AFS
 * vnode.  The values are largely borrowed from UFS (see the comment above);
 * several are admitted approximations rather than true AFS protocol limits.
 */
99 afs_vop_pathconf(struct vop_pathconf_args *ap)
104 switch (ap->a_name) {
106 *ap->a_retval = LINK_MAX;
109 *ap->a_retval = NAME_MAX;
112 *ap->a_retval = PATH_MAX;
115 *ap->a_retval = PIPE_BUF;
117 case _PC_CHOWN_RESTRICTED:
123 #ifdef _PC_ACL_EXTENDED
124 case _PC_ACL_EXTENDED:
127 case _PC_ACL_PATH_MAX:
131 #ifdef _PC_MAC_PRESENT
132 case _PC_MAC_PRESENT:
138 /* _PC_ASYNC_IO should have been handled by upper layers. */
139 KASSERT(0, ("_PC_ASYNC_IO should not get here"));
149 #ifdef _PC_ALLOC_SIZE_MIN
150 case _PC_ALLOC_SIZE_MIN:
151 *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_bsize;
154 #ifdef _PC_FILESIZEBITS
155 case _PC_FILESIZEBITS:
/* NOTE(review): 32 understates large-file support if the server side
 * handles 64-bit file sizes — flagged XXX by the original author too. */
156 *ap->a_retval = 32; /* XXX */
159 #ifdef _PC_REC_INCR_XFER_SIZE
160 case _PC_REC_INCR_XFER_SIZE:
161 *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize;
163 case _PC_REC_MAX_XFER_SIZE:
164 *ap->a_retval = -1; /* means ``unlimited'' */
166 case _PC_REC_MIN_XFER_SIZE:
167 *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize;
169 case _PC_REC_XFER_ALIGN:
170 *ap->a_retval = PAGE_SIZE;
173 #ifdef _PC_SYMLINK_MAX
174 case _PC_SYMLINK_MAX:
175 *ap->a_retval = MAXPATHLEN;
/*
 * afs_vop_lookup: resolve one pathname component in an AFS directory.
 * The vnode-locking protocol is documented inline below; ISDOTDOT is the
 * special case because the parent (target) must be locked before 'dvp'.
 */
187 struct vop_lookup_args /* {
188 * struct vnodeop_desc * a_desc;
189 * struct vnode *a_dvp;
190 * struct vnode **a_vpp;
191 * struct componentname *a_cnp;
196 struct vnode *vp, *dvp;
197 int flags = ap->a_cnp->cn_flags;
/* Lookups only make sense in directories. */
200 if (dvp->v_type != VDIR) {
/* ".." at a mount root cannot be resolved here. */
204 if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
209 #if __FreeBSD_version < 1000021
210 cnp->cn_flags |= MPSAFE; /* steel */
216 * - 'dvp' is locked by our caller. We must return it locked, whether we
217 * return success or error.
219 * - If the lookup succeeds, 'vp' must be locked before we return.
221 * - If we lock multiple vnodes, parent vnodes must be locked before
224 * As a result, looking up the parent directory (if 'flags' has ISDOTDOT
225 * set) is a bit of a special case. In that case, we must unlock 'dvp'
226 * before performing the lookup, since the lookup operation may lock the
227 * target vnode, and the target vnode is the parent of 'dvp' (so we must
228 * lock 'dvp' after locking the target vnode).
231 if (flags & ISDOTDOT)
235 error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
/* Re-acquire 'dvp' that was dropped for the ".." case above. */
239 if (flags & ISDOTDOT)
240 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
/* ENOENT on the last component of a CREATE/RENAME is the normal
 * "target does not exist yet" outcome, not a hard failure. */
241 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)
242 && (flags & ISLASTCN) && error == ENOENT)
244 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
245 cnp->cn_flags |= SAVENAME;
250 vp = AFSTOV(vcp); /* always get a node if no error */
252 if (flags & ISDOTDOT) {
253 /* Must lock 'vp' before 'dvp', since 'vp' is the parent vnode. */
254 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
255 vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
256 } else if (vp == dvp) {
257 /* they're the same; afs_lookup() already ref'ed the leaf.
258 * It came in locked, so we don't need to ref OR lock it */
260 vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
264 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
265 cnp->cn_flags |= SAVENAME;
/*
 * afs_vop_create: create a regular file.  Passes EXCL/NONEXCL through from
 * the caller's VA_EXCLUSIVE flag, then returns the new vnode locked.
 */
273 struct vop_create_args /* {
274 * struct vnode *a_dvp;
275 * struct vnode **a_vpp;
276 * struct componentname *a_cnp;
277 * struct vattr *a_vap;
282 struct vnode *dvp = ap->a_dvp;
287 afs_create(VTOAFS(dvp), name, ap->a_vap,
288 ap->a_vap->va_vaflags & VA_EXCLUSIVE ? EXCL : NONEXCL,
289 ap->a_vap->va_mode, &vcp, cnp->cn_cred);
/* Hand the (referenced) new vnode back to the caller, locked as the
 * VOP_CREATE contract requires. */
297 *ap->a_vpp = AFSTOV(vcp);
298 vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY);
308 struct vop_mknod_args /* {
309 * struct vnode *a_dvp;
310 * struct vnode **a_vpp;
311 * struct componentname *a_cnp;
312 * struct vattr *a_vap;
/*
 * afs_vop_open: open an AFS vnode.  Sanity-checks that afs_open() did not
 * swap the vnode out from under us, creates the backing VM object for the
 * merged VM/buffer cache, and flushes stale pages.
 */
320 struct vop_open_args /* {
321 * struct vnode *a_vp;
323 * struct ucred *a_cred;
324 * struct thread *a_td;
329 struct vcache *vc = VTOAFS(ap->a_vp);
332 error = afs_open(&vc, ap->a_mode, ap->a_cred);
334 if (AFSTOV(vc) != ap->a_vp)
335 panic("AFS open changed vnode!");
338 vnode_create_vobject(ap->a_vp, vc->f.m.Length, ap->a_td);
339 osi_FlushPages(vc, ap->a_cred);
/*
 * afs_vop_close: close an AFS vnode.  Doomed (recycled) vnodes are skipped
 * — osi_FlushVCache has already vgone()'d them — otherwise afs_close() is
 * called with the caller's credentials (or afs_osi_credp as fallback).
 */
345 struct vop_close_args /* {
346 * struct vnode *a_vp;
348 * struct ucred *a_cred;
349 * struct thread *a_td;
353 struct vnode *vp = ap->a_vp;
354 struct vcache *avc = VTOAFS(vp);
357 iflag = vp->v_iflag & VI_DOOMED;
359 if (iflag & VI_DOOMED) {
360 /* osi_FlushVCache (correctly) calls vgone() on recycled vnodes, we don't
361 * have an afs_close to process, in that case */
/* A doomed vnode with outstanding opens indicates a refcounting bug. */
363 panic("afs_vop_close: doomed vnode %p has vcache %p with non-zero opens %d\n",
364 vp, avc, avc->opens);
370 code = afs_close(avc, ap->a_fflag, ap->a_cred);
372 code = afs_close(avc, ap->a_fflag, afs_osi_credp);
373 osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
/* afs_vop_access: thin wrapper delegating permission checks to afs_access(). */
380 struct vop_access_args /* {
381 * struct vnode *a_vp;
382 * accmode_t a_accmode;
383 * struct ucred *a_cred;
384 * struct thread *a_td;
389 code = afs_access(VTOAFS(ap->a_vp), ap->a_accmode, ap->a_cred);
/* afs_vop_getattr: thin wrapper delegating attribute reads to afs_getattr(). */
396 struct vop_getattr_args /* {
397 * struct vnode *a_vp;
398 * struct vattr *a_vap;
399 * struct ucred *a_cred;
405 code = afs_getattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);
/* afs_vop_setattr: thin wrapper delegating attribute writes to afs_setattr(). */
413 struct vop_setattr_args /* {
414 * struct vnode *a_vp;
415 * struct vattr *a_vap;
416 * struct ucred *a_cred;
421 code = afs_setattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);
/*
 * afs_vop_read: read from an AFS file.  Flushes cached pages first so the
 * merged VM cache and the AFS cache stay coherent, then calls afs_read().
 */
428 struct vop_read_args /* {
429 * struct vnode *a_vp;
432 * struct ucred *a_cred;
437 struct vcache *avc = VTOAFS(ap->a_vp);
439 osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
440 code = afs_read(avc, ap->a_uio, ap->a_cred, 0);
/* struct vop_getpages_args {
446 * struct vnode *a_vp;
/*
 * afs_vop_getpages: VM pager "get pages" entry point.  Maps the target
 * pages into a pbuf's KVA, reads into them via afs_read() with a SYSSPACE
 * uio, then marks each page valid/partially-valid according to how much
 * data actually arrived.  Two code shapes exist: the newer
 * FBSD_VOP_GETPAGES_BUSIED interface (pages arrive busied, no a_reqpage)
 * and the older interface where non-requested pages must be freed on
 * error or finished via the readahead machinery.
 */
454 afs_vop_getpages(struct vop_getpages_args *ap)
457 int i, nextoff, size, toff, npages, count;
467 memset(&uio, 0, sizeof(uio));
468 memset(&iov, 0, sizeof(iov));
/* New interface: a_count is already a page count; old: it is bytes. */
473 #ifdef FBSD_VOP_GETPAGES_BUSIED
474 npages = ap->a_count;
480 npages = btoc(ap->a_count);
483 if ((object = vp->v_object) == NULL) {
484 printf("afs_getpages: called with non-merged cache vnode??\n");
485 return VM_PAGER_ERROR;
489 * If the requested page is partially valid, just return it and
490 * allow the pager to zero-out the blanks. Partially valid pages
491 * can only occur at the file EOF.
494 #ifdef FBSD_VOP_GETPAGES_BUSIED
495 AFS_VM_OBJECT_WLOCK(object);
496 ma_vm_page_lock_queues();
497 if(pages[npages - 1]->valid != 0) {
499 ma_vm_page_unlock_queues();
500 AFS_VM_OBJECT_WUNLOCK(object);
501 return (VM_PAGER_OK);
505 vm_page_t m = pages[ap->a_reqpage];
506 AFS_VM_OBJECT_WLOCK(object);
507 ma_vm_page_lock_queues();
509 /* handled by vm_fault now */
510 /* vm_page_zero_invalid(m, TRUE); */
/* Old interface: free everything except the page actually requested. */
511 for (i = 0; i < npages; ++i) {
512 if (i != ap->a_reqpage) {
513 ma_vm_page_lock(pages[i]);
514 vm_page_free(pages[i]);
515 ma_vm_page_unlock(pages[i]);
518 ma_vm_page_unlock_queues();
519 AFS_VM_OBJECT_WUNLOCK(object);
523 ma_vm_page_unlock_queues();
524 AFS_VM_OBJECT_WUNLOCK(object);
/* Borrow a pager buffer and map the pages contiguously into its KVA. */
526 bp = getpbuf(&afs_pbuf_freecnt);
528 kva = (vm_offset_t) bp->b_data;
529 pmap_qenter(kva, pages, npages);
530 PCPU_INC(cnt.v_vnodein);
531 PCPU_ADD(cnt.v_vnodepgsin, npages);
533 #ifdef FBSD_VOP_GETPAGES_BUSIED
534 count = ctob(npages);
/* Build a kernel-space uio describing the mapped region. */
538 iov.iov_base = (caddr_t) kva;
542 uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
543 uio.uio_resid = count;
544 uio.uio_segflg = UIO_SYSSPACE;
545 uio.uio_rw = UIO_READ;
546 uio.uio_td = curthread;
549 osi_FlushPages(avc, osi_curcred()); /* hold GLOCK, but not basic vnode lock */
550 code = afs_read(avc, &uio, osi_curcred(), 0);
552 pmap_qremove(kva, npages);
554 relpbuf(bp, &afs_pbuf_freecnt);
/* Total failure: nothing was read at all. */
556 if (code && (uio.uio_resid == count)) {
557 #ifndef FBSD_VOP_GETPAGES_BUSIED
558 AFS_VM_OBJECT_WLOCK(object);
559 ma_vm_page_lock_queues();
560 for (i = 0; i < npages; ++i) {
561 if (i != ap->a_reqpage)
562 vm_page_free(pages[i]);
564 ma_vm_page_unlock_queues();
565 AFS_VM_OBJECT_WUNLOCK(object);
567 return VM_PAGER_ERROR;
/* Walk the pages and mark validity by how far the read got. */
570 size = count - uio.uio_resid;
571 AFS_VM_OBJECT_WLOCK(object);
572 ma_vm_page_lock_queues();
573 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
575 nextoff = toff + PAGE_SIZE;
578 /* XXX not in nfsclient? */
579 m->flags &= ~PG_ZERO;
581 if (nextoff <= size) {
583 * Read operation filled an entire page
585 m->valid = VM_PAGE_BITS_ALL;
586 KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
587 } else if (size > toff) {
589 * Read operation filled a partial page.
592 vm_page_set_validclean(m, 0, size - toff);
593 KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
596 #ifndef FBSD_VOP_GETPAGES_BUSIED
597 if (i != ap->a_reqpage) {
598 #if __FreeBSD_version >= 1000042
599 vm_page_readahead_finish(m);
602 * Whether or not to leave the page activated is up in
603 * the air, but we should put the page on a page queue
604 * somewhere (it already is in the object). Result:
605 * It appears that empirical results show that
606 * deactivating pages is best.
610 * Just in case someone was asking for this page we
611 * now tell them that it is ok to use.
614 if (m->oflags & VPO_WANTED) {
617 ma_vm_page_unlock(m);
621 vm_page_deactivate(m);
622 ma_vm_page_unlock(m);
628 ma_vm_page_unlock(m);
630 #endif /* __FreeBSD_version 1000042 */
632 #endif /* ndef FBSD_VOP_GETPAGES_BUSIED */
634 ma_vm_page_unlock_queues();
635 AFS_VM_OBJECT_WUNLOCK(object);
/*
 * afs_vop_write: write to an AFS file.  Flushes cached pages first for
 * coherency with the VM cache, then delegates to afs_write().
 */
641 struct vop_write_args /* {
642 * struct vnode *a_vp;
645 * struct ucred *a_cred;
649 struct vcache *avc = VTOAFS(ap->a_vp);
651 osi_FlushPages(avc, ap->a_cred); /* hold GLOCK, but not basic vnode lock */
653 afs_write(VTOAFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_cred, 0);
659 * struct vop_putpages_args {
660 * struct vnode *a_vp;
665 * vm_ooffset_t a_offset;
669 * All of the pages passed to us in ap->a_m[] are already marked as busy,
670 * so there is no additional locking required to set their flags. -GAW
/*
 * afs_vop_putpages: VM pager "put pages" entry point.  Maps the dirty
 * pages into a pbuf's KVA, writes them with afs_write() via a SYSSPACE
 * uio (synchronously if VM_PAGER_PUT_SYNC is set), then undirties the
 * pages that were actually written and fills in per-page rtvals.
 */
673 afs_vop_putpages(struct vop_putpages_args *ap)
676 int i, size, npages, sync;
684 memset(&uio, 0, sizeof(uio));
685 memset(&iov, 0, sizeof(iov));
689 /* Perhaps these two checks should just be KASSERTs instead... */
690 if (vp->v_object == NULL) {
691 printf("afs_putpages: called with non-merged cache vnode??\n");
692 return VM_PAGER_ERROR; /* XXX I think this is insufficient */
694 if (vType(avc) != VREG) {
695 printf("afs_putpages: not VREG");
696 return VM_PAGER_ERROR; /* XXX I think this is insufficient */
/* Default every page to "try again"; overwritten below on success. */
698 npages = btoc(ap->a_count);
699 for (i = 0; i < npages; i++)
700 ap->a_rtvals[i] = VM_PAGER_AGAIN;
701 bp = getpbuf(&afs_pbuf_freecnt);
703 kva = (vm_offset_t) bp->b_data;
704 pmap_qenter(kva, ap->a_m, npages);
705 PCPU_INC(cnt.v_vnodeout);
706 PCPU_ADD(cnt.v_vnodepgsout, ap->a_count);
708 iov.iov_base = (caddr_t) kva;
709 iov.iov_len = ap->a_count;
712 uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
713 uio.uio_resid = ap->a_count;
714 uio.uio_segflg = UIO_SYSSPACE;
715 uio.uio_rw = UIO_WRITE;
716 uio.uio_td = curthread;
718 if (ap->a_sync & VM_PAGER_PUT_SYNC)
720 /*if (ap->a_sync & VM_PAGER_PUT_INVAL)
721 * sync |= IO_INVAL; */
724 code = afs_write(avc, &uio, sync, osi_curcred(), 0);
727 pmap_qremove(kva, npages);
728 relpbuf(bp, &afs_pbuf_freecnt);
/* Mark the pages covered by the successfully-written byte range clean. */
731 size = ap->a_count - uio.uio_resid;
732 for (i = 0; i < round_page(size) / PAGE_SIZE; i++) {
733 ap->a_rtvals[i] = VM_PAGER_OK;
734 vm_page_undirty(ap->a_m[i]);
737 return ap->a_rtvals[0];
/*
 * afs_vop_ioctl: handle ioctl(2) on an AFS vnode.  Only VICEIOCTL commands
 * (command group 'V') are processed, via HandleIoctl(); everything else is
 * a no-op.
 */
742 struct vop_ioctl_args /* {
743 * struct vnode *a_vp;
747 * struct ucred *a_cred;
748 * struct thread *a_td;
751 struct vcache *tvc = VTOAFS(ap->a_vp);
754 /* in case we ever get in here... */
756 AFS_STATCNT(afs_ioctl);
757 if (((ap->a_command >> 8) & 0xff) == 'V') {
758 /* This is a VICEIOCTL call */
760 error = HandleIoctl(tvc, ap->a_command, ap->a_data);
764 /* No-op call; just return. */
/* afs_vop_fsync: flush a vnode's data via afs_fsync() with the calling
 * thread's credentials. */
771 struct vop_fsync_args /* {
772 * struct vnode *a_vp;
778 struct vnode *vp = ap->a_vp;
781 /*vflushbuf(vp, wait); */
782 error = afs_fsync(VTOAFS(vp), ap->a_td->td_ucred);
/* afs_vop_remove: remove a directory entry by delegating to afs_remove(). */
789 struct vop_remove_args /* {
790 * struct vnode *a_dvp;
791 * struct vnode *a_vp;
792 * struct componentname *a_cnp;
796 struct vnode *vp = ap->a_vp;
797 struct vnode *dvp = ap->a_dvp;
801 error = afs_remove(VTOAFS(dvp), name, cnp->cn_cred);
/*
 * afs_vop_link: create a hard link.  Rejects cross-mount links and links
 * to directories, locks the source vnode, then calls afs_link().
 */
810 struct vop_link_args /* {
811 * struct vnode *a_vp;
812 * struct vnode *a_tdvp;
813 * struct componentname *a_cnp;
817 struct vnode *dvp = ap->a_tdvp;
818 struct vnode *vp = ap->a_vp;
821 if (dvp->v_mount != vp->v_mount) {
825 if (vp->v_type == VDIR) {
829 if ((error = vn_lock(vp, LK_CANRECURSE | LK_EXCLUSIVE)) != 0) {
833 error = afs_link(VTOAFS(vp), VTOAFS(dvp), name, cnp->cn_cred);
/*
 * afs_vop_rename: rename a file or directory.  Rejects cross-device
 * renames, converts a rename of a name onto its own alias (fvp == tvp)
 * into a remove via relookup()+VOP_REMOVE (pattern borrowed from
 * ufs_rename()), and otherwise copies both component names into
 * NUL-terminated buffers and calls afs_rename().
 */
844 struct vop_rename_args /* {
845 * struct vnode *a_fdvp;
846 * struct vnode *a_fvp;
847 * struct componentname *a_fcnp;
848 * struct vnode *a_tdvp;
849 * struct vnode *a_tvp;
850 * struct componentname *a_tcnp;
854 struct componentname *fcnp = ap->a_fcnp;
856 struct componentname *tcnp = ap->a_tcnp;
858 struct vnode *tvp = ap->a_tvp;
859 struct vnode *tdvp = ap->a_tdvp;
860 struct vnode *fvp = ap->a_fvp;
861 struct vnode *fdvp = ap->a_fdvp;
864 * Check for cross-device rename.
866 if ((fvp->v_mount != tdvp->v_mount)
867 || (tvp && (fvp->v_mount != tvp->v_mount))) {
881 * if fvp == tvp, we're just removing one name of a pair of
882 * directory entries for the same element. convert call into rename.
883 ( (pinched from FreeBSD 4.4's ufs_rename())
887 if (fvp->v_type == VDIR) {
892 /* Release destination completely. */
/* Re-lookup the source with DELETE semantics so it can be removed. */
899 fcnp->cn_flags &= ~MODMASK;
900 fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
901 if ((fcnp->cn_flags & SAVESTART) == 0)
902 panic("afs_rename: lost from startdir");
903 fcnp->cn_nameiop = DELETE;
905 error = relookup(fdvp, &fvp, fcnp);
912 error = VOP_REMOVE(fdvp, fvp, fcnp);
920 if ((error = vn_lock(fvp, LK_EXCLUSIVE)) != 0)
/* Copy both component names into NUL-terminated heap buffers, since
 * cn_nameptr is not guaranteed to be NUL-terminated. */
923 MALLOC(fname, char *, fcnp->cn_namelen + 1, M_TEMP, M_WAITOK);
924 memcpy(fname, fcnp->cn_nameptr, fcnp->cn_namelen);
925 fname[fcnp->cn_namelen] = '\0';
926 MALLOC(tname, char *, tcnp->cn_namelen + 1, M_TEMP, M_WAITOK);
927 memcpy(tname, tcnp->cn_nameptr, tcnp->cn_namelen);
928 tname[tcnp->cn_namelen] = '\0';
932 /* XXX use "from" or "to" creds? NFS uses "to" creds */
934 afs_rename(VTOAFS(fdvp), fname, VTOAFS(tdvp), tname, tcnp->cn_cred);
/*
 * afs_vop_mkdir: create a directory via afs_mkdir() and return the new
 * vnode locked, per the VOP_MKDIR contract.
 */
952 struct vop_mkdir_args /* {
953 * struct vnode *a_dvp;
954 * struct vnode **a_vpp;
955 * struct componentname *a_cnp;
956 * struct vattr *a_vap;
959 struct vnode *dvp = ap->a_dvp;
960 struct vattr *vap = ap->a_vap;
/* The name buffer must have been saved by the lookup phase. */
966 if ((cnp->cn_flags & HASBUF) == 0)
967 panic("afs_vop_mkdir: no name");
970 error = afs_mkdir(VTOAFS(dvp), name, vap, &vcp, cnp->cn_cred);
977 *ap->a_vpp = AFSTOV(vcp);
978 vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY);
/* afs_vop_rmdir: remove a directory by delegating to afs_rmdir(). */
987 struct vop_rmdir_args /* {
988 * struct vnode *a_dvp;
989 * struct vnode *a_vp;
990 * struct componentname *a_cnp;
994 struct vnode *dvp = ap->a_dvp;
998 error = afs_rmdir(VTOAFS(dvp), name, cnp->cn_cred);
1004 /* struct vop_symlink_args {
1005 * struct vnode *a_dvp;
1006 * struct vnode **a_vpp;
1007 * struct componentname *a_cnp;
1008 * struct vattr *a_vap;
/*
 * afs_vop_symlink: create a symbolic link, then look the new node up so a
 * locked vnode can be returned to the caller (afs_symlink() does not hand
 * back the vcache here).
 */
1013 afs_vop_symlink(struct vop_symlink_args *ap)
1016 struct vnode *newvp;
1027 afs_symlink(VTOAFS(dvp), name, ap->a_vap, ap->a_target, NULL,
/* Look up the freshly-created link to obtain its vnode. */
1030 error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
1032 newvp = AFSTOV(vcp);
1033 vn_lock(newvp, LK_EXCLUSIVE | LK_RETRY);
1038 *(ap->a_vpp) = newvp;
/*
 * afs_vop_readdir: read directory entries via afs_readdir().  When the
 * caller wants NFS-style cookies (a_ncookies != NULL), walk the dirents
 * just written into the (single, SYSSPACE) iovec and synthesize one
 * offset cookie per entry.
 */
1044 struct vop_readdir_args /* {
1045 * struct vnode *a_vp;
1046 * struct uio *a_uio;
1047 * struct ucred *a_cred;
1049 * u_long *a_cookies;
1055 /* printf("readdir %x cookies %x ncookies %d\n", ap->a_vp, ap->a_cookies,
1057 off = ap->a_uio->uio_offset;
1060 afs_readdir(VTOAFS(ap->a_vp), ap->a_uio, ap->a_cred, ap->a_eofflag);
1062 if (!error && ap->a_ncookies != NULL) {
1063 struct uio *uio = ap->a_uio;
1064 const struct dirent *dp, *dp_start, *dp_end;
1066 u_long *cookies, *cookiep;
/* The cookie walk below assumes exactly one kernel-space iovec. */
1068 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
1069 panic("afs_readdir: burned cookies");
/* Rewind from the post-read iov_base to the start of the entries. */
1070 dp = (const struct dirent *)
1071 ((const char *)uio->uio_iov->iov_base - (uio->uio_offset - off));
1073 dp_end = (const struct dirent *)uio->uio_iov->iov_base;
/* First pass: count entries so the cookie array can be sized. */
1074 for (dp_start = dp, ncookies = 0; dp < dp_end;
1075 dp = (const struct dirent *)((const char *)dp + dp->d_reclen))
1078 MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP,
/* Second pass: emit one running-offset cookie per entry. */
1080 for (dp = dp_start, cookiep = cookies; dp < dp_end;
1081 dp = (const struct dirent *)((const char *)dp + dp->d_reclen)) {
1082 off += dp->d_reclen;
1085 *ap->a_cookies = cookies;
1086 *ap->a_ncookies = ncookies;
/* afs_vop_readlink: read a symlink's target by delegating to afs_readlink(). */
1093 afs_vop_readlink(ap)
1094 struct vop_readlink_args /* {
1095 * struct vnode *a_vp;
1096 * struct uio *a_uio;
1097 * struct ucred *a_cred;
1101 /* printf("readlink %x\n", ap->a_vp);*/
1103 error = afs_readlink(VTOAFS(ap->a_vp), ap->a_uio, ap->a_cred);
/* afs_vop_inactive: last reference dropped; let AFS release its hold on
 * the vcache (afs_InactiveVCache decrements the ref counts). */
1109 afs_vop_inactive(ap)
1110 struct vop_inactive_args /* {
1111 * struct vnode *a_vp;
1112 * struct thread *td;
1115 struct vnode *vp = ap->a_vp;
1118 afs_InactiveVCache(VTOAFS(vp), 0); /* decrs ref counts */
1124 * struct vop_reclaim_args {
1125 * struct vnode *a_vp;
/*
 * afs_vop_reclaim: final teardown of a vnode being recycled.  Must take
 * afs_xvcache, which requires temporarily dropping the vnode lock (see the
 * ordering comment below), then flushes the vcache and wakes any thread
 * waiting on CVInit.
 */
1129 afs_vop_reclaim(struct vop_reclaim_args *ap)
1132 struct vnode *vp = ap->a_vp;
1133 struct vcache *avc = VTOAFS(vp);
1134 int haveGlock = ISAFS_GLOCK();
1137 * In other code paths, we acquire the vnode lock while afs_xvcache is
1138 * already held (e.g. afs_PutVCache() -> vrele()). Here, we already have
1139 * the vnode lock, and we need afs_xvcache. So drop the vnode lock in order
1140 * to hold afs_xvcache.
1146 ObtainWriteLock(&afs_xvcache, 901);
1149 * Note that we deliberately call VOP_LOCK() instead of vn_lock() here.
1150 * vn_lock() will return an error for VI_DOOMED vnodes, but we know this
1151 * vnode is already VI_DOOMED. We just want to lock it again, and skip the
1154 VOP_LOCK(vp, LK_EXCLUSIVE);
1156 code = afs_FlushVCache(avc, &slept);
/* Wake anyone blocked waiting for this vcache to finish initializing. */
1158 if (avc->f.states & CVInit) {
1159 avc->f.states &= ~CVInit;
1160 afs_osi_Wakeup(&avc->f.states);
1163 ReleaseWriteLock(&afs_xvcache);
1168 afs_warn("afs_vop_reclaim: afs_FlushVCache failed code %d vnode\n", code);
1170 panic("afs: afs_FlushVCache failed during reclaim");
1173 vnode_destroy_vobject(vp);
/* afs_vop_strategy: hand buffer I/O to afs_ustrategy() under the current
 * credentials. */
1180 afs_vop_strategy(ap)
1181 struct vop_strategy_args /* {
1187 error = afs_ustrategy(ap->a_bp, osi_curcred());
/*
 * afs_vop_print: debugging dump of a vnode — prints the FID, open/writer
 * counts, and a decoded subset of the vcache state flags.
 */
1194 struct vop_print_args /* {
1195 * struct vnode *a_vp;
1198 struct vnode *vp = ap->a_vp;
1199 struct vcache *vc = VTOAFS(ap->a_vp);
1200 int s = vc->f.states;
1202 printf("vc %p vp %p tag %s, fid: %d.%d.%d.%d, opens %d, writers %d", vc, vp, vp->v_tag,
1203 (int)vc->f.fid.Cell, (u_int) vc->f.fid.Fid.Volume,
1204 (u_int) vc->f.fid.Fid.Vnode, (u_int) vc->f.fid.Fid.Unique, vc->opens,
1205 vc->execsOrWriters);
1206 printf("\n states%s%s%s%s%s", (s & CStatd) ? " statd" : "",
1207 (s & CRO) ? " readonly" : "", (s & CDirty) ? " dirty" : "",
1208 (s & CMAPPED) ? " mapped" : "",
1209 (s & CVFlushed) ? " flush in progress" : "");
1215 * Advisory record locking support (fcntl() POSIX style)
/*
 * afs_vop_advlock: POSIX advisory record locking.  Normalizes FreeBSD's
 * a_op == F_UNLCK convention into the F_SETLK form afs_lockctl() expects,
 * then delegates to it.  The a_id-as-int cast is a known limitation.
 */
1219 struct vop_advlock_args /* {
1220 * struct vnode *a_vp;
1223 * struct flock *a_fl;
1228 struct ucred cr = *osi_curcred();
1231 if (a_op == F_UNLCK) {
1233 * When a_fl->type is F_UNLCK, FreeBSD passes in an a_op of F_UNLCK.
1234 * This is (confusingly) different than how you actually release a lock
1235 * with fcntl(), which is done with an a_op of F_SETLK and an l_type of
1236 * F_UNLCK. Pretend we were given an a_op of F_SETLK in this case,
1237 * since this is what afs_lockctl expects.
1244 afs_lockctl(VTOAFS(ap->a_vp),
1247 (int)(intptr_t)ap->a_id); /* XXX: no longer unique! */
1252 struct vop_vector afs_vnodeops = {
1253 .vop_default = &default_vnodeops,
1254 .vop_access = afs_vop_access,
1255 .vop_advlock = afs_vop_advlock,
1256 .vop_close = afs_vop_close,
1257 .vop_create = afs_vop_create,
1258 .vop_fsync = afs_vop_fsync,
1259 .vop_getattr = afs_vop_getattr,
1260 .vop_getpages = afs_vop_getpages,
1261 .vop_inactive = afs_vop_inactive,
1262 .vop_ioctl = afs_vop_ioctl,
1263 .vop_link = afs_vop_link,
1264 .vop_lookup = afs_vop_lookup,
1265 .vop_mkdir = afs_vop_mkdir,
1266 .vop_mknod = afs_vop_mknod,
1267 .vop_open = afs_vop_open,
1268 .vop_pathconf = afs_vop_pathconf,
1269 .vop_print = afs_vop_print,
1270 .vop_putpages = afs_vop_putpages,
1271 .vop_read = afs_vop_read,
1272 .vop_readdir = afs_vop_readdir,
1273 .vop_readlink = afs_vop_readlink,
1274 .vop_reclaim = afs_vop_reclaim,
1275 .vop_remove = afs_vop_remove,
1276 .vop_rename = afs_vop_rename,
1277 .vop_rmdir = afs_vop_rmdir,
1278 .vop_setattr = afs_vop_setattr,
1279 .vop_strategy = afs_vop_strategy,
1280 .vop_symlink = afs_vop_symlink,
1281 .vop_write = afs_vop_write,