2 * A large chunk of this file appears to be copied directly from
3 * sys/nfsclient/nfs_bio.c, which has the following license:
6 * Copyright (c) 1989, 1993
7 * The Regents of the University of California. All rights reserved.
9 * This code is derived from software contributed to Berkeley by
10 * Rick Macklem at The University of Guelph.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
43 * Pursuant to a statement of U.C. Berkeley dated 1999-07-22, this license
44 * is amended to drop clause (3) above.
47 #include <afsconfig.h>
48 #include <afs/param.h>
51 #include <afs/sysincludes.h> /* Standard vendor system headers */
52 #include <afsincludes.h> /* Afs-based standard headers */
53 #include <afs/afs_stats.h> /* statistics */
54 #include <sys/malloc.h>
55 #include <sys/namei.h>
56 #include <sys/unistd.h>
57 #include <vm/vm_page.h>
58 #include <vm/vm_object.h>
59 #include <vm/vm_pager.h>
60 #include <vm/vnode_pager.h>
61 extern int afs_pbuf_freecnt;
/*
 * Forward declarations for the AFS vnode-operation handlers that populate
 * the vop_vector table below (FreeBSD 6.0+ style vnode ops registration).
 * Each handler is file-static; callers reach them only through afs_vnodeops.
 */
64 static vop_access_t afs_vop_access;
65 static vop_advlock_t afs_vop_advlock;
66 static vop_close_t afs_vop_close;
67 static vop_create_t afs_vop_create;
68 static vop_fsync_t afs_vop_fsync;
69 static vop_getattr_t afs_vop_getattr;
70 static vop_getpages_t afs_vop_getpages;
71 static vop_inactive_t afs_vop_inactive;
72 static vop_ioctl_t afs_vop_ioctl;
73 static vop_link_t afs_vop_link;
74 static vop_lookup_t afs_vop_lookup;
75 static vop_mkdir_t afs_vop_mkdir;
76 static vop_mknod_t afs_vop_mknod;
77 static vop_open_t afs_vop_open;
78 static vop_pathconf_t afs_vop_pathconf;
79 static vop_poll_t afs_vop_poll;
80 static vop_print_t afs_vop_print;
81 static vop_putpages_t afs_vop_putpages;
82 static vop_read_t afs_vop_read;
83 static vop_readdir_t afs_vop_readdir;
84 static vop_readlink_t afs_vop_readlink;
85 static vop_reclaim_t afs_vop_reclaim;
86 static vop_remove_t afs_vop_remove;
87 static vop_rename_t afs_vop_rename;
88 static vop_rmdir_t afs_vop_rmdir;
89 static vop_setattr_t afs_vop_setattr;
90 static vop_strategy_t afs_vop_strategy;
91 static vop_symlink_t afs_vop_symlink;
92 static vop_write_t afs_vop_write;
/* 7.x only: explicit lock/unlock/islocked handlers (8.0+ uses defaults). */
93 #if defined(AFS_FBSD70_ENV) && !defined(AFS_FBSD80_ENV)
94 static vop_lock1_t afs_vop_lock;
95 static vop_unlock_t afs_vop_unlock;
96 static vop_islocked_t afs_vop_islocked;
99 struct vop_vector afs_vnodeops = {
100 .vop_default = &default_vnodeops,
101 .vop_access = afs_vop_access,
102 .vop_advlock = afs_vop_advlock,
103 .vop_close = afs_vop_close,
104 .vop_create = afs_vop_create,
105 .vop_fsync = afs_vop_fsync,
106 .vop_getattr = afs_vop_getattr,
107 .vop_getpages = afs_vop_getpages,
108 .vop_inactive = afs_vop_inactive,
109 .vop_ioctl = afs_vop_ioctl,
110 #if !defined(AFS_FBSD80_ENV)
111 /* removed at least temporarily (NFSv4 flux) */
112 .vop_lease = VOP_NULL,
114 .vop_link = afs_vop_link,
115 .vop_lookup = afs_vop_lookup,
116 .vop_mkdir = afs_vop_mkdir,
117 .vop_mknod = afs_vop_mknod,
118 .vop_open = afs_vop_open,
119 .vop_pathconf = afs_vop_pathconf,
120 .vop_poll = afs_vop_poll,
121 .vop_print = afs_vop_print,
122 .vop_putpages = afs_vop_putpages,
123 .vop_read = afs_vop_read,
124 .vop_readdir = afs_vop_readdir,
125 .vop_readlink = afs_vop_readlink,
126 .vop_reclaim = afs_vop_reclaim,
127 .vop_remove = afs_vop_remove,
128 .vop_rename = afs_vop_rename,
129 .vop_rmdir = afs_vop_rmdir,
130 .vop_setattr = afs_vop_setattr,
131 .vop_strategy = afs_vop_strategy,
132 .vop_symlink = afs_vop_symlink,
133 .vop_write = afs_vop_write,
134 #if defined(AFS_FBSD70_ENV) && !defined(AFS_FBSD80_ENV)
135 .vop_lock1 = afs_vop_lock,
136 .vop_unlock = afs_vop_unlock,
137 .vop_islocked = afs_vop_islocked,
141 #else /* AFS_FBSD60_ENV */
/*
 * Pre-6.0 FreeBSD: handlers are registered through a vnodeopv_entry_desc
 * table instead of a vop_vector, so these prototypes have external linkage
 * (except afs_vop_pathconf) and take the classic vop_*_args structures.
 */
143 int afs_vop_lookup(struct vop_lookup_args *);
144 int afs_vop_create(struct vop_create_args *);
145 int afs_vop_mknod(struct vop_mknod_args *);
146 int afs_vop_open(struct vop_open_args *);
147 int afs_vop_close(struct vop_close_args *);
148 int afs_vop_access(struct vop_access_args *);
149 int afs_vop_getattr(struct vop_getattr_args *);
150 int afs_vop_setattr(struct vop_setattr_args *);
151 int afs_vop_read(struct vop_read_args *);
152 int afs_vop_write(struct vop_write_args *);
153 int afs_vop_getpages(struct vop_getpages_args *);
154 int afs_vop_putpages(struct vop_putpages_args *);
155 int afs_vop_ioctl(struct vop_ioctl_args *);
156 static int afs_vop_pathconf(struct vop_pathconf_args *);
157 int afs_vop_poll(struct vop_poll_args *);
158 int afs_vop_fsync(struct vop_fsync_args *);
159 int afs_vop_remove(struct vop_remove_args *);
160 int afs_vop_link(struct vop_link_args *);
161 int afs_vop_rename(struct vop_rename_args *);
162 int afs_vop_mkdir(struct vop_mkdir_args *);
163 int afs_vop_rmdir(struct vop_rmdir_args *);
164 int afs_vop_symlink(struct vop_symlink_args *);
165 int afs_vop_readdir(struct vop_readdir_args *);
166 int afs_vop_readlink(struct vop_readlink_args *);
167 int afs_vop_inactive(struct vop_inactive_args *);
168 int afs_vop_reclaim(struct vop_reclaim_args *);
169 int afs_vop_bmap(struct vop_bmap_args *);
170 int afs_vop_strategy(struct vop_strategy_args *);
171 int afs_vop_print(struct vop_print_args *);
172 int afs_vop_advlock(struct vop_advlock_args *);
176 /* Global vfs data structures for AFS. */
177 vop_t **afs_vnodeop_p;
178 struct vnodeopv_entry_desc afs_vnodeop_entries[] = {
179 {&vop_default_desc, (vop_t *) vop_defaultop},
180 {&vop_access_desc, (vop_t *) afs_vop_access}, /* access */
181 {&vop_advlock_desc, (vop_t *) afs_vop_advlock}, /* advlock */
182 {&vop_bmap_desc, (vop_t *) afs_vop_bmap}, /* bmap */
183 {&vop_close_desc, (vop_t *) afs_vop_close}, /* close */
184 {&vop_createvobject_desc, (vop_t *) vop_stdcreatevobject},
185 {&vop_destroyvobject_desc, (vop_t *) vop_stddestroyvobject},
186 {&vop_create_desc, (vop_t *) afs_vop_create}, /* create */
187 {&vop_fsync_desc, (vop_t *) afs_vop_fsync}, /* fsync */
188 {&vop_getattr_desc, (vop_t *) afs_vop_getattr}, /* getattr */
189 {&vop_getpages_desc, (vop_t *) afs_vop_getpages}, /* read */
190 {&vop_getvobject_desc, (vop_t *) vop_stdgetvobject},
191 {&vop_putpages_desc, (vop_t *) afs_vop_putpages}, /* write */
192 {&vop_inactive_desc, (vop_t *) afs_vop_inactive}, /* inactive */
193 {&vop_lease_desc, (vop_t *) vop_null},
194 {&vop_link_desc, (vop_t *) afs_vop_link}, /* link */
195 {&vop_lookup_desc, (vop_t *) afs_vop_lookup}, /* lookup */
196 {&vop_mkdir_desc, (vop_t *) afs_vop_mkdir}, /* mkdir */
197 {&vop_mknod_desc, (vop_t *) afs_vop_mknod}, /* mknod */
198 {&vop_open_desc, (vop_t *) afs_vop_open}, /* open */
199 {&vop_pathconf_desc, (vop_t *) afs_vop_pathconf}, /* pathconf */
200 {&vop_poll_desc, (vop_t *) afs_vop_poll}, /* select */
201 {&vop_print_desc, (vop_t *) afs_vop_print}, /* print */
202 {&vop_read_desc, (vop_t *) afs_vop_read}, /* read */
203 {&vop_readdir_desc, (vop_t *) afs_vop_readdir}, /* readdir */
204 {&vop_readlink_desc, (vop_t *) afs_vop_readlink}, /* readlink */
205 {&vop_reclaim_desc, (vop_t *) afs_vop_reclaim}, /* reclaim */
206 {&vop_remove_desc, (vop_t *) afs_vop_remove}, /* remove */
207 {&vop_rename_desc, (vop_t *) afs_vop_rename}, /* rename */
208 {&vop_rmdir_desc, (vop_t *) afs_vop_rmdir}, /* rmdir */
209 {&vop_setattr_desc, (vop_t *) afs_vop_setattr}, /* setattr */
210 {&vop_strategy_desc, (vop_t *) afs_vop_strategy}, /* strategy */
211 {&vop_symlink_desc, (vop_t *) afs_vop_symlink}, /* symlink */
212 {&vop_write_desc, (vop_t *) afs_vop_write}, /* write */
213 {&vop_ioctl_desc, (vop_t *) afs_vop_ioctl}, /* XXX ioctl */
214 /*{ &vop_seek_desc, afs_vop_seek }, *//* seek */
215 #if defined(AFS_FBSD70_ENV) && !defined(AFS_FBSD90_ENV)
216 {&vop_lock1_desc, (vop_t *) afs_vop_lock}, /* lock */
217 {&vop_unlock_desc, (vop_t *) afs_vop_unlock}, /* unlock */
218 {&vop_islocked_desc, (vop_t *) afs_vop_islocked}, /* islocked */
222 struct vnodeopv_desc afs_vnodeop_opv_desc =
223 { &afs_vnodeop_p, afs_vnodeop_entries };
224 #endif /* AFS_FBSD60_ENV */
227 struct componentname *cnp = ap->a_cnp; \
229 MALLOC(name, char *, cnp->cn_namelen+1, M_TEMP, M_WAITOK); \
230 memcpy(name, cnp->cn_nameptr, cnp->cn_namelen); \
231 name[cnp->cn_namelen] = '\0'
233 #define DROPNAME() FREE(name, M_TEMP)
236 * Here we define compatibility functions/macros for interfaces that
237 * have changed between different FreeBSD versions.
239 #if defined(AFS_FBSD90_ENV)
/* FreeBSD 9.0+: pages are locked individually rather than under a single
 * global page-queue lock, so the queue-lock wrappers compile to nothing
 * and the per-page wrappers map onto the real vm_page_lock()/unlock(). */
240 static __inline void ma_vm_page_lock_queues(void) {};
241 static __inline void ma_vm_page_unlock_queues(void) {};
242 static __inline void ma_vm_page_lock(vm_page_t m) { vm_page_lock(m); };
243 static __inline void ma_vm_page_unlock(vm_page_t m) { vm_page_unlock(m); };
/* Pre-9.0: page state is protected by the global page-queue lock, so the
 * queue-lock wrappers are real and per-page locking is a no-op here. */
245 static __inline void ma_vm_page_lock_queues(void) { vm_page_lock_queues(); };
246 static __inline void ma_vm_page_unlock_queues(void) { vm_page_unlock_queues(); };
247 static __inline void ma_vm_page_lock(vm_page_t m) {};
248 static __inline void ma_vm_page_unlock(vm_page_t m) {};
251 #if defined(AFS_FBSD80_ENV)
/* FreeBSD 8.0 removed the struct thread argument from vn_lock(),
 * VOP_LOCK() and VOP_UNLOCK(); these wrappers accept the thread pointer
 * in all cases and simply drop it where the native API no longer uses it. */
252 #define ma_vn_lock(vp, flags, p) (vn_lock(vp, flags))
253 #define MA_VOP_LOCK(vp, flags, p) (VOP_LOCK(vp, flags))
254 #define MA_VOP_UNLOCK(vp, flags, p) (VOP_UNLOCK(vp, flags))
/* Older releases still pass the thread pointer through to the kernel. */
256 #define ma_vn_lock(vp, flags, p) (vn_lock(vp, flags, p))
257 #define MA_VOP_LOCK(vp, flags, p) (VOP_LOCK(vp, flags, p))
258 #define MA_VOP_UNLOCK(vp, flags, p) (VOP_UNLOCK(vp, flags, p))
261 #if defined(AFS_FBSD70_ENV)
/* FreeBSD 7.0+ provides PCPU_INC()/PCPU_ADD() for per-CPU counters. */
262 #define MA_PCPU_INC(c) PCPU_INC(c)
263 #define MA_PCPU_ADD(c, n) PCPU_ADD(c, n)
/* Older releases: only PCPU_LAZY_INC exists; there is no per-CPU add
 * primitive, so fall back to a plain (non-atomic) addition. */
265 #define MA_PCPU_INC(c) PCPU_LAZY_INC(c)
266 #define MA_PCPU_ADD(c, n) (c) += (n)
269 #ifdef AFS_FBSD70_ENV
270 #ifndef AFS_FBSD80_ENV
/* Definitions lifted from sys/kern/kern_lock.c for the hand-rolled lock
 * handling below: COUNT() adjusts the owning thread's held-lock count,
 * and LK_ALL names every lockmgr state/wait flag bit in one mask. */
271 /* From kern_lock.c */
272 #define COUNT(td, x) if ((td)) (td)->td_locks += (x)
273 #define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
274 LK_SHARE_NONZERO | LK_WAIT_NONZERO)
277 sharelock(struct thread *td, struct lock *lkp, int incr) {
278 lkp->lk_flags |= LK_SHARE_NONZERO;
279 lkp->lk_sharecount += incr;
285 * Standard lock, unlock and islocked functions.
289 struct vop_lock1_args /* {
297 struct vnode *vp = ap->a_vp;
298 struct lock *lkp = vp->v_vnlock;
300 #if 0 && defined(AFS_FBSD80_ENV) && !defined(UKERNEL)
301 afs_warn("afs_vop_lock: tid %d pid %d \"%s\"\n", curthread->td_tid,
302 curthread->td_proc->p_pid, curthread->td_name);
306 #ifdef AFS_FBSD80_ENV
307 return (_lockmgr_args(lkp, ap->a_flags, VI_MTX(vp),
308 LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
309 ap->a_file, ap->a_line));
311 return (_lockmgr(lkp, ap->a_flags, VI_MTX(vp), ap->a_td, ap->a_file, ap->a_line));
318 struct vop_unlock_args /* {
324 struct vnode *vp = ap->a_vp;
325 struct lock *lkp = vp->v_vnlock;
327 #ifdef AFS_FBSD80_ENV
330 op = ((ap->a_flags) | LK_RELEASE) & LK_TYPE_MASK;
331 int glocked = ISAFS_GLOCK();
334 if ((op & (op - 1)) != 0) {
335 afs_warn("afs_vop_unlock: Shit.\n");
338 code = lockmgr(lkp, ap->a_flags | LK_RELEASE, VI_MTX(vp));
344 /* possibly in current code path where this
345 * forces trace, we should have had a (shared? not
346 * necessarily, see _lockmgr in kern_lock.c) lock
347 * and that's the real bug. but.
350 if ((lkp->lk_exclusivecount == 0) &&
351 (!(lkp->lk_flags & LK_SHARE_NONZERO))) {
352 sharelock(ap->a_td, lkp, 1);
355 return (lockmgr(lkp, ap->a_flags | LK_RELEASE, VI_MTX(vp),
363 struct vop_islocked_args /* {
365 struct thread *a_td; (not in 80)
368 #ifdef AFS_FBSD80_ENV
369 return (lockstatus(ap->a_vp->v_vnlock));
371 return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
377 * Mostly copied from sys/ufs/ufs/ufs_vnops.c:ufs_pathconf().
378 * We should know the correct answers to these questions with
379 * respect to the AFS protocol (which may differ from the UFS
380 * values) but for the moment this will do.
383 afs_vop_pathconf(struct vop_pathconf_args *ap)
388 switch (ap->a_name) {
390 *ap->a_retval = LINK_MAX;
393 *ap->a_retval = NAME_MAX;
396 *ap->a_retval = PATH_MAX;
399 *ap->a_retval = PIPE_BUF;
401 case _PC_CHOWN_RESTRICTED:
407 #ifdef _PC_ACL_EXTENDED
408 case _PC_ACL_EXTENDED:
411 case _PC_ACL_PATH_MAX:
415 #ifdef _PC_MAC_PRESENT
416 case _PC_MAC_PRESENT:
422 /* _PC_ASYNC_IO should have been handled by upper layers. */
423 KASSERT(0, ("_PC_ASYNC_IO should not get here"));
433 #ifdef _PC_ALLOC_SIZE_MIN
434 case _PC_ALLOC_SIZE_MIN:
435 *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_bsize;
438 #ifdef _PC_FILESIZEBITS
439 case _PC_FILESIZEBITS:
440 *ap->a_retval = 32; /* XXX */
443 #ifdef _PC_REC_INCR_XFER_SIZE
444 case _PC_REC_INCR_XFER_SIZE:
445 *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize;
447 case _PC_REC_MAX_XFER_SIZE:
448 *ap->a_retval = -1; /* means ``unlimited'' */
450 case _PC_REC_MIN_XFER_SIZE:
451 *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize;
453 case _PC_REC_XFER_ALIGN:
454 *ap->a_retval = PAGE_SIZE;
457 #ifdef _PC_SYMLINK_MAX
458 case _PC_SYMLINK_MAX:
459 *ap->a_retval = MAXPATHLEN;
471 struct vop_lookup_args /* {
472 * struct vnodeop_desc * a_desc;
473 * struct vnode *a_dvp;
474 * struct vnode **a_vpp;
475 * struct componentname *a_cnp;
480 struct vnode *vp, *dvp;
481 int flags = ap->a_cnp->cn_flags;
482 int lockparent; /* 1 => lockparent flag is set */
483 int wantparent; /* 1 => wantparent or lockparent flag */
484 struct thread *p = ap->a_cnp->cn_thread;
487 if (dvp->v_type != VDIR) {
488 #ifndef AFS_FBSD70_ENV
494 if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
499 lockparent = flags & LOCKPARENT;
500 wantparent = flags & (LOCKPARENT | WANTPARENT);
502 #ifdef AFS_FBSD80_ENV
503 cnp->cn_flags |= MPSAFE; /* steel */
506 #ifndef AFS_FBSD70_ENV
507 if (flags & ISDOTDOT)
508 VOP_UNLOCK(dvp, 0, p);
512 error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
516 if (flags & ISDOTDOT)
517 MA_VOP_LOCK(dvp, LK_EXCLUSIVE | LK_RETRY, p);
518 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)
519 && (flags & ISLASTCN) && error == ENOENT)
521 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
522 cnp->cn_flags |= SAVENAME;
527 vp = AFSTOV(vcp); /* always get a node if no error */
529 /* The parent directory comes in locked. We unlock it on return
530 * unless the caller wants it left locked.
531 * we also always return the vnode locked. */
533 if (flags & ISDOTDOT) {
534 MA_VOP_UNLOCK(dvp, 0, p);
535 ma_vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
536 ma_vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
537 /* always return the child locked */
538 if (lockparent && (flags & ISLASTCN)
539 && (error = ma_vn_lock(dvp, LK_EXCLUSIVE, p))) {
544 } else if (vp == dvp) {
545 /* they're the same; afs_lookup() already ref'ed the leaf.
546 * It came in locked, so we don't need to ref OR lock it */
548 if (!lockparent || !(flags & ISLASTCN)) {
549 #ifndef AFS_FBSD70_ENV /* 6 too? */
550 MA_VOP_UNLOCK(dvp, 0, p); /* done with parent. */
553 ma_vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, p);
554 /* always return the child locked */
558 if ((cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN))
559 || (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)))
560 cnp->cn_flags |= SAVENAME;
568 struct vop_create_args /* {
569 * struct vnode *a_dvp;
570 * struct vnode **a_vpp;
571 * struct componentname *a_cnp;
572 * struct vattr *a_vap;
577 struct vnode *dvp = ap->a_dvp;
578 struct thread *p = ap->a_cnp->cn_thread;
583 afs_create(VTOAFS(dvp), name, ap->a_vap,
584 ap->a_vap->va_vaflags & VA_EXCLUSIVE ? EXCL : NONEXCL,
585 ap->a_vap->va_mode, &vcp, cnp->cn_cred);
593 *ap->a_vpp = AFSTOV(vcp);
594 ma_vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY, p);
604 struct vop_mknod_args /* {
605 * struct vnode *a_dvp;
606 * struct vnode **a_vpp;
607 * struct componentname *a_cnp;
608 * struct vattr *a_vap;
616 validate_vops(struct vnode *vp, int after)
619 struct vnodeopv_entry_desc *this;
620 for (this = afs_vnodeop_entries; this->opve_op; this++) {
621 if (vp->v_op[this->opve_op->vdesc_offset] != this->opve_impl) {
623 printf("v_op %d ", after);
627 printf("For oper %d (%s), func is %p, not %p",
628 this->opve_op->vdesc_offset, this->opve_op->vdesc_name,
629 vp->v_op[this->opve_op->vdesc_offset], this->opve_impl);
637 struct vop_open_args /* {
638 * struct vnode *a_vp;
640 * struct ucred *a_cred;
641 * struct thread *a_td;
646 struct vcache *vc = VTOAFS(ap->a_vp);
649 error = afs_open(&vc, ap->a_mode, ap->a_cred);
651 if (AFSTOV(vc) != ap->a_vp)
652 panic("AFS open changed vnode!");
655 #ifdef AFS_FBSD60_ENV
656 vnode_create_vobject(ap->a_vp, vc->f.m.Length, ap->a_td);
658 osi_FlushPages(vc, ap->a_cred);
664 struct vop_close_args /* {
665 * struct vnode *a_vp;
667 * struct ucred *a_cred;
668 * struct thread *a_td;
672 struct vnode *vp = ap->a_vp;
673 struct vcache *avc = VTOAFS(vp);
675 #if defined(AFS_FBSD80_ENV)
677 iflag = vp->v_iflag & VI_DOOMED;
679 if (iflag & VI_DOOMED) {
680 /* osi_FlushVCache (correctly) calls vgone() on recycled vnodes, we don't
681 * have an afs_close to process, in that case */
683 panic("afs_vop_close: doomed vnode %p has vcache %p with non-zero opens %d\n",
684 vp, avc, avc->opens);
691 code = afs_close(avc, ap->a_fflag, ap->a_cred);
693 code = afs_close(avc, ap->a_fflag, afs_osi_credp);
694 osi_FlushPages(avc, ap->a_cred); /* hold bozon lock, but not basic vnode lock */
701 struct vop_access_args /* {
702 * struct vnode *a_vp;
703 * accmode_t a_accmode;
704 * struct ucred *a_cred;
705 * struct thread *a_td;
710 #if defined(AFS_FBSD80_ENV)
711 code = afs_access(VTOAFS(ap->a_vp), ap->a_accmode, ap->a_cred);
713 code = afs_access(VTOAFS(ap->a_vp), ap->a_mode, ap->a_cred);
721 struct vop_getattr_args /* {
722 * struct vnode *a_vp;
723 * struct vattr *a_vap;
724 * struct ucred *a_cred;
730 code = afs_getattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);
738 struct vop_setattr_args /* {
739 * struct vnode *a_vp;
740 * struct vattr *a_vap;
741 * struct ucred *a_cred;
746 code = afs_setattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);
753 struct vop_read_args /* {
754 * struct vnode *a_vp;
757 * struct ucred *a_cred;
762 struct vcache *avc = VTOAFS(ap->a_vp);
764 osi_FlushPages(avc, ap->a_cred); /* hold bozon lock, but not basic vnode lock */
765 code = afs_read(avc, ap->a_uio, ap->a_cred, 0, 0, 0);
770 /* struct vop_getpages_args {
771 * struct vnode *a_vp;
775 * vm_oofset_t a_offset;
779 afs_vop_getpages(struct vop_getpages_args *ap)
782 int i, nextoff, size, toff, npages;
793 if ((object = vp->v_object) == NULL) {
794 printf("afs_getpages: called with non-merged cache vnode??\n");
795 return VM_PAGER_ERROR;
797 npages = btoc(ap->a_count);
799 * If the requested page is partially valid, just return it and
800 * allow the pager to zero-out the blanks. Partially valid pages
801 * can only occur at the file EOF.
805 vm_page_t m = ap->a_m[ap->a_reqpage];
807 VM_OBJECT_LOCK(object);
808 ma_vm_page_lock_queues();
810 /* handled by vm_fault now */
811 /* vm_page_zero_invalid(m, TRUE); */
812 for (i = 0; i < npages; ++i) {
813 if (i != ap->a_reqpage) {
814 ma_vm_page_lock(ap->a_m[i]);
815 vm_page_free(ap->a_m[i]);
816 ma_vm_page_unlock(ap->a_m[i]);
819 ma_vm_page_unlock_queues();
820 VM_OBJECT_UNLOCK(object);
823 ma_vm_page_unlock_queues();
824 VM_OBJECT_UNLOCK(object);
826 bp = getpbuf(&afs_pbuf_freecnt);
828 kva = (vm_offset_t) bp->b_data;
829 pmap_qenter(kva, ap->a_m, npages);
830 MA_PCPU_INC(cnt.v_vnodein);
831 MA_PCPU_ADD(cnt.v_vnodepgsin, npages);
833 iov.iov_base = (caddr_t) kva;
834 iov.iov_len = ap->a_count;
837 uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
838 uio.uio_resid = ap->a_count;
839 uio.uio_segflg = UIO_SYSSPACE;
840 uio.uio_rw = UIO_READ;
841 uio.uio_td = curthread;
844 osi_FlushPages(avc, osi_curcred()); /* hold bozon lock, but not basic vnode lock */
845 code = afs_read(avc, &uio, osi_curcred(), 0, 0, 0);
847 pmap_qremove(kva, npages);
849 relpbuf(bp, &afs_pbuf_freecnt);
851 if (code && (uio.uio_resid == ap->a_count)) {
852 VM_OBJECT_LOCK(object);
853 ma_vm_page_lock_queues();
854 for (i = 0; i < npages; ++i) {
855 if (i != ap->a_reqpage)
856 vm_page_free(ap->a_m[i]);
858 ma_vm_page_unlock_queues();
859 VM_OBJECT_UNLOCK(object);
860 return VM_PAGER_ERROR;
863 size = ap->a_count - uio.uio_resid;
864 VM_OBJECT_LOCK(object);
865 ma_vm_page_lock_queues();
866 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
868 nextoff = toff + PAGE_SIZE;
871 /* XXX not in nfsclient? */
872 m->flags &= ~PG_ZERO;
874 if (nextoff <= size) {
876 * Read operation filled an entire page
878 m->valid = VM_PAGE_BITS_ALL;
879 #ifndef AFS_FBSD80_ENV
882 KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
884 } else if (size > toff) {
886 * Read operation filled a partial page.
889 vm_page_set_valid(m, 0, size - toff);
890 #ifndef AFS_FBSD80_ENV
893 KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
897 if (i != ap->a_reqpage) {
899 * Whether or not to leave the page activated is up in
900 * the air, but we should put the page on a page queue
901 * somewhere (it already is in the object). Result:
902 * It appears that empirical results show that
903 * deactivating pages is best.
907 * Just in case someone was asking for this page we
908 * now tell them that it is ok to use.
911 #if defined(AFS_FBSD70_ENV)
912 if (m->oflags & VPO_WANTED) {
914 if (m->flags & PG_WANTED) {
918 ma_vm_page_unlock(m);
922 vm_page_deactivate(m);
923 ma_vm_page_unlock(m);
929 ma_vm_page_unlock(m);
933 ma_vm_page_unlock_queues();
934 VM_OBJECT_UNLOCK(object);
940 struct vop_write_args /* {
941 * struct vnode *a_vp;
944 * struct ucred *a_cred;
948 struct vcache *avc = VTOAFS(ap->a_vp);
950 osi_FlushPages(avc, ap->a_cred); /* hold bozon lock, but not basic vnode lock */
952 afs_write(VTOAFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_cred, 0);
958 * struct vop_putpages_args {
959 * struct vnode *a_vp;
964 * vm_oofset_t a_offset;
968 * All of the pages passed to us in ap->a_m[] are already marked as busy,
969 * so there is no additional locking required to set their flags. -GAW
972 afs_vop_putpages(struct vop_putpages_args *ap)
975 int i, size, npages, sync;
985 /* Perhaps these two checks should just be KASSERTs instead... */
986 if (vp->v_object == NULL) {
987 printf("afs_putpages: called with non-merged cache vnode??\n");
988 return VM_PAGER_ERROR; /* XXX I think this is insufficient */
990 if (vType(avc) != VREG) {
991 printf("afs_putpages: not VREG");
992 return VM_PAGER_ERROR; /* XXX I think this is insufficient */
994 npages = btoc(ap->a_count);
995 for (i = 0; i < npages; i++)
996 ap->a_rtvals[i] = VM_PAGER_AGAIN;
997 bp = getpbuf(&afs_pbuf_freecnt);
999 kva = (vm_offset_t) bp->b_data;
1000 pmap_qenter(kva, ap->a_m, npages);
1001 MA_PCPU_INC(cnt.v_vnodeout);
1002 MA_PCPU_ADD(cnt.v_vnodepgsout, ap->a_count);
1004 iov.iov_base = (caddr_t) kva;
1005 iov.iov_len = ap->a_count;
1008 uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
1009 uio.uio_resid = ap->a_count;
1010 uio.uio_segflg = UIO_SYSSPACE;
1011 uio.uio_rw = UIO_WRITE;
1012 uio.uio_td = curthread;
1014 if (ap->a_sync & VM_PAGER_PUT_SYNC)
1016 /*if (ap->a_sync & VM_PAGER_PUT_INVAL)
1017 * sync |= IO_INVAL; */
1020 code = afs_write(avc, &uio, sync, osi_curcred(), 0);
1023 pmap_qremove(kva, npages);
1024 relpbuf(bp, &afs_pbuf_freecnt);
1027 size = ap->a_count - uio.uio_resid;
1028 for (i = 0; i < round_page(size) / PAGE_SIZE; i++) {
1029 ap->a_rtvals[i] = VM_PAGER_OK;
1030 vm_page_undirty(ap->a_m[i]);
1033 return ap->a_rtvals[0];
1038 struct vop_ioctl_args /* {
1039 * struct vnode *a_vp;
1043 * struct ucred *a_cred;
1044 * struct thread *a_td;
1047 struct vcache *tvc = VTOAFS(ap->a_vp);
1050 /* in case we ever get in here... */
1052 AFS_STATCNT(afs_ioctl);
1053 if (((ap->a_command >> 8) & 0xff) == 'V') {
1054 /* This is a VICEIOCTL call */
1056 error = HandleIoctl(tvc, ap->a_command, ap->a_data);
1060 /* No-op call; just return. */
1068 struct vop_poll_args /* {
1069 * struct vnode *a_vp;
1071 * struct ucred *a_cred;
1072 * struct thread *td;
1076 * We should really check to see if I/O is possible.
1084 * NB Currently unsupported.
1089 struct vop_mmap_args /* {
1090 * struct vnode *a_vp;
1092 * struct ucred *a_cred;
1093 * struct thread *td;
1101 struct vop_fsync_args /* {
1102 * struct vnode *a_vp;
1104 * struct thread *td;
1108 struct vnode *vp = ap->a_vp;
1111 /*vflushbuf(vp, wait); */
1112 #ifdef AFS_FBSD60_ENV
1113 error = afs_fsync(VTOAFS(vp), ap->a_td->td_ucred);
1116 error = afs_fsync(VTOAFS(vp), ap->a_cred);
1118 error = afs_fsync(VTOAFS(vp), afs_osi_credp);
1126 struct vop_remove_args /* {
1127 * struct vnode *a_dvp;
1128 * struct vnode *a_vp;
1129 * struct componentname *a_cnp;
1133 struct vnode *vp = ap->a_vp;
1134 struct vnode *dvp = ap->a_dvp;
1138 error = afs_remove(VTOAFS(dvp), name, cnp->cn_cred);
1147 struct vop_link_args /* {
1148 * struct vnode *a_vp;
1149 * struct vnode *a_tdvp;
1150 * struct componentname *a_cnp;
1154 struct vnode *dvp = ap->a_tdvp;
1155 struct vnode *vp = ap->a_vp;
1156 struct thread *p = ap->a_cnp->cn_thread;
1159 if (dvp->v_mount != vp->v_mount) {
1163 if (vp->v_type == VDIR) {
1167 if ((error = ma_vn_lock(vp, LK_CANRECURSE | LK_EXCLUSIVE, p)) != 0) {
1171 error = afs_link(VTOAFS(vp), VTOAFS(dvp), name, cnp->cn_cred);
1174 MA_VOP_UNLOCK(vp, 0, p);
1182 struct vop_rename_args /* {
1183 * struct vnode *a_fdvp;
1184 * struct vnode *a_fvp;
1185 * struct componentname *a_fcnp;
1186 * struct vnode *a_tdvp;
1187 * struct vnode *a_tvp;
1188 * struct componentname *a_tcnp;
1192 struct componentname *fcnp = ap->a_fcnp;
1194 struct componentname *tcnp = ap->a_tcnp;
1196 struct vnode *tvp = ap->a_tvp;
1197 struct vnode *tdvp = ap->a_tdvp;
1198 struct vnode *fvp = ap->a_fvp;
1199 struct vnode *fdvp = ap->a_fdvp;
1200 struct thread *p = fcnp->cn_thread;
1203 * Check for cross-device rename.
1205 if ((fvp->v_mount != tdvp->v_mount)
1206 || (tvp && (fvp->v_mount != tvp->v_mount))) {
1220 * if fvp == tvp, we're just removing one name of a pair of
1221 * directory entries for the same element. convert call into rename.
1222 ( (pinched from FreeBSD 4.4's ufs_rename())
1226 if (fvp->v_type == VDIR) {
1231 /* Release destination completely. */
1235 /* Delete source. */
1238 fcnp->cn_flags &= ~MODMASK;
1239 fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
1240 if ((fcnp->cn_flags & SAVESTART) == 0)
1241 panic("afs_rename: lost from startdir");
1242 fcnp->cn_nameiop = DELETE;
1244 error = relookup(fdvp, &fvp, fcnp);
1251 error = VOP_REMOVE(fdvp, fvp, fcnp);
1259 if ((error = ma_vn_lock(fvp, LK_EXCLUSIVE, p)) != 0)
1262 MALLOC(fname, char *, fcnp->cn_namelen + 1, M_TEMP, M_WAITOK);
1263 memcpy(fname, fcnp->cn_nameptr, fcnp->cn_namelen);
1264 fname[fcnp->cn_namelen] = '\0';
1265 MALLOC(tname, char *, tcnp->cn_namelen + 1, M_TEMP, M_WAITOK);
1266 memcpy(tname, tcnp->cn_nameptr, tcnp->cn_namelen);
1267 tname[tcnp->cn_namelen] = '\0';
1271 /* XXX use "from" or "to" creds? NFS uses "to" creds */
1273 afs_rename(VTOAFS(fdvp), fname, VTOAFS(tdvp), tname, tcnp->cn_cred);
1276 FREE(fname, M_TEMP);
1277 FREE(tname, M_TEMP);
1291 struct vop_mkdir_args /* {
1292 * struct vnode *a_dvp;
1293 * struct vnode **a_vpp;
1294 * struct componentname *a_cnp;
1295 * struct vattr *a_vap;
1298 struct vnode *dvp = ap->a_dvp;
1299 struct vattr *vap = ap->a_vap;
1302 struct thread *p = ap->a_cnp->cn_thread;
1306 if ((cnp->cn_flags & HASBUF) == 0)
1307 panic("afs_vop_mkdir: no name");
1310 error = afs_mkdir(VTOAFS(dvp), name, vap, &vcp, cnp->cn_cred);
1317 *ap->a_vpp = AFSTOV(vcp);
1318 ma_vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY, p);
1327 struct vop_rmdir_args /* {
1328 * struct vnode *a_dvp;
1329 * struct vnode *a_vp;
1330 * struct componentname *a_cnp;
1334 struct vnode *dvp = ap->a_dvp;
1338 error = afs_rmdir(VTOAFS(dvp), name, cnp->cn_cred);
1344 /* struct vop_symlink_args {
1345 * struct vnode *a_dvp;
1346 * struct vnode **a_vpp;
1347 * struct componentname *a_cnp;
1348 * struct vattr *a_vap;
1353 afs_vop_symlink(struct vop_symlink_args *ap)
1356 struct vnode *newvp;
1367 afs_symlink(VTOAFS(dvp), name, ap->a_vap, ap->a_target, cnp->cn_cred);
1369 error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
1371 newvp = AFSTOV(vcp);
1372 ma_vn_lock(newvp, LK_EXCLUSIVE | LK_RETRY, cnp->cn_thread);
1377 *(ap->a_vpp) = newvp;
1383 struct vop_readdir_args /* {
1384 * struct vnode *a_vp;
1385 * struct uio *a_uio;
1386 * struct ucred *a_cred;
1388 * u_long *a_cookies;
1394 /* printf("readdir %x cookies %x ncookies %d\n", ap->a_vp, ap->a_cookies,
1396 off = ap->a_uio->uio_offset;
1399 afs_readdir(VTOAFS(ap->a_vp), ap->a_uio, ap->a_cred, ap->a_eofflag);
1401 if (!error && ap->a_ncookies != NULL) {
1402 struct uio *uio = ap->a_uio;
1403 const struct dirent *dp, *dp_start, *dp_end;
1405 u_long *cookies, *cookiep;
1407 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
1408 panic("afs_readdir: burned cookies");
1409 dp = (const struct dirent *)
1410 ((const char *)uio->uio_iov->iov_base - (uio->uio_offset - off));
1412 dp_end = (const struct dirent *)uio->uio_iov->iov_base;
1413 for (dp_start = dp, ncookies = 0; dp < dp_end;
1414 dp = (const struct dirent *)((const char *)dp + dp->d_reclen))
1417 MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP,
1419 for (dp = dp_start, cookiep = cookies; dp < dp_end;
1420 dp = (const struct dirent *)((const char *)dp + dp->d_reclen)) {
1421 off += dp->d_reclen;
1424 *ap->a_cookies = cookies;
1425 *ap->a_ncookies = ncookies;
1432 afs_vop_readlink(ap)
1433 struct vop_readlink_args /* {
1434 * struct vnode *a_vp;
1435 * struct uio *a_uio;
1436 * struct ucred *a_cred;
1440 /* printf("readlink %x\n", ap->a_vp);*/
1442 error = afs_readlink(VTOAFS(ap->a_vp), ap->a_uio, ap->a_cred);
1447 extern int prtactive;
1450 afs_vop_inactive(ap)
1451 struct vop_inactive_args /* {
1452 * struct vnode *a_vp;
1453 * struct thread *td;
1456 struct vnode *vp = ap->a_vp;
1458 if (prtactive && vp->v_usecount != 0)
1459 vprint("afs_vop_inactive(): pushing active", vp);
1462 afs_InactiveVCache(VTOAFS(vp), 0); /* decrs ref counts */
1464 #ifndef AFS_FBSD60_ENV
1465 MA_VOP_UNLOCK(vp, 0, ap->a_td);
1471 * struct vop_reclaim_args {
1472 * struct vnode *a_vp;
1476 afs_vop_reclaim(struct vop_reclaim_args *ap)
1478 /* copied from ../OBSD/osi_vnodeops.c:afs_nbsd_reclaim() */
1480 struct vnode *vp = ap->a_vp;
1481 struct vcache *avc = VTOAFS(vp);
1482 int haveGlock = ISAFS_GLOCK();
1483 int haveVlock = CheckLock(&afs_xvcache);
1488 ObtainWriteLock(&afs_xvcache, 901);
1489 /* reclaim the vnode and the in-memory vcache, but keep the on-disk vcache */
1490 code = afs_FlushVCache(avc, &slept);
1492 ReleaseWriteLock(&afs_xvcache);
1497 afs_warn("afs_vop_reclaim: afs_FlushVCache failed code %d vnode\n", code);
1501 /* basically, it must not fail */
1502 vnode_destroy_vobject(vp);
1508 #ifndef AFS_FBSD60_ENV
1511 struct vop_bmap_args /* {
1512 * struct vnode *a_vp;
1514 * struct vnode **a_vpp;
1521 *ap->a_bnp = ap->a_bn * (PAGE_SIZE / DEV_BSIZE);
1524 *ap->a_vpp = ap->a_vp;
1526 if (ap->a_runp != NULL)
1528 if (ap->a_runb != NULL)
1536 afs_vop_strategy(ap)
1537 struct vop_strategy_args /* {
1543 error = afs_ustrategy(ap->a_bp, osi_curcred());
1550 struct vop_print_args /* {
1551 * struct vnode *a_vp;
1554 struct vnode *vp = ap->a_vp;
1555 struct vcache *vc = VTOAFS(ap->a_vp);
1556 int s = vc->f.states;
1558 printf("vc %p vp %p tag %s, fid: %d.%d.%d.%d, opens %d, writers %d", vc, vp, vp->v_tag,
1559 (int)vc->f.fid.Cell, (u_int) vc->f.fid.Fid.Volume,
1560 (u_int) vc->f.fid.Fid.Vnode, (u_int) vc->f.fid.Fid.Unique, vc->opens,
1561 vc->execsOrWriters);
1562 printf("\n states%s%s%s%s%s", (s & CStatd) ? " statd" : "",
1563 (s & CRO) ? " readonly" : "", (s & CDirty) ? " dirty" : "",
1564 (s & CMAPPED) ? " mapped" : "",
1565 (s & CVFlushed) ? " flush in progress" : "");
1571 * Advisory record locking support (fcntl() POSIX style)
1575 struct vop_advlock_args /* {
1576 * struct vnode *a_vp;
1579 * struct flock *a_fl;
1584 struct ucred cr = *osi_curcred();
1588 afs_lockctl(VTOAFS(ap->a_vp), ap->a_fl, ap->a_op, &cr, (int)ap->a_id);