2 * A large chunk of this file appears to be copied directly from
3 * sys/nfsclient/nfs_bio.c, which has the following license:
6 * Copyright (c) 1989, 1993
7 * The Regents of the University of California. All rights reserved.
9 * This code is derived from software contributed to Berkeley by
10 * Rick Macklem at The University of Guelph.
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 * 3. All advertising materials mentioning features or use of this software
21 * must display the following acknowledgement:
22 * This product includes software developed by the University of
23 * California, Berkeley and its contributors.
24 * 4. Neither the name of the University nor the names of its contributors
25 * may be used to endorse or promote products derived from this software
26 * without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
40 * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
43 * Pursuant to a statement of U.C. Berkeley dated 1999-07-22, this license
44 * is amended to drop clause (3) above.
47 #include <afsconfig.h>
48 #include <afs/param.h>
51 #include <afs/sysincludes.h> /* Standard vendor system headers */
52 #include <afsincludes.h> /* Afs-based standard headers */
53 #include <afs/afs_stats.h> /* statistics */
54 #include <sys/malloc.h>
55 #include <sys/namei.h>
56 #include <sys/unistd.h>
57 #include <vm/vm_page.h>
58 #include <vm/vm_object.h>
59 #include <vm/vm_pager.h>
60 #include <vm/vnode_pager.h>
61 extern int afs_pbuf_freecnt;
/*
 * Forward declarations of the AFS vnode-operation handlers that are
 * registered in the afs_vnodeops vop_vector below (FreeBSD >= 6 interface).
 */
64 static vop_access_t afs_vop_access;
65 static vop_advlock_t afs_vop_advlock;
66 static vop_close_t afs_vop_close;
67 static vop_create_t afs_vop_create;
68 static vop_fsync_t afs_vop_fsync;
69 static vop_getattr_t afs_vop_getattr;
70 static vop_getpages_t afs_vop_getpages;
71 static vop_inactive_t afs_vop_inactive;
72 static vop_ioctl_t afs_vop_ioctl;
73 static vop_link_t afs_vop_link;
74 static vop_lookup_t afs_vop_lookup;
75 static vop_mkdir_t afs_vop_mkdir;
76 static vop_mknod_t afs_vop_mknod;
77 static vop_open_t afs_vop_open;
78 static vop_pathconf_t afs_vop_pathconf;
79 static vop_poll_t afs_vop_poll;
80 static vop_print_t afs_vop_print;
81 static vop_putpages_t afs_vop_putpages;
82 static vop_read_t afs_vop_read;
83 static vop_readdir_t afs_vop_readdir;
84 static vop_readlink_t afs_vop_readlink;
85 static vop_reclaim_t afs_vop_reclaim;
86 static vop_remove_t afs_vop_remove;
87 static vop_rename_t afs_vop_rename;
88 static vop_rmdir_t afs_vop_rmdir;
89 static vop_setattr_t afs_vop_setattr;
90 static vop_strategy_t afs_vop_strategy;
91 static vop_symlink_t afs_vop_symlink;
92 static vop_write_t afs_vop_write;
/* FreeBSD 7.x only: AFS supplies its own lock/unlock/islocked handlers. */
93 #if defined(AFS_FBSD70_ENV) && !defined(AFS_FBSD80_ENV)
94 static vop_lock1_t afs_vop_lock;
95 static vop_unlock_t afs_vop_unlock;
96 static vop_islocked_t afs_vop_islocked;
/*
 * Vnode-operation dispatch table for AFS vnodes (FreeBSD >= 6 vop_vector).
 * Operations not listed here fall through to default_vnodeops.
 */
99 struct vop_vector afs_vnodeops = {
100 .vop_default = &default_vnodeops,
101 .vop_access = afs_vop_access,
102 .vop_advlock = afs_vop_advlock,
103 .vop_close = afs_vop_close,
104 .vop_create = afs_vop_create,
105 .vop_fsync = afs_vop_fsync,
106 .vop_getattr = afs_vop_getattr,
107 .vop_getpages = afs_vop_getpages,
108 .vop_inactive = afs_vop_inactive,
109 .vop_ioctl = afs_vop_ioctl,
110 #if !defined(AFS_FBSD80_ENV)
111 /* removed at least temporarily (NFSv4 flux) */
112 .vop_lease = VOP_NULL,
114 .vop_link = afs_vop_link,
115 .vop_lookup = afs_vop_lookup,
116 .vop_mkdir = afs_vop_mkdir,
117 .vop_mknod = afs_vop_mknod,
118 .vop_open = afs_vop_open,
119 .vop_pathconf = afs_vop_pathconf,
120 .vop_poll = afs_vop_poll,
121 .vop_print = afs_vop_print,
122 .vop_putpages = afs_vop_putpages,
123 .vop_read = afs_vop_read,
124 .vop_readdir = afs_vop_readdir,
125 .vop_readlink = afs_vop_readlink,
126 .vop_reclaim = afs_vop_reclaim,
127 .vop_remove = afs_vop_remove,
128 .vop_rename = afs_vop_rename,
129 .vop_rmdir = afs_vop_rmdir,
130 .vop_setattr = afs_vop_setattr,
131 .vop_strategy = afs_vop_strategy,
132 .vop_symlink = afs_vop_symlink,
133 .vop_write = afs_vop_write,
/* 7.x-only explicit locking entry points; 8.0+ uses the defaults. */
134 #if defined(AFS_FBSD70_ENV) && !defined(AFS_FBSD80_ENV)
135 .vop_lock1 = afs_vop_lock,
136 .vop_unlock = afs_vop_unlock,
137 .vop_islocked = afs_vop_islocked,
/*
 * Pre-6.0 FreeBSD: handlers are externally visible and registered through a
 * vnodeopv_entry_desc table instead of a vop_vector (see below).
 */
141 #else /* AFS_FBSD60_ENV */
143 int afs_vop_lookup(struct vop_lookup_args *);
144 int afs_vop_create(struct vop_create_args *);
145 int afs_vop_mknod(struct vop_mknod_args *);
146 int afs_vop_open(struct vop_open_args *);
147 int afs_vop_close(struct vop_close_args *);
148 int afs_vop_access(struct vop_access_args *);
149 int afs_vop_getattr(struct vop_getattr_args *);
150 int afs_vop_setattr(struct vop_setattr_args *);
151 int afs_vop_read(struct vop_read_args *);
152 int afs_vop_write(struct vop_write_args *);
153 int afs_vop_getpages(struct vop_getpages_args *);
154 int afs_vop_putpages(struct vop_putpages_args *);
155 int afs_vop_ioctl(struct vop_ioctl_args *);
156 static int afs_vop_pathconf(struct vop_pathconf_args *);
157 int afs_vop_poll(struct vop_poll_args *);
158 int afs_vop_fsync(struct vop_fsync_args *);
159 int afs_vop_remove(struct vop_remove_args *);
160 int afs_vop_link(struct vop_link_args *);
161 int afs_vop_rename(struct vop_rename_args *);
162 int afs_vop_mkdir(struct vop_mkdir_args *);
163 int afs_vop_rmdir(struct vop_rmdir_args *);
164 int afs_vop_symlink(struct vop_symlink_args *);
165 int afs_vop_readdir(struct vop_readdir_args *);
166 int afs_vop_readlink(struct vop_readlink_args *);
167 int afs_vop_inactive(struct vop_inactive_args *);
168 int afs_vop_reclaim(struct vop_reclaim_args *);
/* bmap/strategy are only implemented in the pre-6.0 configuration. */
169 int afs_vop_bmap(struct vop_bmap_args *);
170 int afs_vop_strategy(struct vop_strategy_args *);
171 int afs_vop_print(struct vop_print_args *);
172 int afs_vop_advlock(struct vop_advlock_args *);
176 /* Global vfs data structures for AFS. */
/*
 * Pre-6.0 registration: descriptor/handler pairs consumed by
 * vfs_add_vnodeops() via afs_vnodeop_opv_desc at the end of this table.
 */
177 vop_t **afs_vnodeop_p;
178 struct vnodeopv_entry_desc afs_vnodeop_entries[] = {
179 {&vop_default_desc, (vop_t *) vop_defaultop},
180 {&vop_access_desc, (vop_t *) afs_vop_access}, /* access */
181 {&vop_advlock_desc, (vop_t *) afs_vop_advlock}, /* advlock */
182 {&vop_bmap_desc, (vop_t *) afs_vop_bmap}, /* bmap */
183 {&vop_close_desc, (vop_t *) afs_vop_close}, /* close */
/* VM object create/destroy/get are delegated to the stock handlers. */
184 {&vop_createvobject_desc, (vop_t *) vop_stdcreatevobject},
185 {&vop_destroyvobject_desc, (vop_t *) vop_stddestroyvobject},
186 {&vop_create_desc, (vop_t *) afs_vop_create}, /* create */
187 {&vop_fsync_desc, (vop_t *) afs_vop_fsync}, /* fsync */
188 {&vop_getattr_desc, (vop_t *) afs_vop_getattr}, /* getattr */
189 {&vop_getpages_desc, (vop_t *) afs_vop_getpages}, /* read */
190 {&vop_getvobject_desc, (vop_t *) vop_stdgetvobject},
191 {&vop_putpages_desc, (vop_t *) afs_vop_putpages}, /* write */
192 {&vop_inactive_desc, (vop_t *) afs_vop_inactive}, /* inactive */
193 {&vop_lease_desc, (vop_t *) vop_null},
194 {&vop_link_desc, (vop_t *) afs_vop_link}, /* link */
195 {&vop_lookup_desc, (vop_t *) afs_vop_lookup}, /* lookup */
196 {&vop_mkdir_desc, (vop_t *) afs_vop_mkdir}, /* mkdir */
197 {&vop_mknod_desc, (vop_t *) afs_vop_mknod}, /* mknod */
198 {&vop_open_desc, (vop_t *) afs_vop_open}, /* open */
199 {&vop_pathconf_desc, (vop_t *) afs_vop_pathconf}, /* pathconf */
200 {&vop_poll_desc, (vop_t *) afs_vop_poll}, /* select */
201 {&vop_print_desc, (vop_t *) afs_vop_print}, /* print */
202 {&vop_read_desc, (vop_t *) afs_vop_read}, /* read */
203 {&vop_readdir_desc, (vop_t *) afs_vop_readdir}, /* readdir */
204 {&vop_readlink_desc, (vop_t *) afs_vop_readlink}, /* readlink */
205 {&vop_reclaim_desc, (vop_t *) afs_vop_reclaim}, /* reclaim */
206 {&vop_remove_desc, (vop_t *) afs_vop_remove}, /* remove */
207 {&vop_rename_desc, (vop_t *) afs_vop_rename}, /* rename */
208 {&vop_rmdir_desc, (vop_t *) afs_vop_rmdir}, /* rmdir */
209 {&vop_setattr_desc, (vop_t *) afs_vop_setattr}, /* setattr */
210 {&vop_strategy_desc, (vop_t *) afs_vop_strategy}, /* strategy */
211 {&vop_symlink_desc, (vop_t *) afs_vop_symlink}, /* symlink */
212 {&vop_write_desc, (vop_t *) afs_vop_write}, /* write */
213 {&vop_ioctl_desc, (vop_t *) afs_vop_ioctl}, /* XXX ioctl */
214 /*{ &vop_seek_desc, afs_vop_seek }, *//* seek */
/* NOTE(review): guard here is FBSD90, unlike the FBSD80 guard used for the
 * vop_vector above — presumably intentional for 8.x, but worth confirming. */
215 #if defined(AFS_FBSD70_ENV) && !defined(AFS_FBSD90_ENV)
216 {&vop_lock1_desc, (vop_t *) afs_vop_lock}, /* lock */
217 {&vop_unlock_desc, (vop_t *) afs_vop_unlock}, /* unlock */
218 {&vop_islocked_desc, (vop_t *) afs_vop_islocked}, /* islocked */
/* Descriptor handed to the VFS to publish the table above. */
222 struct vnodeopv_desc afs_vnodeop_opv_desc =
223 { &afs_vnodeop_p, afs_vnodeop_entries };
224 #endif /* AFS_FBSD60_ENV */
227 struct componentname *cnp = ap->a_cnp; \
229 MALLOC(name, char *, cnp->cn_namelen+1, M_TEMP, M_WAITOK); \
230 memcpy(name, cnp->cn_nameptr, cnp->cn_namelen); \
231 name[cnp->cn_namelen] = '\0'
233 #define DROPNAME() FREE(name, M_TEMP)
236 * Here we define compatibility functions/macros for interfaces that
237 * have changed between different FreeBSD versions.
/*
 * On 9.0+ pages are locked individually (vm_page_lock/unlock) and the
 * global page-queue lock calls become no-ops; on older releases it is the
 * reverse: the queue lock is real and per-page locking is a no-op.
 */
239 #if defined(AFS_FBSD90_ENV)
240 static __inline void ma_vm_page_lock_queues(void) {};
241 static __inline void ma_vm_page_unlock_queues(void) {};
242 static __inline void ma_vm_page_lock(vm_page_t m) { vm_page_lock(m); };
243 static __inline void ma_vm_page_unlock(vm_page_t m) { vm_page_unlock(m); };
245 static __inline void ma_vm_page_lock_queues(void) { vm_page_lock_queues(); };
246 static __inline void ma_vm_page_unlock_queues(void) { vm_page_unlock_queues(); };
247 static __inline void ma_vm_page_lock(vm_page_t m) {};
248 static __inline void ma_vm_page_unlock(vm_page_t m) {};
/*
 * vn_lock/VOP_LOCK/VOP_UNLOCK dropped their thread argument in 8.0;
 * these wrappers accept it everywhere and discard it where unused.
 */
251 #if defined(AFS_FBSD80_ENV)
252 #define ma_vn_lock(vp, flags, p) (vn_lock(vp, flags))
253 #define MA_VOP_LOCK(vp, flags, p) (VOP_LOCK(vp, flags))
254 #define MA_VOP_UNLOCK(vp, flags, p) (VOP_UNLOCK(vp, flags))
256 #define ma_vn_lock(vp, flags, p) (vn_lock(vp, flags, p))
257 #define MA_VOP_LOCK(vp, flags, p) (VOP_LOCK(vp, flags, p))
258 #define MA_VOP_UNLOCK(vp, flags, p) (VOP_UNLOCK(vp, flags, p))
/* Per-CPU statistics counters: PCPU_INC/PCPU_ADD appeared in 7.0. */
261 #if defined(AFS_FBSD70_ENV)
262 #define MA_PCPU_INC(c) PCPU_INC(c)
263 #define MA_PCPU_ADD(c, n) PCPU_ADD(c, n)
265 #define MA_PCPU_INC(c) PCPU_LAZY_INC(c)
266 #define MA_PCPU_ADD(c, n) (c) += (n)
/*
 * FreeBSD 7.x only: private copies of lockmgr internals (from kern_lock.c)
 * used by the unlock path below to fix up share counts.
 */
269 #ifdef AFS_FBSD70_ENV
270 #ifndef AFS_FBSD80_ENV
271 /* From kern_lock.c */
272 #define COUNT(td, x) if ((td)) (td)->td_locks += (x)
273 #define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \
274 LK_SHARE_NONZERO | LK_WAIT_NONZERO)
/* Bump the shared-hold count on lkp by incr and mark it share-held. */
277 sharelock(struct thread *td, struct lock *lkp, int incr) {
278 lkp->lk_flags |= LK_SHARE_NONZERO;
279 lkp->lk_sharecount += incr;
285 * Standard lock, unlock and islocked functions.
/*
 * afs_vop_lock: take the vnode lock by calling straight into lockmgr on
 * vp->v_vnlock, bypassing the default VFS wrapper.  The 8.0 lockmgr wants
 * the extra wmesg/prio/timo arguments; 7.x takes the thread instead.
 */
289 struct vop_lock1_args /* {
297 struct vnode *vp = ap->a_vp;
298 struct lock *lkp = vp->v_vnlock;
/* Debug tracing of lock acquisitions; compiled out (0 &&). */
300 #if 0 && defined(AFS_FBSD80_ENV) && !defined(UKERNEL)
301 afs_warn("afs_vop_lock: tid %d pid %d \"%s\"\n", curthread->td_tid,
302 curthread->td_proc->p_pid, curthread->td_name);
306 #ifdef AFS_FBSD80_ENV
307 return (_lockmgr_args(lkp, ap->a_flags, VI_MTX(vp),
308 LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT,
309 ap->a_file, ap->a_line));
311 return (_lockmgr(lkp, ap->a_flags, VI_MTX(vp), ap->a_td, ap->a_file, ap->a_line));
/*
 * afs_vop_unlock: release the vnode lock.  On pre-8.0 kernels this also
 * papers over an unbalanced-release case by faking a shared hold (see the
 * in-line apology below); on 8.0+ it is a plain lockmgr LK_RELEASE.
 */
318 struct vop_unlock_args /* {
324 struct vnode *vp = ap->a_vp;
325 struct lock *lkp = vp->v_vnlock;
327 #ifdef AFS_FBSD80_ENV
/* Mask the caller's flags down to a single lock-type bit. */
330 op = ((ap->a_flags) | LK_RELEASE) & LK_TYPE_MASK;
331 int glocked = ISAFS_GLOCK();
/* More than one type bit set means the caller passed bogus flags. */
334 if ((op & (op - 1)) != 0) {
335 afs_warn("afs_vop_unlock: Shit.\n");
338 code = lockmgr(lkp, ap->a_flags | LK_RELEASE, VI_MTX(vp));
344 /* possibly in current code path where this
345 * forces trace, we should have had a (shared? not
346 * necessarily, see _lockmgr in kern_lock.c) lock
347 * and that's the real bug. but.
/* Lock is not held at all: invent a shared hold so the release below
 * does not panic.  NOTE(review): workaround, not a fix — see above. */
350 if ((lkp->lk_exclusivecount == 0) &&
351 (!(lkp->lk_flags & LK_SHARE_NONZERO))) {
352 sharelock(ap->a_td, lkp, 1);
355 return (lockmgr(lkp, ap->a_flags | LK_RELEASE, VI_MTX(vp),
/*
 * afs_vop_islocked: report the lock state of the vnode's lock.
 * lockstatus() lost its thread argument in 8.0.
 */
363 struct vop_islocked_args /* {
365 struct thread *a_td; (not in 80)
368 #ifdef AFS_FBSD80_ENV
369 return (lockstatus(ap->a_vp->v_vnlock));
371 return (lockstatus(ap->a_vp->v_vnlock, ap->a_td));
377 * Mosty copied from sys/ufs/ufs/ufs_vnops.c:ufs_pathconf().
378 * We should know the correct answers to these questions with
379 * respect to the AFS protocol (which may differ from the UFS
380 * values) but for the moment this will do.
/*
 * afs_vop_pathconf: answer pathconf(2)/fpathconf(2) queries by storing the
 * requested limit in *ap->a_retval.  Values are borrowed from UFS, not
 * derived from AFS protocol limits (see note above).
 */
383 afs_vop_pathconf(struct vop_pathconf_args *ap)
388 switch (ap->a_name) {
390 *ap->a_retval = LINK_MAX;
393 *ap->a_retval = NAME_MAX;
396 *ap->a_retval = PATH_MAX;
399 *ap->a_retval = PIPE_BUF;
401 case _PC_CHOWN_RESTRICTED:
/* ACL/MAC features are not provided by this filesystem. */
407 #ifdef _PC_ACL_EXTENDED
408 case _PC_ACL_EXTENDED:
411 case _PC_ACL_PATH_MAX:
415 #ifdef _PC_MAC_PRESENT
416 case _PC_MAC_PRESENT:
422 /* _PC_ASYNC_IO should have been handled by upper layers. */
423 KASSERT(0, ("_PC_ASYNC_IO should not get here"));
433 #ifdef _PC_ALLOC_SIZE_MIN
434 case _PC_ALLOC_SIZE_MIN:
435 *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_bsize;
438 #ifdef _PC_FILESIZEBITS
439 case _PC_FILESIZEBITS:
440 *ap->a_retval = 32; /* XXX */
/* Recommended transfer-size parameters come from mount statistics. */
443 #ifdef _PC_REC_INCR_XFER_SIZE
444 case _PC_REC_INCR_XFER_SIZE:
445 *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize;
447 case _PC_REC_MAX_XFER_SIZE:
448 *ap->a_retval = -1; /* means ``unlimited'' */
450 case _PC_REC_MIN_XFER_SIZE:
451 *ap->a_retval = ap->a_vp->v_mount->mnt_stat.f_iosize;
453 case _PC_REC_XFER_ALIGN:
454 *ap->a_retval = PAGE_SIZE;
457 #ifdef _PC_SYMLINK_MAX
458 case _PC_SYMLINK_MAX:
459 *ap->a_retval = MAXPATHLEN;
/*
 * afs_vop_lookup: translate one pathname component in directory a_dvp into
 * a vnode returned via a_vpp, honoring the namei locking protocol
 * (LOCKPARENT/WANTPARENT/ISDOTDOT/ISLASTCN flags).  The child is always
 * returned locked; the parent is unlocked unless the caller asked to keep it.
 */
471 struct vop_lookup_args /* {
472 * struct vnodeop_desc * a_desc;
473 * struct vnode *a_dvp;
474 * struct vnode **a_vpp;
475 * struct componentname *a_cnp;
480 struct vnode *vp, *dvp;
481 int flags = ap->a_cnp->cn_flags;
482 int lockparent; /* 1 => lockparent flag is set */
483 int wantparent; /* 1 => wantparent or lockparent flag */
484 struct thread *p = ap->a_cnp->cn_thread;
/* Lookups only make sense in directories. */
487 if (dvp->v_type != VDIR) {
488 #ifndef AFS_FBSD70_ENV
/* ".." at the filesystem root would escape the mount. */
494 if ((flags & ISDOTDOT) && (dvp->v_vflag & VV_ROOT))
499 lockparent = flags & LOCKPARENT;
500 wantparent = flags & (LOCKPARENT | WANTPARENT);
502 #ifdef AFS_FBSD80_ENV
503 cnp->cn_flags |= MPSAFE; /* steel */
/* Pre-7.0: drop the parent lock around a ".." lookup to avoid a
 * child-before-parent lock-order deadlock; retaken after afs_lookup. */
506 #ifndef AFS_FBSD70_ENV
507 if (flags & ISDOTDOT)
508 VOP_UNLOCK(dvp, 0, p);
512 error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
516 if (flags & ISDOTDOT)
517 MA_VOP_LOCK(dvp, LK_EXCLUSIVE | LK_RETRY, p);
/* ENOENT on the last component of a CREATE/RENAME is the expected
 * "target does not exist yet" answer, not a failure. */
518 if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME)
519 && (flags & ISLASTCN) && error == ENOENT)
521 if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
522 cnp->cn_flags |= SAVENAME;
527 vp = AFSTOV(vcp); /* always get a node if no error */
529 /* The parent directory comes in locked. We unlock it on return
530 * unless the caller wants it left locked.
531 * we also always return the vnode locked. */
533 if (flags & ISDOTDOT) {
534 ma_vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
535 /* always return the child locked */
536 if (lockparent && (flags & ISLASTCN)
537 && (error = ma_vn_lock(dvp, LK_EXCLUSIVE, p))) {
542 } else if (vp == dvp) {
543 /* they're the same; afs_lookup() already ref'ed the leaf.
544 * It came in locked, so we don't need to ref OR lock it */
546 if (!lockparent || !(flags & ISLASTCN)) {
547 #ifndef AFS_FBSD70_ENV /* 6 too? */
548 MA_VOP_UNLOCK(dvp, 0, p); /* done with parent. */
551 ma_vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY, p);
552 /* always return the child locked */
/* Preserve the name buffer for the follow-up create/rename/delete. */
556 if ((cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN))
557 || (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN)))
558 cnp->cn_flags |= SAVENAME;
/*
 * afs_vop_create: create a regular file named by a_cnp in directory a_dvp
 * with attributes a_vap; on success return the new vnode locked in *a_vpp.
 */
566 struct vop_create_args /* {
567 * struct vnode *a_dvp;
568 * struct vnode **a_vpp;
569 * struct componentname *a_cnp;
570 * struct vattr *a_vap;
575 struct vnode *dvp = ap->a_dvp;
576 struct thread *p = ap->a_cnp->cn_thread;
/* VA_EXCLUSIVE maps to AFS EXCL semantics (fail if the file exists). */
581 afs_create(VTOAFS(dvp), name, ap->a_vap,
582 ap->a_vap->va_vaflags & VA_EXCLUSIVE ? EXCL : NONEXCL,
583 ap->a_vap->va_mode, &vcp, cnp->cn_cred);
591 *ap->a_vpp = AFSTOV(vcp);
592 ma_vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY, p);
602 struct vop_mknod_args /* {
603 * struct vnode *a_dvp;
604 * struct vnode **a_vpp;
605 * struct componentname *a_cnp;
606 * struct vattr *a_vap;
/*
 * validate_vops: debugging aid — verify that every operation in vp's v_op
 * table still points at the handler registered in afs_vnodeop_entries,
 * printing any mismatch.  `after` only tags the diagnostic output.
 */
614 validate_vops(struct vnode *vp, int after)
617 struct vnodeopv_entry_desc *this;
618 for (this = afs_vnodeop_entries; this->opve_op; this++) {
619 if (vp->v_op[this->opve_op->vdesc_offset] != this->opve_impl) {
621 printf("v_op %d ", after);
625 printf("For oper %d (%s), func is %p, not %p",
626 this->opve_op->vdesc_offset, this->opve_op->vdesc_name,
627 vp->v_op[this->opve_op->vdesc_offset], this->opve_impl);
/*
 * afs_vop_open: open the vnode via afs_open(), sanity-check that the vcache
 * still maps to the same vnode, then set up the VM object / flush pages.
 */
635 struct vop_open_args /* {
636 * struct vnode *a_vp;
638 * struct ucred *a_cred;
639 * struct thread *a_td;
644 struct vcache *vc = VTOAFS(ap->a_vp);
647 error = afs_open(&vc, ap->a_mode, ap->a_cred);
/* afs_open takes &vc and may in principle swap it; that must not happen. */
649 if (AFSTOV(vc) != ap->a_vp)
650 panic("AFS open changed vnode!");
653 #ifdef AFS_FBSD60_ENV
654 vnode_create_vobject(ap->a_vp, vc->f.m.Length, ap->a_td);
656 osi_FlushPages(vc, ap->a_cred);
/*
 * afs_vop_close: close the file, flushing cached pages afterwards.  Falls
 * back to afs_osi_credp when the caller supplied no credentials.  Doomed
 * (recycled) vnodes are tolerated only if they have no outstanding opens.
 */
662 struct vop_close_args /* {
663 * struct vnode *a_vp;
665 * struct ucred *a_cred;
666 * struct thread *a_td;
670 struct vnode *vp = ap->a_vp;
671 struct vcache *avc = VTOAFS(vp);
673 #if defined(AFS_FBSD80_ENV)
675 iflag = vp->v_iflag & VI_DOOMED;
677 if (iflag & VI_DOOMED) {
678 /* osi_FlushVCache (correctly) calls vgone() on recycled vnodes, we don't
679 * have an afs_close to process, in that case */
681 panic("afs_vop_close: doomed vnode %p has vcache %p with non-zero opens %d\n",
682 vp, avc, avc->opens);
689 code = afs_close(avc, ap->a_fflag, ap->a_cred);
691 code = afs_close(avc, ap->a_fflag, afs_osi_credp);
692 osi_FlushPages(avc, ap->a_cred); /* hold bozon lock, but not basic vnode lock */
/*
 * afs_vop_access: permission check — pass through to afs_access().
 * The access-mode field was renamed a_mode -> a_accmode in 8.0.
 */
699 struct vop_access_args /* {
700 * struct vnode *a_vp;
701 * accmode_t a_accmode;
702 * struct ucred *a_cred;
703 * struct thread *a_td;
708 #if defined(AFS_FBSD80_ENV)
709 code = afs_access(VTOAFS(ap->a_vp), ap->a_accmode, ap->a_cred);
711 code = afs_access(VTOAFS(ap->a_vp), ap->a_mode, ap->a_cred);
/* afs_vop_getattr: fill *a_vap with the vnode's attributes via afs_getattr(). */
719 struct vop_getattr_args /* {
720 * struct vnode *a_vp;
721 * struct vattr *a_vap;
722 * struct ucred *a_cred;
728 code = afs_getattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);
/* afs_vop_setattr: apply the attributes in *a_vap via afs_setattr(). */
736 struct vop_setattr_args /* {
737 * struct vnode *a_vp;
738 * struct vattr *a_vap;
739 * struct ucred *a_cred;
744 code = afs_setattr(VTOAFS(ap->a_vp), ap->a_vap, ap->a_cred);
/*
 * afs_vop_read: read file data into a_uio.  Dirty mapped pages are flushed
 * to the cache first so afs_read() sees current data.
 */
751 struct vop_read_args /* {
752 * struct vnode *a_vp;
755 * struct ucred *a_cred;
760 struct vcache *avc = VTOAFS(ap->a_vp);
762 osi_FlushPages(avc, ap->a_cred); /* hold bozon lock, but not basic vnode lock */
763 code = afs_read(avc, ap->a_uio, ap->a_cred, 0, 0, 0);
768 /* struct vop_getpages_args {
769 * struct vnode *a_vp;
773 * vm_oofset_t a_offset;
/*
 * afs_vop_getpages: VM pager "page-in" path.  Maps the supplied pages into
 * a pbuf's KVA, reads the data with afs_read() through a SYSSPACE uio, then
 * marks each page valid (fully or partially) and disposes of the pages that
 * were not the one actually requested (a_reqpage).
 */
777 afs_vop_getpages(struct vop_getpages_args *ap)
780 int i, nextoff, size, toff, npages;
791 if ((object = vp->v_object) == NULL) {
792 printf("afs_getpages: called with non-merged cache vnode??\n");
793 return VM_PAGER_ERROR;
795 npages = btoc(ap->a_count);
797 * If the requested page is partially valid, just return it and
798 * allow the pager to zero-out the blanks. Partially valid pages
799 * can only occur at the file EOF.
803 vm_page_t m = ap->a_m[ap->a_reqpage];
805 VM_OBJECT_LOCK(object);
806 ma_vm_page_lock_queues();
808 /* handled by vm_fault now */
809 /* vm_page_zero_invalid(m, TRUE); */
/* Requested page is usable as-is: free every other page and return. */
810 for (i = 0; i < npages; ++i) {
811 if (i != ap->a_reqpage) {
812 ma_vm_page_lock(ap->a_m[i]);
813 vm_page_free(ap->a_m[i]);
814 ma_vm_page_unlock(ap->a_m[i]);
817 ma_vm_page_unlock_queues();
818 VM_OBJECT_UNLOCK(object);
821 ma_vm_page_unlock_queues();
822 VM_OBJECT_UNLOCK(object);
/* Borrow a pageout buffer purely for its kernel virtual address range. */
824 bp = getpbuf(&afs_pbuf_freecnt);
826 kva = (vm_offset_t) bp->b_data;
827 pmap_qenter(kva, ap->a_m, npages);
828 MA_PCPU_INC(cnt.v_vnodein);
829 MA_PCPU_ADD(cnt.v_vnodepgsin, npages);
/* Build a kernel-space uio describing the mapped page run. */
831 iov.iov_base = (caddr_t) kva;
832 iov.iov_len = ap->a_count;
835 uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
836 uio.uio_resid = ap->a_count;
837 uio.uio_segflg = UIO_SYSSPACE;
838 uio.uio_rw = UIO_READ;
839 uio.uio_td = curthread;
842 osi_FlushPages(avc, osi_curcred()); /* hold bozon lock, but not basic vnode lock */
843 code = afs_read(avc, &uio, osi_curcred(), 0, 0, 0);
845 pmap_qremove(kva, npages);
847 relpbuf(bp, &afs_pbuf_freecnt);
/* Total failure (error and nothing transferred): free the extra pages. */
849 if (code && (uio.uio_resid == ap->a_count)) {
850 VM_OBJECT_LOCK(object);
851 ma_vm_page_lock_queues();
852 for (i = 0; i < npages; ++i) {
853 if (i != ap->a_reqpage)
854 vm_page_free(ap->a_m[i]);
856 ma_vm_page_unlock_queues();
857 VM_OBJECT_UNLOCK(object);
858 return VM_PAGER_ERROR;
/* Walk the pages, marking each valid according to how much was read. */
861 size = ap->a_count - uio.uio_resid;
862 VM_OBJECT_LOCK(object);
863 ma_vm_page_lock_queues();
864 for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
866 nextoff = toff + PAGE_SIZE;
869 /* XXX not in nfsclient? */
870 m->flags &= ~PG_ZERO;
872 if (nextoff <= size) {
874 * Read operation filled an entire page
876 m->valid = VM_PAGE_BITS_ALL;
877 #ifndef AFS_FBSD80_ENV
880 KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
882 } else if (size > toff) {
884 * Read operation filled a partial page.
887 vm_page_set_valid(m, 0, size - toff);
888 #ifndef AFS_FBSD80_ENV
891 KASSERT(m->dirty == 0, ("afs_getpages: page %p is dirty", m));
895 if (i != ap->a_reqpage) {
897 * Whether or not to leave the page activated is up in
898 * the air, but we should put the page on a page queue
899 * somewhere (it already is in the object). Result:
900 * It appears that emperical results show that
901 * deactivating pages is best.
905 * Just in case someone was asking for this page we
906 * now tell them that it is ok to use.
/* "page wanted" moved from flags (PG_WANTED) to oflags (VPO_WANTED) in 7.0 */
909 #if defined(AFS_FBSD70_ENV)
910 if (m->oflags & VPO_WANTED) {
912 if (m->flags & PG_WANTED) {
916 ma_vm_page_unlock(m);
920 vm_page_deactivate(m);
921 ma_vm_page_unlock(m);
927 ma_vm_page_unlock(m);
931 ma_vm_page_unlock_queues();
932 VM_OBJECT_UNLOCK(object);
/*
 * afs_vop_write: write a_uio to the file via afs_write(), after flushing
 * mapped pages so cached data stays coherent with the write.
 */
938 struct vop_write_args /* {
939 * struct vnode *a_vp;
942 * struct ucred *a_cred;
946 struct vcache *avc = VTOAFS(ap->a_vp);
948 osi_FlushPages(avc, ap->a_cred); /* hold bozon lock, but not basic vnode lock */
950 afs_write(VTOAFS(ap->a_vp), ap->a_uio, ap->a_ioflag, ap->a_cred, 0);
956 * struct vop_putpages_args {
957 * struct vnode *a_vp;
962 * vm_oofset_t a_offset;
966 * All of the pages passed to us in ap->a_m[] are already marked as busy,
967 * so there is no additional locking required to set their flags. -GAW
/*
 * afs_vop_putpages: VM pager "page-out" path.  Maps the dirty pages into a
 * pbuf's KVA, writes them with afs_write() through a SYSSPACE uio (IO_SYNC
 * when the pager requested a synchronous put), then reports per-page status
 * in a_rtvals and undirties the pages that were written.
 */
970 afs_vop_putpages(struct vop_putpages_args *ap)
973 int i, size, npages, sync;
983 /* Perhaps these two checks should just be KASSERTs instead... */
984 if (vp->v_object == NULL) {
985 printf("afs_putpages: called with non-merged cache vnode??\n");
986 return VM_PAGER_ERROR; /* XXX I think this is insufficient */
988 if (vType(avc) != VREG) {
989 printf("afs_putpages: not VREG");
990 return VM_PAGER_ERROR; /* XXX I think this is insufficient */
/* Assume failure for every page until the write succeeds. */
992 npages = btoc(ap->a_count);
993 for (i = 0; i < npages; i++)
994 ap->a_rtvals[i] = VM_PAGER_AGAIN;
995 bp = getpbuf(&afs_pbuf_freecnt);
997 kva = (vm_offset_t) bp->b_data;
998 pmap_qenter(kva, ap->a_m, npages);
999 MA_PCPU_INC(cnt.v_vnodeout);
1000 MA_PCPU_ADD(cnt.v_vnodepgsout, ap->a_count);
1002 iov.iov_base = (caddr_t) kva;
1003 iov.iov_len = ap->a_count;
1006 uio.uio_offset = IDX_TO_OFF(ap->a_m[0]->pindex);
1007 uio.uio_resid = ap->a_count;
1008 uio.uio_segflg = UIO_SYSSPACE;
1009 uio.uio_rw = UIO_WRITE;
1010 uio.uio_td = curthread;
1012 if (ap->a_sync & VM_PAGER_PUT_SYNC)
1014 /*if (ap->a_sync & VM_PAGER_PUT_INVAL)
1015 * sync |= IO_INVAL; */
1018 code = afs_write(avc, &uio, sync, osi_curcred(), 0);
1021 pmap_qremove(kva, npages);
1022 relpbuf(bp, &afs_pbuf_freecnt);
/* Mark the fully-written pages OK and clean. */
1025 size = ap->a_count - uio.uio_resid;
1026 for (i = 0; i < round_page(size) / PAGE_SIZE; i++) {
1027 ap->a_rtvals[i] = VM_PAGER_OK;
1028 vm_page_undirty(ap->a_m[i]);
1031 return ap->a_rtvals[0];
/*
 * afs_vop_ioctl: dispatch VICE ioctls (command group 'V') to HandleIoctl();
 * anything else is treated as a no-op.
 */
1036 struct vop_ioctl_args /* {
1037 * struct vnode *a_vp;
1041 * struct ucred *a_cred;
1042 * struct thread *a_td;
1045 struct vcache *tvc = VTOAFS(ap->a_vp);
1048 /* in case we ever get in here... */
1050 AFS_STATCNT(afs_ioctl);
1051 if (((ap->a_command >> 8) & 0xff) == 'V') {
1052 /* This is a VICEIOCTL call */
1054 error = HandleIoctl(tvc, ap->a_command, ap->a_data);
1058 /* No-op call; just return. */
/* afs_vop_poll: poll(2) support — currently does not check I/O readiness. */
1066 struct vop_poll_args /* {
1067 * struct vnode *a_vp;
1069 * struct ucred *a_cred;
1070 * struct thread *td;
1074 * We should really check to see if I/O is possible.
1082 * NB Currently unsupported.
1087 struct vop_mmap_args /* {
1088 * struct vnode *a_vp;
1090 * struct ucred *a_cred;
1091 * struct thread *td;
/*
 * afs_vop_fsync: flush the file to the server via afs_fsync().  Credential
 * source varies by release: td_ucred (6.0+), a_cred, or afs_osi_credp.
 */
1099 struct vop_fsync_args /* {
1100 * struct vnode *a_vp;
1102 * struct thread *td;
1106 struct vnode *vp = ap->a_vp;
1109 /*vflushbuf(vp, wait); */
1110 #ifdef AFS_FBSD60_ENV
1111 error = afs_fsync(VTOAFS(vp), ap->a_td->td_ucred);
1114 error = afs_fsync(VTOAFS(vp), ap->a_cred);
1116 error = afs_fsync(VTOAFS(vp), afs_osi_credp);
/* afs_vop_remove: unlink the entry named by a_cnp from directory a_dvp. */
1124 struct vop_remove_args /* {
1125 * struct vnode *a_dvp;
1126 * struct vnode *a_vp;
1127 * struct componentname *a_cnp;
1131 struct vnode *vp = ap->a_vp;
1132 struct vnode *dvp = ap->a_dvp;
1136 error = afs_remove(VTOAFS(dvp), name, cnp->cn_cred);
/*
 * afs_vop_link: create a hard link to a_vp named by a_cnp in directory
 * a_tdvp.  Rejects cross-mount links and links to directories; locks the
 * source vnode around the afs_link() call.
 */
1145 struct vop_link_args /* {
1146 * struct vnode *a_vp;
1147 * struct vnode *a_tdvp;
1148 * struct componentname *a_cnp;
1152 struct vnode *dvp = ap->a_tdvp;
1153 struct vnode *vp = ap->a_vp;
1154 struct thread *p = ap->a_cnp->cn_thread;
1157 if (dvp->v_mount != vp->v_mount) {
1161 if (vp->v_type == VDIR) {
1165 if ((error = ma_vn_lock(vp, LK_CANRECURSE | LK_EXCLUSIVE, p)) != 0) {
1169 error = afs_link(VTOAFS(vp), VTOAFS(dvp), name, cnp->cn_cred);
1172 MA_VOP_UNLOCK(vp, 0, p);
/*
 * afs_vop_rename: rename fdvp/fcnp -> tdvp/tcnp.  Rejects cross-device
 * renames; the fvp == tvp case degenerates to removing the source name
 * (pattern borrowed from ufs_rename, see comment below).  Copies both
 * component names into NUL-terminated buffers before calling afs_rename().
 */
1180 struct vop_rename_args /* {
1181 * struct vnode *a_fdvp;
1182 * struct vnode *a_fvp;
1183 * struct componentname *a_fcnp;
1184 * struct vnode *a_tdvp;
1185 * struct vnode *a_tvp;
1186 * struct componentname *a_tcnp;
1190 struct componentname *fcnp = ap->a_fcnp;
1192 struct componentname *tcnp = ap->a_tcnp;
1194 struct vnode *tvp = ap->a_tvp;
1195 struct vnode *tdvp = ap->a_tdvp;
1196 struct vnode *fvp = ap->a_fvp;
1197 struct vnode *fdvp = ap->a_fdvp;
1198 struct thread *p = fcnp->cn_thread;
1201 * Check for cross-device rename.
1203 if ((fvp->v_mount != tdvp->v_mount)
1204 || (tvp && (fvp->v_mount != tvp->v_mount))) {
1218 * if fvp == tvp, we're just removing one name of a pair of
1219 * directory entries for the same element. convert call into rename.
1220 ( (pinched from FreeBSD 4.4's ufs_rename())
1224 if (fvp->v_type == VDIR) {
1229 /* Release destination completely. */
1233 /* Delete source. */
/* Re-run the lookup in DELETE mode so VOP_REMOVE gets a fresh, locked
 * parent/child pair for the source name. */
1236 fcnp->cn_flags &= ~MODMASK;
1237 fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
1238 if ((fcnp->cn_flags & SAVESTART) == 0)
1239 panic("afs_rename: lost from startdir");
1240 fcnp->cn_nameiop = DELETE;
1242 error = relookup(fdvp, &fvp, fcnp);
1249 error = VOP_REMOVE(fdvp, fvp, fcnp);
1257 if ((error = ma_vn_lock(fvp, LK_EXCLUSIVE, p)) != 0)
/* Copy both names into NUL-terminated heap buffers for afs_rename(). */
1260 MALLOC(fname, char *, fcnp->cn_namelen + 1, M_TEMP, M_WAITOK);
1261 memcpy(fname, fcnp->cn_nameptr, fcnp->cn_namelen);
1262 fname[fcnp->cn_namelen] = '\0';
1263 MALLOC(tname, char *, tcnp->cn_namelen + 1, M_TEMP, M_WAITOK);
1264 memcpy(tname, tcnp->cn_nameptr, tcnp->cn_namelen);
1265 tname[tcnp->cn_namelen] = '\0';
1269 /* XXX use "from" or "to" creds? NFS uses "to" creds */
1271 afs_rename(VTOAFS(fdvp), fname, VTOAFS(tdvp), tname, tcnp->cn_cred);
1274 FREE(fname, M_TEMP);
1275 FREE(tname, M_TEMP);
/*
 * afs_vop_mkdir: create directory a_cnp in a_dvp with attributes a_vap;
 * on success return the new directory vnode locked in *a_vpp.
 */
1289 struct vop_mkdir_args /* {
1290 * struct vnode *a_dvp;
1291 * struct vnode **a_vpp;
1292 * struct componentname *a_cnp;
1293 * struct vattr *a_vap;
1296 struct vnode *dvp = ap->a_dvp;
1297 struct vattr *vap = ap->a_vap;
1300 struct thread *p = ap->a_cnp->cn_thread;
/* namei must have preserved the name buffer (SAVENAME from lookup). */
1304 if ((cnp->cn_flags & HASBUF) == 0)
1305 panic("afs_vop_mkdir: no name");
1308 error = afs_mkdir(VTOAFS(dvp), name, vap, &vcp, cnp->cn_cred);
1315 *ap->a_vpp = AFSTOV(vcp);
1316 ma_vn_lock(AFSTOV(vcp), LK_EXCLUSIVE | LK_RETRY, p);
/* afs_vop_rmdir: remove directory entry a_cnp from a_dvp via afs_rmdir(). */
1325 struct vop_rmdir_args /* {
1326 * struct vnode *a_dvp;
1327 * struct vnode *a_vp;
1328 * struct componentname *a_cnp;
1332 struct vnode *dvp = ap->a_dvp;
1336 error = afs_rmdir(VTOAFS(dvp), name, cnp->cn_cred);
1342 /* struct vop_symlink_args {
1343 * struct vnode *a_dvp;
1344 * struct vnode **a_vpp;
1345 * struct componentname *a_cnp;
1346 * struct vattr *a_vap;
/*
 * afs_vop_symlink: create a symlink to a_target, then look the new name
 * back up to obtain its vnode, returned locked in *a_vpp.
 */
1351 afs_vop_symlink(struct vop_symlink_args *ap)
1354 struct vnode *newvp;
1365 afs_symlink(VTOAFS(dvp), name, ap->a_vap, ap->a_target, cnp->cn_cred);
/* afs_symlink does not hand back a vcache; fetch it with a lookup. */
1367 error = afs_lookup(VTOAFS(dvp), name, &vcp, cnp->cn_cred);
1369 newvp = AFSTOV(vcp);
1370 ma_vn_lock(newvp, LK_EXCLUSIVE | LK_RETRY, cnp->cn_thread);
1375 *(ap->a_vpp) = newvp;
/*
 * afs_vop_readdir: read directory entries into a_uio via afs_readdir().
 * When the caller wants NFS-style cookies (a_ncookies != NULL), walk the
 * dirents just written into the uio buffer and synthesize one offset
 * cookie per entry into a MALLOC'd array returned via a_cookies.
 */
1381 struct vop_readdir_args /* {
1382 * struct vnode *a_vp;
1383 * struct uio *a_uio;
1384 * struct ucred *a_cred;
1386 * u_long *a_cookies;
1392 /* printf("readdir %x cookies %x ncookies %d\n", ap->a_vp, ap->a_cookies,
/* Remember the starting offset so we can locate the entries afterwards. */
1394 off = ap->a_uio->uio_offset;
1397 afs_readdir(VTOAFS(ap->a_vp), ap->a_uio, ap->a_cred, ap->a_eofflag);
1399 if (!error && ap->a_ncookies != NULL) {
1400 struct uio *uio = ap->a_uio;
1401 const struct dirent *dp, *dp_start, *dp_end;
1403 u_long *cookies, *cookiep;
/* Cookie generation only works on a single kernel-space iovec. */
1405 if (uio->uio_segflg != UIO_SYSSPACE || uio->uio_iovcnt != 1)
1406 panic("afs_readdir: burned cookies");
/* Back up from the current iov position to the first entry written. */
1407 dp = (const struct dirent *)
1408 ((const char *)uio->uio_iov->iov_base - (uio->uio_offset - off));
1410 dp_end = (const struct dirent *)uio->uio_iov->iov_base;
/* First pass: count entries; second pass: emit one cookie per entry. */
1411 for (dp_start = dp, ncookies = 0; dp < dp_end;
1412 dp = (const struct dirent *)((const char *)dp + dp->d_reclen))
1415 MALLOC(cookies, u_long *, ncookies * sizeof(u_long), M_TEMP,
1417 for (dp = dp_start, cookiep = cookies; dp < dp_end;
1418 dp = (const struct dirent *)((const char *)dp + dp->d_reclen)) {
1419 off += dp->d_reclen;
/* Ownership of the cookie array transfers to the caller. */
1422 *ap->a_cookies = cookies;
1423 *ap->a_ncookies = ncookies;
/* afs_vop_readlink: copy the symlink target into a_uio via afs_readlink(). */
1430 afs_vop_readlink(ap)
1431 struct vop_readlink_args /* {
1432 * struct vnode *a_vp;
1433 * struct uio *a_uio;
1434 * struct ucred *a_cred;
1438 /* printf("readlink %x\n", ap->a_vp);*/
1440 error = afs_readlink(VTOAFS(ap->a_vp), ap->a_uio, ap->a_cred);
/* Set by the VFS debug sysctl: print vnodes that go inactive while in use. */
1445 extern int prtactive;
/*
 * afs_vop_inactive: last reference dropped — let afs_InactiveVCache()
 * release the vcache's reference-counted state.  Pre-6.0 must also drop
 * the vnode lock itself before returning.
 */
1448 afs_vop_inactive(ap)
1449 struct vop_inactive_args /* {
1450 * struct vnode *a_vp;
1451 * struct thread *td;
1454 struct vnode *vp = ap->a_vp;
1456 if (prtactive && vp->v_usecount != 0)
1457 vprint("afs_vop_inactive(): pushing active", vp);
1460 afs_InactiveVCache(VTOAFS(vp), 0); /* decrs ref counts */
1462 #ifndef AFS_FBSD60_ENV
1463 MA_VOP_UNLOCK(vp, 0, ap->a_td);
1469 * struct vop_reclaim_args {
1470 * struct vnode *a_vp;
/*
 * afs_vop_reclaim: the VFS is tearing the vnode down — flush the in-memory
 * vcache (under afs_xvcache write lock, taking the GLOCK as needed) and
 * destroy the vnode's VM object.  afs_FlushVCache() failure is logged but
 * must not otherwise occur.
 */
1474 afs_vop_reclaim(struct vop_reclaim_args *ap)
1476 /* copied from ../OBSD/osi_vnodeops.c:afs_nbsd_reclaim() */
1478 struct vnode *vp = ap->a_vp;
1479 struct vcache *avc = VTOAFS(vp);
/* Record which locks we already hold so they can be restored on exit. */
1480 int haveGlock = ISAFS_GLOCK();
1481 int haveVlock = CheckLock(&afs_xvcache);
1486 ObtainWriteLock(&afs_xvcache, 901);
1487 /* reclaim the vnode and the in-memory vcache, but keep the on-disk vcache */
1488 code = afs_FlushVCache(avc, &slept);
1490 ReleaseWriteLock(&afs_xvcache);
1495 afs_warn("afs_vop_reclaim: afs_FlushVCache failed code %d vnode\n", code);
1499 /* basically, it must not fail */
1500 vnode_destroy_vobject(vp);
/*
 * afs_vop_bmap (pre-6.0 only): trivial logical-to-"physical" block mapping —
 * scale the logical block number to DEV_BSIZE units and report the vnode
 * itself as the backing device, with no read-ahead/read-behind runs.
 */
1506 #ifndef AFS_FBSD60_ENV
1509 struct vop_bmap_args /* {
1510 * struct vnode *a_vp;
1512 * struct vnode **a_vpp;
1519 *ap->a_bnp = ap->a_bn * (PAGE_SIZE / DEV_BSIZE);
1522 *ap->a_vpp = ap->a_vp;
1524 if (ap->a_runp != NULL)
1526 if (ap->a_runb != NULL)
/* afs_vop_strategy (pre-6.0 only): hand the buf straight to afs_ustrategy(). */
1534 afs_vop_strategy(ap)
1535 struct vop_strategy_args /* {
1541 error = afs_ustrategy(ap->a_bp, osi_curcred());
/*
 * afs_vop_print: debugging — dump the vcache's FID (cell.volume.vnode.unique),
 * open/writer counts, and a decoded subset of its state flags to the console.
 */
1548 struct vop_print_args /* {
1549 * struct vnode *a_vp;
1552 struct vnode *vp = ap->a_vp;
1553 struct vcache *vc = VTOAFS(ap->a_vp);
1554 int s = vc->f.states;
1556 printf("vc %p vp %p tag %s, fid: %d.%d.%d.%d, opens %d, writers %d", vc, vp, vp->v_tag,
1557 (int)vc->f.fid.Cell, (u_int) vc->f.fid.Fid.Volume,
1558 (u_int) vc->f.fid.Fid.Vnode, (u_int) vc->f.fid.Fid.Unique, vc->opens,
1559 vc->execsOrWriters,
1560 printf("\n states%s%s%s%s%s", (s & CStatd) ? " statd" : "",
1561 (s & CRO) ? " readonly" : "", (s & CDirty) ? " dirty" : "",
1562 (s & CMAPPED) ? " mapped" : "",
1563 (s & CVFlushed) ? " flush in progress" : "");
1569 * Advisory record locking support (fcntl() POSIX style)
1573 struct vop_advlock_args /* {
1574 * struct vnode *a_vp;
1577 * struct flock *a_fl;
1582 struct ucred cr = *osi_curcred();
1586 afs_lockctl(VTOAFS(ap->a_vp), ap->a_fl, ap->a_op, &cr, (int)ap->a_id);