2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "../afs/param.h"
15 #include "../h/systm.h"
16 #include "../h/types.h"
17 #include "../h/errno.h"
18 #include "../h/stat.h"
19 #include "../h/user.h"
21 #include "../h/vattr.h"
22 #include "../h/file.h"
24 #include "../h/chownx.h"
25 #include "../h/systm.h"
26 #include "../h/access.h"
27 #include "../rpc/types.h"
28 #include "../afs/osi_vfs.h"
29 #include "../netinet/in.h"
30 #include "../h/mbuf.h"
31 #include "../h/vmuser.h"
33 #include "../rpc/types.h"
34 #include "../rpc/xdr.h"
36 #include "../afs/stds.h"
37 #include "../afs/afs_osi.h"
38 #define RFTP_INTERNALS 1
39 #include "../afs/volerrors.h"
40 #include "../afsint/afsint.h"
41 #include "../afsint/vldbint.h"
42 #include "../afs/lock.h"
43 #include "../afs/exporter.h"
44 #include "../afs/afs.h"
45 #include "../afs/afs_chunkops.h"
46 #include "../afs/afs_stats.h"
47 #include "../afs/nfsclient.h"
48 #include "../afs/icl.h"
49 #include "../afs/prs_fs.h"
50 #include "../h/flock.h"
51 #include "../afs/afsincludes.h"
55 * declare all the functions so they can be used to init the table
57 /* creation/naming/deletion */
64 /* lookup, file handle stuff */
75 /* manipulate attributes of files */
79 /* data update operations */
87 int afs_gn_readlink();
92 int afs_gn_strategy();
103 * declare a struct vnodeops and initialize it with ptrs to all functions
105 struct vnodeops afs_gn_vnodeops = {
106 /* creation/naming/deletion */
113 /* lookup, file handle stuff */
116 /* access to files */
124 /* manipulate attributes of files */
128 /* data update operations */
142 /* security things */
148 afs_gn_enosys, /* vn_seek */
149 afs_gn_enosys, /* vn_spare0 */
150 afs_gn_enosys, /* vn_spare1 */
151 afs_gn_enosys, /* vn_spare2 */
152 afs_gn_enosys, /* vn_spare3 */
153 afs_gn_enosys, /* vn_spare4 */
154 afs_gn_enosys, /* vn_spare5 */
155 afs_gn_enosys, /* vn_spare6 */
156 afs_gn_enosys, /* vn_spare7 */
157 afs_gn_enosys, /* vn_spare8 */
158 afs_gn_enosys, /* vn_spare9 */
159 afs_gn_enosys, /* vn_spareA */
160 afs_gn_enosys, /* vn_spareB */
161 afs_gn_enosys, /* vn_spareC */
162 afs_gn_enosys, /* vn_spareD */
163 afs_gn_enosys, /* vn_spareE */
164 afs_gn_enosys /* vn_spareF */
166 struct vnodeops *afs_ops = &afs_gn_vnodeops;
170 afs_gn_link(vp, dp, name, cred)
178 AFS_STATCNT(afs_gn_link);
179 error = afs_link(vp, dp, name, cred);
180 afs_Trace3(afs_iclSetp, CM_TRACE_GNLINK, ICL_TYPE_POINTER, (afs_int32)vp,
181 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
187 afs_gn_mkdir(dp, name, mode, cred)
197 AFS_STATCNT(afs_gn_mkdir);
200 va.va_mode = (mode & 07777) & ~get_umask();
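/* Worked example (illustrative, not from the original source): with a
 * requested mode of 0777 and a process umask of 022, get_umask() returns
 * 022, so va.va_mode = (0777 & 07777) & ~022 = 0755. */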
201 error = afs_mkdir(dp, name, &va, &vp, cred);
205 afs_Trace4(afs_iclSetp, CM_TRACE_GMKDIR, ICL_TYPE_POINTER, (afs_int32)vp,
206 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
212 afs_gn_mknod(dp, name, mode, dev, cred)
223 AFS_STATCNT(afs_gn_mknod);
225 va.va_type = IFTOVT(mode);
226 va.va_mode = (mode & 07777) & ~get_umask();
228 /**** I'm not sure if suser() should stay here since it makes no sense in AFS; however the documentation says that one "should be super-user unless making a FIFO file". Other systems, such as SUN, do this checking in the early stages of mknod (before the abstraction), so the effect is equivalent. *****/
229 if (va.va_type != VFIFO && !suser(&error))
231 switch (va.va_type) {
233 error = afs_mkdir(dp, name, &va, &vp, cred);
243 error = afs_create(dp, name, &va, NONEXCL, mode, &vp, cred);
248 afs_Trace4(afs_iclSetp, CM_TRACE_GMKNOD, ICL_TYPE_POINTER, (afs_int32)vp,
249 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
255 afs_gn_remove(vp, dp, name, cred)
256 struct vnode *vp; /* Ignored in AFS */
263 AFS_STATCNT(afs_gn_remove);
264 error = afs_remove(dp, name, cred);
265 afs_Trace3(afs_iclSetp, CM_TRACE_GREMOVE, ICL_TYPE_POINTER, (afs_int32)dp,
266 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
272 afs_gn_rename(vp, dp, name, tp, tdp, tname, cred)
275 struct vnode *vp; /* Ignored in AFS */
276 struct vnode *tp; /* Ignored in AFS */
283 AFS_STATCNT(afs_gn_rename);
284 error = afs_rename(dp, name, tdp, tname, cred);
285 afs_Trace4(afs_iclSetp, CM_TRACE_GRENAME, ICL_TYPE_POINTER, (afs_int32)dp,
286 ICL_TYPE_STRING, name, ICL_TYPE_STRING, tname, ICL_TYPE_LONG, error);
292 afs_gn_rmdir(vp, dp, name, cred)
293 struct vnode *vp; /* Ignored in AFS */
300 AFS_STATCNT(afs_gn_rmdir);
301 error = afs_rmdir(dp, name, cred);
303 if (error == 66 /* 4.3's ENOTEMPTY */)
304 error = EEXIST; /* AIX returns EEXIST where 4.3 used ENOTEMPTY */
306 afs_Trace3(afs_iclSetp, CM_TRACE_GRMDIR, ICL_TYPE_POINTER, (afs_int32)dp,
307 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
313 afs_gn_lookup(dp, vpp, name, flags, vattrp, cred)
314 struct vattr *vattrp;
318 afs_uint32 flags; /* includes FOLLOW... */
323 AFS_STATCNT(afs_gn_lookup);
324 error = afs_lookup(dp, name, vpp, cred);
325 afs_Trace3(afs_iclSetp, CM_TRACE_GLOOKUP, ICL_TYPE_POINTER, (afs_int32)dp,
326 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
327 if (vattrp != NULL && error == 0)
328 afs_gn_getattr(*vpp, vattrp, cred);
334 afs_gn_fid(vp, fidp, cred)
341 AFS_STATCNT(afs_gn_fid);
342 error = afs_fid(vp, fidp);
343 afs_Trace3(afs_iclSetp, CM_TRACE_GFID, ICL_TYPE_POINTER, (afs_int32)vp,
344 ICL_TYPE_LONG, (afs_int32)fidp, ICL_TYPE_LONG, error);
350 afs_gn_open(vp, flags, ext, vinfop, cred)
353 int ext; /* Ignored in AFS */
354 struct ucred **vinfop; /* return ptr for fp->f_vinfo, used as fp->f_cred */
359 struct vcache *tvp = VTOAFS(vp);
362 AFS_STATCNT(afs_gn_open);
364 if ((flags & FREAD)) modes |= R_ACC;
365 if ((flags & FEXEC)) modes |= X_ACC;
366 if ((flags & FWRITE) || (flags & FTRUNC)) modes |= W_ACC;
368 while ((flags & FNSHARE) && tvp->opens) {
369 if (!(flags & FDELAY)) {
373 afs_osi_Sleep(&tvp->opens);
376 error = afs_access(vp, modes, cred);
381 error = afs_open(&vp, flags, cred);
383 if (flags & FTRUNC) {
386 error = afs_setattr(vp, &va, cred);
390 tvp->states |= CNSHARE;
393 *vinfop = cred; /* fp->f_vinfo is like fp->f_cred in suns */
396 /* an error occurred; we've told CM that the file
397 * is open, so close it now so that open and
398 * writer counts are correct. Ignore error code,
399 * as it is likely to fail (the setattr just did).
401 afs_close(vp, flags, cred);
406 afs_Trace3(afs_iclSetp, CM_TRACE_GOPEN, ICL_TYPE_POINTER, (afs_int32)vp,
407 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
413 afs_gn_create(dp, vpp, flags, name, mode, vinfop, cred)
419 struct ucred **vinfop; /* return ptr for fp->f_vinfo, used as fp->f_cred */
423 enum vcexcl exclusive;
426 AFS_STATCNT(afs_gn_create);
427 if ((flags & (O_EXCL|O_CREAT)) == (O_EXCL|O_CREAT))
433 va.va_mode = (mode & 07777) & ~get_umask();
434 if ((flags & FREAD)) modes |= R_ACC;
435 if ((flags & FEXEC)) modes |= X_ACC;
436 if ((flags & FWRITE) || (flags & FTRUNC)) modes |= W_ACC;
437 error = afs_create(dp, name, &va, exclusive, modes, vpp, cred);
441 /* 'cr_luid' is a flag (when it comes through the NFS server it's set to
442 * RMTUSER_REQ) that determines whether we should call afs_open(). We shouldn't
443 * call it for NFS traffic, since the close will never happen and thus
444 * we'd never flush the files out to the server! Gross, but the simplest
445 * solution we came up with. */
446 if (cred->cr_luid != RMTUSER_REQ) {
447 while ((flags & FNSHARE) && VTOAFS(*vpp)->opens) {
448 if (!(flags & FDELAY))
450 afs_osi_Sleep(&VTOAFS(*vpp)->opens);
452 /* Since the standard copen() in BSD vnode kernels does a
453 * vop_open after the vop_create, we must do the open here, since there
454 * is work in afs_open that we need. For example, advance the
455 * execsOrWriters flag (else we'll be treated as the Sun "core"
457 *vinfop = cred; /* save user creds in fp->f_vinfo */
458 error = afs_open(vpp, flags, cred);
460 afs_Trace4(afs_iclSetp, CM_TRACE_GCREATE, ICL_TYPE_POINTER, (afs_int32)dp,
461 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
470 AFS_STATCNT(afs_gn_hold);
481 struct vcache *vcp = VTOAFS(vp);
484 AFS_STATCNT(afs_gn_rele);
485 if (vp->v_count == 0)
486 osi_Panic("afs_rele: zero v_count");
487 if (--(vp->v_count) == 0) {
488 if (vcp->states & CPageHog) {
490 vcp->states &= ~CPageHog;
492 error = afs_inactive(vp, 0);
499 afs_gn_close(vp, flags, vinfo, cred)
502 caddr_t vinfo; /* Ignored in AFS */
506 struct vcache *tvp = VTOAFS(vp);
508 AFS_STATCNT(afs_gn_close);
510 if (flags & FNSHARE) {
511 tvp->states &= ~CNSHARE;
512 afs_osi_Wakeup(&tvp->opens);
515 error = afs_close(vp, flags, cred);
516 afs_Trace3(afs_iclSetp, CM_TRACE_GCLOSE, ICL_TYPE_POINTER, (afs_int32)vp,
517 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
523 afs_gn_map(vp, addr, len, off, flag, cred)
526 u_int len, off, flag;
529 struct vcache *vcp = VTOAFS(vp);
530 struct vrequest treq;
532 AFS_STATCNT(afs_gn_map);
534 if (error = afs_InitReq(&treq, cred)) return error;
535 error = afs_VerifyVCache(vcp, &treq);
537 return afs_CheckCode(error, &treq, 49);
539 osi_FlushPages(vcp, cred); /* XXX ensure old pages are gone XXX */
540 ObtainWriteLock(&vcp->lock, 401);
541 vcp->states |= CMAPPED; /* flag cleared at afs_inactive */
543 * We map the segment into our address space using the handle returned by vm_create.
546 /* Consider V_INTRSEG too for interrupts */
547 if (error = vms_create(&vcp->segid, V_CLIENT, vcp->v.v_gnode, vcp->m.Length, 0, 0)) {
548 ReleaseWriteLock(&vcp->lock);
551 vcp->vmh = SRVAL(vcp->segid, 0, 0);
553 vcp->v.v_gnode->gn_seg = vcp->segid; /* XXX Important XXX */
554 if (flag & SHM_RDONLY) {
555 vp->v_gnode->gn_mrdcnt++;
558 vp->v_gnode->gn_mwrcnt++;
561 * We keep the caller's credentials since an async daemon will handle the
562 * request at some point. We assume that the same credentials will be used.
564 if (!vcp->credp || (vcp->credp != cred)) {
567 struct ucred *crp = vcp->credp;
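/* A minimal sketch of the usual pattern for stashing a credential in the
 * vcache (an assumption for illustration, not the original code; it assumes
 * crhold/crfree are used here as crhold is elsewhere in this file):
 *
 *     crhold(cred);           take a reference for the vcache copy
 *     vcp->credp = cred;
 *     if (crp)
 *         crfree(crp);        drop the previously stashed reference
 */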
573 ReleaseWriteLock(&vcp->lock);
575 afs_Trace4(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, (afs_int32)vp,
576 ICL_TYPE_LONG, addr, ICL_TYPE_LONG, len, ICL_TYPE_LONG, off);
582 afs_gn_unmap(vp, flag, cred)
587 struct vcache *vcp = VTOAFS(vp);
588 AFS_STATCNT(afs_gn_unmap);
589 ObtainWriteLock(&vcp->lock, 402);
590 if (flag & SHM_RDONLY) {
591 vp->v_gnode->gn_mrdcnt--;
592 if (vp->v_gnode->gn_mrdcnt <=0) vp->v_gnode->gn_mrdcnt = 0;
595 vp->v_gnode->gn_mwrcnt--;
596 if (vp->v_gnode->gn_mwrcnt <=0) vp->v_gnode->gn_mwrcnt = 0;
598 ReleaseWriteLock(&vcp->lock);
606 afs_gn_access(vp, mode, who, cred)
615 AFS_STATCNT(afs_gn_access);
619 error = afs_access(vp, mode, cred);
621 /* Additional testing */
622 if (who == ACC_OTHERS || who == ACC_ANY) {
623 error = afs_getattr(vp, &vattr, cred);
625 if (who == ACC_ANY) {
626 if (((vattr.va_mode >> 6) & mode) == mode) {
631 if (((vattr.va_mode >> 3) & mode) == mode)
636 } else if (who == ACC_ALL) {
637 error = afs_getattr(vp, &vattr, cred);
639 if ((!((vattr.va_mode >> 6) & mode)) || (!((vattr.va_mode >> 3) & mode)) ||
640 (!(vattr.va_mode & mode)))
649 afs_Trace3(afs_iclSetp, CM_TRACE_GACCESS, ICL_TYPE_POINTER, (afs_int32)vp,
650 ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
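/* Worked example (illustrative): with vattr.va_mode = 0754 and mode = 04
 * (read), the owner bits (0754 >> 6) & 04 = 04, the group bits
 * (0754 >> 3) & 04 = 04, and the other bits 0754 & 04 = 04, so an ACC_ALL
 * check succeeds; with va_mode = 0750 the "other" test fails. */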
656 afs_gn_getattr(vp, vattrp, cred)
658 struct vattr *vattrp;
663 AFS_STATCNT(afs_gn_getattr);
664 error = afs_getattr(vp, vattrp, cred);
665 afs_Trace2(afs_iclSetp, CM_TRACE_GGETATTR, ICL_TYPE_POINTER, (afs_int32)vp,
666 ICL_TYPE_LONG, error);
672 afs_gn_setattr(vp, op, arg1, arg2, arg3, cred)
683 AFS_STATCNT(afs_gn_setattr);
691 if ((arg1 & T_OWNER_AS_IS) == 0)
693 if ((arg1 & T_GROUP_AS_IS) == 0)
698 error = afs_access(vp, VWRITE, cred);
702 if (arg1 & T_SETTIME) {
703 va.va_atime.tv_sec = time;
704 va.va_mtime.tv_sec = time;
706 va.va_atime = *(struct timestruc_t *) arg2;
707 va.va_mtime = *(struct timestruc_t *) arg3;
714 error = afs_setattr(vp, &va, cred);
715 afs_Trace2(afs_iclSetp, CM_TRACE_GSETATTR, ICL_TYPE_POINTER, (afs_int32)vp,
716 ICL_TYPE_LONG, error);
721 char zero_buffer[PAGESIZE];
723 afs_gn_fclear(vp, flags, offset, length, vinfo, cred)
731 int i, len, error = 0;
734 static int fclear_init =0;
735 register struct vcache *avc = VTOAFS(vp);
737 AFS_STATCNT(afs_gn_fclear);
739 memset(zero_buffer, 0, PAGESIZE);
743 * Don't clear past ulimit
745 if (offset + length > get_ulimit())
748 /* Flush all pages first */
751 vm_flushp(avc->segid, 0, MAXFSIZE/PAGESIZE - 1);
752 vms_iowait(avc->vmh);
755 uio.afsio_offset = offset;
756 for (i = offset; i < offset + length; i = uio.afsio_offset) {
757 len = offset + length - i;
758 iov.iov_len = (len > PAGESIZE) ? PAGESIZE : len;
759 iov.iov_base = zero_buffer;
760 uio.afsio_iov = &iov;
761 uio.afsio_iovcnt = 1;
762 uio.afsio_seg = AFS_UIOSYS;
763 uio.afsio_resid = iov.iov_len;
764 if (error = afs_rdwr(vp, &uio, UIO_WRITE, 0, cred))
767 afs_Trace4(afs_iclSetp, CM_TRACE_GFCLEAR, ICL_TYPE_POINTER, (afs_int32)vp,
768 ICL_TYPE_LONG, offset, ICL_TYPE_LONG, length, ICL_TYPE_LONG, error);
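/* Worked example (illustrative): with PAGESIZE = 4096, offset = 0 and
 * length = 10000, the loop above issues three writes from zero_buffer of
 * 4096, 4096 and 1808 bytes, advancing uio.afsio_offset after each. */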
774 afs_gn_fsync(vp, flags, vinfo, cred)
776 int flags; /* Not used by AFS */
777 caddr_t vinfo; /* Not used by AFS */
782 AFS_STATCNT(afs_gn_fsync);
783 error = afs_fsync(vp, cred);
784 afs_Trace3(afs_iclSetp, CM_TRACE_GFSYNC, ICL_TYPE_POINTER, (afs_int32)vp,
785 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
791 afs_gn_ftrunc(vp, flags, length, vinfo, cred)
793 int flags; /* Ignored in AFS */
795 caddr_t vinfo; /* Ignored in AFS */
801 AFS_STATCNT(afs_gn_ftrunc);
804 error = afs_setattr(vp, &va, cred);
805 afs_Trace4(afs_iclSetp, CM_TRACE_GFTRUNC, ICL_TYPE_POINTER, (afs_int32)vp,
806 ICL_TYPE_LONG, flags,
807 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length),
808 ICL_TYPE_LONG, error);
812 /* Min size of a file which is dumping core before we declare it a page hog. */
813 #define MIN_PAGE_HOG_SIZE 8388608
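/* For reference: 8388608 bytes is 8 MiB, i.e. 2048 pages at the 4 KiB AIX
 * page size. */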
815 int afs_gn_rdwr(vp, op, flags, ubuf, ext, vinfo, vattrp, cred)
820 int ext; /* Ignored in AFS */
821 caddr_t vinfo; /* Ignored in AFS */
822 struct vattr *vattrp;
825 register struct vcache *vcp = VTOAFS(vp);
826 struct vrequest treq;
830 AFS_STATCNT(afs_gn_rdwr);
833 if (op == UIO_WRITE) {
834 afs_Trace2(afs_iclSetp, CM_TRACE_GRDWR1,
835 ICL_TYPE_POINTER, (afs_int32)vp,
836 ICL_TYPE_LONG, vcp->vc_error);
837 return vcp->vc_error;
842 ObtainSharedLock(&vcp->lock, 507);
844 * We keep the caller's credentials since an async daemon will handle the
845 * request at some point. We assume that the same credentials will be used.
846 * If this is being called from an NFS server thread, then dupe the
847 * cred and only use that copy in calls and for the stash.
849 if (!vcp->credp || (vcp->credp != cred)) {
850 #ifdef AFS_AIX_IAUTH_ENV
851 if (AFS_NFSXLATORREQ(cred)) {
852 /* Must be able to use cred later, so dupe it so that nfs server
853 * doesn't overwrite its contents.
859 crhold(cred); /* Bump refcount for reference in vcache */
863 UpgradeSToWLock(&vcp->lock, 508);
866 ConvertWToSLock(&vcp->lock);
871 ReleaseSharedLock(&vcp->lock);
874 * XXX Is the following really required?? XXX
876 if (error = afs_InitReq(&treq, cred)) return error;
877 if (error = afs_VerifyVCache(vcp, &treq))
878 return afs_CheckCode(error, &treq, 50);
879 osi_FlushPages(vcp, cred); /* Flush old pages */
881 if (AFS_NFSXLATORREQ(cred)) {
884 if (op == UIO_READ) {
885 if (!afs_AccessOK(vcp, PRSFS_READ, &treq,
886 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
895 * We have to bump the open/exwriters field here courtesy of the nfs xlator
896 * because there are no open/close nfs rpcs to call our afs_open/close.
897 * We do a similar thing on the afs_read/write interface.
899 if (op == UIO_WRITE) {
900 #ifdef AFS_64BIT_CLIENT
901 if (ubuf->afsio_offset < afs_vmMappingEnd) {
902 #endif /* AFS_64BIT_CLIENT */
903 ObtainWriteLock(&vcp->lock,240);
904 vcp->states |= CDirty; /* Set the dirty bit */
906 ReleaseWriteLock(&vcp->lock);
907 #ifdef AFS_64BIT_CLIENT
909 #endif /* AFS_64BIT_CLIENT */
912 error = afs_vm_rdwr(vp, ubuf, op, flags, cred);
914 if (op == UIO_WRITE) {
915 #ifdef AFS_64BIT_CLIENT
916 if (ubuf->afsio_offset < afs_vmMappingEnd) {
917 #endif /* AFS_64BIT_CLIENT */
918 ObtainWriteLock(&vcp->lock,241);
919 afs_FakeClose(vcp, cred); /* XXXX For nfs trans and cores XXXX */
920 ReleaseWriteLock(&vcp->lock);
921 #ifdef AFS_64BIT_CLIENT
923 #endif /* AFS_64BIT_CLIENT */
925 if (vattrp != NULL && error == 0)
926 afs_gn_getattr(vp, vattrp, cred);
928 afs_Trace4(afs_iclSetp, CM_TRACE_GRDWR, ICL_TYPE_POINTER, (afs_int32)vp,
929 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, op, ICL_TYPE_LONG, error);
936 #define AFS_MAX_VM_CHUNKS 10
937 afs_vm_rdwr(vp, uiop, rw, ioflag, credp)
938 register struct vnode *vp;
944 register afs_int32 code = 0;
947 afs_size_t fileSize, xfrOffset, offset, old_offset;
949 #ifdef AFS_64BIT_CLIENT
950 afs_size_t finalOffset;
953 #endif /* AFS_64BIT_CLIENT */
954 register struct vcache *vcp = VTOAFS(vp);
956 afs_size_t start_offset;
957 afs_int32 save_resid = uiop->afsio_resid;
958 int first_page, last_page, pages;
961 struct vrequest treq;
963 if (code = afs_InitReq(&treq, credp)) return code;
965 /* special case easy transfer; apparently a lot are done */
966 if ((xfrSize=uiop->afsio_resid) == 0) return 0;
968 ObtainReadLock(&vcp->lock);
969 fileSize = vcp->m.Length;
970 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
971 uiop->afsio_offset = fileSize;
973 /* compute xfrOffset now, and do some checks */
974 xfrOffset = uiop->afsio_offset;
975 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
980 #ifndef AFS_64BIT_CLIENT
981 /* check for "file too big" error, which should really be done above us */
982 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
986 #endif /* AFS_64BIT_CLIENT */
988 #ifdef AFS_64BIT_CLIENT
989 if (xfrOffset + xfrSize > afs_vmMappingEnd) {
990 if (xfrOffset < afs_vmMappingEnd) {
991 /* special case of a buffer crossing the VM mapping line */
993 struct iovec tvec[16]; /* Should have access to #define */
997 finalOffset = xfrOffset + xfrSize;
998 tsize = (afs_size_t) (xfrOffset + xfrSize - afs_vmMappingEnd);
999 afsio_copy(uiop, &tuio, tvec);
1000 afsio_skip(&tuio, xfrSize - tsize);
1001 afsio_trim(&tuio, tsize);
1002 tuio.afsio_offset = afs_vmMappingEnd;
1003 ReleaseReadLock(&vcp->lock);
1004 ObtainWriteLock(&vcp->lock,243);
1005 afs_FakeClose(vcp, credp); /* XXXX For nfs trans and cores XXXX */
1006 ReleaseWriteLock(&vcp->lock);
1007 code = afs_direct_rdwr(vp, &tuio, rw, ioflag, credp);
1008 ObtainWriteLock(&vcp->lock,244);
1009 afs_FakeOpen(vcp); /* XXXX For nfs trans and cores XXXX */
1010 ReleaseWriteLock(&vcp->lock);
1011 ObtainReadLock(&vcp->lock);
1012 if (code) goto fail;
1013 xfrSize = (afs_size_t) (afs_vmMappingEnd - xfrOffset);
1014 afsio_trim(uiop, xfrSize);
1016 ReleaseReadLock(&vcp->lock);
1017 code = afs_direct_rdwr(vp, uiop, rw, ioflag, credp);
1021 #endif /* AFS_64BIT_CLIENT */
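/* Worked example of the split above (illustrative; the value of
 * afs_vmMappingEnd is whatever the 64-bit client build defines): if
 * xfrOffset sits 4 KiB below afs_vmMappingEnd and xfrSize is 12 KiB, then
 * tsize = 8 KiB is written via afs_direct_rdwr starting at
 * afs_vmMappingEnd, and uiop is trimmed to the remaining 4 KiB
 * (xfrSize = afs_vmMappingEnd - xfrOffset), which continues down the
 * normal VM path. */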
1024 /* Consider V_INTRSEG too for interrupts */
1025 if (code = vms_create(&vcp->segid, V_CLIENT, vcp->v.v_gnode,
1026 vcp->m.Length, 0, 0)) {
1029 vcp->vmh = SRVAL(vcp->segid, 0, 0);
1031 if (rw == UIO_READ) {
1032 /* don't read past EOF */
1033 if (xfrSize+xfrOffset > fileSize)
1034 xfrSize = fileSize - xfrOffset;
1035 if (xfrSize <= 0) goto fail;
1036 ReleaseReadLock(&vcp->lock);
1037 #ifdef AFS_64BIT_CLIENT
1038 toffset = xfrOffset;
1039 uiop->afsio_offset = xfrOffset;
1040 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE,
1041 ICL_TYPE_POINTER, vcp,
1042 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrOffset),
1043 ICL_TYPE_INT32, xfrSize);
1045 code = vm_move(vcp->segid, toffset, xfrSize, rw, uiop);
1046 #else /* AFS_64BIT_CLIENT */
1048 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
1049 #endif /* AFS_64BIT_CLIENT */
1052 * If at a chunk boundary and staying within chunk,
1053 * start prefetch of next chunk.
1055 if (counter == 0 || AFS_CHUNKOFFSET(xfrOffset) == 0
1056 && xfrSize <= AFS_CHUNKSIZE(xfrOffset)) {
1057 ObtainWriteLock(&vcp->lock,407);
1058 tdc = afs_FindDCache(vcp, xfrOffset);
1060 if (!(tdc->mflags & DFNextStarted))
1061 afs_PrefetchChunk(vcp, tdc, credp, &treq);
1064 ReleaseWriteLock(&vcp->lock);
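/* Illustrative example, assuming a 64 KiB chunk size: a read of at most
 * 64 KiB that starts exactly at a chunk boundary (AFS_CHUNKOFFSET == 0),
 * say at offset 0x30000, looks up that chunk's dcache entry and, if a
 * prefetch of the following chunk hasn't already been started
 * (DFNextStarted), kicks one off in the background. */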
1066 #ifdef AFS_64BIT_CLIENT
1068 uiop->afsio_offset = finalOffset;
1070 #endif /* AFS_64BIT_CLIENT */
1075 start_offset = uiop->afsio_offset;
1076 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE,
1077 ICL_TYPE_POINTER, vcp,
1078 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(start_offset),
1079 ICL_TYPE_INT32, xfrSize);
1080 ReleaseReadLock(&vcp->lock);
1081 ObtainWriteLock(&vcp->lock,400);
1082 vcp->m.Date = osi_Time(); /* Set file date (for ranlib) */
1084 /* un-protect last page. */
1085 last_page = vcp->m.Length/PAGESIZE;
1086 vm_protectp(vcp->vmh, last_page, 1, FILEKEY);
1087 if (xfrSize + xfrOffset > fileSize) {
1088 vcp->m.Length = xfrSize+xfrOffset;
1090 if ((!(vcp->states & CPageHog)) && (xfrSize >= MIN_PAGE_HOG_SIZE)) {
1092 vcp->states |= CPageHog;
1094 ReleaseWriteLock(&vcp->lock);
1096 /* If the write will fit into a single chunk we'll write all of it
1097 * at once. Otherwise, we'll write one chunk at a time, flushing
1098 * some of it to disk.
1102 /* Only create a page to avoid excess VM access if we're writing a
1103 * small file which is either new or completely overwrites the
1106 if ((xfrOffset == 0) && (xfrSize < PAGESIZE) && (xfrSize >= fileSize) &&
1107 (vcp->v.v_gnode->gn_mwrcnt == 0) &&
1108 (vcp->v.v_gnode->gn_mrdcnt == 0)) {
1109 (void) vm_makep(vcp->segid, 0);
1112 while (xfrSize > 0) {
1113 offset = AFS_CHUNKBASE(xfrOffset);
1116 if (AFS_CHUNKSIZE(xfrOffset) <= len)
1117 len = AFS_CHUNKSIZE(xfrOffset) - (xfrOffset - offset);
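/* Worked example (illustrative, assuming a 64 KiB chunk size): writing
 * 100000 bytes starting at xfrOffset = 70000 gives
 * offset = AFS_CHUNKBASE(70000) = 65536, so the first pass writes
 * len = 65536 - (70000 - 65536) = 61072 bytes, ending exactly at the next
 * chunk boundary; later passes then start chunk-aligned. */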
1119 if (len == xfrSize) {
1120 /* All data goes to this one chunk. */
1122 old_offset = uiop->afsio_offset;
1123 #ifdef AFS_64BIT_CLIENT
1124 uiop->afsio_offset = xfrOffset;
1125 toffset = xfrOffset;
1126 code = vm_move(vcp->segid, toffset, xfrSize, rw, uiop);
1127 #else /* AFS_64BIT_CLIENT */
1128 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
1129 #endif /* AFS_64BIT_CLIENT */
1135 /* Write just one chunk's worth of data. */
1137 struct iovec tvec[16]; /* Should have access to #define */
1139 /* Purge dirty chunks of file if there are too many dirty chunks.
1140 * Inside the write loop, we only do this at a chunk boundary.
1141 * Clean up partial chunk if necessary at end of loop.
1143 if (counter > 0 && code == 0 && xfrOffset == offset) {
1144 ObtainWriteLock(&vcp->lock,403);
1145 code = afs_DoPartialWrite(vcp, &treq);
1146 vcp->states |= CDirty;
1147 ReleaseWriteLock(&vcp->lock);
1151 afsio_copy(uiop, &tuio, tvec);
1152 afsio_trim(&tuio, len);
1153 tuio.afsio_offset = xfrOffset;
1156 old_offset = uiop->afsio_offset;
1157 #ifdef AFS_64BIT_CLIENT
1158 toffset = xfrOffset;
1159 code = vm_move(vcp->segid, toffset, len, rw, &tuio);
1160 #else /* AFS_64BIT_CLIENT */
1161 code = vm_move(vcp->segid, xfrOffset, len, rw, &tuio);
1162 #endif /* AFS_64BIT_CLIENT */
1164 len -= tuio.afsio_resid;
1165 afsio_skip(uiop, len);
1170 first_page = old_offset >> PGSHIFT;
1171 pages = 1 + ((old_offset + (len - 1)) >> PGSHIFT) - first_page;
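/* Worked example (illustrative, 4 KiB pages so PGSHIFT = 12): with
 * old_offset = 12388 and len = 5000, first_page = 12388 >> 12 = 3 and the
 * last byte 17387 lands in page 4, so pages = 1 + 4 - 3 = 2 dirty pages
 * get queued to vm_writep below. */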
1172 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE2,
1173 ICL_TYPE_POINTER, (afs_int32) vcp,
1174 ICL_TYPE_INT32, first_page,
1175 ICL_TYPE_INT32, pages);
1177 code = vm_writep(vcp->segid, first_page, pages);
1178 if (++count > AFS_MAX_VM_CHUNKS) {
1180 vms_iowait(vcp->segid);
1188 vms_iowait(vcp->segid);
1192 ObtainWriteLock(&vcp->lock,242);
1193 if (code == 0 && (vcp->states & CDirty)) {
1194 code = afs_DoPartialWrite(vcp, &treq);
1196 vm_protectp(vcp->vmh, last_page, 1, RDONLY);
1197 ReleaseWriteLock(&vcp->lock);
1199 /* If requested, fsync the file after every write */
1201 afs_fsync(vp, credp);
1203 ObtainReadLock(&vcp->lock);
1204 if (vcp->vc_error) {
1205 /* Pretend we didn't write anything. We need to get the error back to
1206 * the user. If we don't, it's possible for a write that hit a quota
1207 * error to appear to succeed and for the file to be closed without the user
1208 * ever having seen the store error. And the AIX syscall layer clears the error
1209 * if anything was written.
1211 code = vcp->vc_error;
1212 if (code == EDQUOT || code == ENOSPC)
1213 uiop->afsio_resid = save_resid;
1215 #ifdef AFS_64BIT_CLIENT
1217 uiop->afsio_offset = finalOffset;
1219 #endif /* AFS_64BIT_CLIENT */
1222 ReleaseReadLock(&vcp->lock);
1223 afs_Trace2(afs_iclSetp, CM_TRACE_VMWRITE3,
1224 ICL_TYPE_POINTER, vcp,
1225 ICL_TYPE_INT32, code);
1230 afs_direct_rdwr(vp, uiop, rw, ioflag, credp)
1231 register struct vnode *vp;
1235 struct ucred *credp;
1237 register afs_int32 code = 0;
1239 afs_size_t fileSize, xfrOffset, offset, old_offset;
1240 struct vcache *vcp = VTOAFS(vp);
1241 afs_int32 save_resid = uiop->afsio_resid;
1242 struct vrequest treq;
1244 if (code = afs_InitReq(&treq, credp)) return code;
1246 /* special case easy transfer; apparently a lot are done */
1247 if ((xfrSize=uiop->afsio_resid) == 0) return 0;
1249 ObtainReadLock(&vcp->lock);
1250 fileSize = vcp->m.Length;
1251 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
1252 uiop->afsio_offset = fileSize;
1254 /* compute xfrOffset now, and do some checks */
1255 xfrOffset = uiop->afsio_offset;
1256 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
1258 ReleaseReadLock(&vcp->lock);
1262 /* check for "file too big" error, which should really be done above us */
1264 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
1266 ReleaseReadLock(&vcp->lock);
1270 ReleaseReadLock(&vcp->lock);
1271 if (rw == UIO_WRITE) {
1272 ObtainWriteLock(&vcp->lock,400);
1273 vcp->m.Date = osi_Time(); /* Set file date (for ranlib) */
1275 if (xfrSize + xfrOffset > fileSize) {
1276 vcp->m.Length = xfrSize+xfrOffset;
1278 ReleaseWriteLock(&vcp->lock);
1280 afs_Trace3(afs_iclSetp, CM_TRACE_DIRECTRDWR,
1281 ICL_TYPE_POINTER, (afs_int32)vp,
1282 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(uiop->afsio_offset),
1283 ICL_TYPE_LONG, uiop->afsio_resid);
1284 code = afs_rdwr(vp, uiop, rw, ioflag, credp);
1286 uiop->afsio_resid = save_resid;
1288 uiop->afsio_offset = xfrOffset + xfrSize;
1289 if (uiop->afsio_resid > 0) {
1290 /* should zero here the remaining buffer */
1291 uiop->afsio_resid = 0;
1293 /* Purge dirty chunks of file if there are too many dirty chunks.
1294 * Inside the write loop, we only do this at a chunk boundary.
1295 * Clean up partial chunk if necessary at end of loop.
1297 if (AFS_CHUNKBASE(uiop->afsio_offset) != AFS_CHUNKBASE(xfrOffset)) {
1298 ObtainWriteLock(&vcp->lock,402);
1299 code = afs_DoPartialWrite(vcp, &treq);
1300 vcp->states |= CDirty;
1301 ReleaseWriteLock(&vcp->lock);
1310 static int lock_normalize(vp, lckdat, offset, cred)
1312 struct eflock *lckdat;
1319 switch(lckdat->l_whence) {
1323 lckdat->l_start += (off_t) offset;
1326 code = afs_getattr(vp, &vattr, cred);
1327 if (code != 0) return code;
1328 lckdat->l_start += (off_t) vattr.va_size;
1330 default: return EINVAL;
1332 lckdat->l_whence = 0;
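/* Illustrative example (assuming the elided case labels are the usual
 * SEEK_CUR / SEEK_END): a caller passing l_whence = SEEK_CUR has the
 * current file offset added to l_start, and one passing SEEK_END has the
 * file's current size (from afs_getattr) added; either way the caller
 * (afs_gn_lockctl below) then passes an absolute range with l_whence = 0
 * (SEEK_SET) on to afs_lockctl. */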
1338 afs_gn_lockctl(vp, offset, lckdat, cmd, ignored_fcn, ignored_id, cred)
1339 void (*ignored_fcn)();
1343 struct eflock *lckdat;
1348 struct vattr *attrs;
1350 AFS_STATCNT(afs_gn_lockctl);
1351 /* Convert from AIX's cmd to standard lockctl lock types... */
1354 else if (cmd & SETFLCK) {
1359 flkd.l_type = lckdat->l_type;
1360 flkd.l_whence = lckdat->l_whence;
1361 flkd.l_start = lckdat->l_start;
1362 flkd.l_len = lckdat->l_len;
1363 flkd.l_pid = lckdat->l_pid;
1364 flkd.l_sysid = lckdat->l_sysid;
1366 if (flkd.l_start != lckdat->l_start || flkd.l_len != lckdat->l_len)
1368 if (error = lock_normalize(vp, &flkd, offset, cred))
1370 error = afs_lockctl(vp, &flkd, ncmd, cred);
1371 lckdat->l_type = flkd.l_type;
1372 lckdat->l_whence = flkd.l_whence;
1373 lckdat->l_start = flkd.l_start;
1374 lckdat->l_len = flkd.l_len;
1375 lckdat->l_pid = flkd.l_pid;
1376 lckdat->l_sysid = flkd.l_sysid;
1377 afs_Trace3(afs_iclSetp, CM_TRACE_GLOCKCTL, ICL_TYPE_POINTER, (afs_int32)vp,
1378 ICL_TYPE_LONG, ncmd, ICL_TYPE_LONG, error);
1383 /* NOTE: In the nfs glue routine (nfs_gn2sun.c) the order was wrong (vp, flags, cmd, arg, ext); was that another typo? */
1384 int afs_gn_ioctl(vp, cmd, arg, flags, channel, ext)
1388 int flags; /* Ignored in AFS */
1389 int channel; /* Ignored in AFS */
1390 int ext; /* Ignored in AFS */
1393 AFS_STATCNT(afs_gn_ioctl);
1394 /* This seems to be a perfect fit for our ioctl redirection (afs_xioctl hack); thus the ioctl(2) entry in sysent.c is unaffected in the aix/afs port. */
1395 error = afs_ioctl(vp, cmd, arg);
1396 afs_Trace3(afs_iclSetp, CM_TRACE_GIOCTL, ICL_TYPE_POINTER, (afs_int32)vp,
1397 ICL_TYPE_LONG, cmd, ICL_TYPE_LONG, error);
1403 afs_gn_readlink(vp, uiop, cred)
1410 AFS_STATCNT(afs_gn_readlink);
1411 error = afs_readlink(vp, uiop, cred);
1412 afs_Trace2(afs_iclSetp, CM_TRACE_GREADLINK, ICL_TYPE_POINTER, (afs_int32)vp,
1413 ICL_TYPE_LONG, error);
1419 afs_gn_select(vp, which, vinfo, mpx)
1425 AFS_STATCNT(afs_gn_select);
1426 /* NO SUPPORT for this in afs YET! */
1432 afs_gn_symlink(vp, link, target, cred)
1441 AFS_STATCNT(afs_gn_symlink);
1444 error = afs_symlink(vp, link, &va, target, cred);
1445 afs_Trace4(afs_iclSetp, CM_TRACE_GSYMLINK, ICL_TYPE_POINTER, (afs_int32)vp,
1446 ICL_TYPE_STRING, link, ICL_TYPE_STRING, target, ICL_TYPE_LONG, error);
1452 afs_gn_readdir(vp, uiop, cred)
1459 AFS_STATCNT(afs_gn_readdir);
1460 error = afs_readdir(vp, uiop, cred);
1461 afs_Trace2(afs_iclSetp, CM_TRACE_GREADDIR, ICL_TYPE_POINTER, (afs_int32)vp,
1462 ICL_TYPE_LONG, error);
1467 extern Simple_lock afs_asyncbuf_lock;
1469 * Buffers are ranked by age. A buffer's age is the value of afs_biotime
1470 * when the buffer is processed by naix_vmstrategy. afs_biotime is
1471 * incremented for each buffer. A buffer's age is kept in its av_back field.
1472 * The age ranking is used by the daemons, which favor older buffers.
1474 afs_int32 afs_biotime = 0;
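/* Illustrative example: the first three buffers handed to the strategy
 * routine get ages 0, 1 and 2 stashed in their av_back fields; a daemon
 * choosing between them favors the one with the smallest age value, i.e.
 * the oldest request. */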
1476 extern struct buf *afs_asyncbuf;
1477 extern int afs_asyncbuf_cv;
1478 /* This function is called with a list of buffers, threaded through
1479 * the av_forw field. Our goal is to copy the list of buffers into the
1480 * afs_asyncbuf list, sorting buffers into sublists linked by the b_work field.
1481 * Among buffers in the same work group, the guy with the lowest address
1482 * has to be located at the head of the queue; his b_bcount field will also
1483 * be increased to cover all of the buffers in the b_work queue.
1485 #define AIX_VM_BLKSIZE 8192
1486 afs_gn_strategy(abp, cred)
1488 register struct buf *abp;
1490 register struct buf **lbp, *tbp;
1491 int *lwbp; /* last guy in work chain */
1492 struct buf *nbp, *qbp, *qnbp, *firstComparable;
1496 #define EFS_COMPARABLE(x,y) ((x)->b_vp == (y)->b_vp \
1497 && (x)->b_xmemd.subspace_id == (y)->b_xmemd.subspace_id \
1498 && (x)->b_flags == (y)->b_flags \
1499 && !((x)->b_flags & B_PFPROT) \
1500 && !((y)->b_flags & B_PFPROT))
1502 oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);
1503 for(tbp = abp; tbp; tbp=nbp) {
1504 nbp = tbp->av_forw; /* remember for later */
1506 tbp->av_back = (struct buf *) afs_biotime++;
1508 /* first insert the buffer into the afs_asyncbuf queue. Insert the buffer
1509 * sorted by its disk position within a set of comparable buffers.
1510 * Ensure that all comparable buffers are grouped contiguously.
1511 * Later on, we'll merge adjacent buffers into a single request.
1513 firstComparable = NULL;
1514 lbp = &afs_asyncbuf;
1515 for(qbp = *lbp; qbp; lbp = &qbp->av_forw, qbp = *lbp) {
1516 if (EFS_COMPARABLE(tbp, qbp)) {
1517 if (!firstComparable) firstComparable = qbp;
1518 /* this buffer is comparable, so see if the next buffer
1519 * is farther in the file; if it is, insert before the next buffer.
1521 if (tbp->b_blkno < qbp->b_blkno) {
1525 /* If we're at the end of a block of comparable buffers, we
1526 * insert the buffer here to keep all comparable buffers
1529 if (firstComparable)
1533 /* do the insert before qbp now */
1534 tbp->av_forw = *lbp;
1536 if (firstComparable == NULL) {
1537 /* next we're going to do all sorts of buffer merging tricks, but
1538 * here we know we're the only COMPARABLE block in the
1539 * afs_asyncbuf list, so we just skip that and continue with
1540 * the next input buffer.
1544 /* we may have actually added the "new" firstComparable */
1545 if (tbp->av_forw == firstComparable)
1546 firstComparable = tbp;
1548 * when we get here, firstComparable points to the first dude in the
1549 * same vnode and subspace that we (tbp) are in. We go through the
1550 * area of this list with COMPARABLE buffers (a contiguous region) and
1551 * repeatedly merge buffers that are contiguous and in the same block, or
1552 * buffers that are contiguous and are both integral numbers of blocks.
1553 * Note that our end goal is to have as big blocks as we can, but we
1554 * must minimize the transfers that are not integral #s of blocks on
1555 * block boundaries, since Episode will do those smaller and/or
1556 * unaligned I/Os synchronously.
1558 * A useful example to consider has the async queue with this in it:
1559 * [8K block, 2 pages] [4K block, 1 page] [4K hole] [8K block, 2 pages]
1560 * If we get a request that fills the 4K hole, we want to merge this
1561 * whole mess into a 24K, 6 page transfer. If we don't, however, we
1562 * don't want to do any merging since adding the 4K transfer to the 8K
1563 * transfer makes the 8K transfer synchronous.
1565 * Note that if there are any blocks whose size is a multiple of
1566 * the file system block size, then we know that such blocks are also
1567 * on block boundaries.
1570 doMerge = 1; /* start the loop */
1571 while(doMerge) { /* loop until an iteration doesn't
1572 * make any more changes */
1574 for (qbp = firstComparable; ; qbp = qnbp) {
1575 qnbp = qbp->av_forw;
1576 if (!qnbp) break; /* we're done */
1577 if (!EFS_COMPARABLE(qbp, qnbp)) break;
1579 /* try to merge qbp and qnbp */
1581 /* first check: if the two are not adjacent, go on to the next region */
1582 if ((dbtob(qbp->b_blkno) + qbp->b_bcount) != dbtob(qnbp->b_blkno))
1585 /* note: "both in the same block" means the first byte of the leftmost guy
1586 * and the last byte of the rightmost guy fall in the same block.
1588 if ((dbtob(qbp->b_blkno) & ~(AIX_VM_BLKSIZE-1)) ==
1589 ((dbtob(qnbp->b_blkno)+qnbp->b_bcount-1) & ~(AIX_VM_BLKSIZE-1))) {
1590 doMerge = 1; /* both in same block */
1592 else if ((qbp->b_bcount & (AIX_VM_BLKSIZE-1)) == 0
1593 && (qnbp->b_bcount & (AIX_VM_BLKSIZE-1)) == 0) {
1594 doMerge = 1; /* both integral #s of blocks */
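/* Worked example of the two tests above (illustrative, AIX_VM_BLKSIZE =
 * 8192): a 4096-byte buffer at byte offset 0 and a 2048-byte buffer at
 * byte offset 4096 are adjacent and both fall inside the same 8 KiB block
 * (bytes 0 and 6143 round down to block 0), so they merge; two adjacent
 * 8192-byte buffers also merge because both counts are whole blocks; but
 * a 4096-byte buffer followed by an adjacent 8192-byte one fails both
 * tests and is left alone. */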
1597 register struct buf *xbp;
1599 /* merge both of these blocks together */
1600 /* first set age to the older of the two */
1601 if ((int) qnbp->av_back - (int) qbp->av_back < 0)
1602 qbp->av_back = qnbp->av_back;
1603 lwbp = &qbp->b_work;
1604 /* find end of qbp's work queue */
1605 for(xbp = (struct buf *)(*lwbp); xbp;
1606 lwbp = &xbp->b_work, xbp = (struct buf *) (*lwbp));
1608 * now setting *lwbp will change the last ptr in the qbp's
1611 qbp->av_forw = qnbp->av_forw; /* splice out qnbp */
1612 qbp->b_bcount += qnbp->b_bcount; /* fix count */
1613 *lwbp = (int) qnbp; /* append qnbp to end */
1615 * note that qnbp is bogus, but it doesn't matter because
1616 * we're going to restart the for loop now.
1618 break; /* out of the for loop */
1622 } /* for loop for all interrupt data */
1623 /* at this point, all I/O has been queued. Wake up the daemon */
1624 e_wakeup_one((int*) &afs_asyncbuf_cv);
1625 unlock_enable(oldPriority, &afs_asyncbuf_lock);
1630 afs_inactive(avc, acred)
1631 register struct vcache *avc;
1632 struct AFS_UCRED *acred;
1634 afs_InactiveVCache(avc, acred);
1641 AFS_STATCNT(afs_gn_revoke);
1642 /* NO SUPPORT for this in afs YET! */
1646 int afs_gn_getacl(vp, uiop, cred)
1655 int afs_gn_setacl(vp, uiop, cred)
1664 int afs_gn_getpcl(vp, uiop, cred)
1673 int afs_gn_setpcl(vp, uiop, cred)
1685 extern struct vfsops Afs_vfsops;
1686 extern struct vnodeops afs_gn_vnodeops;
1687 extern int Afs_init();
1689 #define AFS_CALLOUT_TBL_SIZE 256
1692 * the following additional layer of gorp is due to the fact that the
1693 * filesystem layer no longer obtains the kernel lock for me. I was relying
1694 * on this behavior to avoid having to think about locking.
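/* A minimal sketch of the wrapper pattern each stub below follows
 * (reconstructed from the visible fragments; the exact AFS_GLOCK /
 * AFS_GUNLOCK placement is assumed, not copied from the original):
 *
 *     static int
 *     vfs_mount(struct vfs *a, struct ucred *b) {
 *         register glockOwner, ret;
 *
 *         glockOwner = ISAFS_GLOCK();        remember if we already hold the AFS global lock
 *         if (!glockOwner)
 *             AFS_GLOCK();                   take it for the duration of the call
 *         ret = (*Afs_vfsops.vfs_mount)(a, b);
 *         if (!glockOwner)
 *             AFS_GUNLOCK();                 drop it only if we took it here
 *         return ret;
 *     }
 */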
1698 vfs_mount(struct vfs *a, struct ucred *b) {
1699 register glockOwner, ret;
1701 glockOwner = ISAFS_GLOCK();
1704 ret = (*Afs_vfsops.vfs_mount)(a, b);
1712 vfs_unmount(struct vfs *a, int b, struct ucred *c) {
1713 register glockOwner, ret;
1715 glockOwner = ISAFS_GLOCK();
1718 ret = (*Afs_vfsops.vfs_unmount)(a, b, c);
1726 vfs_root(struct vfs *a, struct vnode **b, struct ucred *c) {
1727 register glockOwner, ret;
1729 glockOwner = ISAFS_GLOCK();
1732 ret = (*Afs_vfsops.vfs_root)(a, b, c);
1740 vfs_statfs(struct vfs *a, struct statfs *b, struct ucred *c) {
1741 register glockOwner, ret;
1743 glockOwner = ISAFS_GLOCK();
1746 ret = (*Afs_vfsops.vfs_statfs)(a, b, c);
1754 vfs_sync(struct gfs *a) {
1755 register glockOwner, ret;
1757 glockOwner = ISAFS_GLOCK();
1760 ret = (*Afs_vfsops.vfs_sync)(a);
1767 vfs_vget(struct vfs *a, struct vnode **b, struct fileid *c
1768 , struct ucred *d) {
1769 register glockOwner, ret;
1771 glockOwner = ISAFS_GLOCK();
1774 ret = (*Afs_vfsops.vfs_vget)(a, b, c, d);
1782 vfs_cntl(struct vfs *a, int b, caddr_t c, size_t d, struct ucred *e) {
1783 register glockOwner, ret;
1785 glockOwner = ISAFS_GLOCK();
1788 ret = (*Afs_vfsops.vfs_cntl)(a, b, c, d, e);
1796 vfs_quotactl(struct vfs *a, int b, uid_t c, caddr_t d
1797 , struct ucred *e) {
1798 register glockOwner, ret;
1800 glockOwner = ISAFS_GLOCK();
1803 ret = (*Afs_vfsops.vfs_quotactl)(a, b, c, d, e);
1811 struct vfsops locked_Afs_vfsops = {
1823 vn_link(struct vnode *a, struct vnode *b, char *c, struct ucred *d) {
1824 register glockOwner, ret;
1826 glockOwner = ISAFS_GLOCK();
1829 ret = (*afs_gn_vnodeops.vn_link)(a, b, c, d);
1837 vn_mkdir(struct vnode *a, char *b, int c, struct ucred *d) {
1838 register glockOwner, ret;
1840 glockOwner = ISAFS_GLOCK();
1843 ret = (*afs_gn_vnodeops.vn_mkdir)(a, b, c, d);
1851 vn_mknod(struct vnode *a, caddr_t b, int c, dev_t d, struct ucred *e) {
1852 register glockOwner, ret;
1854 glockOwner = ISAFS_GLOCK();
1857 ret = (*afs_gn_vnodeops.vn_mknod)(a, b, c, d, e);
1865 vn_remove(struct vnode *a, struct vnode *b, char *c, struct ucred *d) {
1866 register glockOwner, ret;
1868 glockOwner = ISAFS_GLOCK();
1871 ret = (*afs_gn_vnodeops.vn_remove)(a, b, c, d);
1879 vn_rename(struct vnode *a, struct vnode *b, caddr_t c
1880 , struct vnode *d, struct vnode *e, caddr_t f, struct ucred *g) {
1881 register glockOwner, ret;
1883 glockOwner = ISAFS_GLOCK();
1886 ret = (*afs_gn_vnodeops.vn_rename)(a, b, c, d, e, f, g);
1894 vn_rmdir(struct vnode *a, struct vnode *b, char *c, struct ucred *d) {
1895 register glockOwner, ret;
1897 glockOwner = ISAFS_GLOCK();
1900 ret = (*afs_gn_vnodeops.vn_rmdir)(a, b, c, d);
1908 vn_lookup(struct vnode *a, struct vnode **b, char *c, int d,
1909 struct vattr *v, struct ucred *e) {
1910 register glockOwner, ret;
1912 glockOwner = ISAFS_GLOCK();
1915 ret = (*afs_gn_vnodeops.vn_lookup)(a, b, c, d, v, e);
1923 vn_fid(struct vnode *a, struct fileid *b, struct ucred *c) {
1924 register glockOwner, ret;
1926 glockOwner = ISAFS_GLOCK();
1929 ret = (*afs_gn_vnodeops.vn_fid)(a, b, c);
1937 vn_open(struct vnode *a, int b, int c, caddr_t *d, struct ucred *e) {
1938 register glockOwner, ret;
1940 glockOwner = ISAFS_GLOCK();
1943 ret = (*afs_gn_vnodeops.vn_open)(a, b, c, d, e);
1951 vn_create(struct vnode *a, struct vnode **b, int c, caddr_t d
1952 , int e, caddr_t *f, struct ucred *g) {
1953 register glockOwner, ret;
1955 glockOwner = ISAFS_GLOCK();
1958 ret = (*afs_gn_vnodeops.vn_create)(a, b, c, d, e, f, g);
1966 vn_hold(struct vnode *a) {
1967 register glockOwner, ret;
1969 glockOwner = ISAFS_GLOCK();
1972 ret = (*afs_gn_vnodeops.vn_hold)(a);
1980 vn_rele(struct vnode *a) {
1981 register glockOwner, ret;
1983 glockOwner = ISAFS_GLOCK();
1986 ret = (*afs_gn_vnodeops.vn_rele)(a);
1994 vn_close(struct vnode *a, int b, caddr_t c, struct ucred *d) {
1995 register glockOwner, ret;
1997 glockOwner = ISAFS_GLOCK();
2000 ret = (*afs_gn_vnodeops.vn_close)(a, b, c, d);
2008 vn_map(struct vnode *a, caddr_t b, uint c, uint d, uint e, struct ucred *f) {
2009 register glockOwner, ret;
2011 glockOwner = ISAFS_GLOCK();
2014 ret = (*afs_gn_vnodeops.vn_map)(a, b, c, d, e, f);
2022 vn_unmap(struct vnode *a, int b, struct ucred *c) {
2023 register glockOwner, ret;
2025 glockOwner = ISAFS_GLOCK();
2028 ret = (*afs_gn_vnodeops.vn_unmap)(a, b, c);
2036 vn_access(struct vnode *a, int b, int c, struct ucred *d) {
2037 register glockOwner, ret;
2039 glockOwner = ISAFS_GLOCK();
2042 ret = (*afs_gn_vnodeops.vn_access)(a, b, c, d);
2050 vn_getattr(struct vnode *a, struct vattr *b, struct ucred *c) {
2051 register glockOwner, ret;
2053 glockOwner = ISAFS_GLOCK();
2056 ret = (*afs_gn_vnodeops.vn_getattr)(a, b, c);
2064 vn_setattr(struct vnode *a, int b, int c, int d, int e, struct ucred *f) {
2065 register glockOwner, ret;
2067 glockOwner = ISAFS_GLOCK();
2070 ret = (*afs_gn_vnodeops.vn_setattr)(a, b, c, d, e, f);
2078 vn_fclear(struct vnode *a, int b, offset_t c, offset_t d
2079 , caddr_t e, struct ucred *f) {
2080 register glockOwner, ret;
2082 glockOwner = ISAFS_GLOCK();
2085 ret = (*afs_gn_vnodeops.vn_fclear)(a, b, c, d, e, f);
2093 vn_fsync(struct vnode *a, int b, int c, struct ucred *d) {
2094 register glockOwner, ret;
2096 glockOwner = ISAFS_GLOCK();
2099 ret = (*afs_gn_vnodeops.vn_fsync)(a, b, c, d);
2107 vn_ftrunc(struct vnode *a, int b, offset_t c, caddr_t d, struct ucred *e) {
2108 register glockOwner, ret;
2110 glockOwner = ISAFS_GLOCK();
2113 ret = (*afs_gn_vnodeops.vn_ftrunc)(a, b, c, d, e);
2121 vn_rdwr(struct vnode *a, enum uio_rw b, int c, struct uio *d
2122 , int e, caddr_t f, struct vattr *v, struct ucred *g) {
2123 register glockOwner, ret;
2125 glockOwner = ISAFS_GLOCK();
2128 ret = (*afs_gn_vnodeops.vn_rdwr)(a, b, c, d, e, f, v, g);
2136 vn_lockctl(struct vnode *a, offset_t b, struct eflock *c, int d
2137 , int (*e)(), ulong *f, struct ucred *g) {
2138 register glockOwner, ret;
2140 glockOwner = ISAFS_GLOCK();
2143 ret = (*afs_gn_vnodeops.vn_lockctl)(a, b, c, d, e, f, g);
2151 vn_ioctl(struct vnode *a, int b, caddr_t c, size_t d, int e, struct ucred *f) {
2152 register glockOwner, ret;
2154 glockOwner = ISAFS_GLOCK();
2157 ret = (*afs_gn_vnodeops.vn_ioctl)(a, b, c, d, e, f);
2165 vn_readlink(struct vnode *a, struct uio *b, struct ucred *c) {
2166 register glockOwner, ret;
2168 glockOwner = ISAFS_GLOCK();
2171 ret = (*afs_gn_vnodeops.vn_readlink)(a, b, c);
2179 vn_select(struct vnode *a, int b, ushort c, ushort *d, void (*e)()
2180 , caddr_t f, struct ucred *g) {
2181 register glockOwner, ret;
2183 glockOwner = ISAFS_GLOCK();
2186 ret = (*afs_gn_vnodeops.vn_select)(a, b, c, d, e, f, g);
2194 vn_symlink(struct vnode *a, char *b, char *c, struct ucred *d) {
2195 register glockOwner, ret;
2197 glockOwner = ISAFS_GLOCK();
2200 ret = (*afs_gn_vnodeops.vn_symlink)(a, b, c, d);
2208 vn_readdir(struct vnode *a, struct uio *b, struct ucred *c) {
2209 register glockOwner, ret;
2211 glockOwner = ISAFS_GLOCK();
2214 ret = (*afs_gn_vnodeops.vn_readdir)(a, b, c);
2222 vn_revoke(struct vnode *a, int b, int c, struct vattr *d, struct ucred *e) {
2223 register glockOwner, ret;
2225 glockOwner = ISAFS_GLOCK();
2228 ret = (*afs_gn_vnodeops.vn_revoke)(a, b, c, d, e);
2236 vn_getacl(struct vnode *a, struct uio *b, struct ucred *c) {
2237 register glockOwner, ret;
2239 glockOwner = ISAFS_GLOCK();
2242 ret = (*afs_gn_vnodeops.vn_getacl)(a, b, c);
2250 vn_setacl(struct vnode *a, struct uio *b, struct ucred *c) {
2251 register glockOwner, ret;
2253 glockOwner = ISAFS_GLOCK();
2256 ret = (*afs_gn_vnodeops.vn_setacl)(a, b, c);
2264 vn_getpcl(struct vnode *a, struct uio *b, struct ucred *c) {
2265 register glockOwner, ret;
2267 glockOwner = ISAFS_GLOCK();
2270 ret = (*afs_gn_vnodeops.vn_getpcl)(a, b, c);
2278 vn_setpcl(struct vnode *a, struct uio *b, struct ucred *c) {
2279 register glockOwner, ret;
2281 glockOwner = ISAFS_GLOCK();
2284 ret = (*afs_gn_vnodeops.vn_setpcl)(a, b, c);
2291 extern int afs_gn_strategy();
2293 struct vnodeops locked_afs_gn_vnodeops = {
2322 afs_gn_strategy, /* no locking!!! (discovered the hard way) */
2330 struct gfs afs_gfs = {
2332 &locked_afs_gn_vnodeops,
2336 GFS_VERSION4 | GFS_REMOTE,