2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "../afs/param.h"
15 #include "../h/systm.h"
16 #include "../h/types.h"
17 #include "../h/errno.h"
18 #include "../h/stat.h"
19 #include "../h/user.h"
21 #include "../h/vattr.h"
22 #include "../h/file.h"
24 #include "../h/chownx.h"
25 #include "../h/systm.h"
26 #include "../h/access.h"
30 #include "../rpc/types.h"
31 #include "../afs/osi_vfs.h"
32 #include "../netinet/in.h"
33 #include "../h/mbuf.h"
34 #include "../h/vmuser.h"
36 #include "../rpc/types.h"
37 #include "../rpc/xdr.h"
39 #include "../afs/stds.h"
40 #include "../afs/afs_osi.h"
41 #define RFTP_INTERNALS 1
42 #include "../afs/volerrors.h"
43 #include "../afsint/afsint.h"
44 #include "../afsint/vldbint.h"
45 #include "../afs/lock.h"
46 #include "../afs/exporter.h"
47 #include "../afs/afs.h"
48 #include "../afs/afs_chunkops.h"
49 #include "../afs/afs_stats.h"
50 #include "../afs/nfsclient.h"
51 #include "../afs/icl.h"
52 #include "../afs/prs_fs.h"
53 #include "../h/flock.h"
54 #include "../afs/afsincludes.h"
/*
 * Forward declarations and the AIX vnodeops dispatch table.
 * NOTE(review): this excerpt elides most entries; the table wires each AIX
 * VNOP slot to an afs_gn_* handler, with afs_gn_enosys filling slots AFS
 * does not implement.  afs_ops is the pointer handed to the AIX VFS layer.
 */
58 * declare all the functions so they can be used to init the table
60 /* creation/naming/deletion */
67 /* lookup, file handle stuff */
78 /* manipulate attributes of files */
82 /* data update operations */
90 int afs_gn_readlink();
95 int afs_gn_strategy();
106 * declare a struct vnodeops and initialize it with ptrs to all functions
108 struct vnodeops afs_gn_vnodeops = {
109 /* creation/naming/deletion */
116 /* lookup, file handle stuff */
119 /* access to files */
127 /* manipulate attributes of files */
131 /* data update operations */
145 /* security things */
151 afs_gn_enosys, /* vn_seek */
152 afs_gn_enosys, /* vn_fsync_range */
153 afs_gn_enosys, /* vn_create_attr */
154 afs_gn_enosys, /* vn_finfo */
155 afs_gn_enosys, /* vn_map_lloff */
156 afs_gn_enosys, /* vn_readdir_eofp */
157 afs_gn_enosys, /* vn_rdwr_attr */
158 afs_gn_enosys, /* vn_memcntl */
159 afs_gn_enosys, /* vn_spare7 */
160 afs_gn_enosys, /* vn_spare8 */
161 afs_gn_enosys, /* vn_spare9 */
162 afs_gn_enosys, /* vn_spareA */
163 afs_gn_enosys, /* vn_spareB */
164 afs_gn_enosys, /* vn_spareC */
165 afs_gn_enosys, /* vn_spareD */
166 afs_gn_enosys, /* vn_spareE */
167 afs_gn_enosys /* vn_spareF */
/* Pager entry points (conditional on AIX level — guards elided in excerpt). */
169 ,afs_gn_enosys, /* pagerBackRange */
170 afs_gn_enosys, /* pagerGetFileSize */
171 afs_gn_enosys, /* pagerReadAhead */
172 afs_gn_enosys, /* pagerWriteBehind */
173 afs_gn_enosys /* pagerEndCopy */
/* Exported handle the AIX gfs layer uses to dispatch into AFS. */
176 struct vnodeops *afs_ops = &afs_gn_vnodeops;
/*
 * VNOP link: create a hard link 'name' in directory dp to vnode vp.
 * Delegates to afs_link() and traces the outcome.
 */
180 afs_gn_link(vp, dp, name, cred)
188 AFS_STATCNT(afs_gn_link);
189 error = afs_link(vp, dp, name, cred);
190 afs_Trace3(afs_iclSetp, CM_TRACE_GNLINK, ICL_TYPE_POINTER, (afs_int32)vp,
191 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/*
 * VNOP mkdir: create directory 'name' under dp.
 * Builds a vattr with mode masked by the caller's umask, then calls
 * afs_mkdir() and traces the result.
 */
197 afs_gn_mkdir(dp, name, Mode, cred)
212 AFS_STATCNT(afs_gn_mkdir);
/* Apply process umask to the low 12 permission bits. */
215 va.va_mode = (mode & 07777) & ~get_umask();
216 error = afs_mkdir(dp, name, &va, &vp, cred);
220 afs_Trace4(afs_iclSetp, CM_TRACE_GMKDIR, ICL_TYPE_POINTER, (afs_int32)vp,
221 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
/*
 * VNOP mknod: create a node of type IFTOVT(mode) named 'name' under dp.
 * Directories go through afs_mkdir(); regular-file-like types go through
 * afs_create() (other switch arms elided in this excerpt).
 */
227 afs_gn_mknod(dp, name, Mode, dev, cred)
243 AFS_STATCNT(afs_gn_mknod);
245 va.va_type = IFTOVT(mode);
246 va.va_mode = (mode & 07777) & ~get_umask();
248 /**** I'm not sure if suser() should stay here since it makes no sense in AFS; however the documentation says that one "should be super-user unless making a FIFO file. Other systems such as SUN do this checking in the early stages of mknod (before the abstraction), so it's equivalently the same! *****/
/* Non-FIFO special files require super-user; suser() also sets 'error'. */
249 if (va.va_type != VFIFO && !suser(&error))
251 switch (va.va_type) {
253 error = afs_mkdir(dp, name, &va, &vp, cred);
263 error = afs_create(dp, name, &va, NONEXCL, mode, &vp, cred);
268 afs_Trace4(afs_iclSetp, CM_TRACE_GMKNOD, ICL_TYPE_POINTER, (afs_int32)vp,
269 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
/*
 * VNOP remove: unlink 'name' from directory dp.
 * The vp argument is unused by AFS; afs_remove() resolves by name.
 */
275 afs_gn_remove(vp, dp, name, cred)
276 struct vnode *vp; /* Ignored in AFS */
283 AFS_STATCNT(afs_gn_remove);
284 error = afs_remove(dp, name, cred);
285 afs_Trace3(afs_iclSetp, CM_TRACE_GREMOVE, ICL_TYPE_POINTER, (afs_int32)dp,
286 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/*
 * VNOP rename: move dp/name to tdp/tname.
 * The source and target vnode pointers are unused; AFS renames by name.
 */
292 afs_gn_rename(vp, dp, name, tp, tdp, tname, cred)
295 struct vnode *vp; /* Ignored in AFS */
296 struct vnode *tp; /* Ignored in AFS */
303 AFS_STATCNT(afs_gn_rename);
304 error = afs_rename(dp, name, tdp, tname, cred);
305 afs_Trace4(afs_iclSetp, CM_TRACE_GRENAME, ICL_TYPE_POINTER, (afs_int32)dp,
306 ICL_TYPE_STRING, name, ICL_TYPE_STRING, tname, ICL_TYPE_LONG, error);
/*
 * VNOP rmdir: remove directory dp/name.
 * Translates the BSD ENOTEMPTY value (66) into EEXIST, which is what
 * AIX callers expect for a non-empty directory.
 */
312 afs_gn_rmdir(vp, dp, name, cred)
313 struct vnode *vp; /* Ignored in AFS */
320 AFS_STATCNT(afs_gn_rmdir);
321 error = afs_rmdir(dp, name, cred);
323 if (error == 66 /* 4.3's ENOTEMPTY */)
324 error = EEXIST; /* AIX returns EEXIST where 4.3 used ENOTEMPTY */
326 afs_Trace3(afs_iclSetp, CM_TRACE_GRMDIR, ICL_TYPE_POINTER, (afs_int32)dp,
327 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/*
 * VNOP lookup: resolve 'name' in directory dp into *vpp.
 * On success, optionally fills the caller-supplied vattr via
 * afs_gn_getattr().  Flags (FOLLOW etc.) are not consulted here.
 */
333 afs_gn_lookup(dp, vpp, name, Flags, vattrp, cred)
334 struct vattr *vattrp;
339 int32long64_t Flags; /* includes FOLLOW... */
341 afs_uint32 Flags; /* includes FOLLOW... */
348 AFS_STATCNT(afs_gn_lookup);
349 error = afs_lookup(dp, name, vpp, cred);
350 afs_Trace3(afs_iclSetp, CM_TRACE_GLOOKUP, ICL_TYPE_POINTER, (afs_int32)dp,
351 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/* Piggyback attribute fetch when the caller asked for attributes. */
352 if (vattrp != NULL && error == 0)
353 afs_gn_getattr(*vpp, vattrp, cred);
/*
 * VNOP fid: return a file identifier for vp via afs_fid().
 * The cred argument is not passed down.
 */
359 afs_gn_fid(vp, fidp, cred)
366 AFS_STATCNT(afs_gn_fid);
367 error = afs_fid(vp, fidp);
368 afs_Trace3(afs_iclSetp, CM_TRACE_GFID, ICL_TYPE_POINTER, (afs_int32)vp,
369 ICL_TYPE_LONG, (afs_int32)fidp, ICL_TYPE_LONG, error);
/*
 * VNOP open: translate AIX open flags to AFS access modes, honor
 * non-share (FNSHARE) semantics, and call afs_access()/afs_open().
 * FTRUNC is implemented as a setattr of length 0 (setup elided in this
 * excerpt); if that setattr fails after afs_open() succeeded, the file
 * is closed again so open/writer counts stay consistent.
 */
375 afs_gn_open(vp, Flags, ext, vinfop, cred)
379 ext_t ext; /* Ignored in AFS */
382 int ext; /* Ignored in AFS */
384 struct ucred **vinfop; /* return ptr for fp->f_vinfo, used as fp->f_cred */
389 struct vcache *tvp = VTOAFS(vp);
393 AFS_STATCNT(afs_gn_open);
/* Map AIX FREAD/FEXEC/FWRITE|FTRUNC flags onto AFS access-mode bits. */
395 if ((flags & FREAD)) modes |= R_ACC;
396 if ((flags & FEXEC)) modes |= X_ACC;
397 if ((flags & FWRITE) || (flags & FTRUNC)) modes |= W_ACC;
/* FNSHARE: wait (or fail if FDELAY) while other opens exist. */
399 while ((flags & FNSHARE) && tvp->opens) {
400 if (!(flags & FDELAY)) {
404 afs_osi_Sleep(&tvp->opens);
407 error = afs_access(vp, modes, cred);
412 error = afs_open(&vp, flags, cred);
414 if (flags & FTRUNC) {
417 error = afs_setattr(vp, &va, cred);
421 tvp->states |= CNSHARE;
424 *vinfop = cred; /* fp->f_vinfo is like fp->f_cred in suns */
427 /* an error occurred; we've told CM that the file
428 * is open, so close it now so that open and
429 * writer counts are correct. Ignore error code,
430 * as it is likely to fail (the setattr just did).
432 afs_close(vp, flags, cred);
437 afs_Trace3(afs_iclSetp, CM_TRACE_GOPEN, ICL_TYPE_POINTER, (afs_int32)vp,
438 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
/*
 * VNOP create: create dp/name with the given mode and open flags.
 * O_EXCL|O_CREAT selects exclusive create (branch setting 'exclusive'
 * elided in this excerpt).  After afs_create(), performs the open step
 * itself (as BSD copen() would) unless the request came through the NFS
 * translator, where no matching close would ever arrive.
 */
444 afs_gn_create(dp, vpp, Flags, name, Mode, vinfop, cred)
455 struct ucred **vinfop; /* return ptr for fp->f_vinfo, used as fp->f_cred */
459 enum vcexcl exclusive;
464 AFS_STATCNT(afs_gn_create);
465 if ((flags & (O_EXCL|O_CREAT)) == (O_EXCL|O_CREAT))
471 va.va_mode = (mode & 07777) & ~get_umask();
472 if ((flags & FREAD)) modes |= R_ACC;
473 if ((flags & FEXEC)) modes |= X_ACC;
474 if ((flags & FWRITE) || (flags & FTRUNC)) modes |= W_ACC;
475 error = afs_create(dp, name, &va, exclusive, modes, vpp, cred);
479 /* 'cr_luid' is a flag (when it comes thru the NFS server it's set to
480 * RMTUSER_REQ) that determines if we should call afs_open(). We shouldn't
481 * call it when this NFS traffic since the close will never happen thus
482 * we'd never flush the files out to the server! Gross but the simplest
483 * solution we came out with */
484 if (cred->cr_luid != RMTUSER_REQ) {
/* FNSHARE: same wait/fail-on-FDELAY dance as afs_gn_open(). */
485 while ((flags & FNSHARE) && VTOAFS(*vpp)->opens) {
486 if (!(flags & FDELAY))
488 afs_osi_Sleep(&VTOAFS(*vpp)->opens);
490 /* Since in the standard copen() for bsd vnode kernels they do an
491 * vop_open after the vop_create, we must do the open here since there
492 * are stuff in afs_open that we need. For example advance the
493 * execsOrWriters flag (else we'll be treated as the sun's "core"
495 *vinfop = cred; /* save user creds in fp->f_vinfo */
496 error = afs_open(vpp, flags, cred);
498 afs_Trace4(afs_iclSetp, CM_TRACE_GCREATE, ICL_TYPE_POINTER, (afs_int32)dp,
499 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
/* VNOP hold: bump the vnode reference (body largely elided in excerpt). */
508 AFS_STATCNT(afs_gn_hold);
/*
 * VNOP rele: drop one reference on vp.  Panics on a zero v_count.
 * When the last reference goes away, clears any page-hog state and
 * lets afs_inactive() retire the vcache.
 */
519 struct vcache *vcp = VTOAFS(vp);
522 AFS_STATCNT(afs_gn_rele);
523 if (vp->v_count == 0)
524 osi_Panic("afs_rele: zero v_count");
525 if (--(vp->v_count) == 0) {
526 if (vcp->states & CPageHog) {
528 vcp->states &= ~CPageHog;
530 error = afs_inactive(vp, 0);
/*
 * VNOP close: release FNSHARE state (waking any opener blocked in
 * afs_gn_open/afs_gn_create) and call afs_close().
 */
537 afs_gn_close(vp, Flags, vinfo, cred)
544 caddr_t vinfo; /* Ignored in AFS */
548 struct vcache *tvp = VTOAFS(vp);
551 AFS_STATCNT(afs_gn_close);
553 if (flags & FNSHARE) {
554 tvp->states &= ~CNSHARE;
555 afs_osi_Wakeup(&tvp->opens);
558 error = afs_close(vp, flags, cred);
559 afs_Trace3(afs_iclSetp, CM_TRACE_GCLOSE, ICL_TYPE_POINTER, (afs_int32)vp,
560 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
/*
 * VNOP map: prepare vp for memory mapping.
 * Verifies the vcache, flushes stale pages, creates a VM segment via
 * vms_create() if needed, bumps the gnode's read/write mapping counts,
 * and stashes the caller's credentials on the vcache for later async
 * daemon use.
 */
566 afs_gn_map(vp, addr, Len, Off, Flag, cred)
570 uint32long64_t Len, Off, Flag;
572 u_int Len, Off, Flag;
576 struct vcache *vcp = VTOAFS(vp);
577 struct vrequest treq;
581 afs_int32 flag = Flag;
583 AFS_STATCNT(afs_gn_map);
585 if (error = afs_InitReq(&treq, cred)) return error;
586 error = afs_VerifyVCache(vcp, &treq);
588 return afs_CheckCode(error, &treq, 49);
590 osi_FlushPages(vcp, cred); /* XXX ensure old pages are gone XXX */
591 ObtainWriteLock(&vcp->lock, 401);
592 vcp->states |= CMAPPED; /* flag cleared at afs_inactive */
594 * We map the segment into our address space using the handle returned by vm_create.
597 /* Consider V_INTRSEG too for interrupts */
598 if (error = vms_create(&vcp->segid, V_CLIENT, vcp->v.v_gnode, vcp->m.Length, 0, 0)) {
599 ReleaseWriteLock(&vcp->lock);
602 vcp->vmh = SRVAL(vcp->segid, 0, 0);
604 vcp->v.v_gnode->gn_seg = vcp->segid; /* XXX Important XXX */
/* Track read-only vs writable mappings separately on the gnode. */
605 if (flag & SHM_RDONLY) {
606 vp->v_gnode->gn_mrdcnt++;
609 vp->v_gnode->gn_mwrcnt++;
612 * We keep the caller's credentials since an async daemon will handle the
613 * request at some point. We assume that the same credentials will be used.
615 if (!vcp->credp || (vcp->credp != cred)) {
618 struct ucred *crp = vcp->credp;
624 ReleaseWriteLock(&vcp->lock);
626 afs_Trace4(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, (afs_int32)vp,
627 ICL_TYPE_LONG, addr, ICL_TYPE_LONG, len, ICL_TYPE_LONG, off);
/*
 * VNOP unmap: undo one mapping of vp.
 * Decrements the gnode's read or write mapping count (clamped at zero)
 * under the vcache write lock.
 */
633 afs_gn_unmap(vp, flag, cred)
642 struct vcache *vcp = VTOAFS(vp);
643 AFS_STATCNT(afs_gn_unmap);
644 ObtainWriteLock(&vcp->lock, 402);
645 if (flag & SHM_RDONLY) {
646 vp->v_gnode->gn_mrdcnt--;
647 if (vp->v_gnode->gn_mrdcnt <=0) vp->v_gnode->gn_mrdcnt = 0;
650 vp->v_gnode->gn_mwrcnt--;
651 if (vp->v_gnode->gn_mwrcnt <=0) vp->v_gnode->gn_mwrcnt = 0;
653 ReleaseWriteLock(&vcp->lock);
/*
 * VNOP access: check 'mode' access for the identity class 'who'.
 * Beyond the basic afs_access() check, ACC_OTHERS/ACC_ANY/ACC_ALL are
 * approximated from the UNIX mode bits returned by afs_getattr()
 * (owner bits >>6, group bits >>3, other bits as-is).
 */
661 afs_gn_access(vp, Mode, Who, cred)
677 AFS_STATCNT(afs_gn_access);
683 error = afs_access(vp, mode, cred);
685 /* Additional testing */
686 if (who == ACC_OTHERS || who == ACC_ANY) {
687 error = afs_getattr(vp, &vattr, cred);
689 if (who == ACC_ANY) {
690 if (((vattr.va_mode >> 6) & mode) == mode) {
695 if (((vattr.va_mode >> 3) & mode) == mode)
700 } else if (who == ACC_ALL) {
701 error = afs_getattr(vp, &vattr, cred);
/* ACC_ALL requires owner, group AND other bits all to grant 'mode'. */
703 if ((!((vattr.va_mode >> 6) & mode)) || (!((vattr.va_mode >> 3) & mode)) ||
704 (!(vattr.va_mode & mode)))
713 afs_Trace3(afs_iclSetp, CM_TRACE_GACCESS, ICL_TYPE_POINTER, (afs_int32)vp,
714 ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
/* VNOP getattr: thin wrapper over afs_getattr() with tracing. */
720 afs_gn_getattr(vp, vattrp, cred)
722 struct vattr *vattrp;
727 AFS_STATCNT(afs_gn_getattr);
728 error = afs_getattr(vp, vattrp, cred);
729 afs_Trace2(afs_iclSetp, CM_TRACE_GGETATTR, ICL_TYPE_POINTER, (afs_int32)vp,
730 ICL_TYPE_LONG, error);
/*
 * VNOP setattr: apply an AIX-style attribute operation (Op with args
 * arg1..arg3) by building a vattr and calling afs_setattr().
 * The visible fragment handles owner/group preservation flags and the
 * T_SETTIME case, which uses either "now" or caller-supplied
 * timestruc_t values for atime/mtime.  (Op dispatch elided in excerpt.)
 */
736 afs_gn_setattr(vp, Op, Arg1, Arg2, Arg3, cred)
758 AFS_STATCNT(afs_gn_setattr);
766 if ((arg1 & T_OWNER_AS_IS) == 0)
768 if ((arg1 & T_GROUP_AS_IS) == 0)
/* NOTE(review): write permission checked before time updates — confirm
 * against the elided Op dispatch above. */
773 error = afs_access(vp, VWRITE, cred);
777 if (arg1 & T_SETTIME) {
778 va.va_atime.tv_sec = time;
779 va.va_mtime.tv_sec = time;
781 va.va_atime = *(struct timestruc_t *) arg2;
782 va.va_mtime = *(struct timestruc_t *) arg3;
790 error = afs_setattr(vp, &va, cred);
792 afs_Trace2(afs_iclSetp, CM_TRACE_GSETATTR, ICL_TYPE_POINTER, (afs_int32)vp,
793 ICL_TYPE_LONG, error);
/* One page of zeros, written repeatedly by afs_gn_fclear(). */
798 char zero_buffer[PAGESIZE];
/*
 * VNOP fclear: zero-fill [offset, offset+length) of vp by issuing
 * page-sized writes of zero_buffer through afs_rdwr().
 * Flushes the VM segment's pages first, and refuses to clear past the
 * process ulimit.
 */
800 afs_gn_fclear(vp, flags, offset, length, vinfo, cred)
812 int i, len, error = 0;
815 static int fclear_init =0;
816 register struct vcache *avc = VTOAFS(vp);
818 AFS_STATCNT(afs_gn_fclear);
/* One-time initialization of the shared zero page (guard elided). */
820 memset(zero_buffer, 0, PAGESIZE);
824 * Don't clear past ulimit
826 if (offset + length > get_ulimit())
829 /* Flush all pages first */
832 vm_flushp(avc->segid, 0, MAXFSIZE/PAGESIZE - 1);
833 vms_iowait(avc->vmh);
836 uio.afsio_offset = offset;
/* Write zeros a page (or the remaining tail) at a time; afs_rdwr()
 * advances uio.afsio_offset for the loop. */
837 for (i = offset; i < offset + length; i = uio.afsio_offset) {
838 len = offset + length - i;
839 iov.iov_len = (len > PAGESIZE) ? PAGESIZE : len;
840 iov.iov_base = zero_buffer;
841 uio.afsio_iov = &iov;
842 uio.afsio_iovcnt = 1;
843 uio.afsio_seg = AFS_UIOSYS;
844 uio.afsio_resid = iov.iov_len;
845 if (error = afs_rdwr(vp, &uio, UIO_WRITE, 0, cred))
848 afs_Trace4(afs_iclSetp, CM_TRACE_GFCLEAR, ICL_TYPE_POINTER, (afs_int32)vp,
849 ICL_TYPE_LONG, offset, ICL_TYPE_LONG, length, ICL_TYPE_LONG, error);
/* VNOP fsync: flush vp via afs_fsync(); flags/vinfo are unused. */
855 afs_gn_fsync(vp, flags, vinfo, cred)
858 int32long64_t flags; /* Not used by AFS */
859 int32long64_t vinfo; /* Not used by AFS */
861 int flags; /* Not used by AFS */
862 caddr_t vinfo; /* Not used by AFS */
868 AFS_STATCNT(afs_gn_fsync);
869 error = afs_fsync(vp, cred);
870 afs_Trace3(afs_iclSetp, CM_TRACE_GFSYNC, ICL_TYPE_POINTER, (afs_int32)vp,
871 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
/*
 * VNOP ftrunc: truncate vp to 'length' by issuing a setattr
 * (vattr setup for va_size elided in this excerpt).
 */
877 afs_gn_ftrunc(vp, flags, length, vinfo, cred)
880 int32long64_t flags; /* Ignored in AFS */
882 int flags; /* Ignored in AFS */
885 caddr_t vinfo; /* Ignored in AFS */
891 AFS_STATCNT(afs_gn_ftrunc);
894 error = afs_setattr(vp, &va, cred);
895 afs_Trace4(afs_iclSetp, CM_TRACE_GFTRUNC, ICL_TYPE_POINTER, (afs_int32)vp,
896 ICL_TYPE_LONG, flags,
897 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length),
898 ICL_TYPE_LONG, error);
902 /* Min size of a file which is dumping core before we declare it a page hog. */
903 #define MIN_PAGE_HOG_SIZE 8388608
/*
 * VNOP rdwr: top-level read/write entry point.
 * Fails writes early if the vcache already carries a stored error
 * (vc_error), stashes/dupes credentials for async daemons, verifies the
 * vcache, and — for NFS translator requests — performs explicit ACL
 * checks plus FakeOpen/FakeClose bookkeeping around afs_vm_rdwr(),
 * since NFS issues no open/close RPCs of its own.
 */
905 int afs_gn_rdwr(vp, op, Flags, ubuf, ext, vinfo, vattrp, cred)
910 ext_t ext; /* Ignored in AFS */
913 int ext; /* Ignored in AFS */
916 caddr_t vinfo; /* Ignored in AFS */
917 struct vattr *vattrp;
920 register struct vcache *vcp = VTOAFS(vp);
921 struct vrequest treq;
926 AFS_STATCNT(afs_gn_rdwr);
/* A pending store error poisons further writes until cleared. */
929 if (op == UIO_WRITE) {
930 afs_Trace2(afs_iclSetp, CM_TRACE_GRDWR1,
931 ICL_TYPE_POINTER, (afs_int32)vp,
932 ICL_TYPE_LONG, vcp->vc_error);
933 return vcp->vc_error;
938 ObtainSharedLock(&vcp->lock, 507);
940 * We keep the caller's credentials since an async daemon will handle the
941 * request at some point. We assume that the same credentials will be used.
942 * If this is being called from an NFS server thread, then dupe the
943 * cred and only use that copy in calls and for the stash.
945 if (!vcp->credp || (vcp->credp != cred)) {
946 #ifdef AFS_AIX_IAUTH_ENV
947 if (AFS_NFSXLATORREQ(cred)) {
948 /* Must be able to use cred later, so dupe it so that nfs server
949 * doesn't overwrite it's contents.
955 crhold(cred); /* Bump refcount for reference in vcache */
959 UpgradeSToWLock(&vcp->lock, 508);
962 ConvertWToSLock(&vcp->lock);
967 ReleaseSharedLock(&vcp->lock);
970 * XXX Is the following really required?? XXX
972 if (error = afs_InitReq(&treq, cred)) return error;
973 if (error = afs_VerifyVCache(vcp, &treq))
974 return afs_CheckCode(error, &treq, 50);
975 osi_FlushPages(vcp, cred); /* Flush old pages */
/* NFS translator: do an explicit ACL check, since no open() preceded. */
977 if (AFS_NFSXLATORREQ(cred)) {
980 if (op == UIO_READ) {
981 if (!afs_AccessOK(vcp, PRSFS_READ, &treq,
982 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
991 * We have to bump the open/exwriters field here courtesy of the nfs xlator
992 * because there're no open/close nfs rpcs to call our afs_open/close.
993 * We do a similar thing on the afs_read/write interface.
995 if (op == UIO_WRITE) {
996 #ifdef AFS_64BIT_CLIENT
997 if (ubuf->afsio_offset < afs_vmMappingEnd) {
998 #endif /* AFS_64BIT_ENV */
999 ObtainWriteLock(&vcp->lock,240);
1000 vcp->states |= CDirty; /* Set the dirty bit */
1002 ReleaseWriteLock(&vcp->lock);
1003 #ifdef AFS_64BIT_CLIENT
1005 #endif /* AFS_64BIT_ENV */
1008 error = afs_vm_rdwr(vp, ubuf, op, flags, cred);
1010 if (op == UIO_WRITE) {
1011 #ifdef AFS_64BIT_CLIENT
1012 if (ubuf->afsio_offset < afs_vmMappingEnd) {
1013 #endif /* AFS_64BIT_ENV */
1014 ObtainWriteLock(&vcp->lock,241);
1015 afs_FakeClose(vcp, cred); /* XXXX For nfs trans and cores XXXX */
1016 ReleaseWriteLock(&vcp->lock);
1017 #ifdef AFS_64BIT_CLIENT
1019 #endif /* AFS_64BIT_ENV */
/* Optionally return post-op attributes, as lookup does. */
1021 if (vattrp != NULL && error == 0)
1022 afs_gn_getattr(vp, vattrp, cred);
1024 afs_Trace4(afs_iclSetp, CM_TRACE_GRDWR, ICL_TYPE_POINTER, (afs_int32)vp,
1025 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, op, ICL_TYPE_LONG, error);
/* Max chunks written through VM before forcing an iowait to throttle. */
1032 #define AFS_MAX_VM_CHUNKS 10
/*
 * afs_vm_rdwr: perform a read or write through the AIX VM segment
 * attached to the vcache (vm_move), handling IO_APPEND, EOF clamping,
 * chunk-at-a-time writes with partial-store flushing, prefetch of the
 * next chunk on reads, and (64-bit clients) the special case of a
 * transfer crossing afs_vmMappingEnd, whose tail goes through
 * afs_direct_rdwr() instead.  Returns an AFS error code.
 */
1033 afs_vm_rdwr(vp, uiop, rw, ioflag, credp)
1034 register struct vnode *vp;
1038 struct ucred *credp;
1040 register afs_int32 code = 0;
1042 afs_int32 blockSize;
1043 afs_size_t fileSize, xfrOffset, offset, old_offset;
1045 #ifdef AFS_64BIT_CLIENT
1046 afs_size_t finalOffset;
1049 #endif /* AFS_64BIT_CLIENT */
1050 register struct vcache *vcp = VTOAFS(vp);
1052 afs_size_t start_offset;
1053 afs_int32 save_resid = uiop->afsio_resid;
1054 int first_page, last_page, pages;
1057 struct vrequest treq;
1059 if (code = afs_InitReq(&treq, credp)) return code;
1061 /* special case easy transfer; apparently a lot are done */
1062 if ((xfrSize=uiop->afsio_resid) == 0) return 0;
1064 ObtainReadLock(&vcp->lock);
1065 fileSize = vcp->m.Length;
1066 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
1067 uiop->afsio_offset = fileSize;
1069 /* compute xfrOffset now, and do some checks */
1070 xfrOffset = uiop->afsio_offset;
/* Reject negative offsets and arithmetic wraparound. */
1071 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
1076 #ifndef AFS_64BIT_CLIENT
1077 /* check for "file too big" error, which should really be done above us */
1078 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
1082 #endif /* AFS_64BIT_CLIENT */
1084 #ifdef AFS_64BIT_CLIENT
/* Transfers at or past the VM mapping limit bypass VM entirely. */
1085 if (xfrOffset + xfrSize > afs_vmMappingEnd) {
1086 if (xfrOffset < afs_vmMappingEnd) {
1087 /* special case of a buffer crossing the VM mapping line */
1089 struct iovec tvec[16]; /* Should have access to #define */
1093 finalOffset = xfrOffset + xfrSize;
1094 tsize = (afs_size_t) (xfrOffset + xfrSize - afs_vmMappingEnd);
/* Split off the tail beyond the mapping line into tuio and push it
 * through afs_direct_rdwr(); the head continues through VM below. */
1095 afsio_copy(uiop, &tuio, tvec);
1096 afsio_skip(&tuio, xfrSize - tsize);
1097 afsio_trim(&tuio, tsize);
1098 tuio.afsio_offset = afs_vmMappingEnd;
1099 ReleaseReadLock(&vcp->lock);
1100 ObtainWriteLock(&vcp->lock,243);
1101 afs_FakeClose(vcp, credp); /* XXXX For nfs trans and cores XXXX */
1102 ReleaseWriteLock(&vcp->lock);
1103 code = afs_direct_rdwr(vp, &tuio, rw, ioflag, credp);
1104 ObtainWriteLock(&vcp->lock,244);
1105 afs_FakeOpen(vcp); /* XXXX For nfs trans and cores XXXX */
1106 ReleaseWriteLock(&vcp->lock);
1107 ObtainReadLock(&vcp->lock);
1108 if (code) goto fail;
1109 xfrSize = (afs_size_t) (afs_vmMappingEnd - xfrOffset);
1110 afsio_trim(uiop, xfrSize);
1112 ReleaseReadLock(&vcp->lock);
1113 code = afs_direct_rdwr(vp, uiop, rw, ioflag, credp);
1117 #endif /* AFS_64BIT_CLIENT */
/* Lazily create the VM segment if the vcache does not have one yet. */
1120 /* Consider V_INTRSEG too for interrupts */
1121 if (code = vms_create(&vcp->segid, V_CLIENT, vcp->v.v_gnode,
1122 vcp->m.Length, 0, 0)) {
1125 vcp->vmh = SRVAL(vcp->segid, 0, 0);
1127 if (rw == UIO_READ) {
1128 /* don't read past EOF */
1129 if (xfrSize+xfrOffset > fileSize)
1130 xfrSize = fileSize - xfrOffset;
1131 if (xfrSize <= 0) goto fail;
1132 ReleaseReadLock(&vcp->lock);
1133 #ifdef AFS_64BIT_CLIENT
1134 toffset = xfrOffset;
1135 uiop->afsio_offset = xfrOffset;
1136 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE,
1137 ICL_TYPE_POINTER, vcp,
1138 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrOffset),
1139 ICL_TYPE_INT32, xfrSize);
1141 code = vm_move(vcp->segid, toffset, xfrSize, rw, uiop);
1142 #else /* AFS_64BIT_CLIENT */
1144 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
1145 #endif /* AFS_64BIT_CLIENT */
1148 * If at a chunk boundary and staying within chunk,
1149 * start prefetch of next chunk.
1151 if (counter == 0 || AFS_CHUNKOFFSET(xfrOffset) == 0
1152 && xfrSize <= AFS_CHUNKSIZE(xfrOffset)) {
1153 ObtainWriteLock(&vcp->lock,407);
1154 tdc = afs_FindDCache(vcp, xfrOffset);
1156 if (!(tdc->mflags & DFNextStarted))
1157 afs_PrefetchChunk(vcp, tdc, credp, &treq);
1160 ReleaseWriteLock(&vcp->lock);
1162 #ifdef AFS_64BIT_CLIENT
1164 uiop->afsio_offset = finalOffset;
1166 #endif /* AFS_64BIT_CLIENT */
/* ---- write path ---- */
1171 start_offset = uiop->afsio_offset;
1172 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE,
1173 ICL_TYPE_POINTER, vcp,
1174 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(start_offset),
1175 ICL_TYPE_INT32, xfrSize);
1176 ReleaseReadLock(&vcp->lock);
1177 ObtainWriteLock(&vcp->lock,400);
1178 vcp->m.Date = osi_Time(); /* Set file date (for ranlib) */
1180 /* un-protect last page. */
1181 last_page = vcp->m.Length/PAGESIZE;
1182 vm_protectp(vcp->vmh, last_page, 1, FILEKEY);
/* Extend the cached length if this write grows the file. */
1183 if (xfrSize + xfrOffset > fileSize) {
1184 vcp->m.Length = xfrSize+xfrOffset;
1186 if ((!(vcp->states & CPageHog)) && (xfrSize >= MIN_PAGE_HOG_SIZE)) {
1188 vcp->states |= CPageHog;
1190 ReleaseWriteLock(&vcp->lock);
1192 /* If the write will fit into a single chunk we'll write all of it
1193 * at once. Otherwise, we'll write one chunk at a time, flushing
1194 * some of it to disk.
1198 /* Only create a page to avoid excess VM access if we're writing a
1199 * small file which is either new or completely overwrites the
1202 if ((xfrOffset == 0) && (xfrSize < PAGESIZE) && (xfrSize >= fileSize) &&
1203 (vcp->v.v_gnode->gn_mwrcnt == 0) &&
1204 (vcp->v.v_gnode->gn_mrdcnt == 0)) {
1205 (void) vm_makep(vcp->segid, 0);
/* Chunk-by-chunk write loop. */
1208 while (xfrSize > 0) {
1209 offset = AFS_CHUNKBASE(xfrOffset);
1212 if (AFS_CHUNKSIZE(xfrOffset) <= len)
1213 len = AFS_CHUNKSIZE(xfrOffset) - (xfrOffset - offset);
1215 if (len == xfrSize) {
1216 /* All data goes to this one chunk. */
1218 old_offset = uiop->afsio_offset;
1219 #ifdef AFS_64BIT_CLIENT
1220 uiop->afsio_offset = xfrOffset;
1221 toffset = xfrOffset;
1222 code = vm_move(vcp->segid, toffset, xfrSize, rw, uiop);
1223 #else /* AFS_64BIT_CLIENT */
1224 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
1225 #endif /* AFS_64BIT_CLIENT */
1231 /* Write just one chunk's worth of data. */
1233 struct iovec tvec[16]; /* Should have access to #define */
1235 /* Purge dirty chunks of file if there are too many dirty chunks.
1236 * Inside the write loop, we only do this at a chunk boundary.
1237 * Clean up partial chunk if necessary at end of loop.
1239 if (counter > 0 && code == 0 && xfrOffset == offset) {
1240 ObtainWriteLock(&vcp->lock,403);
1241 if (xfrOffset > vcp->m.Length)
1242 vcp->m.Length = xfrOffset;
1243 code = afs_DoPartialWrite(vcp, &treq);
1244 vcp->states |= CDirty;
1245 ReleaseWriteLock(&vcp->lock);
/* Carve one chunk's worth out of the caller's uio into tuio. */
1249 afsio_copy(uiop, &tuio, tvec);
1250 afsio_trim(&tuio, len);
1251 tuio.afsio_offset = xfrOffset;
1254 old_offset = uiop->afsio_offset;
1255 #ifdef AFS_64BIT_CLIENT
1256 toffset = xfrOffset;
1257 code = vm_move(vcp->segid, toffset, len, rw, &tuio);
1258 #else /* AFS_64BIT_CLIENT */
1259 code = vm_move(vcp->segid, xfrOffset, len, rw, &tuio);
1260 #endif /* AFS_64BIT_CLIENT */
1262 len -= tuio.afsio_resid;
1263 afsio_skip(uiop, len);
/* Schedule the just-written pages for pageout; throttle after
 * AFS_MAX_VM_CHUNKS chunks by waiting for outstanding I/O. */
1268 first_page = old_offset >> PGSHIFT;
1269 pages = 1 + ((old_offset + (len - 1)) >> PGSHIFT) - first_page;
1270 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE2,
1271 ICL_TYPE_POINTER, (afs_int32) vcp,
1272 ICL_TYPE_INT32, first_page,
1273 ICL_TYPE_INT32, pages);
1275 code = vm_writep(vcp->segid, first_page, pages);
1276 if (++count > AFS_MAX_VM_CHUNKS) {
1278 vms_iowait(vcp->segid);
1286 vms_iowait(vcp->segid);
1290 ObtainWriteLock(&vcp->lock,242);
1291 if (code == 0 && (vcp->states & CDirty)) {
1292 code = afs_DoPartialWrite(vcp, &treq);
1294 vm_protectp(vcp->vmh, last_page, 1, RDONLY);
1295 ReleaseWriteLock(&vcp->lock);
1297 /* If requested, fsync the file after every write */
1299 afs_fsync(vp, credp);
1301 ObtainReadLock(&vcp->lock);
1302 if (vcp->vc_error) {
1303 /* Pretend we didn't write anything. We need to get the error back to
1304 * the user. If we don't it's possible for a quota error for this
1305 * write to succeed and the file to be closed without the user ever
1306 * having seen the store error. And AIX syscall clears the error if
1307 * anything was written.
1309 code = vcp->vc_error;
1310 if (code == EDQUOT || code == ENOSPC)
1311 uiop->afsio_resid = save_resid;
1313 #ifdef AFS_64BIT_CLIENT
1315 uiop->afsio_offset = finalOffset;
1317 #endif /* AFS_64BIT_CLIENT */
1320 ReleaseReadLock(&vcp->lock);
1321 afs_Trace2(afs_iclSetp, CM_TRACE_VMWRITE3,
1322 ICL_TYPE_POINTER, vcp,
1323 ICL_TYPE_INT32, code);
/*
 * afs_direct_rdwr: read/write without going through the VM segment —
 * used for transfers beyond afs_vmMappingEnd on 64-bit clients.
 * Mirrors afs_vm_rdwr's setup (IO_APPEND, offset sanity, ulimit check,
 * length extension for growing writes) but hands the transfer straight
 * to afs_rdwr(), then normalizes the uio and does partial-store
 * flushing when the write crossed a chunk boundary.
 */
1328 afs_direct_rdwr(vp, uiop, rw, ioflag, credp)
1329 register struct vnode *vp;
1333 struct ucred *credp;
1335 register afs_int32 code = 0;
1337 afs_size_t fileSize, xfrOffset, offset, old_offset;
1338 struct vcache *vcp = VTOAFS(vp);
1339 afs_int32 save_resid = uiop->afsio_resid;
1340 struct vrequest treq;
1342 if (code = afs_InitReq(&treq, credp)) return code;
1344 /* special case easy transfer; apparently a lot are done */
1345 if ((xfrSize=uiop->afsio_resid) == 0) return 0;
1347 ObtainReadLock(&vcp->lock);
1348 fileSize = vcp->m.Length;
1349 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
1350 uiop->afsio_offset = fileSize;
1352 /* compute xfrOffset now, and do some checks */
1353 xfrOffset = uiop->afsio_offset;
1354 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
1356 ReleaseReadLock(&vcp->lock);
1360 /* check for "file too big" error, which should really be done above us */
1362 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
1364 ReleaseReadLock(&vcp->lock);
1368 ReleaseReadLock(&vcp->lock);
1369 if (rw == UIO_WRITE) {
1370 ObtainWriteLock(&vcp->lock,400);
1371 vcp->m.Date = osi_Time(); /* Set file date (for ranlib) */
1373 if (xfrSize + xfrOffset > fileSize) {
1374 vcp->m.Length = xfrSize+xfrOffset;
1376 ReleaseWriteLock(&vcp->lock);
1378 afs_Trace3(afs_iclSetp, CM_TRACE_DIRECTRDWR,
1379 ICL_TYPE_POINTER, (afs_int32)vp,
1380 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(uiop->afsio_offset),
1381 ICL_TYPE_LONG, uiop->afsio_resid);
1382 code = afs_rdwr(vp, uiop, rw, ioflag, credp);
/* On failure, report nothing consumed; on success, normalize the uio
 * to exactly the requested span. */
1384 uiop->afsio_resid = save_resid;
1386 uiop->afsio_offset = xfrOffset + xfrSize;
1387 if (uiop->afsio_resid > 0) {
1388 /* should zero here the remaining buffer */
1389 uiop->afsio_resid = 0;
1391 /* Purge dirty chunks of file if there are too many dirty chunks.
1392 * Inside the write loop, we only do this at a chunk boundary.
1393 * Clean up partial chunk if necessary at end of loop.
1395 if (AFS_CHUNKBASE(uiop->afsio_offset) != AFS_CHUNKBASE(xfrOffset)) {
1396 ObtainWriteLock(&vcp->lock,402);
1397 code = afs_DoPartialWrite(vcp, &treq);
1398 vcp->states |= CDirty;
1399 ReleaseWriteLock(&vcp->lock);
/*
 * lock_normalize: convert an eflock's l_start to an absolute file
 * offset based on l_whence (SEEK_CUR adds the file position 'offset';
 * SEEK_END adds the file size from afs_getattr), then force l_whence
 * to SEEK_SET (0).  Returns 0 or the afs_getattr error.
 */
1408 static int lock_normalize(vp, lckdat, offset, cred)
1410 struct eflock *lckdat;
1417 switch(lckdat->l_whence) {
1421 lckdat->l_start += (off_t) offset;
1424 code = afs_getattr(vp, &vattr, cred);
1425 if (code != 0) return code;
1426 lckdat->l_start += (off_t) vattr.va_size;
1431 lckdat->l_whence = 0;
/*
 * VNOP lockctl: advisory record locking.
 * Translates the AIX command (SETFLCK etc. — full mapping elided) into
 * a standard lockctl command, copies the caller's eflock into a local
 * flock (checking for offset truncation), normalizes it to absolute
 * offsets, calls afs_lockctl(), and copies the result back.
 */
1437 afs_gn_lockctl(vp, offset, lckdat, cmd, ignored_fcn, ignored_id, cred)
1438 void (*ignored_fcn)();
1442 struct eflock *lckdat;
1444 #ifdef AFS_AIX51_ENV
1452 struct vattr *attrs;
1454 AFS_STATCNT(afs_gn_lockctl);
1455 /* Convert from AIX's cmd to standard lockctl lock types... */
1458 else if (cmd & SETFLCK) {
1463 flkd.l_type = lckdat->l_type;
1464 flkd.l_whence = lckdat->l_whence;
1465 flkd.l_start = lckdat->l_start;
1466 flkd.l_len = lckdat->l_len;
1467 flkd.l_pid = lckdat->l_pid;
1468 flkd.l_sysid = lckdat->l_sysid;
/* Detect truncation when eflock's wider fields don't round-trip. */
1470 if (flkd.l_start != lckdat->l_start || flkd.l_len != lckdat->l_len)
1472 if (error = lock_normalize(vp, &flkd, offset, cred))
1474 error = afs_lockctl(vp, &flkd, ncmd, cred);
/* Copy results back so GETLK-style queries reach the caller. */
1475 lckdat->l_type = flkd.l_type;
1476 lckdat->l_whence = flkd.l_whence;
1477 lckdat->l_start = flkd.l_start;
1478 lckdat->l_len = flkd.l_len;
1479 lckdat->l_pid = flkd.l_pid;
1480 lckdat->l_sysid = flkd.l_sysid;
1481 afs_Trace3(afs_iclSetp, CM_TRACE_GLOCKCTL, ICL_TYPE_POINTER, (afs_int32)vp,
1482 ICL_TYPE_LONG, ncmd, ICL_TYPE_LONG, error);
1487 /* NOTE: In the nfs glue routine (nfs_gn2sun.c) the order was wrong (vp, flags, cmd, arg, ext); was that another typo? */
/* VNOP ioctl: forward to afs_ioctl(); flags/channel/ext are unused. */
1488 int afs_gn_ioctl(vp, Cmd, arg, flags, channel, ext)
1490 #ifdef AFS_AIX51_ENV
1496 int flags; /* Ignored in AFS */
1497 int channel; /* Ignored in AFS */
1498 int ext; /* Ignored in AFS */
1503 AFS_STATCNT(afs_gn_ioctl);
1504 /* This seems to be a perfect fit for our ioctl redirection (afs_xioctl hack); thus the ioctl(2) entry in sysent.c is unaffected in the aix/afs port. */
1505 error = afs_ioctl(vp, cmd, arg);
1506 afs_Trace3(afs_iclSetp, CM_TRACE_GIOCTL, ICL_TYPE_POINTER, (afs_int32)vp,
1507 ICL_TYPE_LONG, cmd, ICL_TYPE_LONG, error);
/* VNOP readlink: thin wrapper over afs_readlink() with tracing. */
1513 afs_gn_readlink(vp, uiop, cred)
1520 AFS_STATCNT(afs_gn_readlink);
1521 error = afs_readlink(vp, uiop, cred);
1522 afs_Trace2(afs_iclSetp, CM_TRACE_GREADLINK, ICL_TYPE_POINTER, (afs_int32)vp,
1523 ICL_TYPE_LONG, error);
/* VNOP select: not implemented for AFS (stub; return elided in excerpt). */
1529 afs_gn_select(vp, which, vinfo, mpx)
1535 AFS_STATCNT(afs_gn_select);
1536 /* NO SUPPORT for this in afs YET! */
/*
 * VNOP symlink: create symlink 'link' under vp pointing at 'target'
 * (vattr setup elided in this excerpt).
 */
1542 afs_gn_symlink(vp, link, target, cred)
1551 AFS_STATCNT(afs_gn_symlink);
1554 error = afs_symlink(vp, link, &va, target, cred);
1555 afs_Trace4(afs_iclSetp, CM_TRACE_GSYMLINK, ICL_TYPE_POINTER, (afs_int32)vp,
1556 ICL_TYPE_STRING, link, ICL_TYPE_STRING, target, ICL_TYPE_LONG, error);
/* VNOP readdir: thin wrapper over afs_readdir() with tracing. */
1562 afs_gn_readdir(vp, uiop, cred)
1569 AFS_STATCNT(afs_gn_readdir);
1570 error = afs_readdir(vp, uiop, cred);
1571 afs_Trace2(afs_iclSetp, CM_TRACE_GREADDIR, ICL_TYPE_POINTER, (afs_int32)vp,
1572 ICL_TYPE_LONG, error);
/*
 * State shared with the buffer-strategy machinery (afs_gn_strategy and
 * the biod daemons): the async buffer queue, its lock and condition,
 * and the monotonically increasing age stamp.
 */
1577 extern Simple_lock afs_asyncbuf_lock;
1579 * Buffers are ranked by age. A buffer's age is the value of afs_biotime
1580 * when the buffer is processed by naix_vmstrategy. afs_biotime is
1581 * incremented for each buffer. A buffer's age is kept in its av_back field.
1582 * The age ranking is used by the daemons, which favor older buffers.
1584 afs_int32 afs_biotime = 0;
1586 extern struct buf *afs_asyncbuf;
1587 extern int afs_asyncbuf_cv;
1588 /* This function is called with a list of buffers, threaded through
1589 * the av_forw field. Our goal is to copy the list of buffers into the
1590 * afs_asyncbuf list, sorting buffers into sublists linked by the b_work field.
1591 * Within buffers within the same work group, the guy with the lowest address
1592 * has to be located at the head of the queue; his b_bcount field will also
1593 * be increased to cover all of the buffers in the b_work queue.
1595 #define AIX_VM_BLKSIZE 8192
1596 afs_gn_strategy(abp, cred)
1598 register struct buf *abp;
1600 register struct buf **lbp, *tbp;
1601 int *lwbp; /* last guy in work chain */
1602 struct buf *nbp, *qbp, *qnbp, *firstComparable;
1606 #define EFS_COMPARABLE(x,y) ((x)->b_vp == (y)->b_vp \
1607 && (x)->b_xmemd.subspace_id == (y)->b_xmemd.subspace_id \
1608 && (x)->b_flags == (y)->b_flags \
1609 && !((x)->b_flags & B_PFPROT) \
1610 && !((y)->b_flags & B_PFPROT))
1612 oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);
1613 for(tbp = abp; tbp; tbp=nbp) {
1614 nbp = tbp->av_forw; /* remember for later */
1616 tbp->av_back = (struct buf *) afs_biotime++;
1618 /* first insert the buffer into the afs_async queue. Insert buffer
1619 * sorted within its disk position within a set of comparable buffers.
1620 * Ensure that all comparable buffers are grouped contiguously.
1621 * Later on, we'll merge adjacent buffers into a single request.
1623 firstComparable = NULL;
1624 lbp = &afs_asyncbuf;
1625 for(qbp = *lbp; qbp; lbp = &qbp->av_forw, qbp = *lbp) {
1626 if (EFS_COMPARABLE(tbp, qbp)) {
1627 if (!firstComparable) firstComparable = qbp;
1628 /* this buffer is comparable, so see if the next buffer
1629 * is farther in the file; if it is insert before next buffer.
1631 if (tbp->b_blkno < qbp->b_blkno) {
1635 /* If we're at the end of a block of comparable buffers, we
1636 * insert the buffer here to keep all comparable buffers
1639 if (firstComparable)
1643 /* do the insert before qbp now */
1644 tbp->av_forw = *lbp;
1646 if (firstComparable == NULL) {
1647 /* next we're going to do all sorts of buffer merging tricks, but
1648 * here we know we're the only COMPARABLE block in the
1649 * afs_asyncbuf list, so we just skip that and continue with
1650 * the next input buffer.
1654 /* we may have actually added the "new" firstComparable */
1655 if (tbp->av_forw == firstComparable)
1656 firstComparable = tbp;
1658 * when we get here, firstComparable points to the first dude in the
1659 * same vnode and subspace that we (tbp) are in. We go through the
1660 * area of this list with COMPARABLE buffers (a contiguous region) and
1661 * repeated merge buffers that are contiguous and in the same block or
1662 * buffers that are contiguous and are both integral numbers of blocks.
1663 * Note that our end goal is to have as big blocks as we can, but we
1664 * must minimize the transfers that are not integral #s of blocks on
1665 * block boundaries, since Episode will do those smaller and/or
1666 * unaligned I/Os synchronously.
1668 * A useful example to consider has the async queue with this in it:
1669 * [8K block, 2 pages] [4K block, 1 page] [4K hole] [8K block, 2 pages]
1670 * If we get a request that fills the 4K hole, we want to merge this
1671 * whole mess into a 24K, 6 page transfer. If we don't, however, we
1672 * don't want to do any merging since adding the 4K transfer to the 8K
1673 * transfer makes the 8K transfer synchronous.
1675 * Note that if there are any blocks whose size is a multiple of
1676 * the file system block size, then we know that such blocks are also
1677 * on block boundaries.
1680 doMerge = 1; /* start the loop */
1681 while(doMerge) { /* loop until an iteration doesn't
1682 * make any more changes */
1684 for (qbp = firstComparable; ; qbp = qnbp) {
1685 qnbp = qbp->av_forw;
1686 if (!qnbp) break; /* we're done */
1687 if (!EFS_COMPARABLE(qbp, qnbp)) break;
1689 /* try to merge qbp and qnbp */
1691 /* first check if both not adjacent go on to next region */
1692 if ((dbtob(qbp->b_blkno) + qbp->b_bcount) != dbtob(qnbp->b_blkno))
1695 /* note if both in the same block, the first byte of leftmost guy
1696 * and last byte of rightmost guy are in the same block.
1698 if ((dbtob(qbp->b_blkno) & ~(AIX_VM_BLKSIZE-1)) ==
1699 ((dbtob(qnbp->b_blkno)+qnbp->b_bcount-1) & ~(AIX_VM_BLKSIZE-1))) {
1700 doMerge = 1; /* both in same block */
1702 else if ((qbp->b_bcount & (AIX_VM_BLKSIZE-1)) == 0
1703 && (qnbp->b_bcount & (AIX_VM_BLKSIZE-1)) == 0) {
1704 doMerge = 1; /* both integral #s of blocks */
1707 register struct buf *xbp;
1709 /* merge both of these blocks together */
1710 /* first set age to the older of the two */
1711 if ((int) qnbp->av_back - (int) qbp->av_back < 0)
1712 qbp->av_back = qnbp->av_back;
1713 lwbp = &qbp->b_work;
1714 /* find end of qbp's work queue */
1715 for(xbp = (struct buf *)(*lwbp); xbp;
1716 lwbp = &xbp->b_work, xbp = (struct buf *) (*lwbp));
1718 * now setting *lwbp will change the last ptr in the qbp's
1721 qbp->av_forw = qnbp->av_forw; /* splice out qnbp */
1722 qbp->b_bcount += qnbp->b_bcount; /* fix count */
1723 *lwbp = (int) qnbp; /* append qnbp to end */
1725 * note that qnbp is bogus, but it doesn't matter because
1726 * we're going to restart the for loop now.
1728 break; /* out of the for loop */
1732 } /* for loop for all interrupt data */
1733 /* at this point, all I/O has been queued. Wakeup the daemon */
1734 e_wakeup_one((int*) &afs_asyncbuf_cv);
1735 unlock_enable(oldPriority, &afs_asyncbuf_lock);
1740 afs_inactive(avc, acred)
1741 register struct vcache *avc;
1742 struct AFS_UCRED *acred;
1744 afs_InactiveVCache(avc, acred);
1751 AFS_STATCNT(afs_gn_revoke);
1752 /* NO SUPPORT for this in afs YET! */
1756 int afs_gn_getacl(vp, uiop, cred)
1765 int afs_gn_setacl(vp, uiop, cred)
1774 int afs_gn_getpcl(vp, uiop, cred)
1783 int afs_gn_setpcl(vp, uiop, cred)
1795 extern struct vfsops Afs_vfsops;
1796 extern struct vnodeops afs_gn_vnodeops;
1797 extern int Afs_init();
1799 #define AFS_CALLOUT_TBL_SIZE 256
1802 * the following additional layer of gorp is due to the fact that the
1803 * filesystem layer no longer obtains the kernel lock for me. I was relying
1804 * on this behavior to avoid having to think about locking.
1808 vfs_mount(struct vfs *a, struct ucred *b) {
1809 register glockOwner, ret;
1811 glockOwner = ISAFS_GLOCK();
1814 ret = (*Afs_vfsops.vfs_mount)(a, b);
1822 vfs_unmount(struct vfs *a, int b, struct ucred *c) {
1823 register glockOwner, ret;
1825 glockOwner = ISAFS_GLOCK();
1828 ret = (*Afs_vfsops.vfs_unmount)(a, b, c);
1836 vfs_root(struct vfs *a, struct vnode **b, struct ucred *c) {
1837 register glockOwner, ret;
1839 glockOwner = ISAFS_GLOCK();
1842 ret = (*Afs_vfsops.vfs_root)(a, b, c);
1850 vfs_statfs(struct vfs *a, struct statfs *b, struct ucred *c) {
1851 register glockOwner, ret;
1853 glockOwner = ISAFS_GLOCK();
1856 ret = (*Afs_vfsops.vfs_statfs)(a, b, c);
1864 vfs_sync(struct gfs *a) {
1865 register glockOwner, ret;
1867 glockOwner = ISAFS_GLOCK();
1870 ret = (*Afs_vfsops.vfs_sync)(a);
1877 vfs_vget(struct vfs *a, struct vnode **b, struct fileid *c
1878 , struct ucred *d) {
1879 register glockOwner, ret;
1881 glockOwner = ISAFS_GLOCK();
1884 ret = (*Afs_vfsops.vfs_vget)(a, b, c, d);
1892 vfs_cntl(struct vfs *a, int b, caddr_t c, size_t d, struct ucred *e) {
1893 register glockOwner, ret;
1895 glockOwner = ISAFS_GLOCK();
1898 ret = (*Afs_vfsops.vfs_cntl)(a, b, c, d, e);
1906 vfs_quotactl(struct vfs *a, int b, uid_t c, caddr_t d
1907 , struct ucred *e) {
1908 register glockOwner, ret;
1910 glockOwner = ISAFS_GLOCK();
1913 ret = (*Afs_vfsops.vfs_quotactl)(a, b, c, d, e);
1920 #ifdef AFS_AIX51_ENV
1922 vfs_syncvfs(struct gfs *a, struct vfs *b, int c, struct ucred *d)
1924 register glockOwner, ret;
1926 glockOwner = ISAFS_GLOCK();
1929 ret = (*Afs_vfsops.vfs_syncvfs)(a, b, c, d);
1938 struct vfsops locked_Afs_vfsops = {
1947 #ifdef AFS_AIX51_ENV
1953 vn_link(struct vnode *a, struct vnode *b, char *c, struct ucred *d) {
1954 register glockOwner, ret;
1956 glockOwner = ISAFS_GLOCK();
1959 ret = (*afs_gn_vnodeops.vn_link)(a, b, c, d);
1967 #ifdef AFS_AIX51_ENV
1968 vn_mkdir(struct vnode *a, char *b, int32long64_t c, struct ucred *d) {
1970 vn_mkdir(struct vnode *a, char *b, int c, struct ucred *d) {
1972 register glockOwner, ret;
1974 glockOwner = ISAFS_GLOCK();
1977 ret = (*afs_gn_vnodeops.vn_mkdir)(a, b, c, d);
1985 #ifdef AFS_AIX51_ENV
1986 vn_mknod(struct vnode *a, caddr_t b, int32long64_t c, dev_t d, struct ucred *e) {
1988 vn_mknod(struct vnode *a, caddr_t b, int c, dev_t d, struct ucred *e) {
1990 register glockOwner, ret;
1992 glockOwner = ISAFS_GLOCK();
1995 ret = (*afs_gn_vnodeops.vn_mknod)(a, b, c, d, e);
2003 vn_remove(struct vnode *a, struct vnode *b, char *c, struct ucred *d) {
2004 register glockOwner, ret;
2006 glockOwner = ISAFS_GLOCK();
2009 ret = (*afs_gn_vnodeops.vn_remove)(a, b, c, d);
2017 vn_rename(struct vnode *a, struct vnode *b, caddr_t c
2018 , struct vnode *d, struct vnode *e, caddr_t f, struct ucred *g) {
2019 register glockOwner, ret;
2021 glockOwner = ISAFS_GLOCK();
2024 ret = (*afs_gn_vnodeops.vn_rename)(a, b, c, d, e, f, g);
2032 vn_rmdir(struct vnode *a, struct vnode *b, char *c, struct ucred *d) {
2033 register glockOwner, ret;
2035 glockOwner = ISAFS_GLOCK();
2038 ret = (*afs_gn_vnodeops.vn_rmdir)(a, b, c, d);
2046 #ifdef AFS_AIX51_ENV
2047 vn_lookup(struct vnode *a, struct vnode **b, char *c, int32long64_t d,
2049 vn_lookup(struct vnode *a, struct vnode **b, char *c, int d,
2051 struct vattr *v, struct ucred *e) {
2052 register glockOwner, ret;
2054 glockOwner = ISAFS_GLOCK();
2057 ret = (*afs_gn_vnodeops.vn_lookup)(a, b, c, d, v, e);
2065 vn_fid(struct vnode *a, struct fileid *b, struct ucred *c) {
2066 register glockOwner, ret;
2068 glockOwner = ISAFS_GLOCK();
2071 ret = (*afs_gn_vnodeops.vn_fid)(a, b, c);
2079 #ifdef AFS_AIX51_ENV
2080 vn_open(struct vnode *a, int b, int c, caddr_t *d, struct ucred *e) {
2082 vn_open(struct vnode *a, int32long64_t b, ext_t c, caddr_t *d, struct ucred *e) {
2084 register glockOwner, ret;
2086 glockOwner = ISAFS_GLOCK();
2089 ret = (*afs_gn_vnodeops.vn_open)(a, b, c, d, e);
2097 #ifdef AFS_AIX51_ENV
2098 vn_create(struct vnode *a, struct vnode **b, int32long64_t c, caddr_t d
2099 , int32long64_t e, caddr_t *f, struct ucred *g) {
2101 vn_create(struct vnode *a, struct vnode **b, int c, caddr_t d
2102 , int e, caddr_t *f, struct ucred *g) {
2104 register glockOwner, ret;
2106 glockOwner = ISAFS_GLOCK();
2109 ret = (*afs_gn_vnodeops.vn_create)(a, b, c, d, e, f, g);
2117 vn_hold(struct vnode *a) {
2118 register glockOwner, ret;
2120 glockOwner = ISAFS_GLOCK();
2123 ret = (*afs_gn_vnodeops.vn_hold)(a);
2131 vn_rele(struct vnode *a) {
2132 register glockOwner, ret;
2134 glockOwner = ISAFS_GLOCK();
2137 ret = (*afs_gn_vnodeops.vn_rele)(a);
2145 #ifdef AFS_AIX51_ENV
2146 vn_close(struct vnode *a, int32long64_t b, caddr_t c, struct ucred *d) {
2148 vn_close(struct vnode *a, int b, caddr_t c, struct ucred *d) {
2150 register glockOwner, ret;
2152 glockOwner = ISAFS_GLOCK();
2155 ret = (*afs_gn_vnodeops.vn_close)(a, b, c, d);
2163 #ifdef AFS_AIX51_ENV
2164 vn_map(struct vnode *a, caddr_t b, uint32long64_t c, uint32long64_t d, uint32long64_t e, struct ucred *f) {
2166 vn_map(struct vnode *a, caddr_t b, uint c, uint d, uint e, struct ucred *f) {
2168 register glockOwner, ret;
2170 glockOwner = ISAFS_GLOCK();
2173 ret = (*afs_gn_vnodeops.vn_map)(a, b, c, d, e, f);
2181 #ifdef AFS_AIX51_ENV
2182 vn_unmap(struct vnode *a, int32long64_t b, struct ucred *c) {
2184 vn_unmap(struct vnode *a, int b, struct ucred *c) {
2186 register glockOwner, ret;
2188 glockOwner = ISAFS_GLOCK();
2191 ret = (*afs_gn_vnodeops.vn_unmap)(a, b, c);
2199 #ifdef AFS_AIX51_ENV
2200 vn_access(struct vnode *a, int32long64_t b, int32long64_t c, struct ucred *d) {
2202 vn_access(struct vnode *a, int b, int c, struct ucred *d) {
2204 register glockOwner, ret;
2206 glockOwner = ISAFS_GLOCK();
2209 ret = (*afs_gn_vnodeops.vn_access)(a, b, c, d);
2217 vn_getattr(struct vnode *a, struct vattr *b, struct ucred *c) {
2218 register glockOwner, ret;
2220 glockOwner = ISAFS_GLOCK();
2223 ret = (*afs_gn_vnodeops.vn_getattr)(a, b, c);
2231 #ifdef AFS_AIX51_ENV
2232 vn_setattr(struct vnode *a, int32long64_t b, int32long64_t c, int32long64_t d, int32long64_t e, struct ucred *f) {
2234 vn_setattr(struct vnode *a, int b, int c, int d, int e, struct ucred *f) {
2236 register glockOwner, ret;
2238 glockOwner = ISAFS_GLOCK();
2241 ret = (*afs_gn_vnodeops.vn_setattr)(a, b, c, d, e, f);
2249 #ifdef AFS_AIX51_ENV
2250 vn_fclear(struct vnode *a, int32long64_t b, offset_t c, offset_t d
2252 vn_fclear(struct vnode *a, int b, offset_t c, offset_t d
2254 , caddr_t e, struct ucred *f) {
2255 register glockOwner, ret;
2257 glockOwner = ISAFS_GLOCK();
2260 ret = (*afs_gn_vnodeops.vn_fclear)(a, b, c, d, e, f);
2268 #ifdef AFS_AIX51_ENV
2269 vn_fsync(struct vnode *a, int32long64_t b, int32long64_t c, struct ucred *d) {
2271 vn_fsync(struct vnode *a, int b, int c, struct ucred *d) {
2273 register glockOwner, ret;
2275 glockOwner = ISAFS_GLOCK();
2278 ret = (*afs_gn_vnodeops.vn_fsync)(a, b, c, d);
2286 #ifdef AFS_AIX51_ENV
2287 vn_ftrunc(struct vnode *a, int32long64_t b, offset_t c, caddr_t d, struct ucred *e) {
2289 vn_ftrunc(struct vnode *a, int b, offset_t c, caddr_t d, struct ucred *e) {
2291 register glockOwner, ret;
2293 glockOwner = ISAFS_GLOCK();
2296 ret = (*afs_gn_vnodeops.vn_ftrunc)(a, b, c, d, e);
2304 #ifdef AFS_AIX51_ENV
2305 vn_rdwr(struct vnode *a, enum uio_rw b, int32long64_t c, struct uio *d
2306 , ext_t e, caddr_t f, struct vattr *v, struct ucred *g) {
2308 vn_rdwr(struct vnode *a, enum uio_rw b, int c, struct uio *d
2309 , int e, caddr_t f, struct vattr *v, struct ucred *g) {
2311 register glockOwner, ret;
2313 glockOwner = ISAFS_GLOCK();
2316 ret = (*afs_gn_vnodeops.vn_rdwr)(a, b, c, d, e, f, v, g);
2324 #ifdef AFS_AIX51_ENV
2325 vn_lockctl(struct vnode *a, offset_t b, struct eflock *c, int32long64_t d
2326 , int (*e)(), ulong32int64_t *f, struct ucred *g) {
2328 vn_lockctl(struct vnode *a, offset_t b, struct eflock *c, int d
2329 , int (*e)(), ulong *f, struct ucred *g) {
2331 register glockOwner, ret;
2333 glockOwner = ISAFS_GLOCK();
2336 ret = (*afs_gn_vnodeops.vn_lockctl)(a, b, c, d, e, f, g);
2344 #ifdef AFS_AIX51_ENV
2345 vn_ioctl(struct vnode *a, int32long64_t b, caddr_t c, size_t d, ext_t e, struct ucred *f) {
2347 vn_ioctl(struct vnode *a, int b, caddr_t c, size_t d, int e, struct ucred *f) {
2349 register glockOwner, ret;
2351 glockOwner = ISAFS_GLOCK();
2354 ret = (*afs_gn_vnodeops.vn_ioctl)(a, b, c, d, e, f);
2362 vn_readlink(struct vnode *a, struct uio *b, struct ucred *c) {
2363 register glockOwner, ret;
2365 glockOwner = ISAFS_GLOCK();
2368 ret = (*afs_gn_vnodeops.vn_readlink)(a, b, c);
2376 #ifdef AFS_AIX51_ENV
2377 vn_select(struct vnode *a, int32long64_t b, ushort c, ushort *d, void (*e)()
2379 vn_select(struct vnode *a, int b, ushort c, ushort *d, void (*e)()
2381 , caddr_t f, struct ucred *g) {
2382 register glockOwner, ret;
2384 glockOwner = ISAFS_GLOCK();
2387 ret = (*afs_gn_vnodeops.vn_select)(a, b, c, d, e, f, g);
2395 vn_symlink(struct vnode *a, char *b, char *c, struct ucred *d) {
2396 register glockOwner, ret;
2398 glockOwner = ISAFS_GLOCK();
2401 ret = (*afs_gn_vnodeops.vn_symlink)(a, b, c, d);
2409 vn_readdir(struct vnode *a, struct uio *b, struct ucred *c) {
2410 register glockOwner, ret;
2412 glockOwner = ISAFS_GLOCK();
2415 ret = (*afs_gn_vnodeops.vn_readdir)(a, b, c);
2423 #ifdef AFS_AIX51_ENV
2424 vn_revoke(struct vnode *a, int32long64_t b, int32long64_t c, struct vattr *d, struct ucred *e) {
2426 vn_revoke(struct vnode *a, int b, int c, struct vattr *d, struct ucred *e) {
2428 register glockOwner, ret;
2430 glockOwner = ISAFS_GLOCK();
2433 ret = (*afs_gn_vnodeops.vn_revoke)(a, b, c, d, e);
2441 vn_getacl(struct vnode *a, struct uio *b, struct ucred *c) {
2442 register glockOwner, ret;
2444 glockOwner = ISAFS_GLOCK();
2447 ret = (*afs_gn_vnodeops.vn_getacl)(a, b, c);
2455 vn_setacl(struct vnode *a, struct uio *b, struct ucred *c) {
2456 register glockOwner, ret;
2458 glockOwner = ISAFS_GLOCK();
2461 ret = (*afs_gn_vnodeops.vn_setacl)(a, b, c);
2469 vn_getpcl(struct vnode *a, struct uio *b, struct ucred *c) {
2470 register glockOwner, ret;
2472 glockOwner = ISAFS_GLOCK();
2475 ret = (*afs_gn_vnodeops.vn_getpcl)(a, b, c);
2483 vn_setpcl(struct vnode *a, struct uio *b, struct ucred *c) {
2484 register glockOwner, ret;
2486 glockOwner = ISAFS_GLOCK();
2489 ret = (*afs_gn_vnodeops.vn_setpcl)(a, b, c);
2496 extern int afs_gn_strategy();
2498 struct vnodeops locked_afs_gn_vnodeops = {
2527 afs_gn_strategy, /* no locking!!! (discovered the hard way) */
2533 afs_gn_enosys, /* vn_seek */
2534 afs_gn_enosys, /* vn_fsync_range */
2535 afs_gn_enosys, /* vn_create_attr */
2536 afs_gn_enosys, /* vn_finfo */
2537 afs_gn_enosys, /* vn_map_lloff */
2538 afs_gn_enosys, /* vn_readdir_eofp */
2539 afs_gn_enosys, /* vn_rdwr_attr */
2540 afs_gn_enosys, /* vn_memcntl */
2541 afs_gn_enosys, /* vn_spare7 */
2542 afs_gn_enosys, /* vn_spare8 */
2543 afs_gn_enosys, /* vn_spare9 */
2544 afs_gn_enosys, /* vn_spareA */
2545 afs_gn_enosys, /* vn_spareB */
2546 afs_gn_enosys, /* vn_spareC */
2547 afs_gn_enosys, /* vn_spareD */
2548 afs_gn_enosys, /* vn_spareE */
2549 afs_gn_enosys /* vn_spareF */
2550 #ifdef AFS_AIX51_ENV
2551 ,afs_gn_enosys, /* pagerBackRange */
2552 afs_gn_enosys, /* pagerGetFileSize */
2553 afs_gn_enosys, /* pagerReadAhead */
2554 afs_gn_enosys, /* pagerWriteBehind */
2555 afs_gn_enosys /* pagerEndCopy */
2559 struct gfs afs_gfs = {
2561 &locked_afs_gn_vnodeops,
2565 GFS_VERSION4 | GFS_REMOTE,