2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
31 #include "rpc/types.h"
33 #include "netinet/in.h"
37 #include "rpc/types.h"
41 #include "afs/afs_osi.h"
42 #define RFTP_INTERNALS 1
43 #include "afs/volerrors.h"
47 #include "afs/exporter.h"
49 #include "afs/afs_chunkops.h"
50 #include "afs/afs_stats.h"
51 #include "afs/nfsclient.h"
53 #include "afs/prs_fs.h"
55 #include "afsincludes.h"
59 * declare all the functions so they can be used to init the table
61 /* creation/naming/deletion */
68 /* lookup, file handle stuff */
79 /* manipulate attributes of files */
83 /* data update operations */
91 int afs_gn_readlink();
96 int afs_gn_strategy();
107 * declare a struct vnodeops and initialize it with ptrs to all functions
109 struct vnodeops afs_gn_vnodeops = {
110 /* creation/naming/deletion */
117 /* lookup, file handle stuff */
120 /* access to files */
128 /* manipulate attributes of files */
132 /* data update operations */
146 /* security things */
152 afs_gn_enosys, /* vn_seek */
153 afs_gn_enosys, /* vn_fsync_range */
154 afs_gn_enosys, /* vn_create_attr */
155 afs_gn_enosys, /* vn_finfo */
156 afs_gn_enosys, /* vn_map_lloff */
157 afs_gn_enosys, /* vn_readdir_eofp */
158 afs_gn_enosys, /* vn_rdwr_attr */
159 afs_gn_enosys, /* vn_memcntl */
160 afs_gn_enosys, /* vn_spare7 */
161 afs_gn_enosys, /* vn_spare8 */
162 afs_gn_enosys, /* vn_spare9 */
163 afs_gn_enosys, /* vn_spareA */
164 afs_gn_enosys, /* vn_spareB */
165 afs_gn_enosys, /* vn_spareC */
166 afs_gn_enosys, /* vn_spareD */
167 afs_gn_enosys, /* vn_spareE */
168 afs_gn_enosys /* vn_spareF */
170 , afs_gn_enosys, /* pagerBackRange */
171 afs_gn_enosys, /* pagerGetFileSize */
172 afs_gn_enosys, /* pagerReadAhead */
173 afs_gn_enosys, /* pagerWriteBehind */
174 afs_gn_enosys /* pagerEndCopy */
177 struct vnodeops *afs_ops = &afs_gn_vnodeops;
181 afs_gn_link(vp, dp, name, cred)
189 AFS_STATCNT(afs_gn_link);
190 error = afs_link(vp, dp, name, cred);
191 afs_Trace3(afs_iclSetp, CM_TRACE_GNLINK, ICL_TYPE_POINTER, vp,
192 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
198 afs_gn_mkdir(dp, name, Mode, cred)
213 AFS_STATCNT(afs_gn_mkdir);
216 va.va_mode = (mode & 07777) & ~get_umask();
217 error = afs_mkdir(dp, name, &va, &vp, cred);
221 afs_Trace4(afs_iclSetp, CM_TRACE_GMKDIR, ICL_TYPE_POINTER, vp,
222 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
229 afs_gn_mknod(dp, name, Mode, dev, cred)
245 AFS_STATCNT(afs_gn_mknod);
247 va.va_type = IFTOVT(mode);
248 va.va_mode = (mode & 07777) & ~get_umask();
250 /**** I'm not sure if suser() should stay here since it makes no sense in AFS; however the documentation says that one "should be super-user unless making a FIFO file. Others systems such as SUN do this checking in the early stages of mknod (before the abstraction), so it's equivalently the same! *****/
251 if (va.va_type != VFIFO && !suser(&error))
253 switch (va.va_type) {
255 error = afs_mkdir(dp, name, &va, &vp, cred);
265 error = afs_create(dp, name, &va, NONEXCL, mode, &vp, cred);
270 afs_Trace4(afs_iclSetp, CM_TRACE_GMKNOD, ICL_TYPE_POINTER, (afs_int32) vp,
271 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
278 afs_gn_remove(vp, dp, name, cred)
279 struct vnode *vp; /* Ignored in AFS */
286 AFS_STATCNT(afs_gn_remove);
287 error = afs_remove(dp, name, cred);
288 afs_Trace3(afs_iclSetp, CM_TRACE_GREMOVE, ICL_TYPE_POINTER, dp,
289 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
295 afs_gn_rename(vp, dp, name, tp, tdp, tname, cred)
298 struct vnode *vp; /* Ignored in AFS */
299 struct vnode *tp; /* Ignored in AFS */
306 AFS_STATCNT(afs_gn_rename);
307 error = afs_rename(dp, name, tdp, tname, cred);
308 afs_Trace4(afs_iclSetp, CM_TRACE_GRENAME, ICL_TYPE_POINTER, dp,
309 ICL_TYPE_STRING, name, ICL_TYPE_STRING, tname, ICL_TYPE_LONG,
316 afs_gn_rmdir(vp, dp, name, cred)
317 struct vnode *vp; /* Ignored in AFS */
324 AFS_STATCNT(afs_gn_rmdir);
325 error = afs_rmdir(dp, name, cred);
327 if (error == 66 /* 4.3's ENOTEMPTY */ )
328 error = EEXIST; /* AIX returns EEXIST where 4.3 used ENOTEMPTY */
330 afs_Trace3(afs_iclSetp, CM_TRACE_GRMDIR, ICL_TYPE_POINTER, dp,
331 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
337 afs_gn_lookup(dp, vpp, name, Flags, vattrp, cred)
338 struct vattr *vattrp;
343 int32long64_t Flags; /* includes FOLLOW... */
345 afs_uint32 Flags; /* includes FOLLOW... */
352 AFS_STATCNT(afs_gn_lookup);
353 error = afs_lookup(dp, name, vpp, cred);
354 afs_Trace3(afs_iclSetp, CM_TRACE_GLOOKUP, ICL_TYPE_POINTER, dp,
355 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
356 if (vattrp != NULL && error == 0)
357 afs_gn_getattr(*vpp, vattrp, cred);
363 afs_gn_fid(vp, fidp, cred)
370 AFS_STATCNT(afs_gn_fid);
371 error = afs_fid(vp, fidp);
372 afs_Trace3(afs_iclSetp, CM_TRACE_GFID, ICL_TYPE_POINTER, vp,
373 ICL_TYPE_LONG, (afs_int32) fidp, ICL_TYPE_LONG, error);
379 afs_gn_open(vp, Flags, ext, vinfop, cred)
383 ext_t ext; /* Ignored in AFS */
386 int ext; /* Ignored in AFS */
388 struct ucred **vinfop; /* return ptr for fp->f_vinfo, used as fp->f_cred */
393 struct vcache *tvp = VTOAFS(vp);
397 AFS_STATCNT(afs_gn_open);
403 if ((flags & FWRITE) || (flags & FTRUNC))
406 while ((flags & FNSHARE) && tvp->opens) {
407 if (!(flags & FDELAY)) {
411 afs_osi_Sleep(&tvp->opens);
414 error = afs_access(vp, modes, cred);
419 error = afs_open(&vp, flags, cred);
421 if (flags & FTRUNC) {
424 error = afs_setattr(vp, &va, cred);
428 tvp->states |= CNSHARE;
431 *vinfop = cred; /* fp->f_vinfo is like fp->f_cred in suns */
433 /* an error occurred; we've told CM that the file
434 * is open, so close it now so that open and
435 * writer counts are correct. Ignore error code,
436 * as it is likely to fail (the setattr just did).
438 afs_close(vp, flags, cred);
443 afs_Trace3(afs_iclSetp, CM_TRACE_GOPEN, ICL_TYPE_POINTER, vp,
444 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
450 afs_gn_create(dp, vpp, Flags, name, Mode, vinfop, cred)
461 struct ucred **vinfop; /* return ptr for fp->f_vinfo, used as fp->f_cred */
465 enum vcexcl exclusive;
466 int error, modes = 0;
470 AFS_STATCNT(afs_gn_create);
471 if ((flags & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))
477 va.va_mode = (mode & 07777) & ~get_umask();
482 if ((flags & FWRITE) || (flags & FTRUNC))
484 error = afs_create(dp, name, &va, exclusive, modes, vpp, cred);
488 /* 'cr_luid' is a flag (when it comes thru the NFS server it's set to
489 * RMTUSER_REQ) that determines if we should call afs_open(). We shouldn't
490 * call it when this NFS traffic since the close will never happen thus
491 * we'd never flush the files out to the server! Gross but the simplest
492 * solution we came out with */
493 if (cred->cr_luid != RMTUSER_REQ) {
494 while ((flags & FNSHARE) && VTOAFS(*vpp)->opens) {
495 if (!(flags & FDELAY))
497 afs_osi_Sleep(&VTOAFS(*vpp)->opens);
499 /* Since in the standard copen() for bsd vnode kernels they do an
500 * vop_open after the vop_create, we must do the open here since there
501 * are stuff in afs_open that we need. For example advance the
502 * execsOrWriters flag (else we'll be treated as the sun's "core"
504 *vinfop = cred; /* save user creds in fp->f_vinfo */
505 error = afs_open(vpp, flags, cred);
507 afs_Trace4(afs_iclSetp, CM_TRACE_GCREATE, ICL_TYPE_POINTER, dp,
508 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
518 AFS_STATCNT(afs_gn_hold);
529 struct vcache *vcp = VTOAFS(vp);
532 AFS_STATCNT(afs_gn_rele);
533 if (vp->v_count == 0)
534 osi_Panic("afs_rele: zero v_count");
535 if (--(vp->v_count) == 0) {
536 if (vcp->states & CPageHog) {
538 vcp->states &= ~CPageHog;
540 error = afs_inactive(vp, 0);
547 afs_gn_close(vp, Flags, vinfo, cred)
554 caddr_t vinfo; /* Ignored in AFS */
558 struct vcache *tvp = VTOAFS(vp);
561 AFS_STATCNT(afs_gn_close);
563 if (flags & FNSHARE) {
564 tvp->states &= ~CNSHARE;
565 afs_osi_Wakeup(&tvp->opens);
568 error = afs_close(vp, flags, cred);
569 afs_Trace3(afs_iclSetp, CM_TRACE_GCLOSE, ICL_TYPE_POINTER, (afs_int32) vp,
570 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
576 afs_gn_map(vp, addr, Len, Off, Flag, cred)
580 uint32long64_t Len, Off, Flag;
582 u_int Len, Off, Flag;
586 struct vcache *vcp = VTOAFS(vp);
587 struct vrequest treq;
591 afs_int32 flag = Flag;
593 AFS_STATCNT(afs_gn_map);
595 if (error = afs_InitReq(&treq, cred))
597 error = afs_VerifyVCache(vcp, &treq);
599 return afs_CheckCode(error, &treq, 49);
601 osi_FlushPages(vcp, cred); /* XXX ensure old pages are gone XXX */
602 ObtainWriteLock(&vcp->lock, 401);
603 vcp->states |= CMAPPED; /* flag cleared at afs_inactive */
605 * We map the segment into our address space using the handle returned by vm_create.
608 afs_uint32 tlen = vcp->m.Length;
609 #ifdef AFS_64BIT_CLIENT
610 if (vcp->m.Length > afs_vmMappingEnd)
611 tlen = afs_vmMappingEnd;
613 /* Consider V_INTRSEG too for interrupts */
615 vms_create(&vcp->segid, V_CLIENT, vcp->v.v_gnode, tlen, 0, 0)) {
616 ReleaseWriteLock(&vcp->lock);
619 #ifdef AFS_64BIT_KERNEL
620 vcp->vmh = vm_handle(vcp->segid, (int32long64_t) 0);
622 vcp->vmh = SRVAL(vcp->segid, 0, 0);
625 vcp->v.v_gnode->gn_seg = vcp->segid; /* XXX Important XXX */
626 if (flag & SHM_RDONLY) {
627 vp->v_gnode->gn_mrdcnt++;
629 vp->v_gnode->gn_mwrcnt++;
632 * We keep the caller's credentials since an async daemon will handle the
633 * request at some point. We assume that the same credentials will be used.
635 if (!vcp->credp || (vcp->credp != cred)) {
638 struct ucred *crp = vcp->credp;
644 ReleaseWriteLock(&vcp->lock);
646 afs_Trace4(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vp,
647 ICL_TYPE_LONG, addr, ICL_TYPE_LONG, len, ICL_TYPE_LONG, off);
653 afs_gn_unmap(vp, flag, cred)
662 struct vcache *vcp = VTOAFS(vp);
663 AFS_STATCNT(afs_gn_unmap);
664 ObtainWriteLock(&vcp->lock, 402);
665 if (flag & SHM_RDONLY) {
666 vp->v_gnode->gn_mrdcnt--;
667 if (vp->v_gnode->gn_mrdcnt <= 0)
668 vp->v_gnode->gn_mrdcnt = 0;
670 vp->v_gnode->gn_mwrcnt--;
671 if (vp->v_gnode->gn_mwrcnt <= 0)
672 vp->v_gnode->gn_mwrcnt = 0;
674 ReleaseWriteLock(&vcp->lock);
682 afs_gn_access(vp, Mode, Who, cred)
698 AFS_STATCNT(afs_gn_access);
704 error = afs_access(vp, mode, cred);
706 /* Additional testing */
707 if (who == ACC_OTHERS || who == ACC_ANY) {
708 error = afs_getattr(vp, &vattr, cred);
710 if (who == ACC_ANY) {
711 if (((vattr.va_mode >> 6) & mode) == mode) {
716 if (((vattr.va_mode >> 3) & mode) == mode)
721 } else if (who == ACC_ALL) {
722 error = afs_getattr(vp, &vattr, cred);
724 if ((!((vattr.va_mode >> 6) & mode))
725 || (!((vattr.va_mode >> 3) & mode))
726 || (!(vattr.va_mode & mode)))
735 afs_Trace3(afs_iclSetp, CM_TRACE_GACCESS, ICL_TYPE_POINTER, vp,
736 ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
742 afs_gn_getattr(vp, vattrp, cred)
744 struct vattr *vattrp;
749 AFS_STATCNT(afs_gn_getattr);
750 error = afs_getattr(vp, vattrp, cred);
751 afs_Trace2(afs_iclSetp, CM_TRACE_GGETATTR, ICL_TYPE_POINTER, vp,
752 ICL_TYPE_LONG, error);
758 afs_gn_setattr(vp, op, arg1, arg2, arg3, cred)
776 AFS_STATCNT(afs_gn_setattr);
784 if ((arg1 & T_OWNER_AS_IS) == 0)
786 if ((arg1 & T_GROUP_AS_IS) == 0)
791 error = afs_access(vp, VWRITE, cred);
795 if (arg1 & T_SETTIME) {
796 va.va_atime.tv_sec = time;
797 va.va_mtime.tv_sec = time;
799 va.va_atime = *(struct timestruc_t *)arg2;
800 va.va_mtime = *(struct timestruc_t *)arg3;
808 error = afs_setattr(vp, &va, cred);
810 afs_Trace2(afs_iclSetp, CM_TRACE_GSETATTR, ICL_TYPE_POINTER, vp,
811 ICL_TYPE_LONG, error);
816 char zero_buffer[PAGESIZE];
818 afs_gn_fclear(vp, flags, offset, length, vinfo, cred)
830 int i, len, error = 0;
833 static int fclear_init = 0;
834 register struct vcache *avc = VTOAFS(vp);
836 AFS_STATCNT(afs_gn_fclear);
838 memset(zero_buffer, 0, PAGESIZE);
842 * Don't clear past ulimit
844 if (offset + length > get_ulimit())
847 /* Flush all pages first */
850 vm_flushp(avc->segid, 0, MAXFSIZE / PAGESIZE - 1);
851 vms_iowait(avc->segid);
854 uio.afsio_offset = offset;
855 for (i = offset; i < offset + length; i = uio.afsio_offset) {
856 len = offset + length - i;
857 iov.iov_len = (len > PAGESIZE) ? PAGESIZE : len;
858 iov.iov_base = zero_buffer;
859 uio.afsio_iov = &iov;
860 uio.afsio_iovcnt = 1;
861 uio.afsio_seg = AFS_UIOSYS;
862 uio.afsio_resid = iov.iov_len;
863 if (error = afs_rdwr(vp, &uio, UIO_WRITE, 0, cred))
866 afs_Trace4(afs_iclSetp, CM_TRACE_GFCLEAR, ICL_TYPE_POINTER, vp,
867 ICL_TYPE_LONG, offset, ICL_TYPE_LONG, length, ICL_TYPE_LONG,
874 afs_gn_fsync(vp, flags, vinfo, cred)
877 int32long64_t flags; /* Not used by AFS */
878 int32long64_t vinfo; /* Not used by AFS */
880 int flags; /* Not used by AFS */
881 caddr_t vinfo; /* Not used by AFS */
887 AFS_STATCNT(afs_gn_fsync);
888 error = afs_fsync(vp, cred);
889 afs_Trace3(afs_iclSetp, CM_TRACE_GFSYNC, ICL_TYPE_POINTER, vp,
890 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
896 afs_gn_ftrunc(vp, flags, length, vinfo, cred)
899 int32long64_t flags; /* Ignored in AFS */
901 int flags; /* Ignored in AFS */
904 caddr_t vinfo; /* Ignored in AFS */
910 AFS_STATCNT(afs_gn_ftrunc);
913 error = afs_setattr(vp, &va, cred);
914 afs_Trace4(afs_iclSetp, CM_TRACE_GFTRUNC, ICL_TYPE_POINTER, vp,
915 ICL_TYPE_LONG, flags, ICL_TYPE_OFFSET,
916 ICL_HANDLE_OFFSET(length), ICL_TYPE_LONG, error);
920 /* Min size of a file which is dumping core before we declare it a page hog. */
921 #define MIN_PAGE_HOG_SIZE 8388608
924 afs_gn_rdwr(vp, op, Flags, ubuf, ext, vinfo, vattrp, cred)
929 ext_t ext; /* Ignored in AFS */
932 int ext; /* Ignored in AFS */
935 caddr_t vinfo; /* Ignored in AFS */
936 struct vattr *vattrp;
939 register struct vcache *vcp = VTOAFS(vp);
940 struct vrequest treq;
945 AFS_STATCNT(afs_gn_rdwr);
948 if (op == UIO_WRITE) {
949 afs_Trace2(afs_iclSetp, CM_TRACE_GRDWR1, ICL_TYPE_POINTER, vp,
950 ICL_TYPE_LONG, vcp->vc_error);
951 return vcp->vc_error;
956 ObtainSharedLock(&vcp->lock, 507);
958 * We keep the caller's credentials since an async daemon will handle the
959 * request at some point. We assume that the same credentials will be used.
960 * If this is being called from an NFS server thread, then dupe the
961 * cred and only use that copy in calls and for the stach.
963 if (!vcp->credp || (vcp->credp != cred)) {
964 #ifdef AFS_AIX_IAUTH_ENV
965 if (AFS_NFSXLATORREQ(cred)) {
966 /* Must be able to use cred later, so dupe it so that nfs server
967 * doesn't overwrite it's contents.
973 crhold(cred); /* Bump refcount for reference in vcache */
977 UpgradeSToWLock(&vcp->lock, 508);
980 ConvertWToSLock(&vcp->lock);
985 ReleaseSharedLock(&vcp->lock);
988 * XXX Is the following really required?? XXX
990 if (error = afs_InitReq(&treq, cred))
992 if (error = afs_VerifyVCache(vcp, &treq))
993 return afs_CheckCode(error, &treq, 50);
994 osi_FlushPages(vcp, cred); /* Flush old pages */
996 if (AFS_NFSXLATORREQ(cred)) {
999 if (op == UIO_READ) {
1001 (vcp, PRSFS_READ, &treq,
1002 CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) {
1011 * We have to bump the open/exwriters field here courtesy of the nfs xlator
1012 * because there're no open/close nfs rpcs to call our afs_open/close.
1013 * We do a similar thing on the afs_read/write interface.
1015 if (op == UIO_WRITE) {
1016 #ifdef AFS_64BIT_CLIENT
1017 if (ubuf->afsio_offset < afs_vmMappingEnd) {
1018 #endif /* AFS_64BIT_ENV */
1019 ObtainWriteLock(&vcp->lock, 240);
1020 vcp->states |= CDirty; /* Set the dirty bit */
1022 ReleaseWriteLock(&vcp->lock);
1023 #ifdef AFS_64BIT_CLIENT
1025 #endif /* AFS_64BIT_ENV */
1028 error = afs_vm_rdwr(vp, ubuf, op, flags, cred);
1030 if (op == UIO_WRITE) {
1031 #ifdef AFS_64BIT_CLIENT
1032 if (ubuf->afsio_offset < afs_vmMappingEnd) {
1033 #endif /* AFS_64BIT_ENV */
1034 ObtainWriteLock(&vcp->lock, 241);
1035 afs_FakeClose(vcp, cred); /* XXXX For nfs trans and cores XXXX */
1036 ReleaseWriteLock(&vcp->lock);
1037 #ifdef AFS_64BIT_CLIENT
1039 #endif /* AFS_64BIT_ENV */
1041 if (vattrp != NULL && error == 0)
1042 afs_gn_getattr(vp, vattrp, cred);
1044 afs_Trace4(afs_iclSetp, CM_TRACE_GRDWR, ICL_TYPE_POINTER, vp,
1045 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, op, ICL_TYPE_LONG, error);
1052 #define AFS_MAX_VM_CHUNKS 10
1053 afs_vm_rdwr(vp, uiop, rw, ioflag, credp)
1054 register struct vnode *vp;
1058 struct ucred *credp;
1060 register afs_int32 code = 0;
1062 afs_int32 blockSize;
1063 afs_size_t fileSize, xfrOffset, offset, old_offset, xfrSize;
1065 #ifdef AFS_64BIT_CLIENT
1066 afs_size_t finalOffset;
1069 #endif /* AFS_64BIT_CLIENT */
1070 register struct vcache *vcp = VTOAFS(vp);
1072 afs_size_t start_offset;
1073 afs_int32 save_resid = uiop->afsio_resid;
1074 int first_page, last_page, pages;
1077 struct vrequest treq;
1079 if (code = afs_InitReq(&treq, credp))
1082 /* special case easy transfer; apparently a lot are done */
1083 if ((xfrSize = uiop->afsio_resid) == 0)
1086 ObtainReadLock(&vcp->lock);
1087 fileSize = vcp->m.Length;
1088 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
1089 uiop->afsio_offset = fileSize;
1091 /* compute xfrOffset now, and do some checks */
1092 xfrOffset = uiop->afsio_offset;
1093 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
1097 #ifndef AFS_64BIT_CLIENT
1098 /* check for "file too big" error, which should really be done above us */
1099 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
1103 #endif /* AFS_64BIT_CLIENT */
1105 #ifdef AFS_64BIT_CLIENT
1106 if (xfrOffset + xfrSize > afs_vmMappingEnd) {
1107 if (xfrOffset < afs_vmMappingEnd) {
1108 /* special case of a buffer crossing the VM mapping line */
1110 struct iovec tvec[16]; /* Should have access to #define */
1114 finalOffset = xfrOffset + xfrSize;
1115 tsize = (afs_size_t) (xfrOffset + xfrSize - afs_vmMappingEnd);
1117 afsio_copy(uiop, &tuio, tvec);
1118 afsio_skip(&tuio, txfrSize - tsize);
1119 afsio_trim(&tuio, tsize);
1120 tuio.afsio_offset = afs_vmMappingEnd;
1121 ReleaseReadLock(&vcp->lock);
1122 ObtainWriteLock(&vcp->lock, 243);
1123 afs_FakeClose(vcp, credp); /* XXXX For nfs trans and cores XXXX */
1124 ReleaseWriteLock(&vcp->lock);
1125 code = afs_direct_rdwr(vp, &tuio, rw, ioflag, credp);
1126 ObtainWriteLock(&vcp->lock, 244);
1127 afs_FakeOpen(vcp); /* XXXX For nfs trans and cores XXXX */
1128 ReleaseWriteLock(&vcp->lock);
1129 ObtainReadLock(&vcp->lock);
1132 xfrSize = afs_vmMappingEnd - xfrOffset;
1134 afsio_trim(uiop, txfrSize);
1136 ReleaseReadLock(&vcp->lock);
1137 code = afs_direct_rdwr(vp, uiop, rw, ioflag, credp);
1141 #endif /* AFS_64BIT_CLIENT */
1144 afs_uint32 tlen = vcp->m.Length;
1145 #ifdef AFS_64BIT_CLIENT
1146 if (vcp->m.Length > afs_vmMappingEnd)
1147 tlen = afs_vmMappingEnd;
1149 /* Consider V_INTRSEG too for interrupts */
1151 vms_create(&vcp->segid, V_CLIENT, vcp->v.v_gnode, tlen, 0, 0)) {
1154 #ifdef AFS_64BIT_KERNEL
1155 vcp->vmh = vm_handle(vcp->segid, (int32long64_t) 0);
1157 vcp->vmh = SRVAL(vcp->segid, 0, 0);
1160 vcp->v.v_gnode->gn_seg = vcp->segid;
1161 if (rw == UIO_READ) {
1162 /* don't read past EOF */
1163 if (xfrSize + xfrOffset > fileSize)
1164 xfrSize = fileSize - xfrOffset;
1167 ReleaseReadLock(&vcp->lock);
1168 #ifdef AFS_64BIT_CLIENT
1169 toffset = xfrOffset;
1170 uiop->afsio_offset = xfrOffset;
1171 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE, ICL_TYPE_POINTER, vcp,
1172 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrOffset),
1173 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrSize));
1176 code = vm_move(vcp->segid, toffset, txfrSize, rw, uiop);
1177 #else /* AFS_64BIT_CLIENT */
1179 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
1180 #endif /* AFS_64BIT_CLIENT */
1183 * If at a chunk boundary and staying within chunk,
1184 * start prefetch of next chunk.
1186 if (counter == 0 || AFS_CHUNKOFFSET(xfrOffset) == 0
1187 && xfrSize <= AFS_CHUNKSIZE(xfrOffset)) {
1188 ObtainWriteLock(&vcp->lock, 407);
1189 tdc = afs_FindDCache(vcp, xfrOffset);
1191 if (!(tdc->mflags & DFNextStarted))
1192 afs_PrefetchChunk(vcp, tdc, credp, &treq);
1195 ReleaseWriteLock(&vcp->lock);
1197 #ifdef AFS_64BIT_CLIENT
1199 uiop->afsio_offset = finalOffset;
1201 #endif /* AFS_64BIT_CLIENT */
1206 start_offset = uiop->afsio_offset;
1207 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE, ICL_TYPE_POINTER, vcp,
1208 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(start_offset),
1209 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrSize));
1210 ReleaseReadLock(&vcp->lock);
1211 ObtainWriteLock(&vcp->lock, 400);
1212 vcp->m.Date = osi_Time(); /* Set file date (for ranlib) */
1214 /* un-protect last page. */
1215 last_page = vcp->m.Length / PAGESIZE;
1216 #ifdef AFS_64BIT_CLIENT
1217 if (vcp->m.Length > afs_vmMappingEnd)
1218 last_page = afs_vmMappingEnd / PAGESIZE;
1220 vm_protectp(vcp->segid, last_page, 1, FILEKEY);
1221 if (xfrSize + xfrOffset > fileSize) {
1222 vcp->m.Length = xfrSize + xfrOffset;
1224 if ((!(vcp->states & CPageHog)) && (xfrSize >= MIN_PAGE_HOG_SIZE)) {
1226 vcp->states |= CPageHog;
1228 ReleaseWriteLock(&vcp->lock);
1230 /* If the write will fit into a single chunk we'll write all of it
1231 * at once. Otherwise, we'll write one chunk at a time, flushing
1232 * some of it to disk.
1236 /* Only create a page to avoid excess VM access if we're writing a
1237 * small file which is either new or completely overwrites the
1240 if ((xfrOffset == 0) && (xfrSize < PAGESIZE) && (xfrSize >= fileSize)
1241 && (vcp->v.v_gnode->gn_mwrcnt == 0)
1242 && (vcp->v.v_gnode->gn_mrdcnt == 0)) {
1243 (void)vm_makep(vcp->segid, 0);
1246 while (xfrSize > 0) {
1247 offset = AFS_CHUNKBASE(xfrOffset);
1250 if (AFS_CHUNKSIZE(xfrOffset) <= len)
1252 (afs_size_t) AFS_CHUNKSIZE(xfrOffset) - (xfrOffset - offset);
1254 if (len == xfrSize) {
1255 /* All data goes to this one chunk. */
1257 old_offset = uiop->afsio_offset;
1258 #ifdef AFS_64BIT_CLIENT
1259 uiop->afsio_offset = xfrOffset;
1260 toffset = xfrOffset;
1262 code = vm_move(vcp->segid, toffset, txfrSize, rw, uiop);
1263 #else /* AFS_64BIT_CLIENT */
1264 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
1265 #endif /* AFS_64BIT_CLIENT */
1270 /* Write just one chunk's worth of data. */
1272 struct iovec tvec[16]; /* Should have access to #define */
1274 /* Purge dirty chunks of file if there are too many dirty chunks.
1275 * Inside the write loop, we only do this at a chunk boundary.
1276 * Clean up partial chunk if necessary at end of loop.
1278 if (counter > 0 && code == 0 && xfrOffset == offset) {
1279 ObtainWriteLock(&vcp->lock, 403);
1280 if (xfrOffset > vcp->m.Length)
1281 vcp->m.Length = xfrOffset;
1282 code = afs_DoPartialWrite(vcp, &treq);
1283 vcp->states |= CDirty;
1284 ReleaseWriteLock(&vcp->lock);
1288 afsio_copy(uiop, &tuio, tvec);
1289 afsio_trim(&tuio, len);
1290 tuio.afsio_offset = xfrOffset;
1293 old_offset = uiop->afsio_offset;
1294 #ifdef AFS_64BIT_CLIENT
1295 toffset = xfrOffset;
1296 code = vm_move(vcp->segid, toffset, len, rw, &tuio);
1297 #else /* AFS_64BIT_CLIENT */
1298 code = vm_move(vcp->segid, xfrOffset, len, rw, &tuio);
1299 #endif /* AFS_64BIT_CLIENT */
1301 len -= tuio.afsio_resid;
1302 afsio_skip(uiop, len);
1307 first_page = (afs_size_t) old_offset >> PGSHIFT;
1309 1 + (((afs_size_t) old_offset + (len - 1)) >> PGSHIFT) -
1311 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE2, ICL_TYPE_POINTER, vcp,
1312 ICL_TYPE_INT32, first_page, ICL_TYPE_INT32, pages);
1314 code = vm_writep(vcp->segid, first_page, pages);
1315 if (++count > AFS_MAX_VM_CHUNKS) {
1317 vms_iowait(vcp->segid);
1325 vms_iowait(vcp->segid);
1329 ObtainWriteLock(&vcp->lock, 242);
1330 if (code == 0 && (vcp->states & CDirty)) {
1331 code = afs_DoPartialWrite(vcp, &treq);
1333 vm_protectp(vcp->segid, last_page, 1, RDONLY);
1334 ReleaseWriteLock(&vcp->lock);
1336 /* If requested, fsync the file after every write */
1338 afs_fsync(vp, credp);
1340 ObtainReadLock(&vcp->lock);
1341 if (vcp->vc_error) {
1342 /* Pretend we didn't write anything. We need to get the error back to
1343 * the user. If we don't it's possible for a quota error for this
1344 * write to succeed and the file to be closed without the user ever
1345 * having seen the store error. And AIX syscall clears the error if
1346 * anything was written.
1348 code = vcp->vc_error;
1349 if (code == EDQUOT || code == ENOSPC)
1350 uiop->afsio_resid = save_resid;
1352 #ifdef AFS_64BIT_CLIENT
1354 uiop->afsio_offset = finalOffset;
1356 #endif /* AFS_64BIT_CLIENT */
1359 ReleaseReadLock(&vcp->lock);
1360 afs_Trace2(afs_iclSetp, CM_TRACE_VMWRITE3, ICL_TYPE_POINTER, vcp,
1361 ICL_TYPE_INT32, code);
1366 afs_direct_rdwr(vp, uiop, rw, ioflag, credp)
1367 register struct vnode *vp;
1371 struct ucred *credp;
1373 register afs_int32 code = 0;
1374 afs_size_t fileSize, xfrOffset, offset, old_offset, xfrSize;
1375 struct vcache *vcp = VTOAFS(vp);
1376 afs_int32 save_resid = uiop->afsio_resid;
1377 struct vrequest treq;
1379 if (code = afs_InitReq(&treq, credp))
1382 /* special case easy transfer; apparently a lot are done */
1383 if ((xfrSize = uiop->afsio_resid) == 0)
1386 ObtainReadLock(&vcp->lock);
1387 fileSize = vcp->m.Length;
1388 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
1389 uiop->afsio_offset = fileSize;
1391 /* compute xfrOffset now, and do some checks */
1392 xfrOffset = uiop->afsio_offset;
1393 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
1395 ReleaseReadLock(&vcp->lock);
1399 /* check for "file too big" error, which should really be done above us */
1401 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
1403 ReleaseReadLock(&vcp->lock);
1407 ReleaseReadLock(&vcp->lock);
1408 if (rw == UIO_WRITE) {
1409 ObtainWriteLock(&vcp->lock, 400);
1410 vcp->m.Date = osi_Time(); /* Set file date (for ranlib) */
1412 if (xfrSize + xfrOffset > fileSize)
1413 vcp->m.Length = xfrSize + xfrOffset;
1414 ReleaseWriteLock(&vcp->lock);
1416 afs_Trace3(afs_iclSetp, CM_TRACE_DIRECTRDWR, ICL_TYPE_POINTER, vp,
1417 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(uiop->afsio_offset),
1418 ICL_TYPE_LONG, uiop->afsio_resid);
1419 code = afs_rdwr(vp, uiop, rw, ioflag, credp);
1421 uiop->afsio_resid = save_resid;
1423 uiop->afsio_offset = xfrOffset + xfrSize;
1424 if (uiop->afsio_resid > 0) {
1425 /* should zero here the remaining buffer */
1426 uiop->afsio_resid = 0;
1428 /* Purge dirty chunks of file if there are too many dirty chunks.
1429 * Inside the write loop, we only do this at a chunk boundary.
1430 * Clean up partial chunk if necessary at end of loop.
1432 if (AFS_CHUNKBASE(uiop->afsio_offset) != AFS_CHUNKBASE(xfrOffset)) {
1433 ObtainWriteLock(&vcp->lock, 402);
1434 code = afs_DoPartialWrite(vcp, &treq);
1435 vcp->states |= CDirty;
1436 ReleaseWriteLock(&vcp->lock);
1446 lock_normalize(vp, lckdat, offset, cred)
1448 struct eflock *lckdat;
1455 switch (lckdat->l_whence) {
1459 lckdat->l_start += (off_t) offset;
1462 code = afs_getattr(vp, &vattr, cred);
1465 lckdat->l_start += (off_t) vattr.va_size;
1470 lckdat->l_whence = 0;
1476 afs_gn_lockctl(vp, offset, lckdat, cmd, ignored_fcn, ignored_id, cred)
1477 void (*ignored_fcn) ();
1481 struct eflock *lckdat;
1483 #ifdef AFS_AIX51_ENV
1489 int error, ncmd = 0;
1491 struct vattr *attrs;
1493 AFS_STATCNT(afs_gn_lockctl);
1494 /* Convert from AIX's cmd to standard lockctl lock types... */
1497 else if (cmd & SETFLCK) {
1502 flkd.l_type = lckdat->l_type;
1503 flkd.l_whence = lckdat->l_whence;
1504 flkd.l_start = lckdat->l_start;
1505 flkd.l_len = lckdat->l_len;
1506 flkd.l_pid = lckdat->l_pid;
1507 flkd.l_sysid = lckdat->l_sysid;
1509 if (flkd.l_start != lckdat->l_start || flkd.l_len != lckdat->l_len)
1511 if (error = lock_normalize(vp, &flkd, offset, cred))
1513 error = afs_lockctl(vp, &flkd, ncmd, cred);
1514 lckdat->l_type = flkd.l_type;
1515 lckdat->l_whence = flkd.l_whence;
1516 lckdat->l_start = flkd.l_start;
1517 lckdat->l_len = flkd.l_len;
1518 lckdat->l_pid = flkd.l_pid;
1519 lckdat->l_sysid = flkd.l_sysid;
1520 afs_Trace3(afs_iclSetp, CM_TRACE_GLOCKCTL, ICL_TYPE_POINTER, vp,
1521 ICL_TYPE_LONG, ncmd, ICL_TYPE_LONG, error);
1526 /* NOTE: In the nfs glue routine (nfs_gn2sun.c) the order was wrong (vp, flags, cmd, arg, ext); was that another typo? */
1528 afs_gn_ioctl(vp, Cmd, arg, flags, channel, ext)
1530 #ifdef AFS_AIX51_ENV
1536 int flags; /* Ignored in AFS */
1537 int channel; /* Ignored in AFS */
1538 int ext; /* Ignored in AFS */
1543 AFS_STATCNT(afs_gn_ioctl);
1544 /* This seems to be a perfect fit for our ioctl redirection (afs_xioctl hack); thus the ioctl(2) entry in sysent.c is unaffected in the aix/afs port. */
1545 error = afs_ioctl(vp, cmd, arg);
1546 afs_Trace3(afs_iclSetp, CM_TRACE_GIOCTL, ICL_TYPE_POINTER, vp,
1547 ICL_TYPE_LONG, cmd, ICL_TYPE_LONG, error);
1553 afs_gn_readlink(vp, uiop, cred)
1560 AFS_STATCNT(afs_gn_readlink);
1561 error = afs_readlink(vp, uiop, cred);
1562 afs_Trace2(afs_iclSetp, CM_TRACE_GREADLINK, ICL_TYPE_POINTER, vp,
1563 ICL_TYPE_LONG, error);
1569 afs_gn_select(vp, which, vinfo, mpx)
1575 AFS_STATCNT(afs_gn_select);
1576 /* NO SUPPORT for this in afs YET! */
1577 return (EOPNOTSUPP);
1582 afs_gn_symlink(vp, link, target, cred)
1591 AFS_STATCNT(afs_gn_symlink);
1594 error = afs_symlink(vp, link, &va, target, cred);
1595 afs_Trace4(afs_iclSetp, CM_TRACE_GSYMLINK, ICL_TYPE_POINTER, vp,
1596 ICL_TYPE_STRING, link, ICL_TYPE_STRING, target, ICL_TYPE_LONG,
1603 afs_gn_readdir(vp, uiop, cred)
1610 AFS_STATCNT(afs_gn_readdir);
1611 error = afs_readdir(vp, uiop, cred);
1612 afs_Trace2(afs_iclSetp, CM_TRACE_GREADDIR, ICL_TYPE_POINTER, vp,
1613 ICL_TYPE_LONG, error);
1618 extern Simple_lock afs_asyncbuf_lock;
1620 * Buffers are ranked by age. A buffer's age is the value of afs_biotime
1621 * when the buffer is processed by naix_vmstrategy. afs_biotime is
1622 * incremented for each buffer. A buffer's age is kept in its av_back field.
1623 * The age ranking is used by the daemons, which favor older buffers.
1625 afs_int32 afs_biotime = 0;
1627 extern struct buf *afs_asyncbuf;
1628 extern int afs_asyncbuf_cv;
/*
 * NOTE(review): this block has been mangled in extraction -- interior lines
 * (closing braces, #else/#endif partners, parts of comments, the insert
 * path of the sort loop) are missing, and each surviving line still carries
 * a stray original line number.  Only comments are added below; code tokens
 * are untouched.  Reconstruct the full body from upstream before building.
 */
1629 /* This function is called with a list of buffers, threaded through
1630 * the av_forw field. Our goal is to copy the list of buffers into the
1631 * afs_asyncbuf list, sorting buffers into sublists linked by the b_work field.
1632 * Among buffers in the same work group, the one with the lowest address
1633 * has to be located at the head of the queue; its b_bcount field will also
1634 * be increased to cover all of the buffers in the b_work queue.
1636 #define AIX_VM_BLKSIZE 8192
1637 afs_gn_strategy(abp, cred)
1639 register struct buf *abp;
1641 register struct buf **lbp, *tbp;
1642 #ifdef AFS_64BIT_KERNEL
1643 afs_int64 *lwbp; /* last guy in work chain */
1645 int *lwbp; /* last guy in work chain */
1647 struct buf *nbp, *qbp, *qnbp, *firstComparable;
/* Two buffers may be merged only when they target the same vnode and xmem
 * subspace, have identical flags, and neither is page-protected. */
1651 #define EFS_COMPARABLE(x,y) ((x)->b_vp == (y)->b_vp \
1652 && (x)->b_xmemd.subspace_id == (y)->b_xmemd.subspace_id \
1653 && (x)->b_flags == (y)->b_flags \
1654 && !((x)->b_flags & B_PFPROT) \
1655 && !((y)->b_flags & B_PFPROT))
/* raise interrupt priority to INTMAX and take the async-queue lock */
1657 oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);
1658 for (tbp = abp; tbp; tbp = nbp) {
1659 nbp = tbp->av_forw; /* remember for later */
/* av_back is overloaded as an arrival-order stamp; the merge code below
 * compares these stamps to keep the older age for a merged request. */
1661 tbp->av_back = (struct buf *)afs_biotime++;
1663 /* first insert the buffer into the afs_async queue. Insert buffer
1664 * sorted within its disk position within a set of comparable buffers.
1665 * Ensure that all comparable buffers are grouped contiguously.
1666 * Later on, we'll merge adjacent buffers into a single request.
1668 firstComparable = NULL;
1669 lbp = &afs_asyncbuf;
1670 for (qbp = *lbp; qbp; lbp = &qbp->av_forw, qbp = *lbp) {
1671 if (EFS_COMPARABLE(tbp, qbp)) {
1672 if (!firstComparable)
1673 firstComparable = qbp;
1674 /* this buffer is comparable, so see if the next buffer
1675 * is farther in the file; if it is, insert before next buffer.
1677 if (tbp->b_blkno < qbp->b_blkno) {
1681 /* If we're at the end of a block of comparable buffers, we
1682 * insert the buffer here to keep all comparable buffers
1685 if (firstComparable)
1689 /* do the insert before qbp now */
1690 tbp->av_forw = *lbp;
1692 if (firstComparable == NULL) {
1693 /* next we're going to do all sorts of buffer merging tricks, but
1694 * here we know we're the only COMPARABLE block in the
1695 * afs_asyncbuf list, so we just skip that and continue with
1696 * the next input buffer.
1700 /* we may have actually added the "new" firstComparable */
1701 if (tbp->av_forw == firstComparable)
1702 firstComparable = tbp;
1704 * when we get here, firstComparable points to the first buffer in the
1705 * same vnode and subspace that we (tbp) are in. We go through the
1706 * area of this list with COMPARABLE buffers (a contiguous region) and
1707 * repeatedly merge buffers that are contiguous and in the same block or
1708 * buffers that are contiguous and are both integral numbers of blocks.
1709 * Note that our end goal is to have as big blocks as we can, but we
1710 * must minimize the transfers that are not integral #s of blocks on
1711 * block boundaries, since Episode will do those smaller and/or
1712 * unaligned I/Os synchronously.
1714 * A useful example to consider has the async queue with this in it:
1715 * [8K block, 2 pages] [4K block, 1 page] [4K hole] [8K block, 2 pages]
1716 * If we get a request that fills the 4K hole, we want to merge this
1717 * whole mess into a 24K, 6 page transfer. If we don't, however, we
1718 * don't want to do any merging since adding the 4K transfer to the 8K
1719 * transfer makes the 8K transfer synchronous.
1721 * Note that if there are any blocks whose size is a multiple of
1722 * the file system block size, then we know that such blocks are also
1723 * on block boundaries.
1726 doMerge = 1; /* start the loop */
1727 while (doMerge) { /* loop until an iteration doesn't
1728 * make any more changes */
1730 for (qbp = firstComparable;; qbp = qnbp) {
1731 qnbp = qbp->av_forw;
1733 break; /* we're done */
1734 if (!EFS_COMPARABLE(qbp, qnbp))
1737 /* try to merge qbp and qnbp */
1739 /* if the two are not adjacent on disk, go on to the next region */
1740 if ((dbtob(qbp->b_blkno) + qbp->b_bcount) !=
1741 dbtob(qnbp->b_blkno))
1744 /* note if both in the same block, the first byte of leftmost guy
1745 * and last byte of rightmost guy are in the same block.
1747 if ((dbtob(qbp->b_blkno) & ~(AIX_VM_BLKSIZE - 1)) ==
1748 ((dbtob(qnbp->b_blkno) + qnbp->b_bcount -
1749 1) & ~(AIX_VM_BLKSIZE - 1))) {
1750 doMerge = 1; /* both in same block */
1751 } else if ((qbp->b_bcount & (AIX_VM_BLKSIZE - 1)) == 0
1752 && (qnbp->b_bcount & (AIX_VM_BLKSIZE - 1)) == 0) {
1753 doMerge = 1; /* both integral #s of blocks */
1756 register struct buf *xbp;
1758 /* merge both of these blocks together */
1759 /* first set age to the older of the two */
1760 #ifdef AFS_64BIT_KERNEL
1761 if ((afs_int64) qnbp->av_back - (afs_int64) qbp->av_back <
1764 if ((int)qnbp->av_back - (int)qbp->av_back < 0)
1766 qbp->av_back = qnbp->av_back;
1767 lwbp = &qbp->b_work;
1768 /* find end of qbp's work queue */
1769 for (xbp = (struct buf *)(*lwbp); xbp;
1770 lwbp = &xbp->b_work, xbp = (struct buf *)(*lwbp));
1772 * now setting *lwbp will change the last ptr in the qbp's
1775 qbp->av_forw = qnbp->av_forw; /* splice out qnbp */
1776 qbp->b_bcount += qnbp->b_bcount; /* fix count */
1777 #ifdef AFS_64BIT_KERNEL
1778 *lwbp = (afs_int64) qnbp; /* append qnbp to end */
1780 *lwbp = (int)qnbp; /* append qnbp to end */
1783 * note that qnbp is bogus, but it doesn't matter because
1784 * we're going to restart the for loop now.
1786 break; /* out of the for loop */
1790 } /* for loop for all interrupt data */
1791 /* at this point, all I/O has been queued. Wakeup the daemon */
1792 e_wakeup_one((int *)&afs_asyncbuf_cv);
1793 unlock_enable(oldPriority, &afs_asyncbuf_lock);
/*
 * afs_inactive -- K&R-style vnode "inactive" entry point; simply hands the
 * vcache off to afs_InactiveVCache() with the caller's credentials.
 * NOTE(review): the return type, braces, and any surrounding lines are
 * missing from this extraction -- restore from upstream before building.
 */
1798 afs_inactive(avc, acred)
1799 register struct vcache *avc;
1800 struct AFS_UCRED *acred;
1802 afs_InactiveVCache(avc, acred);
/*
 * Body fragment of afs_gn_revoke (its header lines are elided here): bumps
 * the stats counter and reports revoke as unsupported via EOPNOTSUPP.
 * The four K&R headers that follow (afs_gn_getacl/setacl/getpcl/setpcl)
 * have had their parameter declarations and bodies elided in this
 * extraction -- NOTE(review): restore from upstream before building.
 */
1809 AFS_STATCNT(afs_gn_revoke);
1810 /* NO SUPPORT for this in afs YET! */
1811 return (EOPNOTSUPP);
1815 afs_gn_getacl(vp, uiop, cred)
1825 afs_gn_setacl(vp, uiop, cred)
1835 afs_gn_getpcl(vp, uiop, cred)
1845 afs_gn_setpcl(vp, uiop, cred)
/*
 * Glue wrappers around the real vfs operations in Afs_vfsops.  Each one
 * checks whether the caller already holds the AFS global lock
 * (ISAFS_GLOCK()) and then dispatches through the Afs_vfsops table.
 * NOTE(review): this extraction keeps only the lock-check and dispatch
 * lines of each wrapper -- the conditional AFS_GLOCK()/AFS_GUNLOCK()
 * calls, braces, and "return ret;" statements are missing, as are most
 * entries of the locked_Afs_vfsops initializer at the bottom.  Each line
 * also carries a stray original line number.  Restore from upstream.
 */
1858 extern struct vfsops Afs_vfsops;
1859 extern struct vnodeops afs_gn_vnodeops;
1860 extern int Afs_init();
1862 #define AFS_CALLOUT_TBL_SIZE 256
1865 * the following additional layer of gorp is due to the fact that the
1866 * filesystem layer no longer obtains the kernel lock for me. I was relying
1867 * on this behavior to avoid having to think about locking.
1871 vfs_mount(struct vfs *a, struct ucred *b)
1873 register glockOwner, ret;
1875 glockOwner = ISAFS_GLOCK();
1878 ret = (*Afs_vfsops.vfs_mount) (a, b);
1886 vfs_unmount(struct vfs *a, int b, struct ucred *c)
1888 register glockOwner, ret;
1890 glockOwner = ISAFS_GLOCK();
1893 ret = (*Afs_vfsops.vfs_unmount) (a, b, c);
1901 vfs_root(struct vfs *a, struct vnode **b, struct ucred *c)
1903 register glockOwner, ret;
1905 glockOwner = ISAFS_GLOCK();
1908 ret = (*Afs_vfsops.vfs_root) (a, b, c);
1916 vfs_statfs(struct vfs *a, struct statfs *b, struct ucred *c)
1918 register glockOwner, ret;
1920 glockOwner = ISAFS_GLOCK();
1923 ret = (*Afs_vfsops.vfs_statfs) (a, b, c);
1931 vfs_sync(struct gfs *a)
1933 register glockOwner, ret;
1935 glockOwner = ISAFS_GLOCK();
1938 ret = (*Afs_vfsops.vfs_sync) (a);
1945 vfs_vget(struct vfs *a, struct vnode **b, struct fileid *c, struct ucred *d)
1947 register glockOwner, ret;
1949 glockOwner = ISAFS_GLOCK();
1952 ret = (*Afs_vfsops.vfs_vget) (a, b, c, d);
1960 vfs_cntl(struct vfs *a, int b, caddr_t c, size_t d, struct ucred *e)
1962 register glockOwner, ret;
1964 glockOwner = ISAFS_GLOCK();
1967 ret = (*Afs_vfsops.vfs_cntl) (a, b, c, d, e);
1975 vfs_quotactl(struct vfs *a, int b, uid_t c, caddr_t d, struct ucred *e)
1977 register glockOwner, ret;
1979 glockOwner = ISAFS_GLOCK();
1982 ret = (*Afs_vfsops.vfs_quotactl) (a, b, c, d, e);
1989 #ifdef AFS_AIX51_ENV
1991 vfs_syncvfs(struct gfs *a, struct vfs *b, int c, struct ucred *d)
1993 register glockOwner, ret;
1995 glockOwner = ISAFS_GLOCK();
1998 ret = (*Afs_vfsops.vfs_syncvfs) (a, b, c, d);
2007 struct vfsops locked_Afs_vfsops = {
2016 #ifdef AFS_AIX51_ENV
/*
 * Glue wrappers around the vnode operations in afs_gn_vnodeops (vn_link
 * through vn_hold).  Each checks ISAFS_GLOCK() and dispatches through the
 * table; AFS_AIX51_ENV selects the int32long64_t-typed prototypes.
 * NOTE(review): only the prototype, lock-check, and dispatch lines of each
 * wrapper survive this extraction -- conditional AFS_GLOCK()/AFS_GUNLOCK()
 * calls, braces, "return ret;", and the #else/#endif partners of the
 * #ifdefs are missing.  Restore from upstream before building.
 */
2022 vn_link(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
2024 register glockOwner, ret;
2026 glockOwner = ISAFS_GLOCK();
2029 ret = (*afs_gn_vnodeops.vn_link) (a, b, c, d);
2037 #ifdef AFS_AIX51_ENV
2038 vn_mkdir(struct vnode *a, char *b, int32long64_t c, struct ucred *d)
2041 vn_mkdir(struct vnode *a, char *b, int c, struct ucred *d)
2044 register glockOwner, ret;
2046 glockOwner = ISAFS_GLOCK();
2049 ret = (*afs_gn_vnodeops.vn_mkdir) (a, b, c, d);
2057 #ifdef AFS_AIX51_ENV
2058 vn_mknod(struct vnode *a, caddr_t b, int32long64_t c, dev_t d,
2062 vn_mknod(struct vnode *a, caddr_t b, int c, dev_t d, struct ucred *e)
2065 register glockOwner, ret;
2067 glockOwner = ISAFS_GLOCK();
2070 ret = (*afs_gn_vnodeops.vn_mknod) (a, b, c, d, e);
2078 vn_remove(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
2080 register glockOwner, ret;
2082 glockOwner = ISAFS_GLOCK();
2085 ret = (*afs_gn_vnodeops.vn_remove) (a, b, c, d);
2093 vn_rename(struct vnode *a, struct vnode *b, caddr_t c, struct vnode *d,
2094 struct vnode *e, caddr_t f, struct ucred *g)
2096 register glockOwner, ret;
2098 glockOwner = ISAFS_GLOCK();
2101 ret = (*afs_gn_vnodeops.vn_rename) (a, b, c, d, e, f, g);
2109 vn_rmdir(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
2111 register glockOwner, ret;
2113 glockOwner = ISAFS_GLOCK();
2116 ret = (*afs_gn_vnodeops.vn_rmdir) (a, b, c, d);
2124 #ifdef AFS_AIX51_ENV
2125 vn_lookup(struct vnode *a, struct vnode **b, char *c, int32long64_t d,
2127 vn_lookup(struct vnode *a, struct vnode **b, char *c, int d,
2129 struct vattr *v, struct ucred *e)
2131 register glockOwner, ret;
2133 glockOwner = ISAFS_GLOCK();
2136 ret = (*afs_gn_vnodeops.vn_lookup) (a, b, c, d, v, e);
2144 vn_fid(struct vnode *a, struct fileid *b, struct ucred *c)
2146 register glockOwner, ret;
2148 glockOwner = ISAFS_GLOCK();
2151 ret = (*afs_gn_vnodeops.vn_fid) (a, b, c);
2159 #ifdef AFS_AIX51_ENV
/* NOTE(review): unlike the other wrappers, the plain-int prototype appears
 * directly under the AFS_AIX51_ENV branch here and the int32long64_t one
 * after it -- likely an artifact of the elided #else; verify upstream. */
2160 vn_open(struct vnode *a, int b, int c, caddr_t * d, struct ucred *e)
2163 vn_open(struct vnode *a, int32long64_t b, ext_t c, caddr_t * d,
2167 register glockOwner, ret;
2169 glockOwner = ISAFS_GLOCK();
2172 ret = (*afs_gn_vnodeops.vn_open) (a, b, c, d, e);
2180 #ifdef AFS_AIX51_ENV
2181 vn_create(struct vnode *a, struct vnode **b, int32long64_t c, caddr_t d,
2182 int32long64_t e, caddr_t * f, struct ucred *g)
2185 vn_create(struct vnode *a, struct vnode **b, int c, caddr_t d, int e,
2186 caddr_t * f, struct ucred *g)
2189 register glockOwner, ret;
2191 glockOwner = ISAFS_GLOCK();
2194 ret = (*afs_gn_vnodeops.vn_create) (a, b, c, d, e, f, g);
2202 vn_hold(struct vnode *a)
2204 register glockOwner, ret;
2206 glockOwner = ISAFS_GLOCK();
2209 ret = (*afs_gn_vnodeops.vn_hold) (a);
/*
 * Glue wrappers around the vnode operations in afs_gn_vnodeops (vn_rele
 * through vn_ftrunc).  Each checks ISAFS_GLOCK() and dispatches through
 * the table; AFS_AIX51_ENV selects the int32long64_t/uint32long64_t-typed
 * prototypes.  NOTE(review): this extraction keeps only the prototype,
 * lock-check, and dispatch lines -- the conditional AFS_GLOCK()/
 * AFS_GUNLOCK() calls, braces, "return ret;", and the #else/#endif
 * partners of the #ifdefs are missing.  Restore from upstream.
 */
2217 vn_rele(struct vnode *a)
2219 register glockOwner, ret;
2221 glockOwner = ISAFS_GLOCK();
2224 ret = (*afs_gn_vnodeops.vn_rele) (a);
2232 #ifdef AFS_AIX51_ENV
2233 vn_close(struct vnode *a, int32long64_t b, caddr_t c, struct ucred *d)
2236 vn_close(struct vnode *a, int b, caddr_t c, struct ucred *d)
2239 register glockOwner, ret;
2241 glockOwner = ISAFS_GLOCK();
2244 ret = (*afs_gn_vnodeops.vn_close) (a, b, c, d);
2252 #ifdef AFS_AIX51_ENV
2253 vn_map(struct vnode *a, caddr_t b, uint32long64_t c, uint32long64_t d,
2254 uint32long64_t e, struct ucred *f)
2257 vn_map(struct vnode *a, caddr_t b, uint c, uint d, uint e, struct ucred *f)
2260 register glockOwner, ret;
2262 glockOwner = ISAFS_GLOCK();
2265 ret = (*afs_gn_vnodeops.vn_map) (a, b, c, d, e, f);
2273 #ifdef AFS_AIX51_ENV
2274 vn_unmap(struct vnode *a, int32long64_t b, struct ucred *c)
2277 vn_unmap(struct vnode *a, int b, struct ucred *c)
2280 register glockOwner, ret;
2282 glockOwner = ISAFS_GLOCK();
2285 ret = (*afs_gn_vnodeops.vn_unmap) (a, b, c);
2293 #ifdef AFS_AIX51_ENV
2294 vn_access(struct vnode *a, int32long64_t b, int32long64_t c, struct ucred *d)
2297 vn_access(struct vnode *a, int b, int c, struct ucred *d)
2300 register glockOwner, ret;
2302 glockOwner = ISAFS_GLOCK();
2305 ret = (*afs_gn_vnodeops.vn_access) (a, b, c, d);
2313 vn_getattr(struct vnode *a, struct vattr *b, struct ucred *c)
2315 register glockOwner, ret;
2317 glockOwner = ISAFS_GLOCK();
2320 ret = (*afs_gn_vnodeops.vn_getattr) (a, b, c);
2328 #ifdef AFS_AIX51_ENV
2329 vn_setattr(struct vnode *a, int32long64_t b, int32long64_t c, int32long64_t d,
2330 int32long64_t e, struct ucred *f)
2333 vn_setattr(struct vnode *a, int b, int c, int d, int e, struct ucred *f)
2336 register glockOwner, ret;
2338 glockOwner = ISAFS_GLOCK();
2341 ret = (*afs_gn_vnodeops.vn_setattr) (a, b, c, d, e, f);
2349 #ifdef AFS_AIX51_ENV
2350 vn_fclear(struct vnode *a, int32long64_t b, offset_t c, offset_t d
2352 vn_fclear(struct vnode *a, int b, offset_t c, offset_t d
2354 , caddr_t e, struct ucred *f)
2356 register glockOwner, ret;
2358 glockOwner = ISAFS_GLOCK();
2361 ret = (*afs_gn_vnodeops.vn_fclear) (a, b, c, d, e, f);
2369 #ifdef AFS_AIX51_ENV
2370 vn_fsync(struct vnode *a, int32long64_t b, int32long64_t c, struct ucred *d)
2373 vn_fsync(struct vnode *a, int b, int c, struct ucred *d)
2376 register glockOwner, ret;
2378 glockOwner = ISAFS_GLOCK();
2381 ret = (*afs_gn_vnodeops.vn_fsync) (a, b, c, d);
2389 #ifdef AFS_AIX51_ENV
2390 vn_ftrunc(struct vnode *a, int32long64_t b, offset_t c, caddr_t d,
2394 vn_ftrunc(struct vnode *a, int b, offset_t c, caddr_t d, struct ucred *e)
2397 register glockOwner, ret;
2399 glockOwner = ISAFS_GLOCK();
2402 ret = (*afs_gn_vnodeops.vn_ftrunc) (a, b, c, d, e);
/*
 * Glue wrappers around the vnode operations in afs_gn_vnodeops (vn_rdwr
 * through vn_setpcl).  Each checks ISAFS_GLOCK() and dispatches through
 * the table; AFS_AIX51_ENV selects the int32long64_t/ulong32int64_t-typed
 * prototypes.  NOTE(review): this extraction keeps only the prototype,
 * lock-check, and dispatch lines -- the conditional AFS_GLOCK()/
 * AFS_GUNLOCK() calls, braces, "return ret;", and the #else/#endif
 * partners of the #ifdefs are missing.  Restore from upstream.
 */
2410 #ifdef AFS_AIX51_ENV
2411 vn_rdwr(struct vnode *a, enum uio_rw b, int32long64_t c, struct uio *d,
2412 ext_t e, caddr_t f, struct vattr *v, struct ucred *g)
2415 vn_rdwr(struct vnode *a, enum uio_rw b, int c, struct uio *d, int e,
2416 caddr_t f, struct vattr *v, struct ucred *g)
2419 register glockOwner, ret;
2421 glockOwner = ISAFS_GLOCK();
2424 ret = (*afs_gn_vnodeops.vn_rdwr) (a, b, c, d, e, f, v, g);
2432 #ifdef AFS_AIX51_ENV
2433 vn_lockctl(struct vnode *a, offset_t b, struct eflock *c, int32long64_t d,
2434 int (*e) (), ulong32int64_t * f, struct ucred *g)
2437 vn_lockctl(struct vnode *a, offset_t b, struct eflock *c, int d, int (*e) (),
2438 ulong * f, struct ucred *g)
2441 register glockOwner, ret;
2443 glockOwner = ISAFS_GLOCK();
2446 ret = (*afs_gn_vnodeops.vn_lockctl) (a, b, c, d, e, f, g);
2454 #ifdef AFS_AIX51_ENV
2455 vn_ioctl(struct vnode *a, int32long64_t b, caddr_t c, size_t d, ext_t e,
2459 vn_ioctl(struct vnode *a, int b, caddr_t c, size_t d, int e, struct ucred *f)
2462 register glockOwner, ret;
2464 glockOwner = ISAFS_GLOCK();
2467 ret = (*afs_gn_vnodeops.vn_ioctl) (a, b, c, d, e, f);
2475 vn_readlink(struct vnode *a, struct uio *b, struct ucred *c)
2477 register glockOwner, ret;
2479 glockOwner = ISAFS_GLOCK();
2482 ret = (*afs_gn_vnodeops.vn_readlink) (a, b, c);
2490 #ifdef AFS_AIX51_ENV
2491 vn_select(struct vnode *a, int32long64_t b, ushort c, ushort * d,
2494 vn_select(struct vnode *a, int b, ushort c, ushort * d, void (*e) ()
2496 , caddr_t f, struct ucred *g)
2498 register glockOwner, ret;
2500 glockOwner = ISAFS_GLOCK();
2503 ret = (*afs_gn_vnodeops.vn_select) (a, b, c, d, e, f, g);
2511 vn_symlink(struct vnode *a, char *b, char *c, struct ucred *d)
2513 register glockOwner, ret;
2515 glockOwner = ISAFS_GLOCK();
2518 ret = (*afs_gn_vnodeops.vn_symlink) (a, b, c, d);
2526 vn_readdir(struct vnode *a, struct uio *b, struct ucred *c)
2528 register glockOwner, ret;
2530 glockOwner = ISAFS_GLOCK();
2533 ret = (*afs_gn_vnodeops.vn_readdir) (a, b, c);
2541 #ifdef AFS_AIX51_ENV
2542 vn_revoke(struct vnode *a, int32long64_t b, int32long64_t c, struct vattr *d,
2546 vn_revoke(struct vnode *a, int b, int c, struct vattr *d, struct ucred *e)
2549 register glockOwner, ret;
2551 glockOwner = ISAFS_GLOCK();
2554 ret = (*afs_gn_vnodeops.vn_revoke) (a, b, c, d, e);
2562 vn_getacl(struct vnode *a, struct uio *b, struct ucred *c)
2564 register glockOwner, ret;
2566 glockOwner = ISAFS_GLOCK();
2569 ret = (*afs_gn_vnodeops.vn_getacl) (a, b, c);
2577 vn_setacl(struct vnode *a, struct uio *b, struct ucred *c)
2579 register glockOwner, ret;
2581 glockOwner = ISAFS_GLOCK();
2584 ret = (*afs_gn_vnodeops.vn_setacl) (a, b, c);
2592 vn_getpcl(struct vnode *a, struct uio *b, struct ucred *c)
2594 register glockOwner, ret;
2596 glockOwner = ISAFS_GLOCK();
2599 ret = (*afs_gn_vnodeops.vn_getpcl) (a, b, c);
2607 vn_setpcl(struct vnode *a, struct uio *b, struct ucred *c)
2609 register glockOwner, ret;
2611 glockOwner = ISAFS_GLOCK();
2614 ret = (*afs_gn_vnodeops.vn_setpcl) (a, b, c);
/*
 * locked_afs_gn_vnodeops -- the vnodeops table wired to the vn_* lock-glue
 * wrappers above, except vn_strategy which points at afs_gn_strategy
 * directly (no locking), plus afs_gn_enosys placeholders for the
 * unimplemented slots.  NOTE(review): most of the initializer entries
 * (the locked wrapper pointers) and the closing "};" are elided in this
 * extraction -- restore from upstream before building.
 */
2621 extern int afs_gn_strategy();
2623 struct vnodeops locked_afs_gn_vnodeops = {
2652 afs_gn_strategy, /* no locking!!! (discovered the hard way) */
2658 afs_gn_enosys, /* vn_seek */
2659 afs_gn_enosys, /* vn_fsync_range */
2660 afs_gn_enosys, /* vn_create_attr */
2661 afs_gn_enosys, /* vn_finfo */
2662 afs_gn_enosys, /* vn_map_lloff */
2663 afs_gn_enosys, /* vn_readdir_eofp */
2664 afs_gn_enosys, /* vn_rdwr_attr */
2665 afs_gn_enosys, /* vn_memcntl */
2666 afs_gn_enosys, /* vn_spare7 */
2667 afs_gn_enosys, /* vn_spare8 */
2668 afs_gn_enosys, /* vn_spare9 */
2669 afs_gn_enosys, /* vn_spareA */
2670 afs_gn_enosys, /* vn_spareB */
2671 afs_gn_enosys, /* vn_spareC */
2672 afs_gn_enosys, /* vn_spareD */
2673 afs_gn_enosys, /* vn_spareE */
2674 afs_gn_enosys /* vn_spareF */
2675 #ifdef AFS_AIX51_ENV
2676 , afs_gn_enosys, /* pagerBackRange */
2677 afs_gn_enosys, /* pagerGetFileSize */
2678 afs_gn_enosys, /* pagerReadAhead */
2679 afs_gn_enosys, /* pagerWriteBehind */
2680 afs_gn_enosys /* pagerEndCopy */
2684 struct gfs afs_gfs = {
2686 &locked_afs_gn_vnodeops,
2690 GFS_VERSION4 | GFS_REMOTE,