2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
31 #include "rpc/types.h"
33 #include "netinet/in.h"
37 #include "rpc/types.h"
41 #include "afs/afs_osi.h"
42 #define RFTP_INTERNALS 1
43 #include "afs/volerrors.h"
47 #include "afs/exporter.h"
49 #include "afs/afs_chunkops.h"
50 #include "afs/afs_stats.h"
51 #include "afs/nfsclient.h"
53 #include "afs/prs_fs.h"
55 #include "afsincludes.h"
59 afs_gn_link(struct vnode *vp,
66 AFS_STATCNT(afs_gn_link);
67 error = afs_link(vp, dp, name, cred);
68 afs_Trace3(afs_iclSetp, CM_TRACE_GNLINK, ICL_TYPE_POINTER, vp,
69 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
75 afs_gn_mkdir(struct vnode *dp,
85 AFS_STATCNT(afs_gn_mkdir);
88 va.va_mode = (mode & 07777) & ~get_umask();
89 error = afs_mkdir(dp, name, &va, &vp, cred);
93 afs_Trace4(afs_iclSetp, CM_TRACE_GMKDIR, ICL_TYPE_POINTER, vp,
94 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
101 afs_gn_mknod(struct vnode *dp,
112 AFS_STATCNT(afs_gn_mknod);
114 va.va_type = IFTOVT(mode);
115 va.va_mode = (mode & 07777) & ~get_umask();
117 /**** I'm not sure if suser() should stay here since it makes no sense in AFS; however the documentation says that one "should be super-user unless making a FIFO file. Others systems such as SUN do this checking in the early stages of mknod (before the abstraction), so it's equivalently the same! *****/
118 if (va.va_type != VFIFO && !suser((char *)&error))
120 switch (va.va_type) {
122 error = afs_mkdir(dp, name, &va, &vp, cred);
132 error = afs_create(VTOAFS(dp), name, &va, NONEXCL, mode, (struct vcache **)&vp, cred);
137 afs_Trace4(afs_iclSetp, CM_TRACE_GMKNOD, ICL_TYPE_POINTER, (afs_int32) vp,
138 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
145 afs_gn_remove(struct vnode *vp, /* Ignored in AFS */
152 AFS_STATCNT(afs_gn_remove);
153 error = afs_remove(dp, name, cred);
154 afs_Trace3(afs_iclSetp, CM_TRACE_GREMOVE, ICL_TYPE_POINTER, dp,
155 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
161 afs_gn_rename(struct vnode *vp, /* Ignored in AFS */
164 struct vnode *tp, /* Ignored in AFS */
171 AFS_STATCNT(afs_gn_rename);
172 error = afs_rename(dp, name, tdp, tname, cred);
173 afs_Trace4(afs_iclSetp, CM_TRACE_GRENAME, ICL_TYPE_POINTER, dp,
174 ICL_TYPE_STRING, name, ICL_TYPE_STRING, tname, ICL_TYPE_LONG,
181 afs_gn_rmdir(struct vnode *vp, /* Ignored in AFS */
188 AFS_STATCNT(afs_gn_rmdir);
189 error = afs_rmdir(dp, name, cred);
191 if (error == 66 /* 4.3's ENOTEMPTY */ )
192 error = EEXIST; /* AIX returns EEXIST where 4.3 used ENOTEMPTY */
194 afs_Trace3(afs_iclSetp, CM_TRACE_GRMDIR, ICL_TYPE_POINTER, dp,
195 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
201 afs_gn_lookup(struct vnode *dp,
204 int32long64_t Flags, /* includes FOLLOW... */
205 struct vattr *vattrp,
211 AFS_STATCNT(afs_gn_lookup);
212 error = afs_lookup(dp, name, vpp, cred);
213 afs_Trace3(afs_iclSetp, CM_TRACE_GLOOKUP, ICL_TYPE_POINTER, dp,
214 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
215 if (vattrp != NULL && error == 0)
216 afs_gn_getattr(*vpp, vattrp, cred);
222 afs_gn_fid(struct vnode *vp,
228 AFS_STATCNT(afs_gn_fid);
229 error = afs_fid(vp, fidp);
230 afs_Trace3(afs_iclSetp, CM_TRACE_GFID, ICL_TYPE_POINTER, vp,
231 ICL_TYPE_LONG, (afs_int32) fidp, ICL_TYPE_LONG, error);
237 afs_gn_open(struct vnode *vp,
240 struct ucred **vinfop,
245 struct vcache *tvp = VTOAFS(vp);
249 AFS_STATCNT(afs_gn_open);
255 if ((flags & FWRITE) || (flags & FTRUNC))
258 while ((flags & FNSHARE) && tvp->opens) {
259 if (!(flags & FDELAY)) {
263 afs_osi_Sleep(&tvp->opens);
266 error = afs_access(VTOAFS(vp), modes, cred);
271 error = afs_open((struct vcache **) &vp, flags, cred);
273 if (flags & FTRUNC) {
276 error = afs_setattr(VTOAFS(vp), &va, cred);
280 tvp->states |= CNSHARE;
283 *vinfop = cred; /* fp->f_vinfo is like fp->f_cred in suns */
285 /* an error occurred; we've told CM that the file
286 * is open, so close it now so that open and
287 * writer counts are correct. Ignore error code,
288 * as it is likely to fail (the setattr just did).
290 afs_close(vp, flags, cred);
295 afs_Trace3(afs_iclSetp, CM_TRACE_GOPEN, ICL_TYPE_POINTER, vp,
296 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
302 afs_gn_create(struct vnode *dp,
307 struct ucred **vinfop, /* return ptr for fp->f_vinfo, used as fp->f_cred */
312 enum vcexcl exclusive;
313 int error, modes = 0;
317 AFS_STATCNT(afs_gn_create);
318 if ((flags & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))
324 va.va_mode = (mode & 07777) & ~get_umask();
329 if ((flags & FWRITE) || (flags & FTRUNC))
331 error = afs_create(VTOAFS(dp), name, &va, exclusive, modes, (struct vcache **)vpp, cred);
335 /* 'cr_luid' is a flag (when it comes thru the NFS server it's set to
336 * RMTUSER_REQ) that determines if we should call afs_open(). We shouldn't
337 * call it when this NFS traffic since the close will never happen thus
338 * we'd never flush the files out to the server! Gross but the simplest
339 * solution we came out with */
340 if (cred->cr_luid != RMTUSER_REQ) {
341 while ((flags & FNSHARE) && VTOAFS(*vpp)->opens) {
342 if (!(flags & FDELAY))
344 afs_osi_Sleep(&VTOAFS(*vpp)->opens);
346 /* Since in the standard copen() for bsd vnode kernels they do an
347 * vop_open after the vop_create, we must do the open here since there
348 * are stuff in afs_open that we need. For example advance the
349 * execsOrWriters flag (else we'll be treated as the sun's "core"
351 *vinfop = cred; /* save user creds in fp->f_vinfo */
352 error = afs_open((struct vcache **)vpp, flags, cred);
354 afs_Trace4(afs_iclSetp, CM_TRACE_GCREATE, ICL_TYPE_POINTER, dp,
355 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
362 afs_gn_hold(struct vnode *vp)
364 AFS_STATCNT(afs_gn_hold);
372 afs_gn_rele(struct vnode *vp)
374 struct vcache *vcp = VTOAFS(vp);
377 AFS_STATCNT(afs_gn_rele);
378 if (vp->v_count == 0)
379 osi_Panic("afs_rele: zero v_count");
380 if (--(vp->v_count) == 0) {
381 if (vcp->states & CPageHog) {
383 vcp->states &= ~CPageHog;
385 error = afs_inactive(vp, 0);
392 afs_gn_close(struct vnode *vp,
394 caddr_t vinfo, /* Ignored in AFS */
398 struct vcache *tvp = VTOAFS(vp);
401 AFS_STATCNT(afs_gn_close);
403 if (flags & FNSHARE) {
404 tvp->states &= ~CNSHARE;
405 afs_osi_Wakeup(&tvp->opens);
408 error = afs_close(vp, flags, cred);
409 afs_Trace3(afs_iclSetp, CM_TRACE_GCLOSE, ICL_TYPE_POINTER, (afs_int32) vp,
410 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
416 afs_gn_map(struct vnode *vp,
423 struct vcache *vcp = VTOAFS(vp);
424 struct vrequest treq;
428 afs_int32 flag = Flag;
430 AFS_STATCNT(afs_gn_map);
432 if (error = afs_InitReq(&treq, cred))
434 error = afs_VerifyVCache(vcp, &treq);
436 return afs_CheckCode(error, &treq, 49);
438 osi_FlushPages(vcp, cred); /* XXX ensure old pages are gone XXX */
439 ObtainWriteLock(&vcp->lock, 401);
440 vcp->states |= CMAPPED; /* flag cleared at afs_inactive */
442 * We map the segment into our address space using the handle returned by vm_create.
445 afs_uint32 tlen = vcp->m.Length;
446 #ifdef AFS_64BIT_CLIENT
447 if (vcp->m.Length > afs_vmMappingEnd)
448 tlen = afs_vmMappingEnd;
450 /* Consider V_INTRSEG too for interrupts */
452 vms_create(&vcp->segid, V_CLIENT, (dev_t) vcp->v.v_gnode, tlen, 0, 0)) {
453 ReleaseWriteLock(&vcp->lock);
456 #ifdef AFS_64BIT_KERNEL
457 vcp->vmh = vm_handle(vcp->segid, (int32long64_t) 0);
459 vcp->vmh = SRVAL(vcp->segid, 0, 0);
462 vcp->v.v_gnode->gn_seg = vcp->segid; /* XXX Important XXX */
463 if (flag & SHM_RDONLY) {
464 vp->v_gnode->gn_mrdcnt++;
466 vp->v_gnode->gn_mwrcnt++;
469 * We keep the caller's credentials since an async daemon will handle the
470 * request at some point. We assume that the same credentials will be used.
472 if (!vcp->credp || (vcp->credp != cred)) {
475 struct ucred *crp = vcp->credp;
481 ReleaseWriteLock(&vcp->lock);
483 afs_Trace4(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vp,
484 ICL_TYPE_LONG, addr, ICL_TYPE_LONG, len, ICL_TYPE_LONG, off);
490 afs_gn_unmap(struct vnode *vp,
494 struct vcache *vcp = VTOAFS(vp);
495 AFS_STATCNT(afs_gn_unmap);
496 ObtainWriteLock(&vcp->lock, 402);
497 if (flag & SHM_RDONLY) {
498 vp->v_gnode->gn_mrdcnt--;
499 if (vp->v_gnode->gn_mrdcnt <= 0)
500 vp->v_gnode->gn_mrdcnt = 0;
502 vp->v_gnode->gn_mwrcnt--;
503 if (vp->v_gnode->gn_mwrcnt <= 0)
504 vp->v_gnode->gn_mwrcnt = 0;
506 ReleaseWriteLock(&vcp->lock);
514 afs_gn_access(struct vnode *vp,
524 AFS_STATCNT(afs_gn_access);
530 error = afs_access(VTOAFS(vp), mode, cred);
532 /* Additional testing */
533 if (who == ACC_OTHERS || who == ACC_ANY) {
534 error = afs_getattr(VTOAFS(vp), &vattr, cred);
536 if (who == ACC_ANY) {
537 if (((vattr.va_mode >> 6) & mode) == mode) {
542 if (((vattr.va_mode >> 3) & mode) == mode)
547 } else if (who == ACC_ALL) {
548 error = afs_getattr(VTOAFS(vp), &vattr, cred);
550 if ((!((vattr.va_mode >> 6) & mode))
551 || (!((vattr.va_mode >> 3) & mode))
552 || (!(vattr.va_mode & mode)))
561 afs_Trace3(afs_iclSetp, CM_TRACE_GACCESS, ICL_TYPE_POINTER, vp,
562 ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
568 afs_gn_getattr(struct vnode *vp,
569 struct vattr *vattrp,
574 AFS_STATCNT(afs_gn_getattr);
575 error = afs_getattr(VTOAFS(vp), vattrp, cred);
576 afs_Trace2(afs_iclSetp, CM_TRACE_GGETATTR, ICL_TYPE_POINTER, vp,
577 ICL_TYPE_LONG, error);
583 afs_gn_setattr(struct vnode *vp,
593 AFS_STATCNT(afs_gn_setattr);
601 if ((arg1 & T_OWNER_AS_IS) == 0)
603 if ((arg1 & T_GROUP_AS_IS) == 0)
608 error = afs_access(vp, VWRITE, cred);
612 if (arg1 & T_SETTIME) {
613 va.va_atime.tv_sec = time;
614 va.va_mtime.tv_sec = time;
616 va.va_atime = *(struct timestruc_t *)arg2;
617 va.va_mtime = *(struct timestruc_t *)arg3;
625 error = afs_setattr(VTOAFS(vp), &va, cred);
627 afs_Trace2(afs_iclSetp, CM_TRACE_GSETATTR, ICL_TYPE_POINTER, vp,
628 ICL_TYPE_LONG, error);
633 char zero_buffer[PAGESIZE];
635 afs_gn_fclear(struct vnode *vp,
642 int i, len, error = 0;
645 static int fclear_init = 0;
646 struct vcache *avc = VTOAFS(vp);
648 AFS_STATCNT(afs_gn_fclear);
650 memset(zero_buffer, 0, PAGESIZE);
654 * Don't clear past ulimit
656 if (offset + length > get_ulimit())
659 /* Flush all pages first */
662 vm_flushp(avc->segid, 0, MAXFSIZE / PAGESIZE - 1);
663 vms_iowait(avc->segid);
666 uio.afsio_offset = offset;
667 for (i = offset; i < offset + length; i = uio.afsio_offset) {
668 len = offset + length - i;
669 iov.iov_len = (len > PAGESIZE) ? PAGESIZE : len;
670 iov.iov_base = zero_buffer;
671 uio.afsio_iov = &iov;
672 uio.afsio_iovcnt = 1;
673 uio.afsio_seg = AFS_UIOSYS;
674 uio.afsio_resid = iov.iov_len;
675 if (error = afs_rdwr(VTOAFS(vp), &uio, UIO_WRITE, 0, cred))
678 afs_Trace4(afs_iclSetp, CM_TRACE_GFCLEAR, ICL_TYPE_POINTER, vp,
679 ICL_TYPE_LONG, offset, ICL_TYPE_LONG, length, ICL_TYPE_LONG,
686 afs_gn_fsync(struct vnode *vp,
687 int32long64_t flags, /* Not used by AFS */
688 int32long64_t vinfo, /* Not used by AFS */
693 AFS_STATCNT(afs_gn_fsync);
694 error = afs_fsync(vp, cred);
695 afs_Trace3(afs_iclSetp, CM_TRACE_GFSYNC, ICL_TYPE_POINTER, vp,
696 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
702 afs_gn_ftrunc(struct vnode *vp,
711 AFS_STATCNT(afs_gn_ftrunc);
714 error = afs_setattr(VTOAFS(vp), &va, cred);
715 afs_Trace4(afs_iclSetp, CM_TRACE_GFTRUNC, ICL_TYPE_POINTER, vp,
716 ICL_TYPE_LONG, flags, ICL_TYPE_OFFSET,
717 ICL_HANDLE_OFFSET(length), ICL_TYPE_LONG, error);
721 /* Min size of a file which is dumping core before we declare it a page hog. */
722 #define MIN_PAGE_HOG_SIZE 8388608
725 afs_gn_rdwr(struct vnode *vp,
729 ext_t ext, /* Ignored in AFS */
730 caddr_t vinfo, /* Ignored in AFS */
731 struct vattr *vattrp,
734 struct vcache *vcp = VTOAFS(vp);
735 struct vrequest treq;
740 AFS_STATCNT(afs_gn_rdwr);
743 if (op == UIO_WRITE) {
744 afs_Trace2(afs_iclSetp, CM_TRACE_GRDWR1, ICL_TYPE_POINTER, vp,
745 ICL_TYPE_LONG, vcp->vc_error);
746 return vcp->vc_error;
751 ObtainSharedLock(&vcp->lock, 507);
753 * We keep the caller's credentials since an async daemon will handle the
754 * request at some point. We assume that the same credentials will be used.
755 * If this is being called from an NFS server thread, then dupe the
756 * cred and only use that copy in calls and for the stach.
758 if (!vcp->credp || (vcp->credp != cred)) {
759 #ifdef AFS_AIX_IAUTH_ENV
760 if (AFS_NFSXLATORREQ(cred)) {
761 /* Must be able to use cred later, so dupe it so that nfs server
762 * doesn't overwrite it's contents.
768 crhold(cred); /* Bump refcount for reference in vcache */
772 UpgradeSToWLock(&vcp->lock, 508);
775 ConvertWToSLock(&vcp->lock);
780 ReleaseSharedLock(&vcp->lock);
783 * XXX Is the following really required?? XXX
785 if (error = afs_InitReq(&treq, cred))
787 if (error = afs_VerifyVCache(vcp, &treq))
788 return afs_CheckCode(error, &treq, 50);
789 osi_FlushPages(vcp, cred); /* Flush old pages */
791 if (AFS_NFSXLATORREQ(cred)) {
794 if (op == UIO_READ) {
796 (vcp, PRSFS_READ, &treq,
797 CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) {
806 * We have to bump the open/exwriters field here courtesy of the nfs xlator
807 * because there're no open/close nfs rpcs to call our afs_open/close.
808 * We do a similar thing on the afs_read/write interface.
810 if (op == UIO_WRITE) {
811 #ifdef AFS_64BIT_CLIENT
812 if (ubuf->afsio_offset < afs_vmMappingEnd) {
813 #endif /* AFS_64BIT_CLIENT */
814 ObtainWriteLock(&vcp->lock, 240);
815 vcp->states |= CDirty; /* Set the dirty bit */
817 ReleaseWriteLock(&vcp->lock);
818 #ifdef AFS_64BIT_CLIENT
820 #endif /* AFS_64BIT_CLIENT */
823 error = afs_vm_rdwr(vp, ubuf, op, flags, cred);
825 if (op == UIO_WRITE) {
826 #ifdef AFS_64BIT_CLIENT
827 if (ubuf->afsio_offset < afs_vmMappingEnd) {
828 #endif /* AFS_64BIT_CLIENT */
829 ObtainWriteLock(&vcp->lock, 241);
830 afs_FakeClose(vcp, cred); /* XXXX For nfs trans and cores XXXX */
831 ReleaseWriteLock(&vcp->lock);
832 #ifdef AFS_64BIT_CLIENT
834 #endif /* AFS_64BIT_CLIENT */
836 if (vattrp != NULL && error == 0)
837 afs_gn_getattr(vp, vattrp, cred);
839 afs_Trace4(afs_iclSetp, CM_TRACE_GRDWR, ICL_TYPE_POINTER, vp,
840 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, op, ICL_TYPE_LONG, error);
847 #define AFS_MAX_VM_CHUNKS 10
849 afs_vm_rdwr(struct vnode *vp,
858 afs_size_t fileSize, xfrOffset, offset, old_offset, xfrSize;
860 #ifdef AFS_64BIT_CLIENT
861 afs_size_t finalOffset;
864 afs_size_t add2resid = 0;
865 #endif /* AFS_64BIT_CLIENT */
866 struct vcache *vcp = VTOAFS(vp);
868 afs_size_t start_offset;
869 afs_int32 save_resid = uiop->afsio_resid;
870 int first_page, last_page, pages;
873 struct vrequest treq;
875 if (code = afs_InitReq(&treq, credp))
878 /* special case easy transfer; apparently a lot are done */
879 if ((xfrSize = uiop->afsio_resid) == 0)
882 ObtainReadLock(&vcp->lock);
883 fileSize = vcp->m.Length;
884 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
885 uiop->afsio_offset = fileSize;
887 /* compute xfrOffset now, and do some checks */
888 xfrOffset = uiop->afsio_offset;
889 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
893 #ifndef AFS_64BIT_CLIENT
894 /* check for "file too big" error, which should really be done above us */
895 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
899 #endif /* AFS_64BIT_CLIENT */
901 #ifdef AFS_64BIT_CLIENT
902 if (xfrOffset + xfrSize > afs_vmMappingEnd) {
903 if (rw == UIO_READ) {
904 /* don't read past EOF */
905 if (xfrSize+xfrOffset > fileSize) {
906 add2resid = xfrSize + xfrOffset - fileSize;
907 xfrSize = fileSize - xfrOffset;
908 if (xfrSize <= 0) goto fail;
910 afsio_trim(uiop, txfrSize);
913 if (xfrOffset < afs_vmMappingEnd) {
914 /* special case of a buffer crossing the VM mapping line */
916 struct iovec tvec[16]; /* Should have access to #define */
920 finalOffset = xfrOffset + xfrSize;
921 tsize = (afs_size_t) (xfrOffset + xfrSize - afs_vmMappingEnd);
923 afsio_copy(uiop, &tuio, tvec);
924 afsio_skip(&tuio, txfrSize - tsize);
925 afsio_trim(&tuio, tsize);
926 tuio.afsio_offset = afs_vmMappingEnd;
927 ReleaseReadLock(&vcp->lock);
928 ObtainWriteLock(&vcp->lock, 243);
929 afs_FakeClose(vcp, credp); /* XXXX For nfs trans and cores XXXX */
930 ReleaseWriteLock(&vcp->lock);
931 code = afs_direct_rdwr(vp, &tuio, rw, ioflag, credp);
932 ObtainWriteLock(&vcp->lock, 244);
933 afs_FakeOpen(vcp); /* XXXX For nfs trans and cores XXXX */
934 ReleaseWriteLock(&vcp->lock);
935 ObtainReadLock(&vcp->lock);
938 xfrSize = afs_vmMappingEnd - xfrOffset;
940 afsio_trim(uiop, txfrSize);
942 ReleaseReadLock(&vcp->lock);
943 code = afs_direct_rdwr(vp, uiop, rw, ioflag, credp);
944 uiop->uio_resid += add2resid;
948 #endif /* AFS_64BIT_CLIENT */
951 afs_uint32 tlen = vcp->m.Length;
952 #ifdef AFS_64BIT_CLIENT
953 if (vcp->m.Length > afs_vmMappingEnd)
954 tlen = afs_vmMappingEnd;
956 /* Consider V_INTRSEG too for interrupts */
958 vms_create(&vcp->segid, V_CLIENT, (dev_t) vcp->v.v_gnode, tlen, 0, 0)) {
961 #ifdef AFS_64BIT_KERNEL
962 vcp->vmh = vm_handle(vcp->segid, (int32long64_t) 0);
964 vcp->vmh = SRVAL(vcp->segid, 0, 0);
967 vcp->v.v_gnode->gn_seg = vcp->segid;
968 if (rw == UIO_READ) {
969 /* don't read past EOF */
970 if (xfrSize + xfrOffset > fileSize)
971 xfrSize = fileSize - xfrOffset;
974 ReleaseReadLock(&vcp->lock);
975 #ifdef AFS_64BIT_CLIENT
977 uiop->afsio_offset = xfrOffset;
978 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE, ICL_TYPE_POINTER, vcp,
979 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrOffset),
980 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrSize));
983 code = vm_move(vcp->segid, toffset, txfrSize, rw, uiop);
984 #else /* AFS_64BIT_CLIENT */
986 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
987 #endif /* AFS_64BIT_CLIENT */
990 * If at a chunk boundary and staying within chunk,
991 * start prefetch of next chunk.
993 if (counter == 0 || AFS_CHUNKOFFSET(xfrOffset) == 0
994 && xfrSize <= AFS_CHUNKSIZE(xfrOffset)) {
995 ObtainWriteLock(&vcp->lock, 407);
996 tdc = afs_FindDCache(vcp, xfrOffset);
998 if (!(tdc->mflags & DFNextStarted))
999 afs_PrefetchChunk(vcp, tdc, credp, &treq);
1002 ReleaseWriteLock(&vcp->lock);
1004 #ifdef AFS_64BIT_CLIENT
1006 uiop->afsio_offset = finalOffset;
1008 uiop->uio_resid += add2resid;
1009 #endif /* AFS_64BIT_CLIENT */
1014 start_offset = uiop->afsio_offset;
1015 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE, ICL_TYPE_POINTER, vcp,
1016 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(start_offset),
1017 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrSize));
1018 ReleaseReadLock(&vcp->lock);
1019 ObtainWriteLock(&vcp->lock, 400);
1020 vcp->m.Date = osi_Time(); /* Set file date (for ranlib) */
1022 /* un-protect last page. */
1023 last_page = vcp->m.Length / PAGESIZE;
1024 #ifdef AFS_64BIT_CLIENT
1025 if (vcp->m.Length > afs_vmMappingEnd)
1026 last_page = afs_vmMappingEnd / PAGESIZE;
1028 vm_protectp(vcp->segid, last_page, 1, FILEKEY);
1029 if (xfrSize + xfrOffset > fileSize) {
1030 vcp->m.Length = xfrSize + xfrOffset;
1032 if ((!(vcp->states & CPageHog)) && (xfrSize >= MIN_PAGE_HOG_SIZE)) {
1034 vcp->states |= CPageHog;
1036 ReleaseWriteLock(&vcp->lock);
1038 /* If the write will fit into a single chunk we'll write all of it
1039 * at once. Otherwise, we'll write one chunk at a time, flushing
1040 * some of it to disk.
1044 /* Only create a page to avoid excess VM access if we're writing a
1045 * small file which is either new or completely overwrites the
1048 if ((xfrOffset == 0) && (xfrSize < PAGESIZE) && (xfrSize >= fileSize)
1049 && (vcp->v.v_gnode->gn_mwrcnt == 0)
1050 && (vcp->v.v_gnode->gn_mrdcnt == 0)) {
1051 (void)vm_makep(vcp->segid, 0);
1054 while (xfrSize > 0) {
1055 offset = AFS_CHUNKBASE(xfrOffset);
1058 if (AFS_CHUNKSIZE(xfrOffset) <= len)
1060 (afs_size_t) AFS_CHUNKSIZE(xfrOffset) - (xfrOffset - offset);
1062 if (len == xfrSize) {
1063 /* All data goes to this one chunk. */
1065 old_offset = uiop->afsio_offset;
1066 #ifdef AFS_64BIT_CLIENT
1067 uiop->afsio_offset = xfrOffset;
1068 toffset = xfrOffset;
1070 code = vm_move(vcp->segid, toffset, txfrSize, rw, uiop);
1071 #else /* AFS_64BIT_CLIENT */
1072 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
1073 #endif /* AFS_64BIT_CLIENT */
1078 /* Write just one chunk's worth of data. */
1080 struct iovec tvec[16]; /* Should have access to #define */
1082 /* Purge dirty chunks of file if there are too many dirty chunks.
1083 * Inside the write loop, we only do this at a chunk boundary.
1084 * Clean up partial chunk if necessary at end of loop.
1086 if (counter > 0 && code == 0 && xfrOffset == offset) {
1087 ObtainWriteLock(&vcp->lock, 403);
1088 if (xfrOffset > vcp->m.Length)
1089 vcp->m.Length = xfrOffset;
1090 code = afs_DoPartialWrite(vcp, &treq);
1091 vcp->states |= CDirty;
1092 ReleaseWriteLock(&vcp->lock);
1096 afsio_copy(uiop, &tuio, tvec);
1097 afsio_trim(&tuio, len);
1098 tuio.afsio_offset = xfrOffset;
1101 old_offset = uiop->afsio_offset;
1102 #ifdef AFS_64BIT_CLIENT
1103 toffset = xfrOffset;
1104 code = vm_move(vcp->segid, toffset, len, rw, &tuio);
1105 #else /* AFS_64BIT_CLIENT */
1106 code = vm_move(vcp->segid, xfrOffset, len, rw, &tuio);
1107 #endif /* AFS_64BIT_CLIENT */
1109 len -= tuio.afsio_resid;
1110 afsio_skip(uiop, len);
1115 first_page = (afs_size_t) old_offset >> PGSHIFT;
1117 1 + (((afs_size_t) old_offset + (len - 1)) >> PGSHIFT) -
1119 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE2, ICL_TYPE_POINTER, vcp,
1120 ICL_TYPE_INT32, first_page, ICL_TYPE_INT32, pages);
1122 code = vm_writep(vcp->segid, first_page, pages);
1123 if (++count > AFS_MAX_VM_CHUNKS) {
1125 vms_iowait(vcp->segid);
1133 vms_iowait(vcp->segid);
1137 ObtainWriteLock(&vcp->lock, 242);
1138 if (code == 0 && (vcp->states & CDirty)) {
1139 code = afs_DoPartialWrite(vcp, &treq);
1141 vm_protectp(vcp->segid, last_page, 1, RDONLY);
1142 ReleaseWriteLock(&vcp->lock);
1144 /* If requested, fsync the file after every write */
1146 afs_fsync(vp, credp);
1148 ObtainReadLock(&vcp->lock);
1149 if (vcp->vc_error) {
1150 /* Pretend we didn't write anything. We need to get the error back to
1151 * the user. If we don't it's possible for a quota error for this
1152 * write to succeed and the file to be closed without the user ever
1153 * having seen the store error. And AIX syscall clears the error if
1154 * anything was written.
1156 code = vcp->vc_error;
1157 if (code == EDQUOT || code == ENOSPC)
1158 uiop->afsio_resid = save_resid;
1160 #ifdef AFS_64BIT_CLIENT
1162 uiop->afsio_offset = finalOffset;
1164 #endif /* AFS_64BIT_CLIENT */
1167 ReleaseReadLock(&vcp->lock);
1168 afs_Trace2(afs_iclSetp, CM_TRACE_VMWRITE3, ICL_TYPE_POINTER, vcp,
1169 ICL_TYPE_INT32, code);
1175 afs_direct_rdwr(struct vnode *vp,
1179 struct ucred *credp)
1182 afs_size_t fileSize, xfrOffset, offset, old_offset, xfrSize;
1183 struct vcache *vcp = VTOAFS(vp);
1184 afs_int32 save_resid = uiop->afsio_resid;
1185 struct vrequest treq;
1187 if (code = afs_InitReq(&treq, credp))
1190 /* special case easy transfer; apparently a lot are done */
1191 if ((xfrSize = uiop->afsio_resid) == 0)
1194 ObtainReadLock(&vcp->lock);
1195 fileSize = vcp->m.Length;
1196 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
1197 uiop->afsio_offset = fileSize;
1199 /* compute xfrOffset now, and do some checks */
1200 xfrOffset = uiop->afsio_offset;
1201 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
1203 ReleaseReadLock(&vcp->lock);
1207 /* check for "file too big" error, which should really be done above us */
1209 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
1211 ReleaseReadLock(&vcp->lock);
1215 ReleaseReadLock(&vcp->lock);
1216 if (rw == UIO_WRITE) {
1217 ObtainWriteLock(&vcp->lock, 400);
1218 vcp->m.Date = osi_Time(); /* Set file date (for ranlib) */
1220 if (xfrSize + xfrOffset > fileSize)
1221 vcp->m.Length = xfrSize + xfrOffset;
1222 ReleaseWriteLock(&vcp->lock);
1224 afs_Trace3(afs_iclSetp, CM_TRACE_DIRECTRDWR, ICL_TYPE_POINTER, vp,
1225 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(uiop->afsio_offset),
1226 ICL_TYPE_LONG, uiop->afsio_resid);
1227 code = afs_rdwr(VTOAFS(vp), uiop, rw, ioflag, credp);
1229 uiop->afsio_resid = save_resid;
1231 uiop->afsio_offset = xfrOffset + xfrSize;
1232 if (uiop->afsio_resid > 0) {
1233 /* should zero here the remaining buffer */
1234 uiop->afsio_resid = 0;
1236 /* Purge dirty chunks of file if there are too many dirty chunks.
1237 * Inside the write loop, we only do this at a chunk boundary.
1238 * Clean up partial chunk if necessary at end of loop.
1240 if (AFS_CHUNKBASE(uiop->afsio_offset) != AFS_CHUNKBASE(xfrOffset)) {
1241 ObtainWriteLock(&vcp->lock, 402);
1242 code = afs_DoPartialWrite(vcp, &treq);
1243 vcp->states |= CDirty;
1244 ReleaseWriteLock(&vcp->lock);
1254 lock_normalize(struct vnode *vp,
1255 struct flock *lckdat,
1262 switch (lckdat->l_whence) {
1266 lckdat->l_start += (off_t) offset;
1269 code = afs_getattr(VTOAFS(vp), &vattr, cred);
1272 lckdat->l_start += (off_t) vattr.va_size;
1277 lckdat->l_whence = 0;
1284 afs_gn_lockctl(struct vnode *vp,
1286 struct eflock *lckdat,
1288 int (*ignored_fcn) (),
1289 #ifdef AFS_AIX52_ENV /* Changed in AIX 5.2 and up */
1291 #else /* AFS_AIX52_ENV */
1292 ulong32int64_t * ignored_id,
1293 #endif /* AFS_AIX52_ENV */
1296 int error, ncmd = 0;
1298 struct vattr *attrs;
1300 AFS_STATCNT(afs_gn_lockctl);
1301 /* Convert from AIX's cmd to standard lockctl lock types... */
1304 else if (cmd & SETFLCK) {
1309 flkd.l_type = lckdat->l_type;
1310 flkd.l_whence = lckdat->l_whence;
1311 flkd.l_start = lckdat->l_start;
1312 flkd.l_len = lckdat->l_len;
1313 flkd.l_pid = lckdat->l_pid;
1314 flkd.l_sysid = lckdat->l_sysid;
1316 if (flkd.l_start != lckdat->l_start || flkd.l_len != lckdat->l_len)
1318 if (error = lock_normalize(vp, &flkd, offset, cred))
1320 error = afs_lockctl(vp, &flkd, ncmd, cred);
1321 lckdat->l_type = flkd.l_type;
1322 lckdat->l_whence = flkd.l_whence;
1323 lckdat->l_start = flkd.l_start;
1324 lckdat->l_len = flkd.l_len;
1325 lckdat->l_pid = flkd.l_pid;
1326 lckdat->l_sysid = flkd.l_sysid;
1327 afs_Trace3(afs_iclSetp, CM_TRACE_GLOCKCTL, ICL_TYPE_POINTER, vp,
1328 ICL_TYPE_LONG, ncmd, ICL_TYPE_LONG, error);
1333 /* NOTE: In the nfs glue routine (nfs_gn2sun.c) the order was wrong (vp, flags, cmd, arg, ext); was that another typo? */
1335 afs_gn_ioctl(struct vnode *vp,
1338 size_t flags, /* Ignored in AFS */
1339 ext_t ext, /* Ignored in AFS */
1340 struct ucred *crp) /* Ignored in AFS */
1345 AFS_STATCNT(afs_gn_ioctl);
1346 /* This seems to be a perfect fit for our ioctl redirection (afs_xioctl hack); thus the ioctl(2) entry in sysent.c is unaffected in the aix/afs port. */
1347 error = afs_ioctl(vp, cmd, arg);
1348 afs_Trace3(afs_iclSetp, CM_TRACE_GIOCTL, ICL_TYPE_POINTER, vp,
1349 ICL_TYPE_LONG, cmd, ICL_TYPE_LONG, error);
1355 afs_gn_readlink(struct vnode *vp,
1361 AFS_STATCNT(afs_gn_readlink);
1362 error = afs_readlink(vp, uiop, cred);
1363 afs_Trace2(afs_iclSetp, CM_TRACE_GREADLINK, ICL_TYPE_POINTER, vp,
1364 ICL_TYPE_LONG, error);
1370 afs_gn_select(struct vnode *vp,
1371 int32long64_t correl,
1378 AFS_STATCNT(afs_gn_select);
1379 /* NO SUPPORT for this in afs YET! */
1380 return (EOPNOTSUPP);
1385 afs_gn_symlink(struct vnode *vp,
1393 AFS_STATCNT(afs_gn_symlink);
1396 error = afs_symlink(vp, link, &va, target, cred);
1397 afs_Trace4(afs_iclSetp, CM_TRACE_GSYMLINK, ICL_TYPE_POINTER, vp,
1398 ICL_TYPE_STRING, link, ICL_TYPE_STRING, target, ICL_TYPE_LONG,
1405 afs_gn_readdir(struct vnode *vp,
1411 AFS_STATCNT(afs_gn_readdir);
1412 error = afs_readdir(vp, uiop, cred);
1413 afs_Trace2(afs_iclSetp, CM_TRACE_GREADDIR, ICL_TYPE_POINTER, vp,
1414 ICL_TYPE_LONG, error);
1419 extern Simple_lock afs_asyncbuf_lock;
1420 extern struct buf *afs_asyncbuf;
1421 extern int afs_asyncbuf_cv;
1424 * Buffers are ranked by age. A buffer's age is the value of afs_biotime
1425 * when the buffer is processed by afs_gn_strategy. afs_biotime is
1426 * incremented for each buffer. A buffer's age is kept in its av_back field.
1427 * The age ranking is used by the daemons, which favor older buffers.
1429 afs_int32 afs_biotime = 0;
1431 /* This function is called with a list of buffers, threaded through
1432 * the av_forw field. Our goal is to copy the list of buffers into the
1433 * afs_asyncbuf list, sorting buffers into sublists linked by the b_work field.
1434 * Within buffers within the same work group, the guy with the lowest address
1435 * has to be located at the head of the queue; his b_bcount field will also
1436 * be increased to cover all of the buffers in the b_work queue.
1438 #define AIX_VM_BLKSIZE 8192
1439 /* Note: This function seems to be called as ddstrategy entry point, ie
1440 * has one argument. However, it also needs to be present as
1441 * vn_strategy entry point which has three arguments, but it seems to never
1442 * be called in that capacity (it would fail horribly due to the argument
1443 * mismatch). I'm confused, but it obviously has to be this way, maybe
1444 * some IBM people can shed som light on this
1447 afs_gn_strategy(struct buf *abp)
1449 struct buf **lbp, *tbp;
1451 struct buf *nbp, *qbp, *qnbp, *firstComparable;
1455 #define EFS_COMPARABLE(x,y) ((x)->b_vp == (y)->b_vp \
1456 && (x)->b_xmemd.subspace_id == (y)->b_xmemd.subspace_id \
1457 && (x)->b_flags == (y)->b_flags \
1458 && !((x)->b_flags & B_PFPROT) \
1459 && !((y)->b_flags & B_PFPROT))
1461 oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);
1462 for (tbp = abp; tbp; tbp = nbp) {
1463 nbp = tbp->av_forw; /* remember for later */
1465 tbp->av_back = (struct buf *)afs_biotime++;
1467 /* first insert the buffer into the afs_async queue. Insert buffer
1468 * sorted within its disk position within a set of comparable buffers.
1469 * Ensure that all comparable buffers are grouped contiguously.
1470 * Later on, we'll merge adjacent buffers into a single request.
1472 firstComparable = NULL;
1473 lbp = &afs_asyncbuf;
1474 for (qbp = *lbp; qbp; lbp = &qbp->av_forw, qbp = *lbp) {
1475 if (EFS_COMPARABLE(tbp, qbp)) {
1476 if (!firstComparable)
1477 firstComparable = qbp;
1478 /* this buffer is comparable, so see if the next buffer
1479 * is farther in the file; if it is insert before next buffer.
1481 if (tbp->b_blkno < qbp->b_blkno) {
1485 /* If we're at the end of a block of comparable buffers, we
1486 * insert the buffer here to keep all comparable buffers
1489 if (firstComparable)
1493 /* do the insert before qbp now */
1494 tbp->av_forw = *lbp;
1496 if (firstComparable == NULL) {
1497 /* next we're going to do all sorts of buffer merging tricks, but
1498 * here we know we're the only COMPARABLE block in the
1499 * afs_asyncbuf list, so we just skip that and continue with
1500 * the next input buffer.
1505 /* we may have actually added the "new" firstComparable */
1506 if (tbp->av_forw == firstComparable)
1507 firstComparable = tbp;
1509 * when we get here, firstComparable points to the first dude in the
1510 * same vnode and subspace that we (tbp) are in. We go through the
1511 * area of this list with COMPARABLE buffers (a contiguous region) and
1512 * repeated merge buffers that are contiguous and in the same block or
1513 * buffers that are contiguous and are both integral numbers of blocks.
1514 * Note that our end goal is to have as big blocks as we can, but we
1515 * must minimize the transfers that are not integral #s of blocks on
1516 * block boundaries, since Episode will do those smaller and/or
1517 * unaligned I/Os synchronously.
1519 * A useful example to consider has the async queue with this in it:
1520 * [8K block, 2 pages] [4K block, 1 page] [4K hole] [8K block, 2 pages]
1521 * If we get a request that fills the 4K hole, we want to merge this
1522 * whole mess into a 24K, 6 page transfer. If we don't, however, we
1523 * don't want to do any merging since adding the 4K transfer to the 8K
1524 * transfer makes the 8K transfer synchronous.
1526 * Note that if there are any blocks whose size is a multiple of
1527 * the file system block size, then we know that such blocks are also
1528 * on block boundaries.
1531 doMerge = 1; /* start the loop */
1532 while (doMerge) { /* loop until an iteration doesn't
1533 * make any more changes */
1535 for (qbp = firstComparable;; qbp = qnbp) {
1536 qnbp = qbp->av_forw;
1538 break; /* we're done */
1539 if (!EFS_COMPARABLE(qbp, qnbp))
1542 /* try to merge qbp and qnbp */
1544 /* first check if both not adjacent go on to next region */
1545 if ((dbtob(qbp->b_blkno) + qbp->b_bcount) !=
1546 dbtob(qnbp->b_blkno))
1549 /* note if both in the same block, the first byte of leftmost guy
1550 * and last byte of rightmost guy are in the same block.
1552 if ((dbtob(qbp->b_blkno) & ~(AIX_VM_BLKSIZE - 1)) ==
1553 ((dbtob(qnbp->b_blkno) + qnbp->b_bcount -
1554 1) & ~(AIX_VM_BLKSIZE - 1))) {
1555 doMerge = 1; /* both in same block */
1556 } else if ((qbp->b_bcount & (AIX_VM_BLKSIZE - 1)) == 0
1557 && (qnbp->b_bcount & (AIX_VM_BLKSIZE - 1)) == 0) {
1558 doMerge = 1; /* both integral #s of blocks */
1563 /* merge both of these blocks together */
1564 /* first set age to the older of the two */
1565 if ((int32long64_t) qnbp->av_back -
1566 (int32long64_t) qbp->av_back < 0) {
1567 qbp->av_back = qnbp->av_back;
1569 lwbp = (struct buf **) &qbp->b_work;
1570 /* find end of qbp's work queue */
1571 for (xbp = *lwbp; xbp;
1572 lwbp = (struct buf **) &xbp->b_work, xbp = *lwbp);
1574 * now setting *lwbp will change the last ptr in the qbp's
1577 qbp->av_forw = qnbp->av_forw; /* splice out qnbp */
1578 qbp->b_bcount += qnbp->b_bcount; /* fix count */
1579 *lwbp = qnbp; /* append qnbp to end */
1581 * note that qnbp is bogus, but it doesn't matter because
1582 * we're going to restart the for loop now.
1584 break; /* out of the for loop */
1588 } /* for loop for all interrupt data */
1589 /* at this point, all I/O has been queued. Wakeup the daemon */
1590 e_wakeup_one((int *)&afs_asyncbuf_cv);
1591 unlock_enable(oldPriority, &afs_asyncbuf_lock);
/* NOTE(review): this file is a sampled listing -- the leading numbers are
 * original line numbers and interior lines (opening brace, return, etc.)
 * are missing from this extract.  Code below is kept byte-identical.
 *
 * afs_inactive: vnode-inactive hook; forwards the vcache and credentials
 * to the cache manager's afs_InactiveVCache().  Confirm the full body
 * against the complete source before editing. */
1597 afs_inactive(struct vcache *avc,
1598 struct AFS_UCRED *acred)
1600 afs_InactiveVCache(avc, acred);
/* afs_gn_revoke: vn_revoke entry point.  Unimplemented by design -- bumps
 * the stats counter and reports EOPNOTSUPP to the caller.
 * NOTE(review): parameter lines between the two shown signature lines are
 * missing from this sampled extract. */
1604 afs_gn_revoke(struct vnode *vp,
1607 struct vattr *vinfop,
1610 AFS_STATCNT(afs_gn_revoke);
1611 /* NO SUPPORT for this in afs YET! */
1612 return (EOPNOTSUPP);
/* ACL/PCL entry points and vn_seek.  Only the first signature line of each
 * survives in this sampled extract; their bodies are missing from view.
 * NOTE(review): do not infer behavior from the names -- recover the full
 * bodies from the complete source before editing. */
1616 afs_gn_getacl(struct vnode *vp,
1625 afs_gn_setacl(struct vnode *vp,
1634 afs_gn_getpcl(struct vnode *vp,
1643 afs_gn_setpcl(struct vnode *vp,
/* afs_gn_seek: per the comment below, offset validation is optional; the
 * actual return statement is not visible in this extract. */
1652 afs_gn_seek(struct vnode* vp, offset_t * offp, struct ucred * crp)
1655 * File systems which do not wish to do offset validation can simply
1656 * return 0. File systems which do not provide the vn_seek entry point
1657 * will have a maximum offset of OFF_MAX (2 gigabytes minus 1) enforced
1658 * by the logical file system.
1671 * declare a struct vnodeops and initialize it with ptrs to all functions
/* afs_gn_vnodeops: the unlocked AIX vnode-operations dispatch table.
 * NOTE(review): most initializer entries are missing from this sampled
 * extract -- only the casted afs_gn_enosys stubs and section comments
 * survive.  The surviving entries pair a function-pointer cast with
 * afs_gn_enosys for slots AFS does not implement; the AIX 5.3 extended
 * slots (EA and extended-ACL ops) are gated on AFS_AIX53_ENV, with
 * vn_spare placeholders otherwise, and the pager entry points are gated
 * on AFS_AIX51_ENV. */
1673 struct vnodeops afs_gn_vnodeops = {
1674 /* creation/naming/deletion */
1681 /* lookup, file handle stuff */
1683 (int(*)(struct vnode*,struct fileid*,struct ucred*))
1685 /* access to files */
1686 (int(*)(struct vnode *, int32long64_t, ext_t, caddr_t *,struct ucred *))
1688 (int(*)(struct vnode *, struct vnode **, int32long64_t,caddr_t, int32long64_t, caddr_t *, struct ucred *))
1695 /* manipulate attributes of files */
1699 /* data update operations */
1712 (int(*)(struct vnode*,struct buf*,struct ucred*))
1714 /* security things */
1721 (int(*)(struct vnode *, int32long64_t, int32long64_t, offset_t, offset_t, struct ucred *))
1722 afs_gn_enosys, /* vn_fsync_range */
1723 (int(*)(struct vnode *, struct vnode **, int32long64_t, char *, struct vattr *, int32long64_t, caddr_t *, struct ucred *))
1724 afs_gn_enosys, /* vn_create_attr */
1725 (int(*)(struct vnode *, int32long64_t, void *, size_t, struct ucred *))
1726 afs_gn_enosys, /* vn_finfo */
1727 (int(*)(struct vnode *, caddr_t, offset_t, offset_t, uint32long64_t, uint32long64_t, struct ucred *))
1728 afs_gn_enosys, /* vn_map_lloff */
1729 (int(*)(struct vnode*,struct uio*,int*,struct ucred*))
1730 afs_gn_enosys, /* vn_readdir_eofp */
1731 (int(*)(struct vnode *, enum uio_rw, int32long64_t, struct uio *, ext_t , caddr_t, struct vattr *, struct vattr *, struct ucred *))
1732 afs_gn_enosys, /* vn_rdwr_attr */
1733 (int(*)(struct vnode*,int,void*,struct ucred*))
1734 afs_gn_enosys, /* vn_memcntl */
1735 #ifdef AFS_AIX53_ENV /* Present in AIX 5.3 and up */
1736 (int(*)(struct vnode*,const char*,struct uio*,struct ucred*))
1737 afs_gn_enosys, /* vn_getea */
1738 (int(*)(struct vnode*,const char*,struct uio*,int,struct ucred*))
1739 afs_gn_enosys, /* vn_setea */
1740 (int(*)(struct vnode *, struct uio *, struct ucred *))
1741 afs_gn_enosys, /* vn_listea */
1742 (int(*)(struct vnode *, const char *, struct ucred *))
1743 afs_gn_enosys, /* vn_removeea */
1744 (int(*)(struct vnode *, const char *, struct vattr *, struct ucred *))
1745 afs_gn_enosys, /* vn_statea */
1746 (int(*)(struct vnode *, uint64_t, acl_type_t *, struct uio *, size_t *, mode_t *, struct ucred *))
1747 afs_gn_enosys, /* vn_getxacl */
1748 (int(*)(struct vnode *, uint64_t, acl_type_t, struct uio *, mode_t, struct ucred *))
1749 afs_gn_enosys, /* vn_setxacl */
1750 #else /* AFS_AIX53_ENV */
1751 afs_gn_enosys, /* vn_spare7 */
1752 afs_gn_enosys, /* vn_spare8 */
1753 afs_gn_enosys, /* vn_spare9 */
1754 afs_gn_enosys, /* vn_spareA */
1755 afs_gn_enosys, /* vn_spareB */
1756 afs_gn_enosys, /* vn_spareC */
1757 afs_gn_enosys, /* vn_spareD */
1758 #endif /* AFS_AIX53_ENV */
1759 afs_gn_enosys, /* vn_spareE */
1760 afs_gn_enosys /* vn_spareF */
1761 #ifdef AFS_AIX51_ENV
1762 ,(int(*)(struct gnode*,long long,char*,unsigned long*, unsigned long*,unsigned int*))
1763 afs_gn_enosys, /* pagerBackRange */
1764 (int64_t(*)(struct gnode*))
1765 afs_gn_enosys, /* pagerGetFileSize */
1766 (void(*)(struct gnode *, vpn_t, vpn_t *, vpn_t *, vpn_t *, boolean_t))
1767 afs_gn_enosys, /* pagerReadAhead */
1768 (void(*)(struct gnode *, int64_t, int64_t, uint))
1769 afs_gn_enosys, /* pagerReadWriteBehind */
1770 (void(*)(struct gnode*,long long,unsigned long,unsigned long,unsigned int))
1771 afs_gn_enosys /* pagerEndCopy */
/* NOTE(review): the closing "};" of the table is missing from this extract. */
/* afs_ops: the table the rest of the kernel module dispatches through. */
1774 struct vnodeops *afs_ops = &afs_gn_vnodeops;
1778 extern struct vfsops Afs_vfsops;
1779 extern int Afs_init();
1781 #define AFS_CALLOUT_TBL_SIZE 256
1784 * the following additional layer of gorp is due to the fact that the
1785 * filesystem layer no longer obtains the kernel lock for me. I was relying
1786 * on this behavior to avoid having to think about locking.
/* Locked VFS-operation wrappers.
 *
 * Per the comment above (lines 1784-1786), the filesystem layer no longer
 * takes the kernel lock for us, so each wrapper records whether the AFS
 * global lock is already held (glockOwner = ISAFS_GLOCK()) and then calls
 * through the real Afs_vfsops table.
 *
 * NOTE(review): this is a sampled extract -- the conditional
 * lock/unlock lines and the "return ret;" of every wrapper are missing
 * from view; presumably each wrapper takes the glock when !glockOwner and
 * releases it afterwards.  Confirm against the full source. */
1790 vfs_mount(struct vfs *a, struct ucred *b)
1792 int glockOwner, ret;
1794 glockOwner = ISAFS_GLOCK();
1797 ret = (*Afs_vfsops.vfs_mount) (a, b);
1805 vfs_unmount(struct vfs *a, int b, struct ucred *c)
1807 int glockOwner, ret;
1809 glockOwner = ISAFS_GLOCK();
1812 ret = (*Afs_vfsops.vfs_unmount) (a, b, c);
1820 vfs_root(struct vfs *a, struct vnode **b, struct ucred *c)
1822 int glockOwner, ret;
1824 glockOwner = ISAFS_GLOCK();
1827 ret = (*Afs_vfsops.vfs_root) (a, b, c);
1835 vfs_statfs(struct vfs *a, struct statfs *b, struct ucred *c)
1837 int glockOwner, ret;
1839 glockOwner = ISAFS_GLOCK();
1842 ret = (*Afs_vfsops.vfs_statfs) (a, b, c);
1850 vfs_sync(struct gfs *a)
1852 int glockOwner, ret;
1854 glockOwner = ISAFS_GLOCK();
1857 ret = (*Afs_vfsops.vfs_sync) (a);
1864 vfs_vget(struct vfs *a, struct vnode **b, struct fileid *c, struct ucred *d)
1866 int glockOwner, ret;
1868 glockOwner = ISAFS_GLOCK();
1871 ret = (*Afs_vfsops.vfs_vget) (a, b, c, d);
1879 vfs_cntl(struct vfs *a, int b, caddr_t c, size_t d, struct ucred *e)
1881 int glockOwner, ret;
1883 glockOwner = ISAFS_GLOCK();
1886 ret = (*Afs_vfsops.vfs_cntl) (a, b, c, d, e);
1894 vfs_quotactl(struct vfs *a, int b, uid_t c, caddr_t d, struct ucred *e)
1896 int glockOwner, ret;
1898 glockOwner = ISAFS_GLOCK();
1901 ret = (*Afs_vfsops.vfs_quotactl) (a, b, c, d, e);
/* vfs_syncvfs exists only on AIX 5.1 and later. */
1908 #ifdef AFS_AIX51_ENV
1910 vfs_syncvfs(struct gfs *a, struct vfs *b, int c, struct ucred *d)
1912 int glockOwner, ret;
1914 glockOwner = ISAFS_GLOCK();
1917 ret = (*Afs_vfsops.vfs_syncvfs) (a, b, c, d);
/* locked_Afs_vfsops: vfsops table pointing at the lock-acquiring wrappers
 * above.  NOTE(review): every initializer entry and the closing "};" are
 * missing from this sampled extract. */
1926 struct vfsops locked_Afs_vfsops = {
1935 #ifdef AFS_AIX51_ENV
/* Locked vnode-operation wrappers (naming ops, handles, open/close).
 * Each wrapper samples the AFS global lock state via ISAFS_GLOCK() and
 * dispatches through the unlocked afs_gn_vnodeops table.
 * NOTE(review): sampled extract -- the conditional lock/unlock lines,
 * braces and "return ret;" of each wrapper are missing from view. */
1941 vn_link(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
1943 int glockOwner, ret;
1945 glockOwner = ISAFS_GLOCK();
1948 ret = (*afs_gn_vnodeops.vn_link) (a, b, c, d);
1956 vn_mkdir(struct vnode *a, char *b, int32long64_t c, struct ucred *d)
1958 int glockOwner, ret;
1960 glockOwner = ISAFS_GLOCK();
1963 ret = (*afs_gn_vnodeops.vn_mkdir) (a, b, c, d);
1971 vn_mknod(struct vnode *a, caddr_t b, int32long64_t c, dev_t d,
1974 int glockOwner, ret;
1976 glockOwner = ISAFS_GLOCK();
1979 ret = (*afs_gn_vnodeops.vn_mknod) (a, b, c, d, e);
1987 vn_remove(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
1989 int glockOwner, ret;
1991 glockOwner = ISAFS_GLOCK();
1994 ret = (*afs_gn_vnodeops.vn_remove) (a, b, c, d);
2002 vn_rename(struct vnode *a, struct vnode *b, caddr_t c, struct vnode *d,
2003 struct vnode *e, caddr_t f, struct ucred *g)
2005 int glockOwner, ret;
2007 glockOwner = ISAFS_GLOCK();
2010 ret = (*afs_gn_vnodeops.vn_rename) (a, b, c, d, e, f, g);
2018 vn_rmdir(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
2020 int glockOwner, ret;
2022 glockOwner = ISAFS_GLOCK();
2025 ret = (*afs_gn_vnodeops.vn_rmdir) (a, b, c, d);
2033 vn_lookup(struct vnode *a, struct vnode **b, char *c, int32long64_t d,
2034 struct vattr *v, struct ucred *e)
2036 int glockOwner, ret;
2038 glockOwner = ISAFS_GLOCK();
2041 ret = (*afs_gn_vnodeops.vn_lookup) (a, b, c, d, v, e);
2049 vn_fid(struct vnode *a, struct fileid *b, struct ucred *c)
2051 int glockOwner, ret;
2053 glockOwner = ISAFS_GLOCK();
2056 ret = (*afs_gn_vnodeops.vn_fid) (a, b, c);
2064 vn_open(struct vnode *a,
2070 int glockOwner, ret;
2072 glockOwner = ISAFS_GLOCK();
2075 ret = (*afs_gn_vnodeops.vn_open) (a, b, c, d, e);
2083 vn_create(struct vnode *a, struct vnode **b, int32long64_t c, caddr_t d,
2084 int32long64_t e, caddr_t * f, struct ucred *g)
2086 int glockOwner, ret;
2088 glockOwner = ISAFS_GLOCK();
2091 ret = (*afs_gn_vnodeops.vn_create) (a, b, c, d, e, f, g);
2099 vn_hold(struct vnode *a)
2101 int glockOwner, ret;
2103 glockOwner = ISAFS_GLOCK();
2106 ret = (*afs_gn_vnodeops.vn_hold) (a);
2114 vn_rele(struct vnode *a)
2116 int glockOwner, ret;
2118 glockOwner = ISAFS_GLOCK();
2121 ret = (*afs_gn_vnodeops.vn_rele) (a);
2129 vn_close(struct vnode *a, int32long64_t b, caddr_t c, struct ucred *d)
2131 int glockOwner, ret;
2133 glockOwner = ISAFS_GLOCK();
2136 ret = (*afs_gn_vnodeops.vn_close) (a, b, c, d);
/* Locked vnode-operation wrappers (mapping, attributes, data I/O).
 * Same lock-sampling pattern as above: glockOwner = ISAFS_GLOCK(), then
 * dispatch through afs_gn_vnodeops.
 * NOTE(review): sampled extract -- lock/unlock conditionals, braces and
 * returns are missing from view. */
2144 vn_map(struct vnode *a, caddr_t b, uint32long64_t c, uint32long64_t d,
2145 uint32long64_t e, struct ucred *f)
2147 int glockOwner, ret;
2149 glockOwner = ISAFS_GLOCK();
2152 ret = (*afs_gn_vnodeops.vn_map) (a, b, c, d, e, f);
2160 vn_unmap(struct vnode *a, int32long64_t b, struct ucred *c)
2162 int glockOwner, ret;
2164 glockOwner = ISAFS_GLOCK();
2167 ret = (*afs_gn_vnodeops.vn_unmap) (a, b, c);
2175 vn_access(struct vnode *a, int32long64_t b, int32long64_t c, struct ucred *d)
2177 int glockOwner, ret;
2179 glockOwner = ISAFS_GLOCK();
2182 ret = (*afs_gn_vnodeops.vn_access) (a, b, c, d);
2190 vn_getattr(struct vnode *a, struct vattr *b, struct ucred *c)
2192 int glockOwner, ret;
2194 glockOwner = ISAFS_GLOCK();
2197 ret = (*afs_gn_vnodeops.vn_getattr) (a, b, c);
2205 vn_setattr(struct vnode *a, int32long64_t b, int32long64_t c, int32long64_t d,
2206 int32long64_t e, struct ucred *f)
2208 int glockOwner, ret;
2210 glockOwner = ISAFS_GLOCK();
2213 ret = (*afs_gn_vnodeops.vn_setattr) (a, b, c, d, e, f);
2221 vn_fclear(struct vnode *a, int32long64_t b, offset_t c, offset_t d
2222 , caddr_t e, struct ucred *f)
2224 int glockOwner, ret;
2226 glockOwner = ISAFS_GLOCK();
2229 ret = (*afs_gn_vnodeops.vn_fclear) (a, b, c, d, e, f);
2237 vn_fsync(struct vnode *a, int32long64_t b, int32long64_t c, struct ucred *d)
2239 int glockOwner, ret;
2241 glockOwner = ISAFS_GLOCK();
2244 ret = (*afs_gn_vnodeops.vn_fsync) (a, b, c, d);
2252 vn_ftrunc(struct vnode *a, int32long64_t b, offset_t c, caddr_t d,
2255 int glockOwner, ret;
2257 glockOwner = ISAFS_GLOCK();
2260 ret = (*afs_gn_vnodeops.vn_ftrunc) (a, b, c, d, e);
2268 vn_rdwr(struct vnode *a, enum uio_rw b, int32long64_t c, struct uio *d,
2269 ext_t e, caddr_t f, struct vattr *v, struct ucred *g)
2271 int glockOwner, ret;
2273 glockOwner = ISAFS_GLOCK();
2276 ret = (*afs_gn_vnodeops.vn_rdwr) (a, b, c, d, e, f, v, g);
/* Locked vnode-operation wrappers (locking, ioctl, links, dirs, ACLs).
 * Same lock-sampling pattern as the wrappers above.
 * NOTE(review): sampled extract -- missing lines include the
 * AIX-5.2-conditional parameter declarations of vn_lockctl and all
 * lock/unlock conditionals, braces and returns. */
2284 vn_lockctl(struct vnode *a,
/* vn_lockctl's signature differs on AIX 5.2+; the alternative parameter
 * lists themselves are not visible in this extract. */
2289 #ifdef AFS_AIX52_ENV /* Changed in AIX 5.2 and up */
2291 #else /* AFS_AIX52_ENV */
2293 #endif /* AFS_AIX52_ENV */
2296 int glockOwner, ret;
2298 glockOwner = ISAFS_GLOCK();
2301 ret = (*afs_gn_vnodeops.vn_lockctl) (a, b, c, d, e, f, g);
2309 vn_ioctl(struct vnode *a, int32long64_t b, caddr_t c, size_t d, ext_t e,
2312 int glockOwner, ret;
2314 glockOwner = ISAFS_GLOCK();
2317 ret = (*afs_gn_vnodeops.vn_ioctl) (a, b, c, d, e, f);
2325 vn_readlink(struct vnode *a, struct uio *b, struct ucred *c)
2327 int glockOwner, ret;
2329 glockOwner = ISAFS_GLOCK();
2332 ret = (*afs_gn_vnodeops.vn_readlink) (a, b, c);
2340 vn_select(struct vnode *a, int32long64_t b, ushort c, ushort * d,
2341 void (*e) (), caddr_t f, struct ucred *g)
2343 int glockOwner, ret;
2345 glockOwner = ISAFS_GLOCK();
2348 ret = (*afs_gn_vnodeops.vn_select) (a, b, c, d, e, f, g);
2356 vn_symlink(struct vnode *a, char *b, char *c, struct ucred *d)
2358 int glockOwner, ret;
2360 glockOwner = ISAFS_GLOCK();
2363 ret = (*afs_gn_vnodeops.vn_symlink) (a, b, c, d);
2371 vn_readdir(struct vnode *a, struct uio *b, struct ucred *c)
2373 int glockOwner, ret;
2375 glockOwner = ISAFS_GLOCK();
2378 ret = (*afs_gn_vnodeops.vn_readdir) (a, b, c);
2386 vn_revoke(struct vnode *a, int32long64_t b, int32long64_t c, struct vattr *d,
2389 int glockOwner, ret;
2391 glockOwner = ISAFS_GLOCK();
2394 ret = (*afs_gn_vnodeops.vn_revoke) (a, b, c, d, e);
2402 vn_getacl(struct vnode *a, struct uio *b, struct ucred *c)
2404 int glockOwner, ret;
2406 glockOwner = ISAFS_GLOCK();
2409 ret = (*afs_gn_vnodeops.vn_getacl) (a, b, c);
2417 vn_setacl(struct vnode *a, struct uio *b, struct ucred *c)
2419 int glockOwner, ret;
2421 glockOwner = ISAFS_GLOCK();
2424 ret = (*afs_gn_vnodeops.vn_setacl) (a, b, c);
2432 vn_getpcl(struct vnode *a, struct uio *b, struct ucred *c)
2434 int glockOwner, ret;
2436 glockOwner = ISAFS_GLOCK();
2439 ret = (*afs_gn_vnodeops.vn_getpcl) (a, b, c);
2447 vn_setpcl(struct vnode *a, struct uio *b, struct ucred *c)
2449 int glockOwner, ret;
2451 glockOwner = ISAFS_GLOCK();
2454 ret = (*afs_gn_vnodeops.vn_setpcl) (a, b, c);
/* locked_afs_gn_vnodeops: vnodeops table wired to the vn_* lock wrappers
 * above, except vn_strategy which -- per the inline comment -- must point
 * at afs_gn_strategy directly with no locking.  Mirrors the conditional
 * layout of afs_gn_vnodeops (AIX 5.3 EA/extended-ACL slots vs. vn_spare
 * placeholders, AIX 5.1 pager entries).
 * NOTE(review): sampled extract -- most initializer entries and the
 * closing "};" are missing from view. */
2462 struct vnodeops locked_afs_gn_vnodeops = {
2491 (int(*)(struct vnode*,struct buf*,struct ucred*))
2492 afs_gn_strategy, /* no locking!!! (discovered the hard way) */
2499 (int(*)(struct vnode *, int32long64_t, int32long64_t, offset_t, offset_t, struct ucred *))
2500 afs_gn_enosys, /* vn_fsync_range */
2501 (int(*)(struct vnode *, struct vnode **, int32long64_t, char *, struct vattr *, int32long64_t, caddr_t *, struct ucred *))
2502 afs_gn_enosys, /* vn_create_attr */
2503 (int(*)(struct vnode *, int32long64_t, void *, size_t, struct ucred *))
2504 afs_gn_enosys, /* vn_finfo */
2505 (int(*)(struct vnode *, caddr_t, offset_t, offset_t, uint32long64_t, uint32long64_t, struct ucred *))
2506 afs_gn_enosys, /* vn_map_lloff */
2507 (int(*)(struct vnode*,struct uio*,int*,struct ucred*))
2508 afs_gn_enosys, /* vn_readdir_eofp */
2509 (int(*)(struct vnode *, enum uio_rw, int32long64_t, struct uio *, ext_t , caddr_t, struct vattr *, struct vattr *, struct ucred *))
2510 afs_gn_enosys, /* vn_rdwr_attr */
2511 (int(*)(struct vnode*,int,void*,struct ucred*))
2512 afs_gn_enosys, /* vn_memcntl */
2513 #ifdef AFS_AIX53_ENV /* Present in AIX 5.3 and up */
2514 (int(*)(struct vnode*,const char*,struct uio*,struct ucred*))
2515 afs_gn_enosys, /* vn_getea */
2516 (int(*)(struct vnode*,const char*,struct uio*,int,struct ucred*))
2517 afs_gn_enosys, /* vn_setea */
2518 (int(*)(struct vnode *, struct uio *, struct ucred *))
2519 afs_gn_enosys, /* vn_listea */
2520 (int(*)(struct vnode *, const char *, struct ucred *))
2521 afs_gn_enosys, /* vn_removeea */
2522 (int(*)(struct vnode *, const char *, struct vattr *, struct ucred *))
2523 afs_gn_enosys, /* vn_statea */
2524 (int(*)(struct vnode *, uint64_t, acl_type_t *, struct uio *, size_t *, mode_t *, struct ucred *))
2525 afs_gn_enosys, /* vn_getxacl */
2526 (int(*)(struct vnode *, uint64_t, acl_type_t, struct uio *, mode_t, struct ucred *))
2527 afs_gn_enosys, /* vn_setxacl */
2528 #else /* AFS_AIX53_ENV */
2529 afs_gn_enosys, /* vn_spare7 */
2530 afs_gn_enosys, /* vn_spare8 */
2531 afs_gn_enosys, /* vn_spare9 */
2532 afs_gn_enosys, /* vn_spareA */
2533 afs_gn_enosys, /* vn_spareB */
2534 afs_gn_enosys, /* vn_spareC */
2535 afs_gn_enosys, /* vn_spareD */
2536 #endif /* AFS_AIX53_ENV */
2537 afs_gn_enosys, /* vn_spareE */
2538 afs_gn_enosys /* vn_spareF */
2539 #ifdef AFS_AIX51_ENV
2540 ,(int(*)(struct gnode*,long long,char*,unsigned long*, unsigned long*,unsigned int*))
2541 afs_gn_enosys, /* pagerBackRange */
2542 (int64_t(*)(struct gnode*))
2543 afs_gn_enosys, /* pagerGetFileSize */
2544 (void(*)(struct gnode *, vpn_t, vpn_t *, vpn_t *, vpn_t *, boolean_t))
2545 afs_gn_enosys, /* pagerReadAhead */
2546 (void(*)(struct gnode *, int64_t, int64_t, uint))
2547 afs_gn_enosys, /* pagerReadWriteBehind */
2548 (void(*)(struct gnode*,long long,unsigned long,unsigned long,unsigned int))
2549 afs_gn_enosys /* pagerEndCopy */
2553 struct gfs afs_gfs = {
2555 &locked_afs_gn_vnodeops,
2559 GFS_VERSION4 | GFS_VERSION42 | GFS_REMOTE,