2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
29 #include "rpc/types.h"
31 #include "netinet/in.h"
35 #include "rpc/types.h"
39 #include "afs/afs_osi.h"
40 #define RFTP_INTERNALS 1
41 #include "afs/volerrors.h"
45 #include "afs/exporter.h"
47 #include "afs/afs_chunkops.h"
48 #include "afs/afs_stats.h"
49 #include "afs/nfsclient.h"
51 #include "afs/prs_fs.h"
53 #include "afsincludes.h"
/*
 * afs_gn_link --- AIX gnode op for link(2).  Delegates directly to
 * afs_link() and records the result via ICL tracing.
 * NOTE(review): this is a sampled view; intervening lines (parameter
 * list, declarations, return) are elided.
 */
57 afs_gn_link(struct vnode *vp,
64 AFS_STATCNT(afs_gn_link);
/* afs_link does the real work: vp is the file, dp/name the new link */
65 error = afs_link(vp, dp, name, cred);
66 afs_Trace3(afs_iclSetp, CM_TRACE_GNLINK, ICL_TYPE_POINTER, vp,
67 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/*
 * afs_gn_mkdir --- AIX gnode op for mkdir(2).  Builds a vattr with the
 * requested mode masked by the process umask, then calls afs_mkdir().
 * NOTE(review): sampled view; some declarations and the return are elided.
 */
73 afs_gn_mkdir(struct vnode *dp,
83 AFS_STATCNT(afs_gn_mkdir);
/* apply the caller's umask to the permission bits, as mkdir(2) requires */
86 va.va_mode = (mode & 07777) & ~get_umask();
87 error = afs_mkdir(dp, name, &va, &vp, cred);
91 afs_Trace4(afs_iclSetp, CM_TRACE_GMKDIR, ICL_TYPE_POINTER, vp,
92 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
/*
 * afs_gn_mknod --- AIX gnode op for mknod(2).  Converts the file-type
 * bits of 'mode' into a vnode type, applies the umask to the permission
 * bits, and dispatches to afs_mkdir() or afs_create() depending on the
 * resulting type.  NOTE(review): sampled view; the remaining switch
 * cases and return are elided here.
 */
99 afs_gn_mknod(struct vnode *dp,
110 AFS_STATCNT(afs_gn_mknod);
112 va.va_type = IFTOVT(mode);
113 va.va_mode = (mode & 07777) & ~get_umask();
115 /**** I'm not sure if suser() should stay here since it makes no sense in AFS; however the documentation says that one "should be super-user unless making a FIFO file. Other systems such as SUN do this checking in the early stages of mknod (before the abstraction), so it's equivalently the same! *****/
116 if (va.va_type != VFIFO && !suser((char *)&error))
118 switch (va.va_type) {
120 error = afs_mkdir(dp, name, &va, &vp, cred);
130 error = afs_create(VTOAFS(dp), name, &va, NONEXCL, mode, (struct vcache **)&vp, cred);
135 afs_Trace4(afs_iclSetp, CM_TRACE_GMKNOD, ICL_TYPE_POINTER, (afs_int32) vp,
136 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
/*
 * afs_gn_remove --- AIX gnode op for unlink(2).  The vnode argument is
 * unused; removal is done by name in directory dp via afs_remove().
 * NOTE(review): sampled view; declarations and return are elided.
 */
143 afs_gn_remove(struct vnode *vp, /* Ignored in AFS */
150 AFS_STATCNT(afs_gn_remove);
151 error = afs_remove(dp, name, cred);
152 afs_Trace3(afs_iclSetp, CM_TRACE_GREMOVE, ICL_TYPE_POINTER, dp,
153 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/*
 * afs_gn_rename --- AIX gnode op for rename(2).  Both vnode arguments
 * are ignored; the rename is performed by (dp, name) -> (tdp, tname)
 * through afs_rename().  NOTE(review): sampled view; declarations and
 * return are elided.
 */
159 afs_gn_rename(struct vnode *vp, /* Ignored in AFS */
162 struct vnode *tp, /* Ignored in AFS */
169 AFS_STATCNT(afs_gn_rename);
170 error = afs_rename(dp, name, tdp, tname, cred);
171 afs_Trace4(afs_iclSetp, CM_TRACE_GRENAME, ICL_TYPE_POINTER, dp,
172 ICL_TYPE_STRING, name, ICL_TYPE_STRING, tname, ICL_TYPE_LONG,
/*
 * afs_gn_rmdir --- AIX gnode op for rmdir(2).  Delegates to afs_rmdir()
 * and maps the BSD ENOTEMPTY error number to EEXIST, which is what AIX
 * callers expect for a non-empty directory.
 * NOTE(review): sampled view; declarations and return are elided.
 */
179 afs_gn_rmdir(struct vnode *vp, /* Ignored in AFS */
186 AFS_STATCNT(afs_gn_rmdir);
187 error = afs_rmdir(dp, name, cred);
189 if (error == 66 /* 4.3's ENOTEMPTY */ )
190 error = EEXIST; /* AIX returns EEXIST where 4.3 used ENOTEMPTY */
192 afs_Trace3(afs_iclSetp, CM_TRACE_GRMDIR, ICL_TYPE_POINTER, dp,
193 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/*
 * afs_gn_lookup --- AIX gnode op for name lookup.  Resolves 'name' in
 * directory dp via afs_lookup(); on success, optionally fills in the
 * caller-supplied vattr by calling afs_gn_getattr() on the result.
 * NOTE(review): sampled view; declarations and return are elided.
 */
199 afs_gn_lookup(struct vnode *dp,
202 int32long64_t Flags, /* includes FOLLOW... */
203 struct vattr *vattrp,
209 AFS_STATCNT(afs_gn_lookup);
210 error = afs_lookup(dp, name, vpp, cred);
211 afs_Trace3(afs_iclSetp, CM_TRACE_GLOOKUP, ICL_TYPE_POINTER, dp,
212 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/* AIX expects attributes back from lookup when vattrp is provided */
213 if (vattrp != NULL && error == 0)
214 afs_gn_getattr(*vpp, vattrp, cred);
/*
 * afs_gn_fid --- AIX gnode op returning a file identifier for vp;
 * thin wrapper around afs_fid() plus tracing.
 * NOTE(review): sampled view; declarations and return are elided.
 */
220 afs_gn_fid(struct vnode *vp,
226 AFS_STATCNT(afs_gn_fid);
227 error = afs_fid(vp, fidp);
228 afs_Trace3(afs_iclSetp, CM_TRACE_GFID, ICL_TYPE_POINTER, vp,
229 ICL_TYPE_LONG, (afs_int32) fidp, ICL_TYPE_LONG, error);
/*
 * afs_gn_open --- AIX gnode op for open(2).  Checks access rights with
 * afs_access(), honours FNSHARE exclusive-share semantics by sleeping on
 * the vcache's 'opens' field until other opens drain (unless FDELAY),
 * then calls afs_open().  If FTRUNC was requested, truncates via
 * afs_setattr(); on a post-open failure the open is undone with
 * afs_close() so the cache manager's open/writer counts stay balanced.
 * NOTE(review): sampled view; several declarations, branches, and the
 * return are elided here.
 */
235 afs_gn_open(struct vnode *vp,
238 struct ucred **vinfop,
243 struct vcache *tvp = VTOAFS(vp);
247 AFS_STATCNT(afs_gn_open);
253 if ((flags & FWRITE) || (flags & FTRUNC))
/* FNSHARE: wait until nobody else has the file open */
256 while ((flags & FNSHARE) && tvp->opens) {
257 if (!(flags & FDELAY)) {
261 afs_osi_Sleep(&tvp->opens);
264 error = afs_access(VTOAFS(vp), modes, cred);
269 error = afs_open((struct vcache **) &vp, flags, cred);
271 if (flags & FTRUNC) {
274 error = afs_setattr(VTOAFS(vp), &va, cred);
278 tvp->f.states |= CNSHARE;
281 *vinfop = cred; /* fp->f_vinfo is like fp->f_cred in suns */
283 /* an error occurred; we've told CM that the file
284 * is open, so close it now so that open and
285 * writer counts are correct. Ignore error code,
286 * as it is likely to fail (the setattr just did).
288 afs_close(vp, flags, cred);
293 afs_Trace3(afs_iclSetp, CM_TRACE_GOPEN, ICL_TYPE_POINTER, vp,
294 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
/*
 * afs_gn_create --- AIX gnode op for creat/open(O_CREAT).  Translates
 * O_EXCL|O_CREAT into the vcexcl argument, applies the umask to the
 * mode, and calls afs_create().  For non-NFS-translator callers it then
 * emulates the BSD copen() sequence by doing the vop_open work here:
 * waiting out FNSHARE conflicts and calling afs_open() so counters such
 * as execsOrWriters are advanced.  NOTE(review): sampled view; several
 * declarations, branches, and the return are elided.
 */
300 afs_gn_create(struct vnode *dp,
305 struct ucred **vinfop, /* return ptr for fp->f_vinfo, used as fp->f_cred */
310 enum vcexcl exclusive;
311 int error, modes = 0;
315 AFS_STATCNT(afs_gn_create);
316 if ((flags & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))
322 va.va_mode = (mode & 07777) & ~get_umask();
327 if ((flags & FWRITE) || (flags & FTRUNC))
329 error = afs_create(VTOAFS(dp), name, &va, exclusive, modes, (struct vcache **)vpp, cred);
333 /* 'cr_luid' is a flag (when it comes thru the NFS server it's set to
334 * RMTUSER_REQ) that determines if we should call afs_open(). We shouldn't
335 * call it for NFS traffic since the close will never happen and thus
336 * we'd never flush the files out to the server! Gross, but the simplest
337 * solution we came up with */
338 if (cred->cr_luid != RMTUSER_REQ) {
339 while ((flags & FNSHARE) && VTOAFS(*vpp)->opens) {
340 if (!(flags & FDELAY))
342 afs_osi_Sleep(&VTOAFS(*vpp)->opens);
344 /* Since in the standard copen() for bsd vnode kernels they do an
345 * vop_open after the vop_create, we must do the open here since there
346 * are stuff in afs_open that we need. For example advance the
347 * execsOrWriters flag (else we'll be treated as the sun's "core"
349 *vinfop = cred; /* save user creds in fp->f_vinfo */
350 error = afs_open((struct vcache **)vpp, flags, cred);
352 afs_Trace4(afs_iclSetp, CM_TRACE_GCREATE, ICL_TYPE_POINTER, dp,
353 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
/*
 * afs_gn_hold --- AIX gnode op taking a reference on vp.
 * NOTE(review): sampled view; the refcount bump and return are elided.
 */
360 afs_gn_hold(struct vnode *vp)
362 AFS_STATCNT(afs_gn_hold);
/*
 * afs_gn_rele --- AIX gnode op dropping a reference on vp.  Panics on a
 * zero refcount; when the last reference goes away it clears the
 * CPageHog state flag and inactivates the vnode via afs_inactive().
 * NOTE(review): sampled view; declarations and return are elided.
 */
370 afs_gn_rele(struct vnode *vp)
372 struct vcache *vcp = VTOAFS(vp);
375 AFS_STATCNT(afs_gn_rele);
376 if (vp->v_count == 0)
377 osi_Panic("afs_rele: zero v_count");
378 if (--(vp->v_count) == 0) {
379 if (vcp->f.states & CPageHog) {
381 vcp->f.states &= ~CPageHog;
383 error = afs_inactive(vp, 0);
/*
 * afs_gn_close --- AIX gnode op for close(2).  Releases the FNSHARE
 * exclusive-share state (waking any afs_gn_open sleepers on
 * tvp->opens), then calls afs_close().
 * NOTE(review): sampled view; declarations and return are elided.
 */
390 afs_gn_close(struct vnode *vp,
392 caddr_t vinfo, /* Ignored in AFS */
396 struct vcache *tvp = VTOAFS(vp);
399 AFS_STATCNT(afs_gn_close);
401 if (flags & FNSHARE) {
402 tvp->f.states &= ~CNSHARE;
403 afs_osi_Wakeup(&tvp->opens);
406 error = afs_close(vp, flags, cred);
407 afs_Trace3(afs_iclSetp, CM_TRACE_GCLOSE, ICL_TYPE_POINTER, (afs_int32) vp,
408 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
/*
 * afs_gn_map --- AIX gnode op for mmap/shmat.  Verifies the vcache,
 * flushes stale pages, creates the AIX VM segment for the file on first
 * mapping (vms_create, capped at afs_vmMappingEnd for 64-bit clients),
 * records the segment id in the gnode, bumps the mapped read/write
 * counts, and stashes the caller's credentials on the vcache for later
 * async daemon use.  NOTE(review): sampled view; several declarations,
 * error paths, and the return are elided.
 */
414 afs_gn_map(struct vnode *vp,
421 struct vcache *vcp = VTOAFS(vp);
422 struct vrequest treq;
426 afs_int32 flag = Flag;
428 AFS_STATCNT(afs_gn_map);
430 if (error = afs_InitReq(&treq, cred))
432 error = afs_VerifyVCache(vcp, &treq);
434 return afs_CheckCode(error, &treq, 49);
436 osi_FlushPages(vcp, cred); /* XXX ensure old pages are gone XXX */
437 ObtainWriteLock(&vcp->lock, 401);
438 vcp->f.states |= CMAPPED; /* flag cleared at afs_inactive */
440 * We map the segment into our address space using the handle returned by vm_create.
443 afs_uint32 tlen = vcp->f.m.Length;
444 #ifdef AFS_64BIT_CLIENT
/* 64-bit clients only map the file below the VM mapping boundary */
445 if (vcp->f.m.Length > afs_vmMappingEnd)
446 tlen = afs_vmMappingEnd;
448 /* Consider V_INTRSEG too for interrupts */
450 vms_create(&vcp->segid, V_CLIENT, (dev_t) vcp->v.v_gnode, tlen, 0, 0)) {
451 ReleaseWriteLock(&vcp->lock);
454 #ifdef AFS_64BIT_KERNEL
455 vcp->vmh = vm_handle(vcp->segid, (int32long64_t) 0);
457 vcp->vmh = SRVAL(vcp->segid, 0, 0);
460 vcp->v.v_gnode->gn_seg = vcp->segid; /* XXX Important XXX */
461 if (flag & SHM_RDONLY) {
462 vp->v_gnode->gn_mrdcnt++;
464 vp->v_gnode->gn_mwrcnt++;
467 * We keep the caller's credentials since an async daemon will handle the
468 * request at some point. We assume that the same credentials will be used.
470 if (!vcp->credp || (vcp->credp != cred)) {
473 struct ucred *crp = vcp->credp;
479 ReleaseWriteLock(&vcp->lock);
481 afs_Trace4(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vp,
482 ICL_TYPE_LONG, addr, ICL_TYPE_LONG, len, ICL_TYPE_LONG, off);
/*
 * afs_gn_unmap --- AIX gnode op for munmap/shmdt.  Decrements the
 * gnode's mapped-for-read or mapped-for-write counter (clamping at
 * zero) under the vcache write lock.
 * NOTE(review): sampled view; some lines and the return are elided.
 */
488 afs_gn_unmap(struct vnode *vp,
492 struct vcache *vcp = VTOAFS(vp);
493 AFS_STATCNT(afs_gn_unmap);
494 ObtainWriteLock(&vcp->lock, 402);
495 if (flag & SHM_RDONLY) {
496 vp->v_gnode->gn_mrdcnt--;
497 if (vp->v_gnode->gn_mrdcnt <= 0)
498 vp->v_gnode->gn_mrdcnt = 0;
500 vp->v_gnode->gn_mwrcnt--;
501 if (vp->v_gnode->gn_mwrcnt <= 0)
502 vp->v_gnode->gn_mwrcnt = 0;
504 ReleaseWriteLock(&vcp->lock);
/*
 * afs_gn_access --- AIX gnode op for access checking.  First consults
 * afs_access() for the caller itself, then — for the ACC_OTHERS /
 * ACC_ANY / ACC_ALL "who" variants — re-derives the answer from the
 * UNIX mode bits returned by afs_getattr(), testing the owner (>>6),
 * group (>>3), and other bit triplets.  NOTE(review): sampled view;
 * several branches and the return are elided.
 */
512 afs_gn_access(struct vnode *vp,
522 AFS_STATCNT(afs_gn_access);
528 error = afs_access(VTOAFS(vp), mode, cred);
530 /* Additional testing */
531 if (who == ACC_OTHERS || who == ACC_ANY) {
532 error = afs_getattr(VTOAFS(vp), &vattr, cred);
534 if (who == ACC_ANY) {
535 if (((vattr.va_mode >> 6) & mode) == mode) {
540 if (((vattr.va_mode >> 3) & mode) == mode)
545 } else if (who == ACC_ALL) {
546 error = afs_getattr(VTOAFS(vp), &vattr, cred);
/* ACC_ALL: owner, group, AND other must each grant 'mode' */
548 if ((!((vattr.va_mode >> 6) & mode))
549 || (!((vattr.va_mode >> 3) & mode))
550 || (!(vattr.va_mode & mode)))
559 afs_Trace3(afs_iclSetp, CM_TRACE_GACCESS, ICL_TYPE_POINTER, vp,
560 ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
/*
 * afs_gn_getattr --- AIX gnode op for stat(2); thin wrapper around
 * afs_getattr() plus tracing.
 * NOTE(review): sampled view; declarations and return are elided.
 */
566 afs_gn_getattr(struct vnode *vp,
567 struct vattr *vattrp,
572 AFS_STATCNT(afs_gn_getattr);
573 error = afs_getattr(VTOAFS(vp), vattrp, cred);
574 afs_Trace2(afs_iclSetp, CM_TRACE_GGETATTR, ICL_TYPE_POINTER, vp,
575 ICL_TYPE_LONG, error);
/*
 * afs_gn_setattr --- AIX gnode op for chmod/chown/utime-style attribute
 * changes.  Decodes the AIX T_* flag bits in arg1 (owner/group changes,
 * T_SETTIME for "set to current time" versus explicit timestamps in
 * arg2/arg3), checks write access where needed, and applies the result
 * through afs_setattr().  NOTE(review): sampled view; the flag decoding
 * around the visible lines and the return are elided.
 */
581 afs_gn_setattr(struct vnode *vp,
591 AFS_STATCNT(afs_gn_setattr);
599 if ((arg1 & T_OWNER_AS_IS) == 0)
601 if ((arg1 & T_GROUP_AS_IS) == 0)
606 error = afs_access(vp, VWRITE, cred);
610 if (arg1 & T_SETTIME) {
/* T_SETTIME: stamp both times with "now" rather than caller values */
611 va.va_atime.tv_sec = time;
612 va.va_mtime.tv_sec = time;
614 va.va_atime = *(struct timestruc_t *)arg2;
615 va.va_mtime = *(struct timestruc_t *)arg3;
623 error = afs_setattr(VTOAFS(vp), &va, cred);
625 afs_Trace2(afs_iclSetp, CM_TRACE_GSETATTR, ICL_TYPE_POINTER, vp,
626 ICL_TYPE_LONG, error);
/* One page of zeroes, used by afs_gn_fclear to overwrite file ranges. */
631 char zero_buffer[PAGESIZE];
/*
 * afs_gn_fclear --- AIX gnode op clearing (zero-filling) a byte range
 * of the file.  Refuses to clear past the caller's ulimit, flushes and
 * waits on the file's VM pages, then writes zero_buffer over the range
 * one page at a time via afs_rdwr().  NOTE(review): sampled view;
 * declarations, the fclear_init logic, and the return are elided.
 */
633 afs_gn_fclear(struct vnode *vp,
640 int i, len, error = 0;
643 static int fclear_init = 0;
644 struct vcache *avc = VTOAFS(vp);
646 memset(&uio, 0, sizeof(uio));
647 memset(&iov, 0, sizeof(iov));
649 AFS_STATCNT(afs_gn_fclear);
651 memset(zero_buffer, 0, PAGESIZE);
655 * Don't clear past ulimit
657 if (offset + length > get_ulimit())
660 /* Flush all pages first */
663 vm_flushp(avc->segid, 0, MAXFSIZE / PAGESIZE - 1);
664 vms_iowait(avc->segid);
667 uio.afsio_offset = offset;
/* write at most one page of zeroes per iteration until the range is done */
668 for (i = offset; i < offset + length; i = uio.afsio_offset) {
669 len = offset + length - i;
670 iov.iov_len = (len > PAGESIZE) ? PAGESIZE : len;
671 iov.iov_base = zero_buffer;
672 uio.afsio_iov = &iov;
673 uio.afsio_iovcnt = 1;
674 uio.afsio_seg = AFS_UIOSYS;
675 uio.afsio_resid = iov.iov_len;
676 if (error = afs_rdwr(VTOAFS(vp), &uio, UIO_WRITE, 0, cred))
679 afs_Trace4(afs_iclSetp, CM_TRACE_GFCLEAR, ICL_TYPE_POINTER, vp,
680 ICL_TYPE_LONG, offset, ICL_TYPE_LONG, length, ICL_TYPE_LONG,
/*
 * afs_gn_fsync --- AIX gnode op for fsync(2); thin wrapper around
 * afs_fsync() plus tracing.  The flags/vinfo arguments are unused.
 * NOTE(review): sampled view; declarations and return are elided.
 */
687 afs_gn_fsync(struct vnode *vp,
688 int32long64_t flags, /* Not used by AFS */
689 int32long64_t vinfo, /* Not used by AFS */
694 AFS_STATCNT(afs_gn_fsync);
695 error = afs_fsync(vp, cred);
696 afs_Trace3(afs_iclSetp, CM_TRACE_GFSYNC, ICL_TYPE_POINTER, vp,
697 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
/*
 * afs_gn_ftrunc --- AIX gnode op for ftruncate(2).  Implemented as a
 * size-only afs_setattr() call.
 * NOTE(review): sampled view; vattr setup and the return are elided.
 */
703 afs_gn_ftrunc(struct vnode *vp,
712 AFS_STATCNT(afs_gn_ftrunc);
715 error = afs_setattr(VTOAFS(vp), &va, cred);
716 afs_Trace4(afs_iclSetp, CM_TRACE_GFTRUNC, ICL_TYPE_POINTER, vp,
717 ICL_TYPE_LONG, flags, ICL_TYPE_OFFSET,
718 ICL_HANDLE_OFFSET(length), ICL_TYPE_LONG, error);
722 /* Min size of a file which is dumping core before we declare it a page hog. */
723 #define MIN_PAGE_HOG_SIZE 8388608
/*
 * afs_gn_rdwr --- AIX gnode op for read/write.  Front end for
 * afs_vm_rdwr(): short-circuits writes when the vcache already has a
 * stored error (vc_error), caches/dupes the caller's credentials on the
 * vcache for async daemons (duping when the request comes from the NFS
 * translator so the NFS server can't clobber them), verifies the
 * vcache, flushes stale pages, enforces NFS-translator access checks,
 * and brackets writes below afs_vmMappingEnd with CDirty/FakeOpen-style
 * bookkeeping because NFS has no open/close RPCs.  Optionally returns
 * attributes via afs_gn_getattr().  NOTE(review): sampled view; many
 * lines (FakeOpen call, error paths, return) are elided.
 */
726 afs_gn_rdwr(struct vnode *vp,
730 ext_t ext, /* Ignored in AFS */
731 caddr_t vinfo, /* Ignored in AFS */
732 struct vattr *vattrp,
735 struct vcache *vcp = VTOAFS(vp);
736 struct vrequest treq;
741 AFS_STATCNT(afs_gn_rdwr);
744 if (op == UIO_WRITE) {
/* a previously stored write error is reported now, before new writes */
745 afs_Trace2(afs_iclSetp, CM_TRACE_GRDWR1, ICL_TYPE_POINTER, vp,
746 ICL_TYPE_LONG, vcp->vc_error);
747 return vcp->vc_error;
752 ObtainSharedLock(&vcp->lock, 507);
754 * We keep the caller's credentials since an async daemon will handle the
755 * request at some point. We assume that the same credentials will be used.
756 * If this is being called from an NFS server thread, then dupe the
757 * cred and only use that copy in calls and for the stach.
759 if (!vcp->credp || (vcp->credp != cred)) {
760 #ifdef AFS_AIX_IAUTH_ENV
761 if (AFS_NFSXLATORREQ(cred)) {
762 /* Must be able to use cred later, so dupe it so that nfs server
763 * doesn't overwrite it's contents.
769 crhold(cred); /* Bump refcount for reference in vcache */
773 UpgradeSToWLock(&vcp->lock, 508);
776 ConvertWToSLock(&vcp->lock);
781 ReleaseSharedLock(&vcp->lock);
784 * XXX Is the following really required?? XXX
786 if (error = afs_InitReq(&treq, cred))
788 if (error = afs_VerifyVCache(vcp, &treq))
789 return afs_CheckCode(error, &treq, 50);
790 osi_FlushPages(vcp, cred); /* Flush old pages */
792 if (AFS_NFSXLATORREQ(cred)) {
795 if (op == UIO_READ) {
797 (vcp, PRSFS_READ, &treq,
798 CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) {
807 * We have to bump the open/exwriters field here courtesy of the nfs xlator
808 * because there're no open/close nfs rpcs to call our afs_open/close.
809 * We do a similar thing on the afs_read/write interface.
811 if (op == UIO_WRITE) {
812 #ifdef AFS_64BIT_CLIENT
813 if (ubuf->afsio_offset < afs_vmMappingEnd) {
814 #endif /* AFS_64BIT_CLIENT */
815 ObtainWriteLock(&vcp->lock, 240);
816 vcp->f.states |= CDirty; /* Set the dirty bit */
818 ReleaseWriteLock(&vcp->lock);
819 #ifdef AFS_64BIT_CLIENT
821 #endif /* AFS_64BIT_CLIENT */
824 error = afs_vm_rdwr(vp, ubuf, op, flags, cred);
826 if (op == UIO_WRITE) {
827 #ifdef AFS_64BIT_CLIENT
828 if (ubuf->afsio_offset < afs_vmMappingEnd) {
829 #endif /* AFS_64BIT_CLIENT */
830 ObtainWriteLock(&vcp->lock, 241);
831 afs_FakeClose(vcp, cred); /* XXXX For nfs trans and cores XXXX */
832 ReleaseWriteLock(&vcp->lock);
833 #ifdef AFS_64BIT_CLIENT
835 #endif /* AFS_64BIT_CLIENT */
837 if (vattrp != NULL && error == 0)
838 afs_gn_getattr(vp, vattrp, cred);
840 afs_Trace4(afs_iclSetp, CM_TRACE_GRDWR, ICL_TYPE_POINTER, vp,
841 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, op, ICL_TYPE_LONG, error);
848 #define AFS_MAX_VM_CHUNKS 10
/*
 * afs_vm_rdwr --- the heart of AIX read/write: moves data between the
 * caller's uio and the file's AIX VM segment with vm_move(), creating
 * the segment on demand.  Handles IO_APPEND, offset/overflow sanity
 * checks, ulimit enforcement (32-bit clients), and — on 64-bit clients —
 * splits transfers that cross afs_vmMappingEnd between VM-based I/O and
 * afs_direct_rdwr().  Writes are chunked (AFS_CHUNK* macros), dirty
 * pages are pushed with vm_writep and throttled via vms_iowait every
 * AFS_MAX_VM_CHUNKS chunks, afs_DoPartialWrite limits dirty chunks, and
 * chunk prefetch is started at chunk boundaries on reads.  A stored
 * vcp->vc_error (e.g. EDQUOT/ENOSPC from a background store) is
 * surfaced to the caller with the resid restored so the write appears
 * to have done nothing.  NOTE(review): this view is sampled — a large
 * number of lines (declarations, braces, error paths, returns) are
 * elided, so only the visible statements are documented here; the
 * intricate locking order (read lock -> write lock transitions) must be
 * confirmed against the full source before any change.
 */
850 afs_vm_rdwr(struct vnode *vp,
859 afs_size_t fileSize, xfrOffset, offset, old_offset, xfrSize;
861 #ifdef AFS_64BIT_CLIENT
862 afs_size_t finalOffset;
865 afs_size_t add2resid = 0;
866 #endif /* AFS_64BIT_CLIENT */
867 struct vcache *vcp = VTOAFS(vp);
869 afs_size_t start_offset;
870 afs_int32 save_resid = uiop->afsio_resid;
871 int first_page, last_page, pages;
874 struct vrequest treq;
876 if (code = afs_InitReq(&treq, credp))
879 /* special case easy transfer; apparently a lot are done */
880 if ((xfrSize = uiop->afsio_resid) == 0)
883 ObtainReadLock(&vcp->lock);
884 fileSize = vcp->f.m.Length;
885 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
886 uiop->afsio_offset = fileSize;
888 /* compute xfrOffset now, and do some checks */
889 xfrOffset = uiop->afsio_offset;
890 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
892 ReleaseReadLock(&vcp->lock);
895 #ifndef AFS_64BIT_CLIENT
896 /* check for "file too big" error, which should really be done above us */
897 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
899 ReleaseReadLock(&vcp->lock);
902 #endif /* AFS_64BIT_CLIENT */
904 #ifdef AFS_64BIT_CLIENT
/* transfer extends beyond the VM-mapped region: some or all of it must
 * go through afs_direct_rdwr() instead of the VM segment */
905 if (xfrOffset + xfrSize > afs_vmMappingEnd) {
906 if (rw == UIO_READ) {
907 /* don't read past EOF */
908 if (xfrSize+xfrOffset > fileSize) {
909 add2resid = xfrSize + xfrOffset - fileSize;
910 xfrSize = fileSize - xfrOffset;
912 ReleaseReadLock(&vcp->lock);
916 afsio_trim(uiop, txfrSize);
919 if (xfrOffset < afs_vmMappingEnd) {
920 /* special case of a buffer crossing the VM mapping line */
922 struct iovec tvec[16]; /* Should have access to #define */
925 memset(&tuio, 0, sizeof(tuio));
926 memset(&tvec, 0, sizeof(tvec));
929 finalOffset = xfrOffset + xfrSize;
930 tsize = (afs_size_t) (xfrOffset + xfrSize - afs_vmMappingEnd);
932 afsio_copy(uiop, &tuio, tvec);
933 afsio_skip(&tuio, txfrSize - tsize);
934 afsio_trim(&tuio, tsize);
935 tuio.afsio_offset = afs_vmMappingEnd;
936 ReleaseReadLock(&vcp->lock);
937 ObtainWriteLock(&vcp->lock, 243);
938 afs_FakeClose(vcp, credp); /* XXXX For nfs trans and cores XXXX */
939 ReleaseWriteLock(&vcp->lock);
940 code = afs_direct_rdwr(vp, &tuio, rw, ioflag, credp);
941 ObtainWriteLock(&vcp->lock, 244);
942 afs_FakeOpen(vcp); /* XXXX For nfs trans and cores XXXX */
943 ReleaseWriteLock(&vcp->lock);
946 ObtainReadLock(&vcp->lock);
947 xfrSize = afs_vmMappingEnd - xfrOffset;
949 afsio_trim(uiop, txfrSize);
951 ReleaseReadLock(&vcp->lock);
952 code = afs_direct_rdwr(vp, uiop, rw, ioflag, credp);
953 uiop->uio_resid += add2resid;
957 #endif /* AFS_64BIT_CLIENT */
/* create the VM segment for this file on first use */
960 afs_uint32 tlen = vcp->f.m.Length;
961 #ifdef AFS_64BIT_CLIENT
962 if (vcp->f.m.Length > afs_vmMappingEnd)
963 tlen = afs_vmMappingEnd;
965 /* Consider V_INTRSEG too for interrupts */
967 vms_create(&vcp->segid, V_CLIENT, (dev_t) vcp->v.v_gnode, tlen, 0, 0)) {
968 ReleaseReadLock(&vcp->lock);
971 #ifdef AFS_64BIT_KERNEL
972 vcp->vmh = vm_handle(vcp->segid, (int32long64_t) 0);
974 vcp->vmh = SRVAL(vcp->segid, 0, 0);
977 vcp->v.v_gnode->gn_seg = vcp->segid;
978 if (rw == UIO_READ) {
979 ReleaseReadLock(&vcp->lock);
980 /* don't read past EOF */
981 if (xfrSize + xfrOffset > fileSize)
982 xfrSize = fileSize - xfrOffset;
985 #ifdef AFS_64BIT_CLIENT
987 uiop->afsio_offset = xfrOffset;
988 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE, ICL_TYPE_POINTER, vcp,
989 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrOffset),
990 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrSize));
993 code = vm_move(vcp->segid, toffset, txfrSize, rw, uiop);
994 #else /* AFS_64BIT_CLIENT */
996 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
997 #endif /* AFS_64BIT_CLIENT */
1000 * If at a chunk boundary and staying within chunk,
1001 * start prefetch of next chunk.
1003 if (counter == 0 || AFS_CHUNKOFFSET(xfrOffset) == 0
1004 && xfrSize <= AFS_CHUNKSIZE(xfrOffset)) {
1005 ObtainWriteLock(&vcp->lock, 407);
1006 tdc = afs_FindDCache(vcp, xfrOffset);
1008 if (!(tdc->mflags & DFNextStarted))
1009 afs_PrefetchChunk(vcp, tdc, credp, &treq);
1012 ReleaseWriteLock(&vcp->lock);
1014 #ifdef AFS_64BIT_CLIENT
1016 uiop->afsio_offset = finalOffset;
1018 uiop->uio_resid += add2resid;
1019 #endif /* AFS_64BIT_CLIENT */
/* ---- write path begins here ---- */
1024 start_offset = uiop->afsio_offset;
1025 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE, ICL_TYPE_POINTER, vcp,
1026 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(start_offset),
1027 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrSize));
1028 ReleaseReadLock(&vcp->lock);
1029 ObtainWriteLock(&vcp->lock, 400);
1030 vcp->f.m.Date = osi_Time(); /* Set file date (for ranlib) */
1032 /* un-protect last page. */
1033 last_page = vcp->f.m.Length / PAGESIZE;
1034 #ifdef AFS_64BIT_CLIENT
1035 if (vcp->f.m.Length > afs_vmMappingEnd)
1036 last_page = afs_vmMappingEnd / PAGESIZE;
1038 vm_protectp(vcp->segid, last_page, 1, FILEKEY);
1039 if (xfrSize + xfrOffset > fileSize) {
1040 vcp->f.m.Length = xfrSize + xfrOffset;
1042 if ((!(vcp->f.states & CPageHog)) && (xfrSize >= MIN_PAGE_HOG_SIZE)) {
1044 vcp->f.states |= CPageHog;
1046 ReleaseWriteLock(&vcp->lock);
1048 /* If the write will fit into a single chunk we'll write all of it
1049 * at once. Otherwise, we'll write one chunk at a time, flushing
1050 * some of it to disk.
1054 /* Only create a page to avoid excess VM access if we're writing a
1055 * small file which is either new or completely overwrites the
1058 if ((xfrOffset == 0) && (xfrSize < PAGESIZE) && (xfrSize >= fileSize)
1059 && (vcp->v.v_gnode->gn_mwrcnt == 0)
1060 && (vcp->v.v_gnode->gn_mrdcnt == 0)) {
1061 (void)vm_makep(vcp->segid, 0);
/* main write loop: one chunk's worth per iteration */
1064 while (xfrSize > 0) {
1065 offset = AFS_CHUNKBASE(xfrOffset);
1068 if (AFS_CHUNKSIZE(xfrOffset) <= len)
1070 (afs_size_t) AFS_CHUNKSIZE(xfrOffset) - (xfrOffset - offset);
1072 if (len == xfrSize) {
1073 /* All data goes to this one chunk. */
1075 old_offset = uiop->afsio_offset;
1076 #ifdef AFS_64BIT_CLIENT
1077 uiop->afsio_offset = xfrOffset;
1078 toffset = xfrOffset;
1080 code = vm_move(vcp->segid, toffset, txfrSize, rw, uiop);
1081 #else /* AFS_64BIT_CLIENT */
1082 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
1083 #endif /* AFS_64BIT_CLIENT */
1091 /* Write just one chunk's worth of data. */
1093 struct iovec tvec[16]; /* Should have access to #define */
1095 memset(&tuio, 0, sizeof(tuio));
1096 memset(&tvec, 0, sizeof(tvec));
1098 /* Purge dirty chunks of file if there are too many dirty chunks.
1099 * Inside the write loop, we only do this at a chunk boundary.
1100 * Clean up partial chunk if necessary at end of loop.
1102 if (counter > 0 && code == 0 && xfrOffset == offset) {
1103 ObtainWriteLock(&vcp->lock, 403);
1104 if (xfrOffset > vcp->f.m.Length)
1105 vcp->f.m.Length = xfrOffset;
1106 code = afs_DoPartialWrite(vcp, &treq);
1107 vcp->f.states |= CDirty;
1108 ReleaseWriteLock(&vcp->lock);
1115 afsio_copy(uiop, &tuio, tvec);
1116 afsio_trim(&tuio, len);
1117 tuio.afsio_offset = xfrOffset;
1120 old_offset = uiop->afsio_offset;
1121 #ifdef AFS_64BIT_CLIENT
1122 toffset = xfrOffset;
1123 code = vm_move(vcp->segid, toffset, len, rw, &tuio);
1124 #else /* AFS_64BIT_CLIENT */
1125 code = vm_move(vcp->segid, xfrOffset, len, rw, &tuio);
1126 #endif /* AFS_64BIT_CLIENT */
1128 len -= tuio.afsio_resid;
1129 if (code || (len <= 0)) {
1130 code = code ? code : EINVAL;
1133 afsio_skip(uiop, len);
/* push the pages this chunk touched out to the cache */
1138 first_page = (afs_size_t) old_offset >> PGSHIFT;
1140 1 + (((afs_size_t) old_offset + (len - 1)) >> PGSHIFT) -
1142 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE2, ICL_TYPE_POINTER, vcp,
1143 ICL_TYPE_INT32, first_page, ICL_TYPE_INT32, pages);
1145 code = vm_writep(vcp->segid, first_page, pages);
1150 if (++count > AFS_MAX_VM_CHUNKS) {
1152 code = vms_iowait(vcp->segid);
1154 /* cache device failure? */
1165 code = vms_iowait(vcp->segid);
1168 /* cache device failure? */
1173 ObtainWriteLock(&vcp->lock, 242);
1174 if (code == 0 && (vcp->f.states & CDirty)) {
1175 code = afs_DoPartialWrite(vcp, &treq);
1177 vm_protectp(vcp->segid, last_page, 1, RDONLY);
1178 ReleaseWriteLock(&vcp->lock);
1180 /* If requested, fsync the file after every write */
1182 afs_fsync(vp, credp);
1184 ObtainReadLock(&vcp->lock);
1185 if (vcp->vc_error) {
1186 /* Pretend we didn't write anything. We need to get the error back to
1187 * the user. If we don't it's possible for a quota error for this
1188 * write to succeed and the file to be closed without the user ever
1189 * having seen the store error. And AIX syscall clears the error if
1190 * anything was written.
1192 code = vcp->vc_error;
1193 if (code == EDQUOT || code == ENOSPC)
1194 uiop->afsio_resid = save_resid;
1196 #ifdef AFS_64BIT_CLIENT
1198 uiop->afsio_offset = finalOffset;
1200 #endif /* AFS_64BIT_CLIENT */
1201 ReleaseReadLock(&vcp->lock);
1204 afs_Trace2(afs_iclSetp, CM_TRACE_VMWRITE3, ICL_TYPE_POINTER, vcp,
1205 ICL_TYPE_INT32, code);
/*
 * afs_direct_rdwr --- read/write that bypasses the AIX VM segment,
 * used for transfers beyond afs_vmMappingEnd on 64-bit clients.
 * Performs the same IO_APPEND / offset-overflow / ulimit checks as
 * afs_vm_rdwr, updates the cached length and date for extending writes,
 * then hands the uio straight to afs_rdwr().  On failure the resid is
 * restored so the caller sees no partial progress; afterwards the
 * offset is advanced by the full transfer and afs_DoPartialWrite() is
 * invoked when the write crossed a chunk boundary.  NOTE(review):
 * sampled view; error paths and the return are elided.
 */
1211 afs_direct_rdwr(struct vnode *vp,
1215 struct ucred *credp)
1218 afs_size_t fileSize, xfrOffset, offset, old_offset, xfrSize;
1219 struct vcache *vcp = VTOAFS(vp);
1220 afs_int32 save_resid = uiop->afsio_resid;
1221 struct vrequest treq;
1223 if (code = afs_InitReq(&treq, credp))
1226 /* special case easy transfer; apparently a lot are done */
1227 if ((xfrSize = uiop->afsio_resid) == 0)
1230 ObtainReadLock(&vcp->lock);
1231 fileSize = vcp->f.m.Length;
1232 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
1233 uiop->afsio_offset = fileSize;
1235 /* compute xfrOffset now, and do some checks */
1236 xfrOffset = uiop->afsio_offset;
1237 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
1239 ReleaseReadLock(&vcp->lock);
1243 /* check for "file too big" error, which should really be done above us */
1245 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
1247 ReleaseReadLock(&vcp->lock);
1251 ReleaseReadLock(&vcp->lock);
1252 if (rw == UIO_WRITE) {
1253 ObtainWriteLock(&vcp->lock, 400);
1254 vcp->f.m.Date = osi_Time(); /* Set file date (for ranlib) */
1256 if (xfrSize + xfrOffset > fileSize)
1257 vcp->f.m.Length = xfrSize + xfrOffset;
1258 ReleaseWriteLock(&vcp->lock);
1260 afs_Trace3(afs_iclSetp, CM_TRACE_DIRECTRDWR, ICL_TYPE_POINTER, vp,
1261 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(uiop->afsio_offset),
1262 ICL_TYPE_LONG, uiop->afsio_resid);
1263 code = afs_rdwr(VTOAFS(vp), uiop, rw, ioflag, credp);
/* on error, pretend nothing was transferred */
1265 uiop->afsio_resid = save_resid;
1267 uiop->afsio_offset = xfrOffset + xfrSize;
1268 if (uiop->afsio_resid > 0) {
1269 /* should zero here the remaining buffer */
1270 uiop->afsio_resid = 0;
1272 /* Purge dirty chunks of file if there are too many dirty chunks.
1273 * Inside the write loop, we only do this at a chunk boundary.
1274 * Clean up partial chunk if necessary at end of loop.
1276 if (AFS_CHUNKBASE(uiop->afsio_offset) != AFS_CHUNKBASE(xfrOffset)) {
1277 ObtainWriteLock(&vcp->lock, 402);
1278 code = afs_DoPartialWrite(vcp, &treq);
1279 vcp->f.states |= CDirty;
1280 ReleaseWriteLock(&vcp->lock);
/*
 * lock_normalize --- converts a struct flock to whence-relative-to-
 * start-of-file form: SEEK_CUR adds the file offset, SEEK_END adds the
 * current file size (fetched with afs_getattr), then l_whence is reset
 * to 0 (SEEK_SET).  NOTE(review): sampled view; the remaining switch
 * cases and return are elided.
 */
1290 lock_normalize(struct vnode *vp,
1291 struct flock *lckdat,
1298 switch (lckdat->l_whence) {
1302 lckdat->l_start += (off_t) offset;
1305 code = afs_getattr(VTOAFS(vp), &vattr, cred);
1308 lckdat->l_start += (off_t) vattr.va_size;
1313 lckdat->l_whence = 0;
/*
 * afs_gn_lockctl --- AIX gnode op for advisory file locking (fcntl
 * locks).  Converts AIX's eflock + SETFLCK-style command bits to the
 * standard lockctl command, copies the lock fields into a struct flock
 * (checking that the copy did not truncate 64-bit start/len values),
 * normalizes the lock to SEEK_SET form via lock_normalize(), calls
 * afs_lockctl(), and copies the resulting lock back out for the caller.
 * NOTE(review): sampled view; the command-decoding branches and the
 * return are elided.
 */
1320 afs_gn_lockctl(struct vnode *vp,
1322 struct eflock *lckdat,
1324 int (*ignored_fcn) (),
1325 #ifdef AFS_AIX52_ENV /* Changed in AIX 5.2 and up */
1327 #else /* AFS_AIX52_ENV */
1328 ulong32int64_t * ignored_id,
1329 #endif /* AFS_AIX52_ENV */
1332 int error, ncmd = 0;
1334 struct vattr *attrs;
1336 AFS_STATCNT(afs_gn_lockctl);
1337 /* Convert from AIX's cmd to standard lockctl lock types... */
1340 else if (cmd & SETFLCK) {
1345 flkd.l_type = lckdat->l_type;
1346 flkd.l_whence = lckdat->l_whence;
1347 flkd.l_start = lckdat->l_start;
1348 flkd.l_len = lckdat->l_len;
1349 flkd.l_pid = lckdat->l_pid;
1350 flkd.l_sysid = lckdat->l_sysid;
/* detect truncation when narrowing eflock fields into struct flock */
1352 if (flkd.l_start != lckdat->l_start || flkd.l_len != lckdat->l_len)
1354 if (error = lock_normalize(vp, &flkd, offset, cred))
1356 error = afs_lockctl(vp, &flkd, ncmd, cred);
1357 lckdat->l_type = flkd.l_type;
1358 lckdat->l_whence = flkd.l_whence;
1359 lckdat->l_start = flkd.l_start;
1360 lckdat->l_len = flkd.l_len;
1361 lckdat->l_pid = flkd.l_pid;
1362 lckdat->l_sysid = flkd.l_sysid;
1363 afs_Trace3(afs_iclSetp, CM_TRACE_GLOCKCTL, ICL_TYPE_POINTER, vp,
1364 ICL_TYPE_LONG, ncmd, ICL_TYPE_LONG, error);
/* NOTE: In the nfs glue routine (nfs_gn2sun.c) the order was wrong (vp, flags, cmd, arg, ext); was that another typo? */
/*
 * afs_gn_ioctl --- AIX gnode op for ioctl(2); delegates to afs_ioctl().
 * NOTE(review): sampled view; declarations and return are elided.
 */
1371 afs_gn_ioctl(struct vnode *vp,
1374 size_t flags, /* Ignored in AFS */
1375 ext_t ext, /* Ignored in AFS */
1376 struct ucred *crp) /* Ignored in AFS */
1381 AFS_STATCNT(afs_gn_ioctl);
1382 /* This seems to be a perfect fit for our ioctl redirection (afs_xioctl hack); thus the ioctl(2) entry in sysent.c is unaffected in the aix/afs port. */
1383 error = afs_ioctl(vp, cmd, arg);
1384 afs_Trace3(afs_iclSetp, CM_TRACE_GIOCTL, ICL_TYPE_POINTER, vp,
1385 ICL_TYPE_LONG, cmd, ICL_TYPE_LONG, error);
/*
 * afs_gn_readlink --- AIX gnode op for readlink(2); thin wrapper around
 * afs_readlink() plus tracing.
 * NOTE(review): sampled view; declarations and return are elided.
 */
1391 afs_gn_readlink(struct vnode *vp,
1397 AFS_STATCNT(afs_gn_readlink);
1398 error = afs_readlink(vp, uiop, cred);
1399 afs_Trace2(afs_iclSetp, CM_TRACE_GREADLINK, ICL_TYPE_POINTER, vp,
1400 ICL_TYPE_LONG, error);
/*
 * afs_gn_select --- AIX gnode op for select/poll; not implemented,
 * always returns EOPNOTSUPP.
 */
1406 afs_gn_select(struct vnode *vp,
1407 int32long64_t correl,
1414 AFS_STATCNT(afs_gn_select);
1415 /* NO SUPPORT for this in afs YET! */
1416 return (EOPNOTSUPP);
/*
 * afs_gn_symlink --- AIX gnode op for symlink(2); builds a vattr and
 * delegates to afs_symlink().
 * NOTE(review): sampled view; vattr setup and the return are elided.
 */
1421 afs_gn_symlink(struct vnode *vp,
1429 AFS_STATCNT(afs_gn_symlink);
1432 error = afs_symlink(vp, link, &va, target, NULL, cred);
1433 afs_Trace4(afs_iclSetp, CM_TRACE_GSYMLINK, ICL_TYPE_POINTER, vp,
1434 ICL_TYPE_STRING, link, ICL_TYPE_STRING, target, ICL_TYPE_LONG,
/*
 * afs_gn_readdir --- AIX gnode op for readdir; thin wrapper around
 * afs_readdir() plus tracing.
 * NOTE(review): sampled view; declarations and return are elided.
 */
1441 afs_gn_readdir(struct vnode *vp,
1447 AFS_STATCNT(afs_gn_readdir);
1448 error = afs_readdir(vp, uiop, cred);
1449 afs_Trace2(afs_iclSetp, CM_TRACE_GREADDIR, ICL_TYPE_POINTER, vp,
1450 ICL_TYPE_LONG, error);
/* Shared async-buffer state, defined elsewhere; protected by
 * afs_asyncbuf_lock (see afs_gn_strategy below). */
1455 extern Simple_lock afs_asyncbuf_lock;
1456 extern struct buf *afs_asyncbuf;
1457 extern int afs_asyncbuf_cv;
1460 * Buffers are ranked by age. A buffer's age is the value of afs_biotime
1461 * when the buffer is processed by afs_gn_strategy. afs_biotime is
1462 * incremented for each buffer. A buffer's age is kept in its av_back field.
1463 * The age ranking is used by the daemons, which favor older buffers.
1465 afs_int32 afs_biotime = 0;
1467 /* This function is called with a list of buffers, threaded through
1468 * the av_forw field. Our goal is to copy the list of buffers into the
1469 * afs_asyncbuf list, sorting buffers into sublists linked by the b_work field.
1470 * Among buffers in the same work group, the one with the lowest address
1471 * has to be located at the head of the queue; his b_bcount field will also
1472 * be increased to cover all of the buffers in the b_work queue.
1474 #define AIX_VM_BLKSIZE 8192
1475 /* Note: This function seems to be called as ddstrategy entry point, ie
1476 * has one argument. However, it also needs to be present as
1477 * vn_strategy entry point which has three arguments, but it seems to never
1478 * be called in that capacity (it would fail horribly due to the argument
1479 * mismatch). I'm confused, but it obviously has to be this way, maybe
1480 * some IBM people can shed some light on this
1483 afs_gn_strategy(struct buf *abp)
1485 struct buf **lbp, *tbp;
1487 struct buf *nbp, *qbp, *qnbp, *firstComparable;
1491 #define EFS_COMPARABLE(x,y) ((x)->b_vp == (y)->b_vp \
1492 && (x)->b_xmemd.subspace_id == (y)->b_xmemd.subspace_id \
1493 && (x)->b_flags == (y)->b_flags \
1494 && !((x)->b_flags & B_PFPROT) \
1495 && !((y)->b_flags & B_PFPROT))
1497 oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);
1498 for (tbp = abp; tbp; tbp = nbp) {
1499 nbp = tbp->av_forw; /* remember for later */
1501 tbp->av_back = (struct buf *)afs_biotime++;
1503 /* first insert the buffer into the afs_async queue. Insert buffer
1504 * sorted within its disk position within a set of comparable buffers.
1505 * Ensure that all comparable buffers are grouped contiguously.
1506 * Later on, we'll merge adjacent buffers into a single request.
1508 firstComparable = NULL;
1509 lbp = &afs_asyncbuf;
1510 for (qbp = *lbp; qbp; lbp = &qbp->av_forw, qbp = *lbp) {
1511 if (EFS_COMPARABLE(tbp, qbp)) {
1512 if (!firstComparable)
1513 firstComparable = qbp;
1514 /* this buffer is comparable, so see if the next buffer
1515 * is farther in the file; if it is insert before next buffer.
1517 if (tbp->b_blkno < qbp->b_blkno) {
1521 /* If we're at the end of a block of comparable buffers, we
1522 * insert the buffer here to keep all comparable buffers
1525 if (firstComparable)
1529 /* do the insert before qbp now */
1530 tbp->av_forw = *lbp;
1532 if (firstComparable == NULL) {
1533 /* next we're going to do all sorts of buffer merging tricks, but
1534 * here we know we're the only COMPARABLE block in the
1535 * afs_asyncbuf list, so we just skip that and continue with
1536 * the next input buffer.
1541 /* we may have actually added the "new" firstComparable */
1542 if (tbp->av_forw == firstComparable)
1543 firstComparable = tbp;
1545 * when we get here, firstComparable points to the first dude in the
1546 * same vnode and subspace that we (tbp) are in. We go through the
1547 * area of this list with COMPARABLE buffers (a contiguous region) and
1548 * repeated merge buffers that are contiguous and in the same block or
1549 * buffers that are contiguous and are both integral numbers of blocks.
1550 * Note that our end goal is to have as big blocks as we can, but we
1551 * must minimize the transfers that are not integral #s of blocks on
1552 * block boundaries, since Episode will do those smaller and/or
1553 * unaligned I/Os synchronously.
1555 * A useful example to consider has the async queue with this in it:
1556 * [8K block, 2 pages] [4K block, 1 page] [4K hole] [8K block, 2 pages]
1557 * If we get a request that fills the 4K hole, we want to merge this
1558 * whole mess into a 24K, 6 page transfer. If we don't, however, we
1559 * don't want to do any merging since adding the 4K transfer to the 8K
1560 * transfer makes the 8K transfer synchronous.
1562 * Note that if there are any blocks whose size is a multiple of
1563 * the file system block size, then we know that such blocks are also
1564 * on block boundaries.
1567 doMerge = 1; /* start the loop */
1568 while (doMerge) { /* loop until an iteration doesn't
1569 * make any more changes */
1571 for (qbp = firstComparable;; qbp = qnbp) {
1572 qnbp = qbp->av_forw;
1574 break; /* we're done */
1575 if (!EFS_COMPARABLE(qbp, qnbp))
1578 /* try to merge qbp and qnbp */
1580 /* first check if both not adjacent go on to next region */
1581 if ((dbtob(qbp->b_blkno) + qbp->b_bcount) !=
1582 dbtob(qnbp->b_blkno))
1585 /* note if both in the same block, the first byte of leftmost guy
1586 * and last byte of rightmost guy are in the same block.
1588 if ((dbtob(qbp->b_blkno) & ~(AIX_VM_BLKSIZE - 1)) ==
1589 ((dbtob(qnbp->b_blkno) + qnbp->b_bcount -
1590 1) & ~(AIX_VM_BLKSIZE - 1))) {
1591 doMerge = 1; /* both in same block */
1592 } else if ((qbp->b_bcount & (AIX_VM_BLKSIZE - 1)) == 0
1593 && (qnbp->b_bcount & (AIX_VM_BLKSIZE - 1)) == 0) {
1594 doMerge = 1; /* both integral #s of blocks */
1599 /* merge both of these blocks together */
1600 /* first set age to the older of the two */
1601 if ((int32long64_t) qnbp->av_back -
1602 (int32long64_t) qbp->av_back < 0) {
1603 qbp->av_back = qnbp->av_back;
1605 lwbp = (struct buf **) &qbp->b_work;
1606 /* find end of qbp's work queue */
1607 for (xbp = *lwbp; xbp;
1608 lwbp = (struct buf **) &xbp->b_work, xbp = *lwbp);
1610 * now setting *lwbp will change the last ptr in the qbp's
1613 qbp->av_forw = qnbp->av_forw; /* splice out qnbp */
1614 qbp->b_bcount += qnbp->b_bcount; /* fix count */
1615 *lwbp = qnbp; /* append qnbp to end */
1617 * note that qnbp is bogus, but it doesn't matter because
1618 * we're going to restart the for loop now.
1620 break; /* out of the for loop */
1624 } /* for loop for all interrupt data */
1625 /* at this point, all I/O has been queued. Wakeup the daemon */
1626 e_wakeup_one((int *)&afs_asyncbuf_cv);
1627 unlock_enable(oldPriority, &afs_asyncbuf_lock);
/*
 * afs_inactive: vnode "inactive" entry point.  Hands the no-longer-
 * referenced vcache to the cache manager via afs_InactiveVCache().
 * NOTE(review): intervening lines (remaining parameters, braces,
 * return) are elided in this listing.
 */
1633 afs_inactive(struct vcache *avc,
1636 afs_InactiveVCache(avc, acred);
/*
 * afs_gn_revoke: vn_revoke entry point.  Revocation is not supported
 * by AFS; the routine only bumps the stats counter and reports
 * EOPNOTSUPP to the caller.
 * NOTE(review): some parameter lines and braces are elided here.
 */
1640 afs_gn_revoke(struct vnode *vp,
1643 struct vattr *vinfop,
1646 AFS_STATCNT(afs_gn_revoke);
1647 /* NO SUPPORT for this in afs YET! */
1648 return (EOPNOTSUPP);
/* ACL/PCL entry points.  Only the opening signature line of each is
 * visible in this listing; bodies are elided.  NOTE(review): in the
 * upstream source these forward to the corresponding AFS ACL handling
 * (or return an error) -- confirm against the full file. */
/* vn_getacl entry point. */
1652 afs_gn_getacl(struct vnode *vp,
/* vn_setacl entry point. */
1661 afs_gn_setacl(struct vnode *vp,
/* vn_getpcl entry point. */
1670 afs_gn_getpcl(struct vnode *vp,
/* vn_setpcl entry point. */
1679 afs_gn_setpcl(struct vnode *vp,
/*
 * afs_gn_seek: vn_seek entry point -- offset validation hook.  Per the
 * comment below, a filesystem that does no offset validation may just
 * return 0; without a vn_seek entry the LFS enforces an OFF_MAX limit.
 * NOTE(review): the function body is elided in this listing.
 */
1688 afs_gn_seek(struct vnode* vp, offset_t * offp, struct ucred * crp)
1691 * File systems which do not wish to do offset validation can simply
1692 * return 0. File systems which do not provide the vn_seek entry point
1693 * will have a maximum offset of OFF_MAX (2 gigabytes minus 1) enforced
1694 * by the logical file system.
/*
 * afs_gn_vnodeops: the AIX vnodeops dispatch table for AFS vnodes.
 * Unimplemented slots are filled with afs_gn_enosys behind explicit
 * function-pointer casts so the table matches the kernel's expected
 * signatures.  The AIX 5.3+ extended-attribute/xacl slots and the
 * AIX 5.1+ pager slots are conditionally compiled.
 * NOTE(review): many initializer entries (the actual afs_gn_* function
 * pointers) are elided in this listing, as is the closing brace.
 * afs_ops (below) publishes this table as the default operations vector.
 */
1707 * declare a struct vnodeops and initialize it with ptrs to all functions
1709 struct vnodeops afs_gn_vnodeops = {
1710 /* creation/naming/deletion */
1717 /* lookup, file handle stuff */
1719 (int(*)(struct vnode*,struct fileid*,struct ucred*))
1721 /* access to files */
1722 (int(*)(struct vnode *, int32long64_t, ext_t, caddr_t *,struct ucred *))
1724 (int(*)(struct vnode *, struct vnode **, int32long64_t,caddr_t, int32long64_t, caddr_t *, struct ucred *))
1731 /* manipulate attributes of files */
1735 /* data update operations */
1748 (int(*)(struct vnode*,struct buf*,struct ucred*))
1750 /* security things */
1757 (int(*)(struct vnode *, int32long64_t, int32long64_t, offset_t, offset_t, struct ucred *))
1758 afs_gn_enosys, /* vn_fsync_range */
1759 (int(*)(struct vnode *, struct vnode **, int32long64_t, char *, struct vattr *, int32long64_t, caddr_t *, struct ucred *))
1760 afs_gn_enosys, /* vn_create_attr */
1761 (int(*)(struct vnode *, int32long64_t, void *, size_t, struct ucred *))
1762 afs_gn_enosys, /* vn_finfo */
1763 (int(*)(struct vnode *, caddr_t, offset_t, offset_t, uint32long64_t, uint32long64_t, struct ucred *))
1764 afs_gn_enosys, /* vn_map_lloff */
1765 (int(*)(struct vnode*,struct uio*,int*,struct ucred*))
1766 afs_gn_enosys, /* vn_readdir_eofp */
1767 (int(*)(struct vnode *, enum uio_rw, int32long64_t, struct uio *, ext_t , caddr_t, struct vattr *, struct vattr *, struct ucred *))
1768 afs_gn_enosys, /* vn_rdwr_attr */
1769 (int(*)(struct vnode*,int,void*,struct ucred*))
1770 afs_gn_enosys, /* vn_memcntl */
1771 #ifdef AFS_AIX53_ENV /* Present in AIX 5.3 and up */
1772 (int(*)(struct vnode*,const char*,struct uio*,struct ucred*))
1773 afs_gn_enosys, /* vn_getea */
1774 (int(*)(struct vnode*,const char*,struct uio*,int,struct ucred*))
1775 afs_gn_enosys, /* vn_setea */
1776 (int(*)(struct vnode *, struct uio *, struct ucred *))
1777 afs_gn_enosys, /* vn_listea */
1778 (int(*)(struct vnode *, const char *, struct ucred *))
1779 afs_gn_enosys, /* vn_removeea */
1780 (int(*)(struct vnode *, const char *, struct vattr *, struct ucred *))
1781 afs_gn_enosys, /* vn_statea */
1782 (int(*)(struct vnode *, uint64_t, acl_type_t *, struct uio *, size_t *, mode_t *, struct ucred *))
1783 afs_gn_enosys, /* vn_getxacl */
1784 (int(*)(struct vnode *, uint64_t, acl_type_t, struct uio *, mode_t, struct ucred *))
1785 afs_gn_enosys, /* vn_setxacl */
1786 #else /* AFS_AIX53_ENV */
1787 afs_gn_enosys, /* vn_spare7 */
1788 afs_gn_enosys, /* vn_spare8 */
1789 afs_gn_enosys, /* vn_spare9 */
1790 afs_gn_enosys, /* vn_spareA */
1791 afs_gn_enosys, /* vn_spareB */
1792 afs_gn_enosys, /* vn_spareC */
1793 afs_gn_enosys, /* vn_spareD */
1794 #endif /* AFS_AIX53_ENV */
1795 afs_gn_enosys, /* vn_spareE */
1796 afs_gn_enosys /* vn_spareF */
1797 #ifdef AFS_AIX51_ENV
1798 ,(int(*)(struct gnode*,long long,char*,unsigned long*, unsigned long*,unsigned int*))
1799 afs_gn_enosys, /* pagerBackRange */
1800 (int64_t(*)(struct gnode*))
1801 afs_gn_enosys, /* pagerGetFileSize */
1802 (void(*)(struct gnode *, vpn_t, vpn_t *, vpn_t *, vpn_t *, boolean_t))
1803 afs_gn_enosys, /* pagerReadAhead */
1804 (void(*)(struct gnode *, int64_t, int64_t, uint))
1805 afs_gn_enosys, /* pagerReadWriteBehind */
1806 (void(*)(struct gnode*,long long,unsigned long,unsigned long,unsigned int))
1807 afs_gn_enosys /* pagerEndCopy */
/* afs_ops: the published pointer to the (unlocked) vnodeops table. */
1810 struct vnodeops *afs_ops = &afs_gn_vnodeops;
1814 extern struct vfsops Afs_vfsops;
1815 extern int Afs_init();
1817 #define AFS_CALLOUT_TBL_SIZE 256
1820 * The following additional layer of wrapper functions exists because the
1821 * filesystem layer no longer obtains the kernel lock for us; this code
1822 * relied on that behavior to avoid handling locking explicitly.
/* Glock-aware wrapper: notes whether the caller already holds the AFS
 * global lock, then forwards to the real Afs_vfsops.vfs_mount entry.
 * NOTE(review): the lock acquire/release and return lines are elided. */
1826 vfs_mount(struct vfs *a, struct ucred *b)
1828 int glockOwner, ret;
1830 glockOwner = ISAFS_GLOCK();
1833 ret = (*Afs_vfsops.vfs_mount) (a, b);
/* Glock-aware wrapper forwarding to Afs_vfsops.vfs_unmount.
 * NOTE(review): lock handling/return lines are elided in this listing. */
1841 vfs_unmount(struct vfs *a, int b, struct ucred *c)
1843 int glockOwner, ret;
1845 glockOwner = ISAFS_GLOCK();
1848 ret = (*Afs_vfsops.vfs_unmount) (a, b, c);
/* Glock-aware wrapper forwarding to Afs_vfsops.vfs_root.
 * NOTE(review): lock handling/return lines are elided in this listing. */
1856 vfs_root(struct vfs *a, struct vnode **b, struct ucred *c)
1858 int glockOwner, ret;
1860 glockOwner = ISAFS_GLOCK();
1863 ret = (*Afs_vfsops.vfs_root) (a, b, c);
/* Glock-aware wrapper forwarding to Afs_vfsops.vfs_statfs.
 * NOTE(review): lock handling/return lines are elided in this listing. */
1871 vfs_statfs(struct vfs *a, struct statfs *b, struct ucred *c)
1873 int glockOwner, ret;
1875 glockOwner = ISAFS_GLOCK();
1878 ret = (*Afs_vfsops.vfs_statfs) (a, b, c);
/* Glock-aware wrapper forwarding to Afs_vfsops.vfs_sync (takes the
 * gfs, not a vfs).  NOTE(review): lock/return lines are elided. */
1886 vfs_sync(struct gfs *a)
1888 int glockOwner, ret;
1890 glockOwner = ISAFS_GLOCK();
1893 ret = (*Afs_vfsops.vfs_sync) (a);
/* Glock-aware wrapper forwarding to Afs_vfsops.vfs_vget.
 * NOTE(review): lock handling/return lines are elided in this listing. */
1900 vfs_vget(struct vfs *a, struct vnode **b, struct fileid *c, struct ucred *d)
1902 int glockOwner, ret;
1904 glockOwner = ISAFS_GLOCK();
1907 ret = (*Afs_vfsops.vfs_vget) (a, b, c, d);
/* Glock-aware wrapper forwarding to Afs_vfsops.vfs_cntl.
 * NOTE(review): lock handling/return lines are elided in this listing. */
1915 vfs_cntl(struct vfs *a, int b, caddr_t c, size_t d, struct ucred *e)
1917 int glockOwner, ret;
1919 glockOwner = ISAFS_GLOCK();
1922 ret = (*Afs_vfsops.vfs_cntl) (a, b, c, d, e);
/* Glock-aware wrapper forwarding to Afs_vfsops.vfs_quotactl.
 * NOTE(review): lock handling/return lines are elided in this listing. */
1930 vfs_quotactl(struct vfs *a, int b, uid_t c, caddr_t d, struct ucred *e)
1932 int glockOwner, ret;
1934 glockOwner = ISAFS_GLOCK();
1937 ret = (*Afs_vfsops.vfs_quotactl) (a, b, c, d, e);
/* AIX 5.1+ only: glock-aware wrapper forwarding to
 * Afs_vfsops.vfs_syncvfs.  NOTE(review): lock/return lines elided. */
1944 #ifdef AFS_AIX51_ENV
1946 vfs_syncvfs(struct gfs *a, struct vfs *b, int c, struct ucred *d)
1948 int glockOwner, ret;
1950 glockOwner = ISAFS_GLOCK();
1953 ret = (*Afs_vfsops.vfs_syncvfs) (a, b, c, d);
/*
 * locked_Afs_vfsops: vfsops table wired to the glock-acquiring vfs_*
 * wrappers above (registered instead of Afs_vfsops so every VFS entry
 * runs with the AFS global lock held).
 * NOTE(review): the initializer entries are elided in this listing --
 * confirm slot order against the full file.
 */
1962 struct vfsops locked_Afs_vfsops = {
1971 #ifdef AFS_AIX51_ENV
/*
 * Glock-acquiring vnode wrappers (vn_link .. vn_rename).  Each records
 * whether the caller already holds the AFS global lock, then forwards
 * to the matching slot in afs_gn_vnodeops.  NOTE(review): in every
 * wrapper the lock acquire/release, braces, and return lines are
 * elided in this listing.
 */
1977 vn_link(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
1979 int glockOwner, ret;
1981 glockOwner = ISAFS_GLOCK();
1984 ret = (*afs_gn_vnodeops.vn_link) (a, b, c, d);
1992 vn_mkdir(struct vnode *a, char *b, int32long64_t c, struct ucred *d)
1994 int glockOwner, ret;
1996 glockOwner = ISAFS_GLOCK();
1999 ret = (*afs_gn_vnodeops.vn_mkdir) (a, b, c, d);
2007 vn_mknod(struct vnode *a, caddr_t b, int32long64_t c, dev_t d,
2010 int glockOwner, ret;
2012 glockOwner = ISAFS_GLOCK();
2015 ret = (*afs_gn_vnodeops.vn_mknod) (a, b, c, d, e);
2023 vn_remove(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
2025 int glockOwner, ret;
2027 glockOwner = ISAFS_GLOCK();
2030 ret = (*afs_gn_vnodeops.vn_remove) (a, b, c, d);
2038 vn_rename(struct vnode *a, struct vnode *b, caddr_t c, struct vnode *d,
2039 struct vnode *e, caddr_t f, struct ucred *g)
2041 int glockOwner, ret;
2043 glockOwner = ISAFS_GLOCK();
2046 ret = (*afs_gn_vnodeops.vn_rename) (a, b, c, d, e, f, g);
/*
 * Glock-acquiring vnode wrappers (vn_rmdir .. vn_create), same pattern:
 * record current glock ownership, forward to afs_gn_vnodeops.
 * NOTE(review): lock handling, braces, and returns are elided here.
 */
2054 vn_rmdir(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
2056 int glockOwner, ret;
2058 glockOwner = ISAFS_GLOCK();
2061 ret = (*afs_gn_vnodeops.vn_rmdir) (a, b, c, d);
2069 vn_lookup(struct vnode *a, struct vnode **b, char *c, int32long64_t d,
2070 struct vattr *v, struct ucred *e)
2072 int glockOwner, ret;
2074 glockOwner = ISAFS_GLOCK();
2077 ret = (*afs_gn_vnodeops.vn_lookup) (a, b, c, d, v, e);
2085 vn_fid(struct vnode *a, struct fileid *b, struct ucred *c)
2087 int glockOwner, ret;
2089 glockOwner = ISAFS_GLOCK();
2092 ret = (*afs_gn_vnodeops.vn_fid) (a, b, c);
2100 vn_open(struct vnode *a,
2106 int glockOwner, ret;
2108 glockOwner = ISAFS_GLOCK();
2111 ret = (*afs_gn_vnodeops.vn_open) (a, b, c, d, e);
2119 vn_create(struct vnode *a, struct vnode **b, int32long64_t c, caddr_t d,
2120 int32long64_t e, caddr_t * f, struct ucred *g)
2122 int glockOwner, ret;
2124 glockOwner = ISAFS_GLOCK();
2127 ret = (*afs_gn_vnodeops.vn_create) (a, b, c, d, e, f, g);
/*
 * Glock-acquiring vnode wrappers (vn_hold .. vn_access), same pattern:
 * record current glock ownership, forward to afs_gn_vnodeops.
 * NOTE(review): lock handling, braces, and returns are elided here.
 */
2135 vn_hold(struct vnode *a)
2137 int glockOwner, ret;
2139 glockOwner = ISAFS_GLOCK();
2142 ret = (*afs_gn_vnodeops.vn_hold) (a);
2150 vn_rele(struct vnode *a)
2152 int glockOwner, ret;
2154 glockOwner = ISAFS_GLOCK();
2157 ret = (*afs_gn_vnodeops.vn_rele) (a);
2165 vn_close(struct vnode *a, int32long64_t b, caddr_t c, struct ucred *d)
2167 int glockOwner, ret;
2169 glockOwner = ISAFS_GLOCK();
2172 ret = (*afs_gn_vnodeops.vn_close) (a, b, c, d);
2180 vn_map(struct vnode *a, caddr_t b, uint32long64_t c, uint32long64_t d,
2181 uint32long64_t e, struct ucred *f)
2183 int glockOwner, ret;
2185 glockOwner = ISAFS_GLOCK();
2188 ret = (*afs_gn_vnodeops.vn_map) (a, b, c, d, e, f);
2196 vn_unmap(struct vnode *a, int32long64_t b, struct ucred *c)
2198 int glockOwner, ret;
2200 glockOwner = ISAFS_GLOCK();
2203 ret = (*afs_gn_vnodeops.vn_unmap) (a, b, c);
2211 vn_access(struct vnode *a, int32long64_t b, int32long64_t c, struct ucred *d)
2213 int glockOwner, ret;
2215 glockOwner = ISAFS_GLOCK();
2218 ret = (*afs_gn_vnodeops.vn_access) (a, b, c, d);
/*
 * Glock-acquiring vnode wrappers (vn_getattr .. vn_rdwr), same pattern:
 * record current glock ownership, forward to afs_gn_vnodeops.
 * NOTE(review): lock handling, braces, and returns are elided here.
 */
2226 vn_getattr(struct vnode *a, struct vattr *b, struct ucred *c)
2228 int glockOwner, ret;
2230 glockOwner = ISAFS_GLOCK();
2233 ret = (*afs_gn_vnodeops.vn_getattr) (a, b, c);
2241 vn_setattr(struct vnode *a, int32long64_t b, int32long64_t c, int32long64_t d,
2242 int32long64_t e, struct ucred *f)
2244 int glockOwner, ret;
2246 glockOwner = ISAFS_GLOCK();
2249 ret = (*afs_gn_vnodeops.vn_setattr) (a, b, c, d, e, f);
2257 vn_fclear(struct vnode *a, int32long64_t b, offset_t c, offset_t d
2258 , caddr_t e, struct ucred *f)
2260 int glockOwner, ret;
2262 glockOwner = ISAFS_GLOCK();
2265 ret = (*afs_gn_vnodeops.vn_fclear) (a, b, c, d, e, f);
2273 vn_fsync(struct vnode *a, int32long64_t b, int32long64_t c, struct ucred *d)
2275 int glockOwner, ret;
2277 glockOwner = ISAFS_GLOCK();
2280 ret = (*afs_gn_vnodeops.vn_fsync) (a, b, c, d);
2288 vn_ftrunc(struct vnode *a, int32long64_t b, offset_t c, caddr_t d,
2291 int glockOwner, ret;
2293 glockOwner = ISAFS_GLOCK();
2296 ret = (*afs_gn_vnodeops.vn_ftrunc) (a, b, c, d, e);
2304 vn_rdwr(struct vnode *a, enum uio_rw b, int32long64_t c, struct uio *d,
2305 ext_t e, caddr_t f, struct vattr *v, struct ucred *g)
2307 int glockOwner, ret;
2309 glockOwner = ISAFS_GLOCK();
2312 ret = (*afs_gn_vnodeops.vn_rdwr) (a, b, c, d, e, f, v, g);
/*
 * Glock-acquiring vnode wrappers (vn_lockctl .. vn_readdir), same
 * pattern: record current glock ownership, forward to afs_gn_vnodeops.
 * vn_lockctl's parameter list differs between AIX 5.2+ and earlier
 * releases, hence the #ifdef in its signature (parameter lines elided).
 * NOTE(review): lock handling, braces, and returns are elided here.
 */
2320 vn_lockctl(struct vnode *a,
2325 #ifdef AFS_AIX52_ENV /* Changed in AIX 5.2 and up */
2327 #else /* AFS_AIX52_ENV */
2329 #endif /* AFS_AIX52_ENV */
2332 int glockOwner, ret;
2334 glockOwner = ISAFS_GLOCK();
2337 ret = (*afs_gn_vnodeops.vn_lockctl) (a, b, c, d, e, f, g);
2345 vn_ioctl(struct vnode *a, int32long64_t b, caddr_t c, size_t d, ext_t e,
2348 int glockOwner, ret;
2350 glockOwner = ISAFS_GLOCK();
2353 ret = (*afs_gn_vnodeops.vn_ioctl) (a, b, c, d, e, f);
2361 vn_readlink(struct vnode *a, struct uio *b, struct ucred *c)
2363 int glockOwner, ret;
2365 glockOwner = ISAFS_GLOCK();
2368 ret = (*afs_gn_vnodeops.vn_readlink) (a, b, c);
2376 vn_select(struct vnode *a, int32long64_t b, ushort c, ushort * d,
2377 void (*e) (), caddr_t f, struct ucred *g)
2379 int glockOwner, ret;
2381 glockOwner = ISAFS_GLOCK();
2384 ret = (*afs_gn_vnodeops.vn_select) (a, b, c, d, e, f, g);
2392 vn_symlink(struct vnode *a, char *b, char *c, struct ucred *d)
2394 int glockOwner, ret;
2396 glockOwner = ISAFS_GLOCK();
2399 ret = (*afs_gn_vnodeops.vn_symlink) (a, b, c, d);
2407 vn_readdir(struct vnode *a, struct uio *b, struct ucred *c)
2409 int glockOwner, ret;
2411 glockOwner = ISAFS_GLOCK();
2414 ret = (*afs_gn_vnodeops.vn_readdir) (a, b, c);
/*
 * Glock-acquiring vnode wrappers (vn_revoke .. vn_setpcl), same
 * pattern: record current glock ownership, forward to afs_gn_vnodeops.
 * NOTE(review): lock handling, braces, and returns are elided here.
 */
2422 vn_revoke(struct vnode *a, int32long64_t b, int32long64_t c, struct vattr *d,
2425 int glockOwner, ret;
2427 glockOwner = ISAFS_GLOCK();
2430 ret = (*afs_gn_vnodeops.vn_revoke) (a, b, c, d, e);
2438 vn_getacl(struct vnode *a, struct uio *b, struct ucred *c)
2440 int glockOwner, ret;
2442 glockOwner = ISAFS_GLOCK();
2445 ret = (*afs_gn_vnodeops.vn_getacl) (a, b, c);
2453 vn_setacl(struct vnode *a, struct uio *b, struct ucred *c)
2455 int glockOwner, ret;
2457 glockOwner = ISAFS_GLOCK();
2460 ret = (*afs_gn_vnodeops.vn_setacl) (a, b, c);
2468 vn_getpcl(struct vnode *a, struct uio *b, struct ucred *c)
2470 int glockOwner, ret;
2472 glockOwner = ISAFS_GLOCK();
2475 ret = (*afs_gn_vnodeops.vn_getpcl) (a, b, c);
2483 vn_setpcl(struct vnode *a, struct uio *b, struct ucred *c)
2485 int glockOwner, ret;
2487 glockOwner = ISAFS_GLOCK();
2490 ret = (*afs_gn_vnodeops.vn_setpcl) (a, b, c);
/*
 * locked_afs_gn_vnodeops: the vnodeops table wired to the glock-
 * acquiring vn_* wrappers above.  vn_strategy is the exception -- it is
 * wired straight to afs_gn_strategy with NO locking (see the inline
 * comment).  Layout mirrors afs_gn_vnodeops, including the AIX 5.3+
 * EA/xacl slots and AIX 5.1+ pager slots.
 * NOTE(review): many initializer entries and the closing brace are
 * elided in this listing.
 */
2498 struct vnodeops locked_afs_gn_vnodeops = {
2527 (int(*)(struct vnode*,struct buf*,struct ucred*))
2528 afs_gn_strategy, /* no locking!!! (discovered the hard way) */
2535 (int(*)(struct vnode *, int32long64_t, int32long64_t, offset_t, offset_t, struct ucred *))
2536 afs_gn_enosys, /* vn_fsync_range */
2537 (int(*)(struct vnode *, struct vnode **, int32long64_t, char *, struct vattr *, int32long64_t, caddr_t *, struct ucred *))
2538 afs_gn_enosys, /* vn_create_attr */
2539 (int(*)(struct vnode *, int32long64_t, void *, size_t, struct ucred *))
2540 afs_gn_enosys, /* vn_finfo */
2541 (int(*)(struct vnode *, caddr_t, offset_t, offset_t, uint32long64_t, uint32long64_t, struct ucred *))
2542 afs_gn_enosys, /* vn_map_lloff */
2543 (int(*)(struct vnode*,struct uio*,int*,struct ucred*))
2544 afs_gn_enosys, /* vn_readdir_eofp */
2545 (int(*)(struct vnode *, enum uio_rw, int32long64_t, struct uio *, ext_t , caddr_t, struct vattr *, struct vattr *, struct ucred *))
2546 afs_gn_enosys, /* vn_rdwr_attr */
2547 (int(*)(struct vnode*,int,void*,struct ucred*))
2548 afs_gn_enosys, /* vn_memcntl */
2549 #ifdef AFS_AIX53_ENV /* Present in AIX 5.3 and up */
2550 (int(*)(struct vnode*,const char*,struct uio*,struct ucred*))
2551 afs_gn_enosys, /* vn_getea */
2552 (int(*)(struct vnode*,const char*,struct uio*,int,struct ucred*))
2553 afs_gn_enosys, /* vn_setea */
2554 (int(*)(struct vnode *, struct uio *, struct ucred *))
2555 afs_gn_enosys, /* vn_listea */
2556 (int(*)(struct vnode *, const char *, struct ucred *))
2557 afs_gn_enosys, /* vn_removeea */
2558 (int(*)(struct vnode *, const char *, struct vattr *, struct ucred *))
2559 afs_gn_enosys, /* vn_statea */
2560 (int(*)(struct vnode *, uint64_t, acl_type_t *, struct uio *, size_t *, mode_t *, struct ucred *))
2561 afs_gn_enosys, /* vn_getxacl */
2562 (int(*)(struct vnode *, uint64_t, acl_type_t, struct uio *, mode_t, struct ucred *))
2563 afs_gn_enosys, /* vn_setxacl */
2564 #else /* AFS_AIX53_ENV */
2565 afs_gn_enosys, /* vn_spare7 */
2566 afs_gn_enosys, /* vn_spare8 */
2567 afs_gn_enosys, /* vn_spare9 */
2568 afs_gn_enosys, /* vn_spareA */
2569 afs_gn_enosys, /* vn_spareB */
2570 afs_gn_enosys, /* vn_spareC */
2571 afs_gn_enosys, /* vn_spareD */
2572 #endif /* AFS_AIX53_ENV */
2573 afs_gn_enosys, /* vn_spareE */
2574 afs_gn_enosys /* vn_spareF */
2575 #ifdef AFS_AIX51_ENV
2576 ,(int(*)(struct gnode*,long long,char*,unsigned long*, unsigned long*,unsigned int*))
2577 afs_gn_enosys, /* pagerBackRange */
2578 (int64_t(*)(struct gnode*))
2579 afs_gn_enosys, /* pagerGetFileSize */
2580 (void(*)(struct gnode *, vpn_t, vpn_t *, vpn_t *, vpn_t *, boolean_t))
2581 afs_gn_enosys, /* pagerReadAhead */
2582 (void(*)(struct gnode *, int64_t, int64_t, uint))
2583 afs_gn_enosys, /* pagerReadWriteBehind */
2584 (void(*)(struct gnode*,long long,unsigned long,unsigned long,unsigned int))
2585 afs_gn_enosys /* pagerEndCopy */
2589 struct gfs afs_gfs = {
2591 &locked_afs_gn_vnodeops,
2595 GFS_VERSION4 | GFS_VERSION42 | GFS_REMOTE,