2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
29 #include "rpc/types.h"
31 #include "netinet/in.h"
35 #include "rpc/types.h"
39 #include "afs/afs_osi.h"
40 #define RFTP_INTERNALS 1
41 #include "afs/volerrors.h"
45 #include "afs/exporter.h"
47 #include "afs/afs_chunkops.h"
48 #include "afs/afs_stats.h"
49 #include "afs/nfsclient.h"
51 #include "afs/prs_fs.h"
53 #include "afsincludes.h"
/*
 * afs_gn_link -- AIX gnode-ops entry point for creating a hard link.
 * Thin wrapper: delegates to afs_link() and records the result in the
 * ICL trace log. NOTE(review): this listing is elided (fused original
 * line numbers, missing lines); comments only, code bytes untouched.
 */
57 afs_gn_link(struct vnode *vp,
64 AFS_STATCNT(afs_gn_link);
65 error = afs_link(vp, dp, name, cred);
66 afs_Trace3(afs_iclSetp, CM_TRACE_GNLINK, ICL_TYPE_POINTER, vp,
67 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/*
 * afs_gn_mkdir -- AIX gnode-ops entry for directory creation.
 * Builds a vattr whose mode is the caller's mode masked by the process
 * umask, then delegates to afs_mkdir() and traces the outcome.
 */
73 afs_gn_mkdir(struct vnode *dp,
83 AFS_STATCNT(afs_gn_mkdir);
/* apply umask here because AFS does not see the process umask itself */
86 va.va_mode = (mode & 07777) & ~get_umask();
87 error = afs_mkdir(dp, name, &va, &vp, cred);
91 afs_Trace4(afs_iclSetp, CM_TRACE_GMKDIR, ICL_TYPE_POINTER, vp,
92 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
/*
 * afs_gn_mknod -- AIX gnode-ops entry for mknod(2).
 * Derives the vnode type from the mode bits, applies the umask, requires
 * super-user for anything other than a FIFO, and then dispatches to
 * afs_mkdir() or afs_create() depending on the requested type.
 */
99 afs_gn_mknod(struct vnode *dp,
110 AFS_STATCNT(afs_gn_mknod);
112 va.va_type = IFTOVT(mode);
113 va.va_mode = (mode & 07777) & ~get_umask();
115 /**** I'm not sure if suser() should stay here since it makes no sense in AFS; however the documentation says that one "should be super-user unless making a FIFO file. Others systems such as SUN do this checking in the early stages of mknod (before the abstraction), so it's equivalently the same! *****/
116 if (va.va_type != VFIFO && !suser((char *)&error))
/* dispatch on the decoded vnode type (elided cases not visible here) */
118 switch (va.va_type) {
120 error = afs_mkdir(dp, name, &va, &vp, cred);
130 error = afs_create(VTOAFS(dp), name, &va, NONEXCL, mode, (struct vcache **)&vp, cred);
135 afs_Trace4(afs_iclSetp, CM_TRACE_GMKNOD, ICL_TYPE_POINTER, (afs_int32) vp,
136 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
/*
 * afs_gn_remove -- AIX gnode-ops entry for unlink(2).
 * The vp argument is unused by AFS; the work is done against the parent
 * directory (dp) by afs_remove(), then traced.
 */
143 afs_gn_remove(struct vnode *vp, /* Ignored in AFS */
150 AFS_STATCNT(afs_gn_remove);
151 error = afs_remove(dp, name, cred);
152 afs_Trace3(afs_iclSetp, CM_TRACE_GREMOVE, ICL_TYPE_POINTER, dp,
153 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/*
 * afs_gn_rename -- AIX gnode-ops entry for rename(2).
 * The source/target vnode arguments are unused by AFS; afs_rename()
 * operates on the two parent directories and the names.
 */
159 afs_gn_rename(struct vnode *vp, /* Ignored in AFS */
162 struct vnode *tp, /* Ignored in AFS */
169 AFS_STATCNT(afs_gn_rename);
170 error = afs_rename(dp, name, tdp, tname, cred);
171 afs_Trace4(afs_iclSetp, CM_TRACE_GRENAME, ICL_TYPE_POINTER, dp,
172 ICL_TYPE_STRING, name, ICL_TYPE_STRING, tname, ICL_TYPE_LONG,
/*
 * afs_gn_rmdir -- AIX gnode-ops entry for rmdir(2).
 * Delegates to afs_rmdir() and converts the 4.3BSD-style ENOTEMPTY
 * (numeric 66) into EEXIST, which is what AIX callers expect.
 */
179 afs_gn_rmdir(struct vnode *vp, /* Ignored in AFS */
186 AFS_STATCNT(afs_gn_rmdir);
187 error = afs_rmdir(dp, name, cred);
189 if (error == 66 /* 4.3's ENOTEMPTY */ )
190 error = EEXIST; /* AIX returns EEXIST where 4.3 used ENOTEMPTY */
192 afs_Trace3(afs_iclSetp, CM_TRACE_GRMDIR, ICL_TYPE_POINTER, dp,
193 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/*
 * afs_gn_lookup -- AIX gnode-ops name lookup.
 * Delegates to afs_lookup(); on success, if the caller supplied a vattr
 * buffer, it is filled in via afs_gn_getattr() on the resolved vnode.
 */
199 afs_gn_lookup(struct vnode *dp,
202 int32long64_t Flags, /* includes FOLLOW... */
203 struct vattr *vattrp,
209 AFS_STATCNT(afs_gn_lookup);
210 error = afs_lookup(dp, name, vpp, cred);
211 afs_Trace3(afs_iclSetp, CM_TRACE_GLOOKUP, ICL_TYPE_POINTER, dp,
212 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/* piggyback the attribute fetch so AIX gets attrs in one VNOP call */
213 if (vattrp != NULL && error == 0)
214 afs_gn_getattr(*vpp, vattrp, cred);
/*
 * afs_gn_fid -- AIX gnode-ops entry returning a file identifier for vp.
 * Thin wrapper over afs_fid(), plus tracing.
 */
220 afs_gn_fid(struct vnode *vp,
226 AFS_STATCNT(afs_gn_fid);
227 error = afs_fid(vp, fidp);
228 afs_Trace3(afs_iclSetp, CM_TRACE_GFID, ICL_TYPE_POINTER, vp,
229 ICL_TYPE_LONG, (afs_int32) fidp, ICL_TYPE_LONG, error);
/*
 * afs_gn_open -- AIX gnode-ops open entry.
 * Sequence visible here: honor FNSHARE exclusivity by sleeping until the
 * file has no opens (unless FDELAY asks for a non-blocking failure),
 * check access, call afs_open(), truncate if FTRUNC, and finally stash
 * the caller's credentials in *vinfop for later per-fp use.
 */
235 afs_gn_open(struct vnode *vp,
238 struct ucred **vinfop,
243 struct vcache *tvp = VTOAFS(vp);
247 AFS_STATCNT(afs_gn_open);
253 if ((flags & FWRITE) || (flags & FTRUNC))
/* FNSHARE: wait for all existing opens to drain before granting */
256 while ((flags & FNSHARE) && tvp->opens) {
257 if (!(flags & FDELAY)) {
261 afs_osi_Sleep(&tvp->opens);
264 error = afs_access(VTOAFS(vp), modes, cred);
269 error = afs_open((struct vcache **) &vp, flags, cred);
/* O_TRUNC: implemented as a setattr of the length after the open */
271 if (flags & FTRUNC) {
274 error = afs_setattr(VTOAFS(vp), &va, cred);
278 tvp->f.states |= CNSHARE;
281 *vinfop = cred; /* fp->f_vinfo is like fp->f_cred in suns */
283 /* an error occurred; we've told CM that the file
284 * is open, so close it now so that open and
285 * writer counts are correct. Ignore error code,
286 * as it is likely to fail (the setattr just did).
288 afs_close(vp, flags, cred);
293 afs_Trace3(afs_iclSetp, CM_TRACE_GOPEN, ICL_TYPE_POINTER, vp,
294 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
/*
 * afs_gn_create -- AIX gnode-ops create entry.
 * Maps O_EXCL|O_CREAT onto AFS's vcexcl, applies the umask to the mode,
 * calls afs_create(), and then -- except for NFS-translator requests --
 * performs the afs_open() that BSD-style copen() would normally issue
 * after the create, so open/writer accounting stays correct.
 */
300 afs_gn_create(struct vnode *dp,
305 struct ucred **vinfop, /* return ptr for fp->f_vinfo, used as fp->f_cred */
310 enum vcexcl exclusive;
311 int error, modes = 0;
315 AFS_STATCNT(afs_gn_create);
316 if ((flags & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))
322 va.va_mode = (mode & 07777) & ~get_umask();
327 if ((flags & FWRITE) || (flags & FTRUNC))
329 error = afs_create(VTOAFS(dp), name, &va, exclusive, modes, (struct vcache **)vpp, cred);
333 /* 'cr_luid' is a flag (when it comes thru the NFS server it's set to
334 * RMTUSER_REQ) that determines if we should call afs_open(). We shouldn't
335 * call it when this NFS traffic since the close will never happen thus
336 * we'd never flush the files out to the server! Gross but the simplest
337 * solution we came out with */
338 if (cred->cr_luid != RMTUSER_REQ) {
/* same FNSHARE drain as in afs_gn_open, but for the newly created file */
339 while ((flags & FNSHARE) && VTOAFS(*vpp)->opens) {
340 if (!(flags & FDELAY))
342 afs_osi_Sleep(&VTOAFS(*vpp)->opens);
344 /* Since in the standard copen() for bsd vnode kernels they do an
345 * vop_open after the vop_create, we must do the open here since there
346 * are stuff in afs_open that we need. For example advance the
347 * execsOrWriters flag (else we'll be treated as the sun's "core"
349 *vinfop = cred; /* save user creds in fp->f_vinfo */
350 error = afs_open((struct vcache **)vpp, flags, cred);
352 afs_Trace4(afs_iclSetp, CM_TRACE_GCREATE, ICL_TYPE_POINTER, dp,
353 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
/*
 * afs_gn_hold -- take a reference on a vnode (AIX hold operation).
 * Only the statistics bump is visible in this elided listing; the
 * refcount increment itself is presumably in the elided lines.
 */
360 afs_gn_hold(struct vnode *vp)
362 AFS_STATCNT(afs_gn_hold);
/*
 * afs_gn_rele -- drop a reference on a vnode.
 * Panics on an already-zero v_count (refcount underflow). When the last
 * reference goes away, clears the CPageHog state flag and hands the
 * vnode to afs_inactive() for teardown.
 */
370 afs_gn_rele(struct vnode *vp)
372 struct vcache *vcp = VTOAFS(vp);
375 AFS_STATCNT(afs_gn_rele);
376 if (vp->v_count == 0)
377 osi_Panic("afs_rele: zero v_count");
378 if (--(vp->v_count) == 0) {
379 if (vcp->f.states & CPageHog) {
381 vcp->f.states &= ~CPageHog;
383 error = afs_inactive(vp, 0);
/*
 * afs_gn_close -- AIX gnode-ops close entry.
 * If the descriptor was opened FNSHARE, clears the CNSHARE state and
 * wakes any openers sleeping in afs_gn_open()'s FNSHARE wait loop,
 * then delegates to afs_close().
 */
390 afs_gn_close(struct vnode *vp,
392 caddr_t vinfo, /* Ignored in AFS */
396 struct vcache *tvp = VTOAFS(vp);
399 AFS_STATCNT(afs_gn_close);
401 if (flags & FNSHARE) {
402 tvp->f.states &= ~CNSHARE;
403 afs_osi_Wakeup(&tvp->opens);
406 error = afs_close(vp, flags, cred);
407 afs_Trace3(afs_iclSetp, CM_TRACE_GCLOSE, ICL_TYPE_POINTER, (afs_int32) vp,
408 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
/*
 * afs_gn_map -- AIX gnode-ops mmap entry.
 * Verifies the vcache, flushes stale pages, creates a VM segment for the
 * file via vms_create() if needed (capped at afs_vmMappingEnd on 64-bit
 * clients), records the segment in the gnode, bumps the read- or
 * write-mapping counters, and stashes the caller's credentials for the
 * async daemons that will service future paging requests.
 */
414 afs_gn_map(struct vnode *vp,
421 struct vcache *vcp = VTOAFS(vp);
422 struct vrequest treq;
426 afs_int32 flag = Flag;
428 AFS_STATCNT(afs_gn_map);
430 if (error = afs_InitReq(&treq, cred))
432 error = afs_VerifyVCache(vcp, &treq);
434 return afs_CheckCode(error, &treq, 49);
436 osi_FlushPages(vcp, cred); /* XXX ensure old pages are gone XXX */
437 ObtainWriteLock(&vcp->lock, 401);
438 vcp->f.states |= CMAPPED; /* flag cleared at afs_inactive */
440 * We map the segment into our address space using the handle returned by vm_create.
/* 64-bit clients cap the VM segment at the mapping boundary */
443 afs_uint32 tlen = vcp->f.m.Length;
444 #ifdef AFS_64BIT_CLIENT
445 if (vcp->f.m.Length > afs_vmMappingEnd)
446 tlen = afs_vmMappingEnd;
448 /* Consider V_INTRSEG too for interrupts */
450 vms_create(&vcp->segid, V_CLIENT, (dev_t) vcp->v.v_gnode, tlen, 0, 0)) {
451 ReleaseWriteLock(&vcp->lock);
454 #ifdef AFS_64BIT_KERNEL
455 vcp->vmh = vm_handle(vcp->segid, (int32long64_t) 0);
457 vcp->vmh = SRVAL(vcp->segid, 0, 0);
460 vcp->v.v_gnode->gn_seg = vcp->segid; /* XXX Important XXX */
/* track read-only vs writable mappings separately in the gnode */
461 if (flag & SHM_RDONLY) {
462 vp->v_gnode->gn_mrdcnt++;
464 vp->v_gnode->gn_mwrcnt++;
467 * We keep the caller's credentials since an async daemon will handle the
468 * request at some point. We assume that the same credentials will be used.
470 if (!vcp->credp || (vcp->credp != cred)) {
473 struct ucred *crp = vcp->credp;
479 ReleaseWriteLock(&vcp->lock);
481 afs_Trace4(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vp,
482 ICL_TYPE_LONG, addr, ICL_TYPE_LONG, len, ICL_TYPE_LONG, off);
/*
 * afs_gn_unmap -- AIX gnode-ops munmap entry.
 * Decrements the gnode's read- or write-mapping counter under the vcache
 * write lock, clamping the counter at zero to guard against underflow.
 */
488 afs_gn_unmap(struct vnode *vp,
492 struct vcache *vcp = VTOAFS(vp);
493 AFS_STATCNT(afs_gn_unmap);
494 ObtainWriteLock(&vcp->lock, 402);
495 if (flag & SHM_RDONLY) {
496 vp->v_gnode->gn_mrdcnt--;
497 if (vp->v_gnode->gn_mrdcnt <= 0)
498 vp->v_gnode->gn_mrdcnt = 0;
500 vp->v_gnode->gn_mwrcnt--;
501 if (vp->v_gnode->gn_mwrcnt <= 0)
502 vp->v_gnode->gn_mwrcnt = 0;
504 ReleaseWriteLock(&vcp->lock);
/*
 * afs_gn_access -- AIX gnode-ops access(2) entry.
 * First asks AFS itself via afs_access(); then, for the AIX-specific
 * "who" classes (ACC_OTHERS / ACC_ANY / ACC_ALL), additionally tests the
 * Unix mode bits from getattr against the owner/group/other triples.
 */
512 afs_gn_access(struct vnode *vp,
522 AFS_STATCNT(afs_gn_access);
528 error = afs_access(VTOAFS(vp), mode, cred);
530 /* Additional testing */
531 if (who == ACC_OTHERS || who == ACC_ANY) {
532 error = afs_getattr(VTOAFS(vp), &vattr, cred);
/* ACC_ANY: any one of owner/group/other granting the bits suffices */
534 if (who == ACC_ANY) {
535 if (((vattr.va_mode >> 6) & mode) == mode) {
540 if (((vattr.va_mode >> 3) & mode) == mode)
545 } else if (who == ACC_ALL) {
546 error = afs_getattr(VTOAFS(vp), &vattr, cred);
/* ACC_ALL: owner AND group AND other must each grant the bits */
548 if ((!((vattr.va_mode >> 6) & mode))
549 || (!((vattr.va_mode >> 3) & mode))
550 || (!(vattr.va_mode & mode)))
559 afs_Trace3(afs_iclSetp, CM_TRACE_GACCESS, ICL_TYPE_POINTER, vp,
560 ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
/*
 * afs_gn_getattr -- AIX gnode-ops attribute fetch.
 * Thin wrapper over afs_getattr(), plus tracing.
 */
566 afs_gn_getattr(struct vnode *vp,
567 struct vattr *vattrp,
572 AFS_STATCNT(afs_gn_getattr);
573 error = afs_getattr(VTOAFS(vp), vattrp, cred);
574 afs_Trace2(afs_iclSetp, CM_TRACE_GGETATTR, ICL_TYPE_POINTER, vp,
575 ICL_TYPE_LONG, error);
/*
 * afs_gn_setattr -- AIX gnode-ops attribute update.
 * Translates the AIX chattr-style argument flags (T_OWNER_AS_IS,
 * T_GROUP_AS_IS, T_SETTIME) into a vattr, checks write access where a
 * time change is requested, then applies the change via afs_setattr().
 */
581 afs_gn_setattr(struct vnode *vp,
591 AFS_STATCNT(afs_gn_setattr);
599 if ((arg1 & T_OWNER_AS_IS) == 0)
601 if ((arg1 & T_GROUP_AS_IS) == 0)
606 error = afs_access(vp, VWRITE, cred);
/* T_SETTIME: stamp "now"; otherwise copy caller-supplied timestamps */
610 if (arg1 & T_SETTIME) {
611 va.va_atime.tv_sec = time;
612 va.va_mtime.tv_sec = time;
614 va.va_atime = *(struct timestruc_t *)arg2;
615 va.va_mtime = *(struct timestruc_t *)arg3;
623 error = afs_setattr(VTOAFS(vp), &va, cred);
625 afs_Trace2(afs_iclSetp, CM_TRACE_GSETATTR, ICL_TYPE_POINTER, vp,
626 ICL_TYPE_LONG, error);
/* Page-sized buffer of zeroes used by afs_gn_fclear to overwrite a range. */
631 char zero_buffer[PAGESIZE];
/*
 * afs_gn_fclear -- AIX gnode-ops "clear file range" entry.
 * Implements fclear by flushing the file's VM pages and then writing
 * zeroes over the requested [offset, offset+length) range, one page-sized
 * chunk per afs_rdwr() call, refusing to clear past the ulimit.
 */
633 afs_gn_fclear(struct vnode *vp,
640 int i, len, error = 0;
643 static int fclear_init = 0;
644 struct vcache *avc = VTOAFS(vp);
646 AFS_STATCNT(afs_gn_fclear);
/* one-time lazy init of the shared zero page (guarded by fclear_init) */
648 memset(zero_buffer, 0, PAGESIZE);
652 * Don't clear past ulimit
654 if (offset + length > get_ulimit())
657 /* Flush all pages first */
660 vm_flushp(avc->segid, 0, MAXFSIZE / PAGESIZE - 1);
661 vms_iowait(avc->segid);
664 uio.afsio_offset = offset;
/* write zeroes a page at a time; uio offset advances as afs_rdwr consumes */
665 for (i = offset; i < offset + length; i = uio.afsio_offset) {
666 len = offset + length - i;
667 iov.iov_len = (len > PAGESIZE) ? PAGESIZE : len;
668 iov.iov_base = zero_buffer;
669 uio.afsio_iov = &iov;
670 uio.afsio_iovcnt = 1;
671 uio.afsio_seg = AFS_UIOSYS;
672 uio.afsio_resid = iov.iov_len;
673 if (error = afs_rdwr(VTOAFS(vp), &uio, UIO_WRITE, 0, cred))
676 afs_Trace4(afs_iclSetp, CM_TRACE_GFCLEAR, ICL_TYPE_POINTER, vp,
677 ICL_TYPE_LONG, offset, ICL_TYPE_LONG, length, ICL_TYPE_LONG,
/*
 * afs_gn_fsync -- AIX gnode-ops fsync entry.
 * Thin wrapper over afs_fsync(); the flags/vinfo arguments are ignored.
 */
684 afs_gn_fsync(struct vnode *vp,
685 int32long64_t flags, /* Not used by AFS */
686 int32long64_t vinfo, /* Not used by AFS */
691 AFS_STATCNT(afs_gn_fsync);
692 error = afs_fsync(vp, cred);
693 afs_Trace3(afs_iclSetp, CM_TRACE_GFSYNC, ICL_TYPE_POINTER, vp,
694 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
/*
 * afs_gn_ftrunc -- AIX gnode-ops truncate entry.
 * Implemented as a length-only afs_setattr() on the vcache.
 */
700 afs_gn_ftrunc(struct vnode *vp,
709 AFS_STATCNT(afs_gn_ftrunc);
712 error = afs_setattr(VTOAFS(vp), &va, cred);
713 afs_Trace4(afs_iclSetp, CM_TRACE_GFTRUNC, ICL_TYPE_POINTER, vp,
714 ICL_TYPE_LONG, flags, ICL_TYPE_OFFSET,
715 ICL_HANDLE_OFFSET(length), ICL_TYPE_LONG, error);
719 /* Min size of a file which is dumping core before we declare it a page hog. */
720 #define MIN_PAGE_HOG_SIZE 8388608
/*
 * afs_gn_rdwr -- AIX gnode-ops read/write entry.
 * Front end for all read(2)/write(2) traffic: returns any pending store
 * error on writes, caches/dupes the caller's credentials for the async
 * daemons, verifies the vcache and flushes stale pages, enforces AFS ACL
 * checks for NFS-translator requests, fakes open/close accounting around
 * writes (since NFS issues no open/close RPCs), and finally delegates the
 * actual data motion to afs_vm_rdwr().
 */
723 afs_gn_rdwr(struct vnode *vp,
727 ext_t ext, /* Ignored in AFS */
728 caddr_t vinfo, /* Ignored in AFS */
729 struct vattr *vattrp,
732 struct vcache *vcp = VTOAFS(vp);
733 struct vrequest treq;
738 AFS_STATCNT(afs_gn_rdwr);
/* a previously recorded store error poisons all further writes */
741 if (op == UIO_WRITE) {
742 afs_Trace2(afs_iclSetp, CM_TRACE_GRDWR1, ICL_TYPE_POINTER, vp,
743 ICL_TYPE_LONG, vcp->vc_error);
744 return vcp->vc_error;
749 ObtainSharedLock(&vcp->lock, 507);
751 * We keep the caller's credentials since an async daemon will handle the
752 * request at some point. We assume that the same credentials will be used.
753 * If this is being called from an NFS server thread, then dupe the
754 * cred and only use that copy in calls and for the stach.
756 if (!vcp->credp || (vcp->credp != cred)) {
757 #ifdef AFS_AIX_IAUTH_ENV
758 if (AFS_NFSXLATORREQ(cred)) {
759 /* Must be able to use cred later, so dupe it so that nfs server
760 * doesn't overwrite it's contents.
766 crhold(cred); /* Bump refcount for reference in vcache */
770 UpgradeSToWLock(&vcp->lock, 508);
773 ConvertWToSLock(&vcp->lock);
778 ReleaseSharedLock(&vcp->lock);
781 * XXX Is the following really required?? XXX
783 if (error = afs_InitReq(&treq, cred))
785 if (error = afs_VerifyVCache(vcp, &treq))
786 return afs_CheckCode(error, &treq, 50);
787 osi_FlushPages(vcp, cred); /* Flush old pages */
/* NFS translator: re-check AFS ACLs since the NFS side did not */
789 if (AFS_NFSXLATORREQ(cred)) {
792 if (op == UIO_READ) {
794 (vcp, PRSFS_READ, &treq,
795 CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) {
804 * We have to bump the open/exwriters field here courtesy of the nfs xlator
805 * because there're no open/close nfs rpcs to call our afs_open/close.
806 * We do a similar thing on the afs_read/write interface.
808 if (op == UIO_WRITE) {
809 #ifdef AFS_64BIT_CLIENT
810 if (ubuf->afsio_offset < afs_vmMappingEnd) {
811 #endif /* AFS_64BIT_CLIENT */
812 ObtainWriteLock(&vcp->lock, 240);
813 vcp->f.states |= CDirty; /* Set the dirty bit */
815 ReleaseWriteLock(&vcp->lock);
816 #ifdef AFS_64BIT_CLIENT
818 #endif /* AFS_64BIT_CLIENT */
821 error = afs_vm_rdwr(vp, ubuf, op, flags, cred);
/* undo the FakeOpen-style accounting that bracketed the write */
823 if (op == UIO_WRITE) {
824 #ifdef AFS_64BIT_CLIENT
825 if (ubuf->afsio_offset < afs_vmMappingEnd) {
826 #endif /* AFS_64BIT_CLIENT */
827 ObtainWriteLock(&vcp->lock, 241);
828 afs_FakeClose(vcp, cred); /* XXXX For nfs trans and cores XXXX */
829 ReleaseWriteLock(&vcp->lock);
830 #ifdef AFS_64BIT_CLIENT
832 #endif /* AFS_64BIT_CLIENT */
834 if (vattrp != NULL && error == 0)
835 afs_gn_getattr(vp, vattrp, cred);
837 afs_Trace4(afs_iclSetp, CM_TRACE_GRDWR, ICL_TYPE_POINTER, vp,
838 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, op, ICL_TYPE_LONG, error);
/* Max chunks queued to the VM subsystem before forcing an iowait drain. */
845 #define AFS_MAX_VM_CHUNKS 10
/*
 * afs_vm_rdwr -- move data between the user's uio and the file's AIX VM
 * segment (the page cache for this file).
 *
 * Overview of the visible logic:
 *  - validate offsets, handle IO_APPEND, enforce ulimit on 32-bit builds;
 *  - 64-bit clients: I/O beyond afs_vmMappingEnd cannot go through the VM
 *    segment, so it is routed to afs_direct_rdwr(), including the split
 *    case where one buffer straddles the mapping boundary;
 *  - create the VM segment on first use (vms_create / vm_handle);
 *  - reads: clamp to EOF and vm_move() the data out;
 *  - writes: update length/mtime, un-protect the last page, then loop
 *    chunk-by-chunk calling vm_move() + vm_writep(), draining with
 *    vms_iowait() every AFS_MAX_VM_CHUNKS chunks and purging dirty
 *    chunks via afs_DoPartialWrite() at chunk boundaries;
 *  - finally surface any stored vc_error, restoring afsio_resid for
 *    EDQUOT/ENOSPC so the caller sees the failure.
 */
847 afs_vm_rdwr(struct vnode *vp,
856 afs_size_t fileSize, xfrOffset, offset, old_offset, xfrSize;
858 #ifdef AFS_64BIT_CLIENT
859 afs_size_t finalOffset;
862 afs_size_t add2resid = 0;
863 #endif /* AFS_64BIT_CLIENT */
864 struct vcache *vcp = VTOAFS(vp);
866 afs_size_t start_offset;
867 afs_int32 save_resid = uiop->afsio_resid;
868 int first_page, last_page, pages;
871 struct vrequest treq;
873 if (code = afs_InitReq(&treq, credp))
876 /* special case easy transfer; apparently a lot are done */
877 if ((xfrSize = uiop->afsio_resid) == 0)
880 ObtainReadLock(&vcp->lock);
881 fileSize = vcp->f.m.Length;
882 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
883 uiop->afsio_offset = fileSize;
885 /* compute xfrOffset now, and do some checks */
886 xfrOffset = uiop->afsio_offset;
887 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
889 ReleaseReadLock(&vcp->lock);
892 #ifndef AFS_64BIT_CLIENT
893 /* check for "file too big" error, which should really be done above us */
894 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
896 ReleaseReadLock(&vcp->lock);
899 #endif /* AFS_64BIT_CLIENT */
901 #ifdef AFS_64BIT_CLIENT
/* I/O past the VM mapping boundary cannot use the segment: go direct */
902 if (xfrOffset + xfrSize > afs_vmMappingEnd) {
903 if (rw == UIO_READ) {
904 /* don't read past EOF */
905 if (xfrSize+xfrOffset > fileSize) {
906 add2resid = xfrSize + xfrOffset - fileSize;
907 xfrSize = fileSize - xfrOffset;
909 ReleaseReadLock(&vcp->lock);
913 afsio_trim(uiop, txfrSize);
916 if (xfrOffset < afs_vmMappingEnd) {
917 /* special case of a buffer crossing the VM mapping line */
919 struct iovec tvec[16]; /* Should have access to #define */
923 finalOffset = xfrOffset + xfrSize;
924 tsize = (afs_size_t) (xfrOffset + xfrSize - afs_vmMappingEnd);
926 afsio_copy(uiop, &tuio, tvec);
927 afsio_skip(&tuio, txfrSize - tsize);
928 afsio_trim(&tuio, tsize);
929 tuio.afsio_offset = afs_vmMappingEnd;
930 ReleaseReadLock(&vcp->lock);
931 ObtainWriteLock(&vcp->lock, 243);
932 afs_FakeClose(vcp, credp); /* XXXX For nfs trans and cores XXXX */
933 ReleaseWriteLock(&vcp->lock);
934 code = afs_direct_rdwr(vp, &tuio, rw, ioflag, credp);
935 ObtainWriteLock(&vcp->lock, 244);
936 afs_FakeOpen(vcp); /* XXXX For nfs trans and cores XXXX */
937 ReleaseWriteLock(&vcp->lock);
/* remaining (below-boundary) part falls through to the VM path */
940 ObtainReadLock(&vcp->lock);
941 xfrSize = afs_vmMappingEnd - xfrOffset;
943 afsio_trim(uiop, txfrSize);
945 ReleaseReadLock(&vcp->lock);
946 code = afs_direct_rdwr(vp, uiop, rw, ioflag, credp);
947 uiop->uio_resid += add2resid;
951 #endif /* AFS_64BIT_CLIENT */
/* lazily create the file's VM segment (same dance as afs_gn_map) */
954 afs_uint32 tlen = vcp->f.m.Length;
955 #ifdef AFS_64BIT_CLIENT
956 if (vcp->f.m.Length > afs_vmMappingEnd)
957 tlen = afs_vmMappingEnd;
959 /* Consider V_INTRSEG too for interrupts */
961 vms_create(&vcp->segid, V_CLIENT, (dev_t) vcp->v.v_gnode, tlen, 0, 0)) {
962 ReleaseReadLock(&vcp->lock);
965 #ifdef AFS_64BIT_KERNEL
966 vcp->vmh = vm_handle(vcp->segid, (int32long64_t) 0);
968 vcp->vmh = SRVAL(vcp->segid, 0, 0);
971 vcp->v.v_gnode->gn_seg = vcp->segid;
972 if (rw == UIO_READ) {
973 ReleaseReadLock(&vcp->lock);
974 /* don't read past EOF */
975 if (xfrSize + xfrOffset > fileSize)
976 xfrSize = fileSize - xfrOffset;
979 #ifdef AFS_64BIT_CLIENT
981 uiop->afsio_offset = xfrOffset;
982 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE, ICL_TYPE_POINTER, vcp,
983 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrOffset),
984 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrSize));
987 code = vm_move(vcp->segid, toffset, txfrSize, rw, uiop);
988 #else /* AFS_64BIT_CLIENT */
990 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
991 #endif /* AFS_64BIT_CLIENT */
994 * If at a chunk boundary and staying within chunk,
995 * start prefetch of next chunk.
997 if (counter == 0 || AFS_CHUNKOFFSET(xfrOffset) == 0
998 && xfrSize <= AFS_CHUNKSIZE(xfrOffset)) {
999 ObtainWriteLock(&vcp->lock, 407);
1000 tdc = afs_FindDCache(vcp, xfrOffset);
1002 if (!(tdc->mflags & DFNextStarted))
1003 afs_PrefetchChunk(vcp, tdc, credp, &treq);
1006 ReleaseWriteLock(&vcp->lock);
1008 #ifdef AFS_64BIT_CLIENT
1010 uiop->afsio_offset = finalOffset;
1012 uiop->uio_resid += add2resid;
1013 #endif /* AFS_64BIT_CLIENT */
/* ---- write path from here on ---- */
1018 start_offset = uiop->afsio_offset;
1019 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE, ICL_TYPE_POINTER, vcp,
1020 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(start_offset),
1021 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrSize));
1022 ReleaseReadLock(&vcp->lock);
1023 ObtainWriteLock(&vcp->lock, 400);
1024 vcp->f.m.Date = osi_Time(); /* Set file date (for ranlib) */
1026 /* un-protect last page. */
1027 last_page = vcp->f.m.Length / PAGESIZE;
1028 #ifdef AFS_64BIT_CLIENT
1029 if (vcp->f.m.Length > afs_vmMappingEnd)
1030 last_page = afs_vmMappingEnd / PAGESIZE;
1032 vm_protectp(vcp->segid, last_page, 1, FILEKEY);
1033 if (xfrSize + xfrOffset > fileSize) {
1034 vcp->f.m.Length = xfrSize + xfrOffset;
1036 if ((!(vcp->f.states & CPageHog)) && (xfrSize >= MIN_PAGE_HOG_SIZE)) {
1038 vcp->f.states |= CPageHog;
1040 ReleaseWriteLock(&vcp->lock);
1042 /* If the write will fit into a single chunk we'll write all of it
1043 * at once. Otherwise, we'll write one chunk at a time, flushing
1044 * some of it to disk.
1048 /* Only create a page to avoid excess VM access if we're writing a
1049 * small file which is either new or completely overwrites the
1052 if ((xfrOffset == 0) && (xfrSize < PAGESIZE) && (xfrSize >= fileSize)
1053 && (vcp->v.v_gnode->gn_mwrcnt == 0)
1054 && (vcp->v.v_gnode->gn_mrdcnt == 0)) {
1055 (void)vm_makep(vcp->segid, 0);
/* main write loop: one AFS chunk per iteration */
1058 while (xfrSize > 0) {
1059 offset = AFS_CHUNKBASE(xfrOffset);
1062 if (AFS_CHUNKSIZE(xfrOffset) <= len)
1064 (afs_size_t) AFS_CHUNKSIZE(xfrOffset) - (xfrOffset - offset);
1066 if (len == xfrSize) {
1067 /* All data goes to this one chunk. */
1069 old_offset = uiop->afsio_offset;
1070 #ifdef AFS_64BIT_CLIENT
1071 uiop->afsio_offset = xfrOffset;
1072 toffset = xfrOffset;
1074 code = vm_move(vcp->segid, toffset, txfrSize, rw, uiop);
1075 #else /* AFS_64BIT_CLIENT */
1076 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
1077 #endif /* AFS_64BIT_CLIENT */
1085 /* Write just one chunk's worth of data. */
1087 struct iovec tvec[16]; /* Should have access to #define */
1089 /* Purge dirty chunks of file if there are too many dirty chunks.
1090 * Inside the write loop, we only do this at a chunk boundary.
1091 * Clean up partial chunk if necessary at end of loop.
1093 if (counter > 0 && code == 0 && xfrOffset == offset) {
1094 ObtainWriteLock(&vcp->lock, 403);
1095 if (xfrOffset > vcp->f.m.Length)
1096 vcp->f.m.Length = xfrOffset;
1097 code = afs_DoPartialWrite(vcp, &treq);
1098 vcp->f.states |= CDirty;
1099 ReleaseWriteLock(&vcp->lock);
1106 afsio_copy(uiop, &tuio, tvec);
1107 afsio_trim(&tuio, len);
1108 tuio.afsio_offset = xfrOffset;
1111 old_offset = uiop->afsio_offset;
1112 #ifdef AFS_64BIT_CLIENT
1113 toffset = xfrOffset;
1114 code = vm_move(vcp->segid, toffset, len, rw, &tuio);
1115 #else /* AFS_64BIT_CLIENT */
1116 code = vm_move(vcp->segid, xfrOffset, len, rw, &tuio);
1117 #endif /* AFS_64BIT_CLIENT */
1119 len -= tuio.afsio_resid;
1120 if (code || (len <= 0)) {
1121 code = code ? code : EINVAL;
1124 afsio_skip(uiop, len);
/* schedule the just-written pages out to the cache via vm_writep */
1129 first_page = (afs_size_t) old_offset >> PGSHIFT;
1131 1 + (((afs_size_t) old_offset + (len - 1)) >> PGSHIFT) -
1133 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE2, ICL_TYPE_POINTER, vcp,
1134 ICL_TYPE_INT32, first_page, ICL_TYPE_INT32, pages);
1136 code = vm_writep(vcp->segid, first_page, pages);
1141 if (++count > AFS_MAX_VM_CHUNKS) {
1143 code = vms_iowait(vcp->segid);
1145 /* cache device failure? */
1156 code = vms_iowait(vcp->segid);
1159 /* cache device failure? */
1164 ObtainWriteLock(&vcp->lock, 242);
1165 if (code == 0 && (vcp->f.states & CDirty)) {
1166 code = afs_DoPartialWrite(vcp, &treq);
1168 vm_protectp(vcp->segid, last_page, 1, RDONLY);
1169 ReleaseWriteLock(&vcp->lock);
1171 /* If requested, fsync the file after every write */
1173 afs_fsync(vp, credp);
1175 ObtainReadLock(&vcp->lock);
1176 if (vcp->vc_error) {
1177 /* Pretend we didn't write anything. We need to get the error back to
1178 * the user. If we don't it's possible for a quota error for this
1179 * write to succeed and the file to be closed without the user ever
1180 * having seen the store error. And AIX syscall clears the error if
1181 * anything was written.
1183 code = vcp->vc_error;
1184 if (code == EDQUOT || code == ENOSPC)
1185 uiop->afsio_resid = save_resid;
1187 #ifdef AFS_64BIT_CLIENT
1189 uiop->afsio_offset = finalOffset;
1191 #endif /* AFS_64BIT_CLIENT */
1192 ReleaseReadLock(&vcp->lock);
1195 afs_Trace2(afs_iclSetp, CM_TRACE_VMWRITE3, ICL_TYPE_POINTER, vcp,
1196 ICL_TYPE_INT32, code);
/*
 * afs_direct_rdwr -- read/write bypassing the AIX VM segment.
 * Used (on 64-bit clients) for I/O beyond afs_vmMappingEnd, where the
 * file cannot be backed by a VM segment. Performs the same offset /
 * IO_APPEND / ulimit validation as afs_vm_rdwr, extends the cached file
 * length for extending writes, then calls afs_rdwr() directly and fixes
 * up the uio offset/resid afterwards.
 */
1202 afs_direct_rdwr(struct vnode *vp,
1206 struct ucred *credp)
1209 afs_size_t fileSize, xfrOffset, offset, old_offset, xfrSize;
1210 struct vcache *vcp = VTOAFS(vp);
1211 afs_int32 save_resid = uiop->afsio_resid;
1212 struct vrequest treq;
1214 if (code = afs_InitReq(&treq, credp))
1217 /* special case easy transfer; apparently a lot are done */
1218 if ((xfrSize = uiop->afsio_resid) == 0)
1221 ObtainReadLock(&vcp->lock);
1222 fileSize = vcp->f.m.Length;
1223 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
1224 uiop->afsio_offset = fileSize;
1226 /* compute xfrOffset now, and do some checks */
1227 xfrOffset = uiop->afsio_offset;
1228 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
1230 ReleaseReadLock(&vcp->lock);
1234 /* check for "file too big" error, which should really be done above us */
1236 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
1238 ReleaseReadLock(&vcp->lock);
1242 ReleaseReadLock(&vcp->lock);
/* extending write: bump the cached length and stamp mtime up front */
1243 if (rw == UIO_WRITE) {
1244 ObtainWriteLock(&vcp->lock, 400);
1245 vcp->f.m.Date = osi_Time(); /* Set file date (for ranlib) */
1247 if (xfrSize + xfrOffset > fileSize)
1248 vcp->f.m.Length = xfrSize + xfrOffset;
1249 ReleaseWriteLock(&vcp->lock);
1251 afs_Trace3(afs_iclSetp, CM_TRACE_DIRECTRDWR, ICL_TYPE_POINTER, vp,
1252 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(uiop->afsio_offset),
1253 ICL_TYPE_LONG, uiop->afsio_resid);
1254 code = afs_rdwr(VTOAFS(vp), uiop, rw, ioflag, credp);
/* on failure, make it look like nothing was transferred */
1256 uiop->afsio_resid = save_resid;
1258 uiop->afsio_offset = xfrOffset + xfrSize;
1259 if (uiop->afsio_resid > 0) {
1260 /* should zero here the remaining buffer */
1261 uiop->afsio_resid = 0;
1263 /* Purge dirty chunks of file if there are too many dirty chunks.
1264 * Inside the write loop, we only do this at a chunk boundary.
1265 * Clean up partial chunk if necessary at end of loop.
1267 if (AFS_CHUNKBASE(uiop->afsio_offset) != AFS_CHUNKBASE(xfrOffset)) {
1268 ObtainWriteLock(&vcp->lock, 402);
1269 code = afs_DoPartialWrite(vcp, &treq);
1270 vcp->f.states |= CDirty;
1271 ReleaseWriteLock(&vcp->lock);
/*
 * lock_normalize -- convert an flock's l_start into an absolute offset.
 * Adjusts l_start by the file offset (SEEK_CUR case) or by the file size
 * fetched via afs_getattr (SEEK_END case), then rewrites l_whence to 0
 * (SEEK_SET) so downstream code sees only absolute positions.
 */
1281 lock_normalize(struct vnode *vp,
1282 struct flock *lckdat,
1289 switch (lckdat->l_whence) {
1293 lckdat->l_start += (off_t) offset;
1296 code = afs_getattr(VTOAFS(vp), &vattr, cred);
1299 lckdat->l_start += (off_t) vattr.va_size;
1304 lckdat->l_whence = 0;
/*
 * afs_gn_lockctl -- AIX gnode-ops record-locking entry.
 * Translates AIX's eflock / command flags into a standard struct flock
 * and lockctl command, checks that the 64-bit lock range survived the
 * narrowing copy, normalizes the range to absolute offsets via
 * lock_normalize(), performs the operation through afs_lockctl(), and
 * copies the (possibly updated) lock description back to the caller.
 */
1311 afs_gn_lockctl(struct vnode *vp,
1313 struct eflock *lckdat,
1315 int (*ignored_fcn) (),
1316 #ifdef AFS_AIX52_ENV /* Changed in AIX 5.2 and up */
1318 #else /* AFS_AIX52_ENV */
1319 ulong32int64_t * ignored_id,
1320 #endif /* AFS_AIX52_ENV */
1323 int error, ncmd = 0;
1325 struct vattr *attrs;
1327 AFS_STATCNT(afs_gn_lockctl);
1328 /* Convert from AIX's cmd to standard lockctl lock types... */
1331 else if (cmd & SETFLCK) {
/* copy eflock fields into a plain flock for the common lock code */
1336 flkd.l_type = lckdat->l_type;
1337 flkd.l_whence = lckdat->l_whence;
1338 flkd.l_start = lckdat->l_start;
1339 flkd.l_len = lckdat->l_len;
1340 flkd.l_pid = lckdat->l_pid;
1341 flkd.l_sysid = lckdat->l_sysid;
/* reject ranges that were truncated by the eflock -> flock narrowing */
1343 if (flkd.l_start != lckdat->l_start || flkd.l_len != lckdat->l_len)
1345 if (error = lock_normalize(vp, &flkd, offset, cred))
1347 error = afs_lockctl(vp, &flkd, ncmd, cred);
/* propagate results (e.g. conflicting-lock info from a GET) back out */
1348 lckdat->l_type = flkd.l_type;
1349 lckdat->l_whence = flkd.l_whence;
1350 lckdat->l_start = flkd.l_start;
1351 lckdat->l_len = flkd.l_len;
1352 lckdat->l_pid = flkd.l_pid;
1353 lckdat->l_sysid = flkd.l_sysid;
1354 afs_Trace3(afs_iclSetp, CM_TRACE_GLOCKCTL, ICL_TYPE_POINTER, vp,
1355 ICL_TYPE_LONG, ncmd, ICL_TYPE_LONG, error);
1360 /* NOTE: In the nfs glue routine (nfs_gn2sun.c) the order was wrong (vp, flags, cmd, arg, ext); was that another typo? */
/*
 * afs_gn_ioctl -- AIX gnode-ops ioctl entry.
 * Forwards straight to afs_ioctl(); credentials, flags and ext are not
 * used here.
 */
1362 afs_gn_ioctl(struct vnode *vp,
1365 size_t flags, /* Ignored in AFS */
1366 ext_t ext, /* Ignored in AFS */
1367 struct ucred *crp) /* Ignored in AFS */
1372 AFS_STATCNT(afs_gn_ioctl);
1373 /* This seems to be a perfect fit for our ioctl redirection (afs_xioctl hack); thus the ioctl(2) entry in sysent.c is unaffected in the aix/afs port. */
1374 error = afs_ioctl(vp, cmd, arg);
1375 afs_Trace3(afs_iclSetp, CM_TRACE_GIOCTL, ICL_TYPE_POINTER, vp,
1376 ICL_TYPE_LONG, cmd, ICL_TYPE_LONG, error);
/*
 * afs_gn_readlink -- AIX gnode-ops readlink entry.
 * Thin wrapper over afs_readlink(), plus tracing.
 */
1382 afs_gn_readlink(struct vnode *vp,
1388 AFS_STATCNT(afs_gn_readlink);
1389 error = afs_readlink(vp, uiop, cred);
1390 afs_Trace2(afs_iclSetp, CM_TRACE_GREADLINK, ICL_TYPE_POINTER, vp,
1391 ICL_TYPE_LONG, error);
/*
 * afs_gn_select -- AIX gnode-ops select entry.
 * Not implemented for AFS files; always fails with EOPNOTSUPP.
 */
1397 afs_gn_select(struct vnode *vp,
1398 int32long64_t correl,
1405 AFS_STATCNT(afs_gn_select);
1406 /* NO SUPPORT for this in afs YET! */
1407 return (EOPNOTSUPP);
/*
 * afs_gn_symlink -- AIX gnode-ops symlink entry.
 * Builds a vattr (elided here) and delegates to afs_symlink().
 */
1412 afs_gn_symlink(struct vnode *vp,
1420 AFS_STATCNT(afs_gn_symlink);
1423 error = afs_symlink(vp, link, &va, target, cred);
1424 afs_Trace4(afs_iclSetp, CM_TRACE_GSYMLINK, ICL_TYPE_POINTER, vp,
1425 ICL_TYPE_STRING, link, ICL_TYPE_STRING, target, ICL_TYPE_LONG,
/*
 * afs_gn_readdir -- AIX gnode-ops readdir entry.
 * Thin wrapper over afs_readdir(), plus tracing.
 */
1432 afs_gn_readdir(struct vnode *vp,
1438 AFS_STATCNT(afs_gn_readdir);
1439 error = afs_readdir(vp, uiop, cred);
1440 afs_Trace2(afs_iclSetp, CM_TRACE_GREADDIR, ICL_TYPE_POINTER, vp,
1441 ICL_TYPE_LONG, error);
/*
 * Shared state for the async-buffer machinery used by afs_gn_strategy:
 * the queue lock, the queue head, the daemons' condition variable, and
 * the monotonically increasing age counter described below.
 */
1446 extern Simple_lock afs_asyncbuf_lock;
1447 extern struct buf *afs_asyncbuf;
1448 extern int afs_asyncbuf_cv;
1451 * Buffers are ranked by age. A buffer's age is the value of afs_biotime
1452 * when the buffer is processed by afs_gn_strategy. afs_biotime is
1453 * incremented for each buffer. A buffer's age is kept in its av_back field.
1454 * The age ranking is used by the daemons, which favor older buffers.
1456 afs_int32 afs_biotime = 0;
1458 /* This function is called with a list of buffers, threaded through
1459 * the av_forw field. Our goal is to copy the list of buffers into the
1460 * afs_asyncbuf list, sorting buffers into sublists linked by the b_work field.
1461 * Within buffers within the same work group, the guy with the lowest address
1462 * has to be located at the head of the queue; his b_bcount field will also
1463 * be increased to cover all of the buffers in the b_work queue.
1465 #define AIX_VM_BLKSIZE 8192
1466 /* Note: This function seems to be called as ddstrategy entry point, ie
1467 * has one argument. However, it also needs to be present as
1468 * vn_strategy entry point which has three arguments, but it seems to never
1469 * be called in that capacity (it would fail horribly due to the argument
1470 * mismatch). I'm confused, but it obviously has to be this way, maybe
1471 * some IBM people can shed som light on this
 */
/*
 * afs_gn_strategy -- queue a chain of paging buffers for the AFS I/O
 * daemons. Under the interrupt-disabled afs_asyncbuf_lock, each incoming
 * buffer is (1) stamped with an age from afs_biotime, (2) inserted into
 * afs_asyncbuf sorted by disk block among "comparable" buffers (same
 * vnode, subspace and flags -- see EFS_COMPARABLE), and (3) repeatedly
 * merged with adjacent comparable buffers when the combined transfer
 * stays block-aligned, chaining absorbed buffers on b_work and growing
 * the survivor's b_bcount. Merging policy rationale is in the long
 * comment mid-function.
 */
1474 afs_gn_strategy(struct buf *abp)
1476 struct buf **lbp, *tbp;
1478 struct buf *nbp, *qbp, *qnbp, *firstComparable;
/* two buffers may be merged only when they target the same vnode and
 * memory subspace, have identical flags, and neither is page-protected */
1482 #define EFS_COMPARABLE(x,y) ((x)->b_vp == (y)->b_vp \
1483 && (x)->b_xmemd.subspace_id == (y)->b_xmemd.subspace_id \
1484 && (x)->b_flags == (y)->b_flags \
1485 && !((x)->b_flags & B_PFPROT) \
1486 && !((y)->b_flags & B_PFPROT))
1488 oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);
1489 for (tbp = abp; tbp; tbp = nbp) {
1490 nbp = tbp->av_forw; /* remember for later */
/* av_back is (ab)used to store the buffer's age stamp */
1492 tbp->av_back = (struct buf *)afs_biotime++;
1494 /* first insert the buffer into the afs_async queue. Insert buffer
1495 * sorted within its disk position within a set of comparable buffers.
1496 * Ensure that all comparable buffers are grouped contiguously.
1497 * Later on, we'll merge adjacent buffers into a single request.
1499 firstComparable = NULL;
1500 lbp = &afs_asyncbuf;
1501 for (qbp = *lbp; qbp; lbp = &qbp->av_forw, qbp = *lbp) {
1502 if (EFS_COMPARABLE(tbp, qbp)) {
1503 if (!firstComparable)
1504 firstComparable = qbp;
1505 /* this buffer is comparable, so see if the next buffer
1506 * is farther in the file; if it is insert before next buffer.
1508 if (tbp->b_blkno < qbp->b_blkno) {
1512 /* If we're at the end of a block of comparable buffers, we
1513 * insert the buffer here to keep all comparable buffers
1516 if (firstComparable)
1520 /* do the insert before qbp now */
1521 tbp->av_forw = *lbp;
1523 if (firstComparable == NULL) {
1524 /* next we're going to do all sorts of buffer merging tricks, but
1525 * here we know we're the only COMPARABLE block in the
1526 * afs_asyncbuf list, so we just skip that and continue with
1527 * the next input buffer.
1532 /* we may have actually added the "new" firstComparable */
1533 if (tbp->av_forw == firstComparable)
1534 firstComparable = tbp;
1536 * when we get here, firstComparable points to the first dude in the
1537 * same vnode and subspace that we (tbp) are in. We go through the
1538 * area of this list with COMPARABLE buffers (a contiguous region) and
1539 * repeated merge buffers that are contiguous and in the same block or
1540 * buffers that are contiguous and are both integral numbers of blocks.
1541 * Note that our end goal is to have as big blocks as we can, but we
1542 * must minimize the transfers that are not integral #s of blocks on
1543 * block boundaries, since Episode will do those smaller and/or
1544 * unaligned I/Os synchronously.
1546 * A useful example to consider has the async queue with this in it:
1547 * [8K block, 2 pages] [4K block, 1 page] [4K hole] [8K block, 2 pages]
1548 * If we get a request that fills the 4K hole, we want to merge this
1549 * whole mess into a 24K, 6 page transfer. If we don't, however, we
1550 * don't want to do any merging since adding the 4K transfer to the 8K
1551 * transfer makes the 8K transfer synchronous.
1553 * Note that if there are any blocks whose size is a multiple of
1554 * the file system block size, then we know that such blocks are also
1555 * on block boundaries.
1558 doMerge = 1; /* start the loop */
1559 while (doMerge) { /* loop until an iteration doesn't
1560 * make any more changes */
1562 for (qbp = firstComparable;; qbp = qnbp) {
1563 qnbp = qbp->av_forw;
1565 break; /* we're done */
1566 if (!EFS_COMPARABLE(qbp, qnbp))
1569 /* try to merge qbp and qnbp */
1571 /* first check if both not adjacent go on to next region */
1572 if ((dbtob(qbp->b_blkno) + qbp->b_bcount) !=
1573 dbtob(qnbp->b_blkno))
1576 /* note if both in the same block, the first byte of leftmost guy
1577 * and last byte of rightmost guy are in the same block.
1579 if ((dbtob(qbp->b_blkno) & ~(AIX_VM_BLKSIZE - 1)) ==
1580 ((dbtob(qnbp->b_blkno) + qnbp->b_bcount -
1581 1) & ~(AIX_VM_BLKSIZE - 1))) {
1582 doMerge = 1; /* both in same block */
1583 } else if ((qbp->b_bcount & (AIX_VM_BLKSIZE - 1)) == 0
1584 && (qnbp->b_bcount & (AIX_VM_BLKSIZE - 1)) == 0) {
1585 doMerge = 1; /* both integral #s of blocks */
1590 /* merge both of these blocks together */
1591 /* first set age to the older of the two */
1592 if ((int32long64_t) qnbp->av_back -
1593 (int32long64_t) qbp->av_back < 0) {
1594 qbp->av_back = qnbp->av_back;
1596 lwbp = (struct buf **) &qbp->b_work;
1597 /* find end of qbp's work queue */
1598 for (xbp = *lwbp; xbp;
1599 lwbp = (struct buf **) &xbp->b_work, xbp = *lwbp);
1601 * now setting *lwbp will change the last ptr in the qbp's
1604 qbp->av_forw = qnbp->av_forw; /* splice out qnbp */
1605 qbp->b_bcount += qnbp->b_bcount; /* fix count */
1606 *lwbp = qnbp; /* append qnbp to end */
1608 * note that qnbp is bogus, but it doesn't matter because
1609 * we're going to restart the for loop now.
1611 break; /* out of the for loop */
1615 } /* for loop for all interrupt data */
1616 /* at this point, all I/O has been queued. Wakeup the daemon */
1617 e_wakeup_one((int *)&afs_asyncbuf_cv);
1618 unlock_enable(oldPriority, &afs_asyncbuf_lock);
/*
 * afs_inactive: vnode "inactive" hook.  Hands the vcache straight to the
 * common AFS inactive-vcache machinery (afs_InactiveVCache).
 * NOTE(review): the declaration tail (the acred parameter line) and the
 * closing brace are elided in this excerpt -- confirm against full source.
 */
1624 afs_inactive(struct vcache *avc,
1627     afs_InactiveVCache(avc, acred);
/*
 * afs_gn_revoke: vn_revoke entry point.  Revocation is not implemented by
 * AFS; we bump the statistics counter and fail with EOPNOTSUPP so the
 * logical file system reports the operation as unsupported.
 */
1631 afs_gn_revoke(struct vnode *vp,
1634     struct vattr *vinfop,
1637     AFS_STATCNT(afs_gn_revoke);
1638     /* NO SUPPORT for this in afs YET! */
1639     return (EOPNOTSUPP);
/*
 * Opening lines of the AIX ACL/PCL vnode entry points (get/set ACL and
 * get/set PCL).  NOTE(review): parameter lists and bodies are elided in
 * this excerpt; see the complete source for their implementations.
 */
1643 afs_gn_getacl(struct vnode *vp,
1652 afs_gn_setacl(struct vnode *vp,
1661 afs_gn_getpcl(struct vnode *vp,
1670 afs_gn_setpcl(struct vnode *vp,
/*
 * afs_gn_seek: vn_seek entry point.  Per the embedded comment, a file
 * system that does not want offset validation can simply succeed here;
 * NOTE(review): the body is elided in this excerpt -- confirm the actual
 * return/validation logic against the full source.
 */
1679 afs_gn_seek(struct vnode* vp, offset_t * offp, struct ucred * crp)
1682  * File systems which do not wish to do offset validation can simply
1683  * return 0. File systems which do not provide the vn_seek entry point
1684  * will have a maximum offset of OFF_MAX (2 gigabytes minus 1) enforced
1685  * by the logical file system.
/*
 * afs_gn_vnodeops: the vnode-operations dispatch table handed to the AIX
 * logical file system (exported below through afs_ops).  Slots AFS does
 * not implement are filled with afs_gn_enosys, cast to the exact function
 * type each slot expects.  The AFS_AIX53_ENV section supplies the
 * extended-attribute / extended-ACL slots added in AIX 5.3 (spares on
 * older releases); the trailing AFS_AIX51_ENV section fills the pager
 * callback slots.  NOTE(review): many initializer lines (the implemented
 * afs_gn_* slot pointers) are elided in this excerpt.
 */
1698 * declare a struct vnodeops and initialize it with ptrs to all functions
1700 struct vnodeops afs_gn_vnodeops = {
1701 /* creation/naming/deletion */
1708 /* lookup, file handle stuff */
1710 (int(*)(struct vnode*,struct fileid*,struct ucred*))
1712 /* access to files */
1713 (int(*)(struct vnode *, int32long64_t, ext_t, caddr_t *,struct ucred *))
1715 (int(*)(struct vnode *, struct vnode **, int32long64_t,caddr_t, int32long64_t, caddr_t *, struct ucred *))
1722 /* manipulate attributes of files */
1726 /* data update operations */
1739 (int(*)(struct vnode*,struct buf*,struct ucred*))
1741 /* security things */
1748 (int(*)(struct vnode *, int32long64_t, int32long64_t, offset_t, offset_t, struct ucred *))
1749 afs_gn_enosys, /* vn_fsync_range */
1750 (int(*)(struct vnode *, struct vnode **, int32long64_t, char *, struct vattr *, int32long64_t, caddr_t *, struct ucred *))
1751 afs_gn_enosys, /* vn_create_attr */
1752 (int(*)(struct vnode *, int32long64_t, void *, size_t, struct ucred *))
1753 afs_gn_enosys, /* vn_finfo */
1754 (int(*)(struct vnode *, caddr_t, offset_t, offset_t, uint32long64_t, uint32long64_t, struct ucred *))
1755 afs_gn_enosys, /* vn_map_lloff */
1756 (int(*)(struct vnode*,struct uio*,int*,struct ucred*))
1757 afs_gn_enosys, /* vn_readdir_eofp */
1758 (int(*)(struct vnode *, enum uio_rw, int32long64_t, struct uio *, ext_t , caddr_t, struct vattr *, struct vattr *, struct ucred *))
1759 afs_gn_enosys, /* vn_rdwr_attr */
1760 (int(*)(struct vnode*,int,void*,struct ucred*))
1761 afs_gn_enosys, /* vn_memcntl */
1762 #ifdef AFS_AIX53_ENV /* Present in AIX 5.3 and up */
1763 (int(*)(struct vnode*,const char*,struct uio*,struct ucred*))
1764 afs_gn_enosys, /* vn_getea */
1765 (int(*)(struct vnode*,const char*,struct uio*,int,struct ucred*))
1766 afs_gn_enosys, /* vn_setea */
1767 (int(*)(struct vnode *, struct uio *, struct ucred *))
1768 afs_gn_enosys, /* vn_listea */
1769 (int(*)(struct vnode *, const char *, struct ucred *))
1770 afs_gn_enosys, /* vn_removeea */
1771 (int(*)(struct vnode *, const char *, struct vattr *, struct ucred *))
1772 afs_gn_enosys, /* vn_statea */
1773 (int(*)(struct vnode *, uint64_t, acl_type_t *, struct uio *, size_t *, mode_t *, struct ucred *))
1774 afs_gn_enosys, /* vn_getxacl */
1775 (int(*)(struct vnode *, uint64_t, acl_type_t, struct uio *, mode_t, struct ucred *))
1776 afs_gn_enosys, /* vn_setxacl */
1777 #else /* AFS_AIX53_ENV */
1778 afs_gn_enosys, /* vn_spare7 */
1779 afs_gn_enosys, /* vn_spare8 */
1780 afs_gn_enosys, /* vn_spare9 */
1781 afs_gn_enosys, /* vn_spareA */
1782 afs_gn_enosys, /* vn_spareB */
1783 afs_gn_enosys, /* vn_spareC */
1784 afs_gn_enosys, /* vn_spareD */
1785 #endif /* AFS_AIX53_ENV */
1786 afs_gn_enosys, /* vn_spareE */
1787 afs_gn_enosys /* vn_spareF */
1788 #ifdef AFS_AIX51_ENV
1789 ,(int(*)(struct gnode*,long long,char*,unsigned long*, unsigned long*,unsigned int*))
1790 afs_gn_enosys, /* pagerBackRange */
1791 (int64_t(*)(struct gnode*))
1792 afs_gn_enosys, /* pagerGetFileSize */
1793 (void(*)(struct gnode *, vpn_t, vpn_t *, vpn_t *, vpn_t *, boolean_t))
1794 afs_gn_enosys, /* pagerReadAhead */
1795 (void(*)(struct gnode *, int64_t, int64_t, uint))
1796 afs_gn_enosys, /* pagerReadWriteBehind */
1797 (void(*)(struct gnode*,long long,unsigned long,unsigned long,unsigned int))
1798 afs_gn_enosys /* pagerEndCopy */
/* Exported handle to the table above; the rest of AFS refers to it as afs_ops. */
1801 struct vnodeops *afs_ops = &afs_gn_vnodeops;
/*
 * Forward declarations of the real (non-locking) AFS vfs operations table
 * and its init routine, both defined elsewhere in the AFS client.
 * AFS_CALLOUT_TBL_SIZE: callout-table size constant.
 * NOTE(review): its use site is not visible in this excerpt.
 */
1805 extern struct vfsops Afs_vfsops;
1806 extern int Afs_init();
1808 #define AFS_CALLOUT_TBL_SIZE 256
1811  * the following additional layer of gorp is due to the fact that the
1812  * filesystem layer no longer obtains the kernel lock for me. I was relying
1813  * on this behavior to avoid having to think about locking.
/*
 * Locking shims for the vfs operations.  AIX no longer takes the kernel
 * lock before calling into a filesystem, so each wrapper records whether
 * the caller already holds the AFS global lock (ISAFS_GLOCK()) and then
 * forwards to the real handler in Afs_vfsops.
 * NOTE(review): this excerpt elides the lines between ISAFS_GLOCK() and
 * each forwarded call (presumably the AFS_GLOCK()/AFS_GUNLOCK() bracketing
 * keyed off glockOwner, and the return of ret) -- confirm against the
 * complete source.
 */
1817 vfs_mount(struct vfs *a, struct ucred *b)
1819 int glockOwner, ret;
1821 glockOwner = ISAFS_GLOCK();
1824 ret = (*Afs_vfsops.vfs_mount) (a, b);
1832 vfs_unmount(struct vfs *a, int b, struct ucred *c)
1834 int glockOwner, ret;
1836 glockOwner = ISAFS_GLOCK();
1839 ret = (*Afs_vfsops.vfs_unmount) (a, b, c);
1847 vfs_root(struct vfs *a, struct vnode **b, struct ucred *c)
1849 int glockOwner, ret;
1851 glockOwner = ISAFS_GLOCK();
1854 ret = (*Afs_vfsops.vfs_root) (a, b, c);
1862 vfs_statfs(struct vfs *a, struct statfs *b, struct ucred *c)
1864 int glockOwner, ret;
1866 glockOwner = ISAFS_GLOCK();
1869 ret = (*Afs_vfsops.vfs_statfs) (a, b, c);
1877 vfs_sync(struct gfs *a)
1879 int glockOwner, ret;
1881 glockOwner = ISAFS_GLOCK();
1884 ret = (*Afs_vfsops.vfs_sync) (a);
1891 vfs_vget(struct vfs *a, struct vnode **b, struct fileid *c, struct ucred *d)
1893 int glockOwner, ret;
1895 glockOwner = ISAFS_GLOCK();
1898 ret = (*Afs_vfsops.vfs_vget) (a, b, c, d);
1906 vfs_cntl(struct vfs *a, int b, caddr_t c, size_t d, struct ucred *e)
1908 int glockOwner, ret;
1910 glockOwner = ISAFS_GLOCK();
1913 ret = (*Afs_vfsops.vfs_cntl) (a, b, c, d, e);
1921 vfs_quotactl(struct vfs *a, int b, uid_t c, caddr_t d, struct ucred *e)
1923 int glockOwner, ret;
1925 glockOwner = ISAFS_GLOCK();
1928 ret = (*Afs_vfsops.vfs_quotactl) (a, b, c, d, e);
/* vfs_syncvfs exists only on AIX 5.1 and later. */
1935 #ifdef AFS_AIX51_ENV
1937 vfs_syncvfs(struct gfs *a, struct vfs *b, int c, struct ucred *d)
1939 int glockOwner, ret;
1941 glockOwner = ISAFS_GLOCK();
1944 ret = (*Afs_vfsops.vfs_syncvfs) (a, b, c, d);
/*
 * locked_Afs_vfsops: vfsops table wired to the glock-acquiring vfs_*
 * wrappers above (this is what actually gets registered, so every vfs
 * call enters through the locking shims).  NOTE(review): the initializer
 * entries are elided in this excerpt.
 */
1953 struct vfsops locked_Afs_vfsops = {
1962 #ifdef AFS_AIX51_ENV
/*
 * Locking shims for the naming/creation vnode operations (link, mkdir,
 * mknod, remove, rename, rmdir, lookup, fid, open, create).  Each wrapper
 * notes whether the AFS global lock is already held (ISAFS_GLOCK()) and
 * forwards to the real handler in afs_gn_vnodeops.
 * NOTE(review): the AFS_GLOCK()/AFS_GUNLOCK() bracketing and return
 * statements are elided in this excerpt -- confirm against full source.
 */
1968 vn_link(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
1970 int glockOwner, ret;
1972 glockOwner = ISAFS_GLOCK();
1975 ret = (*afs_gn_vnodeops.vn_link) (a, b, c, d);
1983 vn_mkdir(struct vnode *a, char *b, int32long64_t c, struct ucred *d)
1985 int glockOwner, ret;
1987 glockOwner = ISAFS_GLOCK();
1990 ret = (*afs_gn_vnodeops.vn_mkdir) (a, b, c, d);
1998 vn_mknod(struct vnode *a, caddr_t b, int32long64_t c, dev_t d,
2001 int glockOwner, ret;
2003 glockOwner = ISAFS_GLOCK();
2006 ret = (*afs_gn_vnodeops.vn_mknod) (a, b, c, d, e);
2014 vn_remove(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
2016 int glockOwner, ret;
2018 glockOwner = ISAFS_GLOCK();
2021 ret = (*afs_gn_vnodeops.vn_remove) (a, b, c, d);
2029 vn_rename(struct vnode *a, struct vnode *b, caddr_t c, struct vnode *d,
2030 struct vnode *e, caddr_t f, struct ucred *g)
2032 int glockOwner, ret;
2034 glockOwner = ISAFS_GLOCK();
2037 ret = (*afs_gn_vnodeops.vn_rename) (a, b, c, d, e, f, g);
2045 vn_rmdir(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
2047 int glockOwner, ret;
2049 glockOwner = ISAFS_GLOCK();
2052 ret = (*afs_gn_vnodeops.vn_rmdir) (a, b, c, d);
2060 vn_lookup(struct vnode *a, struct vnode **b, char *c, int32long64_t d,
2061 struct vattr *v, struct ucred *e)
2063 int glockOwner, ret;
2065 glockOwner = ISAFS_GLOCK();
2068 ret = (*afs_gn_vnodeops.vn_lookup) (a, b, c, d, v, e);
2076 vn_fid(struct vnode *a, struct fileid *b, struct ucred *c)
2078 int glockOwner, ret;
2080 glockOwner = ISAFS_GLOCK();
2083 ret = (*afs_gn_vnodeops.vn_fid) (a, b, c);
2091 vn_open(struct vnode *a,
2097 int glockOwner, ret;
2099 glockOwner = ISAFS_GLOCK();
2102 ret = (*afs_gn_vnodeops.vn_open) (a, b, c, d, e);
2110 vn_create(struct vnode *a, struct vnode **b, int32long64_t c, caddr_t d,
2111 int32long64_t e, caddr_t * f, struct ucred *g)
2113 int glockOwner, ret;
2115 glockOwner = ISAFS_GLOCK();
2118 ret = (*afs_gn_vnodeops.vn_create) (a, b, c, d, e, f, g);
/*
 * Locking shims for the reference-count and data vnode operations (hold,
 * rele, close, map, unmap, access, getattr, setattr, fclear, fsync,
 * ftrunc, rdwr).  Same pattern as above: record ISAFS_GLOCK() and forward
 * to the real handler in afs_gn_vnodeops.
 * NOTE(review): lock bracketing and return statements elided in excerpt.
 */
2126 vn_hold(struct vnode *a)
2128 int glockOwner, ret;
2130 glockOwner = ISAFS_GLOCK();
2133 ret = (*afs_gn_vnodeops.vn_hold) (a);
2141 vn_rele(struct vnode *a)
2143 int glockOwner, ret;
2145 glockOwner = ISAFS_GLOCK();
2148 ret = (*afs_gn_vnodeops.vn_rele) (a);
2156 vn_close(struct vnode *a, int32long64_t b, caddr_t c, struct ucred *d)
2158 int glockOwner, ret;
2160 glockOwner = ISAFS_GLOCK();
2163 ret = (*afs_gn_vnodeops.vn_close) (a, b, c, d);
2171 vn_map(struct vnode *a, caddr_t b, uint32long64_t c, uint32long64_t d,
2172 uint32long64_t e, struct ucred *f)
2174 int glockOwner, ret;
2176 glockOwner = ISAFS_GLOCK();
2179 ret = (*afs_gn_vnodeops.vn_map) (a, b, c, d, e, f);
2187 vn_unmap(struct vnode *a, int32long64_t b, struct ucred *c)
2189 int glockOwner, ret;
2191 glockOwner = ISAFS_GLOCK();
2194 ret = (*afs_gn_vnodeops.vn_unmap) (a, b, c);
2202 vn_access(struct vnode *a, int32long64_t b, int32long64_t c, struct ucred *d)
2204 int glockOwner, ret;
2206 glockOwner = ISAFS_GLOCK();
2209 ret = (*afs_gn_vnodeops.vn_access) (a, b, c, d);
2217 vn_getattr(struct vnode *a, struct vattr *b, struct ucred *c)
2219 int glockOwner, ret;
2221 glockOwner = ISAFS_GLOCK();
2224 ret = (*afs_gn_vnodeops.vn_getattr) (a, b, c);
2232 vn_setattr(struct vnode *a, int32long64_t b, int32long64_t c, int32long64_t d,
2233 int32long64_t e, struct ucred *f)
2235 int glockOwner, ret;
2237 glockOwner = ISAFS_GLOCK();
2240 ret = (*afs_gn_vnodeops.vn_setattr) (a, b, c, d, e, f);
2248 vn_fclear(struct vnode *a, int32long64_t b, offset_t c, offset_t d
2249 , caddr_t e, struct ucred *f)
2251 int glockOwner, ret;
2253 glockOwner = ISAFS_GLOCK();
2256 ret = (*afs_gn_vnodeops.vn_fclear) (a, b, c, d, e, f);
2264 vn_fsync(struct vnode *a, int32long64_t b, int32long64_t c, struct ucred *d)
2266 int glockOwner, ret;
2268 glockOwner = ISAFS_GLOCK();
2271 ret = (*afs_gn_vnodeops.vn_fsync) (a, b, c, d);
2279 vn_ftrunc(struct vnode *a, int32long64_t b, offset_t c, caddr_t d,
2282 int glockOwner, ret;
2284 glockOwner = ISAFS_GLOCK();
2287 ret = (*afs_gn_vnodeops.vn_ftrunc) (a, b, c, d, e);
2295 vn_rdwr(struct vnode *a, enum uio_rw b, int32long64_t c, struct uio *d,
2296 ext_t e, caddr_t f, struct vattr *v, struct ucred *g)
2298 int glockOwner, ret;
2300 glockOwner = ISAFS_GLOCK();
2303 ret = (*afs_gn_vnodeops.vn_rdwr) (a, b, c, d, e, f, v, g);
/*
 * Locking shims for the remaining vnode operations (lockctl, ioctl,
 * readlink, select, symlink, readdir, revoke, and the ACL/PCL ops).
 * Same pattern: record ISAFS_GLOCK() and forward to afs_gn_vnodeops.
 * vn_lockctl's parameter list differs on AIX 5.2+ (AFS_AIX52_ENV).
 * NOTE(review): lock bracketing and return statements elided in excerpt.
 */
2311 vn_lockctl(struct vnode *a,
2316 #ifdef AFS_AIX52_ENV /* Changed in AIX 5.2 and up */
2318 #else /* AFS_AIX52_ENV */
2320 #endif /* AFS_AIX52_ENV */
2323 int glockOwner, ret;
2325 glockOwner = ISAFS_GLOCK();
2328 ret = (*afs_gn_vnodeops.vn_lockctl) (a, b, c, d, e, f, g);
2336 vn_ioctl(struct vnode *a, int32long64_t b, caddr_t c, size_t d, ext_t e,
2339 int glockOwner, ret;
2341 glockOwner = ISAFS_GLOCK();
2344 ret = (*afs_gn_vnodeops.vn_ioctl) (a, b, c, d, e, f);
2352 vn_readlink(struct vnode *a, struct uio *b, struct ucred *c)
2354 int glockOwner, ret;
2356 glockOwner = ISAFS_GLOCK();
2359 ret = (*afs_gn_vnodeops.vn_readlink) (a, b, c);
2367 vn_select(struct vnode *a, int32long64_t b, ushort c, ushort * d,
2368 void (*e) (), caddr_t f, struct ucred *g)
2370 int glockOwner, ret;
2372 glockOwner = ISAFS_GLOCK();
2375 ret = (*afs_gn_vnodeops.vn_select) (a, b, c, d, e, f, g);
2383 vn_symlink(struct vnode *a, char *b, char *c, struct ucred *d)
2385 int glockOwner, ret;
2387 glockOwner = ISAFS_GLOCK();
2390 ret = (*afs_gn_vnodeops.vn_symlink) (a, b, c, d);
2398 vn_readdir(struct vnode *a, struct uio *b, struct ucred *c)
2400 int glockOwner, ret;
2402 glockOwner = ISAFS_GLOCK();
2405 ret = (*afs_gn_vnodeops.vn_readdir) (a, b, c);
2413 vn_revoke(struct vnode *a, int32long64_t b, int32long64_t c, struct vattr *d,
2416 int glockOwner, ret;
2418 glockOwner = ISAFS_GLOCK();
2421 ret = (*afs_gn_vnodeops.vn_revoke) (a, b, c, d, e);
2429 vn_getacl(struct vnode *a, struct uio *b, struct ucred *c)
2431 int glockOwner, ret;
2433 glockOwner = ISAFS_GLOCK();
2436 ret = (*afs_gn_vnodeops.vn_getacl) (a, b, c);
2444 vn_setacl(struct vnode *a, struct uio *b, struct ucred *c)
2446 int glockOwner, ret;
2448 glockOwner = ISAFS_GLOCK();
2451 ret = (*afs_gn_vnodeops.vn_setacl) (a, b, c);
2459 vn_getpcl(struct vnode *a, struct uio *b, struct ucred *c)
2461 int glockOwner, ret;
2463 glockOwner = ISAFS_GLOCK();
2466 ret = (*afs_gn_vnodeops.vn_getpcl) (a, b, c);
2474 vn_setpcl(struct vnode *a, struct uio *b, struct ucred *c)
2476 int glockOwner, ret;
2478 glockOwner = ISAFS_GLOCK();
2481 ret = (*afs_gn_vnodeops.vn_setpcl) (a, b, c);
/*
 * locked_afs_gn_vnodeops: the vnodeops table actually registered with the
 * system -- it routes through the vn_* locking wrappers above rather than
 * directly to the afs_gn_* handlers.  Exception: the strategy slot is
 * wired straight to afs_gn_strategy with NO locking wrapper (see the
 * in-line "no locking!!!" comment at that slot).  Unimplemented slots get
 * afs_gn_enosys with slot-matching casts, with the same AFS_AIX53_ENV /
 * AFS_AIX51_ENV conditional sections as afs_gn_vnodeops.
 * NOTE(review): many initializer lines are elided in this excerpt.
 */
2489 struct vnodeops locked_afs_gn_vnodeops = {
2518 (int(*)(struct vnode*,struct buf*,struct ucred*))
2519 afs_gn_strategy, /* no locking!!! (discovered the hard way) */
2526 (int(*)(struct vnode *, int32long64_t, int32long64_t, offset_t, offset_t, struct ucred *))
2527 afs_gn_enosys, /* vn_fsync_range */
2528 (int(*)(struct vnode *, struct vnode **, int32long64_t, char *, struct vattr *, int32long64_t, caddr_t *, struct ucred *))
2529 afs_gn_enosys, /* vn_create_attr */
2530 (int(*)(struct vnode *, int32long64_t, void *, size_t, struct ucred *))
2531 afs_gn_enosys, /* vn_finfo */
2532 (int(*)(struct vnode *, caddr_t, offset_t, offset_t, uint32long64_t, uint32long64_t, struct ucred *))
2533 afs_gn_enosys, /* vn_map_lloff */
2534 (int(*)(struct vnode*,struct uio*,int*,struct ucred*))
2535 afs_gn_enosys, /* vn_readdir_eofp */
2536 (int(*)(struct vnode *, enum uio_rw, int32long64_t, struct uio *, ext_t , caddr_t, struct vattr *, struct vattr *, struct ucred *))
2537 afs_gn_enosys, /* vn_rdwr_attr */
2538 (int(*)(struct vnode*,int,void*,struct ucred*))
2539 afs_gn_enosys, /* vn_memcntl */
2540 #ifdef AFS_AIX53_ENV /* Present in AIX 5.3 and up */
2541 (int(*)(struct vnode*,const char*,struct uio*,struct ucred*))
2542 afs_gn_enosys, /* vn_getea */
2543 (int(*)(struct vnode*,const char*,struct uio*,int,struct ucred*))
2544 afs_gn_enosys, /* vn_setea */
2545 (int(*)(struct vnode *, struct uio *, struct ucred *))
2546 afs_gn_enosys, /* vn_listea */
2547 (int(*)(struct vnode *, const char *, struct ucred *))
2548 afs_gn_enosys, /* vn_removeea */
2549 (int(*)(struct vnode *, const char *, struct vattr *, struct ucred *))
2550 afs_gn_enosys, /* vn_statea */
2551 (int(*)(struct vnode *, uint64_t, acl_type_t *, struct uio *, size_t *, mode_t *, struct ucred *))
2552 afs_gn_enosys, /* vn_getxacl */
2553 (int(*)(struct vnode *, uint64_t, acl_type_t, struct uio *, mode_t, struct ucred *))
2554 afs_gn_enosys, /* vn_setxacl */
2555 #else /* AFS_AIX53_ENV */
2556 afs_gn_enosys, /* vn_spare7 */
2557 afs_gn_enosys, /* vn_spare8 */
2558 afs_gn_enosys, /* vn_spare9 */
2559 afs_gn_enosys, /* vn_spareA */
2560 afs_gn_enosys, /* vn_spareB */
2561 afs_gn_enosys, /* vn_spareC */
2562 afs_gn_enosys, /* vn_spareD */
2563 #endif /* AFS_AIX53_ENV */
2564 afs_gn_enosys, /* vn_spareE */
2565 afs_gn_enosys /* vn_spareF */
2566 #ifdef AFS_AIX51_ENV
2567 ,(int(*)(struct gnode*,long long,char*,unsigned long*, unsigned long*,unsigned int*))
2568 afs_gn_enosys, /* pagerBackRange */
2569 (int64_t(*)(struct gnode*))
2570 afs_gn_enosys, /* pagerGetFileSize */
2571 (void(*)(struct gnode *, vpn_t, vpn_t *, vpn_t *, vpn_t *, boolean_t))
2572 afs_gn_enosys, /* pagerReadAhead */
2573 (void(*)(struct gnode *, int64_t, int64_t, uint))
2574 afs_gn_enosys, /* pagerReadWriteBehind */
2575 (void(*)(struct gnode*,long long,unsigned long,unsigned long,unsigned int))
2576 afs_gn_enosys /* pagerEndCopy */
2580 struct gfs afs_gfs = {
2582 &locked_afs_gn_vnodeops,
2586 GFS_VERSION4 | GFS_VERSION42 | GFS_REMOTE,