2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
31 #include "rpc/types.h"
33 #include "netinet/in.h"
37 #include "rpc/types.h"
41 #include "afs/afs_osi.h"
42 #define RFTP_INTERNALS 1
43 #include "afs/volerrors.h"
47 #include "afs/exporter.h"
49 #include "afs/afs_chunkops.h"
50 #include "afs/afs_stats.h"
51 #include "afs/nfsclient.h"
53 #include "afs/prs_fs.h"
55 #include "afsincludes.h"
/*
 * afs_gn_link -- AIX gnode "link" operation.  Delegates to afs_link()
 * and records the result in the ICL trace log.  NOTE(review): interior
 * source lines are elided in this extract (declarations/return not shown).
 */
59 afs_gn_link(struct vnode *vp,
66 AFS_STATCNT(afs_gn_link);
67 error = afs_link(vp, dp, name, cred);
68 afs_Trace3(afs_iclSetp, CM_TRACE_GNLINK, ICL_TYPE_POINTER, vp,
69 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/*
 * afs_gn_mkdir -- gnode "mkdir": builds a vattr whose mode is the caller's
 * mode masked by the process umask, calls afs_mkdir(), and traces the result.
 */
75 afs_gn_mkdir(struct vnode *dp,
85 AFS_STATCNT(afs_gn_mkdir);
/* apply umask so the created directory gets conventional permissions */
88 va.va_mode = (mode & 07777) & ~get_umask();
89 error = afs_mkdir(dp, name, &va, &vp, cred);
93 afs_Trace4(afs_iclSetp, CM_TRACE_GMKDIR, ICL_TYPE_POINTER, vp,
94 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
/*
 * afs_gn_mknod -- gnode "mknod": derives the vnode type and umask-adjusted
 * mode from the caller's mode, requires super-user for non-FIFO nodes, then
 * dispatches on type (directories via afs_mkdir, others via afs_create with
 * NONEXCL).
 */
101 afs_gn_mknod(struct vnode *dp,
112 AFS_STATCNT(afs_gn_mknod);
114 va.va_type = IFTOVT(mode);
115 va.va_mode = (mode & 07777) & ~get_umask();
117 /**** I'm not sure if suser() should stay here since it makes no sense in AFS; however the documentation says that one "should be super-user unless making a FIFO file. Other systems such as SUN do this checking in the early stages of mknod (before the abstraction), so it is effectively equivalent! *****/
118 if (va.va_type != VFIFO && !suser((char *)&error))
120 switch (va.va_type) {
122 error = afs_mkdir(dp, name, &va, &vp, cred);
132 error = afs_create(VTOAFS(dp), name, &va, NONEXCL, mode, (struct vcache **)&vp, cred);
137 afs_Trace4(afs_iclSetp, CM_TRACE_GMKNOD, ICL_TYPE_POINTER, (afs_int32) vp,
138 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
/*
 * afs_gn_remove -- gnode "remove": the leading vnode argument is unused in
 * AFS; removal is performed on the parent directory via afs_remove().
 */
145 afs_gn_remove(struct vnode *vp, /* Ignored in AFS */
152 AFS_STATCNT(afs_gn_remove);
153 error = afs_remove(dp, name, cred);
154 afs_Trace3(afs_iclSetp, CM_TRACE_GREMOVE, ICL_TYPE_POINTER, dp,
155 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/*
 * afs_gn_rename -- gnode "rename": both plain vnode arguments are unused in
 * AFS; the rename is done on the source/target directories via afs_rename().
 */
161 afs_gn_rename(struct vnode *vp, /* Ignored in AFS */
164 struct vnode *tp, /* Ignored in AFS */
171 AFS_STATCNT(afs_gn_rename);
172 error = afs_rename(dp, name, tdp, tname, cred);
173 afs_Trace4(afs_iclSetp, CM_TRACE_GRENAME, ICL_TYPE_POINTER, dp,
174 ICL_TYPE_STRING, name, ICL_TYPE_STRING, tname, ICL_TYPE_LONG,
/*
 * afs_gn_rmdir -- gnode "rmdir": calls afs_rmdir() and maps the historical
 * 4.3BSD ENOTEMPTY value (66) to EEXIST, which is what AIX expects.
 */
181 afs_gn_rmdir(struct vnode *vp, /* Ignored in AFS */
188 AFS_STATCNT(afs_gn_rmdir);
189 error = afs_rmdir(dp, name, cred);
191 if (error == 66 /* 4.3's ENOTEMPTY */ )
192 error = EEXIST; /* AIX returns EEXIST where 4.3 used ENOTEMPTY */
194 afs_Trace3(afs_iclSetp, CM_TRACE_GRMDIR, ICL_TYPE_POINTER, dp,
195 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/*
 * afs_gn_lookup -- gnode "lookup": resolves name in dp via afs_lookup();
 * on success, optionally fills the caller-supplied vattr by calling
 * afs_gn_getattr() on the resulting vnode.
 */
201 afs_gn_lookup(struct vnode *dp,
204 int32long64_t Flags, /* includes FOLLOW... */
205 struct vattr *vattrp,
211 AFS_STATCNT(afs_gn_lookup);
212 error = afs_lookup(dp, name, vpp, cred);
213 afs_Trace3(afs_iclSetp, CM_TRACE_GLOOKUP, ICL_TYPE_POINTER, dp,
214 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
215 if (vattrp != NULL && error == 0)
216 afs_gn_getattr(*vpp, vattrp, cred);
/*
 * afs_gn_fid -- gnode "fid": returns the file identifier for vp via afs_fid().
 */
222 afs_gn_fid(struct vnode *vp,
228 AFS_STATCNT(afs_gn_fid);
229 error = afs_fid(vp, fidp);
230 afs_Trace3(afs_iclSetp, CM_TRACE_GFID, ICL_TYPE_POINTER, vp,
231 ICL_TYPE_LONG, (afs_int32) fidp, ICL_TYPE_LONG, error);
/*
 * afs_gn_open -- gnode "open": checks access for the requested flags,
 * honors FNSHARE exclusive-open semantics (waits on tvp->opens unless
 * FDELAY is set), calls afs_open(), applies FTRUNC via afs_setattr(),
 * and stores the caller's creds in *vinfop.  On a late failure it calls
 * afs_close() to keep open/writer counts balanced.
 */
237 afs_gn_open(struct vnode *vp,
240 struct ucred **vinfop,
245 struct vcache *tvp = VTOAFS(vp);
249 AFS_STATCNT(afs_gn_open);
255 if ((flags & FWRITE) || (flags & FTRUNC))
/* FNSHARE open: block (or fail, with FDELAY) while the file has other opens */
258 while ((flags & FNSHARE) && tvp->opens) {
259 if (!(flags & FDELAY)) {
263 afs_osi_Sleep(&tvp->opens);
266 error = afs_access(VTOAFS(vp), modes, cred);
271 error = afs_open((struct vcache **) &vp, flags, cred);
273 if (flags & FTRUNC) {
276 error = afs_setattr(VTOAFS(vp), &va, cred);
280 tvp->f.states |= CNSHARE;
283 *vinfop = cred; /* fp->f_vinfo is like fp->f_cred in suns */
285 /* an error occurred; we've told CM that the file
286 * is open, so close it now so that open and
287 * writer counts are correct. Ignore error code,
288 * as it is likely to fail (the setattr just did).
290 afs_close(vp, flags, cred);
295 afs_Trace3(afs_iclSetp, CM_TRACE_GOPEN, ICL_TYPE_POINTER, vp,
296 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
/*
 * afs_gn_create -- gnode "create": builds the umask-adjusted vattr, calls
 * afs_create(), then (except for NFS-translator requests, detected via
 * cred->cr_luid == RMTUSER_REQ) performs the follow-up afs_open() that BSD
 * copen() would normally do after vop_create.
 */
302 afs_gn_create(struct vnode *dp,
307 struct ucred **vinfop, /* return ptr for fp->f_vinfo, used as fp->f_cred */
312 enum vcexcl exclusive;
313 int error, modes = 0;
317 AFS_STATCNT(afs_gn_create);
318 if ((flags & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))
324 va.va_mode = (mode & 07777) & ~get_umask();
329 if ((flags & FWRITE) || (flags & FTRUNC))
331 error = afs_create(VTOAFS(dp), name, &va, exclusive, modes, (struct vcache **)vpp, cred);
335 /* 'cr_luid' is a flag (when it comes thru the NFS server it's set to
336 * RMTUSER_REQ) that determines if we should call afs_open(). We shouldn't
337 * call it for NFS traffic since the close will never happen and thus
338 * we'd never flush the files out to the server! Gross, but the simplest
339 * solution we came up with */
340 if (cred->cr_luid != RMTUSER_REQ) {
/* same FNSHARE wait as in afs_gn_open, applied to the new vnode */
341 while ((flags & FNSHARE) && VTOAFS(*vpp)->opens) {
342 if (!(flags & FDELAY))
344 afs_osi_Sleep(&VTOAFS(*vpp)->opens);
346 /* Since in the standard copen() for bsd vnode kernels they do an
347 * vop_open after the vop_create, we must do the open here since there
348 * are stuff in afs_open that we need. For example advance the
349 * execsOrWriters flag (else we'll be treated as the sun's "core"
351 *vinfop = cred; /* save user creds in fp->f_vinfo */
352 error = afs_open((struct vcache **)vpp, flags, cred);
354 afs_Trace4(afs_iclSetp, CM_TRACE_GCREATE, ICL_TYPE_POINTER, dp,
355 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG,
/*
 * afs_gn_hold -- gnode "hold" (reference-count bump); body elided in this
 * extract beyond the stats counter.
 */
362 afs_gn_hold(struct vnode *vp)
364 AFS_STATCNT(afs_gn_hold);
/*
 * afs_gn_rele -- gnode "release": drops one vnode reference.  Panics if the
 * count is already zero; on the last reference, clears the CPageHog state
 * bit and calls afs_inactive().
 */
372 afs_gn_rele(struct vnode *vp)
374 struct vcache *vcp = VTOAFS(vp);
377 AFS_STATCNT(afs_gn_rele);
378 if (vp->v_count == 0)
379 osi_Panic("afs_rele: zero v_count");
380 if (--(vp->v_count) == 0) {
381 if (vcp->f.states & CPageHog) {
383 vcp->f.states &= ~CPageHog;
385 error = afs_inactive(vp, 0);
/*
 * afs_gn_close -- gnode "close": for FNSHARE closes, clears CNSHARE and
 * wakes any opener sleeping on tvp->opens (see afs_gn_open), then calls
 * afs_close().
 */
392 afs_gn_close(struct vnode *vp,
394 caddr_t vinfo, /* Ignored in AFS */
398 struct vcache *tvp = VTOAFS(vp);
401 AFS_STATCNT(afs_gn_close);
403 if (flags & FNSHARE) {
404 tvp->f.states &= ~CNSHARE;
405 afs_osi_Wakeup(&tvp->opens);
408 error = afs_close(vp, flags, cred);
409 afs_Trace3(afs_iclSetp, CM_TRACE_GCLOSE, ICL_TYPE_POINTER, (afs_int32) vp,
410 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
/*
 * afs_gn_map -- gnode "map": verifies the vcache, flushes stale pages,
 * marks the vcache CMAPPED, creates an AIX VM segment for the file (via
 * vms_create) if needed, bumps the gnode read/write mapping counters
 * according to the SHM_RDONLY flag, and stashes the caller's creds for
 * later async I/O.
 */
416 afs_gn_map(struct vnode *vp,
423 struct vcache *vcp = VTOAFS(vp);
424 struct vrequest treq;
428 afs_int32 flag = Flag;
430 AFS_STATCNT(afs_gn_map);
432 if (error = afs_InitReq(&treq, cred))
434 error = afs_VerifyVCache(vcp, &treq);
436 return afs_CheckCode(error, &treq, 49);
438 osi_FlushPages(vcp, cred); /* XXX ensure old pages are gone XXX */
439 ObtainWriteLock(&vcp->lock, 401);
440 vcp->f.states |= CMAPPED; /* flag cleared at afs_inactive */
442 * We map the segment into our address space using the handle returned by vm_create.
/* cap the segment length at the 32-bit VM mapping boundary on 64-bit clients */
445 afs_uint32 tlen = vcp->f.m.Length;
446 #ifdef AFS_64BIT_CLIENT
447 if (vcp->f.m.Length > afs_vmMappingEnd)
448 tlen = afs_vmMappingEnd;
450 /* Consider V_INTRSEG too for interrupts */
452 vms_create(&vcp->segid, V_CLIENT, (dev_t) vcp->v.v_gnode, tlen, 0, 0)) {
453 ReleaseWriteLock(&vcp->lock);
456 #ifdef AFS_64BIT_KERNEL
457 vcp->vmh = vm_handle(vcp->segid, (int32long64_t) 0);
459 vcp->vmh = SRVAL(vcp->segid, 0, 0);
462 vcp->v.v_gnode->gn_seg = vcp->segid; /* XXX Important XXX */
463 if (flag & SHM_RDONLY) {
464 vp->v_gnode->gn_mrdcnt++;
466 vp->v_gnode->gn_mwrcnt++;
469 * We keep the caller's credentials since an async daemon will handle the
470 * request at some point. We assume that the same credentials will be used.
472 if (!vcp->credp || (vcp->credp != cred)) {
475 struct ucred *crp = vcp->credp;
481 ReleaseWriteLock(&vcp->lock);
483 afs_Trace4(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, vp,
484 ICL_TYPE_LONG, addr, ICL_TYPE_LONG, len, ICL_TYPE_LONG, off);
/*
 * afs_gn_unmap -- gnode "unmap": decrements the gnode read or write mapping
 * counter (chosen by SHM_RDONLY), clamping it at zero, under the vcache
 * write lock.
 */
490 afs_gn_unmap(struct vnode *vp,
494 struct vcache *vcp = VTOAFS(vp);
495 AFS_STATCNT(afs_gn_unmap);
496 ObtainWriteLock(&vcp->lock, 402);
497 if (flag & SHM_RDONLY) {
498 vp->v_gnode->gn_mrdcnt--;
499 if (vp->v_gnode->gn_mrdcnt <= 0)
500 vp->v_gnode->gn_mrdcnt = 0;
502 vp->v_gnode->gn_mwrcnt--;
503 if (vp->v_gnode->gn_mwrcnt <= 0)
504 vp->v_gnode->gn_mwrcnt = 0;
506 ReleaseWriteLock(&vcp->lock);
/*
 * afs_gn_access -- gnode "access": primary check via afs_access(); for
 * ACC_OTHERS/ACC_ANY/ACC_ALL "who" values it additionally fetches the
 * attributes and tests the owner/group/other permission triads of
 * va_mode against the requested mode bits.
 */
514 afs_gn_access(struct vnode *vp,
524 AFS_STATCNT(afs_gn_access);
530 error = afs_access(VTOAFS(vp), mode, cred);
532 /* Additional testing */
533 if (who == ACC_OTHERS || who == ACC_ANY) {
534 error = afs_getattr(VTOAFS(vp), &vattr, cred);
536 if (who == ACC_ANY) {
537 if (((vattr.va_mode >> 6) & mode) == mode) {
542 if (((vattr.va_mode >> 3) & mode) == mode)
547 } else if (who == ACC_ALL) {
548 error = afs_getattr(VTOAFS(vp), &vattr, cred);
/* ACC_ALL: every one of owner, group, and other triads must grant mode */
550 if ((!((vattr.va_mode >> 6) & mode))
551 || (!((vattr.va_mode >> 3) & mode))
552 || (!(vattr.va_mode & mode)))
561 afs_Trace3(afs_iclSetp, CM_TRACE_GACCESS, ICL_TYPE_POINTER, vp,
562 ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
/*
 * afs_gn_getattr -- gnode "getattr": thin wrapper over afs_getattr() plus
 * an ICL trace entry.
 */
568 afs_gn_getattr(struct vnode *vp,
569 struct vattr *vattrp,
574 AFS_STATCNT(afs_gn_getattr);
575 error = afs_getattr(VTOAFS(vp), vattrp, cred);
576 afs_Trace2(afs_iclSetp, CM_TRACE_GGETATTR, ICL_TYPE_POINTER, vp,
577 ICL_TYPE_LONG, error);
/*
 * afs_gn_setattr -- gnode "setattr": translates the AIX T_* attribute flags
 * (owner/group/times) into a vattr and applies it via afs_setattr().
 * T_SETTIME uses the current kernel time; otherwise atime/mtime come from
 * the timestruc_t pointers in arg2/arg3.
 */
583 afs_gn_setattr(struct vnode *vp,
593 AFS_STATCNT(afs_gn_setattr);
601 if ((arg1 & T_OWNER_AS_IS) == 0)
603 if ((arg1 & T_GROUP_AS_IS) == 0)
608 error = afs_access(vp, VWRITE, cred);
612 if (arg1 & T_SETTIME) {
613 va.va_atime.tv_sec = time;
614 va.va_mtime.tv_sec = time;
616 va.va_atime = *(struct timestruc_t *)arg2;
617 va.va_mtime = *(struct timestruc_t *)arg3;
625 error = afs_setattr(VTOAFS(vp), &va, cred);
627 afs_Trace2(afs_iclSetp, CM_TRACE_GSETATTR, ICL_TYPE_POINTER, vp,
628 ICL_TYPE_LONG, error);
/* Page-sized buffer of zeroes used by afs_gn_fclear to overwrite file ranges. */
633 char zero_buffer[PAGESIZE];
/*
 * afs_gn_fclear -- gnode "fclear": zero-fills [offset, offset+length) of the
 * file.  Refuses to clear past the process ulimit, flushes and waits on all
 * VM pages for the segment first, then loops writing page-sized slices of
 * zero_buffer through afs_rdwr().
 */
635 afs_gn_fclear(struct vnode *vp,
642 int i, len, error = 0;
645 static int fclear_init = 0;
646 struct vcache *avc = VTOAFS(vp);
648 AFS_STATCNT(afs_gn_fclear);
650 memset(zero_buffer, 0, PAGESIZE);
654 * Don't clear past ulimit
656 if (offset + length > get_ulimit())
659 /* Flush all pages first */
662 vm_flushp(avc->segid, 0, MAXFSIZE / PAGESIZE - 1);
663 vms_iowait(avc->segid);
666 uio.afsio_offset = offset;
/* uio offset advances inside afs_rdwr; loop until the whole range is zeroed */
667 for (i = offset; i < offset + length; i = uio.afsio_offset) {
668 len = offset + length - i;
669 iov.iov_len = (len > PAGESIZE) ? PAGESIZE : len;
670 iov.iov_base = zero_buffer;
671 uio.afsio_iov = &iov;
672 uio.afsio_iovcnt = 1;
673 uio.afsio_seg = AFS_UIOSYS;
674 uio.afsio_resid = iov.iov_len;
675 if (error = afs_rdwr(VTOAFS(vp), &uio, UIO_WRITE, 0, cred))
678 afs_Trace4(afs_iclSetp, CM_TRACE_GFCLEAR, ICL_TYPE_POINTER, vp,
679 ICL_TYPE_LONG, offset, ICL_TYPE_LONG, length, ICL_TYPE_LONG,
/*
 * afs_gn_fsync -- gnode "fsync": delegates to afs_fsync(); the flags and
 * vinfo arguments are unused by AFS.
 */
686 afs_gn_fsync(struct vnode *vp,
687 int32long64_t flags, /* Not used by AFS */
688 int32long64_t vinfo, /* Not used by AFS */
693 AFS_STATCNT(afs_gn_fsync);
694 error = afs_fsync(vp, cred);
695 afs_Trace3(afs_iclSetp, CM_TRACE_GFSYNC, ICL_TYPE_POINTER, vp,
696 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
/*
 * afs_gn_ftrunc -- gnode "ftrunc": truncates/extends the file by setting
 * its length through afs_setattr().
 */
702 afs_gn_ftrunc(struct vnode *vp,
711 AFS_STATCNT(afs_gn_ftrunc);
714 error = afs_setattr(VTOAFS(vp), &va, cred);
715 afs_Trace4(afs_iclSetp, CM_TRACE_GFTRUNC, ICL_TYPE_POINTER, vp,
716 ICL_TYPE_LONG, flags, ICL_TYPE_OFFSET,
717 ICL_HANDLE_OFFSET(length), ICL_TYPE_LONG, error);
721 /* Min size of a file which is dumping core before we declare it a page hog. */
722 #define MIN_PAGE_HOG_SIZE 8388608
/*
 * afs_gn_rdwr -- gnode "rdwr": top-level read/write entry point.  Fails
 * writes early if the vcache already carries a stored error (vc_error),
 * stashes/dupes the caller's credentials on the vcache, verifies the
 * vcache and flushes stale pages, performs NFS-translator access checks,
 * brackets writes with CDirty marking and afs_FakeClose bookkeeping, and
 * does the actual transfer through afs_vm_rdwr().  Optionally refreshes
 * *vattrp on success.
 */
725 afs_gn_rdwr(struct vnode *vp,
729 ext_t ext, /* Ignored in AFS */
730 caddr_t vinfo, /* Ignored in AFS */
731 struct vattr *vattrp,
734 struct vcache *vcp = VTOAFS(vp);
735 struct vrequest treq;
740 AFS_STATCNT(afs_gn_rdwr);
/* a previously stored write error is reported now, before any new write */
743 if (op == UIO_WRITE) {
744 afs_Trace2(afs_iclSetp, CM_TRACE_GRDWR1, ICL_TYPE_POINTER, vp,
745 ICL_TYPE_LONG, vcp->vc_error);
746 return vcp->vc_error;
751 ObtainSharedLock(&vcp->lock, 507);
753 * We keep the caller's credentials since an async daemon will handle the
754 * request at some point. We assume that the same credentials will be used.
755 * If this is being called from an NFS server thread, then dupe the
756 * cred and only use that copy in calls and for the stash.
758 if (!vcp->credp || (vcp->credp != cred)) {
759 #ifdef AFS_AIX_IAUTH_ENV
760 if (AFS_NFSXLATORREQ(cred)) {
761 /* Must be able to use cred later, so dupe it so that nfs server
762 * doesn't overwrite its contents.
768 crhold(cred); /* Bump refcount for reference in vcache */
772 UpgradeSToWLock(&vcp->lock, 508);
775 ConvertWToSLock(&vcp->lock);
780 ReleaseSharedLock(&vcp->lock);
783 * XXX Is the following really required?? XXX
785 if (error = afs_InitReq(&treq, cred))
787 if (error = afs_VerifyVCache(vcp, &treq))
788 return afs_CheckCode(error, &treq, 50);
789 osi_FlushPages(vcp, cred); /* Flush old pages */
791 if (AFS_NFSXLATORREQ(cred)) {
794 if (op == UIO_READ) {
796 (vcp, PRSFS_READ, &treq,
797 CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) {
806 * We have to bump the open/exwriters field here courtesy of the nfs xlator
807 * because there're no open/close nfs rpcs to call our afs_open/close.
808 * We do a similar thing on the afs_read/write interface.
810 if (op == UIO_WRITE) {
811 #ifdef AFS_64BIT_CLIENT
812 if (ubuf->afsio_offset < afs_vmMappingEnd) {
813 #endif /* AFS_64BIT_CLIENT */
814 ObtainWriteLock(&vcp->lock, 240);
815 vcp->f.states |= CDirty; /* Set the dirty bit */
817 ReleaseWriteLock(&vcp->lock);
818 #ifdef AFS_64BIT_CLIENT
820 #endif /* AFS_64BIT_CLIENT */
823 error = afs_vm_rdwr(vp, ubuf, op, flags, cred);
825 if (op == UIO_WRITE) {
826 #ifdef AFS_64BIT_CLIENT
827 if (ubuf->afsio_offset < afs_vmMappingEnd) {
828 #endif /* AFS_64BIT_CLIENT */
829 ObtainWriteLock(&vcp->lock, 241);
830 afs_FakeClose(vcp, cred); /* XXXX For nfs trans and cores XXXX */
831 ReleaseWriteLock(&vcp->lock);
832 #ifdef AFS_64BIT_CLIENT
834 #endif /* AFS_64BIT_CLIENT */
836 if (vattrp != NULL && error == 0)
837 afs_gn_getattr(vp, vattrp, cred);
839 afs_Trace4(afs_iclSetp, CM_TRACE_GRDWR, ICL_TYPE_POINTER, vp,
840 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, op, ICL_TYPE_LONG, error);
/* Flush to disk after this many chunks have been queued to the VM system. */
847 #define AFS_MAX_VM_CHUNKS 10
/*
 * afs_vm_rdwr -- performs the read/write through the AIX VM subsystem
 * (vm_move on the file's VM segment).  Handles IO_APPEND, negative-offset
 * and ulimit checks; on 64-bit clients, transfers beyond afs_vmMappingEnd
 * are routed to afs_direct_rdwr(), including the split case where a buffer
 * crosses the mapping boundary.  Writes are chunked: dirty chunks are
 * flushed via afs_DoPartialWrite/vm_writep, with vms_iowait every
 * AFS_MAX_VM_CHUNKS chunks.  Reads may kick off prefetch of the next chunk.
 * A stored vcache error (vc_error) makes the write appear to have written
 * nothing so the caller sees EDQUOT/ENOSPC.
 */
849 afs_vm_rdwr(struct vnode *vp,
858 afs_size_t fileSize, xfrOffset, offset, old_offset, xfrSize;
860 #ifdef AFS_64BIT_CLIENT
861 afs_size_t finalOffset;
864 afs_size_t add2resid = 0;
865 #endif /* AFS_64BIT_CLIENT */
866 struct vcache *vcp = VTOAFS(vp);
868 afs_size_t start_offset;
869 afs_int32 save_resid = uiop->afsio_resid;
870 int first_page, last_page, pages;
873 struct vrequest treq;
875 if (code = afs_InitReq(&treq, credp))
878 /* special case easy transfer; apparently a lot are done */
879 if ((xfrSize = uiop->afsio_resid) == 0)
882 ObtainReadLock(&vcp->lock);
883 fileSize = vcp->f.m.Length;
884 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
885 uiop->afsio_offset = fileSize;
887 /* compute xfrOffset now, and do some checks */
888 xfrOffset = uiop->afsio_offset;
889 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
891 ReleaseReadLock(&vcp->lock);
894 #ifndef AFS_64BIT_CLIENT
895 /* check for "file too big" error, which should really be done above us */
896 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
898 ReleaseReadLock(&vcp->lock);
901 #endif /* AFS_64BIT_CLIENT */
903 #ifdef AFS_64BIT_CLIENT
904 if (xfrOffset + xfrSize > afs_vmMappingEnd) {
905 if (rw == UIO_READ) {
906 /* don't read past EOF */
907 if (xfrSize+xfrOffset > fileSize) {
908 add2resid = xfrSize + xfrOffset - fileSize;
909 xfrSize = fileSize - xfrOffset;
911 ReleaseReadLock(&vcp->lock);
915 afsio_trim(uiop, txfrSize);
918 if (xfrOffset < afs_vmMappingEnd) {
919 /* special case of a buffer crossing the VM mapping line */
921 struct iovec tvec[16]; /* Should have access to #define */
925 finalOffset = xfrOffset + xfrSize;
926 tsize = (afs_size_t) (xfrOffset + xfrSize - afs_vmMappingEnd);
928 afsio_copy(uiop, &tuio, tvec);
929 afsio_skip(&tuio, txfrSize - tsize);
930 afsio_trim(&tuio, tsize);
931 tuio.afsio_offset = afs_vmMappingEnd;
932 ReleaseReadLock(&vcp->lock);
933 ObtainWriteLock(&vcp->lock, 243);
934 afs_FakeClose(vcp, credp); /* XXXX For nfs trans and cores XXXX */
935 ReleaseWriteLock(&vcp->lock);
936 code = afs_direct_rdwr(vp, &tuio, rw, ioflag, credp);
937 ObtainWriteLock(&vcp->lock, 244);
938 afs_FakeOpen(vcp); /* XXXX For nfs trans and cores XXXX */
939 ReleaseWriteLock(&vcp->lock);
942 ObtainReadLock(&vcp->lock);
/* below-the-line part of the split transfer continues through VM */
943 xfrSize = afs_vmMappingEnd - xfrOffset;
945 afsio_trim(uiop, txfrSize);
947 ReleaseReadLock(&vcp->lock);
948 code = afs_direct_rdwr(vp, uiop, rw, ioflag, credp);
949 uiop->uio_resid += add2resid;
953 #endif /* AFS_64BIT_CLIENT */
/* create the VM segment on first use, same pattern as afs_gn_map */
956 afs_uint32 tlen = vcp->f.m.Length;
957 #ifdef AFS_64BIT_CLIENT
958 if (vcp->f.m.Length > afs_vmMappingEnd)
959 tlen = afs_vmMappingEnd;
961 /* Consider V_INTRSEG too for interrupts */
963 vms_create(&vcp->segid, V_CLIENT, (dev_t) vcp->v.v_gnode, tlen, 0, 0)) {
964 ReleaseReadLock(&vcp->lock);
967 #ifdef AFS_64BIT_KERNEL
968 vcp->vmh = vm_handle(vcp->segid, (int32long64_t) 0);
970 vcp->vmh = SRVAL(vcp->segid, 0, 0);
973 vcp->v.v_gnode->gn_seg = vcp->segid;
974 if (rw == UIO_READ) {
975 ReleaseReadLock(&vcp->lock);
976 /* don't read past EOF */
977 if (xfrSize + xfrOffset > fileSize)
978 xfrSize = fileSize - xfrOffset;
981 #ifdef AFS_64BIT_CLIENT
983 uiop->afsio_offset = xfrOffset;
984 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE, ICL_TYPE_POINTER, vcp,
985 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrOffset),
986 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrSize));
989 code = vm_move(vcp->segid, toffset, txfrSize, rw, uiop);
990 #else /* AFS_64BIT_CLIENT */
992 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
993 #endif /* AFS_64BIT_CLIENT */
996 * If at a chunk boundary and staying within chunk,
997 * start prefetch of next chunk.
999 if (counter == 0 || AFS_CHUNKOFFSET(xfrOffset) == 0
1000 && xfrSize <= AFS_CHUNKSIZE(xfrOffset)) {
1001 ObtainWriteLock(&vcp->lock, 407);
1002 tdc = afs_FindDCache(vcp, xfrOffset);
1004 if (!(tdc->mflags & DFNextStarted))
1005 afs_PrefetchChunk(vcp, tdc, credp, &treq);
1008 ReleaseWriteLock(&vcp->lock);
1010 #ifdef AFS_64BIT_CLIENT
1012 uiop->afsio_offset = finalOffset;
1014 uiop->uio_resid += add2resid;
1015 #endif /* AFS_64BIT_CLIENT */
/* UIO_WRITE path starts here */
1020 start_offset = uiop->afsio_offset;
1021 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE, ICL_TYPE_POINTER, vcp,
1022 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(start_offset),
1023 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrSize));
1024 ReleaseReadLock(&vcp->lock);
1025 ObtainWriteLock(&vcp->lock, 400);
1026 vcp->f.m.Date = osi_Time(); /* Set file date (for ranlib) */
1028 /* un-protect last page. */
1029 last_page = vcp->f.m.Length / PAGESIZE;
1030 #ifdef AFS_64BIT_CLIENT
1031 if (vcp->f.m.Length > afs_vmMappingEnd)
1032 last_page = afs_vmMappingEnd / PAGESIZE;
1034 vm_protectp(vcp->segid, last_page, 1, FILEKEY);
1035 if (xfrSize + xfrOffset > fileSize) {
1036 vcp->f.m.Length = xfrSize + xfrOffset;
1038 if ((!(vcp->f.states & CPageHog)) && (xfrSize >= MIN_PAGE_HOG_SIZE)) {
1040 vcp->f.states |= CPageHog;
1042 ReleaseWriteLock(&vcp->lock);
1044 /* If the write will fit into a single chunk we'll write all of it
1045 * at once. Otherwise, we'll write one chunk at a time, flushing
1046 * some of it to disk.
1050 /* Only create a page to avoid excess VM access if we're writing a
1051 * small file which is either new or completely overwrites the
1054 if ((xfrOffset == 0) && (xfrSize < PAGESIZE) && (xfrSize >= fileSize)
1055 && (vcp->v.v_gnode->gn_mwrcnt == 0)
1056 && (vcp->v.v_gnode->gn_mrdcnt == 0)) {
1057 (void)vm_makep(vcp->segid, 0);
1060 while (xfrSize > 0) {
1061 offset = AFS_CHUNKBASE(xfrOffset);
1064 if (AFS_CHUNKSIZE(xfrOffset) <= len)
1066 (afs_size_t) AFS_CHUNKSIZE(xfrOffset) - (xfrOffset - offset);
1068 if (len == xfrSize) {
1069 /* All data goes to this one chunk. */
1071 old_offset = uiop->afsio_offset;
1072 #ifdef AFS_64BIT_CLIENT
1073 uiop->afsio_offset = xfrOffset;
1074 toffset = xfrOffset;
1076 code = vm_move(vcp->segid, toffset, txfrSize, rw, uiop);
1077 #else /* AFS_64BIT_CLIENT */
1078 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
1079 #endif /* AFS_64BIT_CLIENT */
1087 /* Write just one chunk's worth of data. */
1089 struct iovec tvec[16]; /* Should have access to #define */
1091 /* Purge dirty chunks of file if there are too many dirty chunks.
1092 * Inside the write loop, we only do this at a chunk boundary.
1093 * Clean up partial chunk if necessary at end of loop.
1095 if (counter > 0 && code == 0 && xfrOffset == offset) {
1096 ObtainWriteLock(&vcp->lock, 403);
1097 if (xfrOffset > vcp->f.m.Length)
1098 vcp->f.m.Length = xfrOffset;
1099 code = afs_DoPartialWrite(vcp, &treq);
1100 vcp->f.states |= CDirty;
1101 ReleaseWriteLock(&vcp->lock);
1108 afsio_copy(uiop, &tuio, tvec);
1109 afsio_trim(&tuio, len);
1110 tuio.afsio_offset = xfrOffset;
1113 old_offset = uiop->afsio_offset;
1114 #ifdef AFS_64BIT_CLIENT
1115 toffset = xfrOffset;
1116 code = vm_move(vcp->segid, toffset, len, rw, &tuio);
1117 #else /* AFS_64BIT_CLIENT */
1118 code = vm_move(vcp->segid, xfrOffset, len, rw, &tuio);
1119 #endif /* AFS_64BIT_CLIENT */
1121 len -= tuio.afsio_resid;
1122 if (code || (len <= 0)) {
1123 code = code ? code : EINVAL;
1126 afsio_skip(uiop, len);
/* compute the page span just written so it can be pushed with vm_writep */
1131 first_page = (afs_size_t) old_offset >> PGSHIFT;
1133 1 + (((afs_size_t) old_offset + (len - 1)) >> PGSHIFT) -
1135 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE2, ICL_TYPE_POINTER, vcp,
1136 ICL_TYPE_INT32, first_page, ICL_TYPE_INT32, pages);
1138 code = vm_writep(vcp->segid, first_page, pages);
1143 if (++count > AFS_MAX_VM_CHUNKS) {
1145 code = vms_iowait(vcp->segid);
1147 /* cache device failure? */
1158 code = vms_iowait(vcp->segid);
1161 /* cache device failure? */
1166 ObtainWriteLock(&vcp->lock, 242);
1167 if (code == 0 && (vcp->f.states & CDirty)) {
1168 code = afs_DoPartialWrite(vcp, &treq);
1170 vm_protectp(vcp->segid, last_page, 1, RDONLY);
1171 ReleaseWriteLock(&vcp->lock);
1173 /* If requested, fsync the file after every write */
1175 afs_fsync(vp, credp);
1177 ObtainReadLock(&vcp->lock);
1178 if (vcp->vc_error) {
1179 /* Pretend we didn't write anything. We need to get the error back to
1180 * the user. If we don't it's possible for a quota error for this
1181 * write to succeed and the file to be closed without the user ever
1182 * having seen the store error. And AIX syscall clears the error if
1183 * anything was written.
1185 code = vcp->vc_error;
1186 if (code == EDQUOT || code == ENOSPC)
1187 uiop->afsio_resid = save_resid;
1189 #ifdef AFS_64BIT_CLIENT
1191 uiop->afsio_offset = finalOffset;
1193 #endif /* AFS_64BIT_CLIENT */
1194 ReleaseReadLock(&vcp->lock);
1197 afs_Trace2(afs_iclSetp, CM_TRACE_VMWRITE3, ICL_TYPE_POINTER, vcp,
1198 ICL_TYPE_INT32, code);
/*
 * afs_direct_rdwr -- transfer that bypasses the VM segment and goes straight
 * through afs_rdwr().  Used for the portion of a 64-bit client transfer that
 * lies beyond afs_vmMappingEnd.  Performs the same IO_APPEND / negative
 * offset / ulimit checks as afs_vm_rdwr, updates m.Date/m.Length for writes,
 * and triggers afs_DoPartialWrite when the write crosses a chunk boundary.
 */
1204 afs_direct_rdwr(struct vnode *vp,
1208 struct ucred *credp)
1211 afs_size_t fileSize, xfrOffset, offset, old_offset, xfrSize;
1212 struct vcache *vcp = VTOAFS(vp);
1213 afs_int32 save_resid = uiop->afsio_resid;
1214 struct vrequest treq;
1216 if (code = afs_InitReq(&treq, credp))
1219 /* special case easy transfer; apparently a lot are done */
1220 if ((xfrSize = uiop->afsio_resid) == 0)
1223 ObtainReadLock(&vcp->lock);
1224 fileSize = vcp->f.m.Length;
1225 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
1226 uiop->afsio_offset = fileSize;
1228 /* compute xfrOffset now, and do some checks */
1229 xfrOffset = uiop->afsio_offset;
1230 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
1232 ReleaseReadLock(&vcp->lock);
1236 /* check for "file too big" error, which should really be done above us */
1238 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
1240 ReleaseReadLock(&vcp->lock);
1244 ReleaseReadLock(&vcp->lock);
1245 if (rw == UIO_WRITE) {
1246 ObtainWriteLock(&vcp->lock, 400);
1247 vcp->f.m.Date = osi_Time(); /* Set file date (for ranlib) */
1249 if (xfrSize + xfrOffset > fileSize)
1250 vcp->f.m.Length = xfrSize + xfrOffset;
1251 ReleaseWriteLock(&vcp->lock);
1253 afs_Trace3(afs_iclSetp, CM_TRACE_DIRECTRDWR, ICL_TYPE_POINTER, vp,
1254 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(uiop->afsio_offset),
1255 ICL_TYPE_LONG, uiop->afsio_resid);
1256 code = afs_rdwr(VTOAFS(vp), uiop, rw, ioflag, credp);
1258 uiop->afsio_resid = save_resid;
1260 uiop->afsio_offset = xfrOffset + xfrSize;
1261 if (uiop->afsio_resid > 0) {
1262 /* should zero here the remaining buffer */
1263 uiop->afsio_resid = 0;
1265 /* Purge dirty chunks of file if there are too many dirty chunks.
1266 * Inside the write loop, we only do this at a chunk boundary.
1267 * Clean up partial chunk if necessary at end of loop.
1269 if (AFS_CHUNKBASE(uiop->afsio_offset) != AFS_CHUNKBASE(xfrOffset)) {
1270 ObtainWriteLock(&vcp->lock, 402);
1271 code = afs_DoPartialWrite(vcp, &treq);
1272 vcp->f.states |= CDirty;
1273 ReleaseWriteLock(&vcp->lock);
/*
 * lock_normalize -- converts a struct flock's l_start to an absolute file
 * offset based on l_whence (current offset for SEEK_CUR, file size via
 * afs_getattr for SEEK_END), then forces l_whence to 0 (SEEK_SET).
 */
1283 lock_normalize(struct vnode *vp,
1284 struct flock *lckdat,
1291 switch (lckdat->l_whence) {
1295 lckdat->l_start += (off_t) offset;
1298 code = afs_getattr(VTOAFS(vp), &vattr, cred);
1301 lckdat->l_start += (off_t) vattr.va_size;
1306 lckdat->l_whence = 0;
/*
 * afs_gn_lockctl -- gnode record-locking entry point.  Translates the AIX
 * eflock/cmd representation into a standard struct flock (checking that the
 * 32/64-bit start/len values survive the copy), normalizes the range via
 * lock_normalize(), calls afs_lockctl(), and copies the possibly-updated
 * lock description back into the caller's eflock.
 */
1313 afs_gn_lockctl(struct vnode *vp,
1315 struct eflock *lckdat,
1317 int (*ignored_fcn) (),
1318 #ifdef AFS_AIX52_ENV /* Changed in AIX 5.2 and up */
1320 #else /* AFS_AIX52_ENV */
1321 ulong32int64_t * ignored_id,
1322 #endif /* AFS_AIX52_ENV */
1325 int error, ncmd = 0;
1327 struct vattr *attrs;
1329 AFS_STATCNT(afs_gn_lockctl);
1330 /* Convert from AIX's cmd to standard lockctl lock types... */
1333 else if (cmd & SETFLCK) {
1338 flkd.l_type = lckdat->l_type;
1339 flkd.l_whence = lckdat->l_whence;
1340 flkd.l_start = lckdat->l_start;
1341 flkd.l_len = lckdat->l_len;
1342 flkd.l_pid = lckdat->l_pid;
1343 flkd.l_sysid = lckdat->l_sysid;
/* reject ranges that were truncated by the eflock -> flock narrowing */
1345 if (flkd.l_start != lckdat->l_start || flkd.l_len != lckdat->l_len)
1347 if (error = lock_normalize(vp, &flkd, offset, cred))
1349 error = afs_lockctl(vp, &flkd, ncmd, cred);
1350 lckdat->l_type = flkd.l_type;
1351 lckdat->l_whence = flkd.l_whence;
1352 lckdat->l_start = flkd.l_start;
1353 lckdat->l_len = flkd.l_len;
1354 lckdat->l_pid = flkd.l_pid;
1355 lckdat->l_sysid = flkd.l_sysid;
1356 afs_Trace3(afs_iclSetp, CM_TRACE_GLOCKCTL, ICL_TYPE_POINTER, vp,
1357 ICL_TYPE_LONG, ncmd, ICL_TYPE_LONG, error);
1362 /* NOTE: In the nfs glue routine (nfs_gn2sun.c) the order was wrong (vp, flags, cmd, arg, ext); was that another typo? */
/*
 * afs_gn_ioctl -- gnode "ioctl": forwards to afs_ioctl(); flags, ext, and
 * creds are ignored here.
 */
1364 afs_gn_ioctl(struct vnode *vp,
1367 size_t flags, /* Ignored in AFS */
1368 ext_t ext, /* Ignored in AFS */
1369 struct ucred *crp) /* Ignored in AFS */
1374 AFS_STATCNT(afs_gn_ioctl);
1375 /* This seems to be a perfect fit for our ioctl redirection (afs_xioctl hack); thus the ioctl(2) entry in sysent.c is unaffected in the aix/afs port. */
1376 error = afs_ioctl(vp, cmd, arg);
1377 afs_Trace3(afs_iclSetp, CM_TRACE_GIOCTL, ICL_TYPE_POINTER, vp,
1378 ICL_TYPE_LONG, cmd, ICL_TYPE_LONG, error);
/*
 * afs_gn_readlink -- gnode "readlink": delegates to afs_readlink().
 */
1384 afs_gn_readlink(struct vnode *vp,
1390 AFS_STATCNT(afs_gn_readlink);
1391 error = afs_readlink(vp, uiop, cred);
1392 afs_Trace2(afs_iclSetp, CM_TRACE_GREADLINK, ICL_TYPE_POINTER, vp,
1393 ICL_TYPE_LONG, error);
/*
 * afs_gn_select -- gnode "select": not implemented; always EOPNOTSUPP.
 */
1399 afs_gn_select(struct vnode *vp,
1400 int32long64_t correl,
1407 AFS_STATCNT(afs_gn_select);
1408 /* NO SUPPORT for this in afs YET! */
1409 return (EOPNOTSUPP);
/*
 * afs_gn_symlink -- gnode "symlink": creates link -> target via afs_symlink().
 */
1414 afs_gn_symlink(struct vnode *vp,
1422 AFS_STATCNT(afs_gn_symlink);
1425 error = afs_symlink(vp, link, &va, target, cred);
1426 afs_Trace4(afs_iclSetp, CM_TRACE_GSYMLINK, ICL_TYPE_POINTER, vp,
1427 ICL_TYPE_STRING, link, ICL_TYPE_STRING, target, ICL_TYPE_LONG,
/*
 * afs_gn_readdir -- gnode "readdir": delegates to afs_readdir().
 */
1434 afs_gn_readdir(struct vnode *vp,
1440 AFS_STATCNT(afs_gn_readdir);
1441 error = afs_readdir(vp, uiop, cred);
1442 afs_Trace2(afs_iclSetp, CM_TRACE_GREADDIR, ICL_TYPE_POINTER, vp,
1443 ICL_TYPE_LONG, error);
/* Shared state for the async-buffer queue consumed by the I/O daemons. */
1448 extern Simple_lock afs_asyncbuf_lock;
1449 extern struct buf *afs_asyncbuf;
1450 extern int afs_asyncbuf_cv;
1453 * Buffers are ranked by age. A buffer's age is the value of afs_biotime
1454 * when the buffer is processed by afs_gn_strategy. afs_biotime is
1455 * incremented for each buffer. A buffer's age is kept in its av_back field.
1456 * The age ranking is used by the daemons, which favor older buffers.
1458 afs_int32 afs_biotime = 0;
1460 /* This function is called with a list of buffers, threaded through
1461 * the av_forw field. Our goal is to copy the list of buffers into the
1462 * afs_asyncbuf list, sorting buffers into sublists linked by the b_work field.
1463 * Among buffers within the same work group, the one with the lowest address
1464 * has to be located at the head of the queue; its b_bcount field will also
1465 * be increased to cover all of the buffers in the b_work queue.
1467 #define AIX_VM_BLKSIZE 8192
1468 /* Note: This function seems to be called as ddstrategy entry point, ie
1469 * has one argument. However, it also needs to be present as
1470 * vn_strategy entry point which has three arguments, but it seems to never
1471 * be called in that capacity (it would fail horribly due to the argument
1472 * mismatch). I'm confused, but it obviously has to be this way, maybe
1473 * some IBM people can shed some light on this
/*
 * afs_gn_strategy -- queues a chain of buffers (linked by av_forw) onto
 * afs_asyncbuf for the I/O daemons.  Each buffer is age-stamped
 * (afs_biotime in av_back), inserted sorted by b_blkno among its
 * EFS_COMPARABLE peers, and then repeatedly merged with adjacent
 * comparable buffers (same VM block, or both integral block multiples)
 * to build the largest asynchronous transfers possible.  Runs with
 * interrupts disabled via afs_asyncbuf_lock.
 */
1476 afs_gn_strategy(struct buf *abp)
1478 struct buf **lbp, *tbp;
1480 struct buf *nbp, *qbp, *qnbp, *firstComparable;
/* two buffers are mergeable only if same vnode, subspace, and flags */
1484 #define EFS_COMPARABLE(x,y) ((x)->b_vp == (y)->b_vp \
1485 && (x)->b_xmemd.subspace_id == (y)->b_xmemd.subspace_id \
1486 && (x)->b_flags == (y)->b_flags \
1487 && !((x)->b_flags & B_PFPROT) \
1488 && !((y)->b_flags & B_PFPROT))
1490 oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);
1491 for (tbp = abp; tbp; tbp = nbp) {
1492 nbp = tbp->av_forw; /* remember for later */
1494 tbp->av_back = (struct buf *)afs_biotime++;
1496 /* first insert the buffer into the afs_async queue. Insert buffer
1497 * sorted within its disk position within a set of comparable buffers.
1498 * Ensure that all comparable buffers are grouped contiguously.
1499 * Later on, we'll merge adjacent buffers into a single request.
1501 firstComparable = NULL;
1502 lbp = &afs_asyncbuf;
1503 for (qbp = *lbp; qbp; lbp = &qbp->av_forw, qbp = *lbp) {
1504 if (EFS_COMPARABLE(tbp, qbp)) {
1505 if (!firstComparable)
1506 firstComparable = qbp;
1507 /* this buffer is comparable, so see if the next buffer
1508 * is farther in the file; if it is insert before next buffer.
1510 if (tbp->b_blkno < qbp->b_blkno) {
1514 /* If we're at the end of a block of comparable buffers, we
1515 * insert the buffer here to keep all comparable buffers
1518 if (firstComparable)
1522 /* do the insert before qbp now */
1523 tbp->av_forw = *lbp;
1525 if (firstComparable == NULL) {
1526 /* next we're going to do all sorts of buffer merging tricks, but
1527 * here we know we're the only COMPARABLE block in the
1528 * afs_asyncbuf list, so we just skip that and continue with
1529 * the next input buffer.
1534 /* we may have actually added the "new" firstComparable */
1535 if (tbp->av_forw == firstComparable)
1536 firstComparable = tbp;
1538 * when we get here, firstComparable points to the first dude in the
1539 * same vnode and subspace that we (tbp) are in. We go through the
1540 * area of this list with COMPARABLE buffers (a contiguous region) and
1541 * repeatedly merge buffers that are contiguous and in the same block or
1542 * buffers that are contiguous and are both integral numbers of blocks.
1543 * Note that our end goal is to have as big blocks as we can, but we
1544 * must minimize the transfers that are not integral #s of blocks on
1545 * block boundaries, since Episode will do those smaller and/or
1546 * unaligned I/Os synchronously.
1548 * A useful example to consider has the async queue with this in it:
1549 * [8K block, 2 pages] [4K block, 1 page] [4K hole] [8K block, 2 pages]
1550 * If we get a request that fills the 4K hole, we want to merge this
1551 * whole mess into a 24K, 6 page transfer. If we don't, however, we
1552 * don't want to do any merging since adding the 4K transfer to the 8K
1553 * transfer makes the 8K transfer synchronous.
1555 * Note that if there are any blocks whose size is a multiple of
1556 * the file system block size, then we know that such blocks are also
1557 * on block boundaries.
1560 doMerge = 1; /* start the loop */
1561 while (doMerge) { /* loop until an iteration doesn't
1562 * make any more changes */
1564 for (qbp = firstComparable;; qbp = qnbp) {
1565 qnbp = qbp->av_forw;
1567 break; /* we're done */
1568 if (!EFS_COMPARABLE(qbp, qnbp))
1571 /* try to merge qbp and qnbp */
1573 /* first check if both not adjacent go on to next region */
1574 if ((dbtob(qbp->b_blkno) + qbp->b_bcount) !=
1575 dbtob(qnbp->b_blkno))
1578 /* note if both in the same block, the first byte of leftmost guy
1579 * and last byte of rightmost guy are in the same block.
1581 if ((dbtob(qbp->b_blkno) & ~(AIX_VM_BLKSIZE - 1)) ==
1582 ((dbtob(qnbp->b_blkno) + qnbp->b_bcount -
1583 1) & ~(AIX_VM_BLKSIZE - 1))) {
1584 doMerge = 1; /* both in same block */
1585 } else if ((qbp->b_bcount & (AIX_VM_BLKSIZE - 1)) == 0
1586 && (qnbp->b_bcount & (AIX_VM_BLKSIZE - 1)) == 0) {
1587 doMerge = 1; /* both integral #s of blocks */
1592 /* merge both of these blocks together */
1593 /* first set age to the older of the two */
1594 if ((int32long64_t) qnbp->av_back -
1595 (int32long64_t) qbp->av_back < 0) {
1596 qbp->av_back = qnbp->av_back;
1598 lwbp = (struct buf **) &qbp->b_work;
1599 /* find end of qbp's work queue */
1600 for (xbp = *lwbp; xbp;
1601 lwbp = (struct buf **) &xbp->b_work, xbp = *lwbp);
1603 * now setting *lwbp will change the last ptr in the qbp's
1606 qbp->av_forw = qnbp->av_forw; /* splice out qnbp */
1607 qbp->b_bcount += qnbp->b_bcount; /* fix count */
1608 *lwbp = qnbp; /* append qnbp to end */
1610 * note that qnbp is bogus, but it doesn't matter because
1611 * we're going to restart the for loop now.
1613 break; /* out of the for loop */
1617 } /* for loop for all interrupt data */
1618 /* at this point, all I/O has been queued. Wakeup the daemon */
1619 e_wakeup_one((int *)&afs_asyncbuf_cv);
1620 unlock_enable(oldPriority, &afs_asyncbuf_lock);
/*
 * afs_inactive -- vnode "inactive" hook: forwards the vcache and
 * credentials to the common AFS layer via afs_InactiveVCache() for
 * teardown of per-vnode cache state.
 * NOTE(review): this listing is elided (return type, braces, and any
 * surrounding lines are missing); comments only, code left byte-identical.
 */
1626 afs_inactive(struct vcache *avc,
1627 struct AFS_UCRED *acred)
1629 afs_InactiveVCache(avc, acred);
/*
 * afs_gn_revoke -- vn_revoke entry point. Revocation is not implemented
 * for AFS; the routine only bumps the statistics counter and reports
 * EOPNOTSUPP to the caller.
 * NOTE(review): listing is elided (some parameters/braces missing).
 */
1633 afs_gn_revoke(struct vnode *vp,
1636 struct vattr *vinfop,
1639 AFS_STATCNT(afs_gn_revoke);
1640 /* NO SUPPORT for this in afs YET! */
1641 return (EOPNOTSUPP);
/*
 * ACL/PCL entry points (afs_gn_getacl / afs_gn_setacl / afs_gn_getpcl /
 * afs_gn_setpcl) and the vn_seek entry point (afs_gn_seek).
 * NOTE(review): the listing is heavily elided here -- only the opening
 * signature lines and part of the afs_gn_seek comment survive.  The
 * bodies are not visible; do not infer their behavior from names alone.
 */
1645 afs_gn_getacl(struct vnode *vp,
1654 afs_gn_setacl(struct vnode *vp,
1663 afs_gn_getpcl(struct vnode *vp,
1672 afs_gn_setpcl(struct vnode *vp,
1681 afs_gn_seek(struct vnode* vp, offset_t * offp, struct ucred * crp)
/* Original rationale for vn_seek (per the surviving comment below): */
1684  * File systems which do not wish to do offset validation can simply
1685  * return 0. File systems which do not provide the vn_seek entry point
1686  * will have a maximum offset of OFF_MAX (2 gigabytes minus 1) enforced
1687  * by the logical file system.
/*
 * afs_gn_vnodeops -- the unlocked vnode-operations dispatch table for AFS
 * on AIX.  Slots with no AFS implementation point at afs_gn_enosys; the
 * function-pointer casts coerce afs_gn_enosys to each slot's exact
 * prototype.  AFS_AIX53_ENV adds the extended-attribute/xacl slots
 * (otherwise spares); AFS_AIX51_ENV appends the pager entry points.
 * NOTE(review): many initializer lines (the actual afs_gn_* entries) are
 * elided from this listing; only the casts, spares, and section comments
 * survive.  Code left byte-identical.
 */
1700 * declare a struct vnodeops and initialize it with ptrs to all functions
1702 struct vnodeops afs_gn_vnodeops = {
1703 /* creation/naming/deletion */
1710 /* lookup, file handle stuff */
1712 (int(*)(struct vnode*,struct fileid*,struct ucred*))
1714 /* access to files */
1715 (int(*)(struct vnode *, int32long64_t, ext_t, caddr_t *,struct ucred *))
1717 (int(*)(struct vnode *, struct vnode **, int32long64_t,caddr_t, int32long64_t, caddr_t *, struct ucred *))
1724 /* manipulate attributes of files */
1728 /* data update operations */
1741 (int(*)(struct vnode*,struct buf*,struct ucred*))
1743 /* security things */
1750 (int(*)(struct vnode *, int32long64_t, int32long64_t, offset_t, offset_t, struct ucred *))
1751 afs_gn_enosys, /* vn_fsync_range */
1752 (int(*)(struct vnode *, struct vnode **, int32long64_t, char *, struct vattr *, int32long64_t, caddr_t *, struct ucred *))
1753 afs_gn_enosys, /* vn_create_attr */
1754 (int(*)(struct vnode *, int32long64_t, void *, size_t, struct ucred *))
1755 afs_gn_enosys, /* vn_finfo */
1756 (int(*)(struct vnode *, caddr_t, offset_t, offset_t, uint32long64_t, uint32long64_t, struct ucred *))
1757 afs_gn_enosys, /* vn_map_lloff */
1758 (int(*)(struct vnode*,struct uio*,int*,struct ucred*))
1759 afs_gn_enosys, /* vn_readdir_eofp */
1760 (int(*)(struct vnode *, enum uio_rw, int32long64_t, struct uio *, ext_t , caddr_t, struct vattr *, struct vattr *, struct ucred *))
1761 afs_gn_enosys, /* vn_rdwr_attr */
1762 (int(*)(struct vnode*,int,void*,struct ucred*))
1763 afs_gn_enosys, /* vn_memcntl */
1764 #ifdef AFS_AIX53_ENV /* Present in AIX 5.3 and up */
1765 (int(*)(struct vnode*,const char*,struct uio*,struct ucred*))
1766 afs_gn_enosys, /* vn_getea */
1767 (int(*)(struct vnode*,const char*,struct uio*,int,struct ucred*))
1768 afs_gn_enosys, /* vn_setea */
1769 (int(*)(struct vnode *, struct uio *, struct ucred *))
1770 afs_gn_enosys, /* vn_listea */
1771 (int(*)(struct vnode *, const char *, struct ucred *))
1772 afs_gn_enosys, /* vn_removeea */
1773 (int(*)(struct vnode *, const char *, struct vattr *, struct ucred *))
1774 afs_gn_enosys, /* vn_statea */
1775 (int(*)(struct vnode *, uint64_t, acl_type_t *, struct uio *, size_t *, mode_t *, struct ucred *))
1776 afs_gn_enosys, /* vn_getxacl */
1777 (int(*)(struct vnode *, uint64_t, acl_type_t, struct uio *, mode_t, struct ucred *))
1778 afs_gn_enosys, /* vn_setxacl */
1779 #else /* AFS_AIX53_ENV */
1780 afs_gn_enosys, /* vn_spare7 */
1781 afs_gn_enosys, /* vn_spare8 */
1782 afs_gn_enosys, /* vn_spare9 */
1783 afs_gn_enosys, /* vn_spareA */
1784 afs_gn_enosys, /* vn_spareB */
1785 afs_gn_enosys, /* vn_spareC */
1786 afs_gn_enosys, /* vn_spareD */
1787 #endif /* AFS_AIX53_ENV */
1788 afs_gn_enosys, /* vn_spareE */
1789 afs_gn_enosys /* vn_spareF */
1790 #ifdef AFS_AIX51_ENV
1791 ,(int(*)(struct gnode*,long long,char*,unsigned long*, unsigned long*,unsigned int*))
1792 afs_gn_enosys, /* pagerBackRange */
1793 (int64_t(*)(struct gnode*))
1794 afs_gn_enosys, /* pagerGetFileSize */
1795 (void(*)(struct gnode *, vpn_t, vpn_t *, vpn_t *, vpn_t *, boolean_t))
1796 afs_gn_enosys, /* pagerReadAhead */
1797 (void(*)(struct gnode *, int64_t, int64_t, uint))
1798 afs_gn_enosys, /* pagerReadWriteBehind */
1799 (void(*)(struct gnode*,long long,unsigned long,unsigned long,unsigned int))
1800 afs_gn_enosys /* pagerEndCopy */
/* Exported handle to the table above; used as the AFS vnode ops vector. */
1803 struct vnodeops *afs_ops = &afs_gn_vnodeops;
1807 extern struct vfsops Afs_vfsops;
1808 extern int Afs_init();
1810 #define AFS_CALLOUT_TBL_SIZE 256
1813 * The following additional layer of wrapper functions exists because the
1814 * filesystem layer no longer obtains the kernel lock on our behalf; we had
1815 * been relying on that behavior to avoid explicit locking in these paths.
/*
 * vfs_* wrappers: each saves whether the AFS global lock is already held
 * (glockOwner = ISAFS_GLOCK()) and then calls through the real Afs_vfsops
 * entry point, forwarding its arguments unchanged.
 * NOTE(review): the listing elides the conditional AFS_GLOCK()/AFS_GUNLOCK()
 * lines, braces, and "return ret;" of every wrapper -- presumably each one
 * takes the glock when not already held and releases it afterwards, per the
 * comment above this section; confirm against the full source.
 */
1819 vfs_mount(struct vfs *a, struct ucred *b)
1821 int glockOwner, ret;
1823 glockOwner = ISAFS_GLOCK();
1826 ret = (*Afs_vfsops.vfs_mount) (a, b);
1834 vfs_unmount(struct vfs *a, int b, struct ucred *c)
1836 int glockOwner, ret;
1838 glockOwner = ISAFS_GLOCK();
1841 ret = (*Afs_vfsops.vfs_unmount) (a, b, c);
1849 vfs_root(struct vfs *a, struct vnode **b, struct ucred *c)
1851 int glockOwner, ret;
1853 glockOwner = ISAFS_GLOCK();
1856 ret = (*Afs_vfsops.vfs_root) (a, b, c);
1864 vfs_statfs(struct vfs *a, struct statfs *b, struct ucred *c)
1866 int glockOwner, ret;
1868 glockOwner = ISAFS_GLOCK();
1871 ret = (*Afs_vfsops.vfs_statfs) (a, b, c);
1879 vfs_sync(struct gfs *a)
1881 int glockOwner, ret;
1883 glockOwner = ISAFS_GLOCK();
1886 ret = (*Afs_vfsops.vfs_sync) (a);
1893 vfs_vget(struct vfs *a, struct vnode **b, struct fileid *c, struct ucred *d)
1895 int glockOwner, ret;
1897 glockOwner = ISAFS_GLOCK();
1900 ret = (*Afs_vfsops.vfs_vget) (a, b, c, d);
1908 vfs_cntl(struct vfs *a, int b, caddr_t c, size_t d, struct ucred *e)
1910 int glockOwner, ret;
1912 glockOwner = ISAFS_GLOCK();
1915 ret = (*Afs_vfsops.vfs_cntl) (a, b, c, d, e);
1923 vfs_quotactl(struct vfs *a, int b, uid_t c, caddr_t d, struct ucred *e)
1925 int glockOwner, ret;
1927 glockOwner = ISAFS_GLOCK();
1930 ret = (*Afs_vfsops.vfs_quotactl) (a, b, c, d, e);
/* vfs_syncvfs exists only on AIX 5.1 and later. */
1937 #ifdef AFS_AIX51_ENV
1939 vfs_syncvfs(struct gfs *a, struct vfs *b, int c, struct ucred *d)
1941 int glockOwner, ret;
1943 glockOwner = ISAFS_GLOCK();
1946 ret = (*Afs_vfsops.vfs_syncvfs) (a, b, c, d);
/*
 * locked_Afs_vfsops -- vfsops table built from the vfs_* glock wrappers
 * above (the interior initializer lines are elided from this listing).
 * NOTE(review): only the opening brace line and an AIX 5.1 conditional
 * survive here; the member list and closing brace are not visible.
 */
1955 struct vfsops locked_Afs_vfsops = {
1964 #ifdef AFS_AIX51_ENV
/*
 * vn_* wrappers: one per vnode operation.  Each records whether the AFS
 * global lock is already held (glockOwner = ISAFS_GLOCK()) and calls
 * through the corresponding afs_gn_vnodeops slot, forwarding arguments
 * unchanged.  These feed the locked_afs_gn_vnodeops table below.
 * NOTE(review): the listing elides each wrapper's conditional
 * AFS_GLOCK()/AFS_GUNLOCK() lines, braces, and "return ret;" --
 * presumably they bracket the call exactly like the vfs_* wrappers;
 * confirm against the full source.  Code left byte-identical.
 */
1970 vn_link(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
1972 int glockOwner, ret;
1974 glockOwner = ISAFS_GLOCK();
1977 ret = (*afs_gn_vnodeops.vn_link) (a, b, c, d);
1985 vn_mkdir(struct vnode *a, char *b, int32long64_t c, struct ucred *d)
1987 int glockOwner, ret;
1989 glockOwner = ISAFS_GLOCK();
1992 ret = (*afs_gn_vnodeops.vn_mkdir) (a, b, c, d);
2000 vn_mknod(struct vnode *a, caddr_t b, int32long64_t c, dev_t d,
2003 int glockOwner, ret;
2005 glockOwner = ISAFS_GLOCK();
2008 ret = (*afs_gn_vnodeops.vn_mknod) (a, b, c, d, e);
2016 vn_remove(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
2018 int glockOwner, ret;
2020 glockOwner = ISAFS_GLOCK();
2023 ret = (*afs_gn_vnodeops.vn_remove) (a, b, c, d);
2031 vn_rename(struct vnode *a, struct vnode *b, caddr_t c, struct vnode *d,
2032 struct vnode *e, caddr_t f, struct ucred *g)
2034 int glockOwner, ret;
2036 glockOwner = ISAFS_GLOCK();
2039 ret = (*afs_gn_vnodeops.vn_rename) (a, b, c, d, e, f, g);
2047 vn_rmdir(struct vnode *a, struct vnode *b, char *c, struct ucred *d)
2049 int glockOwner, ret;
2051 glockOwner = ISAFS_GLOCK();
2054 ret = (*afs_gn_vnodeops.vn_rmdir) (a, b, c, d);
2062 vn_lookup(struct vnode *a, struct vnode **b, char *c, int32long64_t d,
2063 struct vattr *v, struct ucred *e)
2065 int glockOwner, ret;
2067 glockOwner = ISAFS_GLOCK();
2070 ret = (*afs_gn_vnodeops.vn_lookup) (a, b, c, d, v, e);
2078 vn_fid(struct vnode *a, struct fileid *b, struct ucred *c)
2080 int glockOwner, ret;
2082 glockOwner = ISAFS_GLOCK();
2085 ret = (*afs_gn_vnodeops.vn_fid) (a, b, c);
2093 vn_open(struct vnode *a,
2099 int glockOwner, ret;
2101 glockOwner = ISAFS_GLOCK();
2104 ret = (*afs_gn_vnodeops.vn_open) (a, b, c, d, e);
2112 vn_create(struct vnode *a, struct vnode **b, int32long64_t c, caddr_t d,
2113 int32long64_t e, caddr_t * f, struct ucred *g)
2115 int glockOwner, ret;
2117 glockOwner = ISAFS_GLOCK();
2120 ret = (*afs_gn_vnodeops.vn_create) (a, b, c, d, e, f, g);
2128 vn_hold(struct vnode *a)
2130 int glockOwner, ret;
2132 glockOwner = ISAFS_GLOCK();
2135 ret = (*afs_gn_vnodeops.vn_hold) (a);
2143 vn_rele(struct vnode *a)
2145 int glockOwner, ret;
2147 glockOwner = ISAFS_GLOCK();
2150 ret = (*afs_gn_vnodeops.vn_rele) (a);
2158 vn_close(struct vnode *a, int32long64_t b, caddr_t c, struct ucred *d)
2160 int glockOwner, ret;
2162 glockOwner = ISAFS_GLOCK();
2165 ret = (*afs_gn_vnodeops.vn_close) (a, b, c, d);
2173 vn_map(struct vnode *a, caddr_t b, uint32long64_t c, uint32long64_t d,
2174 uint32long64_t e, struct ucred *f)
2176 int glockOwner, ret;
2178 glockOwner = ISAFS_GLOCK();
2181 ret = (*afs_gn_vnodeops.vn_map) (a, b, c, d, e, f);
2189 vn_unmap(struct vnode *a, int32long64_t b, struct ucred *c)
2191 int glockOwner, ret;
2193 glockOwner = ISAFS_GLOCK();
2196 ret = (*afs_gn_vnodeops.vn_unmap) (a, b, c);
2204 vn_access(struct vnode *a, int32long64_t b, int32long64_t c, struct ucred *d)
2206 int glockOwner, ret;
2208 glockOwner = ISAFS_GLOCK();
2211 ret = (*afs_gn_vnodeops.vn_access) (a, b, c, d);
2219 vn_getattr(struct vnode *a, struct vattr *b, struct ucred *c)
2221 int glockOwner, ret;
2223 glockOwner = ISAFS_GLOCK();
2226 ret = (*afs_gn_vnodeops.vn_getattr) (a, b, c);
2234 vn_setattr(struct vnode *a, int32long64_t b, int32long64_t c, int32long64_t d,
2235 int32long64_t e, struct ucred *f)
2237 int glockOwner, ret;
2239 glockOwner = ISAFS_GLOCK();
2242 ret = (*afs_gn_vnodeops.vn_setattr) (a, b, c, d, e, f);
2250 vn_fclear(struct vnode *a, int32long64_t b, offset_t c, offset_t d
2251 , caddr_t e, struct ucred *f)
2253 int glockOwner, ret;
2255 glockOwner = ISAFS_GLOCK();
2258 ret = (*afs_gn_vnodeops.vn_fclear) (a, b, c, d, e, f);
2266 vn_fsync(struct vnode *a, int32long64_t b, int32long64_t c, struct ucred *d)
2268 int glockOwner, ret;
2270 glockOwner = ISAFS_GLOCK();
2273 ret = (*afs_gn_vnodeops.vn_fsync) (a, b, c, d);
2281 vn_ftrunc(struct vnode *a, int32long64_t b, offset_t c, caddr_t d,
2284 int glockOwner, ret;
2286 glockOwner = ISAFS_GLOCK();
2289 ret = (*afs_gn_vnodeops.vn_ftrunc) (a, b, c, d, e);
2297 vn_rdwr(struct vnode *a, enum uio_rw b, int32long64_t c, struct uio *d,
2298 ext_t e, caddr_t f, struct vattr *v, struct ucred *g)
2300 int glockOwner, ret;
2302 glockOwner = ISAFS_GLOCK();
2305 ret = (*afs_gn_vnodeops.vn_rdwr) (a, b, c, d, e, f, v, g);
/* vn_lockctl's parameter list differs on AIX 5.2+, hence the #ifdef. */
2313 vn_lockctl(struct vnode *a,
2318 #ifdef AFS_AIX52_ENV /* Changed in AIX 5.2 and up */
2320 #else /* AFS_AIX52_ENV */
2322 #endif /* AFS_AIX52_ENV */
2325 int glockOwner, ret;
2327 glockOwner = ISAFS_GLOCK();
2330 ret = (*afs_gn_vnodeops.vn_lockctl) (a, b, c, d, e, f, g);
2338 vn_ioctl(struct vnode *a, int32long64_t b, caddr_t c, size_t d, ext_t e,
2341 int glockOwner, ret;
2343 glockOwner = ISAFS_GLOCK();
2346 ret = (*afs_gn_vnodeops.vn_ioctl) (a, b, c, d, e, f);
2354 vn_readlink(struct vnode *a, struct uio *b, struct ucred *c)
2356 int glockOwner, ret;
2358 glockOwner = ISAFS_GLOCK();
2361 ret = (*afs_gn_vnodeops.vn_readlink) (a, b, c);
2369 vn_select(struct vnode *a, int32long64_t b, ushort c, ushort * d,
2370 void (*e) (), caddr_t f, struct ucred *g)
2372 int glockOwner, ret;
2374 glockOwner = ISAFS_GLOCK();
2377 ret = (*afs_gn_vnodeops.vn_select) (a, b, c, d, e, f, g);
2385 vn_symlink(struct vnode *a, char *b, char *c, struct ucred *d)
2387 int glockOwner, ret;
2389 glockOwner = ISAFS_GLOCK();
2392 ret = (*afs_gn_vnodeops.vn_symlink) (a, b, c, d);
2400 vn_readdir(struct vnode *a, struct uio *b, struct ucred *c)
2402 int glockOwner, ret;
2404 glockOwner = ISAFS_GLOCK();
2407 ret = (*afs_gn_vnodeops.vn_readdir) (a, b, c);
2415 vn_revoke(struct vnode *a, int32long64_t b, int32long64_t c, struct vattr *d,
2418 int glockOwner, ret;
2420 glockOwner = ISAFS_GLOCK();
2423 ret = (*afs_gn_vnodeops.vn_revoke) (a, b, c, d, e);
2431 vn_getacl(struct vnode *a, struct uio *b, struct ucred *c)
2433 int glockOwner, ret;
2435 glockOwner = ISAFS_GLOCK();
2438 ret = (*afs_gn_vnodeops.vn_getacl) (a, b, c);
2446 vn_setacl(struct vnode *a, struct uio *b, struct ucred *c)
2448 int glockOwner, ret;
2450 glockOwner = ISAFS_GLOCK();
2453 ret = (*afs_gn_vnodeops.vn_setacl) (a, b, c);
2461 vn_getpcl(struct vnode *a, struct uio *b, struct ucred *c)
2463 int glockOwner, ret;
2465 glockOwner = ISAFS_GLOCK();
2468 ret = (*afs_gn_vnodeops.vn_getpcl) (a, b, c);
2476 vn_setpcl(struct vnode *a, struct uio *b, struct ucred *c)
2478 int glockOwner, ret;
2480 glockOwner = ISAFS_GLOCK();
2483 ret = (*afs_gn_vnodeops.vn_setpcl) (a, b, c);
/*
 * locked_afs_gn_vnodeops -- the glock-taking vnode-operations table,
 * built from the vn_* wrappers above.  Layout mirrors afs_gn_vnodeops:
 * unimplemented slots point at afs_gn_enosys through per-slot casts,
 * AFS_AIX53_ENV adds the EA/xacl slots, AFS_AIX51_ENV appends the pager
 * entries.  vn_strategy deliberately uses unwrapped afs_gn_strategy
 * (see the "no locking" comment on that slot).
 * NOTE(review): many initializer lines (the vn_* wrapper entries) are
 * elided from this listing.  Code left byte-identical.
 */
2491 struct vnodeops locked_afs_gn_vnodeops = {
2520 (int(*)(struct vnode*,struct buf*,struct ucred*))
2521 afs_gn_strategy, /* no locking!!! (discovered the hard way) */
2528 (int(*)(struct vnode *, int32long64_t, int32long64_t, offset_t, offset_t, struct ucred *))
2529 afs_gn_enosys, /* vn_fsync_range */
2530 (int(*)(struct vnode *, struct vnode **, int32long64_t, char *, struct vattr *, int32long64_t, caddr_t *, struct ucred *))
2531 afs_gn_enosys, /* vn_create_attr */
2532 (int(*)(struct vnode *, int32long64_t, void *, size_t, struct ucred *))
2533 afs_gn_enosys, /* vn_finfo */
2534 (int(*)(struct vnode *, caddr_t, offset_t, offset_t, uint32long64_t, uint32long64_t, struct ucred *))
2535 afs_gn_enosys, /* vn_map_lloff */
2536 (int(*)(struct vnode*,struct uio*,int*,struct ucred*))
2537 afs_gn_enosys, /* vn_readdir_eofp */
2538 (int(*)(struct vnode *, enum uio_rw, int32long64_t, struct uio *, ext_t , caddr_t, struct vattr *, struct vattr *, struct ucred *))
2539 afs_gn_enosys, /* vn_rdwr_attr */
2540 (int(*)(struct vnode*,int,void*,struct ucred*))
2541 afs_gn_enosys, /* vn_memcntl */
2542 #ifdef AFS_AIX53_ENV /* Present in AIX 5.3 and up */
2543 (int(*)(struct vnode*,const char*,struct uio*,struct ucred*))
2544 afs_gn_enosys, /* vn_getea */
2545 (int(*)(struct vnode*,const char*,struct uio*,int,struct ucred*))
2546 afs_gn_enosys, /* vn_setea */
2547 (int(*)(struct vnode *, struct uio *, struct ucred *))
2548 afs_gn_enosys, /* vn_listea */
2549 (int(*)(struct vnode *, const char *, struct ucred *))
2550 afs_gn_enosys, /* vn_removeea */
2551 (int(*)(struct vnode *, const char *, struct vattr *, struct ucred *))
2552 afs_gn_enosys, /* vn_statea */
2553 (int(*)(struct vnode *, uint64_t, acl_type_t *, struct uio *, size_t *, mode_t *, struct ucred *))
2554 afs_gn_enosys, /* vn_getxacl */
2555 (int(*)(struct vnode *, uint64_t, acl_type_t, struct uio *, mode_t, struct ucred *))
2556 afs_gn_enosys, /* vn_setxacl */
2557 #else /* AFS_AIX53_ENV */
2558 afs_gn_enosys, /* vn_spare7 */
2559 afs_gn_enosys, /* vn_spare8 */
2560 afs_gn_enosys, /* vn_spare9 */
2561 afs_gn_enosys, /* vn_spareA */
2562 afs_gn_enosys, /* vn_spareB */
2563 afs_gn_enosys, /* vn_spareC */
2564 afs_gn_enosys, /* vn_spareD */
2565 #endif /* AFS_AIX53_ENV */
2566 afs_gn_enosys, /* vn_spareE */
2567 afs_gn_enosys /* vn_spareF */
2568 #ifdef AFS_AIX51_ENV
2569 ,(int(*)(struct gnode*,long long,char*,unsigned long*, unsigned long*,unsigned int*))
2570 afs_gn_enosys, /* pagerBackRange */
2571 (int64_t(*)(struct gnode*))
2572 afs_gn_enosys, /* pagerGetFileSize */
2573 (void(*)(struct gnode *, vpn_t, vpn_t *, vpn_t *, vpn_t *, boolean_t))
2574 afs_gn_enosys, /* pagerReadAhead */
2575 (void(*)(struct gnode *, int64_t, int64_t, uint))
2576 afs_gn_enosys, /* pagerReadWriteBehind */
2577 (void(*)(struct gnode*,long long,unsigned long,unsigned long,unsigned int))
2578 afs_gn_enosys /* pagerEndCopy */
2582 struct gfs afs_gfs = {
2584 &locked_afs_gn_vnodeops,
2588 GFS_VERSION4 | GFS_VERSION42 | GFS_REMOTE,