2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "../afs/param.h"
15 #include "../h/systm.h"
16 #include "../h/types.h"
17 #include "../h/errno.h"
18 #include "../h/stat.h"
19 #include "../h/user.h"
21 #include "../h/vattr.h"
22 #include "../h/file.h"
24 #include "../h/chownx.h"
25 #include "../h/systm.h"
26 #include "../h/access.h"
30 #include "../rpc/types.h"
31 #include "../afs/osi_vfs.h"
32 #include "../netinet/in.h"
33 #include "../h/mbuf.h"
34 #include "../h/vmuser.h"
36 #include "../rpc/types.h"
37 #include "../rpc/xdr.h"
39 #include "../afs/stds.h"
40 #include "../afs/afs_osi.h"
41 #define RFTP_INTERNALS 1
42 #include "../afs/volerrors.h"
43 #include "../afsint/afsint.h"
44 #include "../afsint/vldbint.h"
45 #include "../afs/lock.h"
46 #include "../afs/exporter.h"
47 #include "../afs/afs.h"
48 #include "../afs/afs_chunkops.h"
49 #include "../afs/afs_stats.h"
50 #include "../afs/nfsclient.h"
51 #include "../afs/icl.h"
52 #include "../afs/prs_fs.h"
53 #include "../h/flock.h"
54 #include "../afs/afsincludes.h"
/* Forward declarations of the afs_gn_* entry points used to initialize
 * the vnodeops table below.
 * NOTE(review): this view of the file is sampled — most declaration lines
 * in each category are not visible here; only readlink/strategy remain. */
58 * declare all the functions so they can be used to init the table
60 /* creation/naming/deletion */
67 /* lookup, file handle stuff */
78 /* manipulate attributes of files */
82 /* data update operations */
90 int afs_gn_readlink();
95 int afs_gn_strategy();
/* AIX vnodeops dispatch table for AFS. Implemented operations are wired
 * to the afs_gn_* functions in this file (those initializer lines are not
 * visible in this sampled view); unimplemented slots are filled with
 * afs_gn_enosys so the kernel gets ENOSYS rather than a wild jump.
 * afs_ops is the pointer the VFS layer registers for AFS vnodes. */
106 * declare a struct vnodeops and initialize it with ptrs to all functions
108 struct vnodeops afs_gn_vnodeops = {
109 /* creation/naming/deletion */
116 /* lookup, file handle stuff */
119 /* access to files */
127 /* manipulate attributes of files */
131 /* data update operations */
145 /* security things */
151 afs_gn_enosys, /* vn_seek */
152 afs_gn_enosys, /* vn_fsync_range */
153 afs_gn_enosys, /* vn_create_attr */
154 afs_gn_enosys, /* vn_finfo */
155 afs_gn_enosys, /* vn_map_lloff */
156 afs_gn_enosys, /* vn_readdir_eofp */
157 afs_gn_enosys, /* vn_rdwr_attr */
158 afs_gn_enosys, /* vn_memcntl */
159 afs_gn_enosys, /* vn_spare7 */
160 afs_gn_enosys, /* vn_spare8 */
161 afs_gn_enosys, /* vn_spare9 */
162 afs_gn_enosys, /* vn_spareA */
163 afs_gn_enosys, /* vn_spareB */
164 afs_gn_enosys, /* vn_spareC */
165 afs_gn_enosys, /* vn_spareD */
166 afs_gn_enosys, /* vn_spareE */
167 afs_gn_enosys /* vn_spareF */
169 ,afs_gn_enosys, /* pagerBackRange */
170 afs_gn_enosys, /* pagerGetFileSize */
171 afs_gn_enosys, /* pagerReadAhead */
172 afs_gn_enosys, /* pagerWriteBehind */
173 afs_gn_enosys /* pagerEndCopy */
176 struct vnodeops *afs_ops = &afs_gn_vnodeops;
/* vn_link entry point: create a hard link named 'name' in directory dp
 * referring to vp, under credentials cred. Delegates to afs_link() and
 * traces the result. NOTE(review): parameter declarations, braces and the
 * return statement are not visible in this sampled view. */
180 afs_gn_link(vp, dp, name, cred)
188 AFS_STATCNT(afs_gn_link);
189 error = afs_link(vp, dp, name, cred);
190 afs_Trace3(afs_iclSetp, CM_TRACE_GNLINK, ICL_TYPE_POINTER, (afs_int32)vp,
191 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/* vn_mkdir entry point: create directory 'name' under dp. Builds a vattr
 * with the requested mode masked by the caller's umask, then calls
 * afs_mkdir(). NOTE(review): sampled view — declarations, vattr setup for
 * va_type, and the return path are not visible here. */
197 afs_gn_mkdir(dp, name, Mode, cred)
212 AFS_STATCNT(afs_gn_mkdir);
215 va.va_mode = (mode & 07777) & ~get_umask();  /* permission bits honor umask */
216 error = afs_mkdir(dp, name, &va, &vp, cred);
220 afs_Trace4(afs_iclSetp, CM_TRACE_GMKDIR, ICL_TYPE_POINTER, (afs_int32)vp,
221 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
/* vn_mknod entry point: create a node of the type encoded in Mode.
 * Dispatches on va_type: directories go to afs_mkdir(), regular files to
 * afs_create() (other device types are presumably rejected — those switch
 * arms are not visible in this sampled view). */
227 afs_gn_mknod(dp, name, Mode, dev, cred)
243 AFS_STATCNT(afs_gn_mknod);
245 va.va_type = IFTOVT(mode);  /* translate S_IF* bits to a vnode type */
246 va.va_mode = (mode & 07777) & ~get_umask();
248 /**** I'm not sure if suser() should stay here since it makes no sense in AFS; however the documentation says that one "should be super-user unless making a FIFO file. Others systems such as SUN do this checking in the early stages of mknod (before the abstraction), so it's equivalently the same! *****/
249 if (va.va_type != VFIFO && !suser(&error))
251 switch (va.va_type) {
253 error = afs_mkdir(dp, name, &va, &vp, cred);
263 error = afs_create(dp, name, &va, NONEXCL, mode, &vp, cred);
268 afs_Trace4(afs_iclSetp, CM_TRACE_GMKNOD, ICL_TYPE_POINTER, (afs_int32)vp,
269 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
/* vn_remove entry point: unlink 'name' from directory dp. The vp argument
 * is unused — AFS resolves the target by name inside afs_remove(). */
275 afs_gn_remove(vp, dp, name, cred)
276 struct vnode *vp; /* Ignored in AFS */
283 AFS_STATCNT(afs_gn_remove);
284 error = afs_remove(dp, name, cred);
285 afs_Trace3(afs_iclSetp, CM_TRACE_GREMOVE, ICL_TYPE_POINTER, (afs_int32)dp,
286 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/* vn_rename entry point: rename dp/name to tdp/tname. The source and
 * target vnode arguments (vp, tp) are ignored — afs_rename() works purely
 * from the directory/name pairs. */
292 afs_gn_rename(vp, dp, name, tp, tdp, tname, cred)
295 struct vnode *vp; /* Ignored in AFS */
296 struct vnode *tp; /* Ignored in AFS */
303 AFS_STATCNT(afs_gn_rename);
304 error = afs_rename(dp, name, tdp, tname, cred);
305 afs_Trace4(afs_iclSetp, CM_TRACE_GRENAME, ICL_TYPE_POINTER, (afs_int32)dp,
306 ICL_TYPE_STRING, name, ICL_TYPE_STRING, tname, ICL_TYPE_LONG, error);
/* vn_rmdir entry point: remove directory dp/name. Translates the BSD 4.3
 * ENOTEMPTY value (66) coming back from the fileserver into EEXIST, which
 * is what AIX callers expect for a non-empty directory. */
312 afs_gn_rmdir(vp, dp, name, cred)
313 struct vnode *vp; /* Ignored in AFS */
320 AFS_STATCNT(afs_gn_rmdir);
321 error = afs_rmdir(dp, name, cred);
323 if (error == 66 /* 4.3's ENOTEMPTY */)
324 error = EEXIST; /* AIX returns EEXIST where 4.3 used ENOTEMPTY */
326 afs_Trace3(afs_iclSetp, CM_TRACE_GRMDIR, ICL_TYPE_POINTER, (afs_int32)dp,
327 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
/* vn_lookup entry point: resolve 'name' in directory dp into *vpp.
 * Flags (FOLLOW etc.) are accepted but not passed to afs_lookup() here.
 * On success, optionally fills the caller-supplied vattr via
 * afs_gn_getattr() so AIX gets attributes in the same call.
 * The two Flags declarations are alternate #ifdef arms (the preprocessor
 * lines are not visible in this sampled view). */
333 afs_gn_lookup(dp, vpp, name, Flags, vattrp, cred)
334 struct vattr *vattrp;
339 int32long64_t Flags; /* includes FOLLOW... */
341 afs_uint32 Flags; /* includes FOLLOW... */
348 AFS_STATCNT(afs_gn_lookup);
349 error = afs_lookup(dp, name, vpp, cred);
350 afs_Trace3(afs_iclSetp, CM_TRACE_GLOOKUP, ICL_TYPE_POINTER, (afs_int32)dp,
351 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
352 if (vattrp != NULL && error == 0)
353 afs_gn_getattr(*vpp, vattrp, cred);
/* vn_fid entry point: produce a file identifier for vp (used by the NFS
 * translator and similar). cred is accepted but afs_fid() does not take it. */
359 afs_gn_fid(vp, fidp, cred)
366 AFS_STATCNT(afs_gn_fid);
367 error = afs_fid(vp, fidp);
368 afs_Trace3(afs_iclSetp, CM_TRACE_GFID, ICL_TYPE_POINTER, (afs_int32)vp,
369 ICL_TYPE_LONG, (afs_int32)fidp, ICL_TYPE_LONG, error);
/* vn_open entry point. Maps AIX open flags to AFS access bits, honors
 * FNSHARE exclusive-open semantics by sleeping until other opens drain
 * (unless FDELAY), checks access, opens, and applies FTRUNC via
 * afs_setattr(). On a post-open failure the file is closed again so the
 * cache manager's open/writer counts stay balanced.
 * NOTE(review): sampled view — declarations, error-branch braces and the
 * return are not visible here. */
375 afs_gn_open(vp, Flags, ext, vinfop, cred)
379 ext_t ext; /* Ignored in AFS */
382 int ext; /* Ignored in AFS */
384 struct ucred **vinfop; /* return ptr for fp->f_vinfo, used as fp->f_cred */
389 struct vcache *tvp = VTOAFS(vp);
393 AFS_STATCNT(afs_gn_open);
/* translate AIX FREAD/FWRITE/FEXEC/FTRUNC into AFS R/W/X access bits */
395 if ((flags & FREAD)) modes |= R_ACC;
396 if ((flags & FEXEC)) modes |= X_ACC;
397 if ((flags & FWRITE) || (flags & FTRUNC)) modes |= W_ACC;
/* FNSHARE: wait for existing opens to go away before granting exclusive use */
399 while ((flags & FNSHARE) && tvp->opens) {
400 if (!(flags & FDELAY)) {
404 afs_osi_Sleep(&tvp->opens);
407 error = afs_access(vp, modes, cred);
412 error = afs_open(&vp, flags, cred);
414 if (flags & FTRUNC) {
417 error = afs_setattr(vp, &va, cred);  /* truncate via setattr (va presumably has va_size=0 — not visible here) */
421 tvp->states |= CNSHARE;
424 *vinfop = cred; /* fp->f_vinfo is like fp->f_cred in suns */
427 /* an error occurred; we've told CM that the file
428 * is open, so close it now so that open and
429 * writer counts are correct. Ignore error code,
430 * as it is likely to fail (the setattr just did).
432 afs_close(vp, flags, cred);
437 afs_Trace3(afs_iclSetp, CM_TRACE_GOPEN, ICL_TYPE_POINTER, (afs_int32)vp,
438 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
/* vn_create entry point: create (or open-existing) dp/name. Chooses
 * EXCL/NONEXCL from O_EXCL|O_CREAT, applies umask to the mode, maps open
 * flags to access bits, then afs_create(). Afterwards it performs the
 * vop_open half itself (BSD copen() would normally do vop_open after
 * vop_create) — except for NFS-translator requests (cr_luid ==
 * RMTUSER_REQ), which never see a close and so must not bump open counts. */
444 afs_gn_create(dp, vpp, Flags, name, Mode, vinfop, cred)
455 struct ucred **vinfop; /* return ptr for fp->f_vinfo, used as fp->f_cred */
459 enum vcexcl exclusive;
464 AFS_STATCNT(afs_gn_create);
465 if ((flags & (O_EXCL|O_CREAT)) == (O_EXCL|O_CREAT))
471 va.va_mode = (mode & 07777) & ~get_umask();
472 if ((flags & FREAD)) modes |= R_ACC;
473 if ((flags & FEXEC)) modes |= X_ACC;
474 if ((flags & FWRITE) || (flags & FTRUNC)) modes |= W_ACC;
475 error = afs_create(dp, name, &va, exclusive, modes, vpp, cred);
479 /* 'cr_luid' is a flag (when it comes thru the NFS server it's set to
480 * RMTUSER_REQ) that determines if we should call afs_open(). We shouldn't
481 * call it when this NFS traffic since the close will never happen thus
482 * we'd never flush the files out to the server! Gross but the simplest
483 * solution we came out with */
484 if (cred->cr_luid != RMTUSER_REQ) {
/* same FNSHARE wait discipline as afs_gn_open */
485 while ((flags & FNSHARE) && VTOAFS(*vpp)->opens) {
486 if (!(flags & FDELAY))
488 afs_osi_Sleep(&VTOAFS(*vpp)->opens);
490 /* Since in the standard copen() for bsd vnode kernels they do an
491 * vop_open after the vop_create, we must do the open here since there
492 * are stuff in afs_open that we need. For example advance the
493 * execsOrWriters flag (else we'll be treated as the sun's "core"
495 *vinfop = cred; /* save user creds in fp->f_vinfo */
496 error = afs_open(vpp, flags, cred);
498 afs_Trace4(afs_iclSetp, CM_TRACE_GCREATE, ICL_TYPE_POINTER, (afs_int32)dp,
499 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
/* Fragments of afs_gn_hold (vn_hold: bump v_count — body mostly not
 * visible in this sampled view) and afs_gn_rele (vn_rele: drop a
 * reference; on the last release, clear the page-hog flag and call
 * afs_inactive()). Panics on a release with v_count already zero. */
508 AFS_STATCNT(afs_gn_hold);
519 struct vcache *vcp = VTOAFS(vp);
522 AFS_STATCNT(afs_gn_rele);
523 if (vp->v_count == 0)
524 osi_Panic("afs_rele: zero v_count");
525 if (--(vp->v_count) == 0) {
526 if (vcp->states & CPageHog) {
528 vcp->states &= ~CPageHog;
530 error = afs_inactive(vp, 0);
/* vn_close entry point: on FNSHARE close, drop the exclusive-share state
 * and wake any openers blocked in afs_gn_open/afs_gn_create, then call
 * afs_close(). */
537 afs_gn_close(vp, Flags, vinfo, cred)
544 caddr_t vinfo; /* Ignored in AFS */
548 struct vcache *tvp = VTOAFS(vp);
551 AFS_STATCNT(afs_gn_close);
553 if (flags & FNSHARE) {
554 tvp->states &= ~CNSHARE;
555 afs_osi_Wakeup(&tvp->opens);  /* unblock waiters sleeping on tvp->opens */
558 error = afs_close(vp, flags, cred);
559 afs_Trace3(afs_iclSetp, CM_TRACE_GCLOSE, ICL_TYPE_POINTER, (afs_int32)vp,
560 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
/* vn_map entry point: back vp with an AIX VM segment so it can be mmapped.
 * Verifies the vcache, flushes stale pages, creates the VM segment on
 * first map (vms_create, capped at afs_vmMappingEnd for 64-bit clients),
 * records the segment in the gnode, and counts read-only vs writable
 * mappers. Caller credentials are cached on the vcache because async
 * daemons will service later paging requests with them. */
566 afs_gn_map(vp, addr, Len, Off, Flag, cred)
570 uint32long64_t Len, Off, Flag;
572 u_int Len, Off, Flag;
576 struct vcache *vcp = VTOAFS(vp);
577 struct vrequest treq;
581 afs_int32 flag = Flag;
583 AFS_STATCNT(afs_gn_map);
585 if (error = afs_InitReq(&treq, cred)) return error;
586 error = afs_VerifyVCache(vcp, &treq);
588 return afs_CheckCode(error, &treq, 49);
590 osi_FlushPages(vcp, cred); /* XXX ensure old pages are gone XXX */
591 ObtainWriteLock(&vcp->lock, 401);
592 vcp->states |= CMAPPED; /* flag cleared at afs_inactive */
594 * We map the segment into our address space using the handle returned by vm_create.
597 afs_uint32 tlen = vcp->m.Length;
598 #ifdef AFS_64BIT_CLIENT
/* VM segment can only cover the 32-bit-mappable prefix of a large file */
599 if (vcp->m.Length > afs_vmMappingEnd)
600 tlen = afs_vmMappingEnd;
602 /* Consider V_INTRSEG too for interrupts */
603 if (error = vms_create(&vcp->segid, V_CLIENT, vcp->v.v_gnode, tlen, 0, 0)) {
604 ReleaseWriteLock(&vcp->lock);
607 vcp->vmh = SRVAL(vcp->segid, 0, 0);
609 vcp->v.v_gnode->gn_seg = vcp->segid; /* XXX Important XXX */
610 if (flag & SHM_RDONLY) {
611 vp->v_gnode->gn_mrdcnt++;  /* count read-only mappers */
614 vp->v_gnode->gn_mwrcnt++;  /* count writable mappers */
617 * We keep the caller's credentials since an async daemon will handle the
618 * request at some point. We assume that the same credentials will be used.
620 if (!vcp->credp || (vcp->credp != cred)) {
623 struct ucred *crp = vcp->credp;
629 ReleaseWriteLock(&vcp->lock);
631 afs_Trace4(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, (afs_int32)vp,
632 ICL_TYPE_LONG, addr, ICL_TYPE_LONG, len, ICL_TYPE_LONG, off);
/* vn_unmap entry point: decrement the read-only or writable mapper count
 * recorded by afs_gn_map, clamping at zero to guard against imbalance. */
638 afs_gn_unmap(vp, flag, cred)
647 struct vcache *vcp = VTOAFS(vp);
648 AFS_STATCNT(afs_gn_unmap);
649 ObtainWriteLock(&vcp->lock, 402);
650 if (flag & SHM_RDONLY) {
651 vp->v_gnode->gn_mrdcnt--;
652 if (vp->v_gnode->gn_mrdcnt <=0) vp->v_gnode->gn_mrdcnt = 0;
655 vp->v_gnode->gn_mwrcnt--;
656 if (vp->v_gnode->gn_mwrcnt <=0) vp->v_gnode->gn_mwrcnt = 0;
658 ReleaseWriteLock(&vcp->lock);
/* vn_access entry point: check 'mode' access for the class given by 'who'
 * (self/others/any/all). First asks the fileserver ACL via afs_access();
 * for ACC_OTHERS/ACC_ANY/ACC_ALL it additionally consults the UNIX mode
 * bits from getattr (owner bits >>6, group bits >>3, other bits).
 * NOTE(review): the branch outcomes that set/clear error between these
 * checks are not visible in this sampled view. */
666 afs_gn_access(vp, Mode, Who, cred)
682 AFS_STATCNT(afs_gn_access);
688 error = afs_access(vp, mode, cred);
690 /* Additional testing */
691 if (who == ACC_OTHERS || who == ACC_ANY) {
692 error = afs_getattr(vp, &vattr, cred);
694 if (who == ACC_ANY) {
695 if (((vattr.va_mode >> 6) & mode) == mode) {  /* owner bits grant it */
700 if (((vattr.va_mode >> 3) & mode) == mode)  /* group bits grant it */
705 } else if (who == ACC_ALL) {
706 error = afs_getattr(vp, &vattr, cred);
/* ACC_ALL requires owner, group AND other bits all to grant the mode */
708 if ((!((vattr.va_mode >> 6) & mode)) || (!((vattr.va_mode >> 3) & mode)) ||
709 (!(vattr.va_mode & mode)))
718 afs_Trace3(afs_iclSetp, CM_TRACE_GACCESS, ICL_TYPE_POINTER, (afs_int32)vp,
719 ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
/* vn_getattr entry point: thin wrapper around afs_getattr() with tracing. */
725 afs_gn_getattr(vp, vattrp, cred)
727 struct vattr *vattrp;
732 AFS_STATCNT(afs_gn_getattr);
733 error = afs_getattr(vp, vattrp, cred);
734 afs_Trace2(afs_iclSetp, CM_TRACE_GGETATTR, ICL_TYPE_POINTER, (afs_int32)vp,
735 ICL_TYPE_LONG, error);
/* vn_setattr entry point: translate the AIX chattr-style (Op, Arg1..Arg3)
 * triple into a vattr and call afs_setattr(). Visible here: the
 * owner/group "as-is" masks, a write-access check, and T_SETTIME handling
 * (current time vs caller-supplied timestruc_t pair in arg2/arg3).
 * NOTE(review): sampled view — the Op dispatch and most vattr population
 * are not visible. */
741 afs_gn_setattr(vp, Op, Arg1, Arg2, Arg3, cred)
763 AFS_STATCNT(afs_gn_setattr);
771 if ((arg1 & T_OWNER_AS_IS) == 0)
773 if ((arg1 & T_GROUP_AS_IS) == 0)
778 error = afs_access(vp, VWRITE, cred);  /* utime-style changes need write access */
782 if (arg1 & T_SETTIME) {
783 va.va_atime.tv_sec = time;  /* "set to now": kernel global time */
784 va.va_mtime.tv_sec = time;
786 va.va_atime = *(struct timestruc_t *) arg2;
787 va.va_mtime = *(struct timestruc_t *) arg3;
795 error = afs_setattr(vp, &va, cred);
797 afs_Trace2(afs_iclSetp, CM_TRACE_GSETATTR, ICL_TYPE_POINTER, (afs_int32)vp,
798 ICL_TYPE_LONG, error);
/* vn_fclear entry point: zero-fill [offset, offset+length) of the file by
 * repeatedly writing a page-sized zero buffer through afs_rdwr(). Refuses
 * to clear past the process ulimit, and flushes/waits on the VM segment
 * first so cached pages don't mask the zeroes. zero_buffer is a shared
 * static page of zeroes, (re)initialized under fclear_init. */
803 char zero_buffer[PAGESIZE];
805 afs_gn_fclear(vp, flags, offset, length, vinfo, cred)
817 int i, len, error = 0;
820 static int fclear_init =0;
821 register struct vcache *avc = VTOAFS(vp);
823 AFS_STATCNT(afs_gn_fclear);
825 memset(zero_buffer, 0, PAGESIZE);
829 * Don't clear past ulimit
831 if (offset + length > get_ulimit())
834 /* Flush all pages first */
837 vm_flushp(avc->segid, 0, MAXFSIZE/PAGESIZE - 1);
838 vms_iowait(avc->vmh);
841 uio.afsio_offset = offset;
/* write zeroes one page (or less, at the tail) at a time */
842 for (i = offset; i < offset + length; i = uio.afsio_offset) {
843 len = offset + length - i;
844 iov.iov_len = (len > PAGESIZE) ? PAGESIZE : len;
845 iov.iov_base = zero_buffer;
846 uio.afsio_iov = &iov;
847 uio.afsio_iovcnt = 1;
848 uio.afsio_seg = AFS_UIOSYS;
849 uio.afsio_resid = iov.iov_len;
850 if (error = afs_rdwr(vp, &uio, UIO_WRITE, 0, cred))
853 afs_Trace4(afs_iclSetp, CM_TRACE_GFCLEAR, ICL_TYPE_POINTER, (afs_int32)vp,
854 ICL_TYPE_LONG, offset, ICL_TYPE_LONG, length, ICL_TYPE_LONG, error);
/* vn_fsync entry point: flush vp to the fileserver via afs_fsync().
 * flags/vinfo are accepted for interface compatibility but unused. */
860 afs_gn_fsync(vp, flags, vinfo, cred)
863 int32long64_t flags; /* Not used by AFS */
864 int32long64_t vinfo; /* Not used by AFS */
866 int flags; /* Not used by AFS */
867 caddr_t vinfo; /* Not used by AFS */
873 AFS_STATCNT(afs_gn_fsync);
874 error = afs_fsync(vp, cred);
875 afs_Trace3(afs_iclSetp, CM_TRACE_GFSYNC, ICL_TYPE_POINTER, (afs_int32)vp,
876 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
/* vn_ftrunc entry point: truncate vp to 'length' by issuing a setattr
 * (va presumably carries va_size = length — that assignment is not
 * visible in this sampled view). flags/vinfo are ignored. */
882 afs_gn_ftrunc(vp, flags, length, vinfo, cred)
885 int32long64_t flags; /* Ignored in AFS */
887 int flags; /* Ignored in AFS */
890 caddr_t vinfo; /* Ignored in AFS */
896 AFS_STATCNT(afs_gn_ftrunc);
899 error = afs_setattr(vp, &va, cred);
900 afs_Trace4(afs_iclSetp, CM_TRACE_GFTRUNC, ICL_TYPE_POINTER, (afs_int32)vp,
901 ICL_TYPE_LONG, flags,
902 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length),
903 ICL_TYPE_LONG, error);
907 /* Min size of a file which is dumping core before we declare it a page hog. */
908 #define MIN_PAGE_HOG_SIZE 8388608
/* vn_rdwr entry point: the main read/write path. Short-circuits writes if
 * the vcache already carries a stored error (vc_error). Caches the
 * caller's credentials on the vcache (duping them for NFS-translator
 * requests so the NFS server can't overwrite them), verifies the vcache,
 * and flushes stale VM pages. For NFS-translator requests it performs the
 * ACL check here and fakes open/close accounting around writes, since NFS
 * has no open/close RPCs. The actual data movement is afs_vm_rdwr().
 * Optionally fills vattrp on success. */
910 int afs_gn_rdwr(vp, op, Flags, ubuf, ext, vinfo, vattrp, cred)
915 ext_t ext; /* Ignored in AFS */
918 int ext; /* Ignored in AFS */
921 caddr_t vinfo; /* Ignored in AFS */
922 struct vattr *vattrp;
925 register struct vcache *vcp = VTOAFS(vp);
926 struct vrequest treq;
931 AFS_STATCNT(afs_gn_rdwr);
/* a previously stored error (e.g. quota) poisons further writes until seen */
934 if (op == UIO_WRITE) {
935 afs_Trace2(afs_iclSetp, CM_TRACE_GRDWR1,
936 ICL_TYPE_POINTER, (afs_int32)vp,
937 ICL_TYPE_LONG, vcp->vc_error);
938 return vcp->vc_error;
943 ObtainSharedLock(&vcp->lock, 507);
945 * We keep the caller's credentials since an async daemon will handle the
946 * request at some point. We assume that the same credentials will be used.
947 * If this is being called from an NFS server thread, then dupe the
948 * cred and only use that copy in calls and for the stach.
950 if (!vcp->credp || (vcp->credp != cred)) {
951 #ifdef AFS_AIX_IAUTH_ENV
952 if (AFS_NFSXLATORREQ(cred)) {
953 /* Must be able to use cred later, so dupe it so that nfs server
954 * doesn't overwrite it's contents.
960 crhold(cred); /* Bump refcount for reference in vcache */
964 UpgradeSToWLock(&vcp->lock, 508);
967 ConvertWToSLock(&vcp->lock);
972 ReleaseSharedLock(&vcp->lock);
975 * XXX Is the following really required?? XXX
977 if (error = afs_InitReq(&treq, cred)) return error;
978 if (error = afs_VerifyVCache(vcp, &treq))
979 return afs_CheckCode(error, &treq, 50);
980 osi_FlushPages(vcp, cred); /* Flush old pages */
/* NFS translator: do the ACL check here, there is no separate open RPC */
982 if (AFS_NFSXLATORREQ(cred)) {
985 if (op == UIO_READ) {
986 if (!afs_AccessOK(vcp, PRSFS_READ, &treq,
987 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
996 * We have to bump the open/exwriters field here courtesy of the nfs xlator
997 * because there're no open/close nfs rpcs to call our afs_open/close.
998 * We do a similar thing on the afs_read/write interface.
1000 if (op == UIO_WRITE) {
1001 #ifdef AFS_64BIT_CLIENT
1002 if (ubuf->afsio_offset < afs_vmMappingEnd) {
1003 #endif /* AFS_64BIT_ENV */
1004 ObtainWriteLock(&vcp->lock,240);
1005 vcp->states |= CDirty; /* Set the dirty bit */
1007 ReleaseWriteLock(&vcp->lock);
1008 #ifdef AFS_64BIT_CLIENT
1010 #endif /* AFS_64BIT_ENV */
1013 error = afs_vm_rdwr(vp, ubuf, op, flags, cred);
/* undo the fake-open accounting after the write completes */
1015 if (op == UIO_WRITE) {
1016 #ifdef AFS_64BIT_CLIENT
1017 if (ubuf->afsio_offset < afs_vmMappingEnd) {
1018 #endif /* AFS_64BIT_ENV */
1019 ObtainWriteLock(&vcp->lock,241);
1020 afs_FakeClose(vcp, cred); /* XXXX For nfs trans and cores XXXX */
1021 ReleaseWriteLock(&vcp->lock);
1022 #ifdef AFS_64BIT_CLIENT
1024 #endif /* AFS_64BIT_ENV */
1026 if (vattrp != NULL && error == 0)
1027 afs_gn_getattr(vp, vattrp, cred);
1029 afs_Trace4(afs_iclSetp, CM_TRACE_GRDWR, ICL_TYPE_POINTER, (afs_int32)vp,
1030 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, op, ICL_TYPE_LONG, error);
/* Core VM-backed read/write engine, called from afs_gn_rdwr().
 * Moves data between the caller's uio and the file's AIX VM segment with
 * vm_move(), creating the segment on demand. Handles IO_APPEND, "past
 * EOF"/ulimit checks, and (64-bit clients) splits or redirects transfers
 * that cross afs_vmMappingEnd to afs_direct_rdwr(), since the VM segment
 * only covers the mappable prefix of the file. Reads trigger prefetch of
 * the next chunk at chunk boundaries; writes go chunk-at-a-time with
 * periodic afs_DoPartialWrite()/vm_writep() flushes (throttled by
 * AFS_MAX_VM_CHUNKS) and unprotect/reprotect of the last page. On a
 * stored vcache error (quota/ENOSPC) the resid is restored so the caller
 * sees the failure. NOTE(review): sampled view — many branches, the
 * fail: label and cleanup paths are not visible here. */
1037 #define AFS_MAX_VM_CHUNKS 10
1038 afs_vm_rdwr(vp, uiop, rw, ioflag, credp)
1039 register struct vnode *vp;
1043 struct ucred *credp;
1045 register afs_int32 code = 0;
1047 afs_int32 blockSize;
1048 afs_size_t fileSize, xfrOffset, offset, old_offset, xfrSize;
1050 #ifdef AFS_64BIT_CLIENT
1051 afs_size_t finalOffset;
1054 #endif /* AFS_64BIT_CLIENT */
1055 register struct vcache *vcp = VTOAFS(vp);
1057 afs_size_t start_offset;
1058 afs_int32 save_resid = uiop->afsio_resid;
1059 int first_page, last_page, pages;
1062 struct vrequest treq;
1064 if (code = afs_InitReq(&treq, credp)) return code;
1066 /* special case easy transfer; apparently a lot are done */
1067 if ((xfrSize=uiop->afsio_resid) == 0) return 0;
1069 ObtainReadLock(&vcp->lock);
1070 fileSize = vcp->m.Length;
1071 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
1072 uiop->afsio_offset = fileSize;
1074 /* compute xfrOffset now, and do some checks */
1075 xfrOffset = uiop->afsio_offset;
1076 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
1081 #ifndef AFS_64BIT_CLIENT
1082 /* check for "file too big" error, which should really be done above us */
1083 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
1087 #endif /* AFS_64BIT_CLIENT */
1089 #ifdef AFS_64BIT_CLIENT
/* transfers beyond the VM-mappable region cannot use vm_move() */
1090 if (xfrOffset + xfrSize > afs_vmMappingEnd) {
1091 if (xfrOffset < afs_vmMappingEnd) {
1092 /* special case of a buffer crossing the VM mapping line */
1094 struct iovec tvec[16]; /* Should have access to #define */
1098 finalOffset = xfrOffset + xfrSize;
1099 tsize = (afs_size_t) (xfrOffset + xfrSize - afs_vmMappingEnd);
/* carve off the tail beyond the mapping line and push it direct */
1101 afsio_copy(uiop, &tuio, tvec);
1102 afsio_skip(&tuio, txfrSize - tsize);
1103 afsio_trim(&tuio, tsize);
1104 tuio.afsio_offset = afs_vmMappingEnd;
1105 ReleaseReadLock(&vcp->lock);
1106 ObtainWriteLock(&vcp->lock,243);
1107 afs_FakeClose(vcp, credp); /* XXXX For nfs trans and cores XXXX */
1108 ReleaseWriteLock(&vcp->lock);
1109 code = afs_direct_rdwr(vp, &tuio, rw, ioflag, credp);
1110 ObtainWriteLock(&vcp->lock,244);
1111 afs_FakeOpen(vcp); /* XXXX For nfs trans and cores XXXX */
1112 ReleaseWriteLock(&vcp->lock);
1113 ObtainReadLock(&vcp->lock);
1114 if (code) goto fail;
1115 xfrSize = afs_vmMappingEnd - xfrOffset;  /* remainder goes via VM below */
1117 afsio_trim(uiop, txfrSize);
1119 ReleaseReadLock(&vcp->lock);
1120 code = afs_direct_rdwr(vp, uiop, rw, ioflag, credp);
1124 #endif /* AFS_64BIT_CLIENT */
/* create the backing VM segment on first use */
1127 afs_uint32 tlen = vcp->m.Length;
1128 #ifdef AFS_64BIT_CLIENT
1129 if (vcp->m.Length > afs_vmMappingEnd)
1130 tlen = afs_vmMappingEnd;
1132 /* Consider V_INTRSEG too for interrupts */
1133 if (code = vms_create(&vcp->segid, V_CLIENT, vcp->v.v_gnode,
1137 vcp->vmh = SRVAL(vcp->segid, 0, 0);
1139 vcp->v.v_gnode->gn_seg = vcp->segid;
1140 if (rw == UIO_READ) {
1141 /* don't read past EOF */
1142 if (xfrSize+xfrOffset > fileSize)
1143 xfrSize = fileSize - xfrOffset;
1144 if (xfrSize <= 0) goto fail;
1145 ReleaseReadLock(&vcp->lock);
1146 #ifdef AFS_64BIT_CLIENT
1147 toffset = xfrOffset;
1148 uiop->afsio_offset = xfrOffset;
1149 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE,
1150 ICL_TYPE_POINTER, vcp,
1151 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrOffset),
1152 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrSize));
1155 code = vm_move(vcp->segid, toffset, txfrSize, rw, uiop);
1156 #else /* AFS_64BIT_CLIENT */
1158 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
1159 #endif /* AFS_64BIT_CLIENT */
1162 * If at a chunk boundary and staying within chunk,
1163 * start prefetch of next chunk.
1165 if (counter == 0 || AFS_CHUNKOFFSET(xfrOffset) == 0
1166 && xfrSize <= AFS_CHUNKSIZE(xfrOffset)) {
1167 ObtainWriteLock(&vcp->lock,407);
1168 tdc = afs_FindDCache(vcp, xfrOffset);
1170 if (!(tdc->mflags & DFNextStarted))
1171 afs_PrefetchChunk(vcp, tdc, credp, &treq);
1174 ReleaseWriteLock(&vcp->lock);
1176 #ifdef AFS_64BIT_CLIENT
1178 uiop->afsio_offset = finalOffset;
1180 #endif /* AFS_64BIT_CLIENT */
/* ---- write path ---- */
1185 start_offset = uiop->afsio_offset;
1186 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE,
1187 ICL_TYPE_POINTER, vcp,
1188 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(start_offset),
1189 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrSize));
1190 ReleaseReadLock(&vcp->lock);
1191 ObtainWriteLock(&vcp->lock,400);
1192 vcp->m.Date = osi_Time(); /* Set file date (for ranlib) */
1194 /* un-protect last page. */
1195 last_page = vcp->m.Length/PAGESIZE;
1196 #ifdef AFS_64BIT_CLIENT
1197 if (vcp->m.Length > afs_vmMappingEnd)
1198 last_page = afs_vmMappingEnd/PAGESIZE;
1200 vm_protectp(vcp->vmh, last_page, 1, FILEKEY);
1201 if (xfrSize + xfrOffset > fileSize) {
1202 vcp->m.Length = xfrSize+xfrOffset;  /* extend cached file length */
1204 if ((!(vcp->states & CPageHog)) && (xfrSize >= MIN_PAGE_HOG_SIZE)) {
1206 vcp->states |= CPageHog;
1208 ReleaseWriteLock(&vcp->lock);
1210 /* If the write will fit into a single chunk we'll write all of it
1211 * at once. Otherwise, we'll write one chunk at a time, flushing
1212 * some of it to disk.
1216 /* Only create a page to avoid excess VM access if we're writing a
1217 * small file which is either new or completely overwrites the
1220 if ((xfrOffset == 0) && (xfrSize < PAGESIZE) && (xfrSize >= fileSize) &&
1221 (vcp->v.v_gnode->gn_mwrcnt == 0) &&
1222 (vcp->v.v_gnode->gn_mrdcnt == 0)) {
1223 (void) vm_makep(vcp->segid, 0);
1226 while (xfrSize > 0) {
1227 offset = AFS_CHUNKBASE(xfrOffset);
1230 if (AFS_CHUNKSIZE(xfrOffset) <= len)
1231 len = (afs_size_t)AFS_CHUNKSIZE(xfrOffset) - (xfrOffset - offset);
1233 if (len == xfrSize) {
1234 /* All data goes to this one chunk. */
1236 old_offset = uiop->afsio_offset;
1237 #ifdef AFS_64BIT_CLIENT
1238 uiop->afsio_offset = xfrOffset;
1239 toffset = xfrOffset;
1241 code = vm_move(vcp->segid, toffset, txfrSize, rw, uiop);
1242 #else /* AFS_64BIT_CLIENT */
1243 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
1244 #endif /* AFS_64BIT_CLIENT */
1250 /* Write just one chunk's worth of data. */
1252 struct iovec tvec[16]; /* Should have access to #define */
1254 /* Purge dirty chunks of file if there are too many dirty chunks.
1255 * Inside the write loop, we only do this at a chunk boundary.
1256 * Clean up partial chunk if necessary at end of loop.
1258 if (counter > 0 && code == 0 && xfrOffset == offset) {
1259 ObtainWriteLock(&vcp->lock,403);
1260 if (xfrOffset > vcp->m.Length)
1261 vcp->m.Length = xfrOffset;
1262 code = afs_DoPartialWrite(vcp, &treq);
1263 vcp->states |= CDirty;
1264 ReleaseWriteLock(&vcp->lock);
/* build a sub-uio covering just this chunk's worth */
1268 afsio_copy(uiop, &tuio, tvec);
1269 afsio_trim(&tuio, len);
1270 tuio.afsio_offset = xfrOffset;
1273 old_offset = uiop->afsio_offset;
1274 #ifdef AFS_64BIT_CLIENT
1275 toffset = xfrOffset;
1276 code = vm_move(vcp->segid, toffset, len, rw, &tuio);
1277 #else /* AFS_64BIT_CLIENT */
1278 code = vm_move(vcp->segid, xfrOffset, len, rw, &tuio);
1279 #endif /* AFS_64BIT_CLIENT */
1281 len -= tuio.afsio_resid;  /* bytes actually moved this pass */
1282 afsio_skip(uiop, len);    /* advance the caller's uio by the same */
/* schedule the pages just dirtied for write-behind */
1287 first_page = (afs_size_t)old_offset >> PGSHIFT;
1288 pages = 1 + (((afs_size_t)old_offset + (len - 1)) >> PGSHIFT) - first_page;
1289 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE2,
1290 ICL_TYPE_POINTER, (afs_int32) vcp,
1291 ICL_TYPE_INT32, first_page,
1292 ICL_TYPE_INT32, pages);
1294 code = vm_writep(vcp->segid, first_page, pages);
1295 if (++count > AFS_MAX_VM_CHUNKS) {
/* throttle: wait for outstanding segment I/O before queueing more */
1297 vms_iowait(vcp->segid);
1305 vms_iowait(vcp->segid);
1309 ObtainWriteLock(&vcp->lock,242);
1310 if (code == 0 && (vcp->states & CDirty)) {
1311 code = afs_DoPartialWrite(vcp, &treq);
1313 vm_protectp(vcp->vmh, last_page, 1, RDONLY);  /* re-protect last page */
1314 ReleaseWriteLock(&vcp->lock);
1316 /* If requested, fsync the file after every write */
1318 afs_fsync(vp, credp);
1320 ObtainReadLock(&vcp->lock);
1321 if (vcp->vc_error) {
1322 /* Pretend we didn't write anything. We need to get the error back to
1323 * the user. If we don't it's possible for a quota error for this
1324 * write to succeed and the file to be closed without the user ever
1325 * having seen the store error. And AIX syscall clears the error if
1326 * anything was written.
1328 code = vcp->vc_error;
1329 if (code == EDQUOT || code == ENOSPC)
1330 uiop->afsio_resid = save_resid;
1332 #ifdef AFS_64BIT_CLIENT
1334 uiop->afsio_offset = finalOffset;
1336 #endif /* AFS_64BIT_CLIENT */
1339 ReleaseReadLock(&vcp->lock);
1340 afs_Trace2(afs_iclSetp, CM_TRACE_VMWRITE3,
1341 ICL_TYPE_POINTER, vcp,
1342 ICL_TYPE_INT32, code);
/* Direct (non-VM) read/write path, used for the portion of a 64-bit file
 * that lies beyond afs_vmMappingEnd. Mirrors afs_vm_rdwr's prologue
 * (IO_APPEND handling, offset sanity and ulimit checks), bumps the cached
 * file length/date under the write lock, then hands the uio straight to
 * afs_rdwr(). On error the resid is restored; on success the offset is
 * advanced by the full transfer and any leftover resid is forced to zero
 * (the buffer tail is noted as "should zero" but not actually zeroed —
 * see comment at original line 1405). Triggers afs_DoPartialWrite() when
 * the write crossed a chunk boundary. */
1347 afs_direct_rdwr(vp, uiop, rw, ioflag, credp)
1348 register struct vnode *vp;
1352 struct ucred *credp;
1354 register afs_int32 code = 0;
1355 afs_size_t fileSize, xfrOffset, offset, old_offset, xfrSize;
1356 struct vcache *vcp = VTOAFS(vp);
1357 afs_int32 save_resid = uiop->afsio_resid;
1358 struct vrequest treq;
1360 if (code = afs_InitReq(&treq, credp)) return code;
1362 /* special case easy transfer; apparently a lot are done */
1363 if ((xfrSize=uiop->afsio_resid) == 0) return 0;
1365 ObtainReadLock(&vcp->lock);
1366 fileSize = vcp->m.Length;
1367 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
1368 uiop->afsio_offset = fileSize;
1370 /* compute xfrOffset now, and do some checks */
1371 xfrOffset = uiop->afsio_offset;
1372 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
1374 ReleaseReadLock(&vcp->lock);
1378 /* check for "file too big" error, which should really be done above us */
1380 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
1382 ReleaseReadLock(&vcp->lock);
1386 ReleaseReadLock(&vcp->lock);
1387 if (rw == UIO_WRITE) {
1388 ObtainWriteLock(&vcp->lock,400);
1389 vcp->m.Date = osi_Time(); /* Set file date (for ranlib) */
1391 if (xfrSize + xfrOffset > fileSize)
1392 vcp->m.Length = xfrSize + xfrOffset;  /* extend cached length */
1393 ReleaseWriteLock(&vcp->lock);
1395 afs_Trace3(afs_iclSetp, CM_TRACE_DIRECTRDWR,
1396 ICL_TYPE_POINTER, (afs_int32)vp,
1397 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(uiop->afsio_offset),
1398 ICL_TYPE_LONG, uiop->afsio_resid);
1399 code = afs_rdwr(vp, uiop, rw, ioflag, credp);
1401 uiop->afsio_resid = save_resid;  /* error: report nothing transferred */
1403 uiop->afsio_offset = xfrOffset + xfrSize;
1404 if (uiop->afsio_resid > 0) {
1405 /* should zero here the remaining buffer */
1406 uiop->afsio_resid = 0;
1408 /* Purge dirty chunks of file if there are too many dirty chunks.
1409 * Inside the write loop, we only do this at a chunk boundary.
1410 * Clean up partial chunk if necessary at end of loop.
1412 if (AFS_CHUNKBASE(uiop->afsio_offset) != AFS_CHUNKBASE(xfrOffset)) {
1413 ObtainWriteLock(&vcp->lock,402);
1414 code = afs_DoPartialWrite(vcp, &treq);
1415 vcp->states |= CDirty;
1416 ReleaseWriteLock(&vcp->lock);
/* Normalize an eflock to whence=0 (absolute offsets): SEEK_CUR adds the
 * current file offset, SEEK_END adds the file size from getattr. Returns
 * nonzero on getattr failure. Used by afs_gn_lockctl before calling
 * afs_lockctl(). */
1425 static int lock_normalize(vp, lckdat, offset, cred)
1427 struct eflock *lckdat;
1434 switch(lckdat->l_whence) {
1438 lckdat->l_start += (off_t) offset;
1441 code = afs_getattr(vp, &vattr, cred);
1442 if (code != 0) return code;
1443 lckdat->l_start += (off_t) vattr.va_size;
1448 lckdat->l_whence = 0;  /* now absolute */
/* vn_lockctl entry point: byte-range locking. Translates the AIX command
 * (SETFLCK etc.) into the standard lockctl command, copies the caller's
 * eflock into a local flock-style struct (checking for offset truncation
 * on the copy), normalizes it to absolute offsets via lock_normalize(),
 * calls afs_lockctl(), and copies the possibly-updated lock back out.
 * The blocking-lock callback (ignored_fcn/ignored_id) is unused. */
1454 afs_gn_lockctl(vp, offset, lckdat, cmd, ignored_fcn, ignored_id, cred)
1455 void (*ignored_fcn)();
1459 struct eflock *lckdat;
1461 #ifdef AFS_AIX51_ENV
1469 struct vattr *attrs;
1471 AFS_STATCNT(afs_gn_lockctl);
1472 /* Convert from AIX's cmd to standard lockctl lock types... */
1475 else if (cmd & SETFLCK) {
1480 flkd.l_type = lckdat->l_type;
1481 flkd.l_whence = lckdat->l_whence;
1482 flkd.l_start = lckdat->l_start;
1483 flkd.l_len = lckdat->l_len;
1484 flkd.l_pid = lckdat->l_pid;
1485 flkd.l_sysid = lckdat->l_sysid;
/* detect truncation if eflock fields don't survive the narrower copy */
1487 if (flkd.l_start != lckdat->l_start || flkd.l_len != lckdat->l_len)
1489 if (error = lock_normalize(vp, &flkd, offset, cred))
1491 error = afs_lockctl(vp, &flkd, ncmd, cred);
1492 lckdat->l_type = flkd.l_type;
1493 lckdat->l_whence = flkd.l_whence;
1494 lckdat->l_start = flkd.l_start;
1495 lckdat->l_len = flkd.l_len;
1496 lckdat->l_pid = flkd.l_pid;
1497 lckdat->l_sysid = flkd.l_sysid;
1498 afs_Trace3(afs_iclSetp, CM_TRACE_GLOCKCTL, ICL_TYPE_POINTER, (afs_int32)vp,
1499 ICL_TYPE_LONG, ncmd, ICL_TYPE_LONG, error);
/* vn_ioctl entry point: forward to afs_ioctl() (which feeds the
 * afs_xioctl redirection); flags/channel/ext are ignored. */
1504 /* NOTE: In the nfs glue routine (nfs_gn2sun.c) the order was wrong (vp, flags, cmd, arg, ext); was that another typo? */
1505 int afs_gn_ioctl(vp, Cmd, arg, flags, channel, ext)
1507 #ifdef AFS_AIX51_ENV
1513 int flags; /* Ignored in AFS */
1514 int channel; /* Ignored in AFS */
1515 int ext; /* Ignored in AFS */
1520 AFS_STATCNT(afs_gn_ioctl);
1521 /* This seems to be a perfect fit for our ioctl redirection (afs_xioctl hack); thus the ioctl(2) entry in sysent.c is unaffected in the aix/afs port. */
1522 error = afs_ioctl(vp, cmd, arg);
1523 afs_Trace3(afs_iclSetp, CM_TRACE_GIOCTL, ICL_TYPE_POINTER, (afs_int32)vp,
1524 ICL_TYPE_LONG, cmd, ICL_TYPE_LONG, error);
/* vn_readlink entry point: thin wrapper around afs_readlink() with tracing. */
1530 afs_gn_readlink(vp, uiop, cred)
1537 AFS_STATCNT(afs_gn_readlink);
1538 error = afs_readlink(vp, uiop, cred);
1539 afs_Trace2(afs_iclSetp, CM_TRACE_GREADLINK, ICL_TYPE_POINTER, (afs_int32)vp,
1540 ICL_TYPE_LONG, error);
/* vn_select entry point: select/poll is not supported by AFS; the return
 * value (not visible in this sampled view) presumably reflects that. */
1546 afs_gn_select(vp, which, vinfo, mpx)
1552 AFS_STATCNT(afs_gn_select);
1553 /* NO SUPPORT for this in afs YET! */
/* vn_symlink entry point: create symlink 'link' -> 'target' under vp.
 * The vattr setup for va is not visible in this sampled view. */
1559 afs_gn_symlink(vp, link, target, cred)
1568 AFS_STATCNT(afs_gn_symlink);
1571 error = afs_symlink(vp, link, &va, target, cred);
1572 afs_Trace4(afs_iclSetp, CM_TRACE_GSYMLINK, ICL_TYPE_POINTER, (afs_int32)vp,
1573 ICL_TYPE_STRING, link, ICL_TYPE_STRING, target, ICL_TYPE_LONG, error);
/* vn_readdir entry point: thin wrapper around afs_readdir() with tracing. */
1579 afs_gn_readdir(vp, uiop, cred)
1586 AFS_STATCNT(afs_gn_readdir);
1587 error = afs_readdir(vp, uiop, cred);
1588 afs_Trace2(afs_iclSetp, CM_TRACE_GREADDIR, ICL_TYPE_POINTER, (afs_int32)vp,
1589 ICL_TYPE_LONG, error);
/* Shared state for the buffer-strategy path (afs_gn_strategy, below this
 * view): the async buffer queue, its lock and condition variable, and the
 * monotonically increasing afs_biotime used to age-rank buffers so the
 * I/O daemons can favor older requests. */
1594 extern Simple_lock afs_asyncbuf_lock;
1596 * Buffers are ranked by age. A buffer's age is the value of afs_biotime
1597 * when the buffer is processed by naix_vmstrategy. afs_biotime is
1598 * incremented for each buffer. A buffer's age is kept in its av_back field.
1599 * The age ranking is used by the daemons, which favor older buffers.
1601 afs_int32 afs_biotime = 0;
1603 extern struct buf *afs_asyncbuf;
1604 extern int afs_asyncbuf_cv;
1605 /* This function is called with a list of buffers, threaded through
1606 * the av_forw field. Our goal is to copy the list of buffers into the
1607 * afs_asyncbuf list, sorting buffers into sublists linked by the b_work field.
1608 * Within buffers within the same work group, the guy with the lowest address
1609 * has to be located at the head of the queue; his b_bcount field will also
1610 * be increased to cover all of the buffers in the b_work queue.
1612 #define AIX_VM_BLKSIZE 8192
/*
 * afs_gn_strategy -- queue a chain of buffers (linked via av_forw) onto the
 * global afs_asyncbuf list for the background I/O daemons.
 *
 * For each incoming buffer: age-stamp it, insert it into afs_asyncbuf sorted
 * by disk position within its group of "comparable" buffers (same vnode,
 * same xmem subspace, same flags), then repeatedly merge adjacent comparable
 * requests when the merge keeps transfers block-aligned.  Finally, wake one
 * waiting daemon.  Runs under afs_asyncbuf_lock with interrupts disabled.
 *
 * NOTE(review): this excerpt elides interior lines (braces, continues,
 * returns); the commentary below is limited to what the visible lines show.
 */
1613 afs_gn_strategy(abp, cred)
1615 register struct buf *abp;
1617 register struct buf **lbp, *tbp;
1618 int *lwbp; /* last guy in work chain */
1619 struct buf *nbp, *qbp, *qnbp, *firstComparable;
/* Two buffers may be grouped/merged only when they target the same vnode
 * and xmem subspace, carry identical flags, and neither is page-protected. */
1623 #define EFS_COMPARABLE(x,y) ((x)->b_vp == (y)->b_vp \
1624 && (x)->b_xmemd.subspace_id == (y)->b_xmemd.subspace_id \
1625 && (x)->b_flags == (y)->b_flags \
1626 && !((x)->b_flags & B_PFPROT) \
1627 && !((y)->b_flags & B_PFPROT))
/* interrupt-safe lock: afs_asyncbuf is shared with the I/O daemons */
1629 oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);
1630 for(tbp = abp; tbp; tbp=nbp) {
1631 nbp = tbp->av_forw; /* remember for later */
/* age-stamp: av_back is overloaded to hold the integer afs_biotime via a
 * pointer cast -- a legacy AIX idiom, not a real pointer */
1633 tbp->av_back = (struct buf *) afs_biotime++;
1635 /* first insert the buffer into the afs_async queue. Insert buffer
1636 * sorted within its disk position within a set of comparable buffers.
1637 * Ensure that all comparable buffers are grouped contiguously.
1638 * Later on, we'll merge adjacent buffers into a single request.
1640 firstComparable = NULL;
1641 lbp = &afs_asyncbuf;
/* walk the queue keeping lbp as the link-pointer to patch for insertion */
1642 for(qbp = *lbp; qbp; lbp = &qbp->av_forw, qbp = *lbp) {
1643 if (EFS_COMPARABLE(tbp, qbp)) {
1644 if (!firstComparable) firstComparable = qbp;
1645 /* this buffer is comparable, so see if the next buffer
1646 * is farther in the file; if it is insert before next buffer.
1648 if (tbp->b_blkno < qbp->b_blkno) {
1652 /* If we're at the end of a block of comparable buffers, we
1653 * insert the buffer here to keep all comparable buffers
1656 if (firstComparable)
1660 /* do the insert before qbp now */
1661 tbp->av_forw = *lbp;
1663 if (firstComparable == NULL) {
1664 /* next we're going to do all sorts of buffer merging tricks, but
1665 * here we know we're the only COMPARABLE block in the
1666 * afs_asyncbuf list, so we just skip that and continue with
1667 * the next input buffer.
1671 /* we may have actually added the "new" firstComparable */
1672 if (tbp->av_forw == firstComparable)
1673 firstComparable = tbp;
1675 * when we get here, firstComparable points to the first dude in the
1676 * same vnode and subspace that we (tbp) are in. We go through the
1677 * area of this list with COMPARABLE buffers (a contiguous region) and
1678 * repeatedly merge buffers that are contiguous and in the same block or
1679 * buffers that are contiguous and are both integral numbers of blocks.
1680 * Note that our end goal is to have as big blocks as we can, but we
1681 * must minimize the transfers that are not integral #s of blocks on
1682 * block boundaries, since Episode will do those smaller and/or
1683 * unaligned I/Os synchronously.
1685 * A useful example to consider has the async queue with this in it:
1686 * [8K block, 2 pages] [4K block, 1 page] [4K hole] [8K block, 2 pages]
1687 * If we get a request that fills the 4K hole, we want to merge this
1688 * whole mess into a 24K, 6 page transfer. If we don't, however, we
1689 * don't want to do any merging since adding the 4K transfer to the 8K
1690 * transfer makes the 8K transfer synchronous.
1692 * Note that if there are any blocks whose size is a multiple of
1693 * the file system block size, then we know that such blocks are also
1694 * on block boundaries.
1697 doMerge = 1; /* start the loop */
1698 while(doMerge) { /* loop until an iteration doesn't
1699 * make any more changes */
/* scan the contiguous run of comparable buffers, pairwise */
1701 for (qbp = firstComparable; ; qbp = qnbp) {
1702 qnbp = qbp->av_forw;
1703 if (!qnbp) break; /* we're done */
1704 if (!EFS_COMPARABLE(qbp, qnbp)) break;
1706 /* try to merge qbp and qnbp */
1708 /* first check if both not adjacent go on to next region */
1709 if ((dbtob(qbp->b_blkno) + qbp->b_bcount) != dbtob(qnbp->b_blkno))
1712 /* note if both in the same block, the first byte of leftmost guy
1713 * and last byte of rightmost guy are in the same block.
1715 if ((dbtob(qbp->b_blkno) & ~(AIX_VM_BLKSIZE-1)) ==
1716 ((dbtob(qnbp->b_blkno)+qnbp->b_bcount-1) & ~(AIX_VM_BLKSIZE-1))) {
1717 doMerge = 1; /* both in same block */
1719 else if ((qbp->b_bcount & (AIX_VM_BLKSIZE-1)) == 0
1720 && (qnbp->b_bcount & (AIX_VM_BLKSIZE-1)) == 0) {
1721 doMerge = 1; /* both integral #s of blocks */
1724 register struct buf *xbp;
1726 /* merge both of these blocks together */
1727 /* first set age to the older of the two */
/* NOTE(review): av_back holds an integer age via pointer casts, so this
 * subtraction compares ages, not addresses -- 32-bit AIX assumption */
1728 if ((int) qnbp->av_back - (int) qbp->av_back < 0)
1729 qbp->av_back = qnbp->av_back;
1730 lwbp = &qbp->b_work;
1731 /* find end of qbp's work queue */
1732 for(xbp = (struct buf *)(*lwbp); xbp;
1733 lwbp = &xbp->b_work, xbp = (struct buf *) (*lwbp));
1735 * now setting *lwbp will change the last ptr in the qbp's
1738 qbp->av_forw = qnbp->av_forw; /* splice out qnbp */
1739 qbp->b_bcount += qnbp->b_bcount; /* fix count */
1740 *lwbp = (int) qnbp; /* append qnbp to end */
1742 * note that qnbp is bogus, but it doesn't matter because
1743 * we're going to restart the for loop now.
1745 break; /* out of the for loop */
1749 } /* for loop for all interrupt data */
1750 /* at this point, all I/O has been queued. Wakeup the daemon */
1751 e_wakeup_one((int*) &afs_asyncbuf_cv);
1752 unlock_enable(oldPriority, &afs_asyncbuf_lock);
/*
 * afs_inactive -- vnode-inactive hook: hand the vcache back to the cache
 * manager (afs_InactiveVCache) for cleanup when its last reference is gone.
 * NOTE(review): opening brace/return are elided in this excerpt.
 */
1757 afs_inactive(avc, acred)
1758 register struct vcache *avc;
1759 struct AFS_UCRED *acred;
1761 afs_InactiveVCache(avc, acred);
/* afs_gn_revoke (body fragment): revocation is not implemented by AFS. */
1768 AFS_STATCNT(afs_gn_revoke);
1769 /* NO SUPPORT for this in afs YET! */
/* ACL / PCL gnode operations -- function bodies elided in this excerpt;
 * only the K&R headers are visible here. */
1773 int afs_gn_getacl(vp, uiop, cred)
1782 int afs_gn_setacl(vp, uiop, cred)
1791 int afs_gn_getpcl(vp, uiop, cred)
1800 int afs_gn_setpcl(vp, uiop, cred)
1812 extern struct vfsops Afs_vfsops;
1813 extern struct vnodeops afs_gn_vnodeops;
1814 extern int Afs_init();
1816 #define AFS_CALLOUT_TBL_SIZE 256
1819 * the following additional layer of gorp is due to the fact that the
1820 * filesystem layer no longer obtains the kernel lock for me. I was relying
1821 * on this behavior to avoid having to think about locking.
/*
 * Lock-aware wrappers for the AFS vfs operations.
 * Each wrapper records whether the caller already holds the AFS global
 * lock (ISAFS_GLOCK), dispatches to the corresponding entry in the real
 * Afs_vfsops table, and returns its result.  The lock acquire/release
 * lines are elided in this excerpt -- presumably the glockOwner flag
 * guards taking/dropping the global lock around the call; verify against
 * the full source.
 */
1825 vfs_mount(struct vfs *a, struct ucred *b) {
1826 register glockOwner, ret;
1828 glockOwner = ISAFS_GLOCK();
1831 ret = (*Afs_vfsops.vfs_mount)(a, b);
1839 vfs_unmount(struct vfs *a, int b, struct ucred *c) {
1840 register glockOwner, ret;
1842 glockOwner = ISAFS_GLOCK();
1845 ret = (*Afs_vfsops.vfs_unmount)(a, b, c);
1853 vfs_root(struct vfs *a, struct vnode **b, struct ucred *c) {
1854 register glockOwner, ret;
1856 glockOwner = ISAFS_GLOCK();
1859 ret = (*Afs_vfsops.vfs_root)(a, b, c);
1867 vfs_statfs(struct vfs *a, struct statfs *b, struct ucred *c) {
1868 register glockOwner, ret;
1870 glockOwner = ISAFS_GLOCK();
1873 ret = (*Afs_vfsops.vfs_statfs)(a, b, c);
1881 vfs_sync(struct gfs *a) {
1882 register glockOwner, ret;
1884 glockOwner = ISAFS_GLOCK();
1887 ret = (*Afs_vfsops.vfs_sync)(a);
1894 vfs_vget(struct vfs *a, struct vnode **b, struct fileid *c
1895 , struct ucred *d) {
1896 register glockOwner, ret;
1898 glockOwner = ISAFS_GLOCK();
1901 ret = (*Afs_vfsops.vfs_vget)(a, b, c, d);
1909 vfs_cntl(struct vfs *a, int b, caddr_t c, size_t d, struct ucred *e) {
1910 register glockOwner, ret;
1912 glockOwner = ISAFS_GLOCK();
1915 ret = (*Afs_vfsops.vfs_cntl)(a, b, c, d, e);
1923 vfs_quotactl(struct vfs *a, int b, uid_t c, caddr_t d
1924 , struct ucred *e) {
1925 register glockOwner, ret;
1927 glockOwner = ISAFS_GLOCK();
1930 ret = (*Afs_vfsops.vfs_quotactl)(a, b, c, d, e);
/* vfs_syncvfs exists only on AIX 5.1+ */
1937 #ifdef AFS_AIX51_ENV
1939 vfs_syncvfs(struct gfs *a, struct vfs *b, int c, struct ucred *d)
1941 register glockOwner, ret;
1943 glockOwner = ISAFS_GLOCK();
1946 ret = (*Afs_vfsops.vfs_syncvfs)(a, b, c, d);
/* vfsops table whose entries are the lock-aware vfs_* wrappers defined in
 * this file (initializer entries elided in this excerpt). */
1955 struct vfsops locked_Afs_vfsops = {
1964 #ifdef AFS_AIX51_ENV
/*
 * Lock-aware wrappers for the AFS vnode operations (vn_link .. vn_lookup).
 * Each records whether the caller already holds the AFS global lock and
 * dispatches to the matching afs_gn_vnodeops entry; the lock bracket and
 * return lines are elided in this excerpt.  AFS_AIX51_ENV variants differ
 * only in the 32/64-bit parameter types (int32long64_t and friends).
 */
1970 vn_link(struct vnode *a, struct vnode *b, char *c, struct ucred *d) {
1971 register glockOwner, ret;
1973 glockOwner = ISAFS_GLOCK();
1976 ret = (*afs_gn_vnodeops.vn_link)(a, b, c, d);
1984 #ifdef AFS_AIX51_ENV
1985 vn_mkdir(struct vnode *a, char *b, int32long64_t c, struct ucred *d) {
1987 vn_mkdir(struct vnode *a, char *b, int c, struct ucred *d) {
1989 register glockOwner, ret;
1991 glockOwner = ISAFS_GLOCK();
1994 ret = (*afs_gn_vnodeops.vn_mkdir)(a, b, c, d);
2002 #ifdef AFS_AIX51_ENV
2003 vn_mknod(struct vnode *a, caddr_t b, int32long64_t c, dev_t d, struct ucred *e) {
2005 vn_mknod(struct vnode *a, caddr_t b, int c, dev_t d, struct ucred *e) {
2007 register glockOwner, ret;
2009 glockOwner = ISAFS_GLOCK();
2012 ret = (*afs_gn_vnodeops.vn_mknod)(a, b, c, d, e);
2020 vn_remove(struct vnode *a, struct vnode *b, char *c, struct ucred *d) {
2021 register glockOwner, ret;
2023 glockOwner = ISAFS_GLOCK();
2026 ret = (*afs_gn_vnodeops.vn_remove)(a, b, c, d);
2034 vn_rename(struct vnode *a, struct vnode *b, caddr_t c
2035 , struct vnode *d, struct vnode *e, caddr_t f, struct ucred *g) {
2036 register glockOwner, ret;
2038 glockOwner = ISAFS_GLOCK();
2041 ret = (*afs_gn_vnodeops.vn_rename)(a, b, c, d, e, f, g);
2049 vn_rmdir(struct vnode *a, struct vnode *b, char *c, struct ucred *d) {
2050 register glockOwner, ret;
2052 glockOwner = ISAFS_GLOCK();
2055 ret = (*afs_gn_vnodeops.vn_rmdir)(a, b, c, d);
2063 #ifdef AFS_AIX51_ENV
2064 vn_lookup(struct vnode *a, struct vnode **b, char *c, int32long64_t d,
2066 vn_lookup(struct vnode *a, struct vnode **b, char *c, int d,
2068 struct vattr *v, struct ucred *e) {
2069 register glockOwner, ret;
2071 glockOwner = ISAFS_GLOCK();
2074 ret = (*afs_gn_vnodeops.vn_lookup)(a, b, c, d, v, e);
/* Lock-aware vnode-op wrappers, continued (vn_fid .. vn_access); same
 * glockOwner pattern as above, lock bracket lines elided in this excerpt. */
2082 vn_fid(struct vnode *a, struct fileid *b, struct ucred *c) {
2083 register glockOwner, ret;
2085 glockOwner = ISAFS_GLOCK();
2088 ret = (*afs_gn_vnodeops.vn_fid)(a, b, c);
2096 #ifdef AFS_AIX51_ENV
/* NOTE(review): in every sibling wrapper the AFS_AIX51_ENV branch uses the
 * int32long64_t types, but here the AIX51 branch takes plain ints and the
 * other branch takes int32long64_t/ext_t -- looks swapped; verify against
 * the full source before changing anything. */
2097 vn_open(struct vnode *a, int b, int c, caddr_t *d, struct ucred *e) {
2099 vn_open(struct vnode *a, int32long64_t b, ext_t c, caddr_t *d, struct ucred *e) {
2101 register glockOwner, ret;
2103 glockOwner = ISAFS_GLOCK();
2106 ret = (*afs_gn_vnodeops.vn_open)(a, b, c, d, e);
2114 #ifdef AFS_AIX51_ENV
2115 vn_create(struct vnode *a, struct vnode **b, int32long64_t c, caddr_t d
2116 , int32long64_t e, caddr_t *f, struct ucred *g) {
2118 vn_create(struct vnode *a, struct vnode **b, int c, caddr_t d
2119 , int e, caddr_t *f, struct ucred *g) {
2121 register glockOwner, ret;
2123 glockOwner = ISAFS_GLOCK();
2126 ret = (*afs_gn_vnodeops.vn_create)(a, b, c, d, e, f, g);
2134 vn_hold(struct vnode *a) {
2135 register glockOwner, ret;
2137 glockOwner = ISAFS_GLOCK();
2140 ret = (*afs_gn_vnodeops.vn_hold)(a);
2148 vn_rele(struct vnode *a) {
2149 register glockOwner, ret;
2151 glockOwner = ISAFS_GLOCK();
2154 ret = (*afs_gn_vnodeops.vn_rele)(a);
2162 #ifdef AFS_AIX51_ENV
2163 vn_close(struct vnode *a, int32long64_t b, caddr_t c, struct ucred *d) {
2165 vn_close(struct vnode *a, int b, caddr_t c, struct ucred *d) {
2167 register glockOwner, ret;
2169 glockOwner = ISAFS_GLOCK();
2172 ret = (*afs_gn_vnodeops.vn_close)(a, b, c, d);
2180 #ifdef AFS_AIX51_ENV
2181 vn_map(struct vnode *a, caddr_t b, uint32long64_t c, uint32long64_t d, uint32long64_t e, struct ucred *f) {
2183 vn_map(struct vnode *a, caddr_t b, uint c, uint d, uint e, struct ucred *f) {
2185 register glockOwner, ret;
2187 glockOwner = ISAFS_GLOCK();
2190 ret = (*afs_gn_vnodeops.vn_map)(a, b, c, d, e, f);
2198 #ifdef AFS_AIX51_ENV
2199 vn_unmap(struct vnode *a, int32long64_t b, struct ucred *c) {
2201 vn_unmap(struct vnode *a, int b, struct ucred *c) {
2203 register glockOwner, ret;
2205 glockOwner = ISAFS_GLOCK();
2208 ret = (*afs_gn_vnodeops.vn_unmap)(a, b, c);
2216 #ifdef AFS_AIX51_ENV
2217 vn_access(struct vnode *a, int32long64_t b, int32long64_t c, struct ucred *d) {
2219 vn_access(struct vnode *a, int b, int c, struct ucred *d) {
2221 register glockOwner, ret;
2223 glockOwner = ISAFS_GLOCK();
2226 ret = (*afs_gn_vnodeops.vn_access)(a, b, c, d);
/* Lock-aware vnode-op wrappers, continued (vn_getattr .. vn_select); same
 * glockOwner pattern as above, lock bracket lines elided in this excerpt. */
2234 vn_getattr(struct vnode *a, struct vattr *b, struct ucred *c) {
2235 register glockOwner, ret;
2237 glockOwner = ISAFS_GLOCK();
2240 ret = (*afs_gn_vnodeops.vn_getattr)(a, b, c);
2248 #ifdef AFS_AIX51_ENV
2249 vn_setattr(struct vnode *a, int32long64_t b, int32long64_t c, int32long64_t d, int32long64_t e, struct ucred *f) {
2251 vn_setattr(struct vnode *a, int b, int c, int d, int e, struct ucred *f) {
2253 register glockOwner, ret;
2255 glockOwner = ISAFS_GLOCK();
2258 ret = (*afs_gn_vnodeops.vn_setattr)(a, b, c, d, e, f);
2266 #ifdef AFS_AIX51_ENV
2267 vn_fclear(struct vnode *a, int32long64_t b, offset_t c, offset_t d
2269 vn_fclear(struct vnode *a, int b, offset_t c, offset_t d
2271 , caddr_t e, struct ucred *f) {
2272 register glockOwner, ret;
2274 glockOwner = ISAFS_GLOCK();
2277 ret = (*afs_gn_vnodeops.vn_fclear)(a, b, c, d, e, f);
2285 #ifdef AFS_AIX51_ENV
2286 vn_fsync(struct vnode *a, int32long64_t b, int32long64_t c, struct ucred *d) {
2288 vn_fsync(struct vnode *a, int b, int c, struct ucred *d) {
2290 register glockOwner, ret;
2292 glockOwner = ISAFS_GLOCK();
2295 ret = (*afs_gn_vnodeops.vn_fsync)(a, b, c, d);
2303 #ifdef AFS_AIX51_ENV
2304 vn_ftrunc(struct vnode *a, int32long64_t b, offset_t c, caddr_t d, struct ucred *e) {
2306 vn_ftrunc(struct vnode *a, int b, offset_t c, caddr_t d, struct ucred *e) {
2308 register glockOwner, ret;
2310 glockOwner = ISAFS_GLOCK();
2313 ret = (*afs_gn_vnodeops.vn_ftrunc)(a, b, c, d, e);
2321 #ifdef AFS_AIX51_ENV
2322 vn_rdwr(struct vnode *a, enum uio_rw b, int32long64_t c, struct uio *d
2323 , ext_t e, caddr_t f, struct vattr *v, struct ucred *g) {
2325 vn_rdwr(struct vnode *a, enum uio_rw b, int c, struct uio *d
2326 , int e, caddr_t f, struct vattr *v, struct ucred *g) {
2328 register glockOwner, ret;
2330 glockOwner = ISAFS_GLOCK();
2333 ret = (*afs_gn_vnodeops.vn_rdwr)(a, b, c, d, e, f, v, g);
2341 #ifdef AFS_AIX51_ENV
2342 vn_lockctl(struct vnode *a, offset_t b, struct eflock *c, int32long64_t d
2343 , int (*e)(), ulong32int64_t *f, struct ucred *g) {
2345 vn_lockctl(struct vnode *a, offset_t b, struct eflock *c, int d
2346 , int (*e)(), ulong *f, struct ucred *g) {
2348 register glockOwner, ret;
2350 glockOwner = ISAFS_GLOCK();
2353 ret = (*afs_gn_vnodeops.vn_lockctl)(a, b, c, d, e, f, g);
2361 #ifdef AFS_AIX51_ENV
2362 vn_ioctl(struct vnode *a, int32long64_t b, caddr_t c, size_t d, ext_t e, struct ucred *f) {
2364 vn_ioctl(struct vnode *a, int b, caddr_t c, size_t d, int e, struct ucred *f) {
2366 register glockOwner, ret;
2368 glockOwner = ISAFS_GLOCK();
2371 ret = (*afs_gn_vnodeops.vn_ioctl)(a, b, c, d, e, f);
2379 vn_readlink(struct vnode *a, struct uio *b, struct ucred *c) {
2380 register glockOwner, ret;
2382 glockOwner = ISAFS_GLOCK();
2385 ret = (*afs_gn_vnodeops.vn_readlink)(a, b, c);
2393 #ifdef AFS_AIX51_ENV
2394 vn_select(struct vnode *a, int32long64_t b, ushort c, ushort *d, void (*e)()
2396 vn_select(struct vnode *a, int b, ushort c, ushort *d, void (*e)()
2398 , caddr_t f, struct ucred *g) {
2399 register glockOwner, ret;
2401 glockOwner = ISAFS_GLOCK();
2404 ret = (*afs_gn_vnodeops.vn_select)(a, b, c, d, e, f, g);
/* Lock-aware vnode-op wrappers, continued (vn_symlink .. vn_setpcl); same
 * glockOwner pattern as above, lock bracket lines elided in this excerpt. */
2412 vn_symlink(struct vnode *a, char *b, char *c, struct ucred *d) {
2413 register glockOwner, ret;
2415 glockOwner = ISAFS_GLOCK();
2418 ret = (*afs_gn_vnodeops.vn_symlink)(a, b, c, d);
2426 vn_readdir(struct vnode *a, struct uio *b, struct ucred *c) {
2427 register glockOwner, ret;
2429 glockOwner = ISAFS_GLOCK();
2432 ret = (*afs_gn_vnodeops.vn_readdir)(a, b, c);
2440 #ifdef AFS_AIX51_ENV
2441 vn_revoke(struct vnode *a, int32long64_t b, int32long64_t c, struct vattr *d, struct ucred *e) {
2443 vn_revoke(struct vnode *a, int b, int c, struct vattr *d, struct ucred *e) {
2445 register glockOwner, ret;
2447 glockOwner = ISAFS_GLOCK();
2450 ret = (*afs_gn_vnodeops.vn_revoke)(a, b, c, d, e);
2458 vn_getacl(struct vnode *a, struct uio *b, struct ucred *c) {
2459 register glockOwner, ret;
2461 glockOwner = ISAFS_GLOCK();
2464 ret = (*afs_gn_vnodeops.vn_getacl)(a, b, c);
2472 vn_setacl(struct vnode *a, struct uio *b, struct ucred *c) {
2473 register glockOwner, ret;
2475 glockOwner = ISAFS_GLOCK();
2478 ret = (*afs_gn_vnodeops.vn_setacl)(a, b, c);
2486 vn_getpcl(struct vnode *a, struct uio *b, struct ucred *c) {
2487 register glockOwner, ret;
2489 glockOwner = ISAFS_GLOCK();
2492 ret = (*afs_gn_vnodeops.vn_getpcl)(a, b, c);
2500 vn_setpcl(struct vnode *a, struct uio *b, struct ucred *c) {
2501 register glockOwner, ret;
2503 glockOwner = ISAFS_GLOCK();
2506 ret = (*afs_gn_vnodeops.vn_setpcl)(a, b, c);
2513 extern int afs_gn_strategy();
/* vnodeops table of the lock-aware vn_* wrappers (most slots elided in
 * this excerpt).  afs_gn_strategy is installed directly, WITHOUT a locking
 * wrapper -- the original comment records that wrapping it deadlocked.
 * Unimplemented slots all point at afs_gn_enosys. */
2515 struct vnodeops locked_afs_gn_vnodeops = {
2544 afs_gn_strategy, /* no locking!!! (discovered the hard way) */
2550 afs_gn_enosys, /* vn_seek */
2551 afs_gn_enosys, /* vn_fsync_range */
2552 afs_gn_enosys, /* vn_create_attr */
2553 afs_gn_enosys, /* vn_finfo */
2554 afs_gn_enosys, /* vn_map_lloff */
2555 afs_gn_enosys, /* vn_readdir_eofp */
2556 afs_gn_enosys, /* vn_rdwr_attr */
2557 afs_gn_enosys, /* vn_memcntl */
2558 afs_gn_enosys, /* vn_spare7 */
2559 afs_gn_enosys, /* vn_spare8 */
2560 afs_gn_enosys, /* vn_spare9 */
2561 afs_gn_enosys, /* vn_spareA */
2562 afs_gn_enosys, /* vn_spareB */
2563 afs_gn_enosys, /* vn_spareC */
2564 afs_gn_enosys, /* vn_spareD */
2565 afs_gn_enosys, /* vn_spareE */
2566 afs_gn_enosys /* vn_spareF */
/* AIX 5.1 adds pager entry points to the vnodeops table */
2567 #ifdef AFS_AIX51_ENV
2568 ,afs_gn_enosys, /* pagerBackRange */
2569 afs_gn_enosys, /* pagerGetFileSize */
2570 afs_gn_enosys, /* pagerReadAhead */
2571 afs_gn_enosys, /* pagerWriteBehind */
2572 afs_gn_enosys /* pagerEndCopy */
2576 struct gfs afs_gfs = {
2578 &locked_afs_gn_vnodeops,
2582 GFS_VERSION4 | GFS_REMOTE,