2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "../afs/param.h"
15 #include "../h/systm.h"
16 #include "../h/types.h"
17 #include "../h/errno.h"
18 #include "../h/stat.h"
19 #include "../h/user.h"
21 #include "../h/vattr.h"
22 #include "../h/file.h"
24 #include "../h/chownx.h"
25 #include "../h/systm.h"
26 #include "../h/access.h"
27 #include "../rpc/types.h"
28 #include "../afs/osi_vfs.h"
29 #include "../netinet/in.h"
30 #include "../h/mbuf.h"
31 #include "../h/vmuser.h"
33 #include "../rpc/types.h"
34 #include "../rpc/xdr.h"
36 #include "../afs/stds.h"
37 #include "../afs/afs_osi.h"
38 #define RFTP_INTERNALS 1
39 #include "../afs/volerrors.h"
40 #include "../afsint/afsint.h"
41 #include "../afsint/vldbint.h"
42 #include "../afs/lock.h"
43 #include "../afs/exporter.h"
44 #include "../afs/afs.h"
45 #include "../afs/afs_chunkops.h"
46 #include "../afs/afs_stats.h"
47 #include "../afs/nfsclient.h"
48 #include "../afs/icl.h"
49 #include "../afs/prs_fs.h"
50 #include "../h/flock.h"
54 * declare all the functions so they can be used to init the table
56 /* creation/naming/deletion */
63 /* lookup, file handle stuff */
74 /* manipulate attributes of files */
78 /* data update operations */
86 int afs_gn_readlink();
91 int afs_gn_strategy();
102 * declare a struct vnodeops and initialize it with ptrs to all functions
104 struct vnodeops afs_gn_vnodeops = {
105 /* creation/naming/deletion */
112 /* lookup, file handle stuff */
115 /* access to files */
123 /* manipulate attributes of files */
127 /* data update operations */
141 /* security things */
147 afs_gn_enosys, /* vn_seek */
148 afs_gn_enosys, /* vn_spare0 */
149 afs_gn_enosys, /* vn_spare1 */
150 afs_gn_enosys, /* vn_spare2 */
151 afs_gn_enosys, /* vn_spare3 */
152 afs_gn_enosys, /* vn_spare4 */
153 afs_gn_enosys, /* vn_spare5 */
154 afs_gn_enosys, /* vn_spare6 */
155 afs_gn_enosys, /* vn_spare7 */
156 afs_gn_enosys, /* vn_spare8 */
157 afs_gn_enosys, /* vn_spare9 */
158 afs_gn_enosys, /* vn_spareA */
159 afs_gn_enosys, /* vn_spareB */
160 afs_gn_enosys, /* vn_spareC */
161 afs_gn_enosys, /* vn_spareD */
162 afs_gn_enosys, /* vn_spareE */
163 afs_gn_enosys /* vn_spareF */
165 struct vnodeops *afs_ops = &afs_gn_vnodeops;
169 afs_gn_link(vp, dp, name, cred)
177 AFS_STATCNT(afs_gn_link);
178 error = afs_link(vp, dp, name, cred);
179 afs_Trace3(afs_iclSetp, CM_TRACE_GNLINK, ICL_TYPE_POINTER, (afs_int32)vp,
180 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
186 afs_gn_mkdir(dp, name, mode, cred)
196 AFS_STATCNT(afs_gn_mkdir);
199 va.va_mode = (mode & 07777) & ~get_umask();
200 error = afs_mkdir(dp, name, &va, &vp, cred);
204 afs_Trace4(afs_iclSetp, CM_TRACE_GMKDIR, ICL_TYPE_POINTER, (afs_int32)vp,
205 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
211 afs_gn_mknod(dp, name, mode, dev, cred)
222 AFS_STATCNT(afs_gn_mknod);
224 va.va_type = IFTOVT(mode);
225 va.va_mode = (mode & 07777) & ~get_umask();
227 /**** I'm not sure if suser() should stay here since it makes no sense in AFS; however, the documentation says that one "should be super-user unless making a FIFO file". Other systems such as Sun do this check in the early stages of mknod (before the abstraction), so the effect is equivalent. *****/
228 if (va.va_type != VFIFO && !suser(&error))
230 switch (va.va_type) {
232 error = afs_mkdir(dp, name, &va, &vp, cred);
242 error = afs_create(dp, name, &va, NONEXCL, mode, &vp, cred);
247 afs_Trace4(afs_iclSetp, CM_TRACE_GMKNOD, ICL_TYPE_POINTER, (afs_int32)vp,
248 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
254 afs_gn_remove(vp, dp, name, cred)
255 struct vnode *vp; /* Ignored in AFS */
262 AFS_STATCNT(afs_gn_remove);
263 error = afs_remove(dp, name, cred);
264 afs_Trace3(afs_iclSetp, CM_TRACE_GREMOVE, ICL_TYPE_POINTER, (afs_int32)dp,
265 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
271 afs_gn_rename(vp, dp, name, tp, tdp, tname, cred)
274 struct vnode *vp; /* Ignored in AFS */
275 struct vnode *tp; /* Ignored in AFS */
282 AFS_STATCNT(afs_gn_rename);
283 error = afs_rename(dp, name, tdp, tname, cred);
284 afs_Trace4(afs_iclSetp, CM_TRACE_GRENAME, ICL_TYPE_POINTER, (afs_int32)dp,
285 ICL_TYPE_STRING, name, ICL_TYPE_STRING, tname, ICL_TYPE_LONG, error);
291 afs_gn_rmdir(vp, dp, name, cred)
292 struct vnode *vp; /* Ignored in AFS */
299 AFS_STATCNT(afs_gn_rmdir);
300 error = afs_rmdir(dp, name, cred);
302 if (error == 66 /* 4.3's ENOTEMPTY */)
303 error = EEXIST; /* AIX returns EEXIST where 4.3 used ENOTEMPTY */
305 afs_Trace3(afs_iclSetp, CM_TRACE_GRMDIR, ICL_TYPE_POINTER, (afs_int32)dp,
306 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
312 afs_gn_lookup(dp, vpp, name, flags, vattrp, cred)
313 struct vattr *vattrp;
317 afs_uint32 flags; /* includes FOLLOW... */
322 AFS_STATCNT(afs_gn_lookup);
323 error = afs_lookup(dp, name, vpp, cred);
324 afs_Trace3(afs_iclSetp, CM_TRACE_GLOOKUP, ICL_TYPE_POINTER, (afs_int32)dp,
325 ICL_TYPE_STRING, name, ICL_TYPE_LONG, error);
326 if (vattrp != NULL && error == 0)
327 afs_gn_getattr(*vpp, vattrp, cred);
333 afs_gn_fid(vp, fidp, cred)
340 AFS_STATCNT(afs_gn_fid);
341 error = afs_fid(vp, fidp);
342 afs_Trace3(afs_iclSetp, CM_TRACE_GFID, ICL_TYPE_POINTER, (afs_int32)vp,
343 ICL_TYPE_LONG, (afs_int32)fidp, ICL_TYPE_LONG, error);
349 afs_gn_open(vp, flags, ext, vinfop, cred)
352 int ext; /* Ignored in AFS */
353 struct ucred **vinfop; /* return ptr for fp->f_vinfo, used as fp->f_cred */
358 struct vcache *tvp = (struct vcache *)vp;
361 AFS_STATCNT(afs_gn_open);
363 if ((flags & FREAD)) modes |= R_ACC;
364 if ((flags & FEXEC)) modes |= X_ACC;
365 if ((flags & FWRITE) || (flags & FTRUNC)) modes |= W_ACC;
367 while ((flags & FNSHARE) && tvp->opens) {
368 if (!(flags & FDELAY)) {
372 afs_osi_Sleep(&tvp->opens);
375 error = afs_access(vp, modes, cred);
380 error = afs_open(&vp, flags, cred);
382 if (flags & FTRUNC) {
385 error = afs_setattr(vp, &va, cred);
389 tvp->states |= CNSHARE;
392 *vinfop = cred; /* fp->f_vinfo is like fp->f_cred in suns */
395 /* an error occurred; we've told CM that the file
396 * is open, so close it now so that open and
397 * writer counts are correct. Ignore error code,
398 * as it is likely to fail (the setattr just did).
400 afs_close(vp, flags, cred);
405 afs_Trace3(afs_iclSetp, CM_TRACE_GOPEN, ICL_TYPE_POINTER, (afs_int32)vp,
406 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
412 afs_gn_create(dp, vpp, flags, name, mode, vinfop, cred)
418 struct ucred **vinfop; /* return ptr for fp->f_vinfo, used as fp->f_cred */
422 enum vcexcl exclusive;
425 AFS_STATCNT(afs_gn_create);
426 if ((flags & (O_EXCL|O_CREAT)) == (O_EXCL|O_CREAT))
432 va.va_mode = (mode & 07777) & ~get_umask();
433 if ((flags & FREAD)) modes |= R_ACC;
434 if ((flags & FEXEC)) modes |= X_ACC;
435 if ((flags & FWRITE) || (flags & FTRUNC)) modes |= W_ACC;
436 error = afs_create(dp, name, &va, exclusive, modes, vpp, cred);
440 /* 'cr_luid' is a flag (when the request comes through the NFS server it is set to
441 * RMTUSER_REQ) that determines whether we should call afs_open(). We shouldn't
442 * call it for NFS traffic, since the close will never happen and we would
443 * therefore never flush the files out to the server! Gross, but the simplest
444 * solution we came up with. */
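/* Sketch of the flow described above (a restatement of the surrounding code,
 * not part of the original file): only local requests get the explicit
 * vop_open work; NFS-translator requests (cr_luid == RMTUSER_REQ) skip it
 * because no close will ever follow.
 *
 *   if (cred->cr_luid != RMTUSER_REQ) {     // local open
 *       *vinfop = cred;                     // save user creds in fp->f_vinfo
 *       error = afs_open(vpp, flags, cred);
 *   }
 */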
445 if (cred->cr_luid != RMTUSER_REQ) {
446 while ((flags & FNSHARE) && ((struct vcache *)*vpp)->opens) {
447 if (!(flags & FDELAY))
449 afs_osi_Sleep(&((struct vcache *)*vpp)->opens);
451 /* Since the standard copen() for BSD vnode kernels does a
452 * vop_open after the vop_create, we must do the open here, since there
453 * is work in afs_open that we need; for example, advancing the
454 * execsOrWriters flag (else we'll be treated as the sun's "core"
456 *vinfop = cred; /* save user creds in fp->f_vinfo */
457 error = afs_open(vpp, flags, cred);
459 afs_Trace4(afs_iclSetp, CM_TRACE_GCREATE, ICL_TYPE_POINTER, (afs_int32)dp,
460 ICL_TYPE_STRING, name, ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
469 AFS_STATCNT(afs_gn_hold);
480 struct vcache *vcp = (struct vcache *)vp;
483 AFS_STATCNT(afs_gn_rele);
484 if (vp->v_count == 0)
485 osi_Panic("afs_rele: zero v_count");
486 if (--(vp->v_count) == 0) {
487 if (vcp->states & CPageHog) {
489 vcp->states &= ~CPageHog;
491 error = afs_inactive(vp, 0);
498 afs_gn_close(vp, flags, vinfo, cred)
501 caddr_t vinfo; /* Ignored in AFS */
505 struct vcache *tvp = (struct vcache *)vp;
507 AFS_STATCNT(afs_gn_close);
509 if (flags & FNSHARE) {
510 tvp->states &= ~CNSHARE;
511 afs_osi_Wakeup(&tvp->opens);
514 error = afs_close(vp, flags, cred);
515 afs_Trace3(afs_iclSetp, CM_TRACE_GCLOSE, ICL_TYPE_POINTER, (afs_int32)vp,
516 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
522 afs_gn_map(vp, addr, len, off, flag, cred)
525 u_int len, off, flag;
528 struct vcache *vcp = (struct vcache *)vp;
529 struct vrequest treq;
531 AFS_STATCNT(afs_gn_map);
533 if (error = afs_InitReq(&treq, cred)) return error;
534 error = afs_VerifyVCache(vcp, &treq);
536 return afs_CheckCode(error, &treq, 49);
538 osi_FlushPages(vcp, cred); /* XXX ensure old pages are gone XXX */
539 ObtainWriteLock(&vcp->lock, 401);
540 vcp->states |= CMAPPED; /* flag cleared at afs_inactive */
542 * We map the segment into our address space using the handle returned by vm_create.
545 /* Consider V_INTRSEG too for interrupts */
546 if (error = vms_create(&vcp->segid, V_CLIENT, vcp->v.v_gnode, vcp->m.Length, 0, 0)) {
547 ReleaseWriteLock(&vcp->lock);
550 vcp->vmh = SRVAL(vcp->segid, 0, 0);
552 vcp->v.v_gnode->gn_seg = vcp->segid; /* XXX Important XXX */
553 if (flag & SHM_RDONLY) {
554 vp->v_gnode->gn_mrdcnt++;
557 vp->v_gnode->gn_mwrcnt++;
560 * We keep the caller's credentials since an async daemon will handle the
561 * request at some point. We assume that the same credentials will be used.
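/* Sketch of the cred-stashing pattern used here (the crhold/crfree calls are
 * an assumption about the elided lines; they are the usual AIX cred
 * reference-count primitives):
 *
 *   if (!vcp->credp || (vcp->credp != cred)) {
 *       crhold(cred);                       // reference for the vcache
 *       if (vcp->credp) {
 *           struct ucred *crp = vcp->credp;
 *           vcp->credp = (struct ucred *)0;
 *           crfree(crp);                    // drop the old reference
 *       }
 *       vcp->credp = cred;
 *   }
 */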
563 if (!vcp->credp || (vcp->credp != cred)) {
566 struct ucred *crp = vcp->credp;
567 vcp->credp = (struct ucred *)0;
572 ReleaseWriteLock(&vcp->lock);
574 afs_Trace4(afs_iclSetp, CM_TRACE_GMAP, ICL_TYPE_POINTER, (afs_int32)vp,
575 ICL_TYPE_LONG, addr, ICL_TYPE_LONG, len, ICL_TYPE_LONG, off);
581 afs_gn_unmap(vp, flag, cred)
586 struct vcache *vcp = (struct vcache *)vp;
587 AFS_STATCNT(afs_gn_unmap);
588 ObtainWriteLock(&vcp->lock, 402);
589 if (flag & SHM_RDONLY) {
590 vp->v_gnode->gn_mrdcnt--;
591 if (vp->v_gnode->gn_mrdcnt <=0) vp->v_gnode->gn_mrdcnt = 0;
594 vp->v_gnode->gn_mwrcnt--;
595 if (vp->v_gnode->gn_mwrcnt <=0) vp->v_gnode->gn_mwrcnt = 0;
597 ReleaseWriteLock(&vcp->lock);
605 afs_gn_access(vp, mode, who, cred)
614 AFS_STATCNT(afs_gn_access);
618 error = afs_access(vp, mode, cred);
620 /* Additional testing */
621 if (who == ACC_OTHERS || who == ACC_ANY) {
622 error = afs_getattr(vp, &vattr, cred);
624 if (who == ACC_ANY) {
625 if (((vattr.va_mode >> 6) & mode) == mode) {
630 if (((vattr.va_mode >> 3) & mode) == mode)
635 } else if (who == ACC_ALL) {
636 error = afs_getattr(vp, &vattr, cred);
638 if ((!((vattr.va_mode >> 6) & mode)) || (!((vattr.va_mode >> 3) & mode)) ||
639 (!(vattr.va_mode & mode)))
648 afs_Trace3(afs_iclSetp, CM_TRACE_GACCESS, ICL_TYPE_POINTER, (afs_int32)vp,
649 ICL_TYPE_LONG, mode, ICL_TYPE_LONG, error);
655 afs_gn_getattr(vp, vattrp, cred)
657 struct vattr *vattrp;
662 AFS_STATCNT(afs_gn_getattr);
663 error = afs_getattr(vp, vattrp, cred);
664 afs_Trace2(afs_iclSetp, CM_TRACE_GGETATTR, ICL_TYPE_POINTER, (afs_int32)vp,
665 ICL_TYPE_LONG, error);
671 afs_gn_setattr(vp, op, arg1, arg2, arg3, cred)
682 AFS_STATCNT(afs_gn_setattr);
690 if ((arg1 & T_OWNER_AS_IS) == 0)
692 if ((arg1 & T_GROUP_AS_IS) == 0)
697 error = afs_access(vp, VWRITE, cred);
701 if (arg1 & T_SETTIME) {
702 va.va_atime.tv_sec = time;
703 va.va_mtime.tv_sec = time;
705 va.va_atime = *(struct timestruc_t *) arg2;
706 va.va_mtime = *(struct timestruc_t *) arg3;
713 error = afs_setattr(vp, &va, cred);
714 afs_Trace2(afs_iclSetp, CM_TRACE_GSETATTR, ICL_TYPE_POINTER, (afs_int32)vp,
715 ICL_TYPE_LONG, error);
720 char zero_buffer[PAGESIZE];
722 afs_gn_fclear(vp, flags, offset, length, vinfo, cred)
730 int i, len, error = 0;
733 static int fclear_init =0;
734 register struct vcache *avc = (struct vcache *)vp;
736 AFS_STATCNT(afs_gn_fclear);
738 memset(zero_buffer, 0, PAGESIZE);
742 * Don't clear past ulimit
744 if (offset + length > get_ulimit())
747 /* Flush all pages first */
750 vm_flushp(avc->segid, 0, MAXFSIZE/PAGESIZE - 1);
751 vms_iowait(avc->vmh);
754 uio.afsio_offset = offset;
755 for (i = offset; i < offset + length; i = uio.afsio_offset) {
756 len = offset + length - i;
757 iov.iov_len = (len > PAGESIZE) ? PAGESIZE : len;
758 iov.iov_base = zero_buffer;
759 uio.afsio_iov = &iov;
760 uio.afsio_iovcnt = 1;
761 uio.afsio_seg = AFS_UIOSYS;
762 uio.afsio_resid = iov.iov_len;
763 if (error = afs_rdwr(vp, &uio, UIO_WRITE, 0, cred))
766 afs_Trace4(afs_iclSetp, CM_TRACE_GFCLEAR, ICL_TYPE_POINTER, (afs_int32)vp,
767 ICL_TYPE_LONG, offset, ICL_TYPE_LONG, length, ICL_TYPE_LONG, error);
773 afs_gn_fsync(vp, flags, vinfo, cred)
775 int flags; /* Not used by AFS */
776 caddr_t vinfo; /* Not used by AFS */
781 AFS_STATCNT(afs_gn_fsync);
782 error = afs_fsync(vp, cred);
783 afs_Trace3(afs_iclSetp, CM_TRACE_GFSYNC, ICL_TYPE_POINTER, (afs_int32)vp,
784 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, error);
790 afs_gn_ftrunc(vp, flags, length, vinfo, cred)
792 int flags; /* Ignored in AFS */
794 caddr_t vinfo; /* Ignored in AFS */
800 AFS_STATCNT(afs_gn_ftrunc);
803 error = afs_setattr(vp, &va, cred);
804 afs_Trace4(afs_iclSetp, CM_TRACE_GFTRUNC, ICL_TYPE_POINTER, (afs_int32)vp,
805 ICL_TYPE_LONG, flags,
806 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length),
807 ICL_TYPE_LONG, error);
811 /* Min size of a file which is dumping core before we declare it a page hog. */
812 #define MIN_PAGE_HOG_SIZE 8388608
814 int afs_gn_rdwr(vp, op, flags, ubuf, ext, vinfo, vattrp, cred)
819 int ext; /* Ignored in AFS */
820 caddr_t vinfo; /* Ignored in AFS */
821 struct vattr *vattrp;
824 register struct vcache *vcp = (struct vcache *)vp;
825 struct vrequest treq;
829 AFS_STATCNT(afs_gn_rdwr);
832 if (op == UIO_WRITE) {
833 afs_Trace2(afs_iclSetp, CM_TRACE_GRDWR1,
834 ICL_TYPE_POINTER, (afs_int32)vp,
835 ICL_TYPE_LONG, vcp->vc_error);
836 return vcp->vc_error;
841 ObtainSharedLock(&vcp->lock, 507);
843 * We keep the caller's credentials since an async daemon will handle the
844 * request at some point. We assume that the same credentials will be used.
845 * If this is being called from an NFS server thread, then dupe the
846 * cred and only use that copy in calls and for the stash.
848 if (!vcp->credp || (vcp->credp != cred)) {
849 #ifdef AFS_AIX_IAUTH_ENV
850 if (AFS_NFSXLATORREQ(cred)) {
851 /* Must be able to use cred later, so dupe it so that nfs server
852 * doesn't overwrite its contents.
858 crhold(cred); /* Bump refcount for reference in vcache */
862 UpgradeSToWLock(&vcp->lock, 508);
864 vcp->credp = (struct ucred *)0;
865 ConvertWToSLock(&vcp->lock);
870 ReleaseSharedLock(&vcp->lock);
873 * XXX Is the following really required?? XXX
875 if (error = afs_InitReq(&treq, cred)) return error;
876 if (error = afs_VerifyVCache(vcp, &treq))
877 return afs_CheckCode(error, &treq, 50);
878 osi_FlushPages(vcp, cred); /* Flush old pages */
880 if (AFS_NFSXLATORREQ(cred)) {
883 if (op == UIO_READ) {
884 if (!afs_AccessOK(vcp, PRSFS_READ, &treq,
885 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
894 * We have to bump the open/exwriters field here courtesy of the nfs xlator
895 * because there are no open/close NFS RPCs to call our afs_open/close.
896 * We do a similar thing on the afs_read/write interface.
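/* Sketch of the bump described above (the afs_FakeOpen() call in the write
 * branch is an assumption about the elided lines; afs_FakeClose() appears
 * below after the transfer):
 *
 *   if (op == UIO_WRITE) {                  // before the transfer
 *       ObtainWriteLock(&vcp->lock, 240);
 *       vcp->states |= CDirty;
 *       afs_FakeOpen(vcp);                  // bump open/execsOrWriters
 *       ReleaseWriteLock(&vcp->lock);
 *   }
 *   error = afs_vm_rdwr(vp, ubuf, op, flags, cred);
 *   if (op == UIO_WRITE) {                  // after the transfer
 *       afs_FakeClose(vcp, cred);           // undo the fake open
 *   }
 */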
898 if (op == UIO_WRITE) {
899 #ifdef AFS_64BIT_CLIENT
900 if (ubuf->afsio_offset < afs_vmMappingEnd) {
901 #endif /* AFS_64BIT_CLIENT */
902 ObtainWriteLock(&vcp->lock,240);
903 vcp->states |= CDirty; /* Set the dirty bit */
905 ReleaseWriteLock(&vcp->lock);
906 #ifdef AFS_64BIT_CLIENT
908 #endif /* AFS_64BIT_CLIENT */
911 error = afs_vm_rdwr(vp, ubuf, op, flags, cred);
913 if (op == UIO_WRITE) {
914 #ifdef AFS_64BIT_CLIENT
915 if (ubuf->afsio_offset < afs_vmMappingEnd) {
916 #endif /* AFS_64BIT_CLIENT */
917 ObtainWriteLock(&vcp->lock,241);
918 afs_FakeClose(vcp, cred); /* XXXX For nfs trans and cores XXXX */
919 ReleaseWriteLock(&vcp->lock);
920 #ifdef AFS_64BIT_CLIENT
922 #endif /* AFS_64BIT_CLIENT */
924 if (vattrp != NULL && error == 0)
925 afs_gn_getattr(vp, vattrp, cred);
927 afs_Trace4(afs_iclSetp, CM_TRACE_GRDWR, ICL_TYPE_POINTER, (afs_int32)vp,
928 ICL_TYPE_LONG, flags, ICL_TYPE_LONG, op, ICL_TYPE_LONG, error);
935 #define AFS_MAX_VM_CHUNKS 10
936 afs_vm_rdwr(vp, uiop, rw, ioflag, credp)
937 register struct vnode *vp;
943 register afs_int32 code = 0;
946 afs_size_t fileSize, xfrOffset, offset, old_offset;
948 #ifdef AFS_64BIT_CLIENT
949 afs_size_t finalOffset;
952 #endif /* AFS_64BIT_CLIENT */
953 register struct vcache *vcp = (struct vcache *)vp;
955 afs_size_t start_offset;
956 afs_int32 save_resid = uiop->afsio_resid;
957 int first_page, last_page, pages;
960 struct vrequest treq;
962 if (code = afs_InitReq(&treq, credp)) return code;
964 /* special case an easy transfer; apparently a lot of these are done */
965 if ((xfrSize=uiop->afsio_resid) == 0) return 0;
967 ObtainReadLock(&vcp->lock);
968 fileSize = vcp->m.Length;
969 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
970 uiop->afsio_offset = fileSize;
972 /* compute xfrOffset now, and do some checks */
973 xfrOffset = uiop->afsio_offset;
974 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
979 #ifndef AFS_64BIT_CLIENT
980 /* check for "file too big" error, which should really be done above us */
981 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
985 #endif /* AFS_64BIT_CLIENT */
987 #ifdef AFS_64BIT_CLIENT
988 if (xfrOffset + xfrSize > afs_vmMappingEnd) {
989 if (xfrOffset < afs_vmMappingEnd) {
990 /* special case of a buffer crossing the VM mapping line */
992 struct iovec tvec[16]; /* Should have access to #define */
996 finalOffset = xfrOffset + xfrSize;
997 tsize = (afs_size_t) (xfrOffset + xfrSize - afs_vmMappingEnd);
998 afsio_copy(uiop, &tuio, tvec);
999 afsio_skip(&tuio, xfrSize - tsize);
1000 afsio_trim(&tuio, tsize);
1001 tuio.afsio_offset = afs_vmMappingEnd;
1002 ReleaseReadLock(&vcp->lock);
1003 ObtainWriteLock(&vcp->lock,243);
1004 afs_FakeClose(vcp, credp); /* XXXX For nfs trans and cores XXXX */
1005 ReleaseWriteLock(&vcp->lock);
1006 code = afs_direct_rdwr(vp, &tuio, rw, ioflag, credp);
1007 ObtainWriteLock(&vcp->lock,244);
1008 afs_FakeOpen(vcp); /* XXXX For nfs trans and cores XXXX */
1009 ReleaseWriteLock(&vcp->lock);
1010 ObtainReadLock(&vcp->lock);
1011 if (code) goto fail;
1012 xfrSize = (afs_size_t) (afs_vmMappingEnd - xfrOffset);
1013 afsio_trim(uiop, xfrSize);
1015 ReleaseReadLock(&vcp->lock);
1016 code = afs_direct_rdwr(vp, uiop, rw, ioflag, credp);
1020 #endif /* AFS_64BIT_CLIENT */
1023 /* Consider V_INTRSEG too for interrupts */
1024 if (code = vms_create(&vcp->segid, V_CLIENT, vcp->v.v_gnode,
1025 vcp->m.Length, 0, 0)) {
1028 vcp->vmh = SRVAL(vcp->segid, 0, 0);
1030 if (rw == UIO_READ) {
1031 /* don't read past EOF */
1032 if (xfrSize+xfrOffset > fileSize)
1033 xfrSize = fileSize - xfrOffset;
1034 if (xfrSize <= 0) goto fail;
1035 ReleaseReadLock(&vcp->lock);
1036 #ifdef AFS_64BIT_CLIENT
1037 toffset = xfrOffset;
1038 uiop->afsio_offset = xfrOffset;
1039 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE,
1040 ICL_TYPE_POINTER, vcp,
1041 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(xfrOffset),
1042 ICL_TYPE_INT32, xfrSize);
1044 code = vm_move(vcp->segid, toffset, xfrSize, rw, uiop);
1045 #else /* AFS_64BIT_CLIENT */
1047 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
1048 #endif /* AFS_64BIT_CLIENT */
1051 * If at a chunk boundary and staying within chunk,
1052 * start prefetch of next chunk.
1054 if (counter == 0 || AFS_CHUNKOFFSET(xfrOffset) == 0
1055 && xfrSize <= AFS_CHUNKSIZE(xfrOffset)) {
1056 ObtainWriteLock(&vcp->lock,407);
1057 tdc = afs_FindDCache(vcp, xfrOffset);
1059 if (!(tdc->mflags & DFNextStarted))
1060 afs_PrefetchChunk(vcp, tdc, credp, &treq);
1063 ReleaseWriteLock(&vcp->lock);
1065 #ifdef AFS_64BIT_CLIENT
1067 uiop->afsio_offset = finalOffset;
1069 #endif /* AFS_64BIT_CLIENT */
1074 start_offset = uiop->afsio_offset;
1075 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE,
1076 ICL_TYPE_POINTER, vcp,
1077 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(start_offset),
1078 ICL_TYPE_INT32, xfrSize);
1079 ReleaseReadLock(&vcp->lock);
1080 ObtainWriteLock(&vcp->lock,400);
1081 vcp->m.Date = osi_Time(); /* Set file date (for ranlib) */
1083 /* un-protect last page. */
1084 last_page = vcp->m.Length/PAGESIZE;
1085 vm_protectp(vcp->vmh, last_page, 1, FILEKEY);
1086 if (xfrSize + xfrOffset > fileSize) {
1087 vcp->m.Length = xfrSize+xfrOffset;
1089 if ((!(vcp->states & CPageHog)) && (xfrSize >= MIN_PAGE_HOG_SIZE)) {
1091 vcp->states |= CPageHog;
1093 ReleaseWriteLock(&vcp->lock);
1095 /* If the write will fit into a single chunk we'll write all of it
1096 * at once. Otherwise, we'll write one chunk at a time, flushing
1097 * some of it to disk.
1101 /* Only create a page to avoid excess VM access if we're writing a
1102 * small file which is either new or completely overwrites the
1105 if ((xfrOffset == 0) && (xfrSize < PAGESIZE) && (xfrSize >= fileSize) &&
1106 (vcp->v.v_gnode->gn_mwrcnt == 0) &&
1107 (vcp->v.v_gnode->gn_mrdcnt == 0)) {
1108 (void) vm_makep(vcp->segid, 0);
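/* Per-iteration transfer size in the loop below, restated (the initial
 * "len = xfrSize" assignment is among the elided lines and is an assumption
 * consistent with the test that follows):
 *
 *   offset = AFS_CHUNKBASE(xfrOffset);           // start of the current chunk
 *   len = xfrSize;                               // bytes still to transfer
 *   if (AFS_CHUNKSIZE(xfrOffset) <= len)         // cap at the chunk boundary
 *       len = AFS_CHUNKSIZE(xfrOffset) - (xfrOffset - offset);
 */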
1111 while (xfrSize > 0) {
1112 offset = AFS_CHUNKBASE(xfrOffset);
1115 if (AFS_CHUNKSIZE(xfrOffset) <= len)
1116 len = AFS_CHUNKSIZE(xfrOffset) - (xfrOffset - offset);
1118 if (len == xfrSize) {
1119 /* All data goes to this one chunk. */
1121 old_offset = uiop->afsio_offset;
1122 #ifdef AFS_64BIT_CLIENT
1123 uiop->afsio_offset = xfrOffset;
1124 toffset = xfrOffset;
1125 code = vm_move(vcp->segid, toffset, xfrSize, rw, uiop);
1126 #else /* AFS_64BIT_CLIENT */
1127 code = vm_move(vcp->segid, xfrOffset, xfrSize, rw, uiop);
1128 #endif /* AFS_64BIT_CLIENT */
1134 /* Write just one chunk's worth of data. */
1136 struct iovec tvec[16]; /* Should have access to #define */
1138 /* Purge dirty chunks of file if there are too many dirty chunks.
1139 * Inside the write loop, we only do this at a chunk boundary.
1140 * Clean up partial chunk if necessary at end of loop.
1142 if (counter > 0 && code == 0 && xfrOffset == offset) {
1143 ObtainWriteLock(&vcp->lock,403);
1144 code = afs_DoPartialWrite(vcp, &treq);
1145 vcp->states |= CDirty;
1146 ReleaseWriteLock(&vcp->lock);
1150 afsio_copy(uiop, &tuio, tvec);
1151 afsio_trim(&tuio, len);
1152 tuio.afsio_offset = xfrOffset;
1155 old_offset = uiop->afsio_offset;
1156 #ifdef AFS_64BIT_CLIENT
1157 toffset = xfrOffset;
1158 code = vm_move(vcp->segid, toffset, len, rw, &tuio);
1159 #else /* AFS_64BIT_CLIENT */
1160 code = vm_move(vcp->segid, xfrOffset, len, rw, &tuio);
1161 #endif /* AFS_64BIT_CLIENT */
1163 len -= tuio.afsio_resid;
1164 afsio_skip(uiop, len);
1169 first_page = old_offset >> PGSHIFT;
1170 pages = 1 + ((old_offset + (len - 1)) >> PGSHIFT) - first_page;
1171 afs_Trace3(afs_iclSetp, CM_TRACE_VMWRITE2,
1172 ICL_TYPE_POINTER, (afs_int32) vcp,
1173 ICL_TYPE_INT32, first_page,
1174 ICL_TYPE_INT32, pages);
1176 code = vm_writep(vcp->segid, first_page, pages);
1177 if (++count > AFS_MAX_VM_CHUNKS) {
1179 vms_iowait(vcp->segid);
1187 vms_iowait(vcp->segid);
1191 ObtainWriteLock(&vcp->lock,242);
1192 if (code == 0 && (vcp->states & CDirty)) {
1193 code = afs_DoPartialWrite(vcp, &treq);
1195 vm_protectp(vcp->vmh, last_page, 1, RDONLY);
1196 ReleaseWriteLock(&vcp->lock);
1198 /* If requested, fsync the file after every write */
1200 afs_fsync(vp, credp);
1202 ObtainReadLock(&vcp->lock);
1203 if (vcp->vc_error) {
1204 /* Pretend we didn't write anything. We need to get the error back to
1205 * the user. If we don't, it's possible for a write that hit a quota
1206 * error to appear to succeed and for the file to be closed without the
1207 * user ever seeing the store error. And the AIX syscall layer clears
1208 * the error if anything was written.
1210 code = vcp->vc_error;
1211 if (code == EDQUOT || code == ENOSPC)
1212 uiop->afsio_resid = save_resid;
1214 #ifdef AFS_64BIT_CLIENT
1216 uiop->afsio_offset = finalOffset;
1218 #endif /* AFS_64BIT_CLIENT */
1221 ReleaseReadLock(&vcp->lock);
1222 afs_Trace2(afs_iclSetp, CM_TRACE_VMWRITE3,
1223 ICL_TYPE_POINTER, vcp,
1224 ICL_TYPE_INT32, code);
1229 afs_direct_rdwr(vp, uiop, rw, ioflag, credp)
1230 register struct vnode *vp;
1234 struct ucred *credp;
1236 register afs_int32 code = 0;
1238 afs_size_t fileSize, xfrOffset, offset, old_offset;
1239 struct vcache *vcp = (struct vcache *)vp;
1240 afs_int32 save_resid = uiop->afsio_resid;
1241 struct vrequest treq;
1243 if (code = afs_InitReq(&treq, credp)) return code;
1245 /* special case an easy transfer; apparently a lot of these are done */
1246 if ((xfrSize=uiop->afsio_resid) == 0) return 0;
1248 ObtainReadLock(&vcp->lock);
1249 fileSize = vcp->m.Length;
1250 if (rw == UIO_WRITE && (ioflag & IO_APPEND)) { /* handle IO_APPEND mode */
1251 uiop->afsio_offset = fileSize;
1253 /* compute xfrOffset now, and do some checks */
1254 xfrOffset = uiop->afsio_offset;
1255 if (xfrOffset < 0 || xfrOffset + xfrSize < 0) {
1257 ReleaseReadLock(&vcp->lock);
1261 /* check for "file too big" error, which should really be done above us */
1263 if (rw == UIO_WRITE && xfrSize + fileSize > get_ulimit()) {
1265 ReleaseReadLock(&vcp->lock);
1269 ReleaseReadLock(&vcp->lock);
1270 if (rw == UIO_WRITE) {
1271 ObtainWriteLock(&vcp->lock,400);
1272 vcp->m.Date = osi_Time(); /* Set file date (for ranlib) */
1274 if (xfrSize + xfrOffset > fileSize) {
1275 vcp->m.Length = xfrSize+xfrOffset;
1277 ReleaseWriteLock(&vcp->lock);
1279 afs_Trace3(afs_iclSetp, CM_TRACE_DIRECTRDWR,
1280 ICL_TYPE_POINTER, (afs_int32)vp,
1281 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(uiop->afsio_offset),
1282 ICL_TYPE_LONG, uiop->afsio_resid);
1283 code = afs_rdwr(vp, uiop, rw, ioflag, credp);
1285 uiop->afsio_resid = save_resid;
1287 uiop->afsio_offset = xfrOffset + xfrSize;
1288 if (uiop->afsio_resid > 0) {
1289 /* we should zero the remaining buffer here */
1290 uiop->afsio_resid = 0;
1292 /* Purge dirty chunks of file if there are too many dirty chunks.
1293 * Inside the write loop, we only do this at a chunk boundary.
1294 * Clean up partial chunk if necessary at end of loop.
1296 if (AFS_CHUNKBASE(uiop->afsio_offset) != AFS_CHUNKBASE(xfrOffset)) {
1297 ObtainWriteLock(&vcp->lock,402);
1298 code = afs_DoPartialWrite(vcp, &treq);
1299 vcp->states |= CDirty;
1300 ReleaseWriteLock(&vcp->lock);
1309 static int lock_normalize(vp, lckdat, offset, cred)
1311 struct eflock *lckdat;
1318 switch(lckdat->l_whence) {
1322 lckdat->l_start += (off_t) offset;
1325 code = afs_getattr(vp, &vattr, cred);
1326 if (code != 0) return code;
1327 lckdat->l_start += (off_t) vattr.va_size;
1329 default: return EINVAL;
1331 lckdat->l_whence = 0;
1337 afs_gn_lockctl(vp, offset, lckdat, cmd, ignored_fcn, ignored_id, cred)
1338 void (*ignored_fcn)();
1342 struct eflock *lckdat;
1347 struct vattr *attrs;
1349 AFS_STATCNT(afs_gn_lockctl);
1350 /* Convert from AIX's cmd to standard lockctl lock types... */
1353 else if (cmd & SETFLCK) {
1358 flkd.l_type = lckdat->l_type;
1359 flkd.l_whence = lckdat->l_whence;
1360 flkd.l_start = lckdat->l_start;
1361 flkd.l_len = lckdat->l_len;
1362 flkd.l_pid = lckdat->l_pid;
1363 flkd.l_sysid = lckdat->l_sysid;
1365 if (flkd.l_start != lckdat->l_start || flkd.l_len != lckdat->l_len)
1367 if (error = lock_normalize(vp, &flkd, offset, cred))
1369 error = afs_lockctl(vp, &flkd, ncmd, cred);
1370 lckdat->l_type = flkd.l_type;
1371 lckdat->l_whence = flkd.l_whence;
1372 lckdat->l_start = flkd.l_start;
1373 lckdat->l_len = flkd.l_len;
1374 lckdat->l_pid = flkd.l_pid;
1375 lckdat->l_sysid = flkd.l_sysid;
1376 afs_Trace3(afs_iclSetp, CM_TRACE_GLOCKCTL, ICL_TYPE_POINTER, (afs_int32)vp,
1377 ICL_TYPE_LONG, ncmd, ICL_TYPE_LONG, error);
1382 /* NOTE: In the nfs glue routine (nfs_gn2sun.c) the order was wrong (vp, flags, cmd, arg, ext); was that another typo? */
1383 int afs_gn_ioctl(vp, cmd, arg, flags, channel, ext)
1387 int flags; /* Ignored in AFS */
1388 int channel; /* Ignored in AFS */
1389 int ext; /* Ignored in AFS */
1392 AFS_STATCNT(afs_gn_ioctl);
1393 /* This seems to be a perfect fit for our ioctl redirection (afs_xioctl hack); thus the ioctl(2) entry in sysent.c is unaffected in the aix/afs port. */
1394 error = afs_ioctl(vp, cmd, arg);
1395 afs_Trace3(afs_iclSetp, CM_TRACE_GIOCTL, ICL_TYPE_POINTER, (afs_int32)vp,
1396 ICL_TYPE_LONG, cmd, ICL_TYPE_LONG, error);
1402 afs_gn_readlink(vp, uiop, cred)
1409 AFS_STATCNT(afs_gn_readlink);
1410 error = afs_readlink(vp, uiop, cred);
1411 afs_Trace2(afs_iclSetp, CM_TRACE_GREADLINK, ICL_TYPE_POINTER, (afs_int32)vp,
1412 ICL_TYPE_LONG, error);
1418 afs_gn_select(vp, which, vinfo, mpx)
1424 AFS_STATCNT(afs_gn_select);
1425 /* NO SUPPORT for this in afs YET! */
1431 afs_gn_symlink(vp, link, target, cred)
1440 AFS_STATCNT(afs_gn_symlink);
1443 error = afs_symlink(vp, link, &va, target, cred);
1444 afs_Trace4(afs_iclSetp, CM_TRACE_GSYMLINK, ICL_TYPE_POINTER, (afs_int32)vp,
1445 ICL_TYPE_STRING, link, ICL_TYPE_STRING, target, ICL_TYPE_LONG, error);
1451 afs_gn_readdir(vp, uiop, cred)
1458 AFS_STATCNT(afs_gn_readdir);
1459 error = afs_readdir(vp, uiop, cred);
1460 afs_Trace2(afs_iclSetp, CM_TRACE_GREADDIR, ICL_TYPE_POINTER, (afs_int32)vp,
1461 ICL_TYPE_LONG, error);
1466 extern Simple_lock afs_asyncbuf_lock;
1468 * Buffers are ranked by age. A buffer's age is the value of afs_biotime
1469 * when the buffer is processed by naix_vmstrategy. afs_biotime is
1470 * incremented for each buffer. A buffer's age is kept in its av_back field.
1471 * The age ranking is used by the daemons, which favor older buffers.
1473 afs_int32 afs_biotime = 0;
1475 extern struct buf *afs_asyncbuf;
1476 extern int afs_asyncbuf_cv;
1477 /* This function is called with a list of buffers, threaded through
1478 * the av_forw field. Our goal is to copy the list of buffers into the
1479 * afs_asyncbuf list, sorting buffers into sublists linked by the b_work field.
1480 * Within a work group, the buffer with the lowest address
1481 * has to be located at the head of the queue; its b_bcount field will also
1482 * be increased to cover all of the buffers in the b_work queue.
1484 #define AIX_VM_BLKSIZE 8192
1485 afs_gn_strategy(abp, cred)
1487 register struct buf *abp;
1489 register struct buf **lbp, *tbp;
1490 int *lwbp; /* last guy in work chain */
1491 struct buf *nbp, *qbp, *qnbp, *firstComparable;
1495 #define EFS_COMPARABLE(x,y) ((x)->b_vp == (y)->b_vp \
1496 && (x)->b_xmemd.subspace_id == (y)->b_xmemd.subspace_id \
1497 && (x)->b_flags == (y)->b_flags \
1498 && !((x)->b_flags & B_PFPROT) \
1499 && !((y)->b_flags & B_PFPROT))
1501 oldPriority = disable_lock(INTMAX, &afs_asyncbuf_lock);
1502 for(tbp = abp; tbp; tbp=nbp) {
1503 nbp = tbp->av_forw; /* remember for later */
1505 tbp->av_back = (struct buf *) afs_biotime++;
1507 /* First insert the buffer into the afs_asyncbuf queue. Insert the buffer
1508 * sorted by its disk position within its set of comparable buffers.
1509 * Ensure that all comparable buffers are grouped contiguously.
1510 * Later on, we'll merge adjacent buffers into a single request.
1512 firstComparable = (struct buf *) 0;
1513 lbp = &afs_asyncbuf;
1514 for(qbp = *lbp; qbp; lbp = &qbp->av_forw, qbp = *lbp) {
1515 if (EFS_COMPARABLE(tbp, qbp)) {
1516 if (!firstComparable) firstComparable = qbp;
1517 /* this buffer is comparable, so see if the next buffer
1518 * is farther along in the file; if it is, insert before the next buffer.
1520 if (tbp->b_blkno < qbp->b_blkno) {
1524 /* If we're at the end of a block of comparable buffers, we
1525 * insert the buffer here to keep all comparable buffers
1528 if (firstComparable)
1532 /* do the insert before qbp now */
1533 tbp->av_forw = *lbp;
1535 if (firstComparable == (struct buf *) 0) {
1536 /* next we're going to do all sorts of buffer merging tricks, but
1537 * here we know we're the only COMPARABLE block in the
1538 * afs_asyncbuf list, so we just skip that and continue with
1539 * the next input buffer.
1543 /* we may have actually added the "new" firstComparable */
1544 if (tbp->av_forw == firstComparable)
1545 firstComparable = tbp;
1547 * when we get here, firstComparable points to the first buffer in the
1548 * same vnode and subspace that we (tbp) are in. We go through the
1549 * area of this list with COMPARABLE buffers (a contiguous region) and
1550 * repeatedly merge buffers that are contiguous and in the same block, or
1551 * buffers that are contiguous and are both integral numbers of blocks.
1552 * Note that our end goal is to make the blocks as big as we can, but we
1553 * must minimize the transfers that are not integral #s of blocks on
1554 * block boundaries, since Episode will do those smaller and/or
1555 * unaligned I/Os synchronously.
1557 * A useful example to consider has the async queue with this in it:
1558 * [8K block, 2 pages] [4K block, 1 page] [4K hole] [8K block, 2 pages]
1559 * If we get a request that fills the 4K hole, we want to merge this
1560 * whole mess into a 24K, 6 page transfer. If we don't, however, we
1561 * don't want to do any merging since adding the 4K transfer to the 8K
1562 * transfer makes the 8K transfer synchronous.
1564 * Note that if there are any blocks whose size is a multiple of
1565 * the file system block size, then we know that such blocks are also
1566 * on block boundaries.
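/* The merge test used in the loop below, gathered in one place (a restatement
 * of the code, not new logic): two comparable, byte-adjacent buffers merge
 * when they share the same AIX_VM_BLKSIZE block, or when both byte counts are
 * whole multiples of AIX_VM_BLKSIZE:
 *
 *   adjacent = (dbtob(qbp->b_blkno) + qbp->b_bcount) == dbtob(qnbp->b_blkno);
 *   sameBlk  = (dbtob(qbp->b_blkno) & ~(AIX_VM_BLKSIZE-1)) ==
 *              ((dbtob(qnbp->b_blkno) + qnbp->b_bcount - 1) & ~(AIX_VM_BLKSIZE-1));
 *   integral = ((qbp->b_bcount | qnbp->b_bcount) & (AIX_VM_BLKSIZE-1)) == 0;
 *   doMerge  = adjacent && (sameBlk || integral);
 */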
1569 doMerge = 1; /* start the loop */
1570 while(doMerge) { /* loop until an iteration doesn't
1571 * make any more changes */
1573 for (qbp = firstComparable; ; qbp = qnbp) {
1574 qnbp = qbp->av_forw;
1575 if (!qnbp) break; /* we're done */
1576 if (!EFS_COMPARABLE(qbp, qnbp)) break;
1578 /* try to merge qbp and qnbp */
1580 /* first check: if the two are not adjacent, go on to the next region */
1581 if ((dbtob(qbp->b_blkno) + qbp->b_bcount) != dbtob(qnbp->b_blkno))
1584 /* note: if both are in the same block, the first byte of the leftmost buffer
1585 * and the last byte of the rightmost buffer are in the same block.
1587 if ((dbtob(qbp->b_blkno) & ~(AIX_VM_BLKSIZE-1)) ==
1588 ((dbtob(qnbp->b_blkno)+qnbp->b_bcount-1) & ~(AIX_VM_BLKSIZE-1))) {
1589 doMerge = 1; /* both in same block */
1591 else if ((qbp->b_bcount & (AIX_VM_BLKSIZE-1)) == 0
1592 && (qnbp->b_bcount & (AIX_VM_BLKSIZE-1)) == 0) {
1593 doMerge = 1; /* both integral #s of blocks */
1596 register struct buf *xbp;
1598 /* merge both of these blocks together */
1599 /* first set age to the older of the two */
1600 if ((int) qnbp->av_back - (int) qbp->av_back < 0)
1601 qbp->av_back = qnbp->av_back;
1602 lwbp = &qbp->b_work;
1603 /* find end of qbp's work queue */
1604 for(xbp = (struct buf *)(*lwbp); xbp;
1605 lwbp = &xbp->b_work, xbp = (struct buf *) (*lwbp));
1607 * now setting *lwbp will change the last ptr in the qbp's
1610 qbp->av_forw = qnbp->av_forw; /* splice out qnbp */
1611 qbp->b_bcount += qnbp->b_bcount; /* fix count */
1612 *lwbp = (int) qnbp; /* append qnbp to end */
1614 * note that qnbp is bogus, but it doesn't matter because
1615 * we're going to restart the for loop now.
1617 break; /* out of the for loop */
1621 } /* for loop for all interrupt data */
1622 /* at this point, all I/O has been queued. Wake up the daemon */
1623 e_wakeup_one((int*) &afs_asyncbuf_cv);
1624 unlock_enable(oldPriority, &afs_asyncbuf_lock);
1629 afs_inactive(avc, acred)
1630 register struct vcache *avc;
1631 struct AFS_UCRED *acred;
1633 afs_InactiveVCache(avc, acred);
1640 AFS_STATCNT(afs_gn_revoke);
1641 /* NO SUPPORT for this in afs YET! */
1645 int afs_gn_getacl(vp, uiop, cred)
1654 int afs_gn_setacl(vp, uiop, cred)
1663 int afs_gn_getpcl(vp, uiop, cred)
1672 int afs_gn_setpcl(vp, uiop, cred)
1684 extern struct vfsops Afs_vfsops;
1685 extern struct vnodeops afs_gn_vnodeops;
1686 extern int Afs_init();
1688 #define AFS_CALLOUT_TBL_SIZE 256
1691 * the following additional layer of gorp is due to the fact that the
1692 * filesystem layer no longer obtains the kernel lock for me. I was relying
1693 * on this behavior to avoid having to think about locking.
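/* Each wrapper below follows the same pattern; a minimal sketch (the
 * AFS_GLOCK()/AFS_GUNLOCK() calls are an assumption about the elided lines):
 *
 *   vfs_mount(struct vfs *a, struct ucred *b) {
 *       register glockOwner, ret;
 *       glockOwner = ISAFS_GLOCK();         // were we already holding the glock?
 *       if (!glockOwner)
 *           AFS_GLOCK();
 *       ret = (*Afs_vfsops.vfs_mount)(a, b);
 *       if (!glockOwner)
 *           AFS_GUNLOCK();
 *       return ret;
 *   }
 */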
1697 vfs_mount(struct vfs *a, struct ucred *b) {
1698 register glockOwner, ret;
1700 glockOwner = ISAFS_GLOCK();
1703 ret = (*Afs_vfsops.vfs_mount)(a, b);
1711 vfs_unmount(struct vfs *a, int b, struct ucred *c) {
1712 register glockOwner, ret;
1714 glockOwner = ISAFS_GLOCK();
1717 ret = (*Afs_vfsops.vfs_unmount)(a, b, c);
1725 vfs_root(struct vfs *a, struct vnode **b, struct ucred *c) {
1726 register glockOwner, ret;
1728 glockOwner = ISAFS_GLOCK();
1731 ret = (*Afs_vfsops.vfs_root)(a, b, c);
1739 vfs_statfs(struct vfs *a, struct statfs *b, struct ucred *c) {
1740 register glockOwner, ret;
1742 glockOwner = ISAFS_GLOCK();
1745 ret = (*Afs_vfsops.vfs_statfs)(a, b, c);
1753 vfs_sync(struct gfs *a) {
1754 register glockOwner, ret;
1756 glockOwner = ISAFS_GLOCK();
1759 ret = (*Afs_vfsops.vfs_sync)(a);
1766 vfs_vget(struct vfs *a, struct vnode **b, struct fileid *c
1767 , struct ucred *d) {
1768 register glockOwner, ret;
1770 glockOwner = ISAFS_GLOCK();
1773 ret = (*Afs_vfsops.vfs_vget)(a, b, c, d);
1781 vfs_cntl(struct vfs *a, int b, caddr_t c, size_t d, struct ucred *e) {
1782 register glockOwner, ret;
1784 glockOwner = ISAFS_GLOCK();
1787 ret = (*Afs_vfsops.vfs_cntl)(a, b, c, d, e);
1795 vfs_quotactl(struct vfs *a, int b, uid_t c, caddr_t d
1796 , struct ucred *e) {
1797 register glockOwner, ret;
1799 glockOwner = ISAFS_GLOCK();
1802 ret = (*Afs_vfsops.vfs_quotactl)(a, b, c, d, e);
1810 struct vfsops locked_Afs_vfsops = {
1822 vn_link(struct vnode *a, struct vnode *b, char *c, struct ucred *d) {
1823 register glockOwner, ret;
1825 glockOwner = ISAFS_GLOCK();
1828 ret = (*afs_gn_vnodeops.vn_link)(a, b, c, d);
1836 vn_mkdir(struct vnode *a, char *b, int c, struct ucred *d) {
1837 register glockOwner, ret;
1839 glockOwner = ISAFS_GLOCK();
1842 ret = (*afs_gn_vnodeops.vn_mkdir)(a, b, c, d);
1850 vn_mknod(struct vnode *a, caddr_t b, int c, dev_t d, struct ucred *e) {
1851 register glockOwner, ret;
1853 glockOwner = ISAFS_GLOCK();
1856 ret = (*afs_gn_vnodeops.vn_mknod)(a, b, c, d, e);
1864 vn_remove(struct vnode *a, struct vnode *b, char *c, struct ucred *d) {
1865 register glockOwner, ret;
1867 glockOwner = ISAFS_GLOCK();
1870 ret = (*afs_gn_vnodeops.vn_remove)(a, b, c, d);
1878 vn_rename(struct vnode *a, struct vnode *b, caddr_t c
1879 , struct vnode *d, struct vnode *e, caddr_t f, struct ucred *g) {
1880 register glockOwner, ret;
1882 glockOwner = ISAFS_GLOCK();
1885 ret = (*afs_gn_vnodeops.vn_rename)(a, b, c, d, e, f, g);
1893 vn_rmdir(struct vnode *a, struct vnode *b, char *c, struct ucred *d) {
1894 register glockOwner, ret;
1896 glockOwner = ISAFS_GLOCK();
1899 ret = (*afs_gn_vnodeops.vn_rmdir)(a, b, c, d);
1907 vn_lookup(struct vnode *a, struct vnode **b, char *c, int d,
1908 struct vattr *v, struct ucred *e) {
1909 register glockOwner, ret;
1911 glockOwner = ISAFS_GLOCK();
1914 ret = (*afs_gn_vnodeops.vn_lookup)(a, b, c, d, v, e);
1922 vn_fid(struct vnode *a, struct fileid *b, struct ucred *c) {
1923 register glockOwner, ret;
1925 glockOwner = ISAFS_GLOCK();
1928 ret = (*afs_gn_vnodeops.vn_fid)(a, b, c);
1936 vn_open(struct vnode *a, int b, int c, caddr_t *d, struct ucred *e) {
1937 register glockOwner, ret;
1939 glockOwner = ISAFS_GLOCK();
1942 ret = (*afs_gn_vnodeops.vn_open)(a, b, c, d, e);
1950 vn_create(struct vnode *a, struct vnode **b, int c, caddr_t d
1951 , int e, caddr_t *f, struct ucred *g) {
1952 register glockOwner, ret;
1954 glockOwner = ISAFS_GLOCK();
1957 ret = (*afs_gn_vnodeops.vn_create)(a, b, c, d, e, f, g);
1965 vn_hold(struct vnode *a) {
1966 register glockOwner, ret;
1968 glockOwner = ISAFS_GLOCK();
1971 ret = (*afs_gn_vnodeops.vn_hold)(a);
1979 vn_rele(struct vnode *a) {
1980 register glockOwner, ret;
1982 glockOwner = ISAFS_GLOCK();
1985 ret = (*afs_gn_vnodeops.vn_rele)(a);
1993 vn_close(struct vnode *a, int b, caddr_t c, struct ucred *d) {
1994 register glockOwner, ret;
1996 glockOwner = ISAFS_GLOCK();
1999 ret = (*afs_gn_vnodeops.vn_close)(a, b, c, d);
2007 vn_map(struct vnode *a, caddr_t b, uint c, uint d, uint e, struct ucred *f) {
2008 register glockOwner, ret;
2010 glockOwner = ISAFS_GLOCK();
2013 ret = (*afs_gn_vnodeops.vn_map)(a, b, c, d, e, f);
2021 vn_unmap(struct vnode *a, int b, struct ucred *c) {
2022 register glockOwner, ret;
2024 glockOwner = ISAFS_GLOCK();
2027 ret = (*afs_gn_vnodeops.vn_unmap)(a, b, c);
2035 vn_access(struct vnode *a, int b, int c, struct ucred *d) {
2036 register glockOwner, ret;
2038 glockOwner = ISAFS_GLOCK();
2041 ret = (*afs_gn_vnodeops.vn_access)(a, b, c, d);
2049 vn_getattr(struct vnode *a, struct vattr *b, struct ucred *c) {
2050 register glockOwner, ret;
2052 glockOwner = ISAFS_GLOCK();
2055 ret = (*afs_gn_vnodeops.vn_getattr)(a, b, c);
2063 vn_setattr(struct vnode *a, int b, int c, int d, int e, struct ucred *f) {
2064 register glockOwner, ret;
2066 glockOwner = ISAFS_GLOCK();
2069 ret = (*afs_gn_vnodeops.vn_setattr)(a, b, c, d, e, f);
2077 vn_fclear(struct vnode *a, int b, offset_t c, offset_t d
2078 , caddr_t e, struct ucred *f) {
2079 register glockOwner, ret;
2081 glockOwner = ISAFS_GLOCK();
2084 ret = (*afs_gn_vnodeops.vn_fclear)(a, b, c, d, e, f);
2092 vn_fsync(struct vnode *a, int b, int c, struct ucred *d) {
2093 register glockOwner, ret;
2095 glockOwner = ISAFS_GLOCK();
2098 ret = (*afs_gn_vnodeops.vn_fsync)(a, b, c, d);
2106 vn_ftrunc(struct vnode *a, int b, offset_t c, caddr_t d, struct ucred *e) {
2107 register glockOwner, ret;
2109 glockOwner = ISAFS_GLOCK();
2112 ret = (*afs_gn_vnodeops.vn_ftrunc)(a, b, c, d, e);
2120 vn_rdwr(struct vnode *a, enum uio_rw b, int c, struct uio *d
2121 , int e, caddr_t f, struct vattr *v, struct ucred *g) {
2122 register glockOwner, ret;
2124 glockOwner = ISAFS_GLOCK();
2127 ret = (*afs_gn_vnodeops.vn_rdwr)(a, b, c, d, e, f, v, g);
2135 vn_lockctl(struct vnode *a, offset_t b, struct eflock *c, int d
2136 , int (*e)(), ulong *f, struct ucred *g) {
2137 register glockOwner, ret;
2139 glockOwner = ISAFS_GLOCK();
2142 ret = (*afs_gn_vnodeops.vn_lockctl)(a, b, c, d, e, f, g);
2150 vn_ioctl(struct vnode *a, int b, caddr_t c, size_t d, int e, struct ucred *f) {
2151 register glockOwner, ret;
2153 glockOwner = ISAFS_GLOCK();
2156 ret = (*afs_gn_vnodeops.vn_ioctl)(a, b, c, d, e, f);
2164 vn_readlink(struct vnode *a, struct uio *b, struct ucred *c) {
2165 register glockOwner, ret;
2167 glockOwner = ISAFS_GLOCK();
2170 ret = (*afs_gn_vnodeops.vn_readlink)(a, b, c);
2178 vn_select(struct vnode *a, int b, ushort c, ushort *d, void (*e)()
2179 , caddr_t f, struct ucred *g) {
2180 register glockOwner, ret;
2182 glockOwner = ISAFS_GLOCK();
2185 ret = (*afs_gn_vnodeops.vn_select)(a, b, c, d, e, f, g);
2193 vn_symlink(struct vnode *a, char *b, char *c, struct ucred *d) {
2194 register glockOwner, ret;
2196 glockOwner = ISAFS_GLOCK();
2199 ret = (*afs_gn_vnodeops.vn_symlink)(a, b, c, d);
2207 vn_readdir(struct vnode *a, struct uio *b, struct ucred *c) {
2208 register glockOwner, ret;
2210 glockOwner = ISAFS_GLOCK();
2213 ret = (*afs_gn_vnodeops.vn_readdir)(a, b, c);
2221 vn_revoke(struct vnode *a, int b, int c, struct vattr *d, struct ucred *e) {
2222 register glockOwner, ret;
2224 glockOwner = ISAFS_GLOCK();
2227 ret = (*afs_gn_vnodeops.vn_revoke)(a, b, c, d, e);
2235 vn_getacl(struct vnode *a, struct uio *b, struct ucred *c) {
2236 register glockOwner, ret;
2238 glockOwner = ISAFS_GLOCK();
2241 ret = (*afs_gn_vnodeops.vn_getacl)(a, b, c);
2249 vn_setacl(struct vnode *a, struct uio *b, struct ucred *c) {
2250 register glockOwner, ret;
2252 glockOwner = ISAFS_GLOCK();
2255 ret = (*afs_gn_vnodeops.vn_setacl)(a, b, c);
2263 vn_getpcl(struct vnode *a, struct uio *b, struct ucred *c) {
2264 register glockOwner, ret;
2266 glockOwner = ISAFS_GLOCK();
2269 ret = (*afs_gn_vnodeops.vn_getpcl)(a, b, c);
2277 vn_setpcl(struct vnode *a, struct uio *b, struct ucred *c) {
2278 register glockOwner, ret;
2280 glockOwner = ISAFS_GLOCK();
2283 ret = (*afs_gn_vnodeops.vn_setpcl)(a, b, c);
2290 extern int afs_gn_strategy();
2292 struct vnodeops locked_afs_gn_vnodeops = {
2321 afs_gn_strategy, /* no locking!!! (discovered the hard way) */
2329 struct gfs afs_gfs = {
2331 &locked_afs_gn_vnodeops,
2335 GFS_VERSION4 | GFS_REMOTE,