1 /* Copyright (C) 1998 Transarc Corporation - All rights reserved. */
3 * vnodeops structure and Digital Unix specific ops and support routines.
6 #include "../afs/param.h" /* Should be always first */
8 #include "../afs/sysincludes.h" /* Standard vendor system headers */
9 #include "../afs/afsincludes.h" /* Afs-based standard headers */
10 #include "../afs/afs_stats.h" /* statistics */
11 #include <vm/vm_mmap.h>
12 #include <vm/vm_ubc.h>
13 #include "../afs/afs_cbqueue.h"
14 #include "../afs/nfsclient.h"
15 #include "../afs/afs_osidnlc.h"
/* Forward declarations of the single-threaded AFS vnode-op implementations
 * that the mp_afs_* SMP wrappers below delegate to.
 * NOTE(review): this extraction is fragmentary (stray leading line numbers,
 * missing lines); code kept byte-identical. */
18 extern int afs_lookup(), afs_create(), afs_noop(), afs_open(), afs_close();
19 extern int afs_access(), afs_getattr(), afs_setattr(), afs_badop();
20 extern int afs_fsync(), afs_seek(), afs_remove(), afs_link(), afs_rename();
21 extern int afs_mkdir(), afs_rmdir(), afs_symlink(), afs_readdir();
22 extern int afs_readlink(), afs_lockctl();
23 extern int vn_pathconf_default(), seltrue();
/*
 * Forward declarations of the SMP-wrapped AFS vnode operations defined
 * later in this file (old-style, pre-ANSI declarations, matching the
 * rest of the file).
 *
 * Fix: the original declared mp_afs_ubcrdwr() twice on consecutive
 * lines; the redundant duplicate is removed here.  The stray leading
 * integers (extraction artifacts) are also dropped so the declarations
 * are well-formed C.
 */
int mp_afs_lookup(), mp_afs_create(), mp_afs_open();
int mp_afs_access(), mp_afs_getattr(), mp_afs_setattr(), mp_afs_ubcrdwr();
int mp_afs_mmap();
int mp_afs_fsync(), mp_afs_seek(), mp_afs_remove(), mp_afs_link();
int mp_afs_rename(), mp_afs_mkdir(), mp_afs_rmdir(), mp_afs_symlink();
int mp_afs_readdir(), mp_afs_readlink(), mp_afs_abortop(), mp_afs_inactive();
int mp_afs_reclaim(), mp_afs_bmap(), mp_afs_strategy(), mp_afs_print();
int mp_afs_page_read(), mp_afs_page_write(), mp_afs_swap(), mp_afs_bread();
int mp_afs_brelse(), mp_afs_lockctl(), mp_afs_syncdata(), mp_afs_close();
/* AFS vnode-operations switch for Digital Unix; afs_ops points at it so
 * the rest of the client can install it on AFS vnodes.
 * NOTE(review): most initializer entries are missing from this extraction;
 * only a few slots (mknod, ioctl, select, unlock, ext-attr ops) survive. */
37 struct vnodeops Afs_vnodeops = {
40 afs_noop, /* vn_mknod */
48 afs_badop, /* vn_ioctl */
49 seltrue, /* vn_select */
75 afs_noop, /* unLock */
76 afs_noop, /* get ext attrs */
77 afs_noop, /* set ext attrs */
78 afs_noop, /* del ext attrs */
81 struct vnodeops *afs_ops = &Afs_vnodeops;
83 /* vnode file operations, and our own */
/* NOTE(review): the afs_fileops initializer entries are missing from this
 * extraction; only the extern declarations and the opening brace survive. */
85 extern int vn_write();
86 extern int vn_ioctl();
87 extern int vn_select();
88 extern int afs_closex();
90 struct fileops afs_fileops = {
/* SMP wrapper routines: each visibly delegates to the corresponding afs_*
 * implementation (the only grounded fact here).  NOTE(review): the lock
 * acquisition/release, local declarations, braces and return statements are
 * missing from this extraction; code kept byte-identical. */
98 mp_afs_lookup(adp, ndp)
100 struct nameidata *ndp;
104 code = afs_lookup(adp, ndp);
/* wrapper: delegates to afs_create() */
109 mp_afs_create(ndp, attrs)
110 struct nameidata *ndp;
115 code = afs_create(ndp, attrs);
/* wrapper: delegates to afs_open() */
120 mp_afs_open(avcp, aflags, acred)
121 struct vcache **avcp;
123 struct AFS_UCRED *acred;
127 code = afs_open(avcp, aflags, acred);
/* wrapper: delegates to afs_access() */
132 mp_afs_access(avc, amode, acred)
135 struct AFS_UCRED *acred;
139 code = afs_access(avc, amode, acred);
/* wrapper: delegates to afs_close() */
144 mp_afs_close(avc, flags, cred)
151 code = afs_close(avc, flags, cred);
/* wrapper: delegates to afs_getattr() */
156 mp_afs_getattr(avc, attrs, acred)
159 struct AFS_UCRED *acred;
163 code = afs_getattr(avc, attrs, acred);
/* wrapper: delegates to afs_setattr() */
168 mp_afs_setattr(avc, attrs, acred)
171 struct AFS_UCRED *acred;
175 code = afs_setattr(avc, attrs, acred);
/* wrapper: delegates to afs_fsync() */
180 mp_afs_fsync(avc, fflags, acred, waitfor)
183 struct AFS_UCRED *acred;
188 code = afs_fsync(avc, fflags, acred, waitfor);
/* NOTE(review): remainder of mp_afs_remove() — its signature line is missing
 * from this extraction; the afs_remove() call identifies it. */
194 struct nameidata *ndp;
198 code = afs_remove(ndp);
/* wrapper: delegates to afs_link() */
203 mp_afs_link(avc, ndp)
205 struct nameidata *ndp;
209 code = afs_link(avc, ndp);
/* wrapper: delegates to afs_rename() */
214 mp_afs_rename(fndp, tndp)
215 struct nameidata *fndp, *tndp;
219 code = afs_rename(fndp, tndp);
/* wrapper: delegates to afs_mkdir() */
224 mp_afs_mkdir(ndp, attrs)
225 struct nameidata *ndp;
230 code = afs_mkdir(ndp, attrs);
/* NOTE(review): fragment of mp_afs_rmdir() — signature line missing. */
236 struct nameidata *ndp;
240 code = afs_rmdir(ndp);
/* wrapper: delegates to afs_symlink() */
245 mp_afs_symlink(ndp, attrs, atargetName)
246 struct nameidata *ndp;
248 register char *atargetName;
252 code = afs_symlink(ndp, attrs, atargetName);
/* wrapper: delegates to afs_readdir().  NOTE(review): as above, these
 * wrapper bodies are fragmentary in this extraction. */
257 mp_afs_readdir(avc, auio, acred, eofp)
260 struct AFS_UCRED *acred;
265 code = afs_readdir(avc, auio, acred, eofp);
/* wrapper: delegates to afs_readlink() */
270 mp_afs_readlink(avc, auio, acred)
273 struct AFS_UCRED *acred;
277 code = afs_readlink(avc, auio, acred);
/* wrapper: delegates to afs_lockctl() */
282 mp_afs_lockctl(avc, af, flag, acred, clid, offset)
285 struct AFS_UCRED *acred;
292 code = afs_lockctl(avc, af, flag, acred, clid, offset);
/* NOTE(review): stray fragment — afs_closex() call; its enclosing wrapper's
 * signature is missing from this extraction. */
302 code = afs_closex(afd);
/* wrapper: rejects negative offsets, presumably returning EINVAL — the
 * branch body is not visible; TODO confirm against full source. */
307 mp_afs_seek(avc, oldoff, newoff, cred)
309 off_t oldoff, newoff;
312 if ((int) newoff < 0)
/* NOTE(review): stray fragment — likely part of mp_afs_abortop(); verify. */
319 struct nameidata *ndp;
/* wrapper: delegates to afs_InactiveVCache() */
324 mp_afs_inactive(avc, acred)
325 register struct vcache *avc;
326 struct AFS_UCRED *acred;
329 afs_InactiveVCache(avc, acred);
/* Pagein path: reads page data via afs_rdwr(UIO_READ), traces the result,
 * and — when the vcache is not already CWired — wires it via afs_Wire()
 * under the vcache write lock.  NOTE(review): fragmentary; the error-branch
 * that precedes the "} else if" is missing from this extraction. */
346 mp_afs_page_read(avc, uio, acred)
352 struct vrequest treq;
355 error = afs_rdwr(avc, uio, UIO_READ, 0, acred);
356 afs_Trace3(afs_iclSetp, CM_TRACE_PAGE_READ, ICL_TYPE_POINTER, avc,
357 ICL_TYPE_INT32, error, ICL_TYPE_INT32, avc->states);
360 } else if ((avc->states & CWired) == 0) {
361 afs_InitReq(&treq, acred);
362 ObtainWriteLock(&avc->lock,161);
363 afs_Wire(avc, &treq);
364 ReleaseWriteLock(&avc->lock);
/* Pageout path: writes page data via afs_rdwr(UIO_WRITE) and traces the
 * result.  NOTE(review): fragmentary; locals, braces and return are missing
 * from this extraction. */
371 mp_afs_page_write(avc, uio, acred, pager, offset)
375 memory_object_t pager;
381 error = afs_rdwr(avc, uio, UIO_WRITE, 0, acred);
382 afs_Trace3(afs_iclSetp, CM_TRACE_PAGE_WRITE, ICL_TYPE_POINTER, avc,
383 ICL_TYPE_INT32, error, ICL_TYPE_INT32, avc->states);
/* UBC-based read/write for regular AFS files: verifies the vcache, takes
 * the Bozon + vcache write locks, then loops page-by-page — ubc_lookup()
 * to find/create each page, afs_ustrategy() to fill it when needed,
 * uiomove() to copy user data, and for writes ubc_dirty_kluster() +
 * afs_ustrategy() to push dirty pages.  Handles append mode, EOF
 * truncation of the transfer, chunk-boundary partial writes and prefetch,
 * and rolls back uio_resid / releases VM pages on EDQUOT/ENOSPC.
 * NOTE(review): this extraction is fragmentary — several comment
 * delimiters and code lines are missing (some visible lines below sit
 * inside comments whose opening or closing markers were dropped), so no
 * interior comments are added; code kept byte-identical. */
393 mp_afs_ubcrdwr(avc, uio, ioflag, cred)
399 register afs_int32 code;
401 afs_int32 fileBase, size, cnt=0;
403 register afs_int32 tsize;
404 register afs_int32 pageOffset;
406 struct vrequest treq;
407 int rw = uio->uio_rw;
411 afs_int32 save_resid;
417 afs_InitReq(&treq, cred);
418 if (AFS_NFSXLATORREQ(cred) && rw == UIO_READ) {
419 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
420 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
425 afs_Trace4(afs_iclSetp, CM_TRACE_VMRW, ICL_TYPE_POINTER, avc,
426 ICL_TYPE_INT32, (rw==UIO_WRITE? 1 : 0),
427 ICL_TYPE_LONG, uio->uio_offset,
428 ICL_TYPE_LONG, uio->uio_resid);
429 code = afs_VerifyVCache(avc, &treq);
431 code = afs_CheckCode(code, &treq, 35);
435 if (vType(avc) != VREG) {
437 return EISDIR; /* can't read or write other things */
439 afs_BozonLock(&avc->pvnLock, avc);
440 osi_FlushPages(avc); /* hold bozon lock, but not basic vnode lock */
441 ObtainWriteLock(&avc->lock,162);
442 /* adjust parameters when appending files */
443 if ((ioflag & IO_APPEND) && uio->uio_rw == UIO_WRITE)
444 uio->uio_offset = avc->m.Length; /* write at EOF position */
445 if (uio->uio_rw == UIO_WRITE) {
446 avc->states |= CDirty;
450 * before starting any I/O, we must ensure that the file is big enough
451 * to hold the results (since afs_putpage will be called to force
454 size = uio->afsio_resid + uio->afsio_offset; /* new file size */
455 if (size > avc->m.Length) avc->m.Length = size; /* file grew */
456 avc->m.Date = osi_Time(); /* Set file date (for ranlib) */
457 if (uio->afsio_resid > PAGE_SIZE)
458 cnt = uio->afsio_resid / PAGE_SIZE;
459 save_resid = uio->afsio_resid;
464 * compute the amount of data to move into this block,
465 * based on uio->afsio_resid.
467 size = uio->afsio_resid; /* transfer size */
468 fileBase = uio->afsio_offset; /* start file position */
469 pageBase = fileBase & ~(PAGE_SIZE-1); /* file position of the page */
470 pageOffset = fileBase & (PAGE_SIZE-1); /* start offset within page */
471 tsize = PAGE_SIZE-pageOffset; /* amount left in this page */
473 * we'll read tsize bytes,
474 * but first must make sure tsize isn't too big
476 if (tsize > size) tsize = size; /* don't read past end of request */
477 eof = 0; /* flag telling us if we hit the EOF on the read */
478 if (uio->uio_rw == UIO_READ) { /* we're doing a read operation */
479 /* don't read past EOF */
480 if (tsize + fileBase > avc->m.Length) {
481 tsize = avc->m.Length - fileBase;
482 eof = 1; /* we did hit the EOF */
483 if (tsize < 0) tsize = 0; /* better safe than sorry */
486 if (tsize <= 0) break; /* nothing to transfer, we're done */
488 /* Purge dirty chunks of file if there are too many dirty chunks.
489 * Inside the write loop, we only do this at a chunk boundary.
490 * Clean up partial chunk if necessary at end of loop.
492 if (uio->uio_rw == UIO_WRITE && counter > 0
493 && AFS_CHUNKOFFSET(fileBase) == 0) {
494 code = afs_DoPartialWrite(avc, &treq);
495 avc->states |= CDirty;
503 ReleaseWriteLock(&avc->lock);
505 code = ubc_lookup(((struct vnode *)avc)->v_object, pageBase,
506 PAGE_SIZE, PAGE_SIZE, &page, &flags);
508 ObtainWriteLock(&avc->lock,163);
513 if (flags & B_NOCACHE) {
515 No page found. We should not read the page in if
516 1. the write starts on a page edge (ie, pageoffset == 0)
518 1. we will fill the page (ie, size == PAGESIZE), or
519 2. we are writing past eof
521 if ((uio->uio_rw == UIO_WRITE) &&
522 ((pageOffset == 0 && (size == PAGE_SIZE || fileBase >= avc->m.Length)))) {
523 struct vnode *vp = (struct vnode *)avc;
524 /* we're doing a write operation past eof; no need to read it */
527 ubc_page_zero(page, 0, PAGE_SIZE);
528 ubc_page_release(page, B_DONE);
531 /* page wasn't cached, read it in. */
535 bp = ubc_bufalloc(page, 1, PAGE_SIZE, 1, B_READ);
538 bp->b_vp = (struct vnode *)avc;
539 bp->b_blkno = btodb(pageBase);
540 ReleaseWriteLock(&avc->lock);
541 code = afs_ustrategy(bp, cred); /* do the I/O */
542 ObtainWriteLock(&avc->lock,164);
548 ubc_page_release(page, 0);
556 data = (char *)page->pg_addr; /* DUX 4.0D */
558 data = (char *)PHYS_TO_KSEG(page->pg_phys_addr); /* DUX 4.0E */
560 ReleaseWriteLock(&avc->lock); /* uiomove may page fault */
562 code = uiomove(data+pageOffset, tsize, uio);
563 ubc_unload(page, pageOffset, page_size);
564 if (uio->uio_rw == UIO_WRITE) {
567 /* Mark the page dirty and release it to avoid a deadlock
568 * in ubc_dirty_kluster when more than one process writes
569 * this page at the same time. */
570 toffset = page->pg_offset;
572 ubc_page_release(page, flags);
579 /* We released the page, so we can get a null page
580 * list if another thread calls the strategy routine.
582 pl = ubc_dirty_kluster(((struct vnode *)avc)->v_object,
583 NULL, toffset, 0, B_WANTED, FALSE, &kpcnt);
585 bp = ubc_bufalloc(pl, 1, PAGE_SIZE, 1, B_WRITE);
587 bp->b_vp = (struct vnode *)avc;
588 bp->b_blkno = btodb(pageBase);
590 code = afs_ustrategy(bp, cred); /* do the I/O */
595 ObtainWriteLock(&avc->lock,415);
601 ubc_page_release(page, flags);
604 ObtainWriteLock(&avc->lock,165);
606 * If reading at a chunk boundary, start prefetch of next chunk.
608 if (uio->uio_rw == UIO_READ
609 && (counter == 0 || AFS_CHUNKOFFSET(fileBase) == 0)) {
610 tdc = afs_FindDCache(avc, fileBase);
612 if (!(tdc->flags & DFNextStarted))
613 afs_PrefetchChunk(avc, tdc, cred, &treq);
621 afs_FakeClose(avc, cred);
622 if (uio->uio_rw == UIO_WRITE && code == 0 && (avc->states & CDirty)) {
623 code = afs_DoPartialWrite(avc, &treq);
625 ReleaseWriteLock(&avc->lock);
626 afs_BozonUnlock(&avc->pvnLock, avc);
627 if (DO_FLUSH || (!newpage && (cnt < 10))) {
629 ubc_flush_dirty(((struct vnode *)avc)->v_object, flags);
633 ObtainSharedLock(&avc->lock, 409);
636 code = avc->vc_error;
639 /* This is required since we may still have dirty pages after the write.
640 * I could just let close do the right thing, but stat's before the close
641 * return the wrong length.
643 if (code == EDQUOT || code == ENOSPC) {
644 uio->uio_resid = save_resid;
645 UpgradeSToWLock(&avc->lock, 410);
646 osi_ReleaseVM(avc, cred);
647 ConvertWToSLock(&avc->lock);
649 ReleaseSharedLock(&avc->lock);
651 if (!code && (ioflag & IO_SYNC) && (uio->uio_rw == UIO_WRITE)
652 && !AFS_NFSXLATORREQ(cred)) {
653 code = afs_fsync(avc, 0, cred, 0);
656 code = afs_CheckCode(code, &treq, 36);
663 * Now for some bad news. Since we artificially hold on to vnodes by doing
664 * an extra VNHOLD in afs_NewVCache(), there is no way for us to know
665 * when we need to flush the pages when a program exits. Particularly
666 * if it closes the file after mapping it R/W.
/* mmap entry point: verifies the vcache, flushes stale pages under the
 * Bozon lock, marks the vcache CMAPPED, then builds a vp_mmap_args and
 * hands the mapping to u_vp_create().  NOTE(review): fragmentary — several
 * argument-copy lines (a_vaddr, a_size, a_prot, ...) and the error branches
 * are missing from this extraction; code kept byte-identical. */
670 mp_afs_mmap(avc, offset, map, addrp, len, prot, maxprot, flags, cred)
671 register struct vcache *avc;
681 struct vp_mmap_args args;
682 register struct vp_mmap_args *ap = &args;
683 struct vnode *vp = (struct vnode *)avc;
685 struct vrequest treq;
687 extern kern_return_t u_vp_create();
691 afs_InitReq(&treq, cred);
692 code = afs_VerifyVCache(avc, &treq);
694 code = afs_CheckCode(code, &treq, 37);
698 afs_BozonLock(&avc->pvnLock, avc);
699 osi_FlushPages(avc); /* ensure old pages are gone */
700 afs_BozonUnlock(&avc->pvnLock, avc);
701 ObtainWriteLock(&avc->lock,166);
702 avc->states |= CMAPPED;
703 ReleaseWriteLock(&avc->lock);
704 ap->a_offset = offset;
708 ap->a_maxprot = maxprot;
711 code = u_vp_create(map, vp->v_object, (vm_offset_t) ap);
713 code = afs_CheckCode(code, &treq, 38);
/* UBC getpage: verifies the vcache, then for each of the requested pages
 * does a ubc_lookup(); uncached pages are either zero-filled (write past
 * EOF) or read in via ubc_bufalloc()/afs_ustrategy().  Non-read accesses
 * dirty the page; protp[] is granted VM_PROT_WRITE for clean pages.  The
 * page list is terminated with VM_PAGE_NULL.  NOTE(review): fragmentary —
 * braces, error paths and some loop lines are missing from this
 * extraction; code kept byte-identical. */
719 int mp_afs_getpage(vop, offset, len, protp, pl, plsz, mape, addr, rw, cred)
731 register afs_int32 code;
732 struct vrequest treq;
734 int i, pages = (len + PAGE_SIZE - 1) >> page_shift;
738 struct vcache *avc = (struct vcache *)vop->vu_vp;
740 /* first, obtain the proper lock for the VM system */
743 afs_InitReq(&treq, cred);
744 code = afs_VerifyVCache(avc, &treq);
747 code = afs_CheckCode(code, &treq, 39); /* failed to get it */
752 /* clean all dirty pages for this vnode */
754 ubc_flush_dirty(vop,0);
757 afs_BozonLock(&avc->pvnLock, avc);
758 ObtainWriteLock(&avc->lock,167);
759 afs_Trace4(afs_iclSetp, CM_TRACE_PAGEIN, ICL_TYPE_POINTER, avc,
760 ICL_TYPE_LONG, offset, ICL_TYPE_LONG, len,
761 ICL_TYPE_INT32, (int) rw);
762 for (i = 0; i < pages; i++) {
764 off = offset + PAGE_SIZE * i;
765 if (protp) protp[i] = 0;
767 ReleaseWriteLock(&avc->lock);
769 code = ubc_lookup(((struct vnode *)avc)->v_object, off,
770 PAGE_SIZE, PAGE_SIZE, pagep, &flags);
772 ObtainWriteLock(&avc->lock,168);
776 if(flags & B_NOCACHE) { /* if (page) */
777 if ((rw & B_WRITE) && (offset+len >= avc->m.Length)) {
778 struct vnode *vp = (struct vnode *)avc;
779 /* we're doing a write operation past eof; no need to read it */
781 ubc_page_zero(*pagep, 0, PAGE_SIZE);
782 ubc_page_release(*pagep, B_DONE);
785 /* page wasn't cached, read it in. */
789 bp = ubc_bufalloc(*pagep, 1, PAGE_SIZE, 1, B_READ);
792 bp->b_vp = (struct vnode *)avc;
793 bp->b_blkno = btodb(off);
794 ReleaseWriteLock(&avc->lock);
795 code = afs_ustrategy(bp, cred); /* do the I/O */
796 ObtainWriteLock(&avc->lock,169);
802 ubc_page_release(pl[i], 0);
808 if ((rw & B_READ) == 0) {
810 ubc_page_dirty(pl[i]);
813 if (protp && (flags & B_DIRTY) == 0) {
814 protp[i] = VM_PROT_WRITE;
819 pl[i] = VM_PAGE_NULL;
820 ReleaseWriteLock(&avc->lock);
821 afs_BozonUnlock(&avc->pvnLock, avc);
822 afs_Trace3(afs_iclSetp, CM_TRACE_PAGEINDONE, ICL_TYPE_INT32, code,
823 ICL_TYPE_POINTER, *pagep, ICL_TYPE_INT32, flags);
824 code = afs_CheckCode(code, &treq, 40);
/* UBC putpage: if the vnode is being deactivated (VXLOCK) the pages are
 * simply released; otherwise each page in pl[] is written out via
 * ubc_bufalloc()/afs_ustrategy() under the Bozon + vcache write locks and
 * the slot is cleared to VM_PAGE_NULL.  NOTE(review): fragmentary — braces,
 * intermediate release/error lines and the return are missing from this
 * extraction; code kept byte-identical. */
830 int mp_afs_putpage(vop, pl, pcnt, flags, cred)
837 register afs_int32 code=0;
838 struct vcache *avc = (struct vcache *)vop->vu_vp;
839 struct vnode *vp = (struct vnode *)avc;
843 afs_Trace4(afs_iclSetp, CM_TRACE_PAGEOUT, ICL_TYPE_POINTER, avc,
844 ICL_TYPE_INT32, pcnt, ICL_TYPE_INT32, vp->v_flag,
845 ICL_TYPE_INT32, flags);
849 if (vp->v_flag & VXLOCK) {
851 for (i = 0; i < pcnt; i++) {
852 ubc_page_release(pl[i], B_DONE|B_DIRTY);
853 pl[i] = VM_PAGE_NULL;
862 /* first, obtain the proper lock for the VM system */
863 afs_BozonLock(&avc->pvnLock, avc);
864 ObtainWriteLock(&avc->lock,170);
865 for (i = 0; i < pcnt; i++) {
866 vm_page_t page = pl[i];
871 bp = ubc_bufalloc(page, 1, PAGE_SIZE, 1, B_WRITE);
874 bp->b_vp = (struct vnode *)avc;
875 bp->b_blkno = btodb(page->pg_offset);
876 ReleaseWriteLock(&avc->lock);
877 code = afs_ustrategy(bp, cred); /* do the I/O */
878 ObtainWriteLock(&avc->lock,171);
885 pl[i] = VM_PAGE_NULL;
889 ReleaseWriteLock(&avc->lock);
890 afs_BozonUnlock(&avc->pvnLock, avc);
891 afs_Trace2(afs_iclSetp, CM_TRACE_PAGEOUTDONE, ICL_TYPE_INT32, code,
892 ICL_TYPE_INT32, avc->m.Length);
/* NOTE(review): two fragments — mp_afs_swap() (body not visible) and
 * mp_afs_syncdata(), which short-circuits for NFS-translator requests;
 * the actual sync happens in afs_fsync(). */
898 int mp_afs_swap(avc, swapop, argp)
906 int mp_afs_syncdata(avc, flag, offset, length, cred)
913 /* NFS V3 makes this call, ignore it. We'll sync the data in afs_fsync. */
914 if (AFS_NFSXLATORREQ(cred))
920 /* a freelist of one */
/* Single-slot cache of a fake buf so consecutive mp_afs_bread() calls can
 * reuse the allocation instead of AFS_KALLOC'ing each time. */
921 struct buf *afs_bread_freebp = 0;
924 * Only rfs_read calls this, and it only looks at bp->b_un.b_addr.
925 * Thus we can use fake bufs (ie not from the real buffer pool).
/* bread for the NFS translator: allocates (or reuses) a fake buf, fills it
 * with one filesystem block via afs_read(), and marks it as fake by storing
 * the buf's own address in b_vp.  NOTE(review): fragmentary — braces and
 * error handling are missing from this extraction; code kept
 * byte-identical. */
927 mp_afs_bread(vp, lbn, bpp, cred)
933 int offset, fsbsize, error;
939 AFS_STATCNT(afs_bread);
940 fsbsize = vp->v_vfsp->vfs_bsize;
941 offset = lbn * fsbsize;
942 if (afs_bread_freebp) {
943 bp = afs_bread_freebp;
944 afs_bread_freebp = 0;
946 bp = (struct buf *) AFS_KALLOC(sizeof(*bp));
947 bp->b_un.b_addr = (caddr_t) AFS_KALLOC(fsbsize);
950 iov.iov_base = bp->b_un.b_addr;
951 iov.iov_len = fsbsize;
952 uio.afsio_iov = &iov;
953 uio.afsio_iovcnt = 1;
954 uio.afsio_seg = AFS_UIOSYS;
955 uio.afsio_offset = offset;
956 uio.afsio_resid = fsbsize;
958 error = afs_read((struct vcache *)vp, &uio, cred, lbn, bpp, 0);
960 afs_bread_freebp = bp;
965 afs_bread_freebp = bp;
967 *(struct buf **)&bp->b_vp = bp; /* mark as fake */
/* Releases a buf obtained from mp_afs_bread(): real bufs go back to the
 * system (branch body not visible); a fake buf is freed outright if the
 * one-slot freelist is already occupied, otherwise it is parked in
 * afs_bread_freebp for reuse.  NOTE(review): fragmentary extraction. */
975 mp_afs_brelse(vp, bp)
980 AFS_STATCNT(afs_brelse);
981 if ((struct buf *)bp->b_vp != bp) { /* not fake */
983 } else if (afs_bread_freebp) {
984 AFS_KFREE(bp->b_un.b_addr, vp->v_vfsp->vfs_bsize);
985 AFS_KFREE(bp, sizeof(*bp));
987 afs_bread_freebp = bp;
/* bmap: converts a logical block number to a device block number assuming
 * 8K filesystem blocks (abn * 8192/DEV_BSIZE, i.e. 512-byte units).
 * NOTE(review): fragmentary — the *anvp assignment and return are missing
 * from this extraction. */
993 mp_afs_bmap(avc, abn, anvp, anbn)
994 register struct vcache *avc;
995 afs_int32 abn, *anbn;
996 struct vcache **anvp;
999 AFS_STATCNT(afs_bmap);
1003 *anbn = abn * (8192 / DEV_BSIZE); /* in 512 byte units */
/* strategy entry point: forwards the buf to afs_ustrategy() via
 * afs_osi_MapStrategy().  NOTE(review): fragmentary — braces and return
 * are missing from this extraction. */
1010 mp_afs_strategy (abp)
1011 register struct buf *abp;
1013 register afs_int32 code;
1016 AFS_STATCNT(afs_strategy);
1017 code = afs_osi_MapStrategy(afs_ustrategy, abp);
/* UBC object operations: refer/release/write_check stubs (bodies not
 * visible in this extraction) and the afs_ubcops dispatch table that wires
 * them, together with mp_afs_getpage/mp_afs_putpage, into the VM system. */
1023 mp_afs_refer(vm_ubc_object_t vop)
1029 mp_afs_release(vm_ubc_object_t vop)
1035 mp_afs_write_check(vm_ubc_object_t vop, vm_page_t pp)
1042 struct vfs_ubcops afs_ubcops = {
1043 mp_afs_refer, /* refer vnode */
1044 mp_afs_release, /* release vnode */
1045 mp_afs_getpage, /* get page */
1046 mp_afs_putpage, /* put page */
1047 mp_afs_write_check, /* check writability */
1052 * Cover function for lookup name using OSF equivalent, namei()
1054 * Note, the result vnode (ni_vp) in the namei data structure remains
1055 * locked after return.
1057 lookupname(namep, seg, follow, dvpp, cvpp)
1058 char *namep; /* path name */
1059 int seg; /* address space containing name */
1060 int follow; /* follow symbolic links */
1061 struct vnode **dvpp; /* result, containing parent vnode */
1062 struct vnode **cvpp; /* result, containing final component vnode */
1064 /* Should I use free-bee in u-area? */
1065 struct nameidata *ndp = &u.u_nd;
1068 ndp->ni_nameiop = ((follow) ? (LOOKUP|FOLLOW) : (LOOKUP));
1069 ndp->ni_segflg = seg;
1070 ndp->ni_dirp = namep;
1072 if (dvpp != (struct vnode **)0)
1073 *dvpp = ndp->ni_dvp;
1074 if (cvpp != (struct vnode **)0)