2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * vnodeops structure and Digital Unix specific ops and support routines.
14 #include <afsconfig.h>
15 #include "afs/param.h"
21 #include "afs/sysincludes.h" /* Standard vendor system headers */
22 #include "afsincludes.h" /* Afs-based standard headers */
23 #include "afs/afs_stats.h" /* statistics */
24 #include <vm/vm_mmap.h>
25 #include <vm/vm_ubc.h>
26 #include "afs/afs_cbqueue.h"
27 #include "afs/nfsclient.h"
28 #include "afs/afs_osidnlc.h"
/*
 * Forward declarations.
 * NOTE(review): this file appears to be an incomplete/sampled extract of the
 * original source -- many lines are missing and each remaining line carries a
 * stray leading line number from the extraction; code bytes are left untouched.
 *
 * afs_*   : the underlying AFS cache-manager vnode operations (defined
 *           elsewhere in the tree).
 * mp_afs_*: the Digital UNIX wrappers declared here and defined below; they
 *           are the entries wired into the Afs_vnodeops table.
 */
31 extern int afs_lookup(), afs_create(), afs_noop(), afs_open(), afs_close();
32 extern int afs_access(), afs_getattr(), afs_setattr(), afs_badop();
33 extern int afs_fsync(), afs_seek(), afs_remove(), afs_link(), afs_rename();
34 extern int afs_mkdir(), afs_rmdir(), afs_symlink(), afs_readdir();
35 extern int afs_readlink(), afs_lockctl();
36 extern int vn_pathconf_default(), seltrue();
/* Wrapper entry points defined later in this file. */
38 int mp_afs_lookup(), mp_afs_create(), mp_afs_open();
39 int mp_afs_access(), mp_afs_getattr(), mp_afs_setattr(), mp_afs_ubcrdwr();
40 int mp_afs_ubcrdwr(), mp_afs_mmap();
41 int mp_afs_fsync(), mp_afs_seek(), mp_afs_remove(), mp_afs_link();
42 int mp_afs_rename(), mp_afs_mkdir(), mp_afs_rmdir(), mp_afs_symlink();
43 int mp_afs_readdir(), mp_afs_readlink(), mp_afs_abortop(), mp_afs_inactive();
44 int mp_afs_reclaim(), mp_afs_bmap(), mp_afs_strategy(), mp_afs_print();
45 int mp_afs_page_read(), mp_afs_page_write(), mp_afs_swap(), mp_afs_bread();
46 int mp_afs_brelse(), mp_afs_lockctl(), mp_afs_syncdata(), mp_afs_close();
/*
 * The AFS vnode-operations table handed to the Digital UNIX VFS layer.
 * NOTE(review): most of the slot initializers are missing from this extract;
 * only a few entries (with their original slot comments) are visible.
 */
51 struct vnodeops Afs_vnodeops = {
54 afs_noop, /* vn_mknod */
62 mp_afs_ioctl, /* vn_ioctl */
63 seltrue, /* vn_select */
89 afs_noop, /* unLock */
90 afs_noop, /* get ext attrs */
91 afs_noop, /* set ext attrs */
92 afs_noop, /* del ext attrs */
/* Exported pointer used when AFS vnodes are initialized elsewhere. */
95 struct vnodeops *afs_ops = &Afs_vnodeops;
97 /* vnode file operations, and our own */
/* Stock vnode file operations reused in the fileops table below. */
99 extern int vn_write();
100 extern int vn_ioctl();
101 extern int vn_select();
102 extern int afs_closex();
/* File-descriptor level operations table for AFS files.
 * NOTE(review): initializer entries are missing from this extract. */
104 struct fileops afs_fileops = {
/*
 * mp_afs_* wrapper routines.
 * Each wrapper forwards to the corresponding afs_*() operation, capturing the
 * result in `code`.  NOTE(review): the extract omits the function bodies'
 * braces and surrounding statements (presumably global-lock acquire/release
 * and the `return code;` -- confirm against the full source).
 */
/* Directory lookup: adp = directory, ndp = nameidata with the component. */
112 mp_afs_lookup(adp, ndp)
114 struct nameidata *ndp;
118 code = afs_lookup(adp, ndp);
/* Create a regular file described by ndp with attributes attrs. */
123 mp_afs_create(ndp, attrs)
124 struct nameidata *ndp;
129 code = afs_create(ndp, attrs);
/* Open: avcp may be replaced by afs_open (hence struct vcache **). */
134 mp_afs_open(avcp, aflags, acred)
135 struct vcache **avcp;
137 struct AFS_UCRED *acred;
141 code = afs_open(avcp, aflags, acred);
/* Access check for mode amode under credentials acred. */
146 mp_afs_access(avc, amode, acred)
149 struct AFS_UCRED *acred;
153 code = afs_access(avc, amode, acred);
/* Close. */
158 mp_afs_close(avc, flags, cred)
165 code = afs_close(avc, flags, cred);
/* Get attributes into attrs. */
170 mp_afs_getattr(avc, attrs, acred)
173 struct AFS_UCRED *acred;
177 code = afs_getattr(avc, attrs, acred);
/* Set attributes from attrs. */
182 mp_afs_setattr(avc, attrs, acred)
185 struct AFS_UCRED *acred;
189 code = afs_setattr(avc, attrs, acred);
/* Flush file data; waitfor selects synchronous vs. async semantics. */
194 mp_afs_fsync(avc, fflags, acred, waitfor)
197 struct AFS_UCRED *acred;
202 code = afs_fsync(avc, fflags, acred, waitfor);
/* Remove (unlink) the object named by ndp. */
208 struct nameidata *ndp;
212 code = afs_remove(ndp);
/* Hard link avc at the name described by ndp. */
217 mp_afs_link(avc, ndp)
219 struct nameidata *ndp;
223 code = afs_link(avc, ndp);
/* Rename from fndp to tndp. */
228 mp_afs_rename(fndp, tndp)
229 struct nameidata *fndp, *tndp;
233 code = afs_rename(fndp, tndp);
/* Make a directory. */
238 mp_afs_mkdir(ndp, attrs)
239 struct nameidata *ndp;
244 code = afs_mkdir(ndp, attrs);
/* Remove a directory. */
250 struct nameidata *ndp;
254 code = afs_rmdir(ndp);
/* Create a symlink whose target text is atargetName. */
259 mp_afs_symlink(ndp, attrs, atargetName)
260 struct nameidata *ndp;
262 register char *atargetName;
266 code = afs_symlink(ndp, attrs, atargetName);
/* Read directory entries into auio; eofp reports end-of-directory. */
271 mp_afs_readdir(avc, auio, acred, eofp)
274 struct AFS_UCRED *acred;
279 code = afs_readdir(avc, auio, acred, eofp);
/* Read the contents of a symlink into auio. */
284 mp_afs_readlink(avc, auio, acred)
287 struct AFS_UCRED *acred;
291 code = afs_readlink(avc, auio, acred);
/* Byte-range file locking control. */
296 mp_afs_lockctl(avc, af, flag, acred, clid, offset)
299 struct AFS_UCRED *acred;
306 code = afs_lockctl(avc, af, flag, acred, clid, offset);
/* Descriptor-level close (from afs_fileops); afd is the file struct. */
316 code = afs_closex(afd);
/* Seek validation hook; oldoff/newoff are byte offsets. */
321 mp_afs_seek(avc, oldoff, newoff, cred)
323 off_t oldoff, newoff;
/* (fragment -- likely mp_afs_abortop's nameidata arg; confirm in full source) */
333 struct nameidata *ndp;
/* Last-reference processing: hand the vcache to the cache manager. */
338 mp_afs_inactive(avc, acred)
339 register struct vcache *avc;
340 struct AFS_UCRED *acred;
343 afs_InactiveVCache(avc, acred);
/*
 * VM pager entry points: satisfy page-in / page-out requests by routing them
 * through afs_rdwr().  NOTE(review): bodies are incomplete in this extract.
 */
/* Page-in: read page data described by uio. */
360 mp_afs_page_read(avc, uio, acred)
366 struct vrequest treq;
369 error = afs_rdwr(avc, uio, UIO_READ, 0, acred);
370 afs_Trace3(afs_iclSetp, CM_TRACE_PAGE_READ, ICL_TYPE_POINTER, avc,
371 ICL_TYPE_INT32, error, ICL_TYPE_INT32, avc->states);
/* On success, wire the vcache's chunks if not already wired (CWired). */
374 } else if ((avc->states & CWired) == 0) {
375 afs_InitReq(&treq, acred);
376 ObtainWriteLock(&avc->lock, 161);
377 afs_Wire(avc, &treq);
378 ReleaseWriteLock(&avc->lock);
/* Page-out: write page data described by uio back through the cache. */
385 mp_afs_page_write(avc, uio, acred, pager, offset)
389 memory_object_t pager;
395 error = afs_rdwr(avc, uio, UIO_WRITE, 0, acred);
396 afs_Trace3(afs_iclSetp, CM_TRACE_PAGE_WRITE, ICL_TYPE_POINTER, avc,
397 ICL_TYPE_INT32, error, ICL_TYPE_INT32, avc->states);
/*
 * mp_afs_ubcrdwr -- read/write through the Unified Buffer Cache (UBC).
 * Moves data page-by-page between the user's uio and UBC pages backed by the
 * AFS cache (afs_ustrategy does the actual cache I/O).
 * NOTE(review): this extract omits many statements (loop headers, braces,
 * error paths); comments below describe only what the visible lines show.
 */
407 mp_afs_ubcrdwr(avc, uio, ioflag, cred)
413 register afs_int32 code;
415 afs_int32 fileBase, size, cnt = 0;
417 register afs_int32 tsize;
418 register afs_int32 pageOffset;
420 struct vrequest treq;
421 int rw = uio->uio_rw;
425 afs_int32 save_resid;
431 afs_InitReq(&treq, cred);
/* NFS-translator reads get an explicit access check (PRSFS_READ). */
432 if (AFS_NFSXLATORREQ(cred) && rw == UIO_READ) {
434 (avc, PRSFS_READ, &treq,
435 CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) {
440 afs_Trace4(afs_iclSetp, CM_TRACE_VMRW, ICL_TYPE_POINTER, avc,
441 ICL_TYPE_INT32, (rw == UIO_WRITE ? 1 : 0), ICL_TYPE_LONG,
442 uio->uio_offset, ICL_TYPE_LONG, uio->uio_resid);
443 code = afs_VerifyVCache(avc, &treq);
445 code = afs_CheckCode(code, &treq, 35);
/* Only regular files may be read/written here. */
449 if (vType(avc) != VREG) {
451 return EISDIR; /* can't read or write other things */
453 afs_BozonLock(&avc->pvnLock, avc);
454 osi_FlushPages(avc, cred); /* hold bozon lock, but not basic vnode lock */
455 ObtainWriteLock(&avc->lock, 162);
456 /* adjust parameters when appending files */
457 if ((ioflag & IO_APPEND) && uio->uio_rw == UIO_WRITE)
458 uio->uio_offset = avc->m.Length; /* write at EOF position */
459 if (uio->uio_rw == UIO_WRITE) {
460 avc->states |= CDirty;
464 * before starting any I/O, we must ensure that the file is big enough
465 * to hold the results (since afs_putpage will be called to force
/* Pre-extend the file length so page-out beyond old EOF is valid. */
468 size = uio->afsio_resid + uio->afsio_offset; /* new file size */
469 if (size > avc->m.Length)
470 avc->m.Length = size; /* file grew */
471 avc->m.Date = osi_Time(); /* Set file date (for ranlib) */
472 if (uio->afsio_resid > PAGE_SIZE)
473 cnt = uio->afsio_resid / PAGE_SIZE;
474 save_resid = uio->afsio_resid;
/* --- per-page transfer loop (loop header missing from extract) --- */
479 * compute the amount of data to move into this block,
480 * based on uio->afsio_resid.
482 size = uio->afsio_resid; /* transfer size */
483 fileBase = uio->afsio_offset; /* start file position */
484 pageBase = fileBase & ~(PAGE_SIZE - 1); /* file position of the page */
485 pageOffset = fileBase & (PAGE_SIZE - 1); /* start offset within page */
486 tsize = PAGE_SIZE - pageOffset; /* amount left in this page */
488 * we'll read tsize bytes,
489 * but first must make sure tsize isn't too big
492 tsize = size; /* don't read past end of request */
493 eof = 0; /* flag telling us if we hit the EOF on the read */
494 if (uio->uio_rw == UIO_READ) { /* we're doing a read operation */
495 /* don't read past EOF */
496 if (tsize + fileBase > avc->m.Length) {
497 tsize = avc->m.Length - fileBase;
498 eof = 1; /* we did hit the EOF */
500 tsize = 0; /* better safe than sorry */
504 break; /* nothing to transfer, we're done */
506 /* Purge dirty chunks of file if there are too many dirty chunks.
507 * Inside the write loop, we only do this at a chunk boundary.
508 * Clean up partial chunk if necessary at end of loop.
510 if (uio->uio_rw == UIO_WRITE && counter > 0
511 && AFS_CHUNKOFFSET(fileBase) == 0) {
512 code = afs_DoPartialWrite(avc, &treq);
513 avc->states |= CDirty;
/* Drop the vcache lock while looking up the UBC page (may block). */
521 ReleaseWriteLock(&avc->lock);
525 ubc_lookup(AFSTOV(avc)->v_object, pageBase, PAGE_SIZE, PAGE_SIZE,
526 &page, &flags, NULL);
529 ubc_lookup(AFSTOV(avc)->v_object, pageBase, PAGE_SIZE, PAGE_SIZE,
533 ObtainWriteLock(&avc->lock, 163);
/* B_NOCACHE: the page was not already resident in the UBC. */
538 if (flags & B_NOCACHE) {
540 * No page found. We should not read the page in if
541 * 1. the write starts on a page edge (ie, pageoffset == 0)
543 * 1. we will fill the page (ie, size == PAGESIZE), or
544 * 2. we are writing past eof
546 if ((uio->uio_rw == UIO_WRITE)
549 && (size == PAGE_SIZE || fileBase >= avc->m.Length)))) {
550 struct vnode *vp = AFSTOV(avc);
551 /* we're doing a write operation past eof; no need to read it */
554 ubc_page_zero(page, 0, PAGE_SIZE);
555 ubc_page_release(page, B_DONE);
558 /* page wasn't cached, read it in. */
562 bp = ubc_bufalloc(page, 1, PAGE_SIZE, 1, B_READ);
565 bp->b_vp = AFSTOV(avc);
566 bp->b_blkno = btodb(pageBase);
567 ReleaseWriteLock(&avc->lock);
568 code = afs_ustrategy(bp, cred); /* do the I/O */
569 ObtainWriteLock(&avc->lock, 164);
575 ubc_page_release(page, 0);
/* Map the page and move tsize bytes to/from the user's uio. */
583 data = ubc_load(page, pageOffset, page_size);
585 ReleaseWriteLock(&avc->lock); /* uiomove may page fault */
587 code = uiomove(data + pageOffset, tsize, uio);
588 ubc_unload(page, pageOffset, page_size);
589 if (uio->uio_rw == UIO_WRITE) {
592 /* Mark the page dirty and release it to avoid a deadlock
593 * in ubc_dirty_kluster when more than one process writes
594 * this page at the same time. */
595 toffset = page->pg_offset;
597 ubc_page_release(page, flags);
604 /* We released the page, so we can get a null page
605 * list if another thread calls the strategy routine.
/* Kluster neighboring dirty pages and push them via afs_ustrategy. */
607 pl = ubc_dirty_kluster(AFSTOV(avc)->v_object, NULL, toffset,
608 0, B_WANTED, FALSE, &kpcnt);
610 bp = ubc_bufalloc(pl, 1, PAGE_SIZE, 1, B_WRITE);
612 bp->b_vp = AFSTOV(avc);
613 bp->b_blkno = btodb(pageBase);
615 code = afs_ustrategy(bp, cred); /* do the I/O */
620 ObtainWriteLock(&avc->lock, 415);
626 ubc_page_release(page, flags);
629 ObtainWriteLock(&avc->lock, 165);
631 * If reading at a chunk boundary, start prefetch of next chunk.
633 if (uio->uio_rw == UIO_READ
634 && (counter == 0 || AFS_CHUNKOFFSET(fileBase) == 0)) {
635 tdc = afs_FindDCache(avc, fileBase);
637 if (!(tdc->mflags & DFNextStarted))
638 afs_PrefetchChunk(avc, tdc, cred, &treq);
/* --- end of per-page loop; cleanup / error handling follows --- */
647 afs_FakeClose(avc, cred);
648 if (uio->uio_rw == UIO_WRITE && code == 0 && (avc->states & CDirty)) {
649 code = afs_DoPartialWrite(avc, &treq);
651 ReleaseWriteLock(&avc->lock);
652 afs_BozonUnlock(&avc->pvnLock, avc);
653 if (DO_FLUSH || (!newpage && (cnt < 10))) {
655 ubc_flush_dirty(AFSTOV(avc)->v_object, flags);
658 ObtainSharedLock(&avc->lock, 409);
659 ObtainSharedLock(&avc->lock, 409);
662 code = avc->vc_error;
665 /* This is required since we may still have dirty pages after the write.
666 * I could just let close do the right thing, but stat's before the close
667 * return the wrong length.
669 if (code == EDQUOT || code == ENOSPC) {
670 uio->uio_resid = save_resid;
671 UpgradeSToWLock(&avc->lock, 410);
672 osi_ReleaseVM(avc, cred);
673 ConvertWToSLock(&avc->lock);
675 ReleaseSharedLock(&avc->lock);
/* Synchronous writes (IO_SYNC) from local callers force an fsync. */
677 if (!code && (ioflag & IO_SYNC) && (uio->uio_rw == UIO_WRITE)
678 && !AFS_NFSXLATORREQ(cred)) {
679 code = afs_fsync(avc, 0, cred, 0);
682 code = afs_CheckCode(code, &treq, 36);
/* ioctl entry point (ANSI prototype style, unlike the K&R wrappers above).
 * NOTE(review): body is missing from this extract. */
688 mp_afs_ioctl(struct vnode *vp, int com, caddr_t data, int fflag,
689 struct ucred *cred, int *retval)
695 * Now for some bad news. Since we artificially hold on to vnodes by doing
696 * and extra VNHOLD in afs_NewVCache(), there is no way for us to know
697 * when we need to flush the pages when a program exits. Particularly
698 * if it closes the file after mapping it R/W.
/*
 * mmap entry point: flush stale pages, mark the vcache CMAPPED, then hand
 * the mapping request to the kernel via u_vp_create().
 */
702 mp_afs_mmap(avc, offset, map, addrp, len, prot, maxprot, flags, cred)
703 register struct vcache *avc;
713 struct vp_mmap_args args;
714 register struct vp_mmap_args *ap = &args;
715 struct vnode *vp = AFSTOV(avc);
717 struct vrequest treq;
719 extern kern_return_t u_vp_create();
723 afs_InitReq(&treq, cred);
724 code = afs_VerifyVCache(avc, &treq);
726 code = afs_CheckCode(code, &treq, 37);
/* Discard any stale cached pages before the mapping is established. */
730 afs_BozonLock(&avc->pvnLock, avc);
731 osi_FlushPages(avc, cred); /* ensure old pages are gone */
732 afs_BozonUnlock(&avc->pvnLock, avc);
733 ObtainWriteLock(&avc->lock, 166);
734 avc->states |= CMAPPED;
735 ReleaseWriteLock(&avc->lock);
/* Build the vp_mmap_args block and create the mapping. */
736 ap->a_offset = offset;
739 ap->a_prot = prot, ap->a_maxprot = maxprot;
742 code = u_vp_create(map, vp->v_object, (vm_offset_t) ap);
744 code = afs_CheckCode(code, &treq, 38);
/*
 * UBC getpage callback: fault in `pages` pages starting at `offset`,
 * reading uncached pages via afs_ustrategy and returning them in pl[].
 * NOTE(review): several statements are missing from this extract.
 */
751 mp_afs_getpage(vop, offset, len, protp, pl, plsz,
765 struct vm_policy *policy;
773 register afs_int32 code;
774 struct vrequest treq;
776 int i, pages = (len + PAGE_SIZE - 1) >> page_shift;
780 struct vcache *avc = VTOAFS(vop->vu_vp);
782 /* first, obtain the proper lock for the VM system */
785 afs_InitReq(&treq, cred);
786 code = afs_VerifyVCache(avc, &treq);
789 code = afs_CheckCode(code, &treq, 39); /* failed to get it */
794 /* clean all dirty pages for this vnode */
796 ubc_flush_dirty(vop, 0);
799 afs_BozonLock(&avc->pvnLock, avc);
800 ObtainWriteLock(&avc->lock, 167);
801 afs_Trace4(afs_iclSetp, CM_TRACE_PAGEIN, ICL_TYPE_POINTER, avc,
802 ICL_TYPE_LONG, offset, ICL_TYPE_LONG, len, ICL_TYPE_INT32,
/* Per-page fault loop. */
804 for (i = 0; i < pages; i++) {
806 off = offset + PAGE_SIZE * i;
/* Drop the vcache lock around ubc_lookup (may block). */
810 ReleaseWriteLock(&avc->lock);
814 ubc_lookup(AFSTOV(avc)->v_object, off, PAGE_SIZE, PAGE_SIZE,
815 pagep, &flags, NULL);
818 ubc_lookup(AFSTOV(avc)->v_object, off, PAGE_SIZE, PAGE_SIZE,
822 ObtainWriteLock(&avc->lock, 168);
/* B_NOCACHE: page not resident; decide between zero-fill and read. */
826 if (flags & B_NOCACHE) { /* if (page) */
827 if ((rw & B_WRITE) && (offset + len >= avc->m.Length)) {
828 struct vnode *vp = AFSTOV(avc);
829 /* we're doing a write operation past eof; no need to read it */
831 ubc_page_zero(*pagep, 0, PAGE_SIZE);
832 ubc_page_release(*pagep, B_DONE);
835 /* page wasn't cached, read it in. */
839 bp = ubc_bufalloc(*pagep, 1, PAGE_SIZE, 1, B_READ);
842 bp->b_vp = AFSTOV(avc);
843 bp->b_blkno = btodb(off);
844 ReleaseWriteLock(&avc->lock);
845 code = afs_ustrategy(bp, cred); /* do the I/O */
846 ObtainWriteLock(&avc->lock, 169);
852 ubc_page_release(pl[i], 0);
/* Write faults: pre-dirty the page (two call forms -- presumably
 * selected by an #ifdef that is missing from this extract). */
858 if ((rw & B_READ) == 0) {
861 ubc_page_dirty(pl[i], 0);
863 ubc_page_dirty(pl[i]);
867 if (protp && (flags & B_DIRTY) == 0) {
868 protp[i] = VM_PROT_WRITE;
/* Terminate the returned page list. */
873 pl[i] = VM_PAGE_NULL;
874 ReleaseWriteLock(&avc->lock);
875 afs_BozonUnlock(&avc->pvnLock, avc);
876 afs_Trace3(afs_iclSetp, CM_TRACE_PAGEINDONE, ICL_TYPE_INT32, code,
877 ICL_TYPE_POINTER, *pagep, ICL_TYPE_INT32, flags);
878 code = afs_CheckCode(code, &treq, 40);
/*
 * UBC putpage callback: write the pcnt pages in pl[] back through
 * afs_ustrategy.  NOTE(review): some statements missing from this extract.
 */
885 mp_afs_putpage(vop, pl, pcnt, flags, cred)
892 register afs_int32 code = 0;
893 struct vnode *vp = vop->vu_vp;
894 struct vcache *avc = VTOAFS(vp);
898 afs_Trace4(afs_iclSetp, CM_TRACE_PAGEOUT, ICL_TYPE_POINTER, avc,
899 ICL_TYPE_INT32, pcnt, ICL_TYPE_INT32, vp->v_flag,
900 ICL_TYPE_INT32, flags);
/* Vnode being torn down (VXLOCK): release the pages without writing. */
904 if (vp->v_flag & VXLOCK) {
906 for (i = 0; i < pcnt; i++) {
907 ubc_page_release(pl[i], B_DONE | B_DIRTY);
908 pl[i] = VM_PAGE_NULL;
917 /* first, obtain the proper lock for the VM system */
918 afs_BozonLock(&avc->pvnLock, avc);
919 ObtainWriteLock(&avc->lock, 170);
/* Write each page via a fake buf and afs_ustrategy. */
920 for (i = 0; i < pcnt; i++) {
921 vm_page_t page = pl[i];
926 bp = ubc_bufalloc(page, 1, PAGE_SIZE, 1, B_WRITE);
929 bp->b_vp = AFSTOV(avc);
930 bp->b_blkno = btodb(page->pg_offset);
931 ReleaseWriteLock(&avc->lock);
932 code = afs_ustrategy(bp, cred); /* do the I/O */
933 ObtainWriteLock(&avc->lock, 171);
940 pl[i] = VM_PAGE_NULL;
944 ReleaseWriteLock(&avc->lock);
945 afs_BozonUnlock(&avc->pvnLock, avc);
946 afs_Trace2(afs_iclSetp, CM_TRACE_PAGEOUTDONE, ICL_TYPE_INT32, code,
947 ICL_TYPE_INT32, avc->m.Length);
/* Swap entry point -- NOTE(review): body missing from this extract. */
954 mp_afs_swap(avc, swapop, argp)
/* Sync a byte range; NFSv3 translator calls are deliberately ignored
 * (data is synced later in afs_fsync instead). */
963 mp_afs_syncdata(avc, flag, offset, length, cred)
970 /* NFS V3 makes this call, ignore it. We'll sync the data in afs_fsync. */
971 if (AFS_NFSXLATORREQ(cred))
977 /* a freelist of one */
/* Single-entry cache of a previously-allocated fake buf, reused by
 * mp_afs_bread / mp_afs_brelse to avoid repeated allocation. */
978 struct buf *afs_bread_freebp = 0;
/*
 * Read one logical block lbn of vp into a fake buf (not from the real
 * buffer pool) and return it via *bpp.
 */
981 * Only rfs_read calls this, and it only looks at bp->b_un.b_addr.
982 * Thus we can use fake bufs (ie not from the real buffer pool).
984 mp_afs_bread(vp, lbn, bpp, cred)
990 int offset, fsbsize, error;
996 AFS_STATCNT(afs_bread);
997 fsbsize = vp->v_vfsp->vfs_bsize;
998 offset = lbn * fsbsize;
/* Reuse the one-deep freelist if possible, else allocate a fresh fake buf. */
999 if (afs_bread_freebp) {
1000 bp = afs_bread_freebp;
1001 afs_bread_freebp = 0;
1003 bp = (struct buf *)AFS_KALLOC(sizeof(*bp));
1004 bp->b_un.b_addr = (caddr_t) AFS_KALLOC(fsbsize);
/* Build a kernel-space uio describing one filesystem block. */
1007 iov.iov_base = bp->b_un.b_addr;
1008 iov.iov_len = fsbsize;
1009 uio.afsio_iov = &iov;
1010 uio.afsio_iovcnt = 1;
1011 uio.afsio_seg = AFS_UIOSYS;
1012 uio.afsio_offset = offset;
1013 uio.afsio_resid = fsbsize;
1015 error = afs_read(VTOAFS(vp), &uio, cred, lbn, bpp, 0);
/* On the paths visible here the buf is parked back on the freelist. */
1017 afs_bread_freebp = bp;
1022 afs_bread_freebp = bp;
/* Self-referencing b_vp tags the buf as fake (checked in mp_afs_brelse). */
1024 *(struct buf **)&bp->b_vp = bp; /* mark as fake */
/*
 * Release a buf obtained from mp_afs_bread: real bufs go back to the system;
 * fake bufs are either freed or parked on the one-deep freelist.
 */
1032 mp_afs_brelse(vp, bp)
1037 AFS_STATCNT(afs_brelse);
1038 if ((struct buf *)bp->b_vp != bp) { /* not fake */
/* Freelist already occupied: free this fake buf and its data block. */
1040 } else if (afs_bread_freebp) {
1041 AFS_KFREE(bp->b_un.b_addr, vp->v_vfsp->vfs_bsize);
1042 AFS_KFREE(bp, sizeof(*bp));
1044 afs_bread_freebp = bp;
/*
 * Map logical block abn to a device block number.  The visible scaling
 * (8192 / DEV_BSIZE) converts 8K logical blocks to 512-byte units.
 */
1050 mp_afs_bmap(avc, abn, anvp, anbn)
1051 register struct vcache *avc;
1052 afs_int32 abn, *anbn;
1053 struct vcache **anvp;
1056 AFS_STATCNT(afs_bmap);
1060 *anbn = abn * (8192 / DEV_BSIZE); /* in 512 byte units */
/* Buffer strategy entry: forward the buf to afs_ustrategy via the
 * OS-independent mapping helper. */
1067 mp_afs_strategy(abp)
1068 register struct buf *abp;
1070 register afs_int32 code;
1073 AFS_STATCNT(afs_strategy);
1074 code = afs_osi_MapStrategy(afs_ustrategy, abp);
/*
 * UBC object callbacks and their dispatch table.
 * NOTE(review): the bodies of refer/release/write_check/objtovp are missing
 * from this extract; only setpgstamp's one-line body is visible.
 */
1080 mp_afs_refer(vm_ubc_object_t vop)
1086 mp_afs_release(vm_ubc_object_t vop)
1092 mp_afs_write_check(vm_ubc_object_t vop, vm_page_t pp)
1097 #ifdef AFS_DUX50_ENV
1099 mp_afs_objtovp(vm_ubc_object_t vop, struct vnode **vp)
/* Record the given tick as the page's stamp. */
1106 mp_afs_setpgstamp(vm_page_t pp, unsigned int tick)
1108 pp->pg_stamp = tick;
/* Callback table registered with the UBC for AFS-backed VM objects. */
1114 struct vfs_ubcops afs_ubcops = {
1115 mp_afs_refer, /* refer vnode */
1116 mp_afs_release, /* release vnode */
1117 mp_afs_getpage, /* get page */
1118 mp_afs_putpage, /* put page */
1119 mp_afs_write_check, /* check writablity */
1120 #ifdef AFS_DUX50_ENV
1121 mp_afs_objtovp, /* get vnode pointer */
1122 mp_afs_setpgstamp /* set page stamp */
1128 * Cover function for lookup name using OSF equivalent, namei()
1130 * Note, the result vnode (ni_vp) in the namei data structure is remains
1131 * locked after return.
1133 lookupname(namep, seg, follow, dvpp, cvpp)
1134 char *namep; /* path name */
1135 int seg; /* address space containing name */
1136 int follow; /* follow symbolic links */
1137 struct vnode **dvpp; /* result, containing parent vnode */
1138 struct vnode **cvpp; /* result, containing final component vnode */
1140 /* Should I use free-bee in u-area? */
1141 struct nameidata *ndp = &u.u_nd;
1144 ndp->ni_nameiop = ((follow) ? (LOOKUP | FOLLOW) : (LOOKUP));
1145 ndp->ni_segflg = seg;
1146 ndp->ni_dirp = namep;
1149 *dvpp = ndp->ni_dvp;