2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
11 * vnodeops structure and Digital Unix specific ops and support routines.
14 #include <afsconfig.h>
15 #include "../afs/param.h"
20 #include "../afs/sysincludes.h" /* Standard vendor system headers */
21 #include "../afs/afsincludes.h" /* Afs-based standard headers */
22 #include "../afs/afs_stats.h" /* statistics */
23 #include <vm/vm_mmap.h>
24 #include <vm/vm_ubc.h>
25 #include "../afs/afs_cbqueue.h"
26 #include "../afs/nfsclient.h"
27 #include "../afs/afs_osidnlc.h"
/*
 * Forward declarations for the base (single-threaded) AFS vnode
 * operations and for the mp_afs_* wrappers installed in the
 * Afs_vnodeops table below.  K&R style (implicit int, empty parameter
 * lists) matches the rest of this file.
 * NOTE(review): this chunk appears to be a sampled extraction, so
 * declarations for some routines referenced later may be elided here.
 */
30 extern int afs_lookup(), afs_create(), afs_noop(), afs_open(), afs_close();
31 extern int afs_access(), afs_getattr(), afs_setattr(), afs_badop();
32 extern int afs_fsync(), afs_seek(), afs_remove(), afs_link(), afs_rename();
33 extern int afs_mkdir(), afs_rmdir(), afs_symlink(), afs_readdir();
34 extern int afs_readlink(), afs_lockctl();
35 extern int vn_pathconf_default(), seltrue();
37 int mp_afs_lookup(), mp_afs_create(), mp_afs_open();
38 int mp_afs_access(), mp_afs_getattr(), mp_afs_setattr(), mp_afs_ubcrdwr();
39 int mp_afs_mmap();  /* dropped redundant second mp_afs_ubcrdwr() declaration */
40 int mp_afs_fsync(), mp_afs_seek(), mp_afs_remove(), mp_afs_link();
41 int mp_afs_rename(), mp_afs_mkdir(), mp_afs_rmdir(), mp_afs_symlink();
42 int mp_afs_readdir(), mp_afs_readlink(), mp_afs_abortop(), mp_afs_inactive();
43 int mp_afs_reclaim(), mp_afs_bmap(), mp_afs_strategy(), mp_afs_print();
44 int mp_afs_page_read(), mp_afs_page_write(), mp_afs_swap(), mp_afs_bread();
45 int mp_afs_brelse(), mp_afs_lockctl(), mp_afs_syncdata(), mp_afs_close();
/*
 * The AFS vnodeops dispatch table handed to the Digital UNIX VFS layer.
 * Only a few entries are visible in this extraction; the bulk of the
 * table (lookup, create, open, rdwr, ...) is elided between the lines
 * below.  afs_noop entries fill slots AFS does not implement.
 */
50 struct vnodeops Afs_vnodeops = {
53 afs_noop, /* vn_mknod */
61 mp_afs_ioctl, /* vn_ioctl */
62 seltrue, /* vn_select */
88 afs_noop, /* unLock */
89 afs_noop, /* get ext attrs */
90 afs_noop, /* set ext attrs */
91 afs_noop, /* del ext attrs */
/* Exported handle so the rest of the client can install/compare the ops. */
94 struct vnodeops *afs_ops = &Afs_vnodeops;
96 /* vnode file operations, and our own */
98 extern int vn_write();
99 extern int vn_ioctl();
100 extern int vn_select();
101 extern int afs_closex();
/*
 * File-descriptor level operations for AFS files.  The initializer body
 * (read/write/ioctl/select/close entries) is elided in this extraction.
 */
103 struct fileops afs_fileops = {
/*
 * mp_afs_* wrappers: each forwards to the corresponding single-threaded
 * afs_* routine and returns its code.  Presumably the elided lines take
 * and drop the AFS global lock around each call -- TODO confirm; the
 * wrapper bodies are incomplete in this extraction.
 */
111 mp_afs_lookup(adp, ndp)
113 struct nameidata *ndp;
117 code = afs_lookup(adp, ndp);
122 mp_afs_create(ndp, attrs)
123 struct nameidata *ndp;
128 code = afs_create(ndp, attrs);
133 mp_afs_open(avcp, aflags, acred)
134 struct vcache **avcp;
136 struct AFS_UCRED *acred;
140 code = afs_open(avcp, aflags, acred);
145 mp_afs_access(avc, amode, acred)
148 struct AFS_UCRED *acred;
152 code = afs_access(avc, amode, acred);
157 mp_afs_close(avc, flags, cred)
164 code = afs_close(avc, flags, cred);
/* Attribute, sync, remove and link wrappers -- same forwarding pattern
 * as the wrappers above. */
169 mp_afs_getattr(avc, attrs, acred)
172 struct AFS_UCRED *acred;
176 code = afs_getattr(avc, attrs, acred);
181 mp_afs_setattr(avc, attrs, acred)
184 struct AFS_UCRED *acred;
188 code = afs_setattr(avc, attrs, acred);
193 mp_afs_fsync(avc, fflags, acred, waitfor)
196 struct AFS_UCRED *acred;
201 code = afs_fsync(avc, fflags, acred, waitfor);
207 struct nameidata *ndp;
211 code = afs_remove(ndp);
216 mp_afs_link(avc, ndp)
218 struct nameidata *ndp;
222 code = afs_link(avc, ndp);
/* Directory-shape operations: again thin forwarding wrappers. */
227 mp_afs_rename(fndp, tndp)
228 struct nameidata *fndp, *tndp;
232 code = afs_rename(fndp, tndp);
237 mp_afs_mkdir(ndp, attrs)
238 struct nameidata *ndp;
243 code = afs_mkdir(ndp, attrs);
249 struct nameidata *ndp;
253 code = afs_rmdir(ndp);
258 mp_afs_symlink(ndp, attrs, atargetName)
259 struct nameidata *ndp;
261 register char *atargetName;
265 code = afs_symlink(ndp, attrs, atargetName);
270 mp_afs_readdir(avc, auio, acred, eofp)
273 struct AFS_UCRED *acred;
278 code = afs_readdir(avc, auio, acred, eofp);
283 mp_afs_readlink(avc, auio, acred)
286 struct AFS_UCRED *acred;
290 code = afs_readlink(avc, auio, acred);
295 mp_afs_lockctl(avc, af, flag, acred, clid, offset)
298 struct AFS_UCRED *acred;
305 code = afs_lockctl(avc, af, flag, acred, clid, offset);
/* mp_afs_closex: descriptor-level close; forwards to afs_closex(). */
315 code = afs_closex(afd);
/* mp_afs_seek: validates the new offset rather than doing any I/O. */
320 mp_afs_seek(avc, oldoff, newoff, cred)
322 off_t oldoff, newoff;
/* NOTE(review): the (int) cast truncates off_t before the sign test,
 * so very large positive offsets could be mis-rejected -- verify. */
325 if ((int) newoff < 0)
332 struct nameidata *ndp;
/* mp_afs_inactive: last reference gone; hand the vcache back to the
 * cache manager for deactivation. */
337 mp_afs_inactive(avc, acred)
338 register struct vcache *avc;
339 struct AFS_UCRED *acred;
342 afs_InactiveVCache(avc, acred);
/*
 * VM pager read: satisfy a page-in via the common read path
 * (afs_rdwr with UIO_READ), then trace the result.  If the read
 * succeeded and the file is not yet wired (CWired clear), wire it --
 * presumably to pin its cache chunks while the file is mapped; TODO
 * confirm afs_Wire semantics.
 */
359 mp_afs_page_read(avc, uio, acred)
365 struct vrequest treq;
368 error = afs_rdwr(avc, uio, UIO_READ, 0, acred);
369 afs_Trace3(afs_iclSetp, CM_TRACE_PAGE_READ, ICL_TYPE_POINTER, avc,
370 ICL_TYPE_INT32, error, ICL_TYPE_INT32, avc->states);
373 } else if ((avc->states & CWired) == 0) {
374 afs_InitReq(&treq, acred);
/* afs_Wire requires the vcache write-locked. */
375 ObtainWriteLock(&avc->lock,161);
376 afs_Wire(avc, &treq);
377 ReleaseWriteLock(&avc->lock);
/*
 * VM pager write: push a page-out through the common write path
 * (afs_rdwr with UIO_WRITE) and trace the result.  pager/offset are
 * supplied by the VM system; only the uio is consumed here.
 */
384 mp_afs_page_write(avc, uio, acred, pager, offset)
388 memory_object_t pager;
394 error = afs_rdwr(avc, uio, UIO_WRITE, 0, acred);
395 afs_Trace3(afs_iclSetp, CM_TRACE_PAGE_WRITE, ICL_TYPE_POINTER, avc,
396 ICL_TYPE_INT32, error, ICL_TYPE_INT32, avc->states);
/*
 * Read/write through the Unified Buffer Cache.  Walks the request a
 * page at a time: look each page up in the UBC; if absent, read it in
 * through afs_ustrategy, or zero-fill it when a write fully covers the
 * page or starts past EOF; then uiomove the data in or out.  Writes
 * mark pages dirty and may push them early via ubc_dirty_kluster to
 * avoid a multi-writer deadlock; reads start a prefetch of the next
 * chunk at chunk boundaries.  On EDQUOT/ENOSPC the residual count is
 * restored and the now-invalid cached VM pages are released.
 * Only regular files are supported (EISDIR otherwise).
 */
406 mp_afs_ubcrdwr(avc, uio, ioflag, cred)
412 register afs_int32 code;
414 afs_int32 fileBase, size, cnt=0;
416 register afs_int32 tsize;
417 register afs_int32 pageOffset;
419 struct vrequest treq;
420 int rw = uio->uio_rw;
424 afs_int32 save_resid;
430 afs_InitReq(&treq, cred);
/* NFS-translator reads must pass an explicit ACL check first. */
431 if (AFS_NFSXLATORREQ(cred) && rw == UIO_READ) {
432 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
433 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
438 afs_Trace4(afs_iclSetp, CM_TRACE_VMRW, ICL_TYPE_POINTER, avc,
439 ICL_TYPE_INT32, (rw==UIO_WRITE? 1 : 0),
440 ICL_TYPE_LONG, uio->uio_offset,
441 ICL_TYPE_LONG, uio->uio_resid);
442 code = afs_VerifyVCache(avc, &treq);
444 code = afs_CheckCode(code, &treq, 35);
448 if (vType(avc) != VREG) {
450 return EISDIR; /* can't read or write other things */
452 afs_BozonLock(&avc->pvnLock, avc);
453 osi_FlushPages(avc); /* hold bozon lock, but not basic vnode lock */
454 ObtainWriteLock(&avc->lock,162);
455 /* adjust parameters when appending files */
456 if ((ioflag & IO_APPEND) && uio->uio_rw == UIO_WRITE)
457 uio->uio_offset = avc->m.Length; /* write at EOF position */
458 if (uio->uio_rw == UIO_WRITE) {
459 avc->states |= CDirty;
463 * before starting any I/O, we must ensure that the file is big enough
464 * to hold the results (since afs_putpage will be called to force
467 size = uio->afsio_resid + uio->afsio_offset; /* new file size */
468 if (size > avc->m.Length) avc->m.Length = size; /* file grew */
469 avc->m.Date = osi_Time(); /* Set file date (for ranlib) */
470 if (uio->afsio_resid > PAGE_SIZE)
471 cnt = uio->afsio_resid / PAGE_SIZE;
/* remember resid so it can be restored if the write hits quota/space */
472 save_resid = uio->afsio_resid;
477 * compute the amount of data to move into this block,
478 * based on uio->afsio_resid.
480 size = uio->afsio_resid; /* transfer size */
481 fileBase = uio->afsio_offset; /* start file position */
482 pageBase = fileBase & ~(PAGE_SIZE-1); /* file position of the page */
483 pageOffset = fileBase & (PAGE_SIZE-1); /* start offset within page */
484 tsize = PAGE_SIZE-pageOffset; /* amount left in this page */
486 * we'll read tsize bytes,
487 * but first must make sure tsize isn't too big
489 if (tsize > size) tsize = size; /* don't read past end of request */
490 eof = 0; /* flag telling us if we hit the EOF on the read */
491 if (uio->uio_rw == UIO_READ) { /* we're doing a read operation */
492 /* don't read past EOF */
493 if (tsize + fileBase > avc->m.Length) {
494 tsize = avc->m.Length - fileBase;
495 eof = 1; /* we did hit the EOF */
496 if (tsize < 0) tsize = 0; /* better safe than sorry */
499 if (tsize <= 0) break; /* nothing to transfer, we're done */
501 /* Purge dirty chunks of file if there are too many dirty chunks.
502 * Inside the write loop, we only do this at a chunk boundary.
503 * Clean up partial chunk if necessary at end of loop.
505 if (uio->uio_rw == UIO_WRITE && counter > 0
506 && AFS_CHUNKOFFSET(fileBase) == 0) {
507 code = afs_DoPartialWrite(avc, &treq);
508 avc->states |= CDirty;
/* ubc_lookup can block, so the vcache lock is dropped around it
 * (two UBC API variants, selected by an elided #ifdef). */
516 ReleaseWriteLock(&avc->lock);
519 code = ubc_lookup(((struct vnode *)avc)->v_object, pageBase,
520 PAGE_SIZE, PAGE_SIZE, &page, &flags, NULL);
522 code = ubc_lookup(((struct vnode *)avc)->v_object, pageBase,
523 PAGE_SIZE, PAGE_SIZE, &page, &flags);
526 ObtainWriteLock(&avc->lock,163);
531 if (flags & B_NOCACHE) {
533 No page found. We should not read the page in if
534 1. the write starts on a page edge (ie, pageoffset == 0)
536 1. we will fill the page (ie, size == PAGESIZE), or
537 2. we are writing past eof
539 if ((uio->uio_rw == UIO_WRITE) &&
540 ((pageOffset == 0 && (size == PAGE_SIZE || fileBase >= avc->m.Length)))) {
541 struct vnode *vp = (struct vnode *)avc;
542 /* we're doing a write operation past eof; no need to read it */
545 ubc_page_zero(page, 0, PAGE_SIZE);
546 ubc_page_release(page, B_DONE);
549 /* page wasn't cached, read it in. */
553 bp = ubc_bufalloc(page, 1, PAGE_SIZE, 1, B_READ);
556 bp->b_vp = (struct vnode *)avc;
557 bp->b_blkno = btodb(pageBase);
/* drop the lock while afs_ustrategy performs the cache I/O */
558 ReleaseWriteLock(&avc->lock);
559 code = afs_ustrategy(bp, cred); /* do the I/O */
560 ObtainWriteLock(&avc->lock,164);
566 ubc_page_release(page, 0);
574 data = ubc_load(page, pageOffset, page_size);
576 ReleaseWriteLock(&avc->lock); /* uiomove may page fault */
578 code = uiomove(data+pageOffset, tsize, uio);
579 ubc_unload(page, pageOffset, page_size);
580 if (uio->uio_rw == UIO_WRITE) {
583 /* Mark the page dirty and release it to avoid a deadlock
584 * in ubc_dirty_kluster when more than one process writes
585 * this page at the same time. */
586 toffset = page->pg_offset;
588 ubc_page_release(page, flags);
595 /* We released the page, so we can get a null page
596 * list if another thread calls the strategy routine.
598 pl = ubc_dirty_kluster(((struct vnode *)avc)->v_object,
599 NULL, toffset, 0, B_WANTED, FALSE, &kpcnt);
601 bp = ubc_bufalloc(pl, 1, PAGE_SIZE, 1, B_WRITE);
603 bp->b_vp = (struct vnode *)avc;
604 bp->b_blkno = btodb(pageBase);
606 code = afs_ustrategy(bp, cred); /* do the I/O */
611 ObtainWriteLock(&avc->lock,415);
617 ubc_page_release(page, flags);
620 ObtainWriteLock(&avc->lock,165);
622 * If reading at a chunk boundary, start prefetch of next chunk.
624 if (uio->uio_rw == UIO_READ
625 && (counter == 0 || AFS_CHUNKOFFSET(fileBase) == 0)) {
626 tdc = afs_FindDCache(avc, fileBase);
628 if (!(tdc->flags & DFNextStarted))
629 afs_PrefetchChunk(avc, tdc, cred, &treq);
637 afs_FakeClose(avc, cred);
/* flush any partial chunk left dirty at the end of a write */
638 if (uio->uio_rw == UIO_WRITE && code == 0 && (avc->states & CDirty)) {
639 code = afs_DoPartialWrite(avc, &treq);
641 ReleaseWriteLock(&avc->lock);
642 afs_BozonUnlock(&avc->pvnLock, avc);
643 if (DO_FLUSH || (!newpage && (cnt < 10))) {
645 ubc_flush_dirty(((struct vnode *)avc)->v_object, flags);
/* pick up any error previously recorded on the vnode */
649 ObtainSharedLock(&avc->lock, 409);
652 code = avc->vc_error;
655 /* This is required since we may still have dirty pages after the write.
656 * I could just let close do the right thing, but stat's before the close
657 * return the wrong length.
659 if (code == EDQUOT || code == ENOSPC) {
660 uio->uio_resid = save_resid;
661 UpgradeSToWLock(&avc->lock, 410);
662 osi_ReleaseVM(avc, cred);
663 ConvertWToSLock(&avc->lock);
665 ReleaseSharedLock(&avc->lock);
/* synchronous writes (except from the NFS translator) get an fsync */
667 if (!code && (ioflag & IO_SYNC) && (uio->uio_rw == UIO_WRITE)
668 && !AFS_NFSXLATORREQ(cred)) {
669 code = afs_fsync(avc, 0, cred, 0);
672 code = afs_CheckCode(code, &treq, 36);
/*
 * Ioctl entry point (ANSI prototype, unlike the K&R wrappers above).
 * Body elided in this extraction; presumably forwards to the common
 * AFS ioctl handler -- TODO confirm.
 */
678 mp_afs_ioctl(struct vnode *vp, int com, caddr_t data, int fflag,
679 struct ucred *cred, int *retval)
685 * Now for some bad news. Since we artificially hold on to vnodes by doing
686 * an extra VNHOLD in afs_NewVCache(), there is no way for us to know
687 * when we need to flush the pages when a program exits. Particularly
688 * if it closes the file after mapping it R/W.
/*
 * mmap entry point: validate the vcache, flush stale pages, flag the
 * file as mapped (CMAPPED), then hand the mapping request to the VM
 * system via u_vp_create() with a vp_mmap_args block.
 */
692 mp_afs_mmap(avc, offset, map, addrp, len, prot, maxprot, flags, cred)
693 register struct vcache *avc;
703 struct vp_mmap_args args;
704 register struct vp_mmap_args *ap = &args;
705 struct vnode *vp = (struct vnode *)avc;
707 struct vrequest treq;
709 extern kern_return_t u_vp_create();
713 afs_InitReq(&treq, cred);
/* ensure status/callback are valid before allowing the mapping */
714 code = afs_VerifyVCache(avc, &treq);
716 code = afs_CheckCode(code, &treq, 37);
/* toss stale pages while holding only the bozon (paging) lock */
720 afs_BozonLock(&avc->pvnLock, avc);
721 osi_FlushPages(avc); /* ensure old pages are gone */
722 afs_BozonUnlock(&avc->pvnLock, avc);
723 ObtainWriteLock(&avc->lock,166);
724 avc->states |= CMAPPED;
725 ReleaseWriteLock(&avc->lock);
/* package the request for the VM system (other fields elided) */
726 ap->a_offset = offset;
730 ap->a_maxprot = maxprot;
733 code = u_vp_create(map, vp->v_object, (vm_offset_t) ap);
735 code = afs_CheckCode(code, &treq, 38);
/*
 * UBC getpage: fault pages of the file in for the VM system.
 * For each requested page: look it up in the UBC; if absent, either
 * zero-fill it (write fault entirely past EOF) or read it from the
 * AFS cache via afs_ustrategy.  Write faults mark the page dirty;
 * protp (when supplied) reports per-page write permission.
 */
741 int mp_afs_getpage(vop, offset, len, protp, pl, plsz,
755 struct vm_policy *policy;
763 register afs_int32 code;
764 struct vrequest treq;
766 int i, pages = (len + PAGE_SIZE - 1) >> page_shift;
770 struct vcache *avc = (struct vcache *)vop->vu_vp;
772 /* first, obtain the proper lock for the VM system */
775 afs_InitReq(&treq, cred);
776 code = afs_VerifyVCache(avc, &treq);
779 code = afs_CheckCode(code, &treq, 39); /* failed to get it */
784 /* clean all dirty pages for this vnode */
786 ubc_flush_dirty(vop,0);
789 afs_BozonLock(&avc->pvnLock, avc);
790 ObtainWriteLock(&avc->lock,167);
791 afs_Trace4(afs_iclSetp, CM_TRACE_PAGEIN, ICL_TYPE_POINTER, avc,
792 ICL_TYPE_LONG, offset, ICL_TYPE_LONG, len,
793 ICL_TYPE_INT32, (int) rw);
794 for (i = 0; i < pages; i++) {
796 off = offset + PAGE_SIZE * i;
797 if (protp) protp[i] = 0;
/* ubc_lookup can block; drop the vcache lock across it
 * (two UBC API variants, selected by an elided #ifdef) */
799 ReleaseWriteLock(&avc->lock);
802 code = ubc_lookup(((struct vnode *)avc)->v_object, off,
803 PAGE_SIZE, PAGE_SIZE, pagep, &flags, NULL);
805 code = ubc_lookup(((struct vnode *)avc)->v_object, off,
806 PAGE_SIZE, PAGE_SIZE, pagep, &flags);
809 ObtainWriteLock(&avc->lock,168);
813 if(flags & B_NOCACHE) { /* if (page) */
814 if ((rw & B_WRITE) && (offset+len >= avc->m.Length)) {
815 struct vnode *vp = (struct vnode *)avc;
816 /* we're doing a write operation past eof; no need to read it */
818 ubc_page_zero(*pagep, 0, PAGE_SIZE);
819 ubc_page_release(*pagep, B_DONE);
822 /* page wasn't cached, read it in. */
826 bp = ubc_bufalloc(*pagep, 1, PAGE_SIZE, 1, B_READ);
829 bp->b_vp = (struct vnode *)avc;
830 bp->b_blkno = btodb(off);
/* afs_ustrategy does the cache I/O; release the lock while it runs */
831 ReleaseWriteLock(&avc->lock);
832 code = afs_ustrategy(bp, cred); /* do the I/O */
833 ObtainWriteLock(&avc->lock,169);
839 ubc_page_release(pl[i], 0);
845 if ((rw & B_READ) == 0) {
/* write fault: mark the page dirty (two UBC API variants) */
848 ubc_page_dirty(pl[i], 0);
850 ubc_page_dirty(pl[i]);
854 if (protp && (flags & B_DIRTY) == 0) {
855 protp[i] = VM_PROT_WRITE;
860 pl[i] = VM_PAGE_NULL;
861 ReleaseWriteLock(&avc->lock);
862 afs_BozonUnlock(&avc->pvnLock, avc);
863 afs_Trace3(afs_iclSetp, CM_TRACE_PAGEINDONE, ICL_TYPE_INT32, code,
864 ICL_TYPE_POINTER, *pagep, ICL_TYPE_INT32, flags);
865 code = afs_CheckCode(code, &treq, 40);
/*
 * UBC putpage: write dirty pages back through the AFS cache.
 * If the vnode is VXLOCK'd (presumably being recycled -- TODO confirm)
 * the pages are released still-dirty instead of being written.
 */
871 int mp_afs_putpage(vop, pl, pcnt, flags, cred)
878 register afs_int32 code=0;
879 struct vcache *avc = (struct vcache *)vop->vu_vp;
880 struct vnode *vp = (struct vnode *)avc;
884 afs_Trace4(afs_iclSetp, CM_TRACE_PAGEOUT, ICL_TYPE_POINTER, avc,
885 ICL_TYPE_INT32, pcnt, ICL_TYPE_INT32, vp->v_flag,
886 ICL_TYPE_INT32, flags);
890 if (vp->v_flag & VXLOCK) {
892 for (i = 0; i < pcnt; i++) {
893 ubc_page_release(pl[i], B_DONE|B_DIRTY);
894 pl[i] = VM_PAGE_NULL;
903 /* first, obtain the proper lock for the VM system */
904 afs_BozonLock(&avc->pvnLock, avc);
905 ObtainWriteLock(&avc->lock,170);
906 for (i = 0; i < pcnt; i++) {
907 vm_page_t page = pl[i];
/* one buf per page; afs_ustrategy pushes it into the AFS cache */
912 bp = ubc_bufalloc(page, 1, PAGE_SIZE, 1, B_WRITE);
915 bp->b_vp = (struct vnode *)avc;
916 bp->b_blkno = btodb(page->pg_offset);
917 ReleaseWriteLock(&avc->lock);
918 code = afs_ustrategy(bp, cred); /* do the I/O */
919 ObtainWriteLock(&avc->lock,171);
926 pl[i] = VM_PAGE_NULL;
930 ReleaseWriteLock(&avc->lock);
931 afs_BozonUnlock(&avc->pvnLock, avc);
932 afs_Trace2(afs_iclSetp, CM_TRACE_PAGEOUTDONE, ICL_TYPE_INT32, code,
933 ICL_TYPE_INT32, avc->m.Length);
/* mp_afs_swap: swap-to-AFS entry point (body elided in this extraction). */
939 int mp_afs_swap(avc, swapop, argp)
/*
 * mp_afs_syncdata: NFS v3 translator requests are ignored here because
 * the data is synced later in afs_fsync (see the original comment).
 */
947 int mp_afs_syncdata(avc, flag, offset, length, cred)
954 /* NFS V3 makes this call, ignore it. We'll sync the data in afs_fsync. */
955 if (AFS_NFSXLATORREQ(cred))
961 /* a freelist of one */
962 struct buf *afs_bread_freebp = 0;
965 * Only rfs_read calls this, and it only looks at bp->b_un.b_addr.
966 * Thus we can use fake bufs (ie not from the real buffer pool).
968 mp_afs_bread(vp, lbn, bpp, cred)
974 int offset, fsbsize, error;
980 AFS_STATCNT(afs_bread);
/* convert the logical block number to a byte offset */
981 fsbsize = vp->v_vfsp->vfs_bsize;
982 offset = lbn * fsbsize;
/* reuse the one-deep freelist, else allocate a fresh fake buf + data */
983 if (afs_bread_freebp) {
984 bp = afs_bread_freebp;
985 afs_bread_freebp = 0;
987 bp = (struct buf *) AFS_KALLOC(sizeof(*bp));
988 bp->b_un.b_addr = (caddr_t) AFS_KALLOC(fsbsize);
/* build a kernel-space uio describing one filesystem block */
991 iov.iov_base = bp->b_un.b_addr;
992 iov.iov_len = fsbsize;
993 uio.afsio_iov = &iov;
994 uio.afsio_iovcnt = 1;
995 uio.afsio_seg = AFS_UIOSYS;
996 uio.afsio_offset = offset;
997 uio.afsio_resid = fsbsize;
999 error = afs_read((struct vcache *)vp, &uio, cred, lbn, bpp, 0);
1001 afs_bread_freebp = bp;
1006 afs_bread_freebp = bp;
/* point b_vp back at the buf itself so brelse can spot a fake buf */
1008 *(struct buf **)&bp->b_vp = bp; /* mark as fake */
/*
 * Release a buffer obtained from mp_afs_bread.  Real bufs go back to
 * the system (that path is elided here); fake ones go on the one-deep
 * freelist or, if it is already occupied, are freed outright.
 */
1016 mp_afs_brelse(vp, bp)
1021 AFS_STATCNT(afs_brelse);
1022 if ((struct buf *)bp->b_vp != bp) { /* not fake */
1024 } else if (afs_bread_freebp) {
1025 AFS_KFREE(bp->b_un.b_addr, vp->v_vfsp->vfs_bsize);
1026 AFS_KFREE(bp, sizeof(*bp));
1028 afs_bread_freebp = bp;
/*
 * Map a logical block number to a device block number.  AFS has no
 * real device mapping; the hard-coded 8192 presumably reflects the
 * assumed filesystem block size -- TODO confirm against vfs_bsize.
 */
1034 mp_afs_bmap(avc, abn, anvp, anbn)
1035 register struct vcache *avc;
1036 afs_int32 abn, *anbn;
1037 struct vcache **anvp;
1040 AFS_STATCNT(afs_bmap);
1044 *anbn = abn * (8192 / DEV_BSIZE); /* in 512 byte units */
/* Buffer I/O entry point: hand the buf to afs_ustrategy through the
 * OSI strategy-mapping helper. */
1051 mp_afs_strategy (abp)
1052 register struct buf *abp;
1054 register afs_int32 code;
1057 AFS_STATCNT(afs_strategy);
1058 code = afs_osi_MapStrategy(afs_ustrategy, abp);
/* UBC object reference/release/write-check hooks (bodies elided). */
1064 mp_afs_refer(vm_ubc_object_t vop)
1070 mp_afs_release(vm_ubc_object_t vop)
1076 mp_afs_write_check(vm_ubc_object_t vop, vm_page_t pp)
1081 #ifdef AFS_DUX50_ENV
/* DUX 5.0 additions: map a UBC object back to its vnode, and stamp a
 * page with the supplied tick value for the VM system. */
1083 mp_afs_objtovp(vm_ubc_object_t vop, struct vnode **vp)
1090 mp_afs_setpgstamp(vm_page_t pp, unsigned int tick)
1092 pp->pg_stamp = tick;
/*
 * UBC operations vector registered with the VM system; entries map
 * paging requests onto the mp_afs_* pager routines above.  The last
 * two entries exist only on DUX 5.0, matching their #ifdef'd
 * definitions.
 */
1098 struct vfs_ubcops afs_ubcops = {
1099 mp_afs_refer, /* refer vnode */
1100 mp_afs_release, /* release vnode */
1101 mp_afs_getpage, /* get page */
1102 mp_afs_putpage, /* put page */
1103 mp_afs_write_check, /* check writability */
1104 #ifdef AFS_DUX50_ENV
1105 mp_afs_objtovp, /* get vnode pointer */
1106 mp_afs_setpgstamp /* set page stamp */
1112 * Cover function for lookup name using OSF equivalent, namei()
1114 * Note, the result vnode (ni_vp) in the namei data structure remains
1115 * locked after return.
1117 lookupname(namep, seg, follow, dvpp, cvpp)
1118 char *namep; /* path name */
1119 int seg; /* address space containing name */
1120 int follow; /* follow symbolic links */
1121 struct vnode **dvpp; /* result, containing parent vnode */
1122 struct vnode **cvpp; /* result, containing final component vnode */
1124 /* Should I use free-bee in u-area? */
1125 struct nameidata *ndp = &u.u_nd;
1128 ndp->ni_nameiop = ((follow) ? (LOOKUP|FOLLOW) : (LOOKUP));
1129 ndp->ni_segflg = seg;
1130 ndp->ni_dirp = namep;
1132 if (dvpp != (struct vnode **)0)
1133 *dvpp = ndp->ni_dvp;
1134 if (cvpp != (struct vnode **)0)