2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "../afs/param.h"
15 #if defined(AFS_SUN_ENV) || defined(AFS_SUN5_ENV)
17 * SOLARIS/osi_vnodeops.c
21 * Functions: AFS_TRYUP, _init, _info, _fini, afs_addmap, afs_delmap,
22 * afs_vmread, afs_vmwrite, afs_getpage, afs_GetOnePage, afs_putpage,
23 * afs_putapage, afs_nfsrdwr, afs_map, afs_PageLeft, afs_pathconf/afs_cntl,
24 * afs_ioctl, afs_rwlock, afs_rwunlock, afs_seek, afs_space, afs_dump,
25 * afs_cmp, afs_realvp, afs_pageio, afs_dumpctl, afs_dispose, afs_setsecattr,
26 * afs_getsecattr, gafs_open, gafs_close, gafs_getattr, gafs_setattr,
27 * gafs_access, gafs_lookup, gafs_create, gafs_remove, gafs_link,
28 * gafs_rename, gafs_mkdir, gafs_rmdir, gafs_readdir, gafs_symlink,
29 * gafs_readlink, gafs_fsync, afs_inactive, gafs_inactive, gafs_fid
32 * Variables: Afs_vnodeops
35 #include "../afs/sysincludes.h" /* Standard vendor system headers */
36 #include "../afs/afsincludes.h" /* Afs-based standard headers */
37 #include "../afs/afs_stats.h" /* statistics */
38 #include "../afs/nfsclient.h"
47 #include <vm/seg_map.h>
48 #include <vm/seg_vn.h>
50 #if defined(AFS_SUN5_ENV)
51 #include <sys/modctl.h>
52 #include <sys/syscall.h>
56 #include <sys/debug.h>
57 #if defined(AFS_SUN5_ENV)
58 #include <sys/fs_subr.h>
61 #if defined(AFS_SUN5_ENV)
63 * XXX Temporary fix for problems with Solaris rw_tryupgrade() lock.
64 * It isn't very persistent in getting the upgrade when others are
65 * waiting for it and returns 0. So the UpgradeSToW() macro that the
66 * rw_tryupgrade used to map to wasn't good enough and we need to use
67 * the following code instead. Obviously this isn't the proper place
68 * for it but it's only called from here for now
75 if (!rw_tryupgrade(lock)) {
77 rw_enter(lock, RW_WRITER);
84 extern struct as kas; /* kernel addr space */
85 extern unsigned char *afs_indexFlags;
86 extern afs_lock_t afs_xdcache;
88 /* Additional vnodeops for SunOS 4.0.x */
89 int afs_nfsrdwr(), afs_getpage(), afs_putpage(), afs_map();
90 int afs_dump(), afs_cmp(), afs_realvp(), afs_GetOnePage();
96 int afs_addmap(avp, offset, asp, addr, length, prot, maxprot, flags, credp)
97 register struct vnode *avp;
101 int length, prot, maxprot, flags;
102 struct AFS_UCRED *credp;
104 /* XXX What should we do here?? XXX */
108 int afs_delmap(avp, offset, asp, addr, length, prot, maxprot, flags, credp)
109 register struct vnode *avp;
113 int length, prot, maxprot, flags;
114 struct AFS_UCRED *credp;
116 /* XXX What should we do here?? XXX */
120 int afs_vmread(avp, auio, ioflag, acred)
121 register struct vnode *avp;
124 struct AFS_UCRED *acred;
128 if (!RW_READ_HELD(&((struct vcache *)avp)->rwlock))
129 osi_Panic("afs_vmread: !rwlock");
131 code = afs_nfsrdwr((struct vcache *)avp, auio, UIO_READ, ioflag, acred);
137 int afs_vmwrite(avp, auio, ioflag, acred)
138 register struct vnode *avp;
141 struct AFS_UCRED *acred;
145 if (!RW_WRITE_HELD(&((struct vcache *)avp)->rwlock))
146 osi_Panic("afs_vmwrite: !rwlock");
148 code = afs_nfsrdwr((struct vcache *)avp, auio, UIO_WRITE, ioflag, acred);
153 #endif /* AFS_SUN5_ENV */
155 int afs_getpage(vp, off, len, protp, pl, plsz, seg, addr, rw, acred)
170 struct AFS_UCRED *acred;
172 register afs_int32 code = 0;
173 #if defined(AFS_SUN56_ENV)
174 u_offset_t toff = (u_offset_t)off;
177 AFS_STATCNT(afs_getpage);
179 if (vp->v_flag & VNOMAP) /* File doesn't allow mapping */
185 #if defined(AFS_SUN56_ENV)
187 code = afs_GetOnePage((struct vnode *) vp, toff, len, protp, pl, plsz,
188 seg, addr, rw, acred);
192 code = afs_GetOnePage(vp, (u_int)off, len, protp, pl, plsz,
193 seg, addr, rw, acred);
196 code = afs_GetOnePage(vp, off, protp, pl, plsz,
197 seg, addr, rw, acred);
201 struct vcache *vcp = (struct vcache *)vp;
203 ObtainWriteLock(&vcp->vlock, 548);
205 ReleaseWriteLock(&vcp->vlock);
207 afs_BozonLock(&vcp->pvnLock, vcp);
208 #if defined(AFS_SUN56_ENV)
209 code = pvn_getpages(afs_GetOnePage, (struct vnode *) vp, toff,
210 len, protp, pl, plsz, seg, addr, rw, acred);
212 code = pvn_getpages(afs_GetOnePage, (struct vnode *) vp, (u_int)off,
213 len, protp, pl, plsz, seg, addr, rw, acred);
215 afs_BozonUnlock(&vcp->pvnLock, vcp);
217 ObtainWriteLock(&vcp->vlock, 549);
219 ReleaseWriteLock(&vcp->vlock);
226 /* Return all the pages from [off..off+len) in file */
228 int afs_GetOnePage(vp, off, alen, protp, pl, plsz, seg, addr, rw, acred)
231 int afs_GetOnePage(vp, off, protp, pl, plsz, seg, addr, rw, acred)
234 #if defined(AFS_SUN56_ENV)
249 struct AFS_UCRED *acred;
251 register struct page *page;
252 register afs_int32 code = 0;
256 register struct vcache *avc;
257 register struct dcache *tdc;
259 int slot, offset, nlen;
260 struct vrequest treq;
261 afs_int32 mapForRead = 0, Code=0;
262 #if defined(AFS_SUN56_ENV)
270 osi_Panic("GetOnePage: !acred");
272 acred = u.u_cred; /* better than nothing */
275 avc = (struct vcache *) vp; /* cast to afs vnode */
278 if (avc->credp /*&& AFS_NFSXLATORREQ(acred)*/ && AFS_NFSXLATORREQ(avc->credp)) {
282 if (code = afs_InitReq(&treq, acred)) return code;
286 * This is a read-ahead request, e.g. due to madvise.
288 tdc = afs_GetDCache(avc, (afs_int32)off, &treq, &offset, &nlen, 1);
291 if (!(tdc->flags & DFNextStarted)) {
292 ObtainReadLock(&avc->lock);
293 afs_PrefetchChunk(avc, tdc, acred, &treq);
294 ReleaseReadLock(&avc->lock);
301 pl[0] = NULL; /* Make sure it's empty */
303 /* first, obtain the proper lock for the VM system */
305 /* if this is a read request, map the page in read-only. This will
306 * allow us to swap out the dcache entry if there are only read-only
307 * pages created for the chunk, which helps a *lot* when dealing
308 * with small caches. Otherwise, we have to invalidate the vm
309 * pages for the range covered by a chunk when we swap out the
312 if (rw == S_READ || rw == S_EXEC)
315 if (protp) *protp = PROT_ALL;
317 if (AFS_NFSXLATORREQ(acred)) {
319 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
320 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
328 if (rw == S_WRITE || rw == S_CREATE)
329 tdc = afs_GetDCache(avc, (afs_int32)off, &treq, &offset, &nlen, 5);
331 tdc = afs_GetDCache(avc, (afs_int32)off, &treq, &offset, &nlen, 1);
332 if (!tdc) return EINVAL;
334 code = afs_VerifyVCache(avc, &treq);
339 return afs_CheckCode(code, &treq, 44); /* failed to get it */
342 afs_BozonLock(&avc->pvnLock, avc);
343 ObtainReadLock(&avc->lock);
345 afs_Trace4(afs_iclSetp, CM_TRACE_PAGEIN, ICL_TYPE_POINTER, (afs_int32) vp,
346 ICL_TYPE_LONG, (afs_int32) off, ICL_TYPE_LONG, (afs_int32) len,
347 ICL_TYPE_LONG, (int) rw);
353 /* Check to see if we're in the middle of a VM purge, and if we are, release
354 * the locks and try again when the VM purge is done. */
355 ObtainWriteLock(&avc->vlock, 550);
357 ReleaseReadLock(&avc->lock);
358 ReleaseWriteLock(&avc->vlock);
359 afs_BozonUnlock(&avc->pvnLock, avc);
361 /* Check activeV again, it may have been turned off
362 * while we were waiting for a lock in afs_PutDCache */
363 ObtainWriteLock(&avc->vlock, 574);
365 avc->vstates |= VRevokeWait;
366 ReleaseWriteLock(&avc->vlock);
367 afs_osi_Sleep(&avc->vstates);
369 ReleaseWriteLock(&avc->vlock);
373 ReleaseWriteLock(&avc->vlock);
376 /* Check to see whether the cache entry is still valid */
377 if (!(avc->states & CStatd)
378 || !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
379 ReleaseReadLock(&avc->lock);
380 afs_BozonUnlock(&avc->pvnLock, avc);
386 while (1) { /* loop over all pages */
387 /* now, try to find the page in memory (it may already be intransit or laying
388 around the free list */
389 page = page_lookup( vp, toffset, (rw == S_CREATE ? SE_EXCL : SE_SHARED) );
393 /* if we make it here, we can't find the page in memory. Do a real disk read
394 from the cache to get the data */
395 Code |= 0x200; /* XXX */
397 #if defined(AFS_SUN54_ENV)
398 /* use PG_EXCL because we know the page does not exist already. If it
399 * actually does exist, we have somehow raced between lookup and create.
400 * As of 4/98, that shouldn't be possible, but we'll be defensive here
401 * in case someone tries to relax all the serialization of read and write
402 * operations with harmless things like stat. */
403 #if defined(AFS_SUN58_ENV)
404 page = page_create_va(vp, toffset, PAGESIZE, PG_WAIT|PG_EXCL, seg, addr);
406 page = page_create_va(vp, toffset, PAGESIZE, PG_WAIT|PG_EXCL, seg->s_as, addr);
409 page = page_create(vp, toffset, PAGESIZE, PG_WAIT);
415 pagezero(page, alen, PAGESIZE-alen);
417 page = rm_allocpage(seg, addr, PAGESIZE, 1); /* can't fail */
418 if (!page) osi_Panic("afs_getpage alloc page");
419 /* we get a circularly-linked list of pages back, but we expect only
420 one, since that's what we asked for */
421 if (page->p_next != page) osi_Panic("afs_getpage list");
422 /* page enter returns a locked page; we'll drop the lock as a side-effect
423 of the pvn_done done by afs_ustrategy. If we decide not to call
424 strategy, we must be sure to call pvn_fail, at least, to release the
425 page locks and otherwise reset the pages. The page, while locked, is
426 not held, for what it is worth */
427 page->p_intrans = 1; /* set appropriate flags */
429 /* next call shouldn't fail, since we have pvnLock set */
430 if (page_enter(page, vp, toffset)) osi_Panic("afs_getpage enter race");
431 #endif /* AFS_SUN5_ENV */
434 if (rw == S_CREATE) {
435 /* XXX Don't read from AFS in write only cases XXX */
436 page_io_unlock(page);
440 /* XXX Don't read from AFS in write only cases XXX */
441 page->p_intrans = page->p_pagein = 0;
442 page_unlock(page); /* XXX */
449 /* now it is time to start I/O operation */
450 buf = pageio_setup(page, PAGESIZE, vp, B_READ); /* allocate a buf structure */
451 #if defined(AFS_SUN5_ENV)
455 buf->b_blkno = btodb(toffset);
456 bp_mapin(buf); /* map it in to our address space */
458 ReleaseReadLock(&avc->lock);
460 #if defined(AFS_SUN5_ENV)
462 code = afs_ustrategy(buf, acred); /* do the I/O */
465 code = afs_ustrategy(buf); /* do the I/O */
468 ObtainReadLock(&avc->lock);
471 /* Before freeing unmap the buffer */
482 page_io_unlock(page);
486 /* come here when we have another page (already held) to enter */
488 /* put page in array and continue */
490 /* The p_selock must be downgraded to a shared lock after the page is read */
491 #if defined(AFS_SUN56_ENV)
492 if ((rw != S_CREATE) && !(PAGE_SHARED(page)))
494 if ((rw != S_CREATE) && !(se_shared_assert(&page->p_selock)))
497 page_downgrade(page);
502 code = page_iolock_assert(page);
508 if (tlen <= 0) break; /* done all the pages */
509 } /* while (1) ... */
512 pl[slot] = (struct page *) 0;
514 * XXX This seems kind-of wrong: we shouldn't be modifying
515 * avc->states while not holding the write lock (even
516 * though nothing really uses CHasPages..)
518 avc->states |= CHasPages;
519 ReleaseReadLock(&avc->lock);
521 ObtainWriteLock(&afs_xdcache,246);
523 /* track that we have dirty (or dirty-able) pages for this chunk. */
524 afs_indexFlags[tdc->index] |= IFDirtyPages;
526 afs_indexFlags[tdc->index] |= IFAnyPages;
527 ReleaseWriteLock(&afs_xdcache);
529 afs_BozonUnlock(&avc->pvnLock, avc);
533 afs_Trace3(afs_iclSetp, CM_TRACE_PAGEINDONE, ICL_TYPE_LONG, code, ICL_TYPE_LONG, (int)page,
534 ICL_TYPE_LONG, Code);
539 afs_Trace3(afs_iclSetp, CM_TRACE_PAGEINDONE, ICL_TYPE_LONG, code, ICL_TYPE_LONG, (int)page,
540 ICL_TYPE_LONG, Code);
541 /* release all pages, drop locks, return code */
544 pvn_read_done(page, B_ERROR);
546 for(i=0; i<slot; i++)
549 ReleaseReadLock(&avc->lock);
550 afs_BozonUnlock(&avc->pvnLock, avc);
558 int afs_putpage(vp, off, len, flags, cred)
563 struct AFS_UCRED *cred;
568 #if defined(AFS_SUN58_ENV)
573 afs_int32 endPos, NPages=0;
574 #if defined(AFS_SUN56_ENV)
575 u_offset_t toff = off;
580 AFS_STATCNT(afs_putpage);
581 if (vp->v_flag & VNOMAP) /* file doesn't allow mapping */
585 * Putpage (ASYNC) is called every sec to flush out dirty vm pages
588 afs_Trace4(afs_iclSetp, CM_TRACE_PAGEOUT, ICL_TYPE_POINTER, (afs_int32) vp,
589 ICL_TYPE_LONG, (afs_int32) off, ICL_TYPE_LONG, (afs_int32) len,
590 ICL_TYPE_LONG, (int) flags);
591 avc = (struct vcache *) vp;
592 afs_BozonLock(&avc->pvnLock, avc);
593 ObtainWriteLock(&avc->lock,247);
595 /* Get a list of modified (or whatever) pages */
597 endPos = (int)off + len; /* position we're supposed to write up to */
598 while ((afs_int32)toff < endPos && (afs_int32)toff < avc->m.Length) {
599 /* If not invalidating pages use page_lookup_nowait to avoid reclaiming
600 * them from the free list
603 if (flags & (B_FREE|B_INVAL))
604 pages = page_lookup(vp, toff, SE_EXCL);
606 pages = page_lookup_nowait(vp, toff, SE_SHARED);
607 if (!pages || !pvn_getdirty(pages, flags))
611 code = afs_putapage(vp, pages, &toff, &tlen, flags, cred);
622 #if defined(AFS_SUN56_ENV)
623 code = pvn_vplist_dirty(vp, toff, afs_putapage, flags, cred);
625 code = pvn_vplist_dirty(vp, (u_int)off, afs_putapage, flags, cred);
630 if (code && !avc->vc_error)
631 avc->vc_error = code;
633 ReleaseWriteLock(&avc->lock);
634 afs_BozonUnlock(&avc->pvnLock, avc);
635 afs_Trace2(afs_iclSetp, CM_TRACE_PAGEOUTDONE, ICL_TYPE_LONG, code, ICL_TYPE_LONG, NPages);
641 int afs_putapage(struct vnode *vp, struct page *pages,
642 #if defined(AFS_SUN56_ENV)
647 #if defined(AFS_SUN58_ENV)
652 int flags, struct AFS_UCRED *credp)
655 struct vcache *avc = (struct vcache *)vp;
657 u_int toff, tlen = PAGESIZE, off = (pages->p_offset/PAGESIZE)*PAGESIZE;
658 u_int poff = pages->p_offset;
661 * Now we've got the modified pages. All pages are locked and held
662 * XXX Find a kluster that fits in one block (or page). We also
663 * adjust the i/o if the file space is less than a while page. XXX
666 if (tlen+toff > avc->m.Length) {
667 tlen = avc->m.Length - toff;
669 /* can't call mapout with 0 length buffers (rmfree panics) */
670 if (((tlen>>24)&0xff) == 0xff) {
675 * Can't call mapout with 0 length buffers since we'll get rmfree panics
677 tbuf = pageio_setup(pages, tlen, vp, B_WRITE | flags);
678 if (!tbuf) return (ENOMEM);
681 tbuf->b_blkno = btodb(pages->p_offset);
684 afs_Trace4(afs_iclSetp, CM_TRACE_PAGEOUTONE, ICL_TYPE_LONG, avc, ICL_TYPE_LONG, pages,
685 ICL_TYPE_LONG, tlen, ICL_TYPE_LONG, toff);
686 code = afs_ustrategy(tbuf, credp); /* unlocks page */
690 pvn_write_done(pages, ((code) ? B_ERROR:0) | B_WRITE | flags);
693 if (offp) *offp = toff;
694 if (lenp) *lenp = tlen;
700 int afs_putpage(vp, off, len, flags, cred)
705 struct AFS_UCRED *cred;
707 int wholeEnchilada; /* true if we must get all of the pages */
713 afs_int32 code = 0, rcode;
715 afs_int32 clusterStart, clusterEnd, endPos;
717 /* In the wholeEnchilada case, we must ensure that we get all of the pages
718 from the system, since we're doing this to shutdown the use of a vnode */
720 AFS_STATCNT(afs_putpage);
721 wholeEnchilada = (off == 0 && len == 0 && (flags & (B_INVAL|B_ASYNC)) == B_INVAL);
723 avc = (struct vcache *) vp;
724 afs_BozonLock(&avc->pvnLock, avc);
725 ObtainWriteLock(&avc->lock,248);
728 /* in whole enchilada case, loop until call to pvn_getdirty can't find
729 any more modified pages */
731 /* first we try to get a list of modified (or whatever) pages */
733 pages = pvn_vplist_dirty(vp, off, flags);
736 endPos = off + len; /* position we're supposed to write up to */
737 if (endPos > avc->m.Length) endPos = avc->m.Length; /* bound by this */
738 clusterStart = off & ~(PAGESIZE-1); /* round down to nearest page */
739 clusterEnd = ((endPos-1) | (PAGESIZE-1))+1; /* round up to nearest page */
740 pages = pvn_range_dirty(vp, off, endPos, clusterStart, clusterEnd, flags);
743 /* Now we've got the modified pages. All pages are locked and held */
744 rcode = 0; /* return code */
745 while(pages) { /* look over all pages in the returned set */
746 tpage = pages; /* get first page in the list */
748 /* write out the page */
749 poffset = tpage->p_offset; /* where the page starts in the file */
750 /* tlen will represent the end of the range to write, for a while */
751 tlen = PAGESIZE+poffset; /* basic place to end tpage write */
752 /* but we also don't want to write past end of off..off+len range */
753 if (len != 0 && tlen > off+len) tlen = off+len;
754 /* and we don't want to write past the end of the file */
755 if (tlen > avc->m.Length) tlen = avc->m.Length;
756 /* and we don't want to write at all if page starts after end */
757 if (poffset >= tlen) {
758 pvn_fail(pages, B_WRITE | flags);
761 /* finally change tlen from end position to length */
762 tlen -= poffset; /* compute bytes to write from this page */
763 page_sub(&pages, tpage); /* remove tpage from "pages" list */
764 tbuf = pageio_setup(tpage, tlen, vp, B_WRITE | flags);
766 pvn_fail(tpage, B_WRITE|flags);
767 pvn_fail(pages, B_WRITE|flags);
771 tbuf->b_blkno = btodb(tpage->p_offset);
773 ReleaseWriteLock(&avc->lock); /* can't hold during strategy call */
774 code = afs_ustrategy(tbuf); /* unlocks page */
775 ObtainWriteLock(&avc->lock,249); /* re-obtain */
777 /* unlocking of tpage is done by afs_ustrategy */
779 if (pages) /* may have already removed last page */
780 pvn_fail(pages, B_WRITE|flags);
783 } /* for (tpage=pages....) */
785 /* see if we've gotten all of the pages in the whole enchilada case */
786 if (!wholeEnchilada || !vp->v_pages) break;
787 } /* while(1) obtaining all pages */
790 * If low on chunks, and if writing the last byte of a chunk, try to
791 * free some. Note that afs_DoPartialWrite calls osi_SyncVM which now
792 * calls afs_putpage, so this is recursion. It stops there because we
793 * insist on len being non-zero.
795 if (afs_stats_cmperf.cacheCurrDirtyChunks > afs_stats_cmperf.cacheMaxDirtyChunks
796 && len != 0 && AFS_CHUNKOFFSET((off + len)) == 0) {
797 struct vrequest treq;
798 if (!afs_InitReq(&treq, cred ? cred : u.u_cred)) {
799 rcode = afs_DoPartialWrite(avc, &treq); /* XXX */
805 if (rcode && !avc->vc_error)
806 avc->vc_error = rcode;
808 /* when we're here, we just return code. */
809 ReleaseWriteLock(&avc->lock);
810 afs_BozonUnlock(&avc->pvnLock, avc);
814 #endif /* AFS_SUN5_ENV */
816 int afs_nfsrdwr(avc, auio, arw, ioflag, acred)
817 register struct vcache *avc;
821 struct AFS_UCRED *acred;
823 register afs_int32 code;
826 afs_int32 mode, sflags;
828 struct dcache *dcp, *dcp_newpage;
829 afs_int32 fileBase, size;
831 register afs_int32 tsize;
832 register afs_int32 pageOffset, extraResid=0;
833 register long origLength; /* length when reading/writing started */
834 register long appendLength; /* length when this call will finish */
835 int created; /* created pages instead of faulting them */
837 int didFakeOpen, eof;
838 struct vrequest treq;
842 AFS_STATCNT(afs_nfsrdwr);
844 /* can't read or write other things */
845 if (vType(avc) != VREG) return EISDIR;
847 if (auio->uio_resid == 0)
850 afs_Trace4(afs_iclSetp, CM_TRACE_VMRW, ICL_TYPE_POINTER, (afs_int32)avc,
851 ICL_TYPE_LONG, (arw==UIO_WRITE? 1 : 0),
852 ICL_TYPE_LONG, auio->uio_offset,
853 ICL_TYPE_LONG, auio->uio_resid);
855 if ( AfsLargeFileUio(auio) ) /* file is larger than 2 GB */
859 if (!acred) osi_Panic("rdwr: !acred");
861 if (!acred) acred = u.u_cred;
863 if (code = afs_InitReq(&treq, acred)) return code;
865 /* It's not really possible to know if a write cause a growth in the
866 * cache size, we we wait for a cache drain for any write.
868 afs_MaybeWakeupTruncateDaemon();
869 while ((arw == UIO_WRITE) &&
870 (afs_blocksUsed > (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100)) {
871 if (afs_blocksUsed - afs_blocksDiscarded >
872 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
873 afs_WaitForCacheDrain = 1;
874 afs_osi_Sleep(&afs_WaitForCacheDrain);
876 afs_MaybeFreeDiscardedDCache();
877 afs_MaybeWakeupTruncateDaemon();
879 code = afs_VerifyVCache(avc, &treq);
880 if (code) return afs_CheckCode(code, &treq, 45);
882 afs_BozonLock(&avc->pvnLock, avc);
883 osi_FlushPages(avc, acred); /* hold bozon lock, but not basic vnode lock */
885 ObtainWriteLock(&avc->lock,250);
887 /* adjust parameters when appending files */
888 if ((ioflag & IO_APPEND) && arw == UIO_WRITE)
890 #if defined(AFS_SUN56_ENV)
891 auio->uio_loffset = 0;
893 auio->uio_offset = avc->m.Length; /* write at EOF position */
895 if (auio->uio_offset < 0 || (auio->uio_offset + auio->uio_resid) < 0) {
896 ReleaseWriteLock(&avc->lock);
897 afs_BozonUnlock(&avc->pvnLock, avc);
901 /* file is larger than 2GB */
902 if ( AfsLargeFileSize(auio->uio_offset, auio->uio_resid) ) {
903 ReleaseWriteLock(&avc->lock);
904 afs_BozonUnlock(&avc->pvnLock, avc);
908 didFakeOpen=0; /* keep track of open so we can do close */
909 if (arw == UIO_WRITE) {
910 /* do ulimit processing; shrink resid or fail */
911 #if defined(AFS_SUN56_ENV)
912 if (auio->uio_loffset + auio->afsio_resid > auio->uio_llimit) {
913 if (auio->uio_llimit >= auio->uio_llimit) {
914 ReleaseWriteLock(&avc->lock);
915 afs_BozonUnlock(&avc->pvnLock, avc);
918 /* track # of bytes we should write, but won't because of
919 * ulimit; we must add this into the final resid value
920 * so caller knows we punted some data.
922 extraResid = auio->uio_resid;
923 auio->uio_resid = auio->uio_llimit - auio->uio_loffset;
924 extraResid -= auio->uio_resid;
929 if (auio->afsio_offset + auio->afsio_resid > auio->uio_limit) {
930 if (auio->afsio_offset >= auio->uio_limit) {
931 ReleaseWriteLock(&avc->lock);
932 afs_BozonUnlock(&avc->pvnLock, avc);
935 /* track # of bytes we should write, but won't because of
936 * ulimit; we must add this into the final resid value
937 * so caller knows we punted some data.
939 extraResid = auio->uio_resid;
940 auio->uio_resid = auio->uio_limit - auio->afsio_offset;
941 extraResid -= auio->uio_resid;
946 mode = S_WRITE; /* segment map-in mode */
947 afs_FakeOpen(avc); /* do this for writes, so data gets put back
948 when we want it to be put back */
949 didFakeOpen = 1; /* we'll be doing a fake open */
950 /* before starting any I/O, we must ensure that the file is big enough
951 to hold the results (since afs_putpage will be called to force the I/O */
952 size = auio->afsio_resid + auio->afsio_offset; /* new file size */
954 origLength = avc->m.Length;
955 if (size > avc->m.Length)
956 avc->m.Length = size; /* file grew */
957 avc->states |= CDirty; /* Set the dirty bit */
958 avc->m.Date = osi_Time(); /* Set file date (for ranlib) */
960 mode = S_READ; /* map-in read-only */
961 origLength = avc->m.Length;
964 if (acred && AFS_NFSXLATORREQ(acred)) {
965 if (arw == UIO_READ) {
966 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
967 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
968 ReleaseWriteLock(&avc->lock);
969 afs_BozonUnlock(&avc->pvnLock, avc);
981 counter = 0; /* don't call afs_DoPartialWrite first time through. */
983 /* compute the amount of data to move into this block,
984 based on auio->afsio_resid. Note that we copy data in units of
985 MAXBSIZE, not PAGESIZE. This is because segmap_getmap panics if you
986 call it with an offset based on blocks smaller than MAXBSIZE
987 (implying that it should be named BSIZE, since it is clearly both a
989 size = auio->afsio_resid; /* transfer size */
990 fileBase = auio->afsio_offset; /* start file position for xfr */
991 pageBase = fileBase & ~(MAXBSIZE-1); /* file position of the page */
992 pageOffset = fileBase & (MAXBSIZE-1); /* xfr start's offset within page */
993 tsize = MAXBSIZE-pageOffset; /* how much more fits in this page */
994 /* we'll read tsize bytes, but first must make sure tsize isn't too big */
995 if (tsize > size) tsize = size; /* don't read past end of request */
996 eof = 0; /* flag telling us if we hit the EOF on the read */
997 if (arw == UIO_READ) { /* we're doing a read operation */
998 /* don't read past EOF */
999 if (tsize + fileBase > origLength) {
1000 tsize = origLength - fileBase;
1001 eof = 1; /* we did hit the EOF */
1002 if (tsize < 0) tsize = 0; /* better safe than sorry */
1008 /* Purge dirty chunks of file if there are too many dirty
1009 * chunks. Inside the write loop, we only do this at a chunk
1010 * boundary. Clean up partial chunk if necessary at end of loop.
1012 if (counter > 0 && code == 0 && AFS_CHUNKOFFSET(fileBase) == 0)
1014 code = afs_DoPartialWrite(avc, &treq);
1018 #endif /* AFS_SUN5_ENV */
1019 /* write case, we ask segmap_release to call putpage. Really, we
1020 don't have to do this on every page mapin, but for now we're
1021 lazy, and don't modify the rest of AFS to scan for modified
1022 pages on a close or other "synchronize with file server"
1023 operation. This makes things a little cleaner, but probably
1024 hurts performance. */
1029 break; /* nothing to transfer, we're done */
1032 if (arw == UIO_WRITE)
1033 avc->states |= CDirty; /* may have been cleared by DoPartialWrite*/
1035 /* Before dropping lock, hold the chunk (create it if necessary). This
1036 * serves two purposes: (1) Ensure Cache Truncate Daemon doesn't try
1037 * to purge the chunk's pages while we have them locked. This would
1038 * cause deadlock because we might be waiting for the CTD to free up
1039 * a chunk. (2) If we're writing past the original EOF, and we're
1040 * at the base of the chunk, then make sure it exists online
1041 * before we do the uiomove, since the segmap_release will
1042 * write out to the chunk, causing it to get fetched if it hasn't
1043 * been created yet. The code that would otherwise notice that
1044 * we're fetching a chunk past EOF won't work, since we've
1045 * already adjusted the file size above.
1047 ObtainWriteLock(&avc->vlock, 551);
1048 while (avc->vstates & VPageCleaning) {
1049 ReleaseWriteLock(&avc->vlock);
1050 ReleaseWriteLock(&avc->lock);
1051 afs_osi_Sleep(&avc->vstates);
1052 ObtainWriteLock(&avc->lock, 334);
1053 ObtainWriteLock(&avc->vlock, 552);
1055 ReleaseWriteLock(&avc->vlock);
1058 dcp = afs_GetDCache(avc, fileBase, &treq, &toff, &tlen, 2);
1065 ReleaseWriteLock(&avc->lock); /* uiomove may page fault */
1067 #if defined(AFS_SUN56_ENV)
1068 data = segmap_getmap(segkmap,(struct vnode *)avc,(u_offset_t)pageBase);
1070 data = segmap_getmap(segkmap, (struct vnode *) avc, pageBase);
1072 #ifndef AFS_SUN5_ENV
1073 code = as_fault(&kas, data+pageOffset, tsize, F_SOFTLOCK, mode);
1075 AFS_UIOMOVE(data+pageOffset, tsize, arw, auio, code);
1076 as_fault(&kas, data+pageOffset, tsize, F_SOFTUNLOCK, mode);
1077 code2 = segmap_release(segkmap, data, sflags);
1082 (void) segmap_release(segkmap, data, 0);
1085 #if defined(AFS_SUN56_ENV)
1086 raddr = (caddr_t) (((uintptr_t)data +pageOffset) & PAGEMASK);
1088 raddr = (caddr_t) (((u_int)data +pageOffset) & PAGEMASK);
1090 rsize = (((u_int)data+pageOffset+tsize+PAGEOFFSET) & PAGEMASK)-(u_int)raddr;
1092 /* if we're doing a write, and we're starting at the rounded
1093 * down page base, and we're writing enough data to cover all
1094 * created pages, then we must be writing all of the pages
1095 * in this MAXBSIZE window that we're creating.
1098 if (arw == UIO_WRITE
1099 && ((long)raddr == (long)data+pageOffset)
1100 && tsize >= rsize) {
1101 /* probably the dcache backing this guy is around, but if
1102 * not, we can't do this optimization, since we're creating
1103 * writable pages, which must be backed by a chunk.
1106 dcp_newpage = afs_FindDCache(avc, pageBase);
1108 && hsame(avc->m.DataVersion, dcp_newpage->f.versionNo)) {
1109 ObtainWriteLock(&avc->lock,251);
1110 ObtainWriteLock(&avc->vlock,576);
1111 if ((avc->activeV == 0)
1112 && hsame(avc->m.DataVersion, dcp_newpage->f.versionNo)
1113 && !(dcp_newpage->flags & (DFFetching))) {
1115 segmap_pagecreate(segkmap, raddr, rsize, 1);
1117 ObtainWriteLock(&afs_xdcache,252);
1118 /* Mark the pages as created and dirty */
1119 afs_indexFlags[dcp_newpage->index]
1120 |= (IFAnyPages | IFDirtyPages);
1121 ReleaseWriteLock(&afs_xdcache);
1122 avc->states |= CHasPages;
1125 afs_PutDCache(dcp_newpage);
1126 ReleaseWriteLock(&avc->vlock);
1127 ReleaseWriteLock(&avc->lock);
1129 else if ( dcp_newpage )
1130 afs_PutDCache(dcp_newpage);
1134 code = segmap_fault(kas.a_hat, segkmap, raddr, rsize, F_SOFTLOCK, mode);
1137 AFS_UIOMOVE(data+pageOffset, tsize, arw, auio, code);
1138 segmap_fault(kas.a_hat, segkmap, raddr, rsize, F_SOFTUNLOCK, mode);
1141 code = segmap_release(segkmap, data, sflags);
1143 (void) segmap_release(segkmap, data, 0);
1145 #endif /* AFS_SUN5_ENV */
1147 ObtainWriteLock(&avc->lock,253);
1150 * If at a chunk boundary, start prefetch of next chunk.
1152 if (counter == 0 || AFS_CHUNKOFFSET(fileBase) == 0) {
1153 if (!(dcp->flags & DFNextStarted))
1154 afs_PrefetchChunk(avc, dcp, acred, &treq);
1159 #endif /* AFS_SUN5_ENV */
1163 afs_FakeClose(avc, acred);
1167 if (arw == UIO_WRITE && (avc->states & CDirty)) {
1168 code2 = afs_DoPartialWrite(avc, &treq);
1172 #endif /* AFS_SUN5_ENV */
1174 if (!code && avc->vc_error) {
1175 code = avc->vc_error;
1177 ReleaseWriteLock(&avc->lock);
1178 afs_BozonUnlock(&avc->pvnLock, avc);
1180 #ifdef AFS_SUN53_ENV
1181 if ((ioflag & FSYNC) && (arw == UIO_WRITE) && !AFS_NFSXLATORREQ(acred))
1182 code = afs_fsync(avc, 0, acred);
1184 if ((ioflag & IO_SYNC) && (arw == UIO_WRITE)
1185 && !AFS_NFSXLATORREQ(acred))
1186 code = afs_fsync(avc, acred);
1189 #ifdef AFS_SUN52_ENV
1191 * If things worked, add in as remaining in request any bytes
1192 * we didn't write due to file size ulimit.
1194 if (code == 0 && extraResid > 0)
1195 auio->uio_resid += extraResid;
1197 return afs_CheckCode(code, &treq, 46);
1200 afs_map(vp, off, as, addr, len, prot, maxprot, flags, cred)
1212 u_char prot, maxprot;
1214 u_int prot, maxprot;
1217 struct AFS_UCRED *cred;
1219 struct segvn_crargs crargs;
1220 register afs_int32 code;
1221 struct vrequest treq;
1222 register struct vcache *avc = (struct vcache *) vp;
1224 AFS_STATCNT(afs_map);
1227 /* check for reasonableness on segment bounds; apparently len can be < 0 */
1228 if ((int)off < 0 || (int)(off + len) < 0) {
1231 if ( AfsLargeFileSize(off, len) ) /* file is larger than 2 GB */
1237 #if defined(AFS_SUN5_ENV)
1238 if (vp->v_flag & VNOMAP) /* File isn't allowed to be mapped */
1241 if (vp->v_filocks) /* if locked, disallow mapping */
1245 if (code = afs_InitReq(&treq, cred)) goto out;
1247 if (vp->v_type != VREG) {
1252 code = afs_VerifyVCache(avc, &treq);
1256 afs_BozonLock(&avc->pvnLock, avc);
1257 osi_FlushPages(avc, cred); /* ensure old pages are gone */
1258 avc->states |= CMAPPED; /* flag cleared at afs_inactive */
1259 afs_BozonUnlock(&avc->pvnLock, avc);
1265 if ((flags & MAP_FIXED) == 0) {
1266 #if defined(AFS_SUN57_ENV)
1267 map_addr(addr, len, off, 1, flags);
1268 #elif defined(AFS_SUN56_ENV)
1269 map_addr(addr, len, off, 1);
1271 map_addr(addr, len, (off_t)off, 1);
1273 if (*addr == NULL) {
1281 (void) as_unmap(as, *addr, len); /* unmap old address space use */
1282 /* setup the create parameter block for the call */
1283 crargs.vp = (struct vnode *) avc;
1284 crargs.offset = (u_int)off;
1286 crargs.type = flags&MAP_TYPE;
1288 crargs.maxprot = maxprot;
1289 crargs.amp = (struct anon_map *) 0;
1290 #if defined(AFS_SUN5_ENV)
1291 crargs.flags = flags & ~MAP_TYPE;
1294 code = as_map(as, *addr, len, segvn_create, (char *) &crargs);
1300 code = afs_CheckCode(code, &treq, 47);
1304 code = afs_CheckCode(code, &treq, 48);
1309 /* Sun 4.0.X-specific code. It computes the number of bytes that need
1310 to be zeroed at the end of a page by pvn_vptrunc, given that you're
1311 trying to get vptrunc to truncate a file to alen bytes. The result
1312 will be passed to pvn_vptrunc by the truncate code */
1313 #ifndef AFS_SUN5_ENV /* Not good for Solaris */
1315 register afs_int32 alen; {
1316 register afs_int32 nbytes;
1318 AFS_STATCNT(afs_PageLeft);
1319 nbytes = PAGESIZE - (alen & PAGEOFFSET); /* amount to zap in last page */
1320 /* now check if we'd zero the entire last page. Don't need to do this
1321 since pvn_vptrunc will handle this case properly (it will invalidate
1323 if (nbytes == PAGESIZE) nbytes = 0;
1324 if (nbytes < 0) nbytes = 0; /* just in case */
1331 * For Now We use standard local kernel params for AFS system values. Change this
/* NOTE(review): elided chunk -- the afs_pathconf/afs_cntl bodies below
 * are missing their switch statement, braces, and return paths.  The
 * visible lines report local-kernel pathconf limits (MAXLINK, MAXNAMLEN,
 * MAXPATHLEN, ...) for the corresponding _PC_* queries. */
1334 #if defined(AFS_SUN5_ENV)
1335 afs_pathconf(vp, cmd, outdatap, credp)
1336 register struct AFS_UCRED *credp;
1338 afs_cntl(vp, cmd, indatap, outdatap, inflag, outflag)
1339 int inflag, outflag;
1346 AFS_STATCNT(afs_cntl);
/* presumably these stores sit under _PC_LINK_MAX / _PC_NAME_MAX /
 * _PC_PATH_MAX cases of an elided switch(cmd) -- TODO confirm */
1349 *outdatap = MAXLINK;
1352 *outdatap = MAXNAMLEN;
1355 *outdatap = MAXPATHLEN;
1357 case _PC_CHOWN_RESTRICTED:
1363 #if !defined(AFS_SUN5_ENV)
1365 *outdatap = CANBSIZ;
1368 *outdatap = VDISABLE;
1380 #endif /* AFS_SUN_ENV */
1382 #if defined(AFS_SUN5_ENV)
/* NOTE(review): elided chunk -- afs_ioctl's body and the parameter
 * declarations of the routines below are not visible. */
1384 afs_ioctl(vnp, com, arg, flag, credp, rvalp)
/* VOP_RWLOCK: acquire the per-vcache rwlock, writer or reader per 'wlock' */
1393 void afs_rwlock(vnp, wlock)
1397 rw_enter(&((struct vcache *)vnp)->rwlock, (wlock ? RW_WRITER : RW_READER));
/* VOP_RWUNLOCK: drop the per-vcache rwlock taken in afs_rwlock */
1401 void afs_rwunlock(vnp, wlock)
1405 rw_exit(&((struct vcache *)vnp)->rwlock);
/* VOP_SEEK: reject a proposed offset outside [0, MAXOFF_T] */
1410 afs_seek(vnp, ooff, noffp)
1415 register int code = 0;
1417 if ((*noffp < 0 || *noffp > MAXOFF_T))
/* VOP_FRLOCK: file/record locking entry point, implemented on top of
 * afs_lockctl().  NOTE(review): elided chunk -- parameter declarations,
 * braces and the return path are missing from the visible source. */
1422 int afs_frlock(vnp, cmd, ap, flag, off,
1423 #ifdef AFS_SUN59_ENV
1429 #if defined(AFS_SUN56_ENV)
1436 #ifdef AFS_SUN59_ENV
1437 struct flk_callback *flkcb;
1439 struct AFS_UCRED *credp;
1441 register afs_int32 code = 0;
1443 * Implement based on afs_lockctl
1446 #ifdef AFS_SUN59_ENV
/* 2.9+ passes a flk_callback; it is only warned about, not honored */
1448 afs_warn("Don't know how to deal with flk_callback's!\n");
1450 if ((cmd == F_GETLK) || (cmd == F_O_GETLK) || (cmd == F_SETLK) || (cmd == F_SETLKW)) {
/* stamp the flock with the caller's pid/sysid before handing it on */
1451 #ifdef AFS_SUN53_ENV
1452 ap->l_pid = ttoproc(curthread)->p_pid;
1455 ap->l_pid = ttoproc(curthread)->p_epid;
1456 ap->l_sysid = ttoproc(curthread)->p_sysid;
/* normalize l_whence-relative offsets to absolute via convoff() */
1460 #ifdef AFS_SUN56_ENV
1461 code = convoff(vnp, ap, 0, off);
1463 code = convoff(vnp, ap, 0, (off_t)off);
1465 if (code) return code;
1469 code = afs_lockctl((struct vcache *)vnp, ap, cmd, credp);
/* VOP_SPACE: only F_FREESP is handled; frees space by truncating the
 * file to l_start via afs_setattr(AT_SIZE).  NOTE(review): elided chunk --
 * declarations, braces and return are missing from the visible source. */
1475 int afs_space(vnp, cmd, ap, flag, off, credp)
1478 #if defined(AFS_SUN56_ENV)
1485 struct AFS_UCRED *credp;
1487 register afs_int32 code = EINVAL;
1490 if ((cmd == F_FREESP)
1491 #ifdef AFS_SUN56_ENV
/* convoff() converts the flock offset to absolute before use */
1492 && ((code = convoff(vnp, ap, 0, off)) == 0)) {
1494 && ((code = convoff(vnp, ap, 0, (off_t)off)) == 0)) {
/* implement "free space" as a set-size (truncate) to l_start */
1498 vattr.va_mask = AT_SIZE;
1499 vattr.va_size = ap->l_start;
1500 code = afs_setattr((struct vcache *)vnp, &vattr, 0, credp);
/* VOP_DUMP: unimplemented stub; only warns.  NOTE(review): elided chunk --
 * the routines below are missing declarations, braces and returns. */
1510 int afs_dump(vp, addr, i1, i2)
1515 AFS_STATCNT(afs_dump);
1516 afs_warn("AFS_DUMP. MUST IMPLEMENT THIS!!!\n");
1521 /* Nothing fancy here; just compare if vnodes are identical ones */
1523 struct vnode *vp1, *vp2;
1525 AFS_STATCNT(afs_cmp);
/* VOP_REALVP: AFS vnodes have no underlying "real" vnode */
1530 int afs_realvp(struct vnode *vp, struct vnode **vpp) {
1531 AFS_STATCNT(afs_realvp);
/* VOP_PAGEIO: not supported by AFS */
1536 int afs_pageio(vp, pp, ui1, ui2, i1, credp)
1543 afs_warn("afs_pageio: Not implemented\n");
/* VOP_DUMPCTL: not supported by AFS */
1547 int afs_dumpctl(vp, i
1548 #ifdef AFS_SUN59_ENV
1554 #ifdef AFS_SUN59_ENV
1558 afs_warn("afs_dumpctl: Not implemented\n");
1562 #ifdef AFS_SUN54_ENV
/* VOP_DISPOSE: delegate page disposal to the generic fs_dispose() */
1563 extern void afs_dispose(vp, p, fl, dn, cr)
1569 fs_dispose(vp, p, fl, dn, cr);
/* VOP_SETSECATTR / VOP_GETSECATTR: Solaris ACLs are not stored by AFS;
 * getsecattr fabricates one from the mode bits via fs_fab_acl() */
1572 int afs_setsecattr(vp, vsecattr, flag, creds)
1574 vsecattr_t *vsecattr;
1581 int afs_getsecattr(vp, vsecattr, flag, creds)
1583 vsecattr_t *vsecattr;
1587 return fs_fab_acl(vp, vsecattr, flag, creds);
1591 #ifdef AFS_GLOBAL_SUNLOCK
/* Forward declarations for the gafs_* wrappers that populate the
 * Afs_vnodeops dispatch table below.  NOTE(review): elided chunk -- the
 * table's member initializers (original lines 1601-1648) are missing
 * from the visible source. */
1592 extern int gafs_open(), gafs_close(), afs_ioctl(), gafs_access();
1593 extern int gafs_getattr(), gafs_setattr(), gafs_lookup(), gafs_create();
1594 extern int gafs_remove(), gafs_link(), gafs_rename(), gafs_mkdir();
1595 extern int gafs_rmdir(), gafs_readdir(), gafs_fsync(), gafs_symlink();
1596 extern int gafs_fid(), gafs_readlink(), fs_setfl(), afs_pathconf();
1597 extern int afs_lockctl();
1598 extern void gafs_inactive();
/* The vnodeops table handed to Solaris VFS for AFS vnodes */
1600 struct vnodeops Afs_vnodeops = {
1640 #ifdef AFS_SUN54_ENV
1645 #if defined(AFS_SUN56_ENV)
1649 struct vnodeops *afs_ops = &Afs_vnodeops;
/* gafs_* wrappers: under AFS_GLOBAL_SUNLOCK each forwards to the matching
 * afs_* routine.  NOTE(review): elided chunk -- the surrounding lines
 * (presumably AFS global-lock acquire/release and the returns) are
 * missing from the visible source; confirm against the full file. */
1653 gafs_open(avcp, aflags, acred)
1654 register struct vcache **avcp;
1656 struct AFS_UCRED *acred;
1661 code = afs_open(avcp, aflags, acred);
1667 gafs_close(avc, aflags, count, offset, acred)
1670 register struct vcache *avc;
1672 struct AFS_UCRED *acred;
1677 code = afs_close(avc, aflags, count, offset, acred);
1683 gafs_getattr(avc, attrs, flags, acred)
1685 register struct vcache *avc;
1686 register struct vattr *attrs;
1687 struct AFS_UCRED *acred;
1692 code = afs_getattr(avc, attrs, flags, acred);
1698 gafs_setattr(avc, attrs, flags, acred)
1700 register struct vcache *avc;
1701 register struct vattr *attrs;
1702 struct AFS_UCRED *acred;
1707 code = afs_setattr(avc, attrs, flags, acred);
1713 gafs_access(avc, amode, flags, acred)
1715 register struct vcache *avc;
1716 register afs_int32 amode;
1717 struct AFS_UCRED *acred;
1722 code = afs_access(avc, amode, flags, acred);
/* Namespace-operation wrappers (lookup/create/remove/link/rename):
 * each forwards straight to its afs_* counterpart.  NOTE(review): elided
 * chunk -- braces, lock calls and returns are missing from this view. */
1728 gafs_lookup(adp, aname, avcp, pnp, flags, rdir, acred)
1729 struct pathname *pnp;
1732 register struct vcache *adp, **avcp;
1734 struct AFS_UCRED *acred;
1739 code = afs_lookup(adp, aname, avcp, pnp, flags, rdir, acred);
1745 gafs_create(adp, aname, attrs, aexcl, amode, avcp, acred)
1746 register struct vcache *adp;
1748 struct vattr *attrs;
1751 struct vcache **avcp;
1752 struct AFS_UCRED *acred;
1757 code = afs_create(adp, aname, attrs, aexcl, amode, avcp, acred);
1762 gafs_remove(adp, aname, acred)
1763 register struct vcache *adp;
1765 struct AFS_UCRED *acred;
1770 code = afs_remove(adp, aname, acred);
1775 gafs_link(adp, avc, aname, acred)
1776 register struct vcache *avc;
1777 register struct vcache *adp;
1779 struct AFS_UCRED *acred;
1784 code = afs_link(adp, avc, aname, acred);
1789 gafs_rename(aodp, aname1, andp, aname2, acred)
1790 register struct vcache *aodp, *andp;
1791 char *aname1, *aname2;
1792 struct AFS_UCRED *acred;
1797 code = afs_rename(aodp, aname1, andp, aname2, acred);
/* Directory/symlink/fsync wrappers, each forwarding to the afs_*
 * counterpart.  NOTE(review): elided chunk -- braces, lock calls and
 * returns are missing from this view.  gafs_fsync has an extra 'flag'
 * argument on 2.3+ (AFS_SUN53_ENV), hence the dual prototypes. */
1802 gafs_mkdir(adp, aname, attrs, avcp, acred)
1803 register struct vcache *adp;
1804 register struct vcache **avcp;
1806 struct vattr *attrs;
1807 struct AFS_UCRED *acred;
1812 code = afs_mkdir(adp, aname, attrs, avcp, acred);
1818 gafs_rmdir(adp, aname, cdirp, acred)
1819 struct vnode *cdirp;
1820 register struct vcache *adp;
1822 struct AFS_UCRED *acred;
1827 code = afs_rmdir(adp, aname, cdirp, acred);
1833 gafs_readdir(avc, auio, acred, eofp)
1835 register struct vcache *avc;
1837 struct AFS_UCRED *acred;
1842 code = afs_readdir(avc, auio, acred, eofp);
1847 gafs_symlink(adp, aname, attrs, atargetName, acred)
1848 register struct vcache *adp;
1849 register char *atargetName;
1851 struct vattr *attrs;
1852 struct AFS_UCRED *acred;
1857 code = afs_symlink(adp, aname, attrs, atargetName, acred);
1863 gafs_readlink(avc, auio, acred)
1864 register struct vcache *avc;
1866 struct AFS_UCRED *acred;
1871 code = afs_readlink(avc, auio, acred);
1876 #ifdef AFS_SUN53_ENV
1877 gafs_fsync(avc, flag, acred)
1880 gafs_fsync(avc, acred)
1882 register struct vcache *avc;
1883 struct AFS_UCRED *acred;
1888 #ifdef AFS_SUN53_ENV
1889 code = afs_fsync(avc, flag, acred);
1891 code = afs_fsync(avc, acred);
/* VOP_INACTIVE: called when the vnode's last reference is dropped.
 * NOTE(review): elided chunk -- several interior lines (v_count
 * decrement, CCore handling) are missing from the visible source. */
1897 void afs_inactive(struct vcache *avc, struct AFS_UCRED *acred)
1899 struct vnode *vp = (struct vnode *)avc;
/* during shutdown, do nothing rather than touch cache state */
1900 if (afs_shuttingdown) return ;
1903 * In Solaris and HPUX s800 and HP-UX10.0 they actually call us with
1904 * v_count 1 on last reference!
1906 mutex_enter(&vp->v_lock);
1907 if (avc->vrefCount <= 0) osi_Panic("afs_inactive : v_count <=0\n");
1910 * If more than 1 don't unmap the vnode but do decrement the ref count
/* someone re-referenced the vnode: just release the lock and bail
 * (the elided lines presumably return here -- TODO confirm) */
1913 if (vp->v_count > 0) {
1914 mutex_exit(&vp->v_lock);
1917 mutex_exit(&vp->v_lock);
1919 * Solaris calls VOP_OPEN on exec, but isn't very diligent about calling
1920 * VOP_CLOSE when executable exits.
/* clear stale open counts left behind by exec'd binaries, unless the
 * vcache is being kept for a core dump (CCore) */
1922 if (avc->opens > 0 && !(avc->states & CCore))
1923 avc->opens = avc->execsOrWriters = 0;
1925 afs_InactiveVCache(avc, acred);
/* Global-lock wrappers for inactive and fid.  NOTE(review): elided
 * chunk -- braces, lock calls and returns are missing from this view. */
1928 void gafs_inactive(avc, acred)
1929 register struct vcache *avc;
1930 struct AFS_UCRED *acred;
1933 afs_inactive(avc, acred);
/* VOP_FID: produce an NFS-style file identifier for this vcache */
1938 gafs_fid(avc, fidpp)
1945 code = afs_fid(avc, fidpp);
1950 #endif /* AFS_GLOBAL_SUNLOCK */