2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "../afs/param.h"
15 #if defined(AFS_SUN_ENV) || defined(AFS_SUN5_ENV)
17 * SOLARIS/osi_vnodeops.c
21 * Functions: AFS_TRYUP, _init, _info, _fini, afs_addmap, afs_delmap,
22 * afs_vmread, afs_vmwrite, afs_getpage, afs_GetOnePage, afs_putpage,
23 * afs_putapage, afs_nfsrdwr, afs_map, afs_PageLeft, afs_pathconf/afs_cntl,
24 * afs_ioctl, afs_rwlock, afs_rwunlock, afs_seek, afs_space, afs_dump,
25 * afs_cmp, afs_realvp, afs_pageio, afs_dumpctl, afs_dispose, afs_setsecattr,
26 * afs_getsecattr, gafs_open, gafs_close, gafs_getattr, gafs_setattr,
27 * gafs_access, gafs_lookup, gafs_create, gafs_remove, gafs_link,
28 * gafs_rename, gafs_mkdir, gafs_rmdir, gafs_readdir, gafs_symlink,
29 * gafs_readlink, gafs_fsync, afs_inactive, gafs_inactive, gafs_fid
32 * Variables: Afs_vnodeops
35 #include "../afs/sysincludes.h" /* Standard vendor system headers */
36 #include "../afs/afsincludes.h" /* Afs-based standard headers */
37 #include "../afs/afs_stats.h" /* statistics */
38 #include "../afs/nfsclient.h"
47 #include <vm/seg_map.h>
48 #include <vm/seg_vn.h>
50 #if defined(AFS_SUN5_ENV)
51 #include <sys/modctl.h>
52 #include <sys/syscall.h>
56 #include <sys/debug.h>
57 #if defined(AFS_SUN5_ENV)
58 #include <sys/fs_subr.h>
61 #if defined(AFS_SUN5_ENV)
63 * XXX Temporary fix for problems with Solaris rw_tryupgrade() lock.
64 * It isn't very persistent in getting the upgrade when others are
65 * waiting for it and returns 0. So the UpgradeSToW() macro that the
66 * rw_tryupgrade used to map to wasn't good enough and we need to use
67 * the following code instead. Obviously this isn't the proper place
68 * for it but it's only called from here for now
75 if (!rw_tryupgrade(lock)) {
77 rw_enter(lock, RW_WRITER);
84 extern struct as kas; /* kernel addr space */
85 extern unsigned char *afs_indexFlags;
86 extern afs_lock_t afs_xdcache;
88 /* Additional vnodeops for SunOS 4.0.x */
89 int afs_nfsrdwr(), afs_getpage(), afs_putpage(), afs_map();
90 int afs_dump(), afs_cmp(), afs_realvp(), afs_GetOnePage();
/*
 * afs_addmap -- VOP_ADDMAP entry point: invoked when a process maps a
 * region of this vnode into an address space.
 * NOTE(review): fragmentary excerpt -- several parameter declarations and
 * the braces/return are missing from this view.  The visible body performs
 * no mapping bookkeeping (see the XXX below).
 */
96 int afs_addmap(avp, offset, asp, addr, length, prot, maxprot, flags, credp)
97 register struct vnode *avp;
101 int length, prot, maxprot, flags;
102 struct AFS_UCRED *credp;
104 /* XXX What should we do here?? XXX */
/*
 * afs_delmap -- VOP_DELMAP entry point: invoked when a process unmaps a
 * region of this vnode from an address space.
 * NOTE(review): fragmentary excerpt -- several parameter declarations and
 * the braces/return are missing from this view.  The visible body performs
 * no unmapping bookkeeping (see the XXX below).
 */
108 int afs_delmap(avp, offset, asp, addr, length, prot, maxprot, flags, credp)
109 register struct vnode *avp;
113 int length, prot, maxprot, flags;
114 struct AFS_UCRED *credp;
116 /* XXX What should we do here?? XXX */
/*
 * afs_vmread -- read entry used by the VM system.  Sanity-checks that the
 * caller already holds the vnode's rwlock for reading (panics otherwise,
 * since the lock discipline would be violated), then delegates the actual
 * transfer to afs_nfsrdwr() with UIO_READ.
 * NOTE(review): fragmentary excerpt -- the uio/ioflag declarations, braces
 * and return are missing from this view.
 */
120 int afs_vmread(avp, auio, ioflag, acred)
121 register struct vnode *avp;
124 struct AFS_UCRED *acred;
128 if (!RW_READ_HELD(&((struct vcache *)avp)->rwlock))
129 osi_Panic("afs_vmread: !rwlock");
131 code = afs_nfsrdwr((struct vcache *)avp, auio, UIO_READ, ioflag, acred);
/*
 * afs_vmwrite -- write entry used by the VM system.  Mirrors afs_vmread():
 * asserts the vnode's rwlock is held for writing (panics otherwise), then
 * delegates the transfer to afs_nfsrdwr() with UIO_WRITE.
 * NOTE(review): fragmentary excerpt -- the uio/ioflag declarations, braces
 * and return are missing from this view.
 */
137 int afs_vmwrite(avp, auio, ioflag, acred)
138 register struct vnode *avp;
141 struct AFS_UCRED *acred;
145 if (!RW_WRITE_HELD(&((struct vcache *)avp)->rwlock))
146 osi_Panic("afs_vmwrite: !rwlock");
148 code = afs_nfsrdwr((struct vcache *)avp, auio, UIO_WRITE, ioflag, acred);
153 #endif /* AFS_SUN5_ENV */
/*
 * afs_getpage -- VOP_GETPAGE entry point.  Dispatches a page-in request:
 * a single-page request goes straight to afs_GetOnePage(); a multi-page
 * request is handed to pvn_getpages() with afs_GetOnePage as the per-page
 * worker, bracketed by the Bozon lock and vlock manipulation visible below.
 * Files flagged VNOMAP are rejected up front.
 * NOTE(review): fragmentary excerpt -- parameter declarations, #else/#endif
 * pairing and the branch structure are partially missing from this view;
 * the three afs_GetOnePage call forms correspond to different Solaris
 * version conditionals (AFS_SUN56_ENV etc.).
 */
155 int afs_getpage(vp, off, len, protp, pl, plsz, seg, addr, rw, acred)
170 struct AFS_UCRED *acred;
172 register afs_int32 code = 0;
173 AFS_STATCNT(afs_getpage);
176 if (vp->v_flag & VNOMAP) /* File doesn't allow mapping */
182 #if defined(AFS_SUN56_ENV)
184 code = afs_GetOnePage((struct vnode *) vp, off, len, protp, pl, plsz,
185 seg, addr, rw, acred);
189 code = afs_GetOnePage(vp, (u_int)off, len, protp, pl, plsz,
190 seg, addr, rw, acred);
193 code = afs_GetOnePage(vp, off, protp, pl, plsz,
194 seg, addr, rw, acred);
198 struct vcache *vcp = (struct vcache *)vp;
/* Multi-page path: serialize against revocation via vlock, hold the
 * Bozon lock across pvn_getpages (which calls afs_GetOnePage per page). */
200 ObtainWriteLock(&vcp->vlock, 548);
202 ReleaseWriteLock(&vcp->vlock);
204 afs_BozonLock(&vcp->pvnLock, vcp);
205 #if defined(AFS_SUN56_ENV)
206 code = pvn_getpages(afs_GetOnePage, (struct vnode *) vp, off,
207 len, protp, pl, plsz, seg, addr, rw, acred);
209 code = pvn_getpages(afs_GetOnePage, (struct vnode *) vp, (u_int)off,
210 len, protp, pl, plsz, seg, addr, rw, acred);
212 afs_BozonUnlock(&vcp->pvnLock, vcp);
214 ObtainWriteLock(&vcp->vlock, 549);
216 ReleaseWriteLock(&vcp->vlock);
223 /* Return all the pages from [off..off+len) in file */
225 int afs_GetOnePage(vp, off, alen, protp, pl, plsz, seg, addr, rw, acred)
228 int afs_GetOnePage(vp, off, protp, pl, plsz, seg, addr, rw, acred)
231 #if defined(AFS_SUN56_ENV)
246 struct AFS_UCRED *acred;
248 register struct page *page;
249 register afs_int32 code = 0;
253 register struct vcache *avc;
254 register struct dcache *tdc;
257 afs_size_t offset, nlen;
258 struct vrequest treq;
259 afs_int32 mapForRead = 0, Code=0;
260 #if defined(AFS_SUN56_ENV)
268 osi_Panic("GetOnePage: !acred");
270 acred = u.u_cred; /* better than nothing */
273 avc = (struct vcache *) vp; /* cast to afs vnode */
276 if (avc->credp /*&& AFS_NFSXLATORREQ(acred)*/ && AFS_NFSXLATORREQ(avc->credp)) {
280 if (code = afs_InitReq(&treq, acred)) return code;
284 * This is a read-ahead request, e.g. due to madvise.
286 tdc = afs_GetDCache(avc, (afs_int32)off, &treq, &offset, &nlen, 1);
289 if (!(tdc->flags & DFNextStarted)) {
290 ObtainReadLock(&avc->lock);
291 afs_PrefetchChunk(avc, tdc, acred, &treq);
292 ReleaseReadLock(&avc->lock);
299 pl[0] = NULL; /* Make sure it's empty */
301 /* first, obtain the proper lock for the VM system */
303 /* if this is a read request, map the page in read-only. This will
304 * allow us to swap out the dcache entry if there are only read-only
305 * pages created for the chunk, which helps a *lot* when dealing
306 * with small caches. Otherwise, we have to invalidate the vm
307 * pages for the range covered by a chunk when we swap out the
310 if (rw == S_READ || rw == S_EXEC)
313 if (protp) *protp = PROT_ALL;
315 if (AFS_NFSXLATORREQ(acred)) {
317 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
318 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
326 if (rw == S_WRITE || rw == S_CREATE)
327 tdc = afs_GetDCache(avc, (afs_offs_t)off, &treq, &offset, &nlen, 5);
329 tdc = afs_GetDCache(avc, (afs_offs_t)off, &treq, &offset, &nlen, 1);
330 if (!tdc) return EINVAL;
332 code = afs_VerifyVCache(avc, &treq);
337 return afs_CheckCode(code, &treq, 44); /* failed to get it */
340 afs_BozonLock(&avc->pvnLock, avc);
341 ObtainReadLock(&avc->lock);
343 afs_Trace4(afs_iclSetp, CM_TRACE_PAGEIN, ICL_TYPE_POINTER, (afs_int32) vp,
344 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(off),
346 ICL_TYPE_LONG, (int) rw);
352 /* Check to see if we're in the middle of a VM purge, and if we are, release
353 * the locks and try again when the VM purge is done. */
354 ObtainWriteLock(&avc->vlock, 550);
356 ReleaseReadLock(&avc->lock);
357 ReleaseWriteLock(&avc->vlock);
358 afs_BozonUnlock(&avc->pvnLock, avc);
360 /* Check activeV again, it may have been turned off
361 * while we were waiting for a lock in afs_PutDCache */
362 ObtainWriteLock(&avc->vlock, 574);
364 avc->vstates |= VRevokeWait;
365 ReleaseWriteLock(&avc->vlock);
366 afs_osi_Sleep(&avc->vstates);
368 ReleaseWriteLock(&avc->vlock);
372 ReleaseWriteLock(&avc->vlock);
375 /* Check to see whether the cache entry is still valid */
376 if (!(avc->states & CStatd)
377 || !hsame(avc->m.DataVersion, tdc->f.versionNo)) {
378 ReleaseReadLock(&avc->lock);
379 afs_BozonUnlock(&avc->pvnLock, avc);
385 while (1) { /* loop over all pages */
386 /* now, try to find the page in memory (it may already be in transit or lying
387 around the free list) */
388 page = page_lookup( vp, toffset, (rw == S_CREATE ? SE_EXCL : SE_SHARED) );
392 /* if we make it here, we can't find the page in memory. Do a real disk read
393 from the cache to get the data */
394 Code |= 0x200; /* XXX */
396 #if defined(AFS_SUN54_ENV)
397 /* use PG_EXCL because we know the page does not exist already. If it
398 * actually does exist, we have somehow raced between lookup and create.
399 * As of 4/98, that shouldn't be possible, but we'll be defensive here
400 * in case someone tries to relax all the serialization of read and write
401 * operations with harmless things like stat. */
402 #if defined(AFS_SUN58_ENV)
403 page = page_create_va(vp, toffset, PAGESIZE, PG_WAIT|PG_EXCL, seg, addr);
405 page = page_create_va(vp, toffset, PAGESIZE, PG_WAIT|PG_EXCL, seg->s_as, addr);
408 page = page_create(vp, toffset, PAGESIZE, PG_WAIT);
414 pagezero(page, alen, PAGESIZE-alen);
416 page = rm_allocpage(seg, addr, PAGESIZE, 1); /* can't fail */
417 if (!page) osi_Panic("afs_getpage alloc page");
418 /* we get a circularly-linked list of pages back, but we expect only
419 one, since that's what we asked for */
420 if (page->p_next != page) osi_Panic("afs_getpage list");
421 /* page enter returns a locked page; we'll drop the lock as a side-effect
422 of the pvn_done done by afs_ustrategy. If we decide not to call
423 strategy, we must be sure to call pvn_fail, at least, to release the
424 page locks and otherwise reset the pages. The page, while locked, is
425 not held, for what it is worth */
426 page->p_intrans = 1; /* set appropriate flags */
428 /* next call shouldn't fail, since we have pvnLock set */
429 if (page_enter(page, vp, toffset)) osi_Panic("afs_getpage enter race");
430 #endif /* AFS_SUN5_ENV */
433 if (rw == S_CREATE) {
434 /* XXX Don't read from AFS in write only cases XXX */
435 page_io_unlock(page);
439 /* XXX Don't read from AFS in write only cases XXX */
440 page->p_intrans = page->p_pagein = 0;
441 page_unlock(page); /* XXX */
448 /* now it is time to start I/O operation */
449 buf = pageio_setup(page, PAGESIZE, vp, B_READ); /* allocate a buf structure */
450 #if defined(AFS_SUN5_ENV)
454 buf->b_blkno = btodb(toffset);
455 bp_mapin(buf); /* map it in to our address space */
457 ReleaseReadLock(&avc->lock);
459 #if defined(AFS_SUN5_ENV)
461 code = afs_ustrategy(buf, acred); /* do the I/O */
464 code = afs_ustrategy(buf); /* do the I/O */
467 ObtainReadLock(&avc->lock);
470 /* Before freeing unmap the buffer */
481 page_io_unlock(page);
485 /* come here when we have another page (already held) to enter */
487 /* put page in array and continue */
489 /* The p_selock must be downgraded to a shared lock after the page is read */
490 #if defined(AFS_SUN56_ENV)
491 if ((rw != S_CREATE) && !(PAGE_SHARED(page)))
493 if ((rw != S_CREATE) && !(se_shared_assert(&page->p_selock)))
496 page_downgrade(page);
501 code = page_iolock_assert(page);
507 if (tlen <= 0) break; /* done all the pages */
508 } /* while (1) ... */
511 pl[slot] = (struct page *) 0;
513 * XXX This seems kind-of wrong: we shouldn't be modifying
514 * avc->states while not holding the write lock (even
515 * though nothing really uses CHasPages..)
517 avc->states |= CHasPages;
518 ReleaseReadLock(&avc->lock);
520 ObtainWriteLock(&afs_xdcache,246);
522 /* track that we have dirty (or dirty-able) pages for this chunk. */
523 afs_indexFlags[tdc->index] |= IFDirtyPages;
525 afs_indexFlags[tdc->index] |= IFAnyPages;
526 ReleaseWriteLock(&afs_xdcache);
528 afs_BozonUnlock(&avc->pvnLock, avc);
532 afs_Trace3(afs_iclSetp, CM_TRACE_PAGEINDONE, ICL_TYPE_LONG, code, ICL_TYPE_LONG, (int)page,
533 ICL_TYPE_LONG, Code);
538 afs_Trace3(afs_iclSetp, CM_TRACE_PAGEINDONE, ICL_TYPE_LONG, code, ICL_TYPE_LONG, (int)page,
539 ICL_TYPE_LONG, Code);
540 /* release all pages, drop locks, return code */
543 pvn_read_done(page, B_ERROR);
545 for(i=0; i<slot; i++)
548 ReleaseReadLock(&avc->lock);
549 afs_BozonUnlock(&avc->pvnLock, avc);
557 int afs_putpage(vp, off, len, flags, cred)
562 struct AFS_UCRED *cred;
567 #if defined(AFS_SUN58_ENV)
572 afs_int32 endPos, NPages=0;
573 #if defined(AFS_SUN56_ENV)
574 u_offset_t toff = off;
579 AFS_STATCNT(afs_putpage);
580 if (vp->v_flag & VNOMAP) /* file doesn't allow mapping */
584 * Putpage (ASYNC) is called every sec to flush out dirty vm pages
587 afs_Trace4(afs_iclSetp, CM_TRACE_PAGEOUT, ICL_TYPE_POINTER, (afs_int32) vp,
588 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(off),
589 ICL_TYPE_INT32, (afs_int32) len,
590 ICL_TYPE_LONG, (int) flags);
591 avc = (struct vcache *) vp;
592 afs_BozonLock(&avc->pvnLock, avc);
593 ObtainWriteLock(&avc->lock,247);
595 /* Get a list of modified (or whatever) pages */
597 endPos = (int)off + len; /* position we're supposed to write up to */
598 while ((afs_int32)toff < endPos && (afs_int32)toff < avc->m.Length) {
599 /* If not invalidating pages use page_lookup_nowait to avoid reclaiming
600 * them from the free list
603 if (flags & (B_FREE|B_INVAL))
604 pages = page_lookup(vp, toff, SE_EXCL);
606 pages = page_lookup_nowait(vp, toff, SE_SHARED);
607 if (!pages || !pvn_getdirty(pages, flags))
611 code = afs_putapage(vp, pages, &toff, &tlen, flags, cred);
622 #if defined(AFS_SUN56_ENV)
623 code = pvn_vplist_dirty(vp, toff, afs_putapage, flags, cred);
625 code = pvn_vplist_dirty(vp, (u_int)off, afs_putapage, flags, cred);
630 if (code && !avc->vc_error)
631 avc->vc_error = code;
633 ReleaseWriteLock(&avc->lock);
634 afs_BozonUnlock(&avc->pvnLock, avc);
635 afs_Trace2(afs_iclSetp, CM_TRACE_PAGEOUTDONE, ICL_TYPE_LONG, code, ICL_TYPE_LONG, NPages);
/*
 * afs_putapage -- write one (klustered) dirty page of this vnode back to
 * the AFS cache via afs_ustrategy().  Clamps the transfer length to the
 * file size, sets up a pageio buf, issues the write, completes the pages
 * with pvn_write_done(), and reports the offset/length written through
 * *offp/*lenp.
 * NOTE(review): fragmentary excerpt -- conditional parameter lists, braces
 * and some declarations (e.g. toff, tbuf/code for some branches) are
 * missing from this view.
 */
641 int afs_putapage(struct vnode *vp, struct page *pages,
642 #if defined(AFS_SUN56_ENV)
647 #if defined(AFS_SUN58_ENV)
652 int flags, struct AFS_UCRED *credp)
655 struct vcache *avc = (struct vcache *)vp;
658 u_int tlen = PAGESIZE, off = (pages->p_offset/PAGESIZE)*PAGESIZE;
659 u_int poff = pages->p_offset;
662 * Now we've got the modified pages. All pages are locked and held
663 * XXX Find a kluster that fits in one block (or page). We also
664 * adjust the i/o if the file space is less than a whole page. XXX
/* don't write past EOF: trim the transfer to the file's length */
667 if (toff + tlen > avc->m.Length) {
668 tlen = avc->m.Length - toff;
670 /* can't call mapout with 0 length buffers (rmfree panics) */
/* NOTE(review): the test below checks the top byte of tlen, i.e. it
 * presumably catches a negative (wrapped) length after the clamp above
 * -- TODO confirm against the full source. */
671 if (((tlen>>24)&0xff) == 0xff) {
676 * Can't call mapout with 0 length buffers since we'll get rmfree panics
678 tbuf = pageio_setup(pages, tlen, vp, B_WRITE | flags);
679 if (!tbuf) return (ENOMEM);
682 tbuf->b_blkno = btodb(pages->p_offset);
685 afs_Trace4(afs_iclSetp, CM_TRACE_PAGEOUTONE, ICL_TYPE_LONG, avc,
686 ICL_TYPE_LONG, pages,
688 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(toff));
689 code = afs_ustrategy(tbuf, credp); /* unlocks page */
693 pvn_write_done(pages, ((code) ? B_ERROR:0) | B_WRITE | flags);
/* report back what range we actually wrote */
696 if (offp) *offp = toff;
697 if (lenp) *lenp = tlen;
703 int afs_putpage(vp, off, len, flags, cred)
708 struct AFS_UCRED *cred;
710 int wholeEnchilada; /* true if we must get all of the pages */
716 afs_int32 code = 0, rcode;
718 afs_int32 clusterStart, clusterEnd, endPos;
720 /* In the wholeEnchilada case, we must ensure that we get all of the pages
721 from the system, since we're doing this to shutdown the use of a vnode */
723 AFS_STATCNT(afs_putpage);
724 wholeEnchilada = (off == 0 && len == 0 && (flags & (B_INVAL|B_ASYNC)) == B_INVAL);
726 avc = (struct vcache *) vp;
727 afs_BozonLock(&avc->pvnLock, avc);
728 ObtainWriteLock(&avc->lock,248);
731 /* in whole enchilada case, loop until call to pvn_getdirty can't find
732 any more modified pages */
734 /* first we try to get a list of modified (or whatever) pages */
736 pages = pvn_vplist_dirty(vp, off, flags);
739 endPos = off + len; /* position we're supposed to write up to */
740 if (endPos > avc->m.Length) endPos = avc->m.Length; /* bound by this */
741 clusterStart = off & ~(PAGESIZE-1); /* round down to nearest page */
742 clusterEnd = ((endPos-1) | (PAGESIZE-1))+1; /* round up to nearest page */
743 pages = pvn_range_dirty(vp, off, endPos, clusterStart, clusterEnd, flags);
746 /* Now we've got the modified pages. All pages are locked and held */
747 rcode = 0; /* return code */
748 while(pages) { /* look over all pages in the returned set */
749 tpage = pages; /* get first page in the list */
751 /* write out the page */
752 poffset = tpage->p_offset; /* where the page starts in the file */
753 /* tlen will represent the end of the range to write, for a while */
754 tlen = PAGESIZE+poffset; /* basic place to end tpage write */
755 /* but we also don't want to write past end of off..off+len range */
756 if (len != 0 && tlen > off+len) tlen = off+len;
757 /* and we don't want to write past the end of the file */
758 if (tlen > avc->m.Length) tlen = avc->m.Length;
759 /* and we don't want to write at all if page starts after end */
760 if (poffset >= tlen) {
761 pvn_fail(pages, B_WRITE | flags);
764 /* finally change tlen from end position to length */
765 tlen -= poffset; /* compute bytes to write from this page */
766 page_sub(&pages, tpage); /* remove tpage from "pages" list */
767 tbuf = pageio_setup(tpage, tlen, vp, B_WRITE | flags);
769 pvn_fail(tpage, B_WRITE|flags);
770 pvn_fail(pages, B_WRITE|flags);
774 tbuf->b_blkno = btodb(tpage->p_offset);
776 ReleaseWriteLock(&avc->lock); /* can't hold during strategy call */
777 code = afs_ustrategy(tbuf); /* unlocks page */
778 ObtainWriteLock(&avc->lock,249); /* re-obtain */
780 /* unlocking of tpage is done by afs_ustrategy */
782 if (pages) /* may have already removed last page */
783 pvn_fail(pages, B_WRITE|flags);
786 } /* for (tpage=pages....) */
788 /* see if we've gotten all of the pages in the whole enchilada case */
789 if (!wholeEnchilada || !vp->v_pages) break;
790 } /* while(1) obtaining all pages */
793 * If low on chunks, and if writing the last byte of a chunk, try to
794 * free some. Note that afs_DoPartialWrite calls osi_SyncVM which now
795 * calls afs_putpage, so this is recursion. It stops there because we
796 * insist on len being non-zero.
798 if (afs_stats_cmperf.cacheCurrDirtyChunks > afs_stats_cmperf.cacheMaxDirtyChunks
799 && len != 0 && AFS_CHUNKOFFSET((off + len)) == 0) {
800 struct vrequest treq;
801 if (!afs_InitReq(&treq, cred ? cred : u.u_cred)) {
802 rcode = afs_DoPartialWrite(avc, &treq); /* XXX */
808 if (rcode && !avc->vc_error)
809 avc->vc_error = rcode;
811 /* when we're here, we just return code. */
812 ReleaseWriteLock(&avc->lock);
813 afs_BozonUnlock(&avc->pvnLock, avc);
817 #endif /* AFS_SUN5_ENV */
819 int afs_nfsrdwr(avc, auio, arw, ioflag, acred)
820 register struct vcache *avc;
824 struct AFS_UCRED *acred;
826 register afs_int32 code;
829 afs_int32 mode, sflags;
831 struct dcache *dcp, *dcp_newpage;
832 afs_size_t fileBase, size;
834 register afs_int32 tsize;
835 register afs_int32 pageOffset, extraResid=0;
836 register afs_size_t origLength; /* length when reading/writing started */
837 register long appendLength; /* length when this call will finish */
838 int created; /* created pages instead of faulting them */
840 int didFakeOpen, eof;
841 struct vrequest treq;
845 AFS_STATCNT(afs_nfsrdwr);
847 /* can't read or write other things */
848 if (vType(avc) != VREG) return EISDIR;
850 if (auio->uio_resid == 0)
853 afs_Trace4(afs_iclSetp, CM_TRACE_VMRW, ICL_TYPE_POINTER, (afs_int32)avc,
854 ICL_TYPE_LONG, (arw==UIO_WRITE? 1 : 0),
855 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(auio->uio_offset),
856 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(auio->uio_resid));
858 #ifndef AFS_64BIT_CLIENT
859 if ( AfsLargeFileUio(auio) ) /* file is larger than 2 GB */
864 if (!acred) osi_Panic("rdwr: !acred");
866 if (!acred) acred = u.u_cred;
868 if (code = afs_InitReq(&treq, acred)) return code;
870 /* It's not really possible to know if a write caused a growth in the
871 * cache size, so we wait for a cache drain for any write.
873 afs_MaybeWakeupTruncateDaemon();
874 while ((arw == UIO_WRITE) &&
875 (afs_blocksUsed > (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100)) {
876 if (afs_blocksUsed - afs_blocksDiscarded >
877 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
878 afs_WaitForCacheDrain = 1;
879 afs_osi_Sleep(&afs_WaitForCacheDrain);
881 afs_MaybeFreeDiscardedDCache();
882 afs_MaybeWakeupTruncateDaemon();
884 code = afs_VerifyVCache(avc, &treq);
885 if (code) return afs_CheckCode(code, &treq, 45);
887 afs_BozonLock(&avc->pvnLock, avc);
888 osi_FlushPages(avc, acred); /* hold bozon lock, but not basic vnode lock */
890 ObtainWriteLock(&avc->lock,250);
892 /* adjust parameters when appending files */
893 if ((ioflag & IO_APPEND) && arw == UIO_WRITE)
895 #if defined(AFS_SUN56_ENV)
896 auio->uio_loffset = 0;
898 auio->uio_offset = avc->m.Length; /* write at EOF position */
900 if (auio->uio_offset < 0 || (auio->uio_offset + auio->uio_resid) < 0) {
901 ReleaseWriteLock(&avc->lock);
902 afs_BozonUnlock(&avc->pvnLock, avc);
906 #ifndef AFS_64BIT_CLIENT
907 /* file is larger than 2GB */
908 if ( AfsLargeFileSize(auio->uio_offset, auio->uio_resid) ) {
909 ReleaseWriteLock(&avc->lock);
910 afs_BozonUnlock(&avc->pvnLock, avc);
915 didFakeOpen=0; /* keep track of open so we can do close */
916 if (arw == UIO_WRITE) {
917 /* do ulimit processing; shrink resid or fail */
/* ulimit processing for writes (SUN56 variant): if the write would cross
 * the process file-size limit, either fail it outright (already at/past
 * the limit) or trim resid so we stop exactly at the limit, remembering
 * the punted byte count in extraResid for the caller. */
918 #if defined(AFS_SUN56_ENV)
919 if (auio->uio_loffset + auio->afsio_resid > auio->uio_llimit) {
/*
 * BUG(review): the condition below compares uio_llimit with ITSELF and is
 * therefore always true, so any write that merely crosses the limit is
 * rejected outright instead of being trimmed.  It should almost certainly
 * read
 *     if (auio->uio_loffset >= auio->uio_llimit) {
 * to mirror the non-SUN56 branch below (afsio_offset >= uio_limit).
 */
920 if (auio->uio_llimit >= auio->uio_llimit) {
921 ReleaseWriteLock(&avc->lock);
922 afs_BozonUnlock(&avc->pvnLock, avc);
925 /* track # of bytes we should write, but won't because of
926 * ulimit; we must add this into the final resid value
927 * so caller knows we punted some data.
929 extraResid = auio->uio_resid;
930 auio->uio_resid = auio->uio_llimit - auio->uio_loffset;
931 extraResid -= auio->uio_resid;
936 if (auio->afsio_offset + auio->afsio_resid > auio->uio_limit) {
937 if (auio->afsio_offset >= auio->uio_limit) {
938 ReleaseWriteLock(&avc->lock);
939 afs_BozonUnlock(&avc->pvnLock, avc);
942 /* track # of bytes we should write, but won't because of
943 * ulimit; we must add this into the final resid value
944 * so caller knows we punted some data.
946 extraResid = auio->uio_resid;
947 auio->uio_resid = auio->uio_limit - auio->afsio_offset;
948 extraResid -= auio->uio_resid;
953 mode = S_WRITE; /* segment map-in mode */
954 afs_FakeOpen(avc); /* do this for writes, so data gets put back
955 when we want it to be put back */
956 didFakeOpen = 1; /* we'll be doing a fake open */
957 /* before starting any I/O, we must ensure that the file is big enough
958 to hold the results (since afs_putpage will be called to force the I/O */
959 size = auio->afsio_resid + auio->afsio_offset; /* new file size */
961 origLength = avc->m.Length;
962 if (size > avc->m.Length)
963 avc->m.Length = size; /* file grew */
964 avc->states |= CDirty; /* Set the dirty bit */
965 avc->m.Date = osi_Time(); /* Set file date (for ranlib) */
967 mode = S_READ; /* map-in read-only */
968 origLength = avc->m.Length;
971 if (acred && AFS_NFSXLATORREQ(acred)) {
972 if (arw == UIO_READ) {
973 if (!afs_AccessOK(avc, PRSFS_READ, &treq,
974 CHECK_MODE_BITS|CMB_ALLOW_EXEC_AS_READ)) {
975 ReleaseWriteLock(&avc->lock);
976 afs_BozonUnlock(&avc->pvnLock, avc);
988 counter = 0; /* don't call afs_DoPartialWrite first time through. */
990 /* compute the amount of data to move into this block,
991 based on auio->afsio_resid. Note that we copy data in units of
992 MAXBSIZE, not PAGESIZE. This is because segmap_getmap panics if you
993 call it with an offset based on blocks smaller than MAXBSIZE
994 (implying that it should be named BSIZE, since it is clearly both a
996 size = auio->afsio_resid; /* transfer size */
997 fileBase = auio->afsio_offset; /* start file position for xfr */
998 pageBase = fileBase & ~(MAXBSIZE-1); /* file position of the page */
999 pageOffset = fileBase & (MAXBSIZE-1); /* xfr start's offset within page */
1000 tsize = MAXBSIZE-pageOffset; /* how much more fits in this page */
1001 /* we'll read tsize bytes, but first must make sure tsize isn't too big */
1002 if (tsize > size) tsize = size; /* don't read past end of request */
1003 eof = 0; /* flag telling us if we hit the EOF on the read */
1004 if (arw == UIO_READ) { /* we're doing a read operation */
1005 /* don't read past EOF */
1006 if (fileBase + tsize > origLength) {
1007 tsize = origLength - fileBase;
1008 eof = 1; /* we did hit the EOF */
1009 if (tsize < 0) tsize = 0; /* better safe than sorry */
1015 /* Purge dirty chunks of file if there are too many dirty
1016 * chunks. Inside the write loop, we only do this at a chunk
1017 * boundary. Clean up partial chunk if necessary at end of loop.
1019 if (counter > 0 && code == 0 && AFS_CHUNKOFFSET(fileBase) == 0)
1021 code = afs_DoPartialWrite(avc, &treq);
1025 #endif /* AFS_SUN5_ENV */
1026 /* write case, we ask segmap_release to call putpage. Really, we
1027 don't have to do this on every page mapin, but for now we're
1028 lazy, and don't modify the rest of AFS to scan for modified
1029 pages on a close or other "synchronize with file server"
1030 operation. This makes things a little cleaner, but probably
1031 hurts performance. */
1036 break; /* nothing to transfer, we're done */
1039 if (arw == UIO_WRITE)
1040 avc->states |= CDirty; /* may have been cleared by DoPartialWrite*/
1042 /* Before dropping lock, hold the chunk (create it if necessary). This
1043 * serves two purposes: (1) Ensure Cache Truncate Daemon doesn't try
1044 * to purge the chunk's pages while we have them locked. This would
1045 * cause deadlock because we might be waiting for the CTD to free up
1046 * a chunk. (2) If we're writing past the original EOF, and we're
1047 * at the base of the chunk, then make sure it exists online
1048 * before we do the uiomove, since the segmap_release will
1049 * write out to the chunk, causing it to get fetched if it hasn't
1050 * been created yet. The code that would otherwise notice that
1051 * we're fetching a chunk past EOF won't work, since we've
1052 * already adjusted the file size above.
1054 ObtainWriteLock(&avc->vlock, 551);
1055 while (avc->vstates & VPageCleaning) {
1056 ReleaseWriteLock(&avc->vlock);
1057 ReleaseWriteLock(&avc->lock);
1058 afs_osi_Sleep(&avc->vstates);
1059 ObtainWriteLock(&avc->lock, 334);
1060 ObtainWriteLock(&avc->vlock, 552);
1062 ReleaseWriteLock(&avc->vlock);
1064 afs_size_t toff, tlen;
1065 dcp = afs_GetDCache(avc, fileBase, &treq, &toff, &tlen, 2);
1072 ReleaseWriteLock(&avc->lock); /* uiomove may page fault */
1074 #if defined(AFS_SUN56_ENV)
1075 data = segmap_getmap(segkmap,(struct vnode *)avc,(u_offset_t)pageBase);
1077 data = segmap_getmap(segkmap, (struct vnode *) avc, pageBase);
1079 #ifndef AFS_SUN5_ENV
1080 code = as_fault(&kas, data+pageOffset, tsize, F_SOFTLOCK, mode);
1082 AFS_UIOMOVE(data+pageOffset, tsize, arw, auio, code);
1083 as_fault(&kas, data+pageOffset, tsize, F_SOFTUNLOCK, mode);
1084 code2 = segmap_release(segkmap, data, sflags);
1089 (void) segmap_release(segkmap, data, 0);
1092 #if defined(AFS_SUN56_ENV)
1093 raddr = (caddr_t) (((uintptr_t)data +pageOffset) & PAGEMASK);
1095 raddr = (caddr_t) (((u_int)data +pageOffset) & PAGEMASK);
1097 rsize = (((u_int)data+pageOffset+tsize+PAGEOFFSET) & PAGEMASK)-(u_int)raddr;
1099 /* if we're doing a write, and we're starting at the rounded
1100 * down page base, and we're writing enough data to cover all
1101 * created pages, then we must be writing all of the pages
1102 * in this MAXBSIZE window that we're creating.
1105 if (arw == UIO_WRITE
1106 && ((long)raddr == (long)data+pageOffset)
1107 && tsize >= rsize) {
1108 /* probably the dcache backing this guy is around, but if
1109 * not, we can't do this optimization, since we're creating
1110 * writable pages, which must be backed by a chunk.
1113 dcp_newpage = afs_FindDCache(avc, pageBase);
1115 && hsame(avc->m.DataVersion, dcp_newpage->f.versionNo)) {
1116 ObtainWriteLock(&avc->lock,251);
1117 ObtainWriteLock(&avc->vlock,576);
1118 if ((avc->activeV == 0)
1119 && hsame(avc->m.DataVersion, dcp_newpage->f.versionNo)
1120 && !(dcp_newpage->flags & (DFFetching))) {
1122 segmap_pagecreate(segkmap, raddr, rsize, 1);
1124 ObtainWriteLock(&afs_xdcache,252);
1125 /* Mark the pages as created and dirty */
1126 afs_indexFlags[dcp_newpage->index]
1127 |= (IFAnyPages | IFDirtyPages);
1128 ReleaseWriteLock(&afs_xdcache);
1129 avc->states |= CHasPages;
1132 afs_PutDCache(dcp_newpage);
1133 ReleaseWriteLock(&avc->vlock);
1134 ReleaseWriteLock(&avc->lock);
1136 else if ( dcp_newpage )
1137 afs_PutDCache(dcp_newpage);
1141 code = segmap_fault(kas.a_hat, segkmap, raddr, rsize, F_SOFTLOCK, mode);
1144 AFS_UIOMOVE(data+pageOffset, tsize, arw, auio, code);
1145 segmap_fault(kas.a_hat, segkmap, raddr, rsize, F_SOFTUNLOCK, mode);
1148 code = segmap_release(segkmap, data, sflags);
1150 (void) segmap_release(segkmap, data, 0);
1152 #endif /* AFS_SUN5_ENV */
1154 ObtainWriteLock(&avc->lock,253);
1157 * If at a chunk boundary, start prefetch of next chunk.
1159 if (counter == 0 || AFS_CHUNKOFFSET(fileBase) == 0) {
1160 if (!(dcp->flags & DFNextStarted))
1161 afs_PrefetchChunk(avc, dcp, acred, &treq);
1166 #endif /* AFS_SUN5_ENV */
1170 afs_FakeClose(avc, acred);
1174 if (arw == UIO_WRITE && (avc->states & CDirty)) {
1175 code2 = afs_DoPartialWrite(avc, &treq);
1179 #endif /* AFS_SUN5_ENV */
1181 if (!code && avc->vc_error) {
1182 code = avc->vc_error;
1184 ReleaseWriteLock(&avc->lock);
1185 afs_BozonUnlock(&avc->pvnLock, avc);
1187 #ifdef AFS_SUN53_ENV
1188 if ((ioflag & FSYNC) && (arw == UIO_WRITE) && !AFS_NFSXLATORREQ(acred))
1189 code = afs_fsync(avc, 0, acred);
1191 if ((ioflag & IO_SYNC) && (arw == UIO_WRITE)
1192 && !AFS_NFSXLATORREQ(acred))
1193 code = afs_fsync(avc, acred);
1196 #ifdef AFS_SUN52_ENV
1198 * If things worked, add in as remaining in request any bytes
1199 * we didn't write due to file size ulimit.
1201 if (code == 0 && extraResid > 0)
1202 auio->uio_resid += extraResid;
1204 return afs_CheckCode(code, &treq, 46);
1207 afs_map(vp, off, as, addr, len, prot, maxprot, flags, cred)
1219 u_char prot, maxprot;
1221 u_int prot, maxprot;
1224 struct AFS_UCRED *cred;
1226 struct segvn_crargs crargs;
1227 register afs_int32 code;
1228 struct vrequest treq;
1229 register struct vcache *avc = (struct vcache *) vp;
1231 AFS_STATCNT(afs_map);
1234 /* check for reasonableness on segment bounds; apparently len can be < 0 */
1235 if (off < 0 || off + len < 0) {
1238 #ifndef AFS_64BIT_CLIENT
1239 if ( AfsLargeFileSize(off, len) ) /* file is larger than 2 GB */
1246 #if defined(AFS_SUN5_ENV)
1247 if (vp->v_flag & VNOMAP) /* File isn't allowed to be mapped */
1250 if (vp->v_filocks) /* if locked, disallow mapping */
1254 if (code = afs_InitReq(&treq, cred)) goto out;
1256 if (vp->v_type != VREG) {
1261 code = afs_VerifyVCache(avc, &treq);
1265 afs_BozonLock(&avc->pvnLock, avc);
1266 osi_FlushPages(avc, cred); /* ensure old pages are gone */
1267 avc->states |= CMAPPED; /* flag cleared at afs_inactive */
1268 afs_BozonUnlock(&avc->pvnLock, avc);
1274 if ((flags & MAP_FIXED) == 0) {
1275 #if defined(AFS_SUN57_ENV)
1276 map_addr(addr, len, off, 1, flags);
1277 #elif defined(AFS_SUN56_ENV)
1278 map_addr(addr, len, off, 1);
1280 map_addr(addr, len, (off_t)off, 1);
1282 if (*addr == NULL) {
1290 (void) as_unmap(as, *addr, len); /* unmap old address space use */
1291 /* setup the create parameter block for the call */
1292 crargs.vp = (struct vnode *) avc;
1293 crargs.offset = (u_int)off;
1295 crargs.type = flags&MAP_TYPE;
1297 crargs.maxprot = maxprot;
1298 crargs.amp = (struct anon_map *) 0;
1299 #if defined(AFS_SUN5_ENV)
1300 crargs.flags = flags & ~MAP_TYPE;
1303 code = as_map(as, *addr, len, segvn_create, (char *) &crargs);
1309 code = afs_CheckCode(code, &treq, 47);
1313 code = afs_CheckCode(code, &treq, 48);
1318 /* Sun 4.0.X-specific code. It computes the number of bytes that need
1319 to be zeroed at the end of a page by pvn_vptrunc, given that you're
1320 trying to get vptrunc to truncate a file to alen bytes. The result
1321 will be passed to pvn_vptrunc by the truncate code */
1322 #ifndef AFS_SUN5_ENV /* Not good for Solaris */
/*
 * afs_PageLeft (SunOS 4.x only) -- given a truncate-to length alen,
 * compute how many trailing bytes of the final page pvn_vptrunc must
 * zero.  Returns 0 when the last page would be zeroed entirely (or on
 * a negative intermediate result), since pvn_vptrunc invalidates whole
 * pages itself.
 * NOTE(review): fragmentary excerpt -- the function header line and the
 * return statement are missing from this view.
 */
1324 register afs_int32 alen; {
1325 register afs_int32 nbytes;
1327 AFS_STATCNT(afs_PageLeft);
1328 nbytes = PAGESIZE - (alen & PAGEOFFSET); /* amount to zap in last page */
1329 /* now check if we'd zero the entire last page. Don't need to do this
1330 since pvn_vptrunc will handle this case properly (it will invalidate
1332 if (nbytes == PAGESIZE) nbytes = 0;
1333 if (nbytes < 0) nbytes = 0; /* just in case */
1340 * For now we use standard local kernel params for AFS system values. Change this
1343 #if defined(AFS_SUN5_ENV)
/*
 * afs_pathconf (Solaris) / afs_cntl (older kernels): answer _PC_* pathconf
 * queries through *outdatap.  Local kernel limits are returned rather than
 * AFS-specific values (see the note above this function).
 */
1344 afs_pathconf(vp, cmd, outdatap, credp)
1345 register struct AFS_UCRED *credp;
1347 afs_cntl(vp, cmd, indatap, outdatap, inflag, outflag)
1348 int inflag, outflag;
1355 AFS_STATCNT(afs_cntl);
1358 *outdatap = MAXLINK; /* presumably the _PC_LINK_MAX case -- case labels elided */
1361 *outdatap = MAXNAMLEN; /* presumably _PC_NAME_MAX */
1364 *outdatap = MAXPATHLEN; /* presumably _PC_PATH_MAX */
1366 case _PC_CHOWN_RESTRICTED:
1372 #if !defined(AFS_SUN5_ENV)
1374 *outdatap = CANBSIZ; /* presumably _PC_MAX_CANON (non-Solaris only) */
1377 *outdatap = VDISABLE; /* presumably _PC_VDISABLE (non-Solaris only) */
1389 #endif /* AFS_SUN_ENV */
1391 #if defined(AFS_SUN5_ENV)
/* VOP_IOCTL entry point (Solaris 2.x signature); body elided in this listing. */
1393 afs_ioctl(vnp, com, arg, flag, credp, rvalp)
/* VOP_RWLOCK: take the vcache's rwlock -- writer when wlock is nonzero, else reader. */
1402 void afs_rwlock(vnp, wlock)
1406 rw_enter(&((struct vcache *)vnp)->rwlock, (wlock ? RW_WRITER : RW_READER));
/* VOP_RWUNLOCK: release the vcache's rwlock.  rw_exit() handles either
 * reader or writer mode, so the wlock argument is not consulted. */
1410 void afs_rwunlock(vnp, wlock)
1414 rw_exit(&((struct vcache *)vnp)->rwlock);
/* VOP_SEEK: validate the proposed offset *noffp; anything outside
 * [0, MAXOFF_T] is an error (error assignment elided in this listing). */
1419 afs_seek(vnp, ooff, noffp)
1424 register int code = 0;
1426 if ((*noffp < 0 || *noffp > MAXOFF_T))
/*
 * afs_frlock: VOP record-lock entry (F_GETLK/F_O_GETLK/F_SETLK/F_SETLKW),
 * implemented on top of afs_lockctl().
 */
1431 int afs_frlock(vnp, cmd, ap, flag, off,
1432 #ifdef AFS_SUN59_ENV
1438 #if defined(AFS_SUN56_ENV)
1445 #ifdef AFS_SUN59_ENV
1446 struct flk_callback *flkcb;
1448 struct AFS_UCRED *credp;
1450 register afs_int32 code = 0;
1452 * Implement based on afs_lockctl
1455 #ifdef AFS_SUN59_ENV
/* Solaris 9 passes a blocking-notification callback that AFS does not
 * support; warn rather than use it. */
1457 afs_warn("Don't know how to deal with flk_callback's!\n");
1459 if ((cmd == F_GETLK) || (cmd == F_O_GETLK) || (cmd == F_SETLK) || (cmd == F_SETLKW)) {
1460 #ifdef AFS_SUN53_ENV
/* Stamp the requesting thread's process into the flock so lock ownership
 * is attributed correctly (field names differ across Solaris releases). */
1461 ap->l_pid = ttoproc(curthread)->p_pid;
1464 ap->l_pid = ttoproc(curthread)->p_epid;
1465 ap->l_sysid = ttoproc(curthread)->p_sysid;
1469 #ifdef AFS_SUN56_ENV
/* Normalize l_start/l_whence to an absolute file offset before locking. */
1470 code = convoff(vnp, ap, 0, off);
1472 code = convoff(vnp, ap, 0, (off_t)off);
1474 if (code) return code;
1478 code = afs_lockctl((struct vcache *)vnp, ap, cmd, credp);
/*
 * afs_space: VOP_SPACE entry.  Only F_FREESP is supported; it is implemented
 * as a truncate to ap->l_start via afs_setattr(AT_SIZE).
 * NOTE(review): the visible lines ignore ap->l_len -- confirm the elided
 * code enforces l_len == 0 (free-to-EOF) semantics.
 */
1484 int afs_space(vnp, cmd, ap, flag, off, credp)
1487 #if defined(AFS_SUN56_ENV)
1494 struct AFS_UCRED *credp;
1496 register afs_int32 code = EINVAL; /* any cmd other than F_FREESP fails */
1499 if ((cmd == F_FREESP)
1500 #ifdef AFS_SUN56_ENV
/* convoff() converts l_start/l_whence to an absolute offset; proceed only
 * when the conversion succeeds. */
1501 && ((code = convoff(vnp, ap, 0, off)) == 0)) {
1503 && ((code = convoff(vnp, ap, 0, (off_t)off)) == 0)) {
1507 vattr.va_mask = AT_SIZE;
1508 vattr.va_size = ap->l_start;
1509 code = afs_setattr((struct vcache *)vnp, &vattr, 0, credp);
/* VOP_DUMP stub: crash-dump support for AFS files is not implemented;
 * just count the call and warn. */
1519 int afs_dump(vp, addr, i1, i2)
1524 AFS_STATCNT(afs_dump);
1525 afs_warn("AFS_DUMP. MUST IMPLEMENT THIS!!!\n");
1530 /* Nothing fancy here; just compare if vnodes are identical ones */
/* VOP_CMP: pointer-identity comparison of two AFS vnodes. */
1532 struct vnode *vp1, *vp2;
1534 AFS_STATCNT(afs_cmp);
/* VOP_REALVP: AFS vnodes are not stacked over another filesystem, so there
 * is no underlying "real" vnode to expose (return path elided here --
 * presumably an error per Solaris convention; confirm). */
1539 int afs_realvp(struct vnode *vp, struct vnode **vpp) {
1540 AFS_STATCNT(afs_realvp);
/* VOP_PAGEIO stub: direct page I/O is not implemented; warns and returns. */
1545 int afs_pageio(vp, pp, ui1, ui2, i1, credp)
1552 afs_warn("afs_pageio: Not implemented\n");
/* VOP_DUMPCTL stub: dump-control operations are not implemented; warns. */
1556 int afs_dumpctl(vp, i
1557 #ifdef AFS_SUN59_ENV
1563 #ifdef AFS_SUN59_ENV
1567 afs_warn("afs_dumpctl: Not implemented\n");
1571 #ifdef AFS_SUN54_ENV
/* VOP_DISPOSE (Solaris 2.4+): page disposal is delegated entirely to the
 * generic fs_dispose() helper. */
1572 extern void afs_dispose(vp, p, fl, dn, cr)
1578 fs_dispose(vp, p, fl, dn, cr);
/* VOP_SETSECATTR: set Solaris-style ACLs on an AFS vnode.  NOTE(review):
 * the return path is elided in this listing; presumably the request is
 * rejected since AFS ACLs are not vsecattr-based -- confirm. */
1581 int afs_setsecattr(vp, vsecattr, flag, creds)
1583 vsecattr_t *vsecattr;
/* VOP_GETSECATTR: delegate to fs_fab_acl(), the Solaris genfs helper that
 * fabricates a minimal ACL from the vnode's permission bits (presumed --
 * fs_fab_acl is declared in sys/fs_subr.h). */
1590 int afs_getsecattr(vp, vsecattr, flag, creds)
1592 vsecattr_t *vsecattr;
1596 return fs_fab_acl(vp, vsecattr, flag, creds);
1600 #ifdef AFS_GLOBAL_SUNLOCK
/* Forward declarations for the global-lock ("gafs_") VOP wrappers below. */
1601 extern int gafs_open(), gafs_close(), afs_ioctl(), gafs_access();
1602 extern int gafs_getattr(), gafs_setattr(), gafs_lookup(), gafs_create();
1603 extern int gafs_remove(), gafs_link(), gafs_rename(), gafs_mkdir();
1604 extern int gafs_rmdir(), gafs_readdir(), gafs_fsync(), gafs_symlink();
1605 extern int gafs_fid(), gafs_readlink(), fs_setfl(), afs_pathconf();
1606 extern int afs_lockctl();
1607 extern void gafs_inactive();
/* Vnode-operations dispatch table registered with the Solaris VFS layer;
 * the individual entries are elided in this listing. */
1609 struct vnodeops Afs_vnodeops = {
1649 #ifdef AFS_SUN54_ENV
1654 #if defined(AFS_SUN56_ENV)
/* Exported handle used when initializing AFS vnodes. */
1658 struct vnodeops *afs_ops = &Afs_vnodeops;
/* Global-lock (AFS_GLOBAL_SUNLOCK) VOP wrapper: delegates to afs_open();
 * the GLOCK/GUNLOCK bracketing lines are elided here -- presumed. */
1662 gafs_open(avcp, aflags, acred)
1663 register struct vcache **avcp;
1665 struct AFS_UCRED *acred;
1670 code = afs_open(avcp, aflags, acred);
/* Global-lock VOP wrapper: delegates to afs_close() (locking lines elided). */
1676 gafs_close(avc, aflags, count, offset, acred)
1679 register struct vcache *avc;
1681 struct AFS_UCRED *acred;
1686 code = afs_close(avc, aflags, count, offset, acred);
/* Global-lock VOP wrapper: delegates to afs_getattr() (locking lines elided). */
1692 gafs_getattr(avc, attrs, flags, acred)
1694 register struct vcache *avc;
1695 register struct vattr *attrs;
1696 struct AFS_UCRED *acred;
1701 code = afs_getattr(avc, attrs, flags, acred);
/* Global-lock VOP wrapper: delegates to afs_setattr() (locking lines elided). */
1707 gafs_setattr(avc, attrs, flags, acred)
1709 register struct vcache *avc;
1710 register struct vattr *attrs;
1711 struct AFS_UCRED *acred;
1716 code = afs_setattr(avc, attrs, flags, acred);
/* Global-lock VOP wrapper: delegates to afs_access() (locking lines elided). */
1722 gafs_access(avc, amode, flags, acred)
1724 register struct vcache *avc;
1725 register afs_int32 amode;
1726 struct AFS_UCRED *acred;
1731 code = afs_access(avc, amode, flags, acred);
/* Global-lock VOP wrapper: delegates to afs_lookup() (locking lines elided). */
1737 gafs_lookup(adp, aname, avcp, pnp, flags, rdir, acred)
1738 struct pathname *pnp;
1741 register struct vcache *adp, **avcp;
1743 struct AFS_UCRED *acred;
1748 code = afs_lookup(adp, aname, avcp, pnp, flags, rdir, acred);
/* Global-lock VOP wrapper: delegates to afs_create() (locking lines elided). */
1754 gafs_create(adp, aname, attrs, aexcl, amode, avcp, acred)
1755 register struct vcache *adp;
1757 struct vattr *attrs;
1760 struct vcache **avcp;
1761 struct AFS_UCRED *acred;
1766 code = afs_create(adp, aname, attrs, aexcl, amode, avcp, acred);
/* Global-lock VOP wrapper: delegates to afs_remove() (locking lines elided). */
1771 gafs_remove(adp, aname, acred)
1772 register struct vcache *adp;
1774 struct AFS_UCRED *acred;
1779 code = afs_remove(adp, aname, acred);
/* Global-lock VOP wrapper: delegates to afs_link() (locking lines elided). */
1784 gafs_link(adp, avc, aname, acred)
1785 register struct vcache *avc;
1786 register struct vcache *adp;
1788 struct AFS_UCRED *acred;
1793 code = afs_link(adp, avc, aname, acred);
/* Global-lock VOP wrapper: delegates to afs_rename() (locking lines elided). */
1798 gafs_rename(aodp, aname1, andp, aname2, acred)
1799 register struct vcache *aodp, *andp;
1800 char *aname1, *aname2;
1801 struct AFS_UCRED *acred;
1806 code = afs_rename(aodp, aname1, andp, aname2, acred);
/* Global-lock VOP wrapper: delegates to afs_mkdir() (locking lines elided). */
1811 gafs_mkdir(adp, aname, attrs, avcp, acred)
1812 register struct vcache *adp;
1813 register struct vcache **avcp;
1815 struct vattr *attrs;
1816 struct AFS_UCRED *acred;
1821 code = afs_mkdir(adp, aname, attrs, avcp, acred);
/* Global-lock VOP wrapper: delegates to afs_rmdir() (locking lines elided). */
1827 gafs_rmdir(adp, aname, cdirp, acred)
1828 struct vnode *cdirp;
1829 register struct vcache *adp;
1831 struct AFS_UCRED *acred;
1836 code = afs_rmdir(adp, aname, cdirp, acred);
/* Global-lock VOP wrapper: delegates to afs_readdir() (locking lines elided). */
1842 gafs_readdir(avc, auio, acred, eofp)
1844 register struct vcache *avc;
1846 struct AFS_UCRED *acred;
1851 code = afs_readdir(avc, auio, acred, eofp);
/* Global-lock VOP wrapper: delegates to afs_symlink() (locking lines elided). */
1856 gafs_symlink(adp, aname, attrs, atargetName, acred)
1857 register struct vcache *adp;
1858 register char *atargetName;
1860 struct vattr *attrs;
1861 struct AFS_UCRED *acred;
1866 code = afs_symlink(adp, aname, attrs, atargetName, acred);
/* Global-lock VOP wrapper: delegates to afs_readlink() (locking lines elided). */
1872 gafs_readlink(avc, auio, acred)
1873 register struct vcache *avc;
1875 struct AFS_UCRED *acred;
1880 code = afs_readlink(avc, auio, acred);
1885 #ifdef AFS_SUN53_ENV
/* Global-lock VOP wrapper for afs_fsync().  Solaris 2.5.3+ (AFS_SUN53_ENV)
 * adds a 'flag' argument to the fsync signature; older builds omit it. */
1886 gafs_fsync(avc, flag, acred)
1889 gafs_fsync(avc, acred)
1891 register struct vcache *avc;
1892 struct AFS_UCRED *acred;
1897 #ifdef AFS_SUN53_ENV
1898 code = afs_fsync(avc, flag, acred);
1900 code = afs_fsync(avc, acred);
/*
 * afs_inactive: last-reference handler for an AFS vnode.  Bails out during
 * shutdown, panics on a non-positive reference count, and otherwise hands
 * the vcache to afs_InactiveVCache() for reclamation.
 */
1906 void afs_inactive(struct vcache *avc, struct AFS_UCRED *acred)
1908 struct vnode *vp = (struct vnode *)avc;
1909 if (afs_shuttingdown) return ; /* nothing to clean up once AFS is shutting down */
1912 * In Solaris and HPUX s800 and HP-UX10.0 they actually call us with
1913 * v_count 1 on last reference!
1915 mutex_enter(&vp->v_lock);
/* NOTE(review): vrefCount presumably aliases vp->v_count (the panic text
 * says "v_count") -- confirm against the vcache definition. */
1916 if (avc->vrefCount <= 0) osi_Panic("afs_inactive : v_count <=0\n");
1919 * If more than 1 don't unmap the vnode but do decrement the ref count
1922 if (vp->v_count > 0) {
1923 mutex_exit(&vp->v_lock);
1926 mutex_exit(&vp->v_lock);
1928 * Solaris calls VOP_OPEN on exec, but isn't very diligent about calling
1929 * VOP_CLOSE when executable exits.
1931 if (avc->opens > 0 && !(avc->states & CCore))
/* Drop open/writer counts leaked by exec'd binaries, unless the CCore
 * state flag is set. */
1932 avc->opens = avc->execsOrWriters = 0;
1934 afs_InactiveVCache(avc, acred);
/* Global-lock VOP wrapper: delegates to afs_inactive() (locking lines elided). */
1937 void gafs_inactive(avc, acred)
1938 register struct vcache *avc;
1939 struct AFS_UCRED *acred;
1942 afs_inactive(avc, acred);
/* Global-lock VOP wrapper: delegates to afs_fid() (locking lines elided). */
1947 gafs_fid(avc, fidpp)
1954 code = afs_fid(avc, fidpp);
1959 #endif /* AFS_GLOBAL_SUNLOCK */