2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 * afs_FlushActiveVcaches
22 * afs_WriteVCacheDiscon
40 #include <afsconfig.h>
41 #include "afs/param.h"
46 #include "afs/sysincludes.h" /*Standard vendor system headers */
47 #include "afsincludes.h" /*AFS-based standard headers */
48 #include "afs/afs_stats.h"
49 #include "afs/afs_cbqueue.h"
50 #include "afs/afs_osidnlc.h"
52 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
53 afs_int32 afs_maxvcount = 0; /* max number of vcache entries */
54 afs_int32 afs_vcount = 0; /* number of vcache in use now */
55 #endif /* AFS_OSF_ENV || AFS_LINUX22_ENV */
63 #endif /* AFS_SGI64_ENV */
65 /* Exported variables */
66 afs_rwlock_t afs_xvcache; /*Lock: alloc new stat cache entries */
67 afs_rwlock_t afs_xvreclaim; /*Lock: entries reclaimed, not on free list */
68 afs_lock_t afs_xvcb; /*Lock: fids on which there are callbacks */
69 #if !defined(AFS_LINUX22_ENV)
70 static struct vcache *freeVCList; /*Free list for stat cache entries */
71 struct vcache *ReclaimedVCList; /*Reclaimed list for stat entries */
72 static struct vcache *Initial_freeVCList; /*Initial list for above */
74 struct afs_q VLRU; /*vcache LRU */
75 afs_int32 vcachegen = 0;
76 unsigned int afs_paniconwarn = 0;
77 struct vcache *afs_vhashT[VCSIZE];
78 struct afs_q afs_vhashTV[VCSIZE];
79 static struct afs_cbr *afs_cbrHashT[CBRSIZE];
80 afs_int32 afs_bulkStatsLost;
81 int afs_norefpanic = 0;
83 /* Forward declarations */
84 static afs_int32 afs_QueueVCB(struct vcache *avc);
87 * Generate an index into the hash table for a given Fid.
89 * \return The hash value.
92 afs_HashCBRFid(struct AFSFid *fid)
94 return (fid->Volume + fid->Vnode + fid->Unique) % CBRSIZE;
98 * Insert a CBR entry into the hash table.
99 * Must be called with afs_xvcb held.
104 afs_InsertHashCBR(struct afs_cbr *cbr)
106 int slot = afs_HashCBRFid(&cbr->fid);
108 cbr->hash_next = afs_cbrHashT[slot];
109 if (afs_cbrHashT[slot])
110 afs_cbrHashT[slot]->hash_pprev = &cbr->hash_next;
112 cbr->hash_pprev = &afs_cbrHashT[slot];
113 afs_cbrHashT[slot] = cbr;
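/*
 * Illustrative sketch (not part of the original source): hash_pprev always
 * points at whatever pointer currently references this entry -- either the
 * table slot itself or the previous entry's hash_next.  That is what lets
 * afs_FreeCBR() unlink an entry in O(1) without walking the chain.  A
 * minimal unlink, mirroring the logic in afs_FreeCBR() below (the helper
 * name is hypothetical):
 */
#if 0
static void
example_UnlinkHashCBR(struct afs_cbr *cbr)
{
    /* caller holds afs_xvcb, as for afs_InsertHashCBR() */
    *(cbr->hash_pprev) = cbr->hash_next;
    if (cbr->hash_next)
        cbr->hash_next->hash_pprev = cbr->hash_pprev;
}
#endif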
118 * Flush the given vcache entry.
121 * afs_xvcache lock must be held for writing upon entry to
122 * prevent people from changing the vrefCount field, and to
123 * protect the lruq and hnext fields.
124 * LOCK: afs_FlushVCache afs_xvcache W
125 * REFCNT: vcache ref count must be zero on entry except for osf1
126 * RACE: lock is dropped and reobtained, permitting race in caller
128 * \param avc Pointer to vcache entry to flush.
129 * \param slept Pointer to int to set 1 if we sleep/drop locks, 0 if we don't.
133 afs_FlushVCache(struct vcache *avc, int *slept)
134 { /*afs_FlushVCache */
137 struct vcache **uvc, *wvc;
140 AFS_STATCNT(afs_FlushVCache);
141 afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
142 ICL_TYPE_INT32, avc->states);
145 VN_LOCK(AFSTOV(avc));
149 code = osi_VM_FlushVCache(avc, slept);
153 if (avc->states & CVFlushed) {
157 #if !defined(AFS_LINUX22_ENV)
158 if (avc->nextfree || !avc->vlruq.prev || !avc->vlruq.next) { /* qv afs.h */
159 refpanic("LRU vs. Free inconsistency");
162 avc->states |= CVFlushed;
163 /* pull the entry out of the lruq and put it on the free list */
164 QRemove(&avc->vlruq);
166 /* keep track of # of files that we bulk stat'd, but never used
167 * before they got recycled.
169 if (avc->states & CBulkStat)
172 /* remove entry from the hash chain */
173 i = VCHash(&avc->fid);
174 uvc = &afs_vhashT[i];
175 for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
178 avc->hnext = (struct vcache *)NULL;
183 /* remove entry from the volume hash table */
184 QRemove(&avc->vhashq);
187 osi_FreeSmallSpace(avc->mvid);
188 avc->mvid = (struct VenusFid *)0;
190 afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
191 avc->linkData = NULL;
193 #if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
194 /* OK, there are no internal vrefCounts, so there shouldn't
195 * be any more refs here. */
197 #ifdef AFS_DARWIN80_ENV
198 vnode_clearfsnode(AFSTOV(avc));
199 vnode_removefsref(AFSTOV(avc));
201 avc->v->v_data = NULL; /* remove from vnode */
203 AFSTOV(avc) = NULL; /* also drop the ptr to vnode */
206 #ifdef AFS_SUN510_ENV
207 /* As we use private vnodes, cleanup is up to us */
208 vn_reinit(AFSTOV(avc));
210 afs_FreeAllAxs(&(avc->Access));
212 /* we can't really give back callbacks on RO files, since the
213 * server only tracks them on a per-volume basis, and we don't
214 * know whether we still have some other files from the same
216 if ((avc->states & CRO) == 0 && avc->callback) {
219 ObtainWriteLock(&afs_xcbhash, 460);
220 afs_DequeueCallback(avc); /* remove it from queued callbacks list */
221 avc->states &= ~(CStatd | CUnique);
222 ReleaseWriteLock(&afs_xcbhash);
223 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
224 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
226 osi_dnlc_purgevp(avc);
229 * Next, keep track of which vnodes we've deleted for create's
230 * optimistic synchronization algorithm
233 if (avc->fid.Fid.Vnode & 1)
238 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
239 /* put the entry in the free list */
240 avc->nextfree = freeVCList;
242 if (avc->vlruq.prev || avc->vlruq.next) {
243 refpanic("LRU vs. Free inconsistency");
245 avc->states |= CVFlushed;
247 /* This should put it back on the vnode free list since usecount is 1 */
250 if (VREFCOUNT_GT(avc,0)) {
251 #if defined(AFS_OSF_ENV)
252 VN_UNLOCK(AFSTOV(avc));
254 AFS_RELE(AFSTOV(avc));
256 if (afs_norefpanic) {
257 printf("flush vc refcnt < 1\n");
259 #if defined(AFS_OSF_ENV)
260 (void)vgone(avc, VX_NOSLEEP, NULL);
262 VN_UNLOCK(AFSTOV(avc));
265 osi_Panic("flush vc refcnt < 1");
267 #endif /* AFS_OSF_ENV */
272 VN_UNLOCK(AFSTOV(avc));
276 } /*afs_FlushVCache */
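/*
 * Illustrative sketch (not part of the original source): the calling
 * convention described above.  The caller holds afs_xvcache for writing,
 * passes a "slept" flag so it can tell whether the lock was dropped and
 * reobtained, and expects the vcache reference count to already be zero.
 * The lock-history number (735) is hypothetical.
 */
#if 0
{
    int slept = 0;
    afs_int32 code;

    ObtainWriteLock(&afs_xvcache, 735);
    code = afs_FlushVCache(tvc, &slept);
    if (slept) {
        /* the lock was dropped and reobtained; any cached traversal state
         * (e.g. a VLRU position) may be stale and must be rechecked */
    }
    ReleaseWriteLock(&afs_xvcache);
}
#endif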
280 * The core of the inactive vnode op for all but IRIX.
286 afs_InactiveVCache(struct vcache *avc, struct AFS_UCRED *acred)
288 AFS_STATCNT(afs_inactive);
289 if (avc->states & CDirty) {
290 /* we can't keep trying to push back dirty data forever. Give up. */
291 afs_InvalidateAllSegments(avc); /* turns off dirty bit */
293 avc->states &= ~CMAPPED; /* mainly used by SunOS 4.0.x */
294 avc->states &= ~CDirty; /* Turn it off */
295 if (avc->states & CUnlinked) {
296 if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
297 avc->states |= CUnlinkedDel;
300 afs_remunlink(avc, 1); /* ignore any return code */
307 * Allocate a callback return structure from the
308 * free list and return it.
310 * Environment: The alloc and free routines are both called with the afs_xvcb lock
311 * held, so we don't have to worry about blocking in osi_Alloc.
313 * \return The allocated afs_cbr.
315 static struct afs_cbr *afs_cbrSpace = 0;
319 register struct afs_cbr *tsp;
322 while (!afs_cbrSpace) {
323 if (afs_stats_cmperf.CallBackAlloced >= 2) {
324 /* don't allocate more than 2 * AFS_NCBRS for now */
326 afs_stats_cmperf.CallBackFlushes++;
330 (struct afs_cbr *)afs_osi_Alloc(AFS_NCBRS *
331 sizeof(struct afs_cbr));
332 for (i = 0; i < AFS_NCBRS - 1; i++) {
333 tsp[i].next = &tsp[i + 1];
335 tsp[AFS_NCBRS - 1].next = 0;
337 afs_stats_cmperf.CallBackAlloced++;
341 afs_cbrSpace = tsp->next;
346 * Free a callback return structure, removing it from all lists.
348 * Environment: the xvcb lock is held over these calls.
350 * \param asp The address of the structure to free.
355 afs_FreeCBR(register struct afs_cbr *asp)
357 *(asp->pprev) = asp->next;
359 asp->next->pprev = asp->pprev;
361 *(asp->hash_pprev) = asp->hash_next;
363 asp->hash_next->hash_pprev = asp->hash_pprev;
365 asp->next = afs_cbrSpace;
371 * Flush all queued callbacks to all servers.
373 * Environment: holds xvcb lock over RPC to guard against race conditions
374 * when a new callback is granted for the same file later on.
376 * \return 0 for success.
379 afs_FlushVCBs(afs_int32 lockit)
381 struct AFSFid *tfids;
382 struct AFSCallBack callBacks[1];
383 struct AFSCBFids fidArray;
384 struct AFSCBs cbArray;
386 struct afs_cbr *tcbrp;
390 struct vrequest treq;
392 int safety1, safety2, safety3;
394 if ((code = afs_InitReq(&treq, afs_osi_credp)))
396 treq.flags |= O_NONBLOCK;
397 tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
400 MObtainWriteLock(&afs_xvcb, 273);
401 ObtainReadLock(&afs_xserver);
402 for (i = 0; i < NSERVERS; i++) {
403 for (safety1 = 0, tsp = afs_servers[i];
404 tsp && safety1 < afs_totalServers + 10;
405 tsp = tsp->next, safety1++) {
407 if (tsp->cbrs == (struct afs_cbr *)0)
410 /* otherwise, grab a block of AFS_MAXCBRSCALL from the list
411 * and make an RPC, over and over again.
413 tcount = 0; /* number found so far */
414 for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
415 if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
416 /* if buffer is full, or we've queued all we're going
417 * to from this server, we should flush out the
420 fidArray.AFSCBFids_len = tcount;
421 fidArray.AFSCBFids_val = (struct AFSFid *)tfids;
422 cbArray.AFSCBs_len = 1;
423 cbArray.AFSCBs_val = callBacks;
424 memset(&callBacks[0], 0, sizeof(callBacks[0]));
425 callBacks[0].CallBackType = CB_EXCLUSIVE;
426 for (safety3 = 0; safety3 < MAXHOSTS * 2; safety3++) {
427 tc = afs_ConnByHost(tsp, tsp->cell->fsport,
428 tsp->cell->cellNum, &treq, 0,
432 (AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
435 RXAFS_GiveUpCallBacks(tc->id, &fidArray,
443 AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS, SHARED_LOCK,
448 /* ignore return code, since callbacks may have
449 * been returned anyway; we shouldn't leave them
450 * around to be returned again.
452 * Next, see if we are done with this server, and if so,
453 * break to deal with the next one.
459 /* if to flush full buffer */
460 /* if we make it here, we have an entry at the head of cbrs,
461 * which we should copy to the file ID array and then free.
464 tfids[tcount++] = tcbrp->fid;
466 /* Freeing the CBR will unlink it from the server's CBR list */
468 } /* while loop for this one server */
469 if (safety2 > afs_cacheStats) {
470 afs_warn("possible internal error afs_flushVCBs (%d)\n",
473 } /* for loop for this hash chain */
474 } /* loop through all hash chains */
475 if (safety1 > afs_totalServers + 2) {
477 ("AFS internal error (afs_flushVCBs) (%d > %d), continuing...\n",
478 safety1, afs_totalServers + 2);
480 osi_Panic("afs_flushVCBS safety1");
483 ReleaseReadLock(&afs_xserver);
485 MReleaseWriteLock(&afs_xvcb);
486 afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
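/*
 * Illustrative sketch (not part of the original source): a typical call.
 * The meaning of the lockit argument is an assumption here, since the
 * conditional locking code is not shown above: nonzero asks the routine to
 * take afs_xvcb itself around the RPCs, while a caller that already holds
 * afs_xvcb would pass 0.
 */
#if 0
    code = afs_FlushVCBs(1);	/* we do not hold afs_xvcb; let it lock */
#endif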
491 * Queue a callback on the given fid.
494 * Locks the xvcb lock.
495 * Called when the xvcache lock is already held.
497 * \param avc vcache entry
498 * \return 0 for success < 0 otherwise.
502 afs_QueueVCB(struct vcache *avc)
505 struct afs_cbr *tcbp;
507 AFS_STATCNT(afs_QueueVCB);
508 /* The callback is really just a struct server ptr. */
509 tsp = (struct server *)(avc->callback);
511 /* we now have a pointer to the server, so we just allocate
512 * a queue entry and queue it.
514 MObtainWriteLock(&afs_xvcb, 274);
515 tcbp = afs_AllocCBR();
516 tcbp->fid = avc->fid.Fid;
518 tcbp->next = tsp->cbrs;
520 tsp->cbrs->pprev = &tcbp->next;
523 tcbp->pprev = &tsp->cbrs;
525 afs_InsertHashCBR(tcbp);
527 /* now release locks and return */
528 MReleaseWriteLock(&afs_xvcb);
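/*
 * Illustrative sketch (not part of the original source): the typical call
 * site.  When a vcache that still holds a (non read-only) callback is
 * flushed, the callback is queued here so afs_FlushVCBs() can return it to
 * the server later.  This mirrors the CRO/callback test in afs_FlushVCache()
 * above; the actual call there is not shown in this excerpt.
 */
#if 0
    if ((avc->states & CRO) == 0 && avc->callback)
        afs_QueueVCB(avc);	/* afs_xvcache is held by the caller */
#endif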
534 * Remove a queued callback for a given Fid.
537 * Locks xvcb and xserver locks.
538 * Typically called with xdcache, xvcache and/or individual vcache
541 * \param afid The fid we want cleansed of queued callbacks.
546 afs_RemoveVCB(struct VenusFid *afid)
549 struct afs_cbr *cbr, *ncbr;
551 AFS_STATCNT(afs_RemoveVCB);
552 MObtainWriteLock(&afs_xvcb, 275);
554 slot = afs_HashCBRFid(&afid->Fid);
555 ncbr = afs_cbrHashT[slot];
559 ncbr = cbr->hash_next;
561 if (afid->Fid.Volume == cbr->fid.Volume &&
562 afid->Fid.Vnode == cbr->fid.Vnode &&
563 afid->Fid.Unique == cbr->fid.Unique) {
568 MReleaseWriteLock(&afs_xvcb);
572 afs_FlushReclaimedVcaches(void)
574 #if !defined(AFS_LINUX22_ENV)
577 struct vcache *tmpReclaimedVCList = NULL;
579 ObtainWriteLock(&afs_xvreclaim, 76);
580 while (ReclaimedVCList) {
581 tvc = ReclaimedVCList; /* take from free list */
582 ReclaimedVCList = tvc->nextfree;
583 tvc->nextfree = NULL;
584 code = afs_FlushVCache(tvc, &fv_slept);
586 /* Ok, so, if we got code != 0, uh, wtf do we do? */
587 /* Probably, build a temporary list and then put all back when we
588 get to the end of the list */
589 /* This is actually really crappy, but we need to not leak these.
590 We probably need a way to be smarter about this. */
591 tvc->nextfree = tmpReclaimedVCList;
592 tmpReclaimedVCList = tvc;
593 printf("Reclaim list flush %lx failed: %d\n", (unsigned long) tvc, code);
595 if (tvc->states & (CVInit
596 #ifdef AFS_DARWIN80_ENV
600 tvc->states &= ~(CVInit
601 #ifdef AFS_DARWIN80_ENV
605 afs_osi_Wakeup(&tvc->states);
608 if (tmpReclaimedVCList)
609 ReclaimedVCList = tmpReclaimedVCList;
611 ReleaseWriteLock(&afs_xvreclaim);
616 * This routine is responsible for allocating a new cache entry
617 * from the free list. It formats the cache entry and inserts it
618 * into the appropriate hash tables. It must be called with
619 * afs_xvcache write-locked so as to prevent several processes from
620 * trying to create a new cache entry simultaneously.
622 * LOCK: afs_NewVCache afs_xvcache W
624 * \param afid The file id of the file whose cache entry is being created.
626 * \return The new vcache struct.
629 afs_NewVCache(struct VenusFid *afid, struct server *serverp)
633 afs_int32 anumber = VCACHE_FREE;
635 struct gnode *gnodepnt;
639 #endif /* AFS_OSF_ENV */
640 struct afs_q *tq, *uq;
643 AFS_STATCNT(afs_NewVCache);
645 afs_FlushReclaimedVcaches();
647 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
648 #if defined(AFS_OSF30_ENV) || defined(AFS_LINUX22_ENV)
649 if (afs_vcount >= afs_maxvcount)
652 * If we are using > 33 % of the total system vnodes for AFS vcache
653 * entries or we are using the maximum number of vcache entries,
654 * then free some. (if our usage is > 33% we should free some, if
655 * our usage is > afs_maxvcount, set elsewhere to 0.5*nvnode,
656 * we _must_ free some -- no choice).
658 if (((3 * afs_vcount) > nvnode) || (afs_vcount >= afs_maxvcount))
665 for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
668 if (tvc->states & CVFlushed) {
669 refpanic("CVFlushed on VLRU");
670 } else if (i++ > afs_maxvcount) {
671 refpanic("Exceeded pool of AFS vnodes(VLRU cycle?)");
672 } else if (QNext(uq) != tq) {
673 refpanic("VLRU inconsistent");
674 } else if (!VREFCOUNT_GT(tvc,0)) {
675 refpanic("refcnt 0 on VLRU");
678 #if defined(AFS_LINUX22_ENV)
679 if (tvc != afs_globalVp && VREFCOUNT(tvc) > 1 && tvc->opens == 0) {
680 struct dentry *dentry;
681 struct list_head *cur, *head;
683 #if defined(AFS_LINUX24_ENV)
684 spin_lock(&dcache_lock);
686 head = &(AFSTOV(tvc))->i_dentry;
690 while ((cur = cur->next) != head) {
691 dentry = list_entry(cur, struct dentry, d_alias);
693 if (d_unhashed(dentry))
698 #if defined(AFS_LINUX24_ENV)
699 spin_unlock(&dcache_lock);
701 if (d_invalidate(dentry) == -EBUSY) {
703 /* perhaps lock and try to continue? (use cur as head?) */
707 #if defined(AFS_LINUX24_ENV)
708 spin_lock(&dcache_lock);
712 #if defined(AFS_LINUX24_ENV)
713 spin_unlock(&dcache_lock);
720 if (VREFCOUNT_GT(tvc,0) && !VREFCOUNT_GT(tvc,1) &&
722 && (tvc->states & CUnlinkedDel) == 0) {
723 code = afs_FlushVCache(tvc, &fv_slept);
730 continue; /* start over - may have raced. */
736 if (anumber == VCACHE_FREE) {
737 printf("afs_NewVCache: warning none freed, using %d of %d\n",
738 afs_vcount, afs_maxvcount);
739 if (afs_vcount >= afs_maxvcount) {
740 printf("afs_NewVCache - none freed\n");
744 } /* finished freeing up space */
746 /* Alloc new vnode. */
747 #if defined(AFS_LINUX22_ENV)
752 ip = new_inode(afs_globalVFS);
754 osi_Panic("afs_NewVCache: no more inodes");
756 #if defined(STRUCT_SUPER_HAS_ALLOC_INODE)
759 tvc = afs_osi_Alloc(sizeof(struct vcache));
760 ip->u.generic_ip = tvc;
766 if (getnewvnode(MOUNT_AFS, &Afs_vnodeops, &nvc)) {
767 /* What should we do ???? */
768 osi_Panic("afs_NewVCache: no more vnodes");
773 tvc->nextfree = NULL;
776 #else /* AFS_OSF_ENV */
777 /* pull out a free cache entry */
781 for (tq = VLRU.prev; (anumber > 0) && (tq != &VLRU); tq = uq) {
785 if (tvc->states & CVFlushed) {
786 refpanic("CVFlushed on VLRU");
787 } else if (i++ > 2 * afs_cacheStats) { /* even allowing for a few xallocs... */
788 refpanic("Increase -stat parameter of afsd(VLRU cycle?)");
789 } else if (QNext(uq) != tq) {
790 refpanic("VLRU inconsistent");
791 } else if (tvc->states & CVInit) {
795 if (!VREFCOUNT_GT(tvc,0)
796 #if defined(AFS_DARWIN_ENV) && !defined(UKERNEL) && !defined(AFS_DARWIN80_ENV)
797 || ((VREFCOUNT(tvc) == 1) &&
798 (UBCINFOEXISTS(AFSTOV(tvc))))
800 && tvc->opens == 0 && (tvc->states & CUnlinkedDel) == 0) {
801 #if defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
802 #ifdef AFS_DARWIN80_ENV
803 vnode_t tvp = AFSTOV(tvc);
804 /* VREFCOUNT_GT only sees usecounts, not iocounts */
805 /* so this may fail to actually recycle the vnode now */
806 /* must call vnode_get to avoid races. */
808 if (vnode_get(tvp) == 0) {
810 /* must release lock, since vnode_put will immediately
811 reclaim if there are no other users */
812 ReleaseWriteLock(&afs_xvcache);
817 ObtainWriteLock(&afs_xvcache, 336);
819 /* we can't use the vnode_recycle return value to figure
820 * this out, since the iocount we have to hold makes it
822 if (AFSTOV(tvc) == tvp) {
823 if (anumber > 0 && fv_slept) {
824 QRemove(&tvc->vlruq);
825 QAdd(&VLRU, &tvc->vlruq);
832 * vgone() reclaims the vnode, which calls afs_FlushVCache(),
833 * then it puts the vnode on the free list.
834 * If we don't do this we end up with a cleaned vnode that's
835 * not on the free list.
836 * XXX assume FreeBSD is the same for now.
845 code = afs_FlushVCache(tvc, &fv_slept);
855 continue; /* start over - may have raced. */
861 } /* end of if (!freeVCList) */
864 /* none free, making one is better than a panic */
865 afs_stats_cmperf.vcacheXAllocs++; /* count in case we have a leak */
866 tvc = (struct vcache *)afs_osi_Alloc(sizeof(struct vcache));
867 #if defined(AFS_DARWIN_ENV) && !defined(UKERNEL)
868 tvc->v = NULL; /* important to clean this, or use memset 0 */
870 #ifdef KERNEL_HAVE_PIN
871 pin((char *)tvc, sizeof(struct vcache)); /* XXX */
873 #if defined(AFS_SGI_ENV)
875 char name[METER_NAMSZ];
876 memset(tvc, 0, sizeof(struct vcache));
877 tvc->v.v_number = ++afsvnumbers;
878 tvc->vc_rwlockid = OSI_NO_LOCKID;
879 initnsema(&tvc->vc_rwlock, 1,
880 makesname(name, "vrw", tvc->v.v_number));
881 #ifndef AFS_SGI53_ENV
882 initnsema(&tvc->v.v_sync, 0,
883 makesname(name, "vsy", tvc->v.v_number));
885 #ifndef AFS_SGI62_ENV
886 initnlock(&tvc->v.v_lock,
887 makesname(name, "vlk", tvc->v.v_number));
890 #endif /* AFS_SGI_ENV */
892 tvc = freeVCList; /* take from free list */
893 freeVCList = tvc->nextfree;
894 tvc->nextfree = NULL;
895 } /* end of if (!freeVCList) */
897 #endif /* AFS_OSF_ENV */
899 #if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
901 panic("afs_NewVCache(): free vcache with vnode attached");
904 #if !defined(AFS_SGI_ENV) && !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
905 memset((char *)tvc, 0, sizeof(struct vcache));
910 RWLOCK_INIT(&tvc->lock, "vcache lock");
911 #if defined(AFS_SUN5_ENV)
912 RWLOCK_INIT(&tvc->vlock, "vcache vlock");
913 #endif /* defined(AFS_SUN5_ENV) */
915 tvc->parentVnode = 0;
917 tvc->linkData = NULL;
920 tvc->execsOrWriters = 0;
923 tvc->states = CVInit;
924 tvc->last_looker = 0;
926 tvc->asynchrony = -1;
929 tvc->flushDV.low = tvc->flushDV.high = AFS_MAXDV;
932 tvc->truncPos = AFS_NOTRUNC; /* don't truncate until we need to */
933 hzero(tvc->m.DataVersion); /* in case we copy it into flushDV */
935 tvc->callback = serverp; /* to minimize chance that clear
937 #if defined(AFS_DISCON_ENV)
938 tvc->ddirty_flags = 0;
946 tvc->hnext = afs_vhashT[i];
948 QAdd(&afs_vhashTV[j], &tvc->vhashq);
950 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
951 refpanic("NewVCache VLRU inconsistent");
953 QAdd(&VLRU, &tvc->vlruq); /* put in lruq */
954 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
955 refpanic("NewVCache VLRU inconsistent2");
957 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
958 refpanic("NewVCache VLRU inconsistent3");
960 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
961 refpanic("NewVCache VLRU inconsistent4");
964 /* it should now be safe to drop the xvcache lock */
966 ReleaseWriteLock(&afs_xvcache);
968 afs_nbsd_getnewvnode(tvc); /* includes one refcount */
970 ObtainWriteLock(&afs_xvcache,337);
971 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
973 #ifdef AFS_DARWIN_ENV
974 ReleaseWriteLock(&afs_xvcache);
976 afs_darwin_getnewvnode(tvc); /* includes one refcount */
978 ObtainWriteLock(&afs_xvcache,338);
979 #ifdef AFS_DARWIN80_ENV
980 LOCKINIT(tvc->rwlock);
982 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
989 ReleaseWriteLock(&afs_xvcache);
991 #if defined(AFS_FBSD60_ENV)
992 if (getnewvnode(MOUNT_AFS, afs_globalVFS, &afs_vnodeops, &vp))
993 #elif defined(AFS_FBSD50_ENV)
994 if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
996 if (getnewvnode(VT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
998 panic("afs getnewvnode"); /* can't happen */
1000 ObtainWriteLock(&afs_xvcache,339);
1001 if (tvc->v != NULL) {
1002 /* I'd like to know if this ever happens...
1003 * We don't drop global for the rest of this function,
1004 * so if we do lose the race, the other thread should
1005 * have found the same vnode and finished initializing
1006 * the vcache entry. Is it conceivable that this vcache
1007 * entry could be recycled during this interval? If so,
1008 * then there probably needs to be some sort of additional
1009 * mutual exclusion (an Embryonic flag would suffice).
1011 printf("afs_NewVCache: lost the race\n");
1015 tvc->v->v_data = tvc;
1016 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
1020 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
1021 /* Hold it for the LRU (should make count 2) */
1022 VN_HOLD(AFSTOV(tvc));
1023 #else /* AFS_OSF_ENV */
1024 #if !(defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV))
1025 VREFCOUNT_SET(tvc, 1); /* us */
1026 #endif /* AFS_XBSD_ENV */
1027 #endif /* AFS_OSF_ENV */
1028 #ifdef AFS_AIX32_ENV
1029 LOCK_INIT(&tvc->pvmlock, "vcache pvmlock");
1030 tvc->vmh = tvc->segid = NULL;
1034 #if defined(AFS_CACHE_BYPASS)
1035 tvc->cachingStates = 0;
1036 tvc->cachingTransitions = 0;
1039 #ifdef AFS_BOZONLOCK_ENV
1040 #if defined(AFS_SUN5_ENV)
1041 rw_init(&tvc->rwlock, "vcache rwlock", RW_DEFAULT, NULL);
1043 #if defined(AFS_SUN55_ENV)
1044 /* This is required if the kaio (kernel asynchronous io)
1045 ** module is installed. Inside the kernel, the function
1046 ** check_vp( common/os/aio.c) checks to see if the kernel has
1047 ** to provide asynchronous io for this vnode. This
1048 ** function extracts the device number by following the
1049 ** v_data field of the vnode. If we do not set this field
1050 ** then the system panics. The value of the v_data field
1051 ** is not really important for AFS vnodes because the kernel
1052 ** does not do asynchronous io for regular files. Hence,
1053 ** for the time being, we fill up the v_data field with the
1054 ** vnode pointer itself. */
1055 tvc->v.v_data = (char *)tvc;
1056 #endif /* AFS_SUN55_ENV */
1058 afs_BozonInit(&tvc->pvnLock, tvc);
1061 /* initialize vnode data, note vrefCount is v.v_count */
1063 /* Don't forget to free the gnode space */
1064 tvc->v.v_gnode = gnodepnt =
1065 (struct gnode *)osi_AllocSmallSpace(sizeof(struct gnode));
1066 memset((char *)gnodepnt, 0, sizeof(struct gnode));
1068 #ifdef AFS_SGI64_ENV
1069 memset((void *)&(tvc->vc_bhv_desc), 0, sizeof(tvc->vc_bhv_desc));
1070 bhv_desc_init(&(tvc->vc_bhv_desc), tvc, tvc, &Afs_vnodeops);
1071 #ifdef AFS_SGI65_ENV
1072 vn_bhv_head_init(&(tvc->v.v_bh), "afsvp");
1073 vn_bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
1075 bhv_head_init(&(tvc->v.v_bh));
1076 bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
1078 #ifdef AFS_SGI65_ENV
1079 tvc->v.v_mreg = tvc->v.v_mregb = (struct pregion *)tvc;
1080 #ifdef VNODE_TRACING
1081 tvc->v.v_trace = ktrace_alloc(VNODE_TRACE_SIZE, 0);
1083 init_bitlock(&tvc->v.v_pcacheflag, VNODE_PCACHE_LOCKBIT, "afs_pcache",
1085 init_mutex(&tvc->v.v_filocksem, MUTEX_DEFAULT, "afsvfl", (long)tvc);
1086 init_mutex(&tvc->v.v_buf_lock, MUTEX_DEFAULT, "afsvnbuf", (long)tvc);
1088 vnode_pcache_init(&tvc->v);
1089 #if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
1090 /* Above define is never true except in SGI test kernels. */
1091 init_bitlock(&(tvc->v.v_flag), VLOCK, "vnode", tvc->v.v_number);
1093 #ifdef INTR_KTHREADS
1094 AFS_VN_INIT_BUF_LOCK(&(tvc->v));
1097 SetAfsVnode(AFSTOV(tvc));
1098 #endif /* AFS_SGI64_ENV */
1100 * The proper value for mvstat (for root fids) is set up by the caller.
1103 if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
1105 if (afs_globalVFS == 0)
1106 osi_Panic("afs globalvfs");
1107 #if !defined(AFS_LINUX22_ENV)
1108 vSetVfsp(tvc, afs_globalVFS);
1110 vSetType(tvc, VREG);
1112 tvc->v.v_vfsnext = afs_globalVFS->vfs_vnodes; /* link off vfs */
1113 tvc->v.v_vfsprev = NULL;
1114 afs_globalVFS->vfs_vnodes = &tvc->v;
1115 if (tvc->v.v_vfsnext != NULL)
1116 tvc->v.v_vfsnext->v_vfsprev = &tvc->v;
1117 tvc->v.v_next = gnodepnt->gn_vnode; /*Single vnode per gnode for us! */
1118 gnodepnt->gn_vnode = &tvc->v;
1120 #if defined(AFS_DUX40_ENV)
1121 insmntque(tvc, afs_globalVFS, &afs_ubcops);
1124 /* Is this needed??? */
1125 insmntque(tvc, afs_globalVFS);
1126 #endif /* AFS_OSF_ENV */
1127 #endif /* AFS_DUX40_ENV */
1128 #ifdef AFS_FBSD70_ENV
1129 #ifndef AFS_FBSD80_ENV /* yup. they put it back. */
1130 insmntque(AFSTOV(tvc), afs_globalVFS);
1133 #if defined(AFS_SGI_ENV)
1134 VN_SET_DPAGES(&(tvc->v), (struct pfdat *)NULL);
1135 osi_Assert((tvc->v.v_flag & VINACT) == 0);
1137 osi_Assert(VN_GET_PGCNT(&(tvc->v)) == 0);
1138 osi_Assert(tvc->mapcnt == 0 && tvc->vc_locktrips == 0);
1139 osi_Assert(tvc->vc_rwlockid == OSI_NO_LOCKID);
1140 osi_Assert(tvc->v.v_filocks == NULL);
1141 #if !defined(AFS_SGI65_ENV)
1142 osi_Assert(tvc->v.v_filocksem == NULL);
1144 osi_Assert(tvc->cred == NULL);
1145 #ifdef AFS_SGI64_ENV
1146 vnode_pcache_reinit(&tvc->v);
1147 tvc->v.v_rdev = NODEV;
1149 vn_initlist((struct vnlist *)&tvc->v);
1151 #endif /* AFS_SGI_ENV */
1153 osi_dnlc_purgedp(tvc); /* this may be overkill */
1154 memset((char *)&(tvc->callsort), 0, sizeof(struct afs_q));
1156 tvc->states &=~ CVInit;
1157 afs_osi_Wakeup(&tvc->states);
1161 } /*afs_NewVCache */
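/*
 * Illustrative sketch (not part of the original source): how the lookup
 * paths below use this routine.  afs_xvcache must be write-locked across
 * the call; afs_GetVCache(), for example, upgrades its shared lock first
 * and downgrades it again afterwards.
 */
#if 0
    UpgradeSToWLock(&afs_xvcache, 21);
    tvc = afs_NewVCache(afid, NULL);	/* serverp may be NULL */
    newvcache = 1;
    ConvertWToSLock(&afs_xvcache);
#endif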
1167 * LOCK: afs_FlushActiveVcaches afs_xvcache N
1169 * \param doflocks : Do we handle flocks?
1172 afs_FlushActiveVcaches(register afs_int32 doflocks)
1174 register struct vcache *tvc;
1176 register struct afs_conn *tc;
1177 register afs_int32 code;
1178 register struct AFS_UCRED *cred = NULL;
1179 struct vrequest treq, ureq;
1180 struct AFSVolSync tsync;
1183 AFS_STATCNT(afs_FlushActiveVcaches);
1184 ObtainReadLock(&afs_xvcache);
1185 for (i = 0; i < VCSIZE; i++) {
1186 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
1187 if (tvc->states & CVInit) continue;
1188 #ifdef AFS_DARWIN80_ENV
1189 if (tvc->states & CDeadVnode &&
1190 (tvc->states & (CCore|CUnlinkedDel) ||
1191 tvc->flockCount)) panic("Dead vnode has core/unlinkedel/flock");
1193 if (doflocks && tvc->flockCount != 0) {
1194 /* if this entry has an flock, send a keep-alive call out */
1196 ReleaseReadLock(&afs_xvcache);
1197 ObtainWriteLock(&tvc->lock, 51);
1199 afs_InitReq(&treq, afs_osi_credp);
1200 treq.flags |= O_NONBLOCK;
1202 tc = afs_Conn(&tvc->fid, &treq, SHARED_LOCK);
1204 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
1207 RXAFS_ExtendLock(tc->id,
1208 (struct AFSFid *)&tvc->fid.Fid,
1214 } while (afs_Analyze
1215 (tc, code, &tvc->fid, &treq,
1216 AFS_STATS_FS_RPCIDX_EXTENDLOCK, SHARED_LOCK, NULL));
1218 ReleaseWriteLock(&tvc->lock);
1219 #ifdef AFS_DARWIN80_ENV
1221 ObtainReadLock(&afs_xvcache);
1223 ObtainReadLock(&afs_xvcache);
1228 if ((tvc->states & CCore) || (tvc->states & CUnlinkedDel)) {
1230 * Don't let it evaporate in case someone else is in
1231 * this code. Also, drop the afs_xvcache lock while
1232 * getting vcache locks.
1235 ReleaseReadLock(&afs_xvcache);
1236 #ifdef AFS_BOZONLOCK_ENV
1237 afs_BozonLock(&tvc->pvnLock, tvc);
1239 #if defined(AFS_SGI_ENV)
1241 * That's because if we come in via the CUnlinkedDel bit state path we'll have a 0 refcnt
1243 osi_Assert(VREFCOUNT_GT(tvc,0));
1244 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1246 ObtainWriteLock(&tvc->lock, 52);
1247 if (tvc->states & CCore) {
1248 tvc->states &= ~CCore;
1249 /* XXXX Find better place-holder for cred XXXX */
1250 cred = (struct AFS_UCRED *)tvc->linkData;
1251 tvc->linkData = NULL; /* XXX */
1252 afs_InitReq(&ureq, cred);
1253 afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
1254 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
1255 tvc->execsOrWriters);
1256 code = afs_StoreOnLastReference(tvc, &ureq);
1257 ReleaseWriteLock(&tvc->lock);
1258 #ifdef AFS_BOZONLOCK_ENV
1259 afs_BozonUnlock(&tvc->pvnLock, tvc);
1261 hzero(tvc->flushDV);
1264 if (code && code != VNOVNODE) {
1265 afs_StoreWarn(code, tvc->fid.Fid.Volume,
1266 /* /dev/console */ 1);
1268 } else if (tvc->states & CUnlinkedDel) {
1272 ReleaseWriteLock(&tvc->lock);
1273 #ifdef AFS_BOZONLOCK_ENV
1274 afs_BozonUnlock(&tvc->pvnLock, tvc);
1276 #if defined(AFS_SGI_ENV)
1277 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1279 afs_remunlink(tvc, 0);
1280 #if defined(AFS_SGI_ENV)
1281 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1284 /* lost (or won, perhaps) the race condition */
1285 ReleaseWriteLock(&tvc->lock);
1286 #ifdef AFS_BOZONLOCK_ENV
1287 afs_BozonUnlock(&tvc->pvnLock, tvc);
1290 #if defined(AFS_SGI_ENV)
1291 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1293 #ifdef AFS_DARWIN80_ENV
1296 AFS_RELE(AFSTOV(tvc));
1297 /* Matches write code setting CCore flag */
1300 ObtainReadLock(&afs_xvcache);
1302 ObtainReadLock(&afs_xvcache);
1305 AFS_RELE(AFSTOV(tvc));
1306 /* Matches write code setting CCore flag */
1313 ReleaseReadLock(&afs_xvcache);
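/*
 * Illustrative sketch (not part of the original source): this routine is
 * the sort of thing a periodic background daemon calls.  Passing a nonzero
 * doflocks asks it to send RXAFS_ExtendLock keep-alives for entries holding
 * flocks, in addition to handling CCore and CUnlinkedDel entries.  The
 * daemon context is an assumption, not taken from this excerpt.
 */
#if 0
    afs_FlushActiveVcaches(1);	/* also keep byte-range file locks alive */
#endif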
1334 * Make sure a cache entry is up-to-date status-wise.
1336 * NOTE: everywhere that calls this can potentially be sped up
1337 * by checking CStatd first, and avoiding doing the InitReq
1338 * if this is up-to-date.
1340 * These days, the only places that call this already know that the
1341 * vcache is not up-to-date, so we don't screw around.
1343 * \param avc Pointer to vcache entry to verify.
1346 * \return 0 for success or other error codes.
1349 afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
1351 register struct vcache *tvc;
1353 AFS_STATCNT(afs_VerifyVCache);
1355 #if defined(AFS_OSF_ENV)
1356 ObtainReadLock(&avc->lock);
1357 if (afs_IsWired(avc)) {
1358 ReleaseReadLock(&avc->lock);
1361 ReleaseReadLock(&avc->lock);
1362 #endif /* AFS_OSF_ENV */
1363 /* otherwise we must fetch the status info */
1365 ObtainWriteLock(&avc->lock, 53);
1366 if (avc->states & CStatd) {
1367 ReleaseWriteLock(&avc->lock);
1370 ObtainWriteLock(&afs_xcbhash, 461);
1371 avc->states &= ~(CStatd | CUnique);
1372 avc->callback = NULL;
1373 afs_DequeueCallback(avc);
1374 ReleaseWriteLock(&afs_xcbhash);
1375 ReleaseWriteLock(&avc->lock);
1377 /* since we've been called back, or the callback has expired,
1378 * it's possible that the contents of this directory, or this
1379 * file's name have changed, thus invalidating the dnlc contents.
1381 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
1382 osi_dnlc_purgedp(avc);
1384 osi_dnlc_purgevp(avc);
1386 /* fetch the status info */
1387 tvc = afs_GetVCache(&avc->fid, areq, NULL, avc);
1390 /* Put it back; caller has already incremented vrefCount */
1394 } /*afs_VerifyVCache */
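/*
 * Illustrative sketch (not part of the original source): the speed-up the
 * NOTE above describes.  A caller that tests CStatd itself can skip both
 * the vrequest setup and this routine when the entry is already valid.
 * The acred variable is hypothetical.
 */
#if 0
    if (!(avc->states & CStatd)) {
        struct vrequest treq;

        if ((code = afs_InitReq(&treq, acred)))
            return code;
        code = afs_VerifyVCache2(avc, &treq);
    }
#endif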
1398 * Simple copy of stat info into cache.
1400 * Callers: as of 1992-04-29, only called by WriteVCache
1402 * \param avc Ptr to vcache entry involved.
1403 * \param astat Ptr to stat info to copy.
1407 afs_SimpleVStat(register struct vcache *avc,
1408 register struct AFSFetchStatus *astat, struct vrequest *areq)
1411 AFS_STATCNT(afs_SimpleVStat);
1414 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1415 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1417 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
1419 #ifdef AFS_64BIT_CLIENT
1420 FillInt64(length, astat->Length_hi, astat->Length);
1421 #else /* AFS_64BIT_CLIENT */
1422 length = astat->Length;
1423 #endif /* AFS_64BIT_CLIENT */
1424 #if defined(AFS_SGI_ENV)
1425 osi_Assert((valusema(&avc->vc_rwlock) <= 0)
1426 && (OSI_GET_LOCKID() == avc->vc_rwlockid));
1427 if (length < avc->m.Length) {
1428 vnode_t *vp = (vnode_t *) avc;
1430 osi_Assert(WriteLocked(&avc->lock));
1431 ReleaseWriteLock(&avc->lock);
1433 PTOSSVP(vp, (off_t) length, (off_t) MAXLONG);
1435 ObtainWriteLock(&avc->lock, 67);
1438 /* if writing the file, don't fetch over this value */
1439 afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT, ICL_TYPE_POINTER, avc,
1440 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
1441 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1442 avc->m.Length = length;
1443 avc->m.Date = astat->ClientModTime;
1445 avc->m.Owner = astat->Owner;
1446 avc->m.Group = astat->Group;
1447 avc->m.Mode = astat->UnixModeBits;
1448 if (vType(avc) == VREG) {
1449 avc->m.Mode |= S_IFREG;
1450 } else if (vType(avc) == VDIR) {
1451 avc->m.Mode |= S_IFDIR;
1452 } else if (vType(avc) == VLNK) {
1453 avc->m.Mode |= S_IFLNK;
1454 if ((avc->m.Mode & 0111) == 0)
1457 if (avc->states & CForeign) {
1458 struct axscache *ac;
1459 avc->anyAccess = astat->AnonymousAccess;
1461 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1463 * Caller has at least one bit not covered by anonymous, and
1464 * thus may have interesting rights.
1466 * HOWEVER, this is a really bad idea, because any access query
1467 * for bits which aren't covered by anonymous, on behalf of a user
1468 * who doesn't have any special rights, will result in an answer of
1469 * the form "I don't know, let's make a FetchStatus RPC and find out!"
1470 * It's an especially bad idea under Ultrix, since (due to the lack of
1471 * a proper access() call) it must perform several afs_access() calls
1472 * in order to create magic mode bits that vary according to who makes
1473 * the call. In other words, _every_ stat() generates a test for
1476 #endif /* badidea */
1477 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1478 ac->axess = astat->CallerAccess;
1479 else /* not found, add a new one if possible */
1480 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1484 } /*afs_SimpleVStat */
1488 * Store the status info *only* back to the server for a
1491 * Environment: Must be called with a shared lock held on the vnode.
1493 * \param avc Ptr to the vcache entry.
1494 * \param astatus Ptr to the status info to store.
1495 * \param areq Ptr to the associated vrequest.
1497 * \return Operation status.
1501 afs_WriteVCache(register struct vcache *avc,
1502 register struct AFSStoreStatus *astatus,
1503 struct vrequest *areq)
1506 struct afs_conn *tc;
1507 struct AFSFetchStatus OutStatus;
1508 struct AFSVolSync tsync;
1510 AFS_STATCNT(afs_WriteVCache);
1511 afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
1512 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
1514 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
1516 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
1519 RXAFS_StoreStatus(tc->id, (struct AFSFid *)&avc->fid.Fid,
1520 astatus, &OutStatus, &tsync);
1525 } while (afs_Analyze
1526 (tc, code, &avc->fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
1527 SHARED_LOCK, NULL));
1529 UpgradeSToWLock(&avc->lock, 20);
1531 /* success, do the changes locally */
1532 afs_SimpleVStat(avc, &OutStatus, areq);
1534 * Update the date, too. SimpleVStat didn't do this, since
1535 * it thought we were doing this after fetching new status
1536 * over a file being written.
1538 avc->m.Date = OutStatus.ClientModTime;
1540 /* failure, set up to check with server next time */
1541 ObtainWriteLock(&afs_xcbhash, 462);
1542 afs_DequeueCallback(avc);
1543 avc->states &= ~(CStatd | CUnique); /* turn off stat valid flag */
1544 ReleaseWriteLock(&afs_xcbhash);
1545 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
1546 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
1548 ConvertWToSLock(&avc->lock);
1551 } /*afs_WriteVCache */
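/*
 * Illustrative sketch (not part of the original source): the locking
 * contract described above.  The caller holds a shared lock on the vcache;
 * the routine upgrades to a write lock internally and converts back before
 * returning, so the caller still holds the shared lock afterwards.  The
 * lock-history number (736) and the InStatus/treq locals are hypothetical.
 */
#if 0
    ObtainSharedLock(&avc->lock, 736);
    code = afs_WriteVCache(avc, &InStatus, &treq);
    ReleaseSharedLock(&avc->lock);
#endif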
1552 #if defined(AFS_DISCON_ENV)
1555 * Store status info only locally, set the proper disconnection flags
1556 * and add to dirty list.
1558 * \param avc The vcache to be written locally.
1559 * \param astatus Get attr fields from local store.
1560 * \param attrs Attribute block; only the va_size field is used.
1562 * \note Must be called with a shared lock on the vnode
1564 int afs_WriteVCacheDiscon(register struct vcache *avc,
1565 register struct AFSStoreStatus *astatus,
1566 struct vattr *attrs)
1569 afs_int32 flags = 0;
1571 UpgradeSToWLock(&avc->lock, 700);
1573 if (!astatus->Mask) {
1579 /* Set attributes. */
1580 if (astatus->Mask & AFS_SETMODTIME) {
1581 avc->m.Date = astatus->ClientModTime;
1582 flags |= VDisconSetTime;
1585 if (astatus->Mask & AFS_SETOWNER) {
1586 printf("Not allowed yet. \n");
1587 //avc->m.Owner = astatus->Owner;
1590 if (astatus->Mask & AFS_SETGROUP) {
1591 printf("Not allowed yet. \n");
1592 //avc->m.Group = astatus->Group;
1595 if (astatus->Mask & AFS_SETMODE) {
1596 avc->m.Mode = astatus->UnixModeBits;
1598 #if 0 /* XXX: Leaving this out, so it doesn't mess up the file type flag.*/
1600 if (vType(avc) == VREG) {
1601 avc->m.Mode |= S_IFREG;
1602 } else if (vType(avc) == VDIR) {
1603 avc->m.Mode |= S_IFDIR;
1604 } else if (vType(avc) == VLNK) {
1605 avc->m.Mode |= S_IFLNK;
1606 if ((avc->m.Mode & 0111) == 0)
1610 flags |= VDisconSetMode;
1611 } /* if(astatus.Mask & AFS_SETMODE) */
1613 } /* if (!astatus->Mask) */
1615 if (attrs->va_size > 0) {
1616 /* XXX: Do I need more checks? */
1617 /* Truncation operation. */
1618 flags |= VDisconTrunc;
1622 afs_DisconAddDirty(avc, flags, 1);
1624 /* XXX: How about the rest of the fields? */
1626 ConvertWToSLock(&avc->lock);
1634 * Copy astat block into vcache info
1636 * \note This code may get dataversion and length out of sync if the file has
1637 * been modified. This is less than ideal. I haven't thought about it sufficiently
1638 * to be certain that it is adequate.
1640 * \note Environment: Must be called under a write lock
1642 * \param avc Ptr to vcache entry.
1643 * \param astat Ptr to stat block to copy in.
1644 * \param areq Ptr to associated request.
1647 afs_ProcessFS(register struct vcache *avc,
1648 register struct AFSFetchStatus *astat, struct vrequest *areq)
1651 AFS_STATCNT(afs_ProcessFS);
1653 #ifdef AFS_64BIT_CLIENT
1654 FillInt64(length, astat->Length_hi, astat->Length);
1655 #else /* AFS_64BIT_CLIENT */
1656 length = astat->Length;
1657 #endif /* AFS_64BIT_CLIENT */
1658 /* WARNING: afs_DoBulkStat uses the Length field to store a sequence
1659 * number for each bulk status request. Under no circumstances
1660 * should afs_DoBulkStat store a sequence number if the new
1661 * length will be ignored when afs_ProcessFS is called with
1662 * new stats. If you change the following conditional then you
1663 * also need to change the conditional in afs_DoBulkStat. */
1665 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1666 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1668 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
1670 /* if we're writing or mapping this file, don't fetch over these
1673 afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
1674 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
1675 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1676 avc->m.Length = length;
1677 avc->m.Date = astat->ClientModTime;
1679 hset64(avc->m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
1680 avc->m.Owner = astat->Owner;
1681 avc->m.Mode = astat->UnixModeBits;
1682 avc->m.Group = astat->Group;
1683 avc->m.LinkCount = astat->LinkCount;
1684 if (astat->FileType == File) {
1685 vSetType(avc, VREG);
1686 avc->m.Mode |= S_IFREG;
1687 } else if (astat->FileType == Directory) {
1688 vSetType(avc, VDIR);
1689 avc->m.Mode |= S_IFDIR;
1690 } else if (astat->FileType == SymbolicLink) {
1691 if (afs_fakestat_enable && (avc->m.Mode & 0111) == 0) {
1692 vSetType(avc, VDIR);
1693 avc->m.Mode |= S_IFDIR;
1695 vSetType(avc, VLNK);
1696 avc->m.Mode |= S_IFLNK;
1698 if ((avc->m.Mode & 0111) == 0) {
1702 avc->anyAccess = astat->AnonymousAccess;
1704 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1706 * Caller has at least one bit not covered by anonymous, and
1707 * thus may have interesting rights.
1709 * HOWEVER, this is a really bad idea, because any access query
1710 * for bits which aren't covered by anonymous, on behalf of a user
1711 * who doesn't have any special rights, will result in an answer of
1712 * the form "I don't know, let's make a FetchStatus RPC and find out!"
1713 * It's an especially bad idea under Ultrix, since (due to the lack of
1714 * a proper access() call) it must perform several afs_access() calls
1715 * in order to create magic mode bits that vary according to who makes
1716 * the call. In other words, _every_ stat() generates a test for
1719 #endif /* badidea */
1721 struct axscache *ac;
1722 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1723 ac->axess = astat->CallerAccess;
1724 else /* not found, add a new one if possible */
1725 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1727 } /*afs_ProcessFS */
1731 * Get fid from server.
1734 * \param areq Request to be passed on.
1735 * \param name Name of ?? to lookup.
1736 * \param OutStatus Fetch status.
1741 * \return Success status of operation.
1744 afs_RemoteLookup(register struct VenusFid *afid, struct vrequest *areq,
1745 char *name, struct VenusFid *nfid,
1746 struct AFSFetchStatus *OutStatusp,
1747 struct AFSCallBack *CallBackp, struct server **serverp,
1748 struct AFSVolSync *tsyncp)
1752 register struct afs_conn *tc;
1753 struct AFSFetchStatus OutDirStatus;
1756 name = ""; /* XXX */
1758 tc = afs_Conn(afid, areq, SHARED_LOCK);
1761 *serverp = tc->srvr->server;
1763 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
1766 RXAFS_Lookup(tc->id, (struct AFSFid *)&afid->Fid, name,
1767 (struct AFSFid *)&nfid->Fid, OutStatusp,
1768 &OutDirStatus, CallBackp, tsyncp);
1773 } while (afs_Analyze
1774 (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
1784 * Given a file id and a vrequest structure, fetch the status
1785 * information associated with the file.
1787 * \param afid File ID.
1788 * \param areq Ptr to associated vrequest structure, specifying the
1789 * user whose authentication tokens will be used.
1790 * \param avc Caller may already have a vcache for this file, which is
1793 * \note Environment:
1794 * The cache entry is returned with an increased vrefCount field.
1795 * The entry must be discarded by calling afs_PutVCache when you
1796 * are through using the pointer to the cache entry.
1798 * You should not hold any locks when calling this function, except
1799 * locks on other vcache entries. If you lock more than one vcache
1800 * entry simultaneously, you should lock them in this order:
1802 * 1. Lock all files first, then directories.
1803 * 2. Within a particular type, lock entries in Fid.Vnode order.
1805 * This locking hierarchy is convenient because it allows locking
1806 * of a parent dir cache entry, given a file (to check its access
1807 * control list). It also allows renames to be handled easily by
1808 * locking directories in a constant order.
1810 * \note NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
1812 * \note Might have a vcache structure already, which must
1813 * already be held by the caller
1816 afs_GetVCache(register struct VenusFid *afid, struct vrequest *areq,
1817 afs_int32 * cached, struct vcache *avc)
1820 afs_int32 code, newvcache = 0;
1821 register struct vcache *tvc;
1825 AFS_STATCNT(afs_GetVCache);
1828 *cached = 0; /* Init just in case */
1830 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1834 ObtainSharedLock(&afs_xvcache, 5);
1836 tvc = afs_FindVCache(afid, &retry, DO_STATS | DO_VLRU | IS_SLOCK);
1838 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1839 ReleaseSharedLock(&afs_xvcache);
1840 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1848 osi_Assert((tvc->states & CVInit) == 0);
1849 /* If we are in readdir, return the vnode even if not statd */
1850 if ((tvc->states & CStatd) || afs_InReadDir(tvc)) {
1851 ReleaseSharedLock(&afs_xvcache);
1855 UpgradeSToWLock(&afs_xvcache, 21);
1857 /* no cache entry, better grab one */
1858 tvc = afs_NewVCache(afid, NULL);
1861 ConvertWToSLock(&afs_xvcache);
1864 ReleaseSharedLock(&afs_xvcache);
1868 afs_stats_cmperf.vcacheMisses++;
1871 ReleaseSharedLock(&afs_xvcache);
1873 ObtainWriteLock(&tvc->lock, 54);
1875 if (tvc->states & CStatd) {
1876 ReleaseWriteLock(&tvc->lock);
1879 #if defined(AFS_OSF_ENV)
1880 if (afs_IsWired(tvc)) {
1881 ReleaseWriteLock(&tvc->lock);
1884 #endif /* AFS_OSF_ENV */
1885 #ifdef AFS_DARWIN80_ENV
1886 /* Darwin 8.0 only has bufs in nfs, so we shouldn't have to worry about them.
1889 #if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
1891 * XXX - I really don't like this. Should try to understand better.
1892 * It seems that sometimes, when we get called, we already hold the
1893 * lock on the vnode (e.g., from afs_getattr via afs_VerifyVCache).
1894 * We can't drop the vnode lock, because that could result in a race.
1895 * Sometimes, though, we get here and don't hold the vnode lock.
1896 * I hate code paths that sometimes hold locks and sometimes don't.
1897 * In any event, the dodge we use here is to check whether the vnode
1898 * is locked, and if it isn't, then we gain and drop it around the call
1899 * to vinvalbuf; otherwise, we leave it alone.
1902 struct vnode *vp = AFSTOV(tvc);
1905 #if defined(AFS_DARWIN_ENV)
1906 iheldthelock = VOP_ISLOCKED(vp);
1908 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, current_proc());
1909 /* this is messy. we can call fsync which will try to reobtain this */
1910 if (VTOAFS(vp) == tvc)
1911 ReleaseWriteLock(&tvc->lock);
1912 if (UBCINFOEXISTS(vp)) {
1913 vinvalbuf(vp, V_SAVE, &afs_osi_cred, current_proc(), PINOD, 0);
1915 if (VTOAFS(vp) == tvc)
1916 ObtainWriteLock(&tvc->lock, 954);
1918 VOP_UNLOCK(vp, LK_EXCLUSIVE, current_proc());
1919 #elif defined(AFS_FBSD80_ENV)
1920 iheldthelock = VOP_ISLOCKED(vp);
1921 if (!iheldthelock) {
1922 /* nosleep/sleep lock order reversal */
1923 int glocked = ISAFS_GLOCK();
1926 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1930 vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
1933 #elif defined(AFS_FBSD60_ENV)
1934 iheldthelock = VOP_ISLOCKED(vp, curthread);
1936 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
1937 vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
1939 VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
1940 #elif defined(AFS_FBSD50_ENV)
1941 iheldthelock = VOP_ISLOCKED(vp, curthread);
1943 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
1944 vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
1946 VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
1947 #elif defined(AFS_FBSD40_ENV)
1948 iheldthelock = VOP_ISLOCKED(vp, curproc);
1950 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
1951 vinvalbuf(vp, V_SAVE, osi_curcred(), curproc, PINOD, 0);
1953 VOP_UNLOCK(vp, LK_EXCLUSIVE, curproc);
1954 #elif defined(AFS_OBSD_ENV)
1955 iheldthelock = VOP_ISLOCKED(vp, curproc);
1957 VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
1958 uvm_vnp_uncache(vp);
1960 VOP_UNLOCK(vp, 0, curproc);
1966 ObtainWriteLock(&afs_xcbhash, 464);
1967 tvc->states &= ~CUnique;
1969 afs_DequeueCallback(tvc);
1970 ReleaseWriteLock(&afs_xcbhash);
1972 /* Is it always appropriate to throw away all the access rights? */
1973 afs_FreeAllAxs(&(tvc->Access));
1974 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-volume info */
1976 if ((tvp->states & VForeign)) {
1978 tvc->states |= CForeign;
1979 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
1980 && (tvp->rootUnique == afid->Fid.Unique)) {
1984 if (tvp->states & VRO)
1986 if (tvp->states & VBackup)
1987 tvc->states |= CBackup;
1988 /* now copy ".." entry back out of volume structure, if necessary */
1989 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
1991 tvc->mvid = (struct VenusFid *)
1992 osi_AllocSmallSpace(sizeof(struct VenusFid));
1993 *tvc->mvid = tvp->dotdot;
1995 afs_PutVolume(tvp, READ_LOCK);
1999 afs_RemoveVCB(afid);
2001 struct AFSFetchStatus OutStatus;
2003 if (afs_DynrootNewVnode(tvc, &OutStatus)) {
2004 afs_ProcessFS(tvc, &OutStatus, areq);
2005 tvc->states |= CStatd | CUnique;
2006 tvc->parentVnode = OutStatus.ParentVnode;
2007 tvc->parentUnique = OutStatus.ParentUnique;
2011 if (AFS_IS_DISCONNECTED) {
2012 /* Nothing to do otherwise...*/
2014 printf("Network is down in afs_GetCache");
2016 code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
2018 /* For the NFS translator's benefit, make sure
2019 * non-directory vnodes always have their parent FID set
2020 * correctly, even when created as a result of decoding an
2021 * NFS filehandle. It would be nice to also do this for
2022 * directories, but we can't because the fileserver fills
2023 * in the FID of the directory itself instead of that of
2026 if (!code && OutStatus.FileType != Directory &&
2027 !tvc->parentVnode) {
2028 tvc->parentVnode = OutStatus.ParentVnode;
2029 tvc->parentUnique = OutStatus.ParentUnique;
2035 ReleaseWriteLock(&tvc->lock);
2041 ReleaseWriteLock(&tvc->lock);
2044 } /*afs_GetVCache */
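/*
 * Illustrative sketch (not part of the original source): the reference and
 * lock discipline described in the comment before afs_GetVCache().  The
 * returned entry carries an extra reference that must be dropped with
 * afs_PutVCache(), and when two entries are locked together, files are
 * locked before directories and entries of the same type are locked in
 * Fid.Vnode order.  Variable names are hypothetical.
 */
#if 0
{
    struct vcache *tvc;

    tvc = afs_GetVCache(&tfid, &treq, NULL, NULL);
    if (tvc) {
        ObtainReadLock(&tvc->lock);	/* the file, before its parent dir */
        ObtainReadLock(&adp->lock);
        /* ... use the entries ... */
        ReleaseReadLock(&adp->lock);
        ReleaseReadLock(&tvc->lock);
        afs_PutVCache(tvc);	/* drop the reference GetVCache added */
    }
}
#endif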
2049 * Lookup a vcache by fid. Look inside the cache first, if not
2050 * there, look up the file on the server, and then get its fresh
2055 * \param cached Is element cached? If NULL, don't answer.
2059 * \return The found element or NULL.
2062 afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
2063 afs_int32 * cached, struct vcache *adp, char *aname)
2065 afs_int32 code, now, newvcache = 0;
2066 struct VenusFid nfid;
2067 register struct vcache *tvc;
2069 struct AFSFetchStatus OutStatus;
2070 struct AFSCallBack CallBack;
2071 struct AFSVolSync tsync;
2072 struct server *serverp = 0;
2076 AFS_STATCNT(afs_GetVCache);
2078 *cached = 0; /* Init just in case */
2080 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2084 ObtainReadLock(&afs_xvcache);
2085 tvc = afs_FindVCache(afid, &retry, DO_STATS /* no vlru */ );
2088 ReleaseReadLock(&afs_xvcache);
2090 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2091 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2095 ObtainReadLock(&tvc->lock);
2097 if (tvc->states & CStatd) {
2101 ReleaseReadLock(&tvc->lock);
2104 tvc->states &= ~CUnique;
2106 ReleaseReadLock(&tvc->lock);
2108 ObtainReadLock(&afs_xvcache);
2111 ReleaseReadLock(&afs_xvcache);
2113 /* lookup the file */
2116 origCBs = afs_allCBs; /* if anything changes, we don't have a cb */
2118 if (AFS_IS_DISCONNECTED) {
2119 printf("Network is down in afs_LookupVcache\n");
2123 afs_RemoteLookup(&adp->fid, areq, aname, &nfid, &OutStatus,
2124 &CallBack, &serverp, &tsync);
2126 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2130 ObtainSharedLock(&afs_xvcache, 6);
2131 tvc = afs_FindVCache(&nfid, &retry, DO_VLRU | IS_SLOCK/* no xstats now */ );
2133 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2134 ReleaseSharedLock(&afs_xvcache);
2135 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2141 /* no cache entry, better grab one */
2142 UpgradeSToWLock(&afs_xvcache, 22);
2143 tvc = afs_NewVCache(&nfid, serverp);
2145 ConvertWToSLock(&afs_xvcache);
2148 ReleaseSharedLock(&afs_xvcache);
2153 ReleaseSharedLock(&afs_xvcache);
2154 ObtainWriteLock(&tvc->lock, 55);
2156 /* Is it always appropriate to throw away all the access rights? */
2157 afs_FreeAllAxs(&(tvc->Access));
2158 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-vol info */
2160 if ((tvp->states & VForeign)) {
2162 tvc->states |= CForeign;
2163 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
2164 && (tvp->rootUnique == afid->Fid.Unique))
2167 if (tvp->states & VRO)
2169 if (tvp->states & VBackup)
2170 tvc->states |= CBackup;
2171 /* now copy ".." entry back out of volume structure, if necessary */
2172 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2174 tvc->mvid = (struct VenusFid *)
2175 osi_AllocSmallSpace(sizeof(struct VenusFid));
2176 *tvc->mvid = tvp->dotdot;
2181 ObtainWriteLock(&afs_xcbhash, 465);
2182 afs_DequeueCallback(tvc);
2183 tvc->states &= ~(CStatd | CUnique);
2184 ReleaseWriteLock(&afs_xcbhash);
2185 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2186 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2188 afs_PutVolume(tvp, READ_LOCK);
2189 ReleaseWriteLock(&tvc->lock);
2194 ObtainWriteLock(&afs_xcbhash, 466);
2195 if (origCBs == afs_allCBs) {
2196 if (CallBack.ExpirationTime) {
2197 tvc->callback = serverp;
2198 tvc->cbExpires = CallBack.ExpirationTime + now;
2199 tvc->states |= CStatd | CUnique;
2200 tvc->states &= ~CBulkFetching;
2201 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
2202 } else if (tvc->states & CRO) {
2203 /* adapt gives us an hour. */
2204 tvc->cbExpires = 3600 + osi_Time();
2205 /*XXX*/ tvc->states |= CStatd | CUnique;
2206 tvc->states &= ~CBulkFetching;
2207 afs_QueueCallback(tvc, CBHash(3600), tvp);
2209 tvc->callback = NULL;
2210 afs_DequeueCallback(tvc);
2211 tvc->states &= ~(CStatd | CUnique);
2212 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2213 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2216 afs_DequeueCallback(tvc);
2217 tvc->states &= ~CStatd;
2218 tvc->states &= ~CUnique;
2219 tvc->callback = NULL;
2220 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2221 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2223 ReleaseWriteLock(&afs_xcbhash);
2225 afs_PutVolume(tvp, READ_LOCK);
2226 afs_ProcessFS(tvc, &OutStatus, areq);
2228 ReleaseWriteLock(&tvc->lock);
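/*
 * Illustrative sketch (not part of the original source): the origCBs
 * pattern used above (and again in afs_GetRootVCache() below).  The global
 * callback counter is sampled before the RPC; if any callbacks were broken
 * while the RPC was in flight the counter will have moved, so the status
 * and callback that came back cannot be trusted and CStatd is left clear.
 */
#if 0
    origCBs = afs_allCBs;	/* snapshot before the RPC */
    code = afs_RemoteLookup(&adp->fid, areq, aname, &nfid, &OutStatus,
                            &CallBack, &serverp, &tsync);
    ObtainWriteLock(&afs_xcbhash, 466);
    if (origCBs == afs_allCBs) {
        /* nothing broken in the meantime: safe to believe the reply */
    } else {
        /* a callback raced in: leave the entry without CStatd */
    }
    ReleaseWriteLock(&afs_xcbhash);
#endif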
2234 afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
2235 afs_int32 * cached, struct volume *tvolp)
2237 afs_int32 code = 0, i, newvcache = 0, haveStatus = 0;
2238 afs_int32 getNewFid = 0;
2240 struct VenusFid nfid;
2241 register struct vcache *tvc;
2242 struct server *serverp = 0;
2243 struct AFSFetchStatus OutStatus;
2244 struct AFSCallBack CallBack;
2245 struct AFSVolSync tsync;
2250 #ifdef AFS_DARWIN80_ENV
2257 if (!tvolp->rootVnode || getNewFid) {
2258 struct VenusFid tfid;
2261 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2262 origCBs = afs_allCBs; /* ignore InitCallBackState */
2264 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2269 /* ReleaseReadLock(&tvolp->lock); */
2270 ObtainWriteLock(&tvolp->lock, 56);
2271 tvolp->rootVnode = afid->Fid.Vnode = nfid.Fid.Vnode;
2272 tvolp->rootUnique = afid->Fid.Unique = nfid.Fid.Unique;
2273 ReleaseWriteLock(&tvolp->lock);
2274 /* ObtainReadLock(&tvolp->lock);*/
2277 afid->Fid.Vnode = tvolp->rootVnode;
2278 afid->Fid.Unique = tvolp->rootUnique;
2282 ObtainSharedLock(&afs_xvcache, 7);
2284 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2285 if (!FidCmp(&(tvc->fid), afid)) {
2286 if (tvc->states & CVInit) {
2287 ReleaseSharedLock(&afs_xvcache);
2288 afs_osi_Sleep(&tvc->states);
2292 /* Grab this vnode, possibly reactivating from the free list */
2293 /* for the present (95.05.25) everything on the hash table is
2294 * definitively NOT in the free list -- at least until afs_reclaim
2295 * can be safely implemented */
2297 vg = vget(AFSTOV(tvc)); /* this bumps ref count */
2301 #endif /* AFS_OSF_ENV */
2302 #ifdef AFS_DARWIN80_ENV
2303 if (tvc->states & CDeadVnode) {
2304 ReleaseSharedLock(&afs_xvcache);
2305 afs_osi_Sleep(&tvc->states);
2309 if (vnode_get(tvp)) /* this bumps ref count */
2311 if (vnode_ref(tvp)) {
2313 /* AFSTOV(tvc) may be NULL */
2323 if (!haveStatus && (!tvc || !(tvc->states & CStatd))) {
2324 /* Mount point no longer stat'd or unknown. FID may have changed. */
2327 AFS_RELE(AFSTOV(tvc));
2330 ReleaseSharedLock(&afs_xvcache);
2331 #ifdef AFS_DARWIN80_ENV
2334 vnode_put(AFSTOV(tvc));
2335 vnode_rele(AFSTOV(tvc));
2344 UpgradeSToWLock(&afs_xvcache, 23);
2345 /* no cache entry, better grab one */
2346 tvc = afs_NewVCache(afid, NULL);
2349 ReleaseWriteLock(&afs_xvcache);
2353 afs_stats_cmperf.vcacheMisses++;
2357 afs_stats_cmperf.vcacheHits++;
2358 #if defined(AFS_OSF_ENV) || defined(AFS_DARWIN80_ENV)
2359 /* we already bumped the ref count in the for loop above */
2360 #else /* AFS_OSF_ENV */
2363 UpgradeSToWLock(&afs_xvcache, 24);
2364 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2365 refpanic("GRVC VLRU inconsistent0");
2367 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2368 refpanic("GRVC VLRU inconsistent1");
2370 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2371 refpanic("GRVC VLRU inconsistent2");
2373 QRemove(&tvc->vlruq); /* move to lruq head */
2374 QAdd(&VLRU, &tvc->vlruq);
2375 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2376 refpanic("GRVC VLRU inconsistent3");
2378 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2379 refpanic("GRVC VLRU inconsistent4");
2381 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2382 refpanic("GRVC VLRU inconsistent5");
2387 ReleaseWriteLock(&afs_xvcache);
2389 if (tvc->states & CStatd) {
2393 ObtainReadLock(&tvc->lock);
2394 tvc->states &= ~CUnique;
2395 tvc->callback = NULL; /* redundant, perhaps */
2396 ReleaseReadLock(&tvc->lock);
2399 ObtainWriteLock(&tvc->lock, 57);
2401 /* Is it always appropriate to throw away all the access rights? */
2402 afs_FreeAllAxs(&(tvc->Access));
2405 tvc->states |= CForeign;
2406 if (tvolp->states & VRO)
2408 if (tvolp->states & VBackup)
2409 tvc->states |= CBackup;
2410 /* now copy ".." entry back out of volume structure, if necessary */
2411 if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
2412 && (tvolp->rootUnique == afid->Fid.Unique)) {
2415 if (tvc->mvstat == 2 && tvolp->dotdot.Fid.Volume != 0) {
2417 tvc->mvid = (struct VenusFid *)
2418 osi_AllocSmallSpace(sizeof(struct VenusFid));
2419 *tvc->mvid = tvolp->dotdot;
2423 afs_RemoveVCB(afid);
2426 struct VenusFid tfid;
2429 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2430 origCBs = afs_allCBs; /* ignore InitCallBackState */
2432 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2437 ObtainWriteLock(&afs_xcbhash, 467);
2438 afs_DequeueCallback(tvc);
2439 tvc->callback = NULL;
2440 tvc->states &= ~(CStatd | CUnique);
2441 ReleaseWriteLock(&afs_xcbhash);
2442 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2443 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2444 ReleaseWriteLock(&tvc->lock);
2449 ObtainWriteLock(&afs_xcbhash, 468);
2450 if (origCBs == afs_allCBs) {
2451 tvc->states |= CTruth;
2452 tvc->callback = serverp;
2453 if (CallBack.ExpirationTime != 0) {
2454 tvc->cbExpires = CallBack.ExpirationTime + start;
2455 tvc->states |= CStatd;
2456 tvc->states &= ~CBulkFetching;
2457 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
2458 } else if (tvc->states & CRO) {
2459 /* adapt gives us an hour. */
2460 tvc->cbExpires = 3600 + osi_Time();
2461 /*XXX*/ tvc->states |= CStatd;
2462 tvc->states &= ~CBulkFetching;
2463 afs_QueueCallback(tvc, CBHash(3600), tvolp);
2466 afs_DequeueCallback(tvc);
2467 tvc->callback = NULL;
2468 tvc->states &= ~(CStatd | CUnique);
2469 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2470 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2472 ReleaseWriteLock(&afs_xcbhash);
2473 afs_ProcessFS(tvc, &OutStatus, areq);
2475 ReleaseWriteLock(&tvc->lock);
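/*
 * Illustrative sketch (not part of the original source): a caller that needs
 * the root directory of a volume -- for example after evaluating a mount
 * point -- would typically obtain and release the root vcache roughly like
 * this.  The names "rootFid", "treq" and "tvolp" are assumptions made for
 * the example.
 *
 *     afs_int32 cached = 0;
 *     struct vcache *root;
 *
 *     root = afs_GetRootVCache(&rootFid, treq, &cached, tvolp);
 *     if (root) {
 *         ... use the entry, then drop the reference ...
 *         afs_PutVCache(root);
 *     }
 */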
2481 * Update callback status and (sometimes) attributes of a vnode.
2482 * Called after doing a fetch status RPC. Whilst disconnected, attributes
2483 * shouldn't be written to the vcache here.
2488 * \param Outsp Server status after the RPC call.
2489 * \param acb Callback for this vnode.
2491 * \note The vcache must be write locked.
2494 afs_UpdateStatus(struct vcache *avc,
2495 struct VenusFid *afid,
2496 struct vrequest *areq,
2497 struct AFSFetchStatus *Outsp,
2498 struct AFSCallBack *acb,
2501 struct volume *volp;
2504 /* Don't write status into the vcache if resyncing after a disconnection. */
2505 afs_ProcessFS(avc, Outsp, areq);
2507 volp = afs_GetVolume(afid, areq, READ_LOCK);
2508 ObtainWriteLock(&afs_xcbhash, 469);
2509 avc->states |= CTruth;
2510 if (avc->callback /* check for race */ ) {
2511 if (acb->ExpirationTime != 0) {
2512 avc->cbExpires = acb->ExpirationTime + start;
2513 avc->states |= CStatd;
2514 avc->states &= ~CBulkFetching;
2515 afs_QueueCallback(avc, CBHash(acb->ExpirationTime), volp);
2516 } else if (avc->states & CRO) {
2517 /* ordinary callback on a read-only volume -- AFS 3.2 style */
2518 avc->cbExpires = 3600 + start;
2519 avc->states |= CStatd;
2520 avc->states &= ~CBulkFetching;
2521 afs_QueueCallback(avc, CBHash(3600), volp);
2523 afs_DequeueCallback(avc);
2524 avc->callback = NULL;
2525 avc->states &= ~(CStatd | CUnique);
2526 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
2527 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2530 afs_DequeueCallback(avc);
2531 avc->callback = NULL;
2532 avc->states &= ~(CStatd | CUnique);
2533 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
2534 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2536 ReleaseWriteLock(&afs_xcbhash);
2538 afs_PutVolume(volp, READ_LOCK);
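/*
 * Illustrative sketch (not part of the original source): callers are expected
 * to sample the clock just before issuing the RPC and pass that value in as
 * 'start', much as afs_FetchStatus() below does:
 *
 *     start = osi_Time();
 *     code = RXAFS_FetchStatus(tc->id, (struct AFSFid *)&afid->Fid,
 *                              &OutStatus, &CallBack, &tsync);
 *     if (!code)
 *         afs_UpdateStatus(avc, afid, areq, &OutStatus, &CallBack, start);
 */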
2543 * Must be called with avc write-locked
2544 * We don't absolutely have to invalidate the hint unless the data version (dv) has
2545 * changed, but be sure to get it right or there will be consistency bugs.
2548 afs_FetchStatus(struct vcache * avc, struct VenusFid * afid,
2549 struct vrequest * areq, struct AFSFetchStatus * Outsp)
2552 afs_uint32 start = 0;
2553 register struct afs_conn *tc;
2554 struct AFSCallBack CallBack;
2555 struct AFSVolSync tsync;
2558 tc = afs_Conn(afid, areq, SHARED_LOCK);
2559 avc->dchint = NULL; /* invalidate hints */
2561 avc->callback = tc->srvr->server;
2563 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
2566 RXAFS_FetchStatus(tc->id, (struct AFSFid *)&afid->Fid, Outsp,
2574 } while (afs_Analyze
2575 (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
2576 SHARED_LOCK, NULL));
2579 afs_UpdateStatus(avc, afid, areq, Outsp, &CallBack, start);
2581 /* We used to undo the local callback here, but that's too extreme.
2582 * There are plenty of good reasons that FetchStatus might return
2583 * an error, such as EPERM. If we have the vnode cached, stat'd,
2584 * and with a callback, we might as well keep track of the fact that
2585 * we don't have access...
2587 if (code == EPERM || code == EACCES) {
2588 struct axscache *ac;
2589 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
2591 else /* not found, add a new one if possible */
2592 afs_AddAxs(avc->Access, areq->uid, 0);
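/*
 * Illustrative sketch (not part of the original source): a caller that finds
 * a cached entry without CStatd set would typically refresh it roughly like
 * this, with avc already write-locked:
 *
 *     struct AFSFetchStatus OutStatus;
 *
 *     code = afs_FetchStatus(avc, &avc->fid, areq, &OutStatus);
 */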
2603 * Stuff some information into the vcache for the given file.
2606 * afid : File in question.
2607 * OutStatus : Fetch status on the file.
2608 * CallBack : Callback info.
2609 * tc : RPC connection involved.
2610 * areq : vrequest involved.
2613 * Nothing interesting.
2616 afs_StuffVcache(register struct VenusFid *afid,
2617 struct AFSFetchStatus *OutStatus,
2618 struct AFSCallBack *CallBack, register struct afs_conn *tc,
2619 struct vrequest *areq)
2621 register afs_int32 code, i, newvcache = 0;
2622 register struct vcache *tvc;
2623 struct AFSVolSync tsync;
2625 struct axscache *ac;
2628 AFS_STATCNT(afs_StuffVcache);
2629 #ifdef IFS_VCACHECOUNT
2634 ObtainSharedLock(&afs_xvcache, 8);
2636 tvc = afs_FindVCache(afid, &retry, DO_VLRU| IS_SLOCK /* no stats */ );
2638 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2639 ReleaseSharedLock(&afs_xvcache);
2640 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2646 /* no cache entry, better grab one */
2647 UpgradeSToWLock(&afs_xvcache, 25);
2648 tvc = afs_NewVCache(afid, NULL);
2650 ConvertWToSLock(&afs_xvcache);
2653 ReleaseSharedLock(&afs_xvcache);
2658 ReleaseSharedLock(&afs_xvcache);
2659 ObtainWriteLock(&tvc->lock, 58);
2661 tvc->states &= ~CStatd;
2662 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2663 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2665 /* Is it always appropriate to throw away all the access rights? */
2666 afs_FreeAllAxs(&(tvc->Access));
2668 /*Copy useful per-volume info */
2669 tvp = afs_GetVolume(afid, areq, READ_LOCK);
2671 if (newvcache && (tvp->states & VForeign))
2672 tvc->states |= CForeign;
2673 if (tvp->states & VRO)
2675 if (tvp->states & VBackup)
2676 tvc->states |= CBackup;
2678 * Now, copy ".." entry back out of volume structure, if
2681 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2683 tvc->mvid = (struct VenusFid *)
2684 osi_AllocSmallSpace(sizeof(struct VenusFid));
2685 *tvc->mvid = tvp->dotdot;
2688 /* store the stat on the file */
2689 afs_RemoveVCB(afid);
2690 afs_ProcessFS(tvc, OutStatus, areq);
2691 tvc->callback = tc->srvr->server;
2693 /* we use osi_Time twice below. Ideally, we would use the time at which
2694 * the FetchStatus call began, instead, but we don't have it here. So we
2695 * make do with "now". In the CRO case, it doesn't really matter. In
2696 * the other case, we hope that the difference between "now" and when the
2697 * call actually began execution on the server won't be larger than the
2698 * padding which the server keeps. Subtract 1 second anyway, to be on
2699 * the safe side. Can't subtract more because we don't know how big
2700 * ExpirationTime is. Possible consistency problems may arise if the call
2701 * timeout period becomes longer than the server's expiration padding. */
2702 ObtainWriteLock(&afs_xcbhash, 470);
2703 if (CallBack->ExpirationTime != 0) {
2704 tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
2705 tvc->states |= CStatd;
2706 tvc->states &= ~CBulkFetching;
2707 afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
2708 } else if (tvc->states & CRO) {
2709 /* old-fashioned AFS 3.2 style */
2710 tvc->cbExpires = 3600 + osi_Time();
2711 /*XXX*/ tvc->states |= CStatd;
2712 tvc->states &= ~CBulkFetching;
2713 afs_QueueCallback(tvc, CBHash(3600), tvp);
2715 afs_DequeueCallback(tvc);
2716 tvc->callback = NULL;
2717 tvc->states &= ~(CStatd | CUnique);
2718 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2719 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2721 ReleaseWriteLock(&afs_xcbhash);
2723 afs_PutVolume(tvp, READ_LOCK);
2725 /* look in per-pag cache */
2726 if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
2727 ac->axess = OutStatus->CallerAccess; /* substitute pags */
2728 else /* not found, add a new one if possible */
2729 afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);
2731 ReleaseWriteLock(&tvc->lock);
2732 afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
2733 ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32,
2734 tvc->cbExpires, ICL_TYPE_INT32, tvc->cbExpires - osi_Time());
2736 * Release ref count... hope this guy stays around...
2739 } /*afs_StuffVcache */
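/*
 * Illustrative sketch (not part of the original source): a caller that has
 * already fetched status and callback information over the connection "tc"
 * could install it in the cache roughly like this; whether the caller must
 * drop a reference on the result is not shown here, so the example stops at
 * the call itself.
 *
 *     tvc = afs_StuffVcache(afid, &OutStatus, &CallBack, tc, areq);
 */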
2743 * Decrements the reference count on a cache entry.
2745 * \param avc Pointer to the cache entry to decrement.
2747 * \note Environment: Nothing interesting.
2750 afs_PutVCache(register struct vcache *avc)
2752 AFS_STATCNT(afs_PutVCache);
2753 #ifdef AFS_DARWIN80_ENV
2754 vnode_put(AFSTOV(avc));
2758 * Can we use a read lock here?
2760 ObtainReadLock(&afs_xvcache);
2762 ReleaseReadLock(&afs_xvcache);
2764 } /*afs_PutVCache */
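/*
 * Illustrative sketch (not part of the original source): every reference
 * handed out by the lookup routines in this file is expected to be balanced
 * by a call here.  This assumes the afs_GetVCache() defined earlier in this
 * file; the argument list shown is an assumption for the example.
 *
 *     tvc = afs_GetVCache(&afid, areq, NULL, NULL);
 *     if (tvc) {
 *         ... use the entry ...
 *         afs_PutVCache(tvc);
 *     }
 */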
2768 * Reset a vcache entry, so local contents are ignored, and the
2769 * server will be reconsulted next time the vcache is used
2771 * \param avc Pointer to the cache entry to reset
2774 * \note avc must be write locked on entry
2777 afs_ResetVCache(struct vcache *avc, struct AFS_UCRED *acred) {
2778 ObtainWriteLock(&afs_xcbhash, 456);
2779 afs_DequeueCallback(avc);
2780 avc->states &= ~(CStatd | CDirty); /* next reference will re-stat */
2781 ReleaseWriteLock(&afs_xcbhash);
2782 /* now find the disk cache entries */
2783 afs_TryToSmush(avc, acred, 1);
2784 osi_dnlc_purgedp(avc);
2785 if (avc->linkData && !(avc->states & CCore)) {
2786 afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
2787 avc->linkData = NULL;
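/*
 * Illustrative sketch (not part of the original source): a typical caller
 * write-locks the entry around the reset, as the note above requires; the
 * lock-id value below is illustrative only.
 *
 *     ObtainWriteLock(&avc->lock, 200);
 *     afs_ResetVCache(avc, acred);
 *     ReleaseWriteLock(&avc->lock);
 */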
2792 * Sleep when searching for a vcache. Releases all the pending locks,
2793 * sleeps, then reobtains the previously released locks.
2795 * \param avc The vcache entry to wait on.
2796 * \param flag Determines what locks to use.
2800 static void findvc_sleep(struct vcache *avc, int flag) {
2801 if (flag & IS_SLOCK) {
2802 ReleaseSharedLock(&afs_xvcache);
2804 if (flag & IS_WLOCK) {
2805 ReleaseWriteLock(&afs_xvcache);
2807 ReleaseReadLock(&afs_xvcache);
2810 afs_osi_Sleep(&avc->states);
2811 if (flag & IS_SLOCK) {
2812 ObtainSharedLock(&afs_xvcache, 341);
2814 if (flag & IS_WLOCK) {
2815 ObtainWriteLock(&afs_xvcache, 343);
2817 ObtainReadLock(&afs_xvcache);
2822 * Find a vcache entry given a fid.
2824 * \param afid Pointer to the fid whose cache entry we desire.
2825 * \param retry (SGI-specific) tell the caller to drop the lock on xvcache,
2826 * unlock the vnode, and try again.
2827 * \param flag Bit 1 to specify whether to compute hit statistics. Not
2828 * set if FindVCache is called as part of internal bookkeeping.
2830 * \note Environment: Must be called with the afs_xvcache lock at least held at
2831 * the read level. In order to do the VLRU adjustment, the xvcache lock
2832 * must be shared-- we upgrade it here.
2836 afs_FindVCache(struct VenusFid *afid, afs_int32 * retry, afs_int32 flag)
2839 register struct vcache *tvc;
2841 #if defined( AFS_OSF_ENV)
2844 #ifdef AFS_DARWIN80_ENV
2848 AFS_STATCNT(afs_FindVCache);
2852 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2853 if (FidMatches(afid, tvc)) {
2854 if (tvc->states & CVInit) {
2855 findvc_sleep(tvc, flag);
2859 /* Grab this vnode, possibly reactivating from the free list */
2861 vg = vget(AFSTOV(tvc));
2865 #endif /* AFS_OSF_ENV */
2866 #ifdef AFS_DARWIN80_ENV
2867 if (tvc->states & CDeadVnode) {
2868 findvc_sleep(tvc, flag);
2874 if (vnode_ref(tvp)) {
2876 /* AFSTOV(tvc) may be NULL */
2886 /* should I have a read lock on the vnode here? */
2890 #if !defined(AFS_OSF_ENV) && !defined(AFS_DARWIN80_ENV)
2891 osi_vnhold(tvc, retry); /* already held, above */
2892 if (retry && *retry)
2895 #if defined(AFS_DARWIN_ENV) && !defined(AFS_DARWIN80_ENV)
2896 tvc->states |= CUBCinit;
2898 if (UBCINFOMISSING(AFSTOV(tvc)) ||
2899 UBCINFORECLAIMED(AFSTOV(tvc))) {
2900 ubc_info_init(AFSTOV(tvc));
2903 tvc->states &= ~CUBCinit;
2906 * only move to front of vlru if we have proper vcache locking
2908 if (flag & DO_VLRU) {
2909 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2910 refpanic("FindVC VLRU inconsistent1");
2912 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2913 refpanic("FindVC VLRU inconsistent1");
2915 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2916 refpanic("FindVC VLRU inconsistent2");
2918 UpgradeSToWLock(&afs_xvcache, 26);
2919 QRemove(&tvc->vlruq);
2920 QAdd(&VLRU, &tvc->vlruq);
2921 ConvertWToSLock(&afs_xvcache);
2922 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2923 refpanic("FindVC VLRU inconsistent1");
2925 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2926 refpanic("FindVC VLRU inconsistent2");
2928 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2929 refpanic("FindVC VLRU inconsistent3");
2935 if (flag & DO_STATS) {
2937 afs_stats_cmperf.vcacheHits++;
2939 afs_stats_cmperf.vcacheMisses++;
2940 if (afs_IsPrimaryCellNum(afid->Cell))
2941 afs_stats_cmperf.vlocalAccesses++;
2943 afs_stats_cmperf.vremoteAccesses++;
2946 } /*afs_FindVCache */
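/*
 * Illustrative sketch (not part of the original source): a lookup that wants
 * hit statistics and the VLRU adjustment holds afs_xvcache shared, as the
 * note above requires; the lock-id value below is illustrative only.
 *
 *     afs_int32 retry = 0;
 *     struct vcache *tvc;
 *
 *     ObtainSharedLock(&afs_xvcache, 333);
 *     tvc = afs_FindVCache(&afid, &retry, DO_STATS | DO_VLRU | IS_SLOCK);
 *     ReleaseSharedLock(&afs_xvcache);
 *     if (tvc) {
 *         ... use the entry, then ...
 *         afs_PutVCache(tvc);
 *     }
 */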
2949 * Find a vcache entry given a fid. Does a wildcard match on what we
2950 * have for the fid. If more than one entry, don't return anything.
2952 * \param avcp Fill in pointer if we found one and only one.
2953 * \param afid Pointer to the fid whose cache entry we desire.
2954 * \param retry (SGI-specific) tell the caller to drop the lock on xvcache,
2955 * unlock the vnode, and try again.
2956 * \param flags bit 1 to specify whether to compute hit statistics. Not
2957 * set if FindVCache is called as part of internal bookkeeping.
2959 * \note Environment: Must be called with the afs_xvcache lock at least held at
2960 * the read level. In order to do the VLRU adjustment, the xvcache lock
2961 * must be shared-- we upgrade it here.
2963 * \return Number of matches found.
2966 int afs_duplicate_nfs_fids = 0;
2969 afs_NFSFindVCache(struct vcache **avcp, struct VenusFid *afid)
2971 register struct vcache *tvc;
2973 afs_int32 count = 0;
2974 struct vcache *found_tvc = NULL;
2978 #ifdef AFS_DARWIN80_ENV
2982 AFS_STATCNT(afs_FindVCache);
2986 ObtainSharedLock(&afs_xvcache, 331);
2989 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2990 /* Match only on what we have.... */
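/* (The masks below compare only the low-order bits of Vnode and Unique --
 * presumably because the NFS translator's file handles preserve only that
 * much of the original fid.) */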
2991 if (((tvc->fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
2992 && (tvc->fid.Fid.Volume == afid->Fid.Volume)
2993 && ((tvc->fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
2994 && (tvc->fid.Cell == afid->Cell)) {
2995 if (tvc->states & CVInit) {
2996 ReleaseSharedLock(&afs_xvcache);
2997 afs_osi_Sleep(&tvc->states);
3001 /* Grab this vnode, possibly reactivating from the free list */
3003 vg = vget(AFSTOV(tvc));
3006 /* This vnode no longer exists. */
3009 #endif /* AFS_OSF_ENV */
3010 #ifdef AFS_DARWIN80_ENV
3011 if (tvc->states & CDeadVnode) {
3012 ReleaseSharedLock(&afs_xvcache);
3013 afs_osi_Sleep(&tvc->states);
3017 if (vnode_get(tvp)) {
3018 /* This vnode no longer exists. */
3021 if (vnode_ref(tvp)) {
3022 /* This vnode no longer exists. */
3024 /* AFSTOV(tvc) may be NULL */
3029 #endif /* AFS_DARWIN80_ENV */
3034 /* Drop our reference counts. */
3036 vrele(AFSTOV(found_tvc));
3038 afs_duplicate_nfs_fids++;
3039 ReleaseSharedLock(&afs_xvcache);
3040 #ifdef AFS_DARWIN80_ENV
3041 /* Drop our reference counts. */
3042 vnode_put(AFSTOV(tvc));
3043 vnode_put(AFSTOV(found_tvc));
3052 /* should I have a read lock on the vnode here? */
3054 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
3055 afs_int32 retry = 0;
3056 osi_vnhold(tvc, &retry);
3059 found_tvc = (struct vcache *)0;
3060 ReleaseSharedLock(&afs_xvcache);
3061 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
3065 #if !defined(AFS_OSF_ENV)
3066 osi_vnhold(tvc, (int *)0); /* already held, above */
3070 * We obtained the xvcache lock above.
3072 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
3073 refpanic("FindVC VLRU inconsistent1");
3075 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
3076 refpanic("FindVC VLRU inconsistent1");
3078 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
3079 refpanic("FindVC VLRU inconsistent2");
3081 UpgradeSToWLock(&afs_xvcache, 568);
3082 QRemove(&tvc->vlruq);
3083 QAdd(&VLRU, &tvc->vlruq);
3084 ConvertWToSLock(&afs_xvcache);
3085 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
3086 refpanic("FindVC VLRU inconsistent1");
3088 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
3089 refpanic("FindVC VLRU inconsistent2");
3091 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
3092 refpanic("FindVC VLRU inconsistent3");
3098 afs_stats_cmperf.vcacheHits++;
3100 afs_stats_cmperf.vcacheMisses++;
3101 if (afs_IsPrimaryCellNum(afid->Cell))
3102 afs_stats_cmperf.vlocalAccesses++;
3104 afs_stats_cmperf.vremoteAccesses++;
3106 *avcp = tvc; /* May be null */
3108 ReleaseSharedLock(&afs_xvcache);
3109 return (tvc ? 1 : 0);
3111 } /*afs_NFSFindVCache */
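/*
 * Illustrative sketch (not part of the original source): the NFS translator
 * side would map a partially-reconstructed fid back to a vcache roughly like
 * this; how "partialFid" is decoded from an NFS file handle is outside this
 * file and is assumed for the example.
 *
 *     struct vcache *tvc = NULL;
 *
 *     if (afs_NFSFindVCache(&tvc, &partialFid) && tvc) {
 *         ... exactly one entry matched; use tvc, then release the
 *         reference that was taken on it ...
 *     }
 */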
3117 * Initialize vcache-related variables.
3122 afs_vcacheInit(int astatSize)
3124 register struct vcache *tvp;
3126 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
3127 if (!afs_maxvcount) {
3128 #if defined(AFS_LINUX22_ENV)
3129 afs_maxvcount = astatSize; /* no particular limit on linux? */
3130 #elif defined(AFS_OSF30_ENV)
3131 afs_maxvcount = max_vnodes / 2; /* limit ourselves to half the total */
3133 afs_maxvcount = nvnode / 2; /* limit ourselves to half the total */
3135 if (astatSize < afs_maxvcount) {
3136 afs_maxvcount = astatSize;
3139 #else /* AFS_OSF_ENV */
3143 RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
3144 LOCK_INIT(&afs_xvcb, "afs_xvcb");
3146 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
3147 /* Allocate and thread the struct vcache entries */
3148 tvp = (struct vcache *)afs_osi_Alloc(astatSize * sizeof(struct vcache));
3149 memset((char *)tvp, 0, sizeof(struct vcache) * astatSize);
3151 Initial_freeVCList = tvp;
3152 freeVCList = &(tvp[0]);
3153 for (i = 0; i < astatSize - 1; i++) {
3154 tvp[i].nextfree = &(tvp[i + 1]);
3156 tvp[astatSize - 1].nextfree = NULL;
3157 #ifdef KERNEL_HAVE_PIN
3158 pin((char *)tvp, astatSize * sizeof(struct vcache)); /* XXX */
3162 #if defined(AFS_SGI_ENV)
3163 for (i = 0; i < astatSize; i++) {
3164 char name[METER_NAMSZ];
3165 struct vcache *tvc = &tvp[i];
3167 tvc->v.v_number = ++afsvnumbers;
3168 tvc->vc_rwlockid = OSI_NO_LOCKID;
3169 initnsema(&tvc->vc_rwlock, 1,
3170 makesname(name, "vrw", tvc->v.v_number));
3171 #ifndef AFS_SGI53_ENV
3172 initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
3174 #ifndef AFS_SGI62_ENV
3175 initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
3176 #endif /* AFS_SGI62_ENV */
3180 for(i = 0; i < VCSIZE; ++i)
3181 QInit(&afs_vhashTV[i]);
3188 shutdown_vcache(void)
3191 struct afs_cbr *tsp, *nsp;
3193 * XXX We may potentially miss some of the vcaches: when there are no
3194 * free vcache entries and all the vcache entries are active, we allocate
3195 * an additional one - admittedly we have almost never seen that occur.
3199 register struct afs_q *tq, *uq;
3200 register struct vcache *tvc;
3201 for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
3205 osi_FreeSmallSpace(tvc->mvid);
3206 tvc->mvid = (struct VenusFid *)0;
3209 aix_gnode_rele(AFSTOV(tvc));
3211 if (tvc->linkData) {
3212 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
3217 * Also free the remaining ones in the Cache
3219 for (i = 0; i < VCSIZE; i++) {
3220 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3222 osi_FreeSmallSpace(tvc->mvid);
3223 tvc->mvid = (struct VenusFid *)0;
3227 afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
3228 #ifdef AFS_AIX32_ENV
3231 vms_delete(tvc->segid);
3233 tvc->segid = tvc->vmh = NULL;
3234 if (VREFCOUNT_GT(tvc,0))
3235 osi_Panic("flushVcache: vm race");
3243 #if defined(AFS_SUN5_ENV)
3249 if (tvc->linkData) {
3250 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
3254 afs_FreeAllAxs(&(tvc->Access));
3260 * Free any leftover callback queue
3262 for (tsp = afs_cbrSpace; tsp; tsp = nsp) {
3264 afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
3268 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
3269 afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3271 #ifdef KERNEL_HAVE_PIN
3272 unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3275 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
3276 freeVCList = Initial_freeVCList = 0;
3278 RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
3279 LOCK_INIT(&afs_xvcb, "afs_xvcb");
3281 for(i = 0; i < VCSIZE; ++i)
3282 QInit(&afs_vhashTV[i]);
3285 #ifdef AFS_DISCON_ENV
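/*!
 * Walk all vcache entries and give up the callbacks held on files that are
 * not read-only, presumably so that no callback promises remain outstanding
 * while the client operates disconnected.
 */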
3286 void afs_DisconGiveUpCallbacks(void) {
3291 ObtainWriteLock(&afs_xvcache, 1002); /* XXX - should be a unique number */
3293 /* Somehow, walk the set of vcaches, with each one coming out as tvc */
3294 for (i = 0; i < VCSIZE; i++) {
3295 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3296 if ((tvc->states & CRO) == 0 && tvc->callback) {
3298 tvc->callback = NULL;
3303 /*printf("%d callbacks to be discarded. queued ... ", nq);*/
3306 ReleaseWriteLock(&afs_xvcache);
3307 /*printf("gone\n");*/