2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 * afs_FlushActiveVcaches
22 * afs_WriteVCacheDiscon
40 #include <afsconfig.h>
41 #include "afs/param.h"
44 #include "afs/sysincludes.h" /*Standard vendor system headers */
45 #include "afsincludes.h" /*AFS-based standard headers */
46 #include "afs/afs_stats.h"
47 #include "afs/afs_cbqueue.h"
48 #include "afs/afs_osidnlc.h"
50 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
51 afs_int32 afs_maxvcount = 0; /* max number of vcache entries */
52 afs_int32 afs_vcount = 0; /* number of vcache in use now */
53 #endif /* AFS_OSF_ENV */
61 #endif /* AFS_SGI64_ENV */
63 /* Exported variables */
65 afs_rwlock_t afs_xvcdirty; /*Lock: discon vcache dirty list mgmt */
67 afs_rwlock_t afs_xvcache; /*Lock: alloc new stat cache entries */
68 afs_rwlock_t afs_xvreclaim; /*Lock: entries reclaimed, not on free list */
69 afs_lock_t afs_xvcb; /*Lock: fids on which there are callbacks */
70 #if !defined(AFS_LINUX22_ENV)
71 static struct vcache *freeVCList; /*Free list for stat cache entries */
72 struct vcache *ReclaimedVCList; /*Reclaimed list for stat entries */
73 static struct vcache *Initial_freeVCList; /*Initial list for above */
75 struct afs_q VLRU; /*vcache LRU */
76 afs_int32 vcachegen = 0;
77 unsigned int afs_paniconwarn = 0;
78 struct vcache *afs_vhashT[VCSIZE];
79 struct afs_q afs_vhashTV[VCSIZE];
80 static struct afs_cbr *afs_cbrHashT[CBRSIZE];
81 afs_int32 afs_bulkStatsLost;
82 int afs_norefpanic = 0;
85 /* Disk backed vcache definitions
86 * Both protected by xvcache */
88 static int afs_nextVcacheSlot = 0;
89 static struct afs_slotlist *afs_freeSlotList = NULL;
92 /* Forward declarations */
93 static afs_int32 afs_QueueVCB(struct vcache *avc);
96 * Generate an index into the hash table for a given Fid.
98 * \return The hash value.
101 afs_HashCBRFid(struct AFSFid *fid)
103 return (fid->Volume + fid->Vnode + fid->Unique) % CBRSIZE;
107 * Insert a CBR entry into the hash table.
108 * Must be called with afs_xvcb held.
113 afs_InsertHashCBR(struct afs_cbr *cbr)
115 int slot = afs_HashCBRFid(&cbr->fid);
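/* Head-insert into this slot's chain.  hash_pprev always points at the
 * word that points to this entry (the table slot or the previous entry's
 * hash_next), so afs_FreeCBR can unlink an entry in O(1) without walking
 * the chain. */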
117 cbr->hash_next = afs_cbrHashT[slot];
118 if (afs_cbrHashT[slot])
119 afs_cbrHashT[slot]->hash_pprev = &cbr->hash_next;
121 cbr->hash_pprev = &afs_cbrHashT[slot];
122 afs_cbrHashT[slot] = cbr;
127 * Flush the given vcache entry.
130 * afs_xvcache lock must be held for writing upon entry to
131 * prevent people from changing the vrefCount field, and to
132 * protect the lruq and hnext fields.
133 * LOCK: afs_FlushVCache afs_xvcache W
134 * REFCNT: vcache ref count must be zero on entry except for osf1
135 * RACE: lock is dropped and reobtained, permitting race in caller
137 * \param avc Pointer to vcache entry to flush.
138 * \param slept Pointer to int to set 1 if we sleep/drop locks, 0 if we don't.
142 afs_FlushVCache(struct vcache *avc, int *slept)
143 { /*afs_FlushVCache */
146 struct vcache **uvc, *wvc;
149 AFS_STATCNT(afs_FlushVCache);
150 afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
151 ICL_TYPE_INT32, avc->f.states);
154 VN_LOCK(AFSTOV(avc));
158 code = osi_VM_FlushVCache(avc, slept);
162 if (avc->f.states & CVFlushed) {
166 #if !defined(AFS_LINUX22_ENV)
167 if (avc->nextfree || !avc->vlruq.prev || !avc->vlruq.next) { /* qv afs.h */
168 refpanic("LRU vs. Free inconsistency");
171 avc->f.states |= CVFlushed;
172 /* pull the entry out of the lruq and put it on the free list */
173 QRemove(&avc->vlruq);
175 /* keep track of # of files that we bulk stat'd, but never used
176 * before they got recycled.
178 if (avc->f.states & CBulkStat)
181 /* remove entry from the hash chain */
182 i = VCHash(&avc->f.fid);
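/* Walk the fid hash chain with a pointer-to-pointer so that, once avc is
 * found, it can be spliced out of the chain in place. */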
183 uvc = &afs_vhashT[i];
184 for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
187 avc->hnext = (struct vcache *)NULL;
192 /* remove entry from the volume hash table */
193 QRemove(&avc->vhashq);
196 osi_FreeSmallSpace(avc->mvid);
197 avc->mvid = (struct VenusFid *)0;
199 afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
200 avc->linkData = NULL;
202 #if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
203 /* OK, there are no internal vrefCounts, so there shouldn't
204 * be any more refs here. */
206 #ifdef AFS_DARWIN80_ENV
207 vnode_clearfsnode(AFSTOV(avc));
208 vnode_removefsref(AFSTOV(avc));
210 avc->v->v_data = NULL; /* remove from vnode */
212 AFSTOV(avc) = NULL; /* also drop the ptr to vnode */
215 #ifdef AFS_SUN510_ENV
216 /* As we use private vnodes, cleanup is up to us */
217 vn_reinit(AFSTOV(avc));
219 afs_FreeAllAxs(&(avc->Access));
221 /* we can't really give back callbacks on RO files, since the
222 * server only tracks them on a per-volume basis, and we don't
223 * know whether we still have some other files from the same
225 if ((avc->f.states & CRO) == 0 && avc->callback) {
228 ObtainWriteLock(&afs_xcbhash, 460);
229 afs_DequeueCallback(avc); /* remove it from queued callbacks list */
230 avc->f.states &= ~(CStatd | CUnique);
231 ReleaseWriteLock(&afs_xcbhash);
232 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
233 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
235 osi_dnlc_purgevp(avc);
238 * Next, keep track of which vnodes we've deleted for create's
239 * optimistic synchronization algorithm
242 if (avc->f.fid.Fid.Vnode & 1)
247 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
248 /* put the entry in the free list */
249 avc->nextfree = freeVCList;
251 if (avc->vlruq.prev || avc->vlruq.next) {
252 refpanic("LRU vs. Free inconsistency");
254 avc->f.states |= CVFlushed;
256 /* This should put it back on the vnode free list since usecount is 1 */
259 if (VREFCOUNT_GT(avc,0)) {
260 #if defined(AFS_OSF_ENV)
261 VN_UNLOCK(AFSTOV(avc));
263 AFS_RELE(AFSTOV(avc));
264 afs_stats_cmperf.vcacheXAllocs--;
266 if (afs_norefpanic) {
267 printf("flush vc refcnt < 1");
269 #if defined(AFS_OSF_ENV)
270 (void)vgone(avc, VX_NOSLEEP, NULL);
272 VN_UNLOCK(AFSTOV(avc));
275 osi_Panic("flush vc refcnt < 1");
277 #endif /* AFS_OSF_ENV */
282 VN_UNLOCK(AFSTOV(avc));
286 } /*afs_FlushVCache */
290 * The core of the inactive vnode op for all but IRIX.
296 afs_InactiveVCache(struct vcache *avc, afs_ucred_t *acred)
298 AFS_STATCNT(afs_inactive);
299 if (avc->f.states & CDirty) {
300 /* we can't keep trying to push back dirty data forever. Give up. */
301 afs_InvalidateAllSegments(avc); /* turns off dirty bit */
303 avc->f.states &= ~CMAPPED; /* mainly used by SunOS 4.0.x */
304 avc->f.states &= ~CDirty; /* Turn it off */
305 if (avc->f.states & CUnlinked) {
306 if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
307 avc->f.states |= CUnlinkedDel;
310 afs_remunlink(avc, 1); /* ignore any return code */
317 * Allocate a callback return structure from the
318 * free list and return it.
320 * Environment: The alloc and free routines are both called with the afs_xvcb lock
321 * held, so we don't have to worry about blocking in osi_Alloc.
323 * \return The allocated afs_cbr.
325 static struct afs_cbr *afs_cbrSpace = 0;
326 /* if alloc limit below changes, fix me! */
327 static struct afs_cbr *afs_cbrHeads[2];
331 register struct afs_cbr *tsp;
334 while (!afs_cbrSpace) {
335 if (afs_stats_cmperf.CallBackAlloced >= 2) {
336 /* don't allocate more than 2 * AFS_NCBRS for now */
338 afs_stats_cmperf.CallBackFlushes++;
342 (struct afs_cbr *)afs_osi_Alloc(AFS_NCBRS *
343 sizeof(struct afs_cbr));
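/* Carve the newly allocated block into a singly linked free list, and
 * remember the block itself in afs_cbrHeads so it can be located (and
 * presumably freed at shutdown) later. */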
344 for (i = 0; i < AFS_NCBRS - 1; i++) {
345 tsp[i].next = &tsp[i + 1];
347 tsp[AFS_NCBRS - 1].next = 0;
349 afs_cbrHeads[afs_stats_cmperf.CallBackAlloced] = tsp;
350 afs_stats_cmperf.CallBackAlloced++;
354 afs_cbrSpace = tsp->next;
359 * Free a callback return structure, removing it from all lists.
361 * Environment: the xvcb lock is held over these calls.
363 * \param asp The address of the structure to free.
368 afs_FreeCBR(register struct afs_cbr *asp)
370 *(asp->pprev) = asp->next;
372 asp->next->pprev = asp->pprev;
374 *(asp->hash_pprev) = asp->hash_next;
376 asp->hash_next->hash_pprev = asp->hash_pprev;
378 asp->next = afs_cbrSpace;
384 * Flush all queued callbacks to all servers.
386 * Environment: holds xvcb lock over RPC to guard against race conditions
387 * when a new callback is granted for the same file later on.
389 * \return 0 for success.
392 afs_FlushVCBs(afs_int32 lockit)
394 struct AFSFid *tfids;
395 struct AFSCallBack callBacks[1];
396 struct AFSCBFids fidArray;
397 struct AFSCBs cbArray;
399 struct afs_cbr *tcbrp;
403 struct vrequest treq;
405 int safety1, safety2, safety3;
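/* safety1 bounds the walk down each server chain, safety2 bounds how many
 * queued callbacks we process per server, and safety3 bounds the retries
 * of the GiveUpCallBacks RPC, so a corrupted list cannot loop forever. */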
407 if ((code = afs_InitReq(&treq, afs_osi_credp)))
409 treq.flags |= O_NONBLOCK;
410 tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
413 MObtainWriteLock(&afs_xvcb, 273);
414 ObtainReadLock(&afs_xserver);
415 for (i = 0; i < NSERVERS; i++) {
416 for (safety1 = 0, tsp = afs_servers[i];
417 tsp && safety1 < afs_totalServers + 10;
418 tsp = tsp->next, safety1++) {
420 if (tsp->cbrs == (struct afs_cbr *)0)
423 /* otherwise, grab a block of AFS_MAXCBRSCALL from the list
424 * and make an RPC, over and over again.
426 tcount = 0; /* number found so far */
427 for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
428 if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
429 /* if buffer is full, or we've queued all we're going
430 * to from this server, we should flush out the
433 fidArray.AFSCBFids_len = tcount;
434 fidArray.AFSCBFids_val = (struct AFSFid *)tfids;
435 cbArray.AFSCBs_len = 1;
436 cbArray.AFSCBs_val = callBacks;
437 memset(&callBacks[0], 0, sizeof(callBacks[0]));
438 callBacks[0].CallBackType = CB_EXCLUSIVE;
439 for (safety3 = 0; safety3 < MAXHOSTS * 2; safety3++) {
440 tc = afs_ConnByHost(tsp, tsp->cell->fsport,
441 tsp->cell->cellNum, &treq, 0,
445 (AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
448 RXAFS_GiveUpCallBacks(tc->id, &fidArray,
456 AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS, SHARED_LOCK,
461 /* ignore return code, since callbacks may have
462 * been returned anyway, we shouldn't leave them
463 * around to be returned again.
465 * Next, see if we are done with this server, and if so,
466 * break to deal with the next one.
472 /* if to flush full buffer */
473 /* if we make it here, we have an entry at the head of cbrs,
474 * which we should copy to the file ID array and then free.
477 tfids[tcount++] = tcbrp->fid;
479 /* Freeing the CBR will unlink it from the server's CBR list */
481 } /* while loop for this one server */
482 if (safety2 > afs_cacheStats) {
483 afs_warn("possible internal error afs_flushVCBs (%d)\n",
486 } /* for loop for this hash chain */
487 } /* loop through all hash chains */
488 if (safety1 > afs_totalServers + 2) {
490 ("AFS internal error (afs_flushVCBs) (%d > %d), continuing...\n",
491 safety1, afs_totalServers + 2);
493 osi_Panic("afs_flushVCBS safety1");
496 ReleaseReadLock(&afs_xserver);
498 MReleaseWriteLock(&afs_xvcb);
499 afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
504 * Queue a callback on the given fid.
507 * Locks the xvcb lock.
508 * Called when the xvcache lock is already held.
510 * \param avc vcache entry
511 * \return 0 for success < 0 otherwise.
515 afs_QueueVCB(struct vcache *avc)
518 struct afs_cbr *tcbp;
520 AFS_STATCNT(afs_QueueVCB);
521 /* The callback is really just a struct server ptr. */
522 tsp = (struct server *)(avc->callback);
524 /* we now have a pointer to the server, so we just allocate
525 * a queue entry and queue it.
527 MObtainWriteLock(&afs_xvcb, 274);
528 tcbp = afs_AllocCBR();
529 tcbp->fid = avc->f.fid.Fid;
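/* Link the new entry onto both the server's cbrs list and the fid hash
 * table, so it can be flushed per-server by afs_FlushVCBs and cancelled
 * per-fid by afs_RemoveVCB. */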
531 tcbp->next = tsp->cbrs;
533 tsp->cbrs->pprev = &tcbp->next;
536 tcbp->pprev = &tsp->cbrs;
538 afs_InsertHashCBR(tcbp);
540 /* now release locks and return */
541 MReleaseWriteLock(&afs_xvcb);
547 * Remove a queued callback for a given Fid.
550 * Locks xvcb and xserver locks.
551 * Typically called with xdcache, xvcache and/or individual vcache
554 * \param afid The fid we want cleansed of queued callbacks.
559 afs_RemoveVCB(struct VenusFid *afid)
562 struct afs_cbr *cbr, *ncbr;
564 AFS_STATCNT(afs_RemoveVCB);
565 MObtainWriteLock(&afs_xvcb, 275);
567 slot = afs_HashCBRFid(&afid->Fid);
568 ncbr = afs_cbrHashT[slot];
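/* Walk the hash chain, saving each entry's successor before comparing it,
 * since a matching entry is freed and unlinked out from under the walk. */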
572 ncbr = cbr->hash_next;
574 if (afid->Fid.Volume == cbr->fid.Volume &&
575 afid->Fid.Vnode == cbr->fid.Vnode &&
576 afid->Fid.Unique == cbr->fid.Unique) {
581 MReleaseWriteLock(&afs_xvcb);
585 afs_FlushReclaimedVcaches(void)
587 #if !defined(AFS_LINUX22_ENV)
590 struct vcache *tmpReclaimedVCList = NULL;
592 ObtainWriteLock(&afs_xvreclaim, 76);
593 while (ReclaimedVCList) {
594 tvc = ReclaimedVCList; /* take from free list */
595 ReclaimedVCList = tvc->nextfree;
596 tvc->nextfree = NULL;
597 code = afs_FlushVCache(tvc, &fv_slept);
599 /* Ok, so, if we got code != 0, uh, wtf do we do? */
600 /* Probably, build a temporary list and then put all back when we
601 get to the end of the list */
602 /* This is actually really crappy, but we need to not leak these.
603 We probably need a way to be smarter about this. */
604 tvc->nextfree = tmpReclaimedVCList;
605 tmpReclaimedVCList = tvc;
606 printf("Reclaim list flush %lx failed: %d\n", (unsigned long) tvc, code);
608 if (tvc->f.states & (CVInit
609 #ifdef AFS_DARWIN80_ENV
613 tvc->f.states &= ~(CVInit
614 #ifdef AFS_DARWIN80_ENV
618 afs_osi_Wakeup(&tvc->f.states);
621 if (tmpReclaimedVCList)
622 ReclaimedVCList = tmpReclaimedVCList;
624 ReleaseWriteLock(&afs_xvreclaim);
629 afs_ShakeLooseVCaches(afs_int32 anumber)
631 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
634 struct afs_q *tq, *uq;
636 afs_int32 target = anumber;
639 /* Should probably deal better */
640 if (!ISAFS_GLOCK()) {
645 if (afsd_dynamic_vcaches || afs_vcount >= afs_maxvcount) {
647 for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
650 if (tvc->f.states & CVFlushed) {
651 refpanic("CVFlushed on VLRU");
652 } else if (!afsd_dynamic_vcaches && i++ > afs_maxvcount) {
653 refpanic("Exceeded pool of AFS vnodes(VLRU cycle?)");
654 } else if (QNext(uq) != tq) {
655 refpanic("VLRU inconsistent");
656 } else if (!VREFCOUNT_GT(tvc,0)) {
657 refpanic("refcnt 0 on VLRU");
660 #if defined(AFS_LINUX22_ENV)
661 if (tvc != afs_globalVp && VREFCOUNT(tvc) > 1 && tvc->opens == 0) {
662 struct dentry *dentry;
663 struct list_head *cur, *head;
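/* Try to prune the dentries aliasing this inode so the vnode can actually
 * be recycled; on 2.4-era kernels the alias list is protected by
 * dcache_lock. */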
665 #if defined(AFS_LINUX24_ENV)
666 spin_lock(&dcache_lock);
667 #endif /* AFS_LINUX24_ENV */
668 head = &(AFSTOV(tvc))->i_dentry;
672 while ((cur = cur->next) != head) {
673 dentry = list_entry(cur, struct dentry, d_alias);
675 if (d_unhashed(dentry))
680 #if defined(AFS_LINUX24_ENV)
681 spin_unlock(&dcache_lock);
682 #endif /* AFS_LINUX24_ENV */
683 if (d_invalidate(dentry) == -EBUSY) {
685 /* perhaps lock and try to continue? (use cur as head?) */
689 #if defined(AFS_LINUX24_ENV)
690 spin_lock(&dcache_lock);
691 #endif /* AFS_LINUX24_ENV */
694 #if defined(AFS_LINUX24_ENV)
695 spin_unlock(&dcache_lock);
696 #endif /* AFS_LINUX24_ENV */
700 #endif /* AFS_LINUX22_ENV */
702 if (VREFCOUNT_GT(tvc,0) && !VREFCOUNT_GT(tvc,1) &&
704 && (tvc->f.states & CUnlinkedDel) == 0) {
705 code = afs_FlushVCache(tvc, &fv_slept);
712 continue; /* start over - may have raced. */
718 if (!afsd_dynamic_vcaches && anumber == target) {
719 printf("afs_ShakeLooseVCaches: warning none freed, using %d of %d\n",
720 afs_vcount, afs_maxvcount);
722 } /* finished freeing up space */
724 printf("recycled %d entries\n", target-anumber);
732 /* Alloc new vnode. */
734 static struct vcache *
735 afs_AllocVCache(void)
738 #if defined(AFS_OSF30_ENV)
741 if (getnewvnode(MOUNT_AFS, &Afs_vnodeops, &nvc)) {
742 /* What should we do ???? */
743 osi_Panic("afs_AllocVCache: no more vnodes");
748 tvc->nextfree = NULL;
750 #elif defined(AFS_LINUX22_ENV)
754 ip = new_inode(afs_globalVFS);
756 osi_Panic("afs_AllocVCache: no more inodes");
758 #if defined(STRUCT_SUPER_HAS_ALLOC_INODE)
761 tvc = afs_osi_Alloc(sizeof(struct vcache));
762 ip->u.generic_ip = tvc;
769 if (afsd_dynamic_vcaches && afs_maxvcount < afs_vcount) {
770 afs_maxvcount = afs_vcount;
771 /*printf("peak vnodes: %d\n", afs_maxvcount);*/
774 afs_stats_cmperf.vcacheXAllocs++; /* count in case we have a leak */
776 /* none free, making one is better than a panic */
777 afs_stats_cmperf.vcacheXAllocs++; /* count in case we have a leak */
778 tvc = (struct vcache *)afs_osi_Alloc(sizeof(struct vcache));
779 #if defined(AFS_DARWIN_ENV) && !defined(UKERNEL)
780 tvc->v = NULL; /* important to clean this, or use memset 0 */
782 #ifdef KERNEL_HAVE_PIN
783 pin((char *)tvc, sizeof(struct vcache)); /* XXX */
785 #if defined(AFS_SGI_ENV)
787 char name[METER_NAMSZ];
788 memset(tvc, 0, sizeof(struct vcache));
789 tvc->v.v_number = ++afsvnumbers;
790 tvc->vc_rwlockid = OSI_NO_LOCKID;
791 initnsema(&tvc->vc_rwlock, 1,
792 makesname(name, "vrw", tvc->v.v_number));
793 #ifndef AFS_SGI53_ENV
794 initnsema(&tvc->v.v_sync, 0,
795 makesname(name, "vsy", tvc->v.v_number));
797 #ifndef AFS_SGI62_ENV
798 initnlock(&tvc->v.v_lock,
799 makesname(name, "vlk", tvc->v.v_number));
802 #endif /* AFS_SGI_ENV */
804 #ifdef AFS_DISCON_ENV
805 /* If we create a new inode, we either give it a new slot number,
806 * or if one's available, use a slot number from the slot free list
808 if (afs_freeSlotList != NULL) {
809 struct afs_slotlist *tmp;
811 tvc->diskSlot = afs_freeSlotList->slot;
812 tmp = afs_freeSlotList;
813 afs_freeSlotList = tmp->next;
814 afs_osi_Free(tmp, sizeof(struct afs_slotlist));
816 tvc->diskSlot = afs_nextVcacheSlot++;
824 * This routine is responsible for allocating a new cache entry
825 * from the free list. It formats the cache entry and inserts it
826 * into the appropriate hash tables. It must be called with
827 * afs_xvcache write-locked so as to prevent several processes from
828 * trying to create a new cache entry simultaneously.
830 * LOCK: afs_NewVCache afs_xvcache W
832 * \param afid The file id of the file whose cache entry is being created.
834 * \return The new vcache struct.
837 afs_NewVCache(struct VenusFid *afid, struct server *serverp)
841 afs_int32 anumber = VCACHE_FREE;
843 struct gnode *gnodepnt;
845 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
846 struct afs_q *tq, *uq;
850 AFS_STATCNT(afs_NewVCache);
852 afs_FlushReclaimedVcaches();
854 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
855 if(!afsd_dynamic_vcaches) {
856 afs_ShakeLooseVCaches(anumber);
857 if (afs_vcount >= afs_maxvcount) {
858 printf("afs_NewVCache - none freed\n");
862 tvc = afs_AllocVCache();
863 #else /* AFS_OSF_ENV */
864 /* pull out a free cache entry */
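/* The VLRU is scanned from its tail (the least recently used end); the
 * first entry with no references (on Darwin, only the UBC reference), no
 * opens and no pending unlink is flushed and reused. */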
868 for (tq = VLRU.prev; (anumber > 0) && (tq != &VLRU); tq = uq) {
872 if (tvc->f.states & CVFlushed) {
873 refpanic("CVFlushed on VLRU");
874 } else if (i++ > 2 * afs_cacheStats) { /* even allowing for a few xallocs... */
875 refpanic("Increase -stat parameter of afsd(VLRU cycle?)");
876 } else if (QNext(uq) != tq) {
877 refpanic("VLRU inconsistent");
878 } else if (tvc->f.states & CVInit) {
882 if (!VREFCOUNT_GT(tvc,0)
883 #if defined(AFS_DARWIN_ENV) && !defined(UKERNEL) && !defined(AFS_DARWIN80_ENV)
884 || ((VREFCOUNT(tvc) == 1) &&
885 (UBCINFOEXISTS(AFSTOV(tvc))))
887 && tvc->opens == 0 && (tvc->f.states & CUnlinkedDel) == 0) {
888 #if defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
889 #ifdef AFS_DARWIN80_ENV
890 vnode_t tvp = AFSTOV(tvc);
891 /* VREFCOUNT_GT only sees usecounts, not iocounts */
892 /* so this may fail to actually recycle the vnode now */
893 /* must call vnode_get to avoid races. */
895 if (vnode_get(tvp) == 0) {
897 /* must release lock, since vnode_put will immediately
898 reclaim if there are no other users */
899 ReleaseWriteLock(&afs_xvcache);
904 ObtainWriteLock(&afs_xvcache, 336);
906 /* we can't use the vnode_recycle return value to figure
907 * this out, since the iocount we have to hold makes it
909 if (AFSTOV(tvc) == tvp) {
910 if (anumber > 0 && fv_slept) {
911 QRemove(&tvc->vlruq);
912 QAdd(&VLRU, &tvc->vlruq);
917 #else /* AFS_DARWIN80_ENV */
919 * vgone() reclaims the vnode, which calls afs_FlushVCache(),
920 * then it puts the vnode on the free list.
921 * If we don't do this we end up with a cleaned vnode that's
922 * not on the free list.
923 * XXX assume FreeBSD is the same for now.
931 #else /* AFS_DARWIN80_ENV || AFS_XBSD_ENV */
932 code = afs_FlushVCache(tvc, &fv_slept);
933 #endif /* AFS_DARWIN80_ENV || AFS_XBSD_ENV */
942 continue; /* start over - may have raced. */
948 } /* end of if (!freeVCList) */
951 tvc = afs_AllocVCache();
953 tvc = freeVCList; /* take from free list */
954 freeVCList = tvc->nextfree;
955 tvc->nextfree = NULL;
956 } /* end of if (!freeVCList) */
958 #endif /* AFS_OSF_ENV */
960 #if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
962 panic("afs_NewVCache(): free vcache with vnode attached");
965 #if !defined(AFS_SGI_ENV) && !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
967 #if defined(AFS_DISCON_ENV)
968 /* We need to preserve the slot that we're being stored into on
972 slot = tvc->diskSlot;
973 memset(tvc, 0, sizeof(struct vcache));
974 tvc->diskSlot = slot;
977 memset(tvc, 0, sizeof(struct vcache));
982 memset(&(tvc->f), 0, sizeof(struct fvcache));
985 AFS_RWLOCK_INIT(&tvc->lock, "vcache lock");
986 #if defined(AFS_SUN5_ENV)
987 AFS_RWLOCK_INIT(&tvc->vlock, "vcache vlock");
988 #endif /* defined(AFS_SUN5_ENV) */
991 tvc->linkData = NULL;
994 tvc->execsOrWriters = 0;
996 tvc->f.states = CVInit;
997 tvc->last_looker = 0;
999 tvc->asynchrony = -1;
1001 #if defined(AFS_LINUX26_ENV)
1005 tvc->flushDV.low = tvc->flushDV.high = AFS_MAXDV;
1008 tvc->f.truncPos = AFS_NOTRUNC; /* don't truncate until we need to */
1009 hzero(tvc->f.m.DataVersion); /* in case we copy it into flushDV */
1011 tvc->callback = serverp; /* to minimize chance that clear
1012 * request is lost */
1013 #if defined(AFS_DISCON_ENV)
1014 QZero(&tvc->metadirty);
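/* Insert the new entry at the head of its fid hash chain and on the
 * per-volume hash queue, then put it at the front of the VLRU,
 * sanity-checking the VLRU links along the way. */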
1020 tvc->hnext = afs_vhashT[i];
1021 afs_vhashT[i] = tvc;
1022 QAdd(&afs_vhashTV[j], &tvc->vhashq);
1024 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
1025 refpanic("NewVCache VLRU inconsistent");
1027 QAdd(&VLRU, &tvc->vlruq); /* put in lruq */
1028 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
1029 refpanic("NewVCache VLRU inconsistent2");
1031 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
1032 refpanic("NewVCache VLRU inconsistent3");
1034 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
1035 refpanic("NewVCache VLRU inconsistent4");
1038 /* it should now be safe to drop the xvcache lock */
1040 ReleaseWriteLock(&afs_xvcache);
1042 afs_nbsd_getnewvnode(tvc); /* includes one refcount */
1044 ObtainWriteLock(&afs_xvcache,337);
1045 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
1047 #ifdef AFS_DARWIN_ENV
1048 ReleaseWriteLock(&afs_xvcache);
1050 afs_darwin_getnewvnode(tvc); /* includes one refcount */
1052 ObtainWriteLock(&afs_xvcache,338);
1053 #ifdef AFS_DARWIN80_ENV
1054 LOCKINIT(tvc->rwlock);
1056 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
1063 ReleaseWriteLock(&afs_xvcache);
1065 #if defined(AFS_FBSD60_ENV)
1066 if (getnewvnode(MOUNT_AFS, afs_globalVFS, &afs_vnodeops, &vp))
1067 #elif defined(AFS_FBSD50_ENV)
1068 if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
1070 if (getnewvnode(VT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
1072 panic("afs getnewvnode"); /* can't happen */
1074 ObtainWriteLock(&afs_xvcache,339);
1075 if (tvc->v != NULL) {
1076 /* I'd like to know if this ever happens...
1077 * We don't drop global for the rest of this function,
1078 * so if we do lose the race, the other thread should
1079 * have found the same vnode and finished initializing
1080 * the vcache entry. Is it conceivable that this vcache
1081 * entry could be recycled during this interval? If so,
1082 * then there probably needs to be some sort of additional
1083 * mutual exclusion (an Embryonic flag would suffice).
1085 printf("afs_NewVCache: lost the race\n");
1089 tvc->v->v_data = tvc;
1090 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
1094 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
1095 /* Hold it for the LRU (should make count 2) */
1096 VN_HOLD(AFSTOV(tvc));
1097 #else /* AFS_OSF_ENV */
1098 #if !(defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV))
1099 VREFCOUNT_SET(tvc, 1); /* us */
1100 #endif /* AFS_XBSD_ENV */
1101 #endif /* AFS_OSF_ENV */
1102 #ifdef AFS_AIX32_ENV
1103 LOCK_INIT(&tvc->pvmlock, "vcache pvmlock");
1104 tvc->vmh = tvc->segid = NULL;
1108 #if defined(AFS_CACHE_BYPASS)
1109 tvc->cachingStates = 0;
1110 tvc->cachingTransitions = 0;
1113 #ifdef AFS_BOZONLOCK_ENV
1114 #if defined(AFS_SUN5_ENV)
1115 rw_init(&tvc->rwlock, "vcache rwlock", RW_DEFAULT, NULL);
1117 #if defined(AFS_SUN55_ENV)
1118 /* This is required if the kaio (kernel asynchronous io)
1119 ** module is installed. Inside the kernel, the function
1120 ** check_vp (common/os/aio.c) checks to see if the kernel has
1121 ** to provide asynchronous io for this vnode. This
1122 ** function extracts the device number by following the
1123 ** v_data field of the vnode. If we do not set this field
1124 ** then the system panics. The value of the v_data field
1125 ** is not really important for AFS vnodes because the kernel
1126 ** does not do asynchronous io for regular files. Hence,
1127 ** for the time being, we fill up the v_data field with the
1128 ** vnode pointer itself. */
1129 tvc->v.v_data = (char *)tvc;
1130 #endif /* AFS_SUN55_ENV */
1132 afs_BozonInit(&tvc->pvnLock, tvc);
1135 /* initialize vnode data, note vrefCount is v.v_count */
1137 /* Don't forget to free the gnode space */
1138 tvc->v.v_gnode = gnodepnt =
1139 (struct gnode *)osi_AllocSmallSpace(sizeof(struct gnode));
1140 memset(gnodepnt, 0, sizeof(struct gnode));
1142 #ifdef AFS_SGI64_ENV
1143 memset((void *)&(tvc->vc_bhv_desc), 0, sizeof(tvc->vc_bhv_desc));
1144 bhv_desc_init(&(tvc->vc_bhv_desc), tvc, tvc, &Afs_vnodeops);
1145 #ifdef AFS_SGI65_ENV
1146 vn_bhv_head_init(&(tvc->v.v_bh), "afsvp");
1147 vn_bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
1149 bhv_head_init(&(tvc->v.v_bh));
1150 bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
1152 #ifdef AFS_SGI65_ENV
1153 tvc->v.v_mreg = tvc->v.v_mregb = (struct pregion *)tvc;
1154 #ifdef VNODE_TRACING
1155 tvc->v.v_trace = ktrace_alloc(VNODE_TRACE_SIZE, 0);
1157 init_bitlock(&tvc->v.v_pcacheflag, VNODE_PCACHE_LOCKBIT, "afs_pcache",
1159 init_mutex(&tvc->v.v_filocksem, MUTEX_DEFAULT, "afsvfl", (long)tvc);
1160 init_mutex(&tvc->v.v_buf_lock, MUTEX_DEFAULT, "afsvnbuf", (long)tvc);
1162 vnode_pcache_init(&tvc->v);
1163 #if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
1164 /* Above define is never true except in SGI test kernels. */
1165 init_bitlock(&(tvc->v.v_flag), VLOCK, "vnode", tvc->v.v_number);
1167 #ifdef INTR_KTHREADS
1168 AFS_VN_INIT_BUF_LOCK(&(tvc->v));
1171 SetAfsVnode(AFSTOV(tvc));
1172 #endif /* AFS_SGI64_ENV */
1174 * The proper value for mvstat (for root fids) is set up by the caller.
1177 if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
1179 if (afs_globalVFS == 0)
1180 osi_Panic("afs globalvfs");
1181 #if !defined(AFS_LINUX22_ENV)
1182 vSetVfsp(tvc, afs_globalVFS);
1184 vSetType(tvc, VREG);
1186 tvc->v.v_vfsnext = afs_globalVFS->vfs_vnodes; /* link off vfs */
1187 tvc->v.v_vfsprev = NULL;
1188 afs_globalVFS->vfs_vnodes = &tvc->v;
1189 if (tvc->v.v_vfsnext != NULL)
1190 tvc->v.v_vfsnext->v_vfsprev = &tvc->v;
1191 tvc->v.v_next = gnodepnt->gn_vnode; /*Single vnode per gnode for us! */
1192 gnodepnt->gn_vnode = &tvc->v;
1194 #if defined(AFS_DUX40_ENV)
1195 insmntque(tvc, afs_globalVFS, &afs_ubcops);
1198 /* Is this needed??? */
1199 insmntque(tvc, afs_globalVFS);
1200 #endif /* AFS_OSF_ENV */
1201 #endif /* AFS_DUX40_ENV */
1202 #ifdef AFS_FBSD70_ENV
1203 #ifndef AFS_FBSD80_ENV /* yup. they put it back. */
1204 insmntque(AFSTOV(tvc), afs_globalVFS);
1207 #if defined(AFS_SGI_ENV)
1208 VN_SET_DPAGES(&(tvc->v), (struct pfdat *)NULL);
1209 osi_Assert((tvc->v.v_flag & VINACT) == 0);
1211 osi_Assert(VN_GET_PGCNT(&(tvc->v)) == 0);
1212 osi_Assert(tvc->mapcnt == 0 && tvc->vc_locktrips == 0);
1213 osi_Assert(tvc->vc_rwlockid == OSI_NO_LOCKID);
1214 osi_Assert(tvc->v.v_filocks == NULL);
1215 #if !defined(AFS_SGI65_ENV)
1216 osi_Assert(tvc->v.v_filocksem == NULL);
1218 osi_Assert(tvc->cred == NULL);
1219 #ifdef AFS_SGI64_ENV
1220 vnode_pcache_reinit(&tvc->v);
1221 tvc->v.v_rdev = NODEV;
1223 vn_initlist((struct vnlist *)&tvc->v);
1225 #endif /* AFS_SGI_ENV */
1227 osi_dnlc_purgedp(tvc); /* this may be overkill */
1228 memset(&(tvc->callsort), 0, sizeof(struct afs_q));
1230 tvc->f.states &=~ CVInit;
1231 afs_osi_Wakeup(&tvc->f.states);
1235 } /*afs_NewVCache */
1241 * LOCK: afs_FlushActiveVcaches afs_xvcache N
1243 * \param doflocks : Do we handle flocks?
1246 afs_FlushActiveVcaches(register afs_int32 doflocks)
1248 register struct vcache *tvc;
1250 register struct afs_conn *tc;
1251 register afs_int32 code;
1252 afs_ucred_t *cred = NULL;
1253 struct vrequest treq, ureq;
1254 struct AFSVolSync tsync;
1257 AFS_STATCNT(afs_FlushActiveVcaches);
1258 ObtainReadLock(&afs_xvcache);
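/* Sweep every hash chain.  Entries holding file locks get ExtendLock
 * keep-alives; entries marked CCore or CUnlinkedDel get their
 * last-reference store or deferred unlink pushed through. */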
1259 for (i = 0; i < VCSIZE; i++) {
1260 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
1261 if (tvc->f.states & CVInit) continue;
1262 #ifdef AFS_DARWIN80_ENV
1263 if (tvc->f.states & CDeadVnode &&
1264 (tvc->f.states & (CCore|CUnlinkedDel) ||
1265 tvc->flockCount)) panic("Dead vnode has core/unlinkedel/flock");
1267 if (doflocks && tvc->flockCount != 0) {
1268 /* if this entry has an flock, send a keep-alive call out */
1270 ReleaseReadLock(&afs_xvcache);
1271 ObtainWriteLock(&tvc->lock, 51);
1273 afs_InitReq(&treq, afs_osi_credp);
1274 treq.flags |= O_NONBLOCK;
1276 tc = afs_Conn(&tvc->f.fid, &treq, SHARED_LOCK);
1278 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
1281 RXAFS_ExtendLock(tc->id,
1282 (struct AFSFid *)&tvc->f.fid.Fid,
1288 } while (afs_Analyze
1289 (tc, code, &tvc->f.fid, &treq,
1290 AFS_STATS_FS_RPCIDX_EXTENDLOCK, SHARED_LOCK, NULL));
1292 ReleaseWriteLock(&tvc->lock);
1293 #ifdef AFS_DARWIN80_ENV
1295 ObtainReadLock(&afs_xvcache);
1297 ObtainReadLock(&afs_xvcache);
1302 if ((tvc->f.states & CCore) || (tvc->f.states & CUnlinkedDel)) {
1304 * Don't let it evaporate in case someone else is in
1305 * this code. Also, drop the afs_xvcache lock while
1306 * getting vcache locks.
1309 ReleaseReadLock(&afs_xvcache);
1310 #ifdef AFS_BOZONLOCK_ENV
1311 afs_BozonLock(&tvc->pvnLock, tvc);
1313 #if defined(AFS_SGI_ENV)
1315 * That's because if we come in via the CUnlinkedDel bit state path we'll have 0 refcnt
1317 osi_Assert(VREFCOUNT_GT(tvc,0));
1318 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1320 ObtainWriteLock(&tvc->lock, 52);
1321 if (tvc->f.states & CCore) {
1322 tvc->f.states &= ~CCore;
1323 /* XXXX Find better place-holder for cred XXXX */
1324 cred = (afs_ucred_t *)tvc->linkData;
1325 tvc->linkData = NULL; /* XXX */
1326 afs_InitReq(&ureq, cred);
1327 afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
1328 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
1329 tvc->execsOrWriters);
1330 code = afs_StoreOnLastReference(tvc, &ureq);
1331 ReleaseWriteLock(&tvc->lock);
1332 #ifdef AFS_BOZONLOCK_ENV
1333 afs_BozonUnlock(&tvc->pvnLock, tvc);
1335 hzero(tvc->flushDV);
1338 if (code && code != VNOVNODE) {
1339 afs_StoreWarn(code, tvc->f.fid.Fid.Volume,
1340 /* /dev/console */ 1);
1342 } else if (tvc->f.states & CUnlinkedDel) {
1346 ReleaseWriteLock(&tvc->lock);
1347 #ifdef AFS_BOZONLOCK_ENV
1348 afs_BozonUnlock(&tvc->pvnLock, tvc);
1350 #if defined(AFS_SGI_ENV)
1351 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1353 afs_remunlink(tvc, 0);
1354 #if defined(AFS_SGI_ENV)
1355 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1358 /* lost (or won, perhaps) the race condition */
1359 ReleaseWriteLock(&tvc->lock);
1360 #ifdef AFS_BOZONLOCK_ENV
1361 afs_BozonUnlock(&tvc->pvnLock, tvc);
1364 #if defined(AFS_SGI_ENV)
1365 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1367 #ifdef AFS_DARWIN80_ENV
1370 AFS_RELE(AFSTOV(tvc));
1371 /* Matches write code setting CCore flag */
1374 ObtainReadLock(&afs_xvcache);
1376 ObtainReadLock(&afs_xvcache);
1379 AFS_RELE(AFSTOV(tvc));
1380 /* Matches write code setting CCore flag */
1387 ReleaseReadLock(&afs_xvcache);
1408 * Make sure a cache entry is up-to-date status-wise.
1410 * NOTE: everywhere that calls this can potentially be sped up
1411 * by checking CStatd first, and avoiding doing the InitReq
1412 * if this is up-to-date.
1414 * Anymore, the only places that call this KNOW already that the
1415 * vcache is not up-to-date, so we don't screw around.
1417 * \param avc Pointer to vcache entry to verify.
1420 * \return 0 for success or other error codes.
1423 afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
1425 register struct vcache *tvc;
1427 AFS_STATCNT(afs_VerifyVCache);
1429 #if defined(AFS_OSF_ENV)
1430 ObtainReadLock(&avc->lock);
1431 if (afs_IsWired(avc)) {
1432 ReleaseReadLock(&avc->lock);
1435 ReleaseReadLock(&avc->lock);
1436 #endif /* AFS_OSF_ENV */
1437 /* otherwise we must fetch the status info */
1439 ObtainWriteLock(&avc->lock, 53);
1440 if (avc->f.states & CStatd) {
1441 ReleaseWriteLock(&avc->lock);
1444 ObtainWriteLock(&afs_xcbhash, 461);
1445 avc->f.states &= ~(CStatd | CUnique);
1446 avc->callback = NULL;
1447 afs_DequeueCallback(avc);
1448 ReleaseWriteLock(&afs_xcbhash);
1449 ReleaseWriteLock(&avc->lock);
1451 /* since we've been called back, or the callback has expired,
1452 * it's possible that the contents of this directory, or this
1453 * file's name have changed, thus invalidating the dnlc contents.
1455 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
1456 osi_dnlc_purgedp(avc);
1458 osi_dnlc_purgevp(avc);
1460 /* fetch the status info */
1461 tvc = afs_GetVCache(&avc->f.fid, areq, NULL, avc);
1464 /* Put it back; caller has already incremented vrefCount */
1468 } /*afs_VerifyVCache */
1472 * Simple copy of stat info into cache.
1474 * Callers: as of 1992-04-29, only called by WriteVCache
1476 * \param avc Ptr to vcache entry involved.
1477 * \param astat Ptr to stat info to copy.
1481 afs_SimpleVStat(register struct vcache *avc,
1482 register struct AFSFetchStatus *astat, struct vrequest *areq)
1485 AFS_STATCNT(afs_SimpleVStat);
1488 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1489 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1491 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
1493 #ifdef AFS_64BIT_CLIENT
1494 FillInt64(length, astat->Length_hi, astat->Length);
1495 #else /* AFS_64BIT_CLIENT */
1496 length = astat->Length;
1497 #endif /* AFS_64BIT_CLIENT */
1498 #if defined(AFS_SGI_ENV)
1499 osi_Assert((valusema(&avc->vc_rwlock) <= 0)
1500 && (OSI_GET_LOCKID() == avc->vc_rwlockid));
1501 if (length < avc->f.m.Length) {
1502 vnode_t *vp = (vnode_t *) avc;
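/* The file shrank on the server: drop the vcache lock, toss any cached
 * pages beyond the new length, then reacquire the lock. */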
1504 osi_Assert(WriteLocked(&avc->lock));
1505 ReleaseWriteLock(&avc->lock);
1507 PTOSSVP(vp, (off_t) length, (off_t) MAXLONG);
1509 ObtainWriteLock(&avc->lock, 67);
1512 /* if writing the file, don't fetch over this value */
1513 afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT, ICL_TYPE_POINTER, avc,
1514 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
1515 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1516 avc->f.m.Length = length;
1517 avc->f.m.Date = astat->ClientModTime;
1519 avc->f.m.Owner = astat->Owner;
1520 avc->f.m.Group = astat->Group;
1521 avc->f.m.Mode = astat->UnixModeBits;
1522 if (vType(avc) == VREG) {
1523 avc->f.m.Mode |= S_IFREG;
1524 } else if (vType(avc) == VDIR) {
1525 avc->f.m.Mode |= S_IFDIR;
1526 } else if (vType(avc) == VLNK) {
1527 avc->f.m.Mode |= S_IFLNK;
1528 if ((avc->f.m.Mode & 0111) == 0)
1531 if (avc->f.states & CForeign) {
1532 struct axscache *ac;
1533 avc->f.anyAccess = astat->AnonymousAccess;
1535 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1537 * Caller has at least one bit not covered by anonymous, and
1538 * thus may have interesting rights.
1540 * HOWEVER, this is a really bad idea, because any access query
1541 * for bits which aren't covered by anonymous, on behalf of a user
1542 * who doesn't have any special rights, will result in an answer of
1543 * the form "I don't know, let's make a FetchStatus RPC and find out!"
1544 * It's an especially bad idea under Ultrix, since (due to the lack of
1545 * a proper access() call) it must perform several afs_access() calls
1546 * in order to create magic mode bits that vary according to who makes
1547 * the call. In other words, _every_ stat() generates a test for
1550 #endif /* badidea */
1551 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1552 ac->axess = astat->CallerAccess;
1553 else /* not found, add a new one if possible */
1554 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1557 } /*afs_SimpleVStat */
1561 * Store the status info *only* back to the server for a
1564 * Environment: Must be called with a shared lock held on the vnode.
1566 * \param avc Ptr to the vcache entry.
1567 * \param astatus Ptr to the status info to store.
1568 * \param areq Ptr to the associated vrequest.
1570 * \return Operation status.
1574 afs_WriteVCache(register struct vcache *avc,
1575 register struct AFSStoreStatus *astatus,
1576 struct vrequest *areq)
1579 struct afs_conn *tc;
1580 struct AFSFetchStatus OutStatus;
1581 struct AFSVolSync tsync;
1583 AFS_STATCNT(afs_WriteVCache);
1584 afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
1585 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
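/* Standard retry loop: pick a connection, issue the StoreStatus RPC, and
 * let afs_Analyze decide whether to retry against another fileserver. */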
1587 tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK);
1589 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
1592 RXAFS_StoreStatus(tc->id, (struct AFSFid *)&avc->f.fid.Fid,
1593 astatus, &OutStatus, &tsync);
1598 } while (afs_Analyze
1599 (tc, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
1600 SHARED_LOCK, NULL));
1602 UpgradeSToWLock(&avc->lock, 20);
1604 /* success, do the changes locally */
1605 afs_SimpleVStat(avc, &OutStatus, areq);
1607 * Update the date, too. SimpleVStat didn't do this, since
1608 * it thought we were doing this after fetching new status
1609 * over a file being written.
1611 avc->f.m.Date = OutStatus.ClientModTime;
1613 /* failure, set up to check with server next time */
1614 ObtainWriteLock(&afs_xcbhash, 462);
1615 afs_DequeueCallback(avc);
1616 avc->f.states &= ~(CStatd | CUnique); /* turn off stat valid flag */
1617 ReleaseWriteLock(&afs_xcbhash);
1618 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
1619 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
1621 ConvertWToSLock(&avc->lock);
1624 } /*afs_WriteVCache */
1625 #if defined(AFS_DISCON_ENV)
1628 * Store status info only locally, set the proper disconnection flags
1629 * and add to dirty list.
1631 * \param avc The vcache to be written locally.
1632 * \param astatus Get attr fields from local store.
1633 * \param attrs This one is only of the vs_size.
1635 * \note Must be called with a shared lock on the vnode
1637 int afs_WriteVCacheDiscon(register struct vcache *avc,
1638 register struct AFSStoreStatus *astatus,
1639 struct vattr *attrs)
1642 afs_int32 flags = 0;
1644 UpgradeSToWLock(&avc->lock, 700);
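/* Apply the requested attribute changes locally, translating each one
 * into a VDiscon* flag, and record the vcache on the disconnected dirty
 * list so the change can be replayed when we go back online. */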
1646 if (!astatus->Mask) {
1652 /* Set attributes. */
1653 if (astatus->Mask & AFS_SETMODTIME) {
1654 avc->f.m.Date = astatus->ClientModTime;
1655 flags |= VDisconSetTime;
1658 if (astatus->Mask & AFS_SETOWNER) {
1659 printf("Not allowed yet.\n");
1660 /*avc->f.m.Owner = astatus->Owner;*/
1663 if (astatus->Mask & AFS_SETGROUP) {
1664 printf("Not allowed yet.\n");
1665 /*avc->f.m.Group = astatus->Group;*/
1668 if (astatus->Mask & AFS_SETMODE) {
1669 avc->f.m.Mode = astatus->UnixModeBits;
1671 #if 0 /* XXX: Leaving this out, so it doesn't mess up the file type flag.*/
1673 if (vType(avc) == VREG) {
1674 avc->f.m.Mode |= S_IFREG;
1675 } else if (vType(avc) == VDIR) {
1676 avc->f.m.Mode |= S_IFDIR;
1677 } else if (vType(avc) == VLNK) {
1678 avc->f.m.Mode |= S_IFLNK;
1679 if ((avc->f.m.Mode & 0111) == 0)
1683 flags |= VDisconSetMode;
1684 } /* if(astatus.Mask & AFS_SETMODE) */
1686 } /* if (!astatus->Mask) */
1688 if (attrs->va_size > 0) {
1689 /* XXX: Do I need more checks? */
1690 /* Truncation operation. */
1691 flags |= VDisconTrunc;
1695 afs_DisconAddDirty(avc, flags, 1);
1697 /* XXX: How about the rest of the fields? */
1699 ConvertWToSLock(&avc->lock);
1707 * Copy astat block into vcache info
1709 * \note This code may get dataversion and length out of sync if the file has
1710 * been modified. This is less than ideal. I haven't thought about it sufficiently
1711 * to be certain that it is adequate.
1713 * \note Environment: Must be called under a write lock
1715 * \param avc Ptr to vcache entry.
1716 * \param astat Ptr to stat block to copy in.
1717 * \param areq Ptr to associated request.
1720 afs_ProcessFS(register struct vcache *avc,
1721 register struct AFSFetchStatus *astat, struct vrequest *areq)
1724 AFS_STATCNT(afs_ProcessFS);
1726 #ifdef AFS_64BIT_CLIENT
1727 FillInt64(length, astat->Length_hi, astat->Length);
1728 #else /* AFS_64BIT_CLIENT */
1729 length = astat->Length;
1730 #endif /* AFS_64BIT_CLIENT */
1731 /* WARNING: afs_DoBulkStat uses the Length field to store a sequence
1732 * number for each bulk status request. Under no circumstances
1733 * should afs_DoBulkStat store a sequence number if the new
1734 * length will be ignored when afs_ProcessFS is called with
1735 * new stats. If you change the following conditional then you
1736 * also need to change the conditional in afs_DoBulkStat. */
1738 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1739 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1741 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
1743 /* if we're writing or mapping this file, don't fetch over these
1746 afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
1747 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
1748 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1749 avc->f.m.Length = length;
1750 avc->f.m.Date = astat->ClientModTime;
1752 hset64(avc->f.m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
1753 avc->f.m.Owner = astat->Owner;
1754 avc->f.m.Mode = astat->UnixModeBits;
1755 avc->f.m.Group = astat->Group;
1756 avc->f.m.LinkCount = astat->LinkCount;
1757 if (astat->FileType == File) {
1758 vSetType(avc, VREG);
1759 avc->f.m.Mode |= S_IFREG;
1760 } else if (astat->FileType == Directory) {
1761 vSetType(avc, VDIR);
1762 avc->f.m.Mode |= S_IFDIR;
1763 } else if (astat->FileType == SymbolicLink) {
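/* A symlink whose mode has no execute bits is taken to be a mount point;
 * with fakestat enabled it is presented as a directory so it need not be
 * resolved first. */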
1764 if (afs_fakestat_enable && (avc->f.m.Mode & 0111) == 0) {
1765 vSetType(avc, VDIR);
1766 avc->f.m.Mode |= S_IFDIR;
1768 vSetType(avc, VLNK);
1769 avc->f.m.Mode |= S_IFLNK;
1771 if ((avc->f.m.Mode & 0111) == 0) {
1775 avc->f.anyAccess = astat->AnonymousAccess;
1777 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1779 * Caller has at least one bit not covered by anonymous, and
1780 * thus may have interesting rights.
1782 * HOWEVER, this is a really bad idea, because any access query
1783 * for bits which aren't covered by anonymous, on behalf of a user
1784 * who doesn't have any special rights, will result in an answer of
1785 * the form "I don't know, let's make a FetchStatus RPC and find out!"
1786 * It's an especially bad idea under Ultrix, since (due to the lack of
1787 * a proper access() call) it must perform several afs_access() calls
1788 * in order to create magic mode bits that vary according to who makes
1789 * the call. In other words, _every_ stat() generates a test for
1792 #endif /* badidea */
1794 struct axscache *ac;
1795 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1796 ac->axess = astat->CallerAccess;
1797 else /* not found, add a new one if possible */
1798 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1800 } /*afs_ProcessFS */
1804 * Look up a name on the fileserver, returning the fid and status of the target.
1807 * \param areq Request to be passed on.
1808 * \param name Name of the directory entry to look up.
1809 * \param OutStatus Fetch status.
1814 * \return Success status of operation.
1817 afs_RemoteLookup(register struct VenusFid *afid, struct vrequest *areq,
1818 char *name, struct VenusFid *nfid,
1819 struct AFSFetchStatus *OutStatusp,
1820 struct AFSCallBack *CallBackp, struct server **serverp,
1821 struct AFSVolSync *tsyncp)
1825 register struct afs_conn *tc;
1826 struct AFSFetchStatus OutDirStatus;
1829 name = ""; /* XXX */
1831 tc = afs_Conn(afid, areq, SHARED_LOCK);
1834 *serverp = tc->srvr->server;
1836 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
1839 RXAFS_Lookup(tc->id, (struct AFSFid *)&afid->Fid, name,
1840 (struct AFSFid *)&nfid->Fid, OutStatusp,
1841 &OutDirStatus, CallBackp, tsyncp);
1846 } while (afs_Analyze
1847 (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
1857 * Given a file id and a vrequest structure, fetch the status
1858 * information associated with the file.
1860 * \param afid File ID.
1861 * \param areq Ptr to associated vrequest structure, specifying the
1862 * user whose authentication tokens will be used.
1863 * \param avc Caller may already have a vcache for this file, which is
1866 * \note Environment:
1867 * The cache entry is returned with an increased vrefCount field.
1868 * The entry must be discarded by calling afs_PutVCache when you
1869 * are through using the pointer to the cache entry.
1871 * You should not hold any locks when calling this function, except
1872 * locks on other vcache entries. If you lock more than one vcache
1873 * entry simultaneously, you should lock them in this order:
1875 * 1. Lock all files first, then directories.
1876 * 2. Within a particular type, lock entries in Fid.Vnode order.
1878 * This locking hierarchy is convenient because it allows locking
1879 * of a parent dir cache entry, given a file (to check its access
1880 * control list). It also allows renames to be handled easily by
1881 * locking directories in a constant order.
1883 * \note NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
1885 * \note Might have a vcache structure already, which must
1886 * already be held by the caller
1889 afs_GetVCache(register struct VenusFid *afid, struct vrequest *areq,
1890 afs_int32 * cached, struct vcache *avc)
1893 afs_int32 code, newvcache = 0;
1894 register struct vcache *tvc;
1898 AFS_STATCNT(afs_GetVCache);
1901 *cached = 0; /* Init just in case */
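/* First look in the cache under a shared xvcache lock; if the entry is
 * there and statd (or we are in readdir), return it directly.  Otherwise
 * create one with afs_NewVCache and fetch fresh status below. */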
1903 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1907 ObtainSharedLock(&afs_xvcache, 5);
1909 tvc = afs_FindVCache(afid, &retry, DO_STATS | DO_VLRU | IS_SLOCK);
1911 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1912 ReleaseSharedLock(&afs_xvcache);
1913 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1921 osi_Assert((tvc->f.states & CVInit) == 0);
1922 /* If we are in readdir, return the vnode even if not statd */
1923 if ((tvc->f.states & CStatd) || afs_InReadDir(tvc)) {
1924 ReleaseSharedLock(&afs_xvcache);
1928 UpgradeSToWLock(&afs_xvcache, 21);
1930 /* no cache entry, better grab one */
1931 tvc = afs_NewVCache(afid, NULL);
1934 ConvertWToSLock(&afs_xvcache);
1937 ReleaseSharedLock(&afs_xvcache);
1941 afs_stats_cmperf.vcacheMisses++;
1944 ReleaseSharedLock(&afs_xvcache);
1946 ObtainWriteLock(&tvc->lock, 54);
1948 if (tvc->f.states & CStatd) {
1949 ReleaseWriteLock(&tvc->lock);
1952 #if defined(AFS_OSF_ENV)
1953 if (afs_IsWired(tvc)) {
1954 ReleaseWriteLock(&tvc->lock);
1957 #endif /* AFS_OSF_ENV */
1958 #ifdef AFS_DARWIN80_ENV
1959 /* Darwin 8.0 only has bufs in nfs, so we shouldn't have to worry about them.
1962 #if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
1964 * XXX - I really don't like this. Should try to understand better.
1965 * It seems that sometimes, when we get called, we already hold the
1966 * lock on the vnode (e.g., from afs_getattr via afs_VerifyVCache).
1967 * We can't drop the vnode lock, because that could result in a race.
1968 * Sometimes, though, we get here and don't hold the vnode lock.
1969 * I hate code paths that sometimes hold locks and sometimes don't.
1970 * In any event, the dodge we use here is to check whether the vnode
1971 * is locked, and if it isn't, then we gain and drop it around the call
1972 * to vinvalbuf; otherwise, we leave it alone.
1975 struct vnode *vp = AFSTOV(tvc);
1978 #if defined(AFS_DARWIN_ENV)
1979 iheldthelock = VOP_ISLOCKED(vp);
1981 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, current_proc());
1982 /* this is messy. we can call fsync which will try to reobtain this */
1983 if (VTOAFS(vp) == tvc)
1984 ReleaseWriteLock(&tvc->lock);
1985 if (UBCINFOEXISTS(vp)) {
1986 vinvalbuf(vp, V_SAVE, &afs_osi_cred, current_proc(), PINOD, 0);
1988 if (VTOAFS(vp) == tvc)
1989 ObtainWriteLock(&tvc->lock, 954);
1991 VOP_UNLOCK(vp, LK_EXCLUSIVE, current_proc());
1992 #elif defined(AFS_FBSD80_ENV)
1993 iheldthelock = VOP_ISLOCKED(vp);
1994 if (!iheldthelock) {
1995 /* nosleep/sleep lock order reversal */
1996 int glocked = ISAFS_GLOCK();
1999 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2003 vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
2006 #elif defined(AFS_FBSD60_ENV)
2007 iheldthelock = VOP_ISLOCKED(vp, curthread);
2009 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
2010 vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
2012 VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
2013 #elif defined(AFS_FBSD50_ENV)
2014 iheldthelock = VOP_ISLOCKED(vp, curthread);
2016 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
2017 vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
2019 VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
2020 #elif defined(AFS_FBSD40_ENV)
2021 iheldthelock = VOP_ISLOCKED(vp, curproc);
2023 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
2024 vinvalbuf(vp, V_SAVE, osi_curcred(), curproc, PINOD, 0);
2026 VOP_UNLOCK(vp, LK_EXCLUSIVE, curproc);
2027 #elif defined(AFS_OBSD_ENV)
2028 iheldthelock = VOP_ISLOCKED(vp, curproc);
2030 VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
2031 uvm_vnp_uncache(vp);
2033 VOP_UNLOCK(vp, 0, curproc);
2039 ObtainWriteLock(&afs_xcbhash, 464);
2040 tvc->f.states &= ~CUnique;
2042 afs_DequeueCallback(tvc);
2043 ReleaseWriteLock(&afs_xcbhash);
2045 /* It is always appropriate to throw away all the access rights? */
2046 afs_FreeAllAxs(&(tvc->Access));
2047 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-volume info */
2049 if ((tvp->states & VForeign)) {
2051 tvc->f.states |= CForeign;
2052 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
2053 && (tvp->rootUnique == afid->Fid.Unique)) {
2057 if (tvp->states & VRO)
2058 tvc->f.states |= CRO;
2059 if (tvp->states & VBackup)
2060 tvc->f.states |= CBackup;
2061 /* now copy ".." entry back out of volume structure, if necessary */
2062 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2064 tvc->mvid = (struct VenusFid *)
2065 osi_AllocSmallSpace(sizeof(struct VenusFid));
2066 *tvc->mvid = tvp->dotdot;
2068 afs_PutVolume(tvp, READ_LOCK);
2072 afs_RemoveVCB(afid);
2074 struct AFSFetchStatus OutStatus;
2076 if (afs_DynrootNewVnode(tvc, &OutStatus)) {
2077 afs_ProcessFS(tvc, &OutStatus, areq);
2078 tvc->f.states |= CStatd | CUnique;
2079 tvc->f.parent.vnode = OutStatus.ParentVnode;
2080 tvc->f.parent.unique = OutStatus.ParentUnique;
2084 if (AFS_IS_DISCONNECTED) {
2085 /* Nothing to do otherwise...*/
2087 printf("Network is down in afs_GetVCache\n");
2089 code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
2091 /* For the NFS translator's benefit, make sure
2092 * non-directory vnodes always have their parent FID set
2093 * correctly, even when created as a result of decoding an
2094 * NFS filehandle. It would be nice to also do this for
2095 * directories, but we can't because the fileserver fills
2096 * in the FID of the directory itself instead of that of
2099 if (!code && OutStatus.FileType != Directory &&
2100 !tvc->f.parent.vnode) {
2101 tvc->f.parent.vnode = OutStatus.ParentVnode;
2102 tvc->f.parent.unique = OutStatus.ParentUnique;
2103 /* XXX - SXW - It's conceivable we should mark ourselves
2104 * as dirty again here, in case we've been raced
2105 * out of the FetchStatus call.
2112 ReleaseWriteLock(&tvc->lock);
2118 ReleaseWriteLock(&tvc->lock);
2121 } /*afs_GetVCache */
2126 * Look up a vcache by fid. Look inside the cache first; if it is not
2127 * there, look up the file on the server, and then get its fresh
2132 * \param cached Is element cached? If NULL, don't answer.
2136 * \return The found element or NULL.
2139 afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
2140 afs_int32 * cached, struct vcache *adp, char *aname)
2142 afs_int32 code, now, newvcache = 0;
2143 struct VenusFid nfid;
2144 register struct vcache *tvc;
2146 struct AFSFetchStatus OutStatus;
2147 struct AFSCallBack CallBack;
2148 struct AFSVolSync tsync;
2149 struct server *serverp = 0;
2153 AFS_STATCNT(afs_GetVCache);
2155 *cached = 0; /* Init just in case */
2157 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2161 ObtainReadLock(&afs_xvcache);
2162 tvc = afs_FindVCache(afid, &retry, DO_STATS /* no vlru */ );
2165 ReleaseReadLock(&afs_xvcache);
2167 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2168 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2172 ObtainReadLock(&tvc->lock);
2174 if (tvc->f.states & CStatd) {
2178 ReleaseReadLock(&tvc->lock);
2181 tvc->f.states &= ~CUnique;
2183 ReleaseReadLock(&tvc->lock);
2185 ObtainReadLock(&afs_xvcache);
2188 ReleaseReadLock(&afs_xvcache);
2190 /* lookup the file */
2193 origCBs = afs_allCBs; /* if anything changes, we don't have a cb */
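/* Remember the global callback counter before the RPC; the callback
 * returned by the lookup is only trusted later if no InitCallBackState
 * arrived in between (i.e. origCBs still equals afs_allCBs). */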
2195 if (AFS_IS_DISCONNECTED) {
2196 printf("Network is down in afs_LookupVCache\n");
2200 afs_RemoteLookup(&adp->f.fid, areq, aname, &nfid, &OutStatus,
2201 &CallBack, &serverp, &tsync);
2203 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2207 ObtainSharedLock(&afs_xvcache, 6);
2208 tvc = afs_FindVCache(&nfid, &retry, DO_VLRU | IS_SLOCK/* no xstats now */ );
2210 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2211 ReleaseSharedLock(&afs_xvcache);
2212 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2218 /* no cache entry, better grab one */
2219 UpgradeSToWLock(&afs_xvcache, 22);
2220 tvc = afs_NewVCache(&nfid, serverp);
2222 ConvertWToSLock(&afs_xvcache);
2225 ReleaseSharedLock(&afs_xvcache);
2230 ReleaseSharedLock(&afs_xvcache);
2231 ObtainWriteLock(&tvc->lock, 55);
2233 /* It is always appropriate to throw away all the access rights? */
2234 afs_FreeAllAxs(&(tvc->Access));
2235 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-vol info */
2237 if ((tvp->states & VForeign)) {
2239 tvc->f.states |= CForeign;
2240 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
2241 && (tvp->rootUnique == afid->Fid.Unique))
2244 if (tvp->states & VRO)
2245 tvc->f.states |= CRO;
2246 if (tvp->states & VBackup)
2247 tvc->f.states |= CBackup;
2248 /* now copy ".." entry back out of volume structure, if necessary */
2249 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2251 tvc->mvid = (struct VenusFid *)
2252 osi_AllocSmallSpace(sizeof(struct VenusFid));
2253 *tvc->mvid = tvp->dotdot;
2258 ObtainWriteLock(&afs_xcbhash, 465);
2259 afs_DequeueCallback(tvc);
2260 tvc->f.states &= ~(CStatd | CUnique);
2261 ReleaseWriteLock(&afs_xcbhash);
2262 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2263 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2265 afs_PutVolume(tvp, READ_LOCK);
2266 ReleaseWriteLock(&tvc->lock);
2271 ObtainWriteLock(&afs_xcbhash, 466);
2272 if (origCBs == afs_allCBs) {
2273 if (CallBack.ExpirationTime) {
2274 tvc->callback = serverp;
2275 tvc->cbExpires = CallBack.ExpirationTime + now;
2276 tvc->f.states |= CStatd | CUnique;
2277 tvc->f.states &= ~CBulkFetching;
2278 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
2279 } else if (tvc->f.states & CRO) {
2280 /* adapt gives us an hour. */
2281 tvc->cbExpires = 3600 + osi_Time();
2282 /*XXX*/ tvc->f.states |= CStatd | CUnique;
2283 tvc->f.states &= ~CBulkFetching;
2284 afs_QueueCallback(tvc, CBHash(3600), tvp);
2286 tvc->callback = NULL;
2287 afs_DequeueCallback(tvc);
2288 tvc->f.states &= ~(CStatd | CUnique);
2289 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2290 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2293 afs_DequeueCallback(tvc);
2294 tvc->f.states &= ~CStatd;
2295 tvc->f.states &= ~CUnique;
2296 tvc->callback = NULL;
2297 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2298 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2300 ReleaseWriteLock(&afs_xcbhash);
2302 afs_PutVolume(tvp, READ_LOCK);
2303 afs_ProcessFS(tvc, &OutStatus, areq);
2305 ReleaseWriteLock(&tvc->lock);
2311 afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
2312 afs_int32 * cached, struct volume *tvolp)
2314 afs_int32 code = 0, i, newvcache = 0, haveStatus = 0;
2315 afs_int32 getNewFid = 0;
2317 struct VenusFid nfid;
2318 register struct vcache *tvc;
2319 struct server *serverp = 0;
2320 struct AFSFetchStatus OutStatus;
2321 struct AFSCallBack CallBack;
2322 struct AFSVolSync tsync;
2327 #ifdef AFS_DARWIN80_ENV
2334 if (!tvolp->rootVnode || getNewFid) {
2335 struct VenusFid tfid;
2338 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2339 origCBs = afs_allCBs; /* ignore InitCallBackState */
2341 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2346 /* ReleaseReadLock(&tvolp->lock); */
2347 ObtainWriteLock(&tvolp->lock, 56);
2348 tvolp->rootVnode = afid->Fid.Vnode = nfid.Fid.Vnode;
2349 tvolp->rootUnique = afid->Fid.Unique = nfid.Fid.Unique;
2350 ReleaseWriteLock(&tvolp->lock);
2351 /* ObtainReadLock(&tvolp->lock);*/
2354 afid->Fid.Vnode = tvolp->rootVnode;
2355 afid->Fid.Unique = tvolp->rootUnique;
2359 ObtainSharedLock(&afs_xvcache, 7);
2361 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2362 if (!FidCmp(&(tvc->f.fid), afid)) {
2363 if (tvc->f.states & CVInit) {
2364 ReleaseSharedLock(&afs_xvcache);
2365 afs_osi_Sleep(&tvc->f.states);
2369 /* Grab this vnode, possibly reactivating from the free list */
2370 /* for the present (95.05.25) everything on the hash table is
2371 * definitively NOT in the free list -- at least until afs_reclaim
2372 * can be safely implemented */
2374 vg = vget(AFSTOV(tvc)); /* this bumps ref count */
2378 #endif /* AFS_OSF_ENV */
2379 #ifdef AFS_DARWIN80_ENV
2380 if (tvc->f.states & CDeadVnode) {
2381 ReleaseSharedLock(&afs_xvcache);
2382 afs_osi_Sleep(&tvc->f.states);
2386 if (vnode_get(tvp)) /* this bumps ref count */
2388 if (vnode_ref(tvp)) {
2390 /* AFSTOV(tvc) may be NULL */
2400 if (!haveStatus && (!tvc || !(tvc->f.states & CStatd))) {
2401 /* Mount point no longer stat'd, or unknown. The FID may have changed. */
2404 AFS_RELE(AFSTOV(tvc));
2407 ReleaseSharedLock(&afs_xvcache);
2408 #ifdef AFS_DARWIN80_ENV
2411 vnode_put(AFSTOV(tvc));
2412 vnode_rele(AFSTOV(tvc));
2421 UpgradeSToWLock(&afs_xvcache, 23);
2422 /* no cache entry, better grab one */
2423 tvc = afs_NewVCache(afid, NULL);
2426 ReleaseWriteLock(&afs_xvcache);
2430 afs_stats_cmperf.vcacheMisses++;
2434 afs_stats_cmperf.vcacheHits++;
2435 #if defined(AFS_OSF_ENV) || defined(AFS_DARWIN80_ENV)
2436 /* we already bumped the ref count in the for loop above */
2437 #else /* AFS_OSF_ENV */
2440 UpgradeSToWLock(&afs_xvcache, 24);
2441 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2442 refpanic("GRVC VLRU inconsistent0");
2444 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2445 refpanic("GRVC VLRU inconsistent1");
2447 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2448 refpanic("GRVC VLRU inconsistent2");
2450 QRemove(&tvc->vlruq); /* move to lruq head */
2451 QAdd(&VLRU, &tvc->vlruq);
2452 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2453 refpanic("GRVC VLRU inconsistent3");
2455 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2456 refpanic("GRVC VLRU inconsistent4");
2458 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2459 refpanic("GRVC VLRU inconsistent5");
2464 ReleaseWriteLock(&afs_xvcache);
2466 if (tvc->f.states & CStatd) {
2470 ObtainReadLock(&tvc->lock);
2471 tvc->f.states &= ~CUnique;
2472 tvc->callback = NULL; /* redundant, perhaps */
2473 ReleaseReadLock(&tvc->lock);
2476 ObtainWriteLock(&tvc->lock, 57);
2478 /* Is it always appropriate to throw away all the access rights? */
2479 afs_FreeAllAxs(&(tvc->Access));
2482 tvc->f.states |= CForeign;
2483 if (tvolp->states & VRO)
2484 tvc->f.states |= CRO;
2485 if (tvolp->states & VBackup)
2486 tvc->f.states |= CBackup;
2487 /* now copy ".." entry back out of volume structure, if necessary */
2488 if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
2489 && (tvolp->rootUnique == afid->Fid.Unique)) {
2492 if (tvc->mvstat == 2 && tvolp->dotdot.Fid.Volume != 0) {
2494 tvc->mvid = (struct VenusFid *)
2495 osi_AllocSmallSpace(sizeof(struct VenusFid));
2496 *tvc->mvid = tvolp->dotdot;
2500 afs_RemoveVCB(afid);
2503 struct VenusFid tfid;
2506 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2507 origCBs = afs_allCBs; /* ignore InitCallBackState */
2509 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2514 ObtainWriteLock(&afs_xcbhash, 467);
2515 afs_DequeueCallback(tvc);
2516 tvc->callback = NULL;
2517 tvc->f.states &= ~(CStatd | CUnique);
2518 ReleaseWriteLock(&afs_xcbhash);
2519 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2520 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2521 ReleaseWriteLock(&tvc->lock);
2526 ObtainWriteLock(&afs_xcbhash, 468);
2527 if (origCBs == afs_allCBs) {
2528 tvc->f.states |= CTruth;
2529 tvc->callback = serverp;
2530 if (CallBack.ExpirationTime != 0) {
2531 tvc->cbExpires = CallBack.ExpirationTime + start;
2532 tvc->f.states |= CStatd;
2533 tvc->f.states &= ~CBulkFetching;
2534 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
2535 } else if (tvc->f.states & CRO) {
2536 /* adapt gives us an hour. */
2537 tvc->cbExpires = 3600 + osi_Time();
2538 /*XXX*/ tvc->f.states |= CStatd;
2539 tvc->f.states &= ~CBulkFetching;
2540 afs_QueueCallback(tvc, CBHash(3600), tvolp);
2543 afs_DequeueCallback(tvc);
2544 tvc->callback = NULL;
2545 tvc->f.states &= ~(CStatd | CUnique);
2546 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2547 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2549 ReleaseWriteLock(&afs_xcbhash);
2550 afs_ProcessFS(tvc, &OutStatus, areq);
2552 ReleaseWriteLock(&tvc->lock);
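/*
 * Hedged sketch (illustration only): afs_GetRootVCache above discovers a
 * volume's root vnode by issuing a remote lookup on a fid whose Vnode field
 * is zero ("Means get rootfid of volume"), then caches the answer in
 * tvolp->rootVnode/rootUnique.  struct sketch_root_fid is a simplified
 * stand-in for struct VenusFid.
 */
struct sketch_root_fid { unsigned int Volume, Vnode, Unique; };

static struct sketch_root_fid
sketch_make_root_probe_fid(unsigned int volume)
{
    struct sketch_root_fid tfid;
    tfid.Volume = volume;
    tfid.Vnode = 0;     /* Vnode == 0 asks for the root fid of the volume */
    tfid.Unique = 0;    /* illustrative default */
    return tfid;
}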
2558 * Update callback status and (sometimes) attributes of a vnode.
2559 * Called after doing a fetch status RPC. Whilst disconnected, attributes
2560 * shouldn't be written to the vcache here.
2565 * \param Outsp Server status after rpc call.
2566 * \param acb Callback for this vnode.
2568 * \note The vcache must be write locked.
2571 afs_UpdateStatus(struct vcache *avc,
2572 struct VenusFid *afid,
2573 struct vrequest *areq,
2574 struct AFSFetchStatus *Outsp,
2575 struct AFSCallBack *acb,
2578 struct volume *volp;
2581 /* Don't write status to the vcache if we are resyncing after a disconnection. */
2582 afs_ProcessFS(avc, Outsp, areq);
2584 volp = afs_GetVolume(afid, areq, READ_LOCK);
2585 ObtainWriteLock(&afs_xcbhash, 469);
2586 avc->f.states |= CTruth;
2587 if (avc->callback /* check for race */ ) {
2588 if (acb->ExpirationTime != 0) {
2589 avc->cbExpires = acb->ExpirationTime + start;
2590 avc->f.states |= CStatd;
2591 avc->f.states &= ~CBulkFetching;
2592 afs_QueueCallback(avc, CBHash(acb->ExpirationTime), volp);
2593 } else if (avc->f.states & CRO) {
2594 /* ordinary callback on a read-only volume -- AFS 3.2 style */
2595 avc->cbExpires = 3600 + start;
2596 avc->f.states |= CStatd;
2597 avc->f.states &= ~CBulkFetching;
2598 afs_QueueCallback(avc, CBHash(3600), volp);
2600 afs_DequeueCallback(avc);
2601 avc->callback = NULL;
2602 avc->f.states &= ~(CStatd | CUnique);
2603 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
2604 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2607 afs_DequeueCallback(avc);
2608 avc->callback = NULL;
2609 avc->f.states &= ~(CStatd | CUnique);
2610 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
2611 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2613 ReleaseWriteLock(&afs_xcbhash);
2615 afs_PutVolume(volp, READ_LOCK);
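/*
 * Hedged sketch: afs_UpdateStatus above queues the vcache on a callback
 * expiration queue indexed by CBHash(expiration).  Assuming CBHash simply
 * reduces the expiration time modulo a fixed bucket count (an assumption --
 * see afs_cbqueue.h for the real definition), the bucketing looks like the
 * helper below; entries that expire around the same time land in the same
 * bucket, so the periodic daemon can sweep them together.
 */
#define SKETCH_CB_BUCKETS 128   /* illustrative bucket count, not the real CBHTSIZE */

static int
sketch_cb_bucket(unsigned int expiration)
{
    return (int)(expiration % SKETCH_CB_BUCKETS);
}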
2619 * Must be called with avc write-locked
2620 * We don't absolutely have to invalidate the hint unless the data version (dv) has
2621 * changed, but be sure to get it right or there will be consistency bugs.
2624 afs_FetchStatus(struct vcache * avc, struct VenusFid * afid,
2625 struct vrequest * areq, struct AFSFetchStatus * Outsp)
2628 afs_uint32 start = 0;
2629 register struct afs_conn *tc;
2630 struct AFSCallBack CallBack;
2631 struct AFSVolSync tsync;
2634 tc = afs_Conn(afid, areq, SHARED_LOCK);
2635 avc->dchint = NULL; /* invalidate hints */
2637 avc->callback = tc->srvr->server;
2639 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
2642 RXAFS_FetchStatus(tc->id, (struct AFSFid *)&afid->Fid, Outsp,
2650 } while (afs_Analyze
2651 (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
2652 SHARED_LOCK, NULL));
2655 afs_UpdateStatus(avc, afid, areq, Outsp, &CallBack, start);
2657 /* We used to undo the local callback here, but that's too extreme.
2658 * There are plenty of good reasons that fetchstatus might return
2659 * an error, such as EPERM. If we have the vnode cached, statd, and
2660 * with a callback, we might as well keep track of the fact that we
2661 * don't have access...
2663 if (code == EPERM || code == EACCES) {
2664 struct axscache *ac;
2665 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
2667 else /* not found, add a new one if possible */
2668 afs_AddAxs(avc->Access, areq->uid, 0);
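/*
 * Hedged sketch: on EPERM/EACCES the code above records a zero-rights entry
 * in the per-user access cache instead of discarding the vcache, so the lack
 * of access is remembered locally.  struct sketch_axs is a simplified,
 * illustration-only stand-in for struct axscache.
 */
struct sketch_axs { int uid; unsigned int rights; struct sketch_axs *next; };

static void
sketch_record_no_access(struct sketch_axs *head, int uid)
{
    struct sketch_axs *ac;
    for (ac = head; ac; ac = ac->next) {
        if (ac->uid == uid) {
            ac->rights = 0;     /* existing entry: remember "no access" */
            return;
        }
    }
    /* not found: the real code calls afs_AddAxs() to append a new entry */
}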
2679 * Stuff some information into the vcache for the given file.
2682 * afid : File in question.
2683 * OutStatus : Fetch status on the file.
2684 * CallBack : Callback info.
2685 * tc : RPC connection involved.
2686 * areq : vrequest involved.
2689 * Nothing interesting.
2692 afs_StuffVcache(register struct VenusFid *afid,
2693 struct AFSFetchStatus *OutStatus,
2694 struct AFSCallBack *CallBack, register struct afs_conn *tc,
2695 struct vrequest *areq)
2697 register afs_int32 code, i, newvcache = 0;
2698 register struct vcache *tvc;
2699 struct AFSVolSync tsync;
2701 struct axscache *ac;
2704 AFS_STATCNT(afs_StuffVcache);
2705 #ifdef IFS_VCACHECOUNT
2710 ObtainSharedLock(&afs_xvcache, 8);
2712 tvc = afs_FindVCache(afid, &retry, DO_VLRU| IS_SLOCK /* no stats */ );
2714 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2715 ReleaseSharedLock(&afs_xvcache);
2716 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2722 /* no cache entry, better grab one */
2723 UpgradeSToWLock(&afs_xvcache, 25);
2724 tvc = afs_NewVCache(afid, NULL);
2726 ConvertWToSLock(&afs_xvcache);
2729 ReleaseSharedLock(&afs_xvcache);
2734 ReleaseSharedLock(&afs_xvcache);
2735 ObtainWriteLock(&tvc->lock, 58);
2737 tvc->f.states &= ~CStatd;
2738 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2739 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2741 /* Is it always appropriate to throw away all the access rights? */
2742 afs_FreeAllAxs(&(tvc->Access));
2744 /*Copy useful per-volume info */
2745 tvp = afs_GetVolume(afid, areq, READ_LOCK);
2747 if (newvcache && (tvp->states & VForeign))
2748 tvc->f.states |= CForeign;
2749 if (tvp->states & VRO)
2750 tvc->f.states |= CRO;
2751 if (tvp->states & VBackup)
2752 tvc->f.states |= CBackup;
2754 * Now, copy ".." entry back out of volume structure, if
2757 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2759 tvc->mvid = (struct VenusFid *)
2760 osi_AllocSmallSpace(sizeof(struct VenusFid));
2761 *tvc->mvid = tvp->dotdot;
2764 /* store the stat on the file */
2765 afs_RemoveVCB(afid);
2766 afs_ProcessFS(tvc, OutStatus, areq);
2767 tvc->callback = tc->srvr->server;
2769 /* we use osi_Time twice below. Ideally, we would use the time at which
2770 * the FetchStatus call began, instead, but we don't have it here. So we
2771 * make do with "now". In the CRO case, it doesn't really matter. In
2772 * the other case, we hope that the difference between "now" and when the
2773 * call actually began execution on the server won't be larger than the
2774 * padding which the server keeps. Subtract 1 second anyway, to be on
2775 * the safe side. Can't subtract more because we don't know how big
2776 * ExpirationTime is. Possible consistency problems may arise if the call
2777 * timeout period becomes longer than the server's expiration padding. */
2778 ObtainWriteLock(&afs_xcbhash, 470);
2779 if (CallBack->ExpirationTime != 0) {
2780 tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
2781 tvc->f.states |= CStatd;
2782 tvc->f.states &= ~CBulkFetching;
2783 afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
2784 } else if (tvc->f.states & CRO) {
2785 /* old-fashioned AFS 3.2 style */
2786 tvc->cbExpires = 3600 + osi_Time();
2787 /*XXX*/ tvc->f.states |= CStatd;
2788 tvc->f.states &= ~CBulkFetching;
2789 afs_QueueCallback(tvc, CBHash(3600), tvp);
2791 afs_DequeueCallback(tvc);
2792 tvc->callback = NULL;
2793 tvc->f.states &= ~(CStatd | CUnique);
2794 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2795 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2797 ReleaseWriteLock(&afs_xcbhash);
2799 afs_PutVolume(tvp, READ_LOCK);
2801 /* look in per-pag cache */
2802 if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
2803 ac->axess = OutStatus->CallerAccess; /* substitute pags */
2804 else /* not found, add a new one if possible */
2805 afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);
2807 ReleaseWriteLock(&tvc->lock);
2808 afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
2809 ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32,
2810 tvc->cbExpires, ICL_TYPE_INT32, tvc->cbExpires - osi_Time());
2812 * Release ref count... hope this guy stays around...
2815 } /*afs_StuffVcache */
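/*
 * Hedged worked example for the comment in afs_StuffVcache above (illustrative
 * numbers only): if the server grants ExpirationTime = 300 seconds but we only
 * know "now" = osi_Time() rather than when the call started, then
 *
 *     cbExpires = 300 + now - 1
 *
 * The extra -1 keeps our local expiry estimate one second earlier than the
 * straightforward sum, so a small gap between the call's start time and "now"
 * is less likely to let the client believe the callback outlives what the
 * server promised.
 */
static long
sketch_cb_expires(long expiration_secs, long now_secs)
{
    /* mirrors tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1 */
    return expiration_secs + now_secs - 1;
}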
2819 * Decrements the reference count on a cache entry.
2821 * \param avc Pointer to the cache entry to decrement.
2823 * \note Environment: Nothing interesting.
2826 afs_PutVCache(register struct vcache *avc)
2828 AFS_STATCNT(afs_PutVCache);
2829 #ifdef AFS_DARWIN80_ENV
2830 vnode_put(AFSTOV(avc));
2834 * Can we use a read lock here?
2836 ObtainReadLock(&afs_xvcache);
2838 ReleaseReadLock(&afs_xvcache);
2840 } /*afs_PutVCache */
2844 * Reset a vcache entry, so local contents are ignored, and the
2845 * server will be reconsulted next time the vcache is used
2847 * \param avc Pointer to the cache entry to reset
2850 * \note avc must be write locked on entry
2853 afs_ResetVCache(struct vcache *avc, afs_ucred_t *acred) {
2854 ObtainWriteLock(&afs_xcbhash, 456);
2855 afs_DequeueCallback(avc);
2856 avc->f.states &= ~(CStatd | CDirty); /* next reference will re-stat */
2857 ReleaseWriteLock(&afs_xcbhash);
2858 /* now find the disk cache entries */
2859 afs_TryToSmush(avc, acred, 1);
2860 osi_dnlc_purgedp(avc);
2861 if (avc->linkData && !(avc->f.states & CCore)) {
2862 afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
2863 avc->linkData = NULL;
2868 * Sleep while searching for a vcache. Releases all the pending locks,
2869 * sleeps, and then reobtains the previously released locks.
2871 * \param avc The vcache entry whose state change we are waiting for.
2872 * \param flag Determines which locks are held and must be released/reobtained.
2876 static void findvc_sleep(struct vcache *avc, int flag) {
2877 if (flag & IS_SLOCK) {
2878 ReleaseSharedLock(&afs_xvcache);
2880 if (flag & IS_WLOCK) {
2881 ReleaseWriteLock(&afs_xvcache);
2883 ReleaseReadLock(&afs_xvcache);
2886 afs_osi_Sleep(&avc->f.states);
2887 if (flag & IS_SLOCK) {
2888 ObtainSharedLock(&afs_xvcache, 341);
2890 if (flag & IS_WLOCK) {
2891 ObtainWriteLock(&afs_xvcache, 343);
2893 ObtainReadLock(&afs_xvcache);
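/*
 * Hedged usage sketch (not in the original excerpt): callers such as
 * afs_FindVCache below call findvc_sleep() when they hit an entry that is
 * still initializing (CVInit) or being torn down, then rescan the bucket:
 *
 *     if (tvc->f.states & CVInit) {
 *         findvc_sleep(tvc, flag);    -- drops afs_xvcache, sleeps, reobtains it
 *         goto loop;                  -- restart the hash-bucket scan
 *     }
 *
 * The "loop" label name is illustrative; the actual restart point is not
 * shown in this excerpt.
 */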
2898 * Find a vcache entry given a fid.
2900 * \param afid Pointer to the fid whose cache entry we desire.
2901 * \param retry (SGI-specific) tell the caller to drop the lock on xvcache,
2902 * unlock the vnode, and try again.
2903 * \param flag Bit 1 to specify whether to compute hit statistics. Not
2904 * set if FindVCache is called as part of internal bookkeeping.
2906 * \note Environment: Must be called with the afs_xvcache lock at least held at
2907 * the read level. In order to do the VLRU adjustment, the xvcache lock
2908 * must be shared-- we upgrade it here.
2912 afs_FindVCache(struct VenusFid *afid, afs_int32 * retry, afs_int32 flag)
2915 register struct vcache *tvc;
2917 #if defined( AFS_OSF_ENV)
2920 #ifdef AFS_DARWIN80_ENV
2924 AFS_STATCNT(afs_FindVCache);
2928 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2929 if (FidMatches(afid, tvc)) {
2930 if (tvc->f.states & CVInit) {
2931 findvc_sleep(tvc, flag);
2935 /* Grab this vnode, possibly reactivating from the free list */
2937 vg = vget(AFSTOV(tvc));
2941 #endif /* AFS_OSF_ENV */
2942 #ifdef AFS_DARWIN80_ENV
2943 if (tvc->f.states & CDeadVnode) {
2944 findvc_sleep(tvc, flag);
2950 if (vnode_ref(tvp)) {
2952 /* AFSTOV(tvc) may be NULL */
2962 /* should I have a read lock on the vnode here? */
2966 #if !defined(AFS_OSF_ENV) && !defined(AFS_DARWIN80_ENV)
2967 osi_vnhold(tvc, retry); /* already held, above */
2968 if (retry && *retry)
2971 #if defined(AFS_DARWIN_ENV) && !defined(AFS_DARWIN80_ENV)
2972 tvc->f.states |= CUBCinit;
2974 if (UBCINFOMISSING(AFSTOV(tvc)) ||
2975 UBCINFORECLAIMED(AFSTOV(tvc))) {
2976 ubc_info_init(AFSTOV(tvc));
2979 tvc->f.states &= ~CUBCinit;
2982 * only move to front of the VLRU if we have proper vcache locking
2984 if (flag & DO_VLRU) {
2985 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2986 refpanic("FindVC VLRU inconsistent1");
2988 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2989 refpanic("FindVC VLRU inconsistent1");
2991 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2992 refpanic("FindVC VLRU inconsistent2");
2994 UpgradeSToWLock(&afs_xvcache, 26);
2995 QRemove(&tvc->vlruq);
2996 QAdd(&VLRU, &tvc->vlruq);
2997 ConvertWToSLock(&afs_xvcache);
2998 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2999 refpanic("FindVC VLRU inconsistent1");
3001 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
3002 refpanic("FindVC VLRU inconsistent2");
3004 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
3005 refpanic("FindVC VLRU inconsistent3");
3011 if (flag & DO_STATS) {
3013 afs_stats_cmperf.vcacheHits++;
3015 afs_stats_cmperf.vcacheMisses++;
3016 if (afs_IsPrimaryCellNum(afid->Cell))
3017 afs_stats_cmperf.vlocalAccesses++;
3019 afs_stats_cmperf.vremoteAccesses++;
3022 } /*afs_FindVCache */
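/*
 * Hedged sketch: afs_FindVCache above walks a single bucket of afs_vhashT,
 * chained through tvc->hnext, comparing fids with FidMatches().  The hash
 * below is illustrative only -- the real bucket index comes from the VCHash
 * macro in the AFS headers, which may mix the fields differently.
 */
#define SKETCH_VC_BUCKETS 1024   /* illustrative; the real table size is VCSIZE */

struct sketch_fid { unsigned int Cell, Volume, Vnode, Unique; };

static unsigned int
sketch_vc_bucket(const struct sketch_fid *fid)
{
    /* any cheap mix of the fid fields works for illustration */
    return (fid->Vnode + fid->Volume) & (SKETCH_VC_BUCKETS - 1);
}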
3025 * Find a vcache entry given a fid. Does a wildcard match on what we
3026 * have for the fid. If more than one entry, don't return anything.
3028 * \param avcp Fill in pointer if we found one and only one.
3029 * \param afid Pointer to the fid whose cache entry we desire.
3030 * \param retry (SGI-specific) tell the caller to drop the lock on xvcache,
3031 * unlock the vnode, and try again.
3032 * \param flags bit 1 to specify whether to compute hit statistics. Not
3033 * set if FindVCache is called as part of internal bookkeeping.
3035 * \note Environment: Must be called with the afs_xvcache lock at least held at
3036 * the read level. In order to do the VLRU adjustment, the xvcache lock
3037 * must be shared-- we upgrade it here.
3039 * \return Number of matches found.
3042 int afs_duplicate_nfs_fids = 0;
3045 afs_NFSFindVCache(struct vcache **avcp, struct VenusFid *afid)
3047 register struct vcache *tvc;
3049 afs_int32 count = 0;
3050 struct vcache *found_tvc = NULL;
3054 #ifdef AFS_DARWIN80_ENV
3058 AFS_STATCNT(afs_FindVCache);
3062 ObtainSharedLock(&afs_xvcache, 331);
3065 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3066 /* Match only on what we have.... */
3067 if (((tvc->f.fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
3068 && (tvc->f.fid.Fid.Volume == afid->Fid.Volume)
3069 && ((tvc->f.fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
3070 && (tvc->f.fid.Cell == afid->Cell)) {
3071 if (tvc->f.states & CVInit) {
3072 ReleaseSharedLock(&afs_xvcache);
3073 afs_osi_Sleep(&tvc->f.states);
3077 /* Grab this vnode, possibly reactivating from the free list */
3079 vg = vget(AFSTOV(tvc));
3082 /* This vnode no longer exists. */
3085 #endif /* AFS_OSF_ENV */
3086 #ifdef AFS_DARWIN80_ENV
3087 if (tvc->f.states & CDeadVnode) {
3088 ReleaseSharedLock(&afs_xvcache);
3089 afs_osi_Sleep(&tvc->f.states);
3093 if (vnode_get(tvp)) {
3094 /* This vnode no longer exists. */
3097 if (vnode_ref(tvp)) {
3098 /* This vnode no longer exists. */
3100 /* AFSTOV(tvc) may be NULL */
3105 #endif /* AFS_DARWIN80_ENV */
3110 /* Drop our reference counts. */
3112 vrele(AFSTOV(found_tvc));
3114 afs_duplicate_nfs_fids++;
3115 ReleaseSharedLock(&afs_xvcache);
3116 #ifdef AFS_DARWIN80_ENV
3117 /* Drop our reference counts. */
3118 vnode_put(AFSTOV(tvc));
3119 vnode_put(AFSTOV(found_tvc));
3128 /* should I have a read lock on the vnode here? */
3130 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
3131 afs_int32 retry = 0;
3132 osi_vnhold(tvc, &retry);
3135 found_tvc = (struct vcache *)0;
3136 ReleaseSharedLock(&afs_xvcache);
3137 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
3141 #if !defined(AFS_OSF_ENV)
3142 osi_vnhold(tvc, (int *)0); /* already held, above */
3146 * We obtained the xvcache lock above.
3148 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
3149 refpanic("FindVC VLRU inconsistent1");
3151 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
3152 refpanic("FindVC VLRU inconsistent1");
3154 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
3155 refpanic("FindVC VLRU inconsistent2");
3157 UpgradeSToWLock(&afs_xvcache, 568);
3158 QRemove(&tvc->vlruq);
3159 QAdd(&VLRU, &tvc->vlruq);
3160 ConvertWToSLock(&afs_xvcache);
3161 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
3162 refpanic("FindVC VLRU inconsistent1");
3164 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
3165 refpanic("FindVC VLRU inconsistent2");
3167 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
3168 refpanic("FindVC VLRU inconsistent3");
3174 afs_stats_cmperf.vcacheHits++;
3176 afs_stats_cmperf.vcacheMisses++;
3177 if (afs_IsPrimaryCellNum(afid->Cell))
3178 afs_stats_cmperf.vlocalAccesses++;
3180 afs_stats_cmperf.vremoteAccesses++;
3182 *avcp = tvc; /* May be null */
3184 ReleaseSharedLock(&afs_xvcache);
3185 return (tvc ? 1 : 0);
3187 } /*afs_NFSFindVCache */
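/*
 * Hedged sketch: afs_NFSFindVCache above matches on truncated fid fields
 * (only 16 bits of Vnode and 24 bits of Unique survive the NFS translation
 * here), so several vcaches can match one handle; when that happens the
 * function bumps afs_duplicate_nfs_fids and gives the caller no vcache back.
 * Simplified stand-in type, for illustration only:
 */
struct sketch_nfs_fid { unsigned int Cell, Volume, Vnode, Unique; };

static int
sketch_nfs_fid_matches(const struct sketch_nfs_fid *cached,
                       const struct sketch_nfs_fid *wanted)
{
    return (cached->Vnode & 0xffff) == wanted->Vnode
        && cached->Volume == wanted->Volume
        && (cached->Unique & 0xffffff) == wanted->Unique
        && cached->Cell == wanted->Cell;
}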
3193 * Initialize vcache related variables
3198 afs_vcacheInit(int astatSize)
3200 #if (!defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)) || defined(AFS_SGI_ENV)
3201 register struct vcache *tvp;
3204 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
3205 if (!afs_maxvcount) {
3206 #if defined(AFS_LINUX22_ENV)
3207 afs_maxvcount = astatSize; /* no particular limit on linux? */
3208 #elif defined(AFS_OSF30_ENV)
3209 afs_maxvcount = max_vnodes / 2; /* limit ourselves to half the total */
3211 afs_maxvcount = nvnode / 2; /* limit ourselves to half the total */
3213 if (astatSize < afs_maxvcount) {
3214 afs_maxvcount = astatSize;
3217 #else /* AFS_OSF_ENV */
3221 AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
3222 LOCK_INIT(&afs_xvcb, "afs_xvcb");
3224 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
3225 /* Allocate and thread the struct vcache entries */
3226 tvp = (struct vcache *)afs_osi_Alloc(astatSize * sizeof(struct vcache));
3227 memset(tvp, 0, sizeof(struct vcache) * astatSize);
3229 Initial_freeVCList = tvp;
3230 freeVCList = &(tvp[0]);
3231 for (i = 0; i < astatSize - 1; i++) {
3232 tvp[i].nextfree = &(tvp[i + 1]);
3234 tvp[astatSize - 1].nextfree = NULL;
3235 #ifdef KERNEL_HAVE_PIN
3236 pin((char *)tvp, astatSize * sizeof(struct vcache)); /* XXX */
3240 #if defined(AFS_SGI_ENV)
3241 for (i = 0; i < astatSize; i++) {
3242 char name[METER_NAMSZ];
3243 struct vcache *tvc = &tvp[i];
3245 tvc->v.v_number = ++afsvnumbers;
3246 tvc->vc_rwlockid = OSI_NO_LOCKID;
3247 initnsema(&tvc->vc_rwlock, 1,
3248 makesname(name, "vrw", tvc->v.v_number));
3249 #ifndef AFS_SGI53_ENV
3250 initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
3252 #ifndef AFS_SGI62_ENV
3253 initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
3254 #endif /* AFS_SGI62_ENV */
3258 for(i = 0; i < VCSIZE; ++i)
3259 QInit(&afs_vhashTV[i]);
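/*
 * Hedged sketch: afs_vcacheInit above threads the freshly allocated vcache
 * array into a singly linked free list through the nextfree pointer.  The
 * same idiom with a simplified, illustration-only element type:
 */
struct sketch_vc { struct sketch_vc *nextfree; };

static struct sketch_vc *
sketch_thread_free_list(struct sketch_vc *arr, int n)
{
    int i;

    for (i = 0; i < n - 1; i++)
        arr[i].nextfree = &arr[i + 1];  /* each entry points at the next */
    if (n > 0)
        arr[n - 1].nextfree = NULL;     /* last entry terminates the list */
    return n > 0 ? &arr[0] : NULL;      /* head becomes freeVCList */
}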
3266 shutdown_vcache(void)
3269 struct afs_cbr *tsp;
3271 * XXX We may potentially miss some of the vcaches because, when
3272 * there are no free vcache entries and all the vcache entries are active
3273 * ones, we allocate an additional one - admittedly we almost never
3278 register struct afs_q *tq, *uq = NULL;
3279 register struct vcache *tvc;
3280 for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
3284 osi_FreeSmallSpace(tvc->mvid);
3285 tvc->mvid = (struct VenusFid *)0;
3288 aix_gnode_rele(AFSTOV(tvc));
3290 if (tvc->linkData) {
3291 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
3296 * Also free the remaining ones in the Cache
3298 for (i = 0; i < VCSIZE; i++) {
3299 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3301 osi_FreeSmallSpace(tvc->mvid);
3302 tvc->mvid = (struct VenusFid *)0;
3306 afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
3307 #ifdef AFS_AIX32_ENV
3310 vms_delete(tvc->segid);
3312 tvc->segid = tvc->vmh = NULL;
3313 if (VREFCOUNT_GT(tvc,0))
3314 osi_Panic("flushVcache: vm race");
3322 #if defined(AFS_SUN5_ENV)
3328 if (tvc->linkData) {
3329 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
3334 afs_FreeAllAxs(&(tvc->Access));
3340 * Free any leftover callback queue
3342 for (i = 0; i < afs_stats_cmperf.CallBackAlloced; i++) {
3343 tsp = afs_cbrHeads[i];
3344 afs_cbrHeads[i] = 0;
3345 afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
3349 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
3350 afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3352 #ifdef KERNEL_HAVE_PIN
3353 unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3356 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
3357 freeVCList = Initial_freeVCList = 0;
3359 AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
3360 LOCK_INIT(&afs_xvcb, "afs_xvcb");
3362 for(i = 0; i < VCSIZE; ++i)
3363 QInit(&afs_vhashTV[i]);
3366 void afs_DisconGiveUpCallbacks(void) {
3371 ObtainWriteLock(&afs_xvcache, 1002); /* XXX - should be a unique number */
3373 /* Somehow, walk the set of vcaches, with each one coming out as tvc */
3374 for (i = 0; i < VCSIZE; i++) {
3375 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3376 if ((tvc->f.states & CRO) == 0 && tvc->callback) {
3378 tvc->callback = NULL;
3383 /*printf("%d callbacks to be discarded. queued ... ", nq);*/
3386 ReleaseWriteLock(&afs_xvcache);
3387 /*printf("gone\n");*/
3392 * Clear the Statd flag from all vcaches
3394 * This function removes the Statd flag from all vcaches. It's used by
3395 * disconnected mode to tidy up during reconnection
3398 void afs_ClearAllStatdFlag(void) {
3402 ObtainWriteLock(&afs_xvcache, 715);
3404 for (i = 0; i < VCSIZE; i++) {
3405 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3406 tvc->f.states &= ~(CStatd|CUnique);
3409 ReleaseWriteLock(&afs_xvcache);