/*
 * Copyright 2000, International Business Machines Corporation and others.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
/*
 * Implements:
 * afs_FlushActiveVcaches
 * afs_WriteVCacheDiscon
 */
#include <afsconfig.h>
#include "afs/param.h"

#include "afs/sysincludes.h"	/* Standard vendor system headers */
#include "afsincludes.h"	/* AFS-based standard headers */
#include "afs/afs_stats.h"
#include "afs/afs_cbqueue.h"
#include "afs/afs_osidnlc.h"
#if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
afs_int32 afs_maxvcount = 0;	/* max number of vcache entries */
afs_int32 afs_vcount = 0;	/* number of vcache entries in use now */
#endif /* AFS_OSF_ENV || AFS_LINUX22_ENV */

#ifdef AFS_SGI_ENV
int afsvnumbers = 0;
#endif

#ifdef AFS_SGI64_ENV
char *makesname();
#endif /* AFS_SGI64_ENV */
/* Exported variables */
afs_rwlock_t afs_xvcdirty;	/* Lock: discon vcache dirty list mgmt */
afs_rwlock_t afs_xvcache;	/* Lock: alloc new stat cache entries */
afs_rwlock_t afs_xvreclaim;	/* Lock: entries reclaimed, not on free list */
afs_lock_t afs_xvcb;		/* Lock: fids on which there are callbacks */
#if !defined(AFS_LINUX22_ENV)
static struct vcache *freeVCList;	/* Free list for stat cache entries */
struct vcache *ReclaimedVCList;	/* Reclaimed list for stat entries */
static struct vcache *Initial_freeVCList;	/* Initial list for above */
#endif
struct afs_q VLRU;		/* vcache LRU */
afs_int32 vcachegen = 0;
unsigned int afs_paniconwarn = 0;
struct vcache *afs_vhashT[VCSIZE];
struct afs_q afs_vhashTV[VCSIZE];
static struct afs_cbr *afs_cbrHashT[CBRSIZE];
afs_int32 afs_bulkStatsLost;
int afs_norefpanic = 0;
extern int afsd_dynamic_vcaches;
/* Disk backed vcache definitions
 * Both protected by xvcache */
static int afs_nextVcacheSlot = 0;
static struct afs_slotlist *afs_freeSlotList = NULL;
/* Forward declarations */
static afs_int32 afs_QueueVCB(struct vcache *avc);
/*!
 * Generate an index into the hash table for a given Fid.
 *
 * \return The hash value.
 */
static int
afs_HashCBRFid(struct AFSFid *fid)
{
    return (fid->Volume + fid->Vnode + fid->Unique) % CBRSIZE;
}
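
/*
 * Editorial example (not part of the original source): the hash simply
 * folds the three fid components, so every CBR for one fid lands on a
 * single afs_cbrHashT chain.  For instance, a hypothetical fid with
 * Volume=536870918, Vnode=2, Unique=7 maps to bucket
 * (536870918 + 2 + 7) % CBRSIZE.
 */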
/*!
 * Insert a CBR entry into the hash table.
 * Must be called with afs_xvcb held.
 */
static void
afs_InsertHashCBR(struct afs_cbr *cbr)
{
    int slot = afs_HashCBRFid(&cbr->fid);

    cbr->hash_next = afs_cbrHashT[slot];
    if (afs_cbrHashT[slot])
	afs_cbrHashT[slot]->hash_pprev = &cbr->hash_next;

    cbr->hash_pprev = &afs_cbrHashT[slot];
    afs_cbrHashT[slot] = cbr;
}
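
/*
 * Editorial sketch (not part of the original source): hash_pprev points
 * at whatever pointer currently references the entry (the bucket head or
 * the previous entry's hash_next), so a hashed CBR can be unlinked
 * without rescanning the bucket.  afs_FreeCBR below relies on exactly
 * this; a minimal stand-alone unlink would be:
 */
#if 0	/* illustration only */
static void
example_UnlinkHashCBR(struct afs_cbr *cbr)
{
    *(cbr->hash_pprev) = cbr->hash_next;
    if (cbr->hash_next)
	cbr->hash_next->hash_pprev = cbr->hash_pprev;
}
#endif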
/*!
 * Flush the given vcache entry.
 *
 * Environment:
 *	afs_xvcache lock must be held for writing upon entry to
 *	prevent people from changing the vrefCount field, and to
 *	protect the lruq and hnext fields.
 * LOCK: afs_FlushVCache afs_xvcache W
 * REFCNT: vcache ref count must be zero on entry except for osf1
 * RACE: lock is dropped and reobtained, permitting race in caller
 *
 * \param avc Pointer to vcache entry to flush.
 * \param slept Pointer to int, set to 1 if we sleep/drop locks, 0 if we don't.
 */
int
afs_FlushVCache(struct vcache *avc, int *slept)
{				/*afs_FlushVCache */
    afs_int32 i, code;
    struct vcache **uvc, *wvc;

    AFS_STATCNT(afs_FlushVCache);
    afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_INT32, avc->f.states);
#ifdef AFS_OSF_ENV
    VN_LOCK(AFSTOV(avc));
#endif

    code = osi_VM_FlushVCache(avc, slept);
    if (code)
	goto bad;
    if (avc->f.states & CVFlushed) {
	code = EBUSY;
	goto bad;
    }
#if !defined(AFS_LINUX22_ENV)
    if (avc->nextfree || !avc->vlruq.prev || !avc->vlruq.next) {	/* qv afs.h */
	refpanic("LRU vs. Free inconsistency");
    }
#endif
    avc->f.states |= CVFlushed;
    /* pull the entry out of the lruq and put it on the free list */
    QRemove(&avc->vlruq);

    /* keep track of # of files that we bulk stat'd, but never used
     * before they got recycled.
     */
    if (avc->f.states & CBulkStat)
	afs_bulkStatsLost++;
    vcachegen++;
    /* remove entry from the hash chain */
    i = VCHash(&avc->f.fid);
    uvc = &afs_vhashT[i];
    for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
	if (avc == wvc) {
	    *uvc = avc->hnext;
	    avc->hnext = (struct vcache *)NULL;
	    break;
	}
    }

    /* remove entry from the volume hash table */
    QRemove(&avc->vhashq);

    if (avc->mvid)
	osi_FreeSmallSpace(avc->mvid);
    avc->mvid = (struct VenusFid *)0;
    if (avc->linkData) {
	afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
	avc->linkData = NULL;
    }
#if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
    /* OK, there are no internal vrefCounts, so there shouldn't
     * be any more refs here. */
    if (avc->v) {
#ifdef AFS_DARWIN80_ENV
	vnode_clearfsnode(AFSTOV(avc));
	vnode_removefsref(AFSTOV(avc));
#else
	avc->v->v_data = NULL;	/* remove from vnode */
#endif
	AFSTOV(avc) = NULL;	/* also drop the ptr to vnode */
    }
#endif
#ifdef AFS_SUN510_ENV
    /* As we use private vnodes, cleanup is up to us */
    vn_reinit(AFSTOV(avc));
#endif
    afs_FreeAllAxs(&(avc->Access));
    /* we can't really give back callbacks on RO files, since the
     * server only tracks them on a per-volume basis, and we don't
     * know whether we still have some other files from the same
     * volume. */
    if ((avc->f.states & CRO) == 0 && avc->callback) {
	afs_QueueVCB(avc);
    }
    ObtainWriteLock(&afs_xcbhash, 460);
    afs_DequeueCallback(avc);	/* remove it from queued callbacks list */
    avc->f.states &= ~(CStatd | CUnique);
    ReleaseWriteLock(&afs_xcbhash);
    if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
	osi_dnlc_purgedp(avc);	/* if it (could be) a directory */
    else
	osi_dnlc_purgevp(avc);

    /*
     * Next, keep track of which vnodes we've deleted for create's
     * optimistic synchronization algorithm
     */
    afs_allZaps++;
    if (avc->f.fid.Fid.Vnode & 1)
	afs_oddZaps++;
    else
	afs_evenZaps++;
#if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
    /* put the entry in the free list */
    avc->nextfree = freeVCList;
    freeVCList = avc;
    if (avc->vlruq.prev || avc->vlruq.next) {
	refpanic("LRU vs. Free inconsistency");
    }
    avc->f.states |= CVFlushed;
#else
    /* This should put it back on the vnode free list since usecount is 1 */
    afs_vcount--;
    if (VREFCOUNT_GT(avc, 0)) {
#if defined(AFS_OSF_ENV)
	VN_UNLOCK(AFSTOV(avc));
#endif
	AFS_RELE(AFSTOV(avc));
	afs_stats_cmperf.vcacheXAllocs--;
    } else {
	if (afs_norefpanic) {
	    printf("flush vc refcnt < 1");
	    afs_norefpanic++;
#if defined(AFS_OSF_ENV)
	    (void)vgone(avc, VX_NOSLEEP, NULL);
	    VN_UNLOCK(AFSTOV(avc));
#endif
	} else
	    osi_Panic("flush vc refcnt < 1");
    }
#endif /* AFS_OSF_ENV */
    return 0;

  bad:
#ifdef AFS_OSF_ENV
    VN_UNLOCK(AFSTOV(avc));
#endif
    return code;
}				/*afs_FlushVCache */
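
/*
 * Editorial sketch (not part of the original source): a typical caller of
 * afs_FlushVCache.  The caller must hold afs_xvcache for writing, and
 * because the lock may be dropped (*slept set to 1), any hash or VLRU
 * traversal in progress must be restarted afterwards.  The function name
 * and lock number below are hypothetical.
 */
#if 0	/* illustration only */
static void
example_FlushOne(struct vcache *tvc)
{
    int slept = 0;

    ObtainWriteLock(&afs_xvcache, 1000);
    if (afs_FlushVCache(tvc, &slept) == 0 && slept) {
	/* the lock was dropped and reobtained: hash chains and the VLRU
	 * may have changed, so restart any scan in progress */
    }
    ReleaseWriteLock(&afs_xvcache);
}
#endif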

/*!
 * The core of the inactive vnode op for all but IRIX.
 *
 * \param avc
 * \param acred
 */
void
afs_InactiveVCache(struct vcache *avc, struct AFS_UCRED *acred)
{
    AFS_STATCNT(afs_inactive);
    if (avc->f.states & CDirty) {
	/* we can't keep trying to push back dirty data forever.  Give up. */
	afs_InvalidateAllSegments(avc);	/* turns off dirty bit */
    }
    avc->f.states &= ~CMAPPED;	/* mainly used by SunOS 4.0.x */
    avc->f.states &= ~CDirty;	/* Turn it off */
    if (avc->f.states & CUnlinked) {
	if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
	    avc->f.states |= CUnlinkedDel;
	    return;
	}
	afs_remunlink(avc, 1);	/* ignore any return code */
    }
}
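
/*
 * Editorial sketch (not part of the original source): roughly how a
 * platform's inactive vnode op hands off to afs_InactiveVCache; the
 * wrapper name here is hypothetical.
 */
#if 0	/* illustration only */
int
example_vop_inactive(struct vnode *vp, struct AFS_UCRED *acred)
{
    AFS_GLOCK();
    afs_InactiveVCache(VTOAFS(vp), acred);
    AFS_GUNLOCK();
    return 0;
}
#endif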

/*!
 * Allocate a callback return structure from the
 * free list and return it.
 *
 * Environment: The alloc and free routines are both called with the afs_xvcb lock
 * held, so we don't have to worry about blocking in osi_Alloc.
 *
 * \return The allocated afs_cbr.
 */
static struct afs_cbr *afs_cbrSpace = 0;
/* if alloc limit below changes, fix me! */
static struct afs_cbr *afs_cbrHeads[2];
struct afs_cbr *
afs_AllocCBR(void)
{
    register struct afs_cbr *tsp;
    int i;
    while (!afs_cbrSpace) {
	if (afs_stats_cmperf.CallBackAlloced >= 2) {
	    /* don't allocate more than 2 * AFS_NCBRS for now */
	    afs_FlushVCBs(0);
	    afs_stats_cmperf.CallBackFlushes++;
	} else {
	    /* try allocating */
	    tsp = (struct afs_cbr *)afs_osi_Alloc(AFS_NCBRS *
						  sizeof(struct afs_cbr));
	    for (i = 0; i < AFS_NCBRS - 1; i++) {
		tsp[i].next = &tsp[i + 1];
	    }
	    tsp[AFS_NCBRS - 1].next = 0;
	    afs_cbrSpace = tsp;
	    afs_cbrHeads[afs_stats_cmperf.CallBackAlloced] = tsp;
	    afs_stats_cmperf.CallBackAlloced++;
	}
    }
    tsp = afs_cbrSpace;
    afs_cbrSpace = tsp->next;
    return tsp;
}

/*!
 * Free a callback return structure, removing it from all lists.
 *
 * Environment: the xvcb lock is held over these calls.
 *
 * \param asp The address of the structure to free.
 */
int
afs_FreeCBR(register struct afs_cbr *asp)
{
    *(asp->pprev) = asp->next;
    if (asp->next)
	asp->next->pprev = asp->pprev;

    *(asp->hash_pprev) = asp->hash_next;
    if (asp->hash_next)
	asp->hash_next->hash_pprev = asp->hash_pprev;

    asp->next = afs_cbrSpace;
    afs_cbrSpace = asp;
    return 0;
}

/*!
 * Flush all queued callbacks to all servers.
 *
 * Environment: holds xvcb lock over RPC to guard against race conditions
 * when a new callback is granted for the same file later on.
 *
 * \return 0 for success.
 */
afs_int32
afs_FlushVCBs(afs_int32 lockit)
{
    struct AFSFid *tfids;
    struct AFSCallBack callBacks[1];
    struct AFSCBFids fidArray;
    struct AFSCBs cbArray;
    afs_int32 code;
    struct afs_cbr *tcbrp;
    int tcount;
    struct server *tsp;
    int i;
    struct vrequest treq;
    struct afs_conn *tc;
    int safety1, safety2, safety3;
    XSTATS_DECLS;
    if ((code = afs_InitReq(&treq, afs_osi_credp)))
	return code;
    treq.flags |= O_NONBLOCK;
    tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);

    if (lockit)
	MObtainWriteLock(&afs_xvcb, 273);
    ObtainReadLock(&afs_xserver);
    for (i = 0; i < NSERVERS; i++) {
	for (safety1 = 0, tsp = afs_servers[i];
	     tsp && safety1 < afs_totalServers + 10;
	     tsp = tsp->next, safety1++) {
	    /* don't have any */
	    if (tsp->cbrs == (struct afs_cbr *)0)
		continue;

	    /* otherwise, grab a block of AFS_MAXCBRSCALL from the list
	     * and make an RPC, over and over again.
	     */
	    tcount = 0;		/* number found so far */
	    for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
		if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
		    /* if buffer is full, or we've queued all we're going
		     * to from this server, we should flush out the
		     * callbacks.
		     */
		    fidArray.AFSCBFids_len = tcount;
		    fidArray.AFSCBFids_val = (struct AFSFid *)tfids;
		    cbArray.AFSCBs_len = 1;
		    cbArray.AFSCBs_val = callBacks;
		    memset(&callBacks[0], 0, sizeof(callBacks[0]));
		    callBacks[0].CallBackType = CB_EXCLUSIVE;
		    for (safety3 = 0; safety3 < MAXHOSTS * 2; safety3++) {
			tc = afs_ConnByHost(tsp, tsp->cell->fsport,
					    tsp->cell->cellNum, &treq, 0,
					    SHARED_LOCK);
			if (tc) {
			    XSTATS_START_TIME
				(AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
			    RX_AFS_GUNLOCK();
			    code =
				RXAFS_GiveUpCallBacks(tc->id, &fidArray,
						      &cbArray);
			    RX_AFS_GLOCK();
			    XSTATS_END_TIME;
			} else
			    code = -1;
			if (!afs_Analyze
			    (tc, code, 0, &treq,
			     AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS, SHARED_LOCK,
			     NULL))
			    break;
		    }
		    /* ignore return code, since callbacks may have
		     * been returned anyway, we shouldn't leave them
		     * around to be returned again.
		     *
		     * Next, see if we are done with this server, and if so,
		     * break to deal with the next one.
		     */
		    if (!tsp->cbrs)
			break;
		    tcount = 0;
		}
		/* if to flush full buffer */
		/* if we make it here, we have an entry at the head of cbrs,
		 * which we should copy to the file ID array and then free.
		 */
		tcbrp = tsp->cbrs;
		tfids[tcount++] = tcbrp->fid;

		/* Freeing the CBR will unlink it from the server's CBR list */
		afs_FreeCBR(tcbrp);
	    }			/* while loop for this one server */
	    if (safety2 > afs_cacheStats) {
		afs_warn("possible internal error afs_flushVCBs (%d)\n",
			 safety2);
	    }
	}			/* for loop for this hash chain */
    }				/* loop through all hash chains */
    if (safety1 > afs_totalServers + 2) {
	afs_warn
	    ("AFS internal error (afs_flushVCBs) (%d > %d), continuing...\n",
	     safety1, afs_totalServers + 2);
	if (afs_paniconwarn)
	    osi_Panic("afs_flushVCBS safety1");
    }

    ReleaseReadLock(&afs_xserver);
    if (lockit)
	MReleaseWriteLock(&afs_xvcb);
    afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
    return 0;
}

/*!
 * Queue a callback on the given fid.
 *
 * Environment:
 *	Locks the xvcb lock.
 *	Called when the xvcache lock is already held.
 *
 * \param avc vcache entry
 * \return 0 for success < 0 otherwise.
 */
static afs_int32
afs_QueueVCB(struct vcache *avc)
{
    struct server *tsp;
    struct afs_cbr *tcbp;

    AFS_STATCNT(afs_QueueVCB);
    /* The callback is really just a struct server ptr. */
    tsp = (struct server *)(avc->callback);

    /* we now have a pointer to the server, so we just allocate
     * a queue entry and queue it.
     */
    MObtainWriteLock(&afs_xvcb, 274);
    tcbp = afs_AllocCBR();
    tcbp->fid = avc->f.fid.Fid;

    tcbp->next = tsp->cbrs;
    if (tsp->cbrs)
	tsp->cbrs->pprev = &tcbp->next;

    tsp->cbrs = tcbp;
    tcbp->pprev = &tsp->cbrs;

    afs_InsertHashCBR(tcbp);

    /* now release locks and return */
    MReleaseWriteLock(&afs_xvcb);
    return 0;
}
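
/*
 * Editorial sketch (not part of the original source): afs_QueueVCB runs
 * with afs_xvcache already held and takes afs_xvcb inside it, while
 * afs_FlushVCBs(1) acquires afs_xvcb on its own.  A hypothetical caller
 * returning a callback for a dead entry might therefore look like this
 * (the function name and lock number are invented):
 */
#if 0	/* illustration only */
static void
example_ReturnCallback(struct vcache *avc)
{
    ObtainWriteLock(&afs_xvcache, 1001);
    if ((avc->f.states & CRO) == 0 && avc->callback)
	afs_QueueVCB(avc);	/* just queue it; no RPC yet */
    ReleaseWriteLock(&afs_xvcache);
    afs_FlushVCBs(1);		/* batch the GiveUpCallBacks RPCs */
}
#endif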

/*!
 * Remove a queued callback for a given Fid.
 *
 * Environment:
 *	Locks xvcb and xserver locks.
 *	Typically called with xdcache, xvcache and/or individual vcache
 *	entries locked.
 *
 * \param afid The fid we want cleansed of queued callbacks.
 */
static void
afs_RemoveVCB(struct VenusFid *afid)
{
    int slot;
    struct afs_cbr *cbr, *ncbr;

    AFS_STATCNT(afs_RemoveVCB);
    MObtainWriteLock(&afs_xvcb, 275);

    slot = afs_HashCBRFid(&afid->Fid);
    ncbr = afs_cbrHashT[slot];

    while (ncbr) {
	cbr = ncbr;
	ncbr = cbr->hash_next;

	if (afid->Fid.Volume == cbr->fid.Volume &&
	    afid->Fid.Vnode == cbr->fid.Vnode &&
	    afid->Fid.Unique == cbr->fid.Unique) {
	    afs_FreeCBR(cbr);
	}
    }

    MReleaseWriteLock(&afs_xvcb);
}

void
afs_FlushReclaimedVcaches(void)
{
#if !defined(AFS_LINUX22_ENV)
    struct vcache *tvc;
    int code, fv_slept;
    struct vcache *tmpReclaimedVCList = NULL;

    ObtainWriteLock(&afs_xvreclaim, 76);
    while (ReclaimedVCList) {
	tvc = ReclaimedVCList;	/* take from free list */
	ReclaimedVCList = tvc->nextfree;
	tvc->nextfree = NULL;
	code = afs_FlushVCache(tvc, &fv_slept);
	if (code) {
	    /* Ok, so, if we got code != 0, uh, wtf do we do? */
	    /* Probably, build a temporary list and then put all back when we
	       get to the end of the list */
	    /* This is actually really crappy, but we need to not leak these.
	       We probably need a way to be smarter about this. */
	    tvc->nextfree = tmpReclaimedVCList;
	    tmpReclaimedVCList = tvc;
	    printf("Reclaim list flush %lx failed: %d\n",
		   (unsigned long)tvc, code);
	}
	if (tvc->f.states & (CVInit
#ifdef AFS_DARWIN80_ENV
			     | CDeadVnode
#endif
	    )) {
	    tvc->f.states &= ~(CVInit
#ifdef AFS_DARWIN80_ENV
			       | CDeadVnode
#endif
		);
	    afs_osi_Wakeup(&tvc->f.states);
	}
    }
    if (tmpReclaimedVCList)
	ReclaimedVCList = tmpReclaimedVCList;

    ReleaseWriteLock(&afs_xvreclaim);
#endif
}

int
afs_ShakeLooseVCaches(afs_int32 anumber)
{
#if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
    afs_int32 i;
    struct vcache *tvc;
    struct afs_q *tq, *uq;
    int code, fv_slept;
    afs_int32 target = anumber;
    int haveGlock = 1;

    /* Should probably deal better */
    if (!ISAFS_GLOCK()) {
	haveGlock = 0;
	AFS_GLOCK();
    }

    if (
#ifdef AFS_MAXVCOUNT_ENV
	afsd_dynamic_vcaches ||	/* Always run if dynamic vcaches are enabled. */
#endif
	afs_vcount >= afs_maxvcount) {
	i = 0;
	for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
	    tvc = QTOV(tq);
	    uq = QPrev(tq);
	    if (tvc->f.states & CVFlushed) {
		refpanic("CVFlushed on VLRU");
	    } else if (
#ifdef AFS_MAXVCOUNT_ENV
		!afsd_dynamic_vcaches &&
#endif
		i++ > afs_maxvcount) {
		refpanic("Exceeded pool of AFS vnodes(VLRU cycle?)");
	    } else if (QNext(uq) != tq) {
		refpanic("VLRU inconsistent");
	    } else if (!VREFCOUNT_GT(tvc, 0)) {
		refpanic("refcnt 0 on VLRU");
	    }

#if defined(AFS_LINUX22_ENV)
	    if (tvc != afs_globalVp && VREFCOUNT(tvc) > 1 && tvc->opens == 0) {
		struct dentry *dentry;
		struct list_head *cur, *head;
		AFS_GUNLOCK();
#if defined(AFS_LINUX24_ENV)
		spin_lock(&dcache_lock);
#endif /* AFS_LINUX24_ENV */
		head = &(AFSTOV(tvc))->i_dentry;

	    restart:
		cur = head;
		while ((cur = cur->next) != head) {
		    dentry = list_entry(cur, struct dentry, d_alias);

		    if (d_unhashed(dentry))
			continue;
		    dget_locked(dentry);

#if defined(AFS_LINUX24_ENV)
		    spin_unlock(&dcache_lock);
#endif /* AFS_LINUX24_ENV */
		    if (d_invalidate(dentry) == -EBUSY) {
			dput(dentry);
			/* perhaps lock and try to continue? (use cur as head?) */
			goto inuse;
		    }
		    dput(dentry);
#if defined(AFS_LINUX24_ENV)
		    spin_lock(&dcache_lock);
#endif /* AFS_LINUX24_ENV */
		    goto restart;
		}
#if defined(AFS_LINUX24_ENV)
		spin_unlock(&dcache_lock);
#endif /* AFS_LINUX24_ENV */
	    inuse:
		AFS_GLOCK();
	    }
#endif /* AFS_LINUX22_ENV */

	    if (VREFCOUNT_GT(tvc, 0) && !VREFCOUNT_GT(tvc, 1) &&
		tvc->opens == 0
		&& (tvc->f.states & CUnlinkedDel) == 0) {
		code = afs_FlushVCache(tvc, &fv_slept);
		if (code == 0)
		    anumber--;
		if (fv_slept) {
		    uq = VLRU.prev;
		    i = 0;
		    continue;	/* start over - may have raced. */
		}
	    }
	    if (tq == uq)
		break;
	}
	if (
#ifdef AFS_MAXVCOUNT_ENV
	    !afsd_dynamic_vcaches &&
#endif
	    anumber == target) {
	    printf("afs_ShakeLooseVCaches: warning none freed, using %d of %d\n",
		   afs_vcount, afs_maxvcount);
	}
    }				/* finished freeing up space */
/*
    printf("recycled %d entries\n", target-anumber);
*/
    if (!haveGlock)
	AFS_GUNLOCK();
#endif
    return 0;
}

/* Alloc new vnode. */

static struct vcache *
afs_AllocVCache(void)
{
    struct vcache *tvc;
#if defined(AFS_OSF30_ENV)
    struct vnode *nvc;
    AFS_GUNLOCK();
    if (getnewvnode(MOUNT_AFS, &Afs_vnodeops, &nvc)) {
	/* What should we do ???? */
	osi_Panic("afs_AllocVCache: no more vnodes");
    }
    AFS_GLOCK();

    tvc = nvc;
    tvc->nextfree = NULL;
    afs_vcount++;
#elif defined(AFS_LINUX22_ENV)
    struct inode *ip;

    AFS_GUNLOCK();
    ip = new_inode(afs_globalVFS);
    if (!ip)
	osi_Panic("afs_AllocVCache: no more inodes");
    AFS_GLOCK();
#if defined(STRUCT_SUPER_HAS_ALLOC_INODE)
    tvc = VTOAFS(ip);
#else
    tvc = afs_osi_Alloc(sizeof(struct vcache));
    ip->u.generic_ip = tvc;
    tvc->v = ip;
#endif

    afs_vcount++;
#ifdef AFS_MAXVCOUNT_ENV
    /* track the high-water mark when dynamic vcaches are in use */
    if (afsd_dynamic_vcaches && afs_maxvcount < afs_vcount) {
	afs_maxvcount = afs_vcount;
	/*printf("peak vnodes: %d\n", afs_maxvcount);*/
    }
#endif
    afs_stats_cmperf.vcacheXAllocs++;	/* count in case we have a leak */
#else
    /* none free, making one is better than a panic */
    afs_stats_cmperf.vcacheXAllocs++;	/* count in case we have a leak */
    tvc = (struct vcache *)afs_osi_Alloc(sizeof(struct vcache));
#if defined(AFS_DARWIN_ENV) && !defined(UKERNEL)
    tvc->v = NULL;	/* important to clean this, or use memset 0 */
#endif
#ifdef KERNEL_HAVE_PIN
    pin((char *)tvc, sizeof(struct vcache));	/* XXX */
#endif
#if defined(AFS_SGI_ENV)
    {
	char name[METER_NAMSZ];
	memset(tvc, 0, sizeof(struct vcache));
	tvc->v.v_number = ++afsvnumbers;
	tvc->vc_rwlockid = OSI_NO_LOCKID;
	initnsema(&tvc->vc_rwlock, 1,
		  makesname(name, "vrw", tvc->v.v_number));
#ifndef AFS_SGI53_ENV
	initnsema(&tvc->v.v_sync, 0,
		  makesname(name, "vsy", tvc->v.v_number));
#endif
#ifndef AFS_SGI62_ENV
	initnlock(&tvc->v.v_lock,
		  makesname(name, "vlk", tvc->v.v_number));
#endif
    }
#endif /* AFS_SGI_ENV */
#endif

#ifdef AFS_DISCON_ENV
    /* If we create a new inode, we either give it a new slot number,
     * or if one's available, use a slot number from the slot free list
     */
    if (afs_freeSlotList != NULL) {
	struct afs_slotlist *tmp;

	tvc->diskSlot = afs_freeSlotList->slot;
	tmp = afs_freeSlotList;
	afs_freeSlotList = tmp->next;
	afs_osi_Free(tmp, sizeof(struct afs_slotlist));
    } else {
	tvc->diskSlot = afs_nextVcacheSlot++;
    }
#endif

    return tvc;
}

/*!
 * This routine is responsible for allocating a new cache entry
 * from the free list.  It formats the cache entry and inserts it
 * into the appropriate hash tables.  It must be called with
 * afs_xvcache write-locked so as to prevent several processes from
 * trying to create a new cache entry simultaneously.
 *
 * LOCK: afs_NewVCache afs_xvcache W
 *
 * \param afid The file id of the file whose cache entry is being created.
 *
 * \return The new vcache struct.
 */
struct vcache *
afs_NewVCache(struct VenusFid *afid, struct server *serverp)
{
    struct vcache *tvc;
    afs_int32 i, j;
    afs_int32 anumber = VCACHE_FREE;
#ifdef AFS_AIX_ENV
    struct gnode *gnodepnt;
#endif
#if !defined(AFS_LINUX22_ENV)
    struct afs_q *tq, *uq;
    int code, fv_slept;
#endif

    AFS_STATCNT(afs_NewVCache);

    afs_FlushReclaimedVcaches();
#if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
#ifdef AFS_MAXVCOUNT_ENV
    if (!afsd_dynamic_vcaches) {
#endif
	afs_ShakeLooseVCaches(anumber);
	if (afs_vcount >= afs_maxvcount) {
	    printf("afs_NewVCache - none freed\n");
	    return NULL;
	}
#ifdef AFS_MAXVCOUNT_ENV
    }
#endif
    tvc = afs_AllocVCache();
#else /* AFS_OSF_ENV */
    /* pull out a free cache entry */
    if (!freeVCList) {
	i = 0;
	for (tq = VLRU.prev; (anumber > 0) && (tq != &VLRU); tq = uq) {
	    tvc = QTOV(tq);
	    uq = QPrev(tq);

	    if (tvc->f.states & CVFlushed) {
		refpanic("CVFlushed on VLRU");
	    } else if (i++ > 2 * afs_cacheStats) {	/* even allowing for a few xallocs... */
		refpanic("Increase -stat parameter of afsd(VLRU cycle?)");
	    } else if (QNext(uq) != tq) {
		refpanic("VLRU inconsistent");
	    } else if (tvc->f.states & CVInit) {
		continue;
	    }

	    if (!VREFCOUNT_GT(tvc, 0)
#if defined(AFS_DARWIN_ENV) && !defined(UKERNEL) && !defined(AFS_DARWIN80_ENV)
		|| ((VREFCOUNT(tvc) == 1) &&
		    (UBCINFOEXISTS(AFSTOV(tvc))))
#endif
		&& tvc->opens == 0 && (tvc->f.states & CUnlinkedDel) == 0) {
#if defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
#ifdef AFS_DARWIN80_ENV
		vnode_t tvp = AFSTOV(tvc);
		/* VREFCOUNT_GT only sees usecounts, not iocounts */
		/* so this may fail to actually recycle the vnode now */
		/* must call vnode_get to avoid races. */
		fv_slept = 0;
		if (vnode_get(tvp) == 0) {
		    fv_slept = 1;
		    /* must release lock, since vnode_put will immediately
		       reclaim if there are no other users */
		    ReleaseWriteLock(&afs_xvcache);
		    AFS_GUNLOCK();
		    vnode_recycle(tvp);
		    vnode_put(tvp);
		    AFS_GLOCK();
		    ObtainWriteLock(&afs_xvcache, 336);
		}
		/* we can't use the vnode_recycle return value to figure
		 * this out, since the iocount we have to hold makes it
		 * always "fail" */
		if (AFSTOV(tvc) == tvp) {
		    if (anumber > 0 && fv_slept) {
			QRemove(&tvc->vlruq);
			QAdd(&VLRU, &tvc->vlruq);
		    }
		    code = EBUSY;
		} else
		    code = 0;
#else /* AFS_DARWIN80_ENV */
		/*
		 * vgone() reclaims the vnode, which calls afs_FlushVCache(),
		 * then it puts the vnode on the free list.
		 * If we don't do this we end up with a cleaned vnode that's
		 * not on the free list.
		 * XXX assume FreeBSD is the same for now.
		 */
		AFS_GUNLOCK();
		vgone(AFSTOV(tvc));
		fv_slept = 0;
		code = 0;
		AFS_GLOCK();
#endif
#else /* AFS_DARWIN80_ENV || AFS_XBSD_ENV */
		code = afs_FlushVCache(tvc, &fv_slept);
#endif /* AFS_DARWIN80_ENV || AFS_XBSD_ENV */
		if (code == 0)
		    anumber--;
		if (fv_slept) {
		    uq = VLRU.prev;
		    i = 0;
		    continue;	/* start over - may have raced. */
		}
	    }
	    if (tq == uq)
		break;
	}
    }				/* end of if (!freeVCList) */

    if (!freeVCList) {
	tvc = afs_AllocVCache();
    } else {
	tvc = freeVCList;	/* take from free list */
	freeVCList = tvc->nextfree;
	tvc->nextfree = NULL;
    }				/* end of if (!freeVCList) */

#endif /* AFS_OSF_ENV */
#if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
    if (tvc->v)
	panic("afs_NewVCache(): free vcache with vnode attached");
#endif

#if !defined(AFS_SGI_ENV) && !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
    {
#if defined(AFS_DISCON_ENV)
	/* We need to preserve the slot that we're being stored into on
	 * disk */
	afs_uint32 slot;

	slot = tvc->diskSlot;
	memset((char *)tvc, 0, sizeof(struct vcache));
	tvc->diskSlot = slot;
#else
	memset((char *)tvc, 0, sizeof(struct vcache));
#endif
    }
#else
    memset(&(tvc->f), 0, sizeof(struct fvcache));
#endif

    AFS_RWLOCK_INIT(&tvc->lock, "vcache lock");
#if defined(AFS_SUN5_ENV)
    AFS_RWLOCK_INIT(&tvc->vlock, "vcache vlock");
#endif /* defined(AFS_SUN5_ENV) */

    tvc->mvid = NULL;
    tvc->linkData = NULL;
    tvc->cbExpires = 0;
    tvc->opens = 0;
    tvc->execsOrWriters = 0;
    tvc->flockCount = 0;
    tvc->f.states = CVInit;
    tvc->last_looker = 0;
    tvc->f.fid = *afid;
    tvc->asynchrony = -1;

    tvc->flushDV.low = tvc->flushDV.high = AFS_MAXDV;

    tvc->f.truncPos = AFS_NOTRUNC;	/* don't truncate until we need to */
    hzero(tvc->f.m.DataVersion);	/* in case we copy it into flushDV */
    tvc->callback = serverp;	/* to minimize chance that clear
				 * request is lost */
#if defined(AFS_DISCON_ENV)
    QZero(&tvc->metadirty);
#endif

    i = VCHash(afid);
    j = VCHashV(afid);

    tvc->hnext = afs_vhashT[i];
    afs_vhashT[i] = tvc;
    QAdd(&afs_vhashTV[j], &tvc->vhashq);

    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	refpanic("NewVCache VLRU inconsistent");
    }
    QAdd(&VLRU, &tvc->vlruq);	/* put in lruq */
    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	refpanic("NewVCache VLRU inconsistent2");
    }
    if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
	refpanic("NewVCache VLRU inconsistent3");
    }
    if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
	refpanic("NewVCache VLRU inconsistent4");
    }
    vcachegen++;
#ifdef AFS_OBSD_ENV
    /* it should now be safe to drop the xvcache lock */
    ReleaseWriteLock(&afs_xvcache);
    AFS_GUNLOCK();
    afs_nbsd_getnewvnode(tvc);	/* includes one refcount */
    AFS_GLOCK();
    ObtainWriteLock(&afs_xvcache, 337);
    lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
#endif

#ifdef AFS_DARWIN_ENV
    ReleaseWriteLock(&afs_xvcache);
    AFS_GUNLOCK();
    afs_darwin_getnewvnode(tvc);	/* includes one refcount */
    AFS_GLOCK();
    ObtainWriteLock(&afs_xvcache, 338);
#ifdef AFS_DARWIN80_ENV
    LOCKINIT(tvc->rwlock);
#else
    lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
#endif
#endif

#ifdef AFS_FBSD_ENV
    {
	struct vnode *vp;

	ReleaseWriteLock(&afs_xvcache);
	AFS_GUNLOCK();
#if defined(AFS_FBSD60_ENV)
	if (getnewvnode(MOUNT_AFS, afs_globalVFS, &afs_vnodeops, &vp))
#elif defined(AFS_FBSD50_ENV)
	if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
#else
	if (getnewvnode(VT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
#endif
	    panic("afs getnewvnode");	/* can't happen */
	AFS_GLOCK();
	ObtainWriteLock(&afs_xvcache, 339);
	if (tvc->v != NULL) {
	    /* I'd like to know if this ever happens...
	     * We don't drop global for the rest of this function,
	     * so if we do lose the race, the other thread should
	     * have found the same vnode and finished initializing
	     * the vcache entry.  Is it conceivable that this vcache
	     * entry could be recycled during this interval?  If so,
	     * then there probably needs to be some sort of additional
	     * mutual exclusion (an Embryonic flag would suffice).
	     */
	    printf("afs_NewVCache: lost the race\n");
	    ReleaseWriteLock(&afs_xvcache);
	    return tvc;
	}
	tvc->v = vp;
	tvc->v->v_data = tvc;
	lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
    }
#endif
#if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
    /* Hold it for the LRU (should make count 2) */
    VN_HOLD(AFSTOV(tvc));
#else /* AFS_OSF_ENV */
#if !(defined(AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV))
    VREFCOUNT_SET(tvc, 1);	/* us */
#endif /* AFS_DARWIN_ENV || AFS_XBSD_ENV */
#endif /* AFS_OSF_ENV */
#ifdef AFS_AIX32_ENV
    LOCK_INIT(&tvc->pvmlock, "vcache pvmlock");
    tvc->vmh = tvc->segid = NULL;
    tvc->credp = NULL;
#endif

#if defined(AFS_CACHE_BYPASS)
    tvc->cachingStates = 0;
    tvc->cachingTransitions = 0;
#endif
#ifdef AFS_BOZONLOCK_ENV
#if defined(AFS_SUN5_ENV)
    rw_init(&tvc->rwlock, "vcache rwlock", RW_DEFAULT, NULL);

#if defined(AFS_SUN55_ENV)
    /* This is required if the kaio (kernel asynchronous io)
     ** module is installed. Inside the kernel, the function
     ** check_vp( common/os/aio.c) checks to see if the kernel has
     ** to provide asynchronous io for this vnode. This
     ** function extracts the device number by following the
     ** v_data field of the vnode. If we do not set this field
     ** then the system panics. The value of the v_data field
     ** is not really important for AFS vnodes because the kernel
     ** does not do asynchronous io for regular files. Hence,
     ** for the time being, we fill up the v_data field with the
     ** vnode pointer itself. */
    tvc->v.v_data = (char *)tvc;
#endif /* AFS_SUN55_ENV */
#endif
    afs_BozonInit(&tvc->pvnLock, tvc);
#endif

    /* initialize vnode data, note vrefCount is v.v_count */
#ifdef AFS_AIX_ENV
    /* Don't forget to free the gnode space */
    tvc->v.v_gnode = gnodepnt =
	(struct gnode *)osi_AllocSmallSpace(sizeof(struct gnode));
    memset((char *)gnodepnt, 0, sizeof(struct gnode));
#endif /* AFS_AIX_ENV */
#ifdef AFS_SGI64_ENV
    memset((void *)&(tvc->vc_bhv_desc), 0, sizeof(tvc->vc_bhv_desc));
    bhv_desc_init(&(tvc->vc_bhv_desc), tvc, tvc, &Afs_vnodeops);
#ifdef AFS_SGI65_ENV
    vn_bhv_head_init(&(tvc->v.v_bh), "afsvp");
    vn_bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
#else
    bhv_head_init(&(tvc->v.v_bh));
    bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
#endif
#ifdef AFS_SGI65_ENV
    tvc->v.v_mreg = tvc->v.v_mregb = (struct pregion *)tvc;
#ifdef VNODE_TRACING
    tvc->v.v_trace = ktrace_alloc(VNODE_TRACE_SIZE, 0);
#endif
    init_bitlock(&tvc->v.v_pcacheflag, VNODE_PCACHE_LOCKBIT, "afs_pcache",
		 tvc->v.v_number);
    init_mutex(&tvc->v.v_filocksem, MUTEX_DEFAULT, "afsvfl", (long)tvc);
    init_mutex(&tvc->v.v_buf_lock, MUTEX_DEFAULT, "afsvnbuf", (long)tvc);
#endif
    vnode_pcache_init(&tvc->v);
#if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
    /* Above define is never true except in SGI test kernels. */
    init_bitlock(&(tvc->v.v_flag), VLOCK, "vnode", tvc->v.v_number);
#endif
#ifdef INTR_KTHREADS
    AFS_VN_INIT_BUF_LOCK(&(tvc->v));
#endif
#else
    SetAfsVnode(AFSTOV(tvc));
#endif /* AFS_SGI64_ENV */
    /*
     * The proper value for mvstat (for root fids) is setup by the caller.
     */
    tvc->mvstat = 0;
    if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
	tvc->mvstat = 2;
    if (afs_globalVFS == 0)
	osi_Panic("afs globalvfs");
#if !defined(AFS_LINUX22_ENV)
    vSetVfsp(tvc, afs_globalVFS);
#endif
    vSetType(tvc, VREG);
#ifdef AFS_AIX_ENV
    tvc->v.v_vfsnext = afs_globalVFS->vfs_vnodes;	/* link off vfs */
    tvc->v.v_vfsprev = NULL;
    afs_globalVFS->vfs_vnodes = &tvc->v;
    if (tvc->v.v_vfsnext != NULL)
	tvc->v.v_vfsnext->v_vfsprev = &tvc->v;
    tvc->v.v_next = gnodepnt->gn_vnode;	/* Single vnode per gnode for us! */
    gnodepnt->gn_vnode = &tvc->v;
#endif
#if defined(AFS_DUX40_ENV)
    insmntque(tvc, afs_globalVFS, &afs_ubcops);
#else
#ifdef AFS_OSF_ENV
    /* Is this needed??? */
    insmntque(tvc, afs_globalVFS);
#endif /* AFS_OSF_ENV */
#endif /* AFS_DUX40_ENV */
#ifdef AFS_FBSD70_ENV
#ifndef AFS_FBSD80_ENV	/* yup.  they put it back. */
    insmntque(AFSTOV(tvc), afs_globalVFS);
#endif
#endif
#if defined(AFS_SGI_ENV)
    VN_SET_DPAGES(&(tvc->v), (struct pfdat *)NULL);
    osi_Assert((tvc->v.v_flag & VINACT) == 0);
    tvc->v.v_flag = 0;
    osi_Assert(VN_GET_PGCNT(&(tvc->v)) == 0);
    osi_Assert(tvc->mapcnt == 0 && tvc->vc_locktrips == 0);
    osi_Assert(tvc->vc_rwlockid == OSI_NO_LOCKID);
    osi_Assert(tvc->v.v_filocks == NULL);
#if !defined(AFS_SGI65_ENV)
    osi_Assert(tvc->v.v_filocksem == NULL);
#endif
    osi_Assert(tvc->cred == NULL);
#ifdef AFS_SGI64_ENV
    vnode_pcache_reinit(&tvc->v);
    tvc->v.v_rdev = NODEV;
#endif
    vn_initlist((struct vnlist *)&tvc->v);
#endif /* AFS_SGI_ENV */
    osi_dnlc_purgedp(tvc);	/* this may be overkill */
    memset((char *)&(tvc->callsort), 0, sizeof(struct afs_q));
    tvc->f.states &= ~CVInit;
    afs_osi_Wakeup(&tvc->f.states);

    return tvc;
}				/*afs_NewVCache */

/*!
 * Walk the vcache hash table, extending flock leases and finishing
 * deferred stores (CCore) and deferred unlinks (CUnlinkedDel).
 *
 * LOCK: afs_FlushActiveVcaches afs_xvcache N
 *
 * \param doflocks : Do we handle flocks?
 */
void
afs_FlushActiveVcaches(register afs_int32 doflocks)
{
    register struct vcache *tvc;
    register int i;
    register struct afs_conn *tc;
    register afs_int32 code;
    register struct AFS_UCRED *cred = NULL;
    struct vrequest treq, ureq;
    struct AFSVolSync tsync;
    int didCore;
    XSTATS_DECLS;
    AFS_STATCNT(afs_FlushActiveVcaches);
    ObtainReadLock(&afs_xvcache);
    for (i = 0; i < VCSIZE; i++) {
	for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
	    if (tvc->f.states & CVInit)
		continue;
#ifdef AFS_DARWIN80_ENV
	    if (tvc->f.states & CDeadVnode &&
		(tvc->f.states & (CCore | CUnlinkedDel) ||
		 tvc->flockCount))
		panic("Dead vnode has core/unlinkedel/flock");
#endif
	    if (doflocks && tvc->flockCount != 0) {
		/* if this entry has an flock, send a keep-alive call out */
		osi_vnhold(tvc, 0);
		ReleaseReadLock(&afs_xvcache);
		ObtainWriteLock(&tvc->lock, 51);
		do {
		    afs_InitReq(&treq, afs_osi_credp);
		    treq.flags |= O_NONBLOCK;

		    tc = afs_Conn(&tvc->f.fid, &treq, SHARED_LOCK);
		    if (tc) {
			XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
			RX_AFS_GUNLOCK();
			code =
			    RXAFS_ExtendLock(tc->id,
					     (struct AFSFid *)&tvc->f.fid.Fid,
					     &tsync);
			RX_AFS_GLOCK();
			XSTATS_END_TIME;
		    } else
			code = -1;
		} while (afs_Analyze
			 (tc, code, &tvc->f.fid, &treq,
			  AFS_STATS_FS_RPCIDX_EXTENDLOCK, SHARED_LOCK, NULL));

		ReleaseWriteLock(&tvc->lock);
#ifdef AFS_DARWIN80_ENV
		AFS_FAST_RELE(tvc);
		ObtainReadLock(&afs_xvcache);
#else
		ObtainReadLock(&afs_xvcache);
		AFS_FAST_RELE(tvc);
#endif
	    }
	    didCore = 0;
	    if ((tvc->f.states & CCore) || (tvc->f.states & CUnlinkedDel)) {
		/*
		 * Don't let it evaporate in case someone else is in
		 * this code.  Also, drop the afs_xvcache lock while
		 * getting vcache locks.
		 */
		osi_vnhold(tvc, 0);
		ReleaseReadLock(&afs_xvcache);
#ifdef AFS_BOZONLOCK_ENV
		afs_BozonLock(&tvc->pvnLock, tvc);
#endif
#if defined(AFS_SGI_ENV)
		/*
		 * That's because if we come in via the CUnlinkedDel bit state path we'll be have 0 refcnt
		 */
		osi_Assert(VREFCOUNT_GT(tvc, 0));
		AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
		ObtainWriteLock(&tvc->lock, 52);
		if (tvc->f.states & CCore) {
		    tvc->f.states &= ~CCore;
		    /* XXXX Find better place-holder for cred XXXX */
		    cred = (struct AFS_UCRED *)tvc->linkData;
		    tvc->linkData = NULL;	/* XXX */
		    afs_InitReq(&ureq, cred);
		    afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
			       ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
			       tvc->execsOrWriters);
		    code = afs_StoreOnLastReference(tvc, &ureq);
		    ReleaseWriteLock(&tvc->lock);
#ifdef AFS_BOZONLOCK_ENV
		    afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
		    hzero(tvc->flushDV);
		    didCore = 1;
		    if (code && code != VNOVNODE) {
			afs_StoreWarn(code, tvc->f.fid.Fid.Volume,
				      /* /dev/console */ 1);
		    }
		} else if (tvc->f.states & CUnlinkedDel) {
		    /*
		     * Ignore errors
		     */
		    ReleaseWriteLock(&tvc->lock);
#ifdef AFS_BOZONLOCK_ENV
		    afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
#if defined(AFS_SGI_ENV)
		    AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
		    afs_remunlink(tvc, 0);
#if defined(AFS_SGI_ENV)
		    AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
		} else {
		    /* lost (or won, perhaps) the race condition */
		    ReleaseWriteLock(&tvc->lock);
#ifdef AFS_BOZONLOCK_ENV
		    afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
		}
#if defined(AFS_SGI_ENV)
		AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
#ifdef AFS_DARWIN80_ENV
		AFS_FAST_RELE(tvc);
		if (didCore) {
		    AFS_RELE(AFSTOV(tvc));
		    /* Matches write code setting CCore flag */
		    crfree(cred);
		}
		ObtainReadLock(&afs_xvcache);
#else
		ObtainReadLock(&afs_xvcache);
		AFS_FAST_RELE(tvc);
		if (didCore) {
		    AFS_RELE(AFSTOV(tvc));
		    /* Matches write code setting CCore flag */
		    crfree(cred);
		}
#endif
	    }
	}
    }
    ReleaseReadLock(&afs_xvcache);
}

/*!
 * Make sure a cache entry is up-to-date status-wise.
 *
 * NOTE: everywhere that calls this can potentially be sped up
 *       by checking CStatd first, and avoiding doing the InitReq
 *       if this is up-to-date.
 *
 *  Anymore, the only places that call this KNOW already that the
 *  vcache is not up-to-date, so we don't screw around.
 *
 * \param avc Pointer to vcache entry to verify.
 * \param areq
 *
 * \return 0 for success or other error codes.
 */
int
afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
{
    register struct vcache *tvc;

    AFS_STATCNT(afs_VerifyVCache);
#if defined(AFS_OSF_ENV)
    ObtainReadLock(&avc->lock);
    if (afs_IsWired(avc)) {
	ReleaseReadLock(&avc->lock);
	return 0;
    }
    ReleaseReadLock(&avc->lock);
#endif /* AFS_OSF_ENV */
    /* otherwise we must fetch the status info */

    ObtainWriteLock(&avc->lock, 53);
    if (avc->f.states & CStatd) {
	ReleaseWriteLock(&avc->lock);
	return 0;
    }
    ObtainWriteLock(&afs_xcbhash, 461);
    avc->f.states &= ~(CStatd | CUnique);
    avc->callback = NULL;
    afs_DequeueCallback(avc);
    ReleaseWriteLock(&afs_xcbhash);
    ReleaseWriteLock(&avc->lock);

    /* since we've been called back, or the callback has expired,
     * it's possible that the contents of this directory, or this
     * file's name have changed, thus invalidating the dnlc contents.
     */
    if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
	osi_dnlc_purgedp(avc);
    else
	osi_dnlc_purgevp(avc);

    /* fetch the status info */
    tvc = afs_GetVCache(&avc->f.fid, areq, NULL, avc);
    if (!tvc)
	return ENOENT;
    /* Put it back; caller has already incremented vrefCount */
    afs_PutVCache(tvc);
    return 0;
}				/*afs_VerifyVCache */

/*!
 * Simple copy of stat info into cache.
 *
 * Callers: as of 1992-04-29, only called by WriteVCache
 *
 * \param avc Ptr to vcache entry involved.
 * \param astat Ptr to stat info to copy.
 */
static void
afs_SimpleVStat(register struct vcache *avc,
		register struct AFSFetchStatus *astat, struct vrequest *areq)
{
    afs_size_t length;
    AFS_STATCNT(afs_SimpleVStat);
#ifdef AFS_SGI_ENV
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
	&& !AFS_VN_MAPPED((vnode_t *) avc)) {
#else
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
#endif
#ifdef AFS_64BIT_CLIENT
	FillInt64(length, astat->Length_hi, astat->Length);
#else /* AFS_64BIT_CLIENT */
	length = astat->Length;
#endif /* AFS_64BIT_CLIENT */
#if defined(AFS_SGI_ENV)
	osi_Assert((valusema(&avc->vc_rwlock) <= 0)
		   && (OSI_GET_LOCKID() == avc->vc_rwlockid));
	if (length < avc->f.m.Length) {
	    vnode_t *vp = (vnode_t *) avc;

	    osi_Assert(WriteLocked(&avc->lock));
	    ReleaseWriteLock(&avc->lock);
	    AFS_GUNLOCK();
	    PTOSSVP(vp, (off_t) length, (off_t) MAXLONG);
	    AFS_GLOCK();
	    ObtainWriteLock(&avc->lock, 67);
	}
#endif
	/* if writing the file, don't fetch over this value */
	afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT, ICL_TYPE_POINTER, avc,
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
	avc->f.m.Length = length;
	avc->f.m.Date = astat->ClientModTime;
    }
    avc->f.m.Owner = astat->Owner;
    avc->f.m.Group = astat->Group;
    avc->f.m.Mode = astat->UnixModeBits;
    if (vType(avc) == VREG) {
	avc->f.m.Mode |= S_IFREG;
    } else if (vType(avc) == VDIR) {
	avc->f.m.Mode |= S_IFDIR;
    } else if (vType(avc) == VLNK) {
	avc->f.m.Mode |= S_IFLNK;
	if ((avc->f.m.Mode & 0111) == 0)
	    avc->mvstat = 1;
    }
    if (avc->f.states & CForeign) {
	struct axscache *ac;
	avc->f.anyAccess = astat->AnonymousAccess;
#ifdef badidea
	if ((astat->CallerAccess & ~astat->AnonymousAccess))
	    /*
	     * Caller has at least one bit not covered by anonymous, and
	     * thus may have interesting rights.
	     *
	     * HOWEVER, this is a really bad idea, because any access query
	     * for bits which aren't covered by anonymous, on behalf of a user
	     * who doesn't have any special rights, will result in an answer of
	     * the form "I don't know, lets make a FetchStatus RPC and find out!"
	     * It's an especially bad idea under Ultrix, since (due to the lack of
	     * a proper access() call) it must perform several afs_access() calls
	     * in order to create magic mode bits that vary according to who makes
	     * the call.  In other words, _every_ stat() generates a test for
	     * privileged access.
	     */
#endif /* badidea */
	    if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
		ac->axess = astat->CallerAccess;
	    else		/* not found, add a new one if possible */
		afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
    }
}				/*afs_SimpleVStat */

/*!
 * Store the status info *only* back to the server for a
 * given FID.
 *
 * Environment: Must be called with a shared lock held on the vnode.
 *
 * \param avc Ptr to the vcache entry.
 * \param astatus Ptr to the status info to store.
 * \param areq Ptr to the associated vrequest.
 *
 * \return Operation status.
 */
int
afs_WriteVCache(register struct vcache *avc,
		register struct AFSStoreStatus *astatus,
		struct vrequest *areq)
{
    afs_int32 code;
    struct afs_conn *tc;
    struct AFSFetchStatus OutStatus;
    struct AFSVolSync tsync;
    XSTATS_DECLS;
    AFS_STATCNT(afs_WriteVCache);
    afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
    do {
	tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK);
	if (tc) {
	    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
	    RX_AFS_GUNLOCK();
	    code =
		RXAFS_StoreStatus(tc->id, (struct AFSFid *)&avc->f.fid.Fid,
				  astatus, &OutStatus, &tsync);
	    RX_AFS_GLOCK();
	    XSTATS_END_TIME;
	} else
	    code = -1;
    } while (afs_Analyze
	     (tc, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
	      SHARED_LOCK, NULL));

    UpgradeSToWLock(&avc->lock, 20);
    if (code == 0) {
	/* success, do the changes locally */
	afs_SimpleVStat(avc, &OutStatus, areq);
	/*
	 * Update the date, too.  SimpleVStat didn't do this, since
	 * it thought we were doing this after fetching new status
	 * over a file being written.
	 */
	avc->f.m.Date = OutStatus.ClientModTime;
    } else {
	/* failure, set up to check with server next time */
	ObtainWriteLock(&afs_xcbhash, 462);
	afs_DequeueCallback(avc);
	avc->f.states &= ~(CStatd | CUnique);	/* turn off stat valid flag */
	ReleaseWriteLock(&afs_xcbhash);
	if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
	    osi_dnlc_purgedp(avc);	/* if it (could be) a directory */
    }
    ConvertWToSLock(&avc->lock);
    return code;
}				/*afs_WriteVCache */
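
/*
 * Editorial sketch (not part of the original source): afs_WriteVCache
 * expects a shared lock on avc->lock; it upgrades to a write lock itself
 * and converts back before returning.  A hypothetical setattr-style
 * caller (the function name and lock number are invented):
 */
#if 0	/* illustration only */
static int
example_SetModTime(struct vcache *avc, struct vrequest *areq)
{
    struct AFSStoreStatus astat;
    int code;

    astat.Mask = AFS_SETMODTIME;
    astat.ClientModTime = osi_Time();
    ObtainSharedLock(&avc->lock, 1002);
    code = afs_WriteVCache(avc, &astat, areq);
    ReleaseSharedLock(&avc->lock);
    return code;
}
#endif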

#if defined(AFS_DISCON_ENV)

/*!
 * Store status info only locally, set the proper disconnection flags
 * and add to dirty list.
 *
 * \param avc The vcache to be written locally.
 * \param astatus Get attr fields from local store.
 * \param attrs Only the va_size field is used.
 *
 * \note Must be called with a shared lock on the vnode
 */
int afs_WriteVCacheDiscon(register struct vcache *avc,
			  register struct AFSStoreStatus *astatus,
			  struct vattr *attrs)
{
    afs_int32 code = 0;
    afs_int32 flags = 0;

    UpgradeSToWLock(&avc->lock, 700);

    if (!astatus->Mask) {
	return code;
    } else {
	/* Set attributes. */
	if (astatus->Mask & AFS_SETMODTIME) {
	    avc->f.m.Date = astatus->ClientModTime;
	    flags |= VDisconSetTime;
	}

	if (astatus->Mask & AFS_SETOWNER) {
	    printf("Not allowed yet.\n");
	    /*avc->f.m.Owner = astatus->Owner;*/
	}

	if (astatus->Mask & AFS_SETGROUP) {
	    printf("Not allowed yet.\n");
	    /*avc->f.m.Group = astatus->Group;*/
	}

	if (astatus->Mask & AFS_SETMODE) {
	    avc->f.m.Mode = astatus->UnixModeBits;

#if 0	/* XXX: Leaving this out, so it doesn't mess up the file type flag. */
	    if (vType(avc) == VREG) {
		avc->f.m.Mode |= S_IFREG;
	    } else if (vType(avc) == VDIR) {
		avc->f.m.Mode |= S_IFDIR;
	    } else if (vType(avc) == VLNK) {
		avc->f.m.Mode |= S_IFLNK;
		if ((avc->f.m.Mode & 0111) == 0)
		    avc->mvstat = 1;
	    }
#endif

	    flags |= VDisconSetMode;
	}			/* if (astatus->Mask & AFS_SETMODE) */
    }				/* if (!astatus->Mask) */

    if (attrs->va_size > 0) {
	/* XXX: Do I need more checks? */
	/* Truncation operation. */
	flags |= VDisconTrunc;
    }

    if (flags)
	afs_DisconAddDirty(avc, flags, 1);

    /* XXX: How about the rest of the fields? */

    ConvertWToSLock(&avc->lock);
    return code;
}
#endif

/*!
 * Copy astat block into vcache info
 *
 * \note This code may get dataversion and length out of sync if the file has
 * been modified.  This is less than ideal.  I haven't thought about it sufficiently
 * to be certain that it is adequate.
 *
 * \note Environment: Must be called under a write lock
 *
 * \param avc Ptr to vcache entry.
 * \param astat Ptr to stat block to copy in.
 * \param areq Ptr to associated request.
 */
void
afs_ProcessFS(register struct vcache *avc,
	      register struct AFSFetchStatus *astat, struct vrequest *areq)
{
    afs_size_t length;
    AFS_STATCNT(afs_ProcessFS);
#ifdef AFS_64BIT_CLIENT
    FillInt64(length, astat->Length_hi, astat->Length);
#else /* AFS_64BIT_CLIENT */
    length = astat->Length;
#endif /* AFS_64BIT_CLIENT */

    /* WARNING: afs_DoBulkStat uses the Length field to store a sequence
     * number for each bulk status request. Under no circumstances
     * should afs_DoBulkStat store a sequence number if the new
     * length will be ignored when afs_ProcessFS is called with
     * new stats. If you change the following conditional then you
     * also need to change the conditional in afs_DoBulkStat.  */
#ifdef AFS_SGI_ENV
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
	&& !AFS_VN_MAPPED((vnode_t *) avc)) {
#else
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
#endif
	/* if we're writing or mapping this file, don't fetch over these
	 * values.
	 */
	afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
	avc->f.m.Length = length;
	avc->f.m.Date = astat->ClientModTime;
    }
    hset64(avc->f.m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
    avc->f.m.Owner = astat->Owner;
    avc->f.m.Mode = astat->UnixModeBits;
    avc->f.m.Group = astat->Group;
    avc->f.m.LinkCount = astat->LinkCount;
    if (astat->FileType == File) {
	vSetType(avc, VREG);
	avc->f.m.Mode |= S_IFREG;
    } else if (astat->FileType == Directory) {
	vSetType(avc, VDIR);
	avc->f.m.Mode |= S_IFDIR;
    } else if (astat->FileType == SymbolicLink) {
	if (afs_fakestat_enable && (avc->f.m.Mode & 0111) == 0) {
	    vSetType(avc, VDIR);
	    avc->f.m.Mode |= S_IFDIR;
	} else {
	    vSetType(avc, VLNK);
	    avc->f.m.Mode |= S_IFLNK;
	}
	if ((avc->f.m.Mode & 0111) == 0) {
	    avc->mvstat = 1;
	}
    }
    avc->f.anyAccess = astat->AnonymousAccess;
#ifdef badidea
    if ((astat->CallerAccess & ~astat->AnonymousAccess))
	/*   USED TO SAY :
	 * Caller has at least one bit not covered by anonymous, and
	 * thus may have interesting rights.
	 *
	 * HOWEVER, this is a really bad idea, because any access query
	 * for bits which aren't covered by anonymous, on behalf of a user
	 * who doesn't have any special rights, will result in an answer of
	 * the form "I don't know, lets make a FetchStatus RPC and find out!"
	 * It's an especially bad idea under Ultrix, since (due to the lack of
	 * a proper access() call) it must perform several afs_access() calls
	 * in order to create magic mode bits that vary according to who makes
	 * the call.  In other words, _every_ stat() generates a test for
	 * privileged access.
	 */
#endif /* badidea */
    {
	struct axscache *ac;
	if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
	    ac->axess = astat->CallerAccess;
	else			/* not found, add a new one if possible */
	    afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
    }
}				/*afs_ProcessFS */

/*!
 * Get fid from server.
 *
 * \param areq Request to be passed on.
 * \param name Name of ?? to lookup.
 * \param OutStatus Fetch status.
 *
 * \return Success status of operation.
 */
int
afs_RemoteLookup(register struct VenusFid *afid, struct vrequest *areq,
		 char *name, struct VenusFid *nfid,
		 struct AFSFetchStatus *OutStatusp,
		 struct AFSCallBack *CallBackp, struct server **serverp,
		 struct AFSVolSync *tsyncp)
{
    afs_int32 code;
    register struct afs_conn *tc;
    struct AFSFetchStatus OutDirStatus;
    XSTATS_DECLS;
    if (!name)
	name = "";		/* XXX */
    do {
	tc = afs_Conn(afid, areq, SHARED_LOCK);
	if (tc) {
	    if (serverp)
		*serverp = tc->srvr->server;
	    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
	    RX_AFS_GUNLOCK();
	    code =
		RXAFS_Lookup(tc->id, (struct AFSFid *)&afid->Fid, name,
			     (struct AFSFid *)&nfid->Fid, OutStatusp,
			     &OutDirStatus, CallBackp, tsyncp);
	    RX_AFS_GLOCK();
	    XSTATS_END_TIME;
	} else
	    code = -1;
    } while (afs_Analyze
	     (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
	      NULL));

    return code;
}

/*!
 * Given a file id and a vrequest structure, fetch the status
 * information associated with the file.
 *
 * \param afid File ID.
 * \param areq Ptr to associated vrequest structure, specifying the
 *	user whose authentication tokens will be used.
 * \param avc Caller may already have a vcache for this file, which is
 *	then passed in for use.
 *
 * \note Environment:
 *	The cache entry is returned with an increased vrefCount field.
 *	The entry must be discarded by calling afs_PutVCache when you
 *	are through using the pointer to the cache entry.
 *
 *	You should not hold any locks when calling this function, except
 *	locks on other vcache entries.  If you lock more than one vcache
 *	entry simultaneously, you should lock them in this order:
 *
 *	1. Lock all files first, then directories.
 *	2. Within a particular type, lock entries in Fid.Vnode order.
 *
 *	This locking hierarchy is convenient because it allows locking
 *	of a parent dir cache entry, given a file (to check its access
 *	control list).  It also allows renames to be handled easily by
 *	locking directories in a constant order.
 *
 * \note NB.  NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
 *
 * \note Might have a vcache structure already, which must
 *	already be held by the caller
 */
struct vcache *
afs_GetVCache(register struct VenusFid *afid, struct vrequest *areq,
	      afs_int32 * cached, struct vcache *avc)
{
    afs_int32 code, newvcache = 0;
    register struct vcache *tvc;
    struct volume *tvp;
    afs_int32 retry = 0;

    AFS_STATCNT(afs_GetVCache);

    if (cached)
	*cached = 0;		/* Init just in case */
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
  loop:
#endif
    ObtainSharedLock(&afs_xvcache, 5);

    tvc = afs_FindVCache(afid, &retry, DO_STATS | DO_VLRU | IS_SLOCK);
    if (tvc && retry) {
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
	ReleaseSharedLock(&afs_xvcache);
	spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
	goto loop;
#endif
    }
    if (tvc) {
	if (cached)
	    *cached = 1;
	osi_Assert((tvc->f.states & CVInit) == 0);
	/* If we are in readdir, return the vnode even if not statd */
	if ((tvc->f.states & CStatd) || afs_InReadDir(tvc)) {
	    ReleaseSharedLock(&afs_xvcache);
	    return tvc;
	}
    } else {
	UpgradeSToWLock(&afs_xvcache, 21);

	/* no cache entry, better grab one */
	tvc = afs_NewVCache(afid, NULL);
	newvcache = 1;

	ConvertWToSLock(&afs_xvcache);
	if (tvc == NULL) {
	    ReleaseSharedLock(&afs_xvcache);
	    return NULL;
	}

	afs_stats_cmperf.vcacheMisses++;
    }

    ReleaseSharedLock(&afs_xvcache);

    ObtainWriteLock(&tvc->lock, 54);
    if (tvc->f.states & CStatd) {
	ReleaseWriteLock(&tvc->lock);
	return tvc;
    }
#if defined(AFS_OSF_ENV)
    if (afs_IsWired(tvc)) {
	ReleaseWriteLock(&tvc->lock);
	return tvc;
    }
#endif /* AFS_OSF_ENV */
#ifdef AFS_DARWIN80_ENV
/* Darwin 8.0 only has bufs in nfs, so we shouldn't have to worry about them.
   What about ubc? */
#else
#if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
    /*
     * XXX - I really don't like this.  Should try to understand better.
     * It seems that sometimes, when we get called, we already hold the
     * lock on the vnode (e.g., from afs_getattr via afs_VerifyVCache).
     * We can't drop the vnode lock, because that could result in a race.
     * Sometimes, though, we get here and don't hold the vnode lock.
     * I hate code paths that sometimes hold locks and sometimes don't.
     * In any event, the dodge we use here is to check whether the vnode
     * is locked, and if it isn't, then we gain and drop it around the call
     * to vinvalbuf; otherwise, we leave it alone.
     */
    {
	struct vnode *vp = AFSTOV(tvc);
	int iheldthelock;

#if defined(AFS_DARWIN_ENV)
	iheldthelock = VOP_ISLOCKED(vp);
	if (!iheldthelock)
	    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, current_proc());
	/* this is messy. we can call fsync which will try to reobtain this */
	if (VTOAFS(vp) == tvc)
	    ReleaseWriteLock(&tvc->lock);
	if (UBCINFOEXISTS(vp)) {
	    vinvalbuf(vp, V_SAVE, &afs_osi_cred, current_proc(), PINOD, 0);
	}
	if (VTOAFS(vp) == tvc)
	    ObtainWriteLock(&tvc->lock, 954);
	if (!iheldthelock)
	    VOP_UNLOCK(vp, LK_EXCLUSIVE, current_proc());
#elif defined(AFS_FBSD80_ENV)
	iheldthelock = VOP_ISLOCKED(vp);
	if (!iheldthelock) {
	    /* nosleep/sleep lock order reversal */
	    int glocked = ISAFS_GLOCK();
	    if (glocked)
		AFS_GUNLOCK();
	    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	    if (glocked)
		AFS_GLOCK();
	}
	vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
	if (!iheldthelock)
	    VOP_UNLOCK(vp, 0);
#elif defined(AFS_FBSD60_ENV)
	iheldthelock = VOP_ISLOCKED(vp, curthread);
	if (!iheldthelock)
	    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
	vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
	if (!iheldthelock)
	    VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
#elif defined(AFS_FBSD50_ENV)
	iheldthelock = VOP_ISLOCKED(vp, curthread);
	if (!iheldthelock)
	    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
	vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
	if (!iheldthelock)
	    VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
#elif defined(AFS_FBSD40_ENV)
	iheldthelock = VOP_ISLOCKED(vp, curproc);
	if (!iheldthelock)
	    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
	vinvalbuf(vp, V_SAVE, osi_curcred(), curproc, PINOD, 0);
	if (!iheldthelock)
	    VOP_UNLOCK(vp, LK_EXCLUSIVE, curproc);
#elif defined(AFS_OBSD_ENV)
	iheldthelock = VOP_ISLOCKED(vp, curproc);
	if (!iheldthelock)
	    VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
	uvm_vnp_uncache(vp);
	if (!iheldthelock)
	    VOP_UNLOCK(vp, 0, curproc);
#endif
    }
#endif
#endif
    ObtainWriteLock(&afs_xcbhash, 464);
    tvc->f.states &= ~CUnique;
    tvc->callback = 0;
    afs_DequeueCallback(tvc);
    ReleaseWriteLock(&afs_xcbhash);

    /* It is always appropriate to throw away all the access rights? */
    afs_FreeAllAxs(&(tvc->Access));
    tvp = afs_GetVolume(afid, areq, READ_LOCK);	/* copy useful per-volume info */
    if (tvp) {
	if ((tvp->states & VForeign)) {
	    if (newvcache)
		tvc->f.states |= CForeign;
	    if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
		&& (tvp->rootUnique == afid->Fid.Unique)) {
		tvc->mvstat = 2;
	    }
	}
	if (tvp->states & VRO)
	    tvc->f.states |= CRO;
	if (tvp->states & VBackup)
	    tvc->f.states |= CBackup;
	/* now copy ".." entry back out of volume structure, if necessary */
	if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
	    if (!tvc->mvid)
		tvc->mvid = (struct VenusFid *)
		    osi_AllocSmallSpace(sizeof(struct VenusFid));
	    *tvc->mvid = tvp->dotdot;
	}
	afs_PutVolume(tvp, READ_LOCK);
    }
    /* stat the file */
    afs_RemoveVCB(afid);
    {
	struct AFSFetchStatus OutStatus;

	if (afs_DynrootNewVnode(tvc, &OutStatus)) {
	    afs_ProcessFS(tvc, &OutStatus, areq);
	    tvc->f.states |= CStatd | CUnique;
	    tvc->f.parent.vnode = OutStatus.ParentVnode;
	    tvc->f.parent.unique = OutStatus.ParentUnique;
	    code = 0;
	} else {
	    if (AFS_IS_DISCONNECTED) {
		/* Nothing to do otherwise... */
		code = ENETDOWN;
		printf("Network is down in afs_GetCache\n");
	    } else
		code = afs_FetchStatus(tvc, afid, areq, &OutStatus);

	    /* For the NFS translator's benefit, make sure
	     * non-directory vnodes always have their parent FID set
	     * correctly, even when created as a result of decoding an
	     * NFS filehandle.  It would be nice to also do this for
	     * directories, but we can't because the fileserver fills
	     * in the FID of the directory itself instead of that of
	     * its parent.
	     */
	    if (!code && OutStatus.FileType != Directory &&
		!tvc->f.parent.vnode) {
		tvc->f.parent.vnode = OutStatus.ParentVnode;
		tvc->f.parent.unique = OutStatus.ParentUnique;
		/* XXX - SXW - It's conceivable we should mark ourselves
		 *             as dirty again here, in case we've been raced
		 *             out of the FetchStatus call.
		 */
	    }
	}
    }

    if (code) {
	ReleaseWriteLock(&tvc->lock);
	afs_PutVCache(tvc);
	return NULL;
    }

    ReleaseWriteLock(&tvc->lock);
    return tvc;
}				/*afs_GetVCache */
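
/*
 * Editorial sketch (not part of the original source): the canonical
 * get/put pattern implied by the locking notes above -- every successful
 * afs_GetVCache must be paired with afs_PutVCache.  The function name is
 * invented.
 */
#if 0	/* illustration only */
static void
example_GetPut(struct VenusFid *afid, struct AFS_UCRED *acred)
{
    struct vrequest treq;
    struct vcache *tvc;

    if (!afs_InitReq(&treq, acred)) {
	tvc = afs_GetVCache(afid, &treq, NULL, NULL);
	if (tvc) {
	    /* use tvc; it carries an extra vrefCount reference */
	    afs_PutVCache(tvc);
	}
    }
}
#endif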

/*!
 * Lookup a vcache by fid. Look inside the cache first, if not
 * there, lookup the file on the server, and then get its fresh
 * cache entry.
 *
 * \param cached Is element cached? If NULL, don't answer.
 *
 * \return The found element or NULL.
 */
struct vcache *
afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
		 afs_int32 * cached, struct vcache *adp, char *aname)
{
    afs_int32 code, now, newvcache = 0;
    struct VenusFid nfid;
    register struct vcache *tvc;
    struct volume *tvp;
    struct AFSFetchStatus OutStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    struct server *serverp = 0;
    afs_int32 origCBs;
    afs_int32 retry;

    AFS_STATCNT(afs_GetVCache);
    if (cached)
	*cached = 0;		/* Init just in case */

#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
  loop1:
#endif
    ObtainReadLock(&afs_xvcache);
    tvc = afs_FindVCache(afid, &retry, DO_STATS /* no vlru */ );

    if (tvc) {
	ReleaseReadLock(&afs_xvcache);
	if (retry) {
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
	    spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
	    goto loop1;
#endif
	}
	ObtainReadLock(&tvc->lock);
	if (tvc->f.states & CStatd) {
	    if (cached)
		*cached = 1;
	    ReleaseReadLock(&tvc->lock);
	    return tvc;
	}
	tvc->f.states &= ~CUnique;
	ReleaseReadLock(&tvc->lock);
	afs_PutVCache(tvc);
	ObtainReadLock(&afs_xvcache);
    }
    /* if (tvc) */
    ReleaseReadLock(&afs_xvcache);

    /* lookup the file */
    nfid = *afid;
    now = osi_Time();
    origCBs = afs_allCBs;	/* if anything changes, we don't have a cb */

    if (AFS_IS_DISCONNECTED) {
	printf("Network is down in afs_LookupVcache\n");
	code = ENETDOWN;
    } else
	code =
	    afs_RemoteLookup(&adp->f.fid, areq, aname, &nfid, &OutStatus,
			     &CallBack, &serverp, &tsync);

#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
  loop2:
#endif
    ObtainSharedLock(&afs_xvcache, 6);
    tvc = afs_FindVCache(&nfid, &retry, DO_VLRU | IS_SLOCK /* no xstats now */ );
    if (tvc && retry) {
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
	ReleaseSharedLock(&afs_xvcache);
	spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
	goto loop2;
#endif
    }

    if (!tvc) {
	/* no cache entry, better grab one */
	UpgradeSToWLock(&afs_xvcache, 22);
	tvc = afs_NewVCache(&nfid, serverp);
	newvcache = 1;
	ConvertWToSLock(&afs_xvcache);
	if (tvc == NULL) {
	    ReleaseSharedLock(&afs_xvcache);
	    return NULL;
	}
    }

    ReleaseSharedLock(&afs_xvcache);
    ObtainWriteLock(&tvc->lock, 55);
    /* It is always appropriate to throw away all the access rights? */
    afs_FreeAllAxs(&(tvc->Access));
    tvp = afs_GetVolume(afid, areq, READ_LOCK);	/* copy useful per-vol info */
    if (tvp) {
	if ((tvp->states & VForeign)) {
	    if (newvcache)
		tvc->f.states |= CForeign;
	    if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
		&& (tvp->rootUnique == afid->Fid.Unique))
		tvc->mvstat = 2;
	}
	if (tvp->states & VRO)
	    tvc->f.states |= CRO;
	if (tvp->states & VBackup)
	    tvc->f.states |= CBackup;
	/* now copy ".." entry back out of volume structure, if necessary */
	if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
	    if (!tvc->mvid)
		tvc->mvid = (struct VenusFid *)
		    osi_AllocSmallSpace(sizeof(struct VenusFid));
	    *tvc->mvid = tvp->dotdot;
	}
    }

    if (code) {
	ObtainWriteLock(&afs_xcbhash, 465);
	afs_DequeueCallback(tvc);
	tvc->f.states &= ~(CStatd | CUnique);
	ReleaseWriteLock(&afs_xcbhash);
	if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
	    osi_dnlc_purgedp(tvc);	/* if it (could be) a directory */
	if (tvp)
	    afs_PutVolume(tvp, READ_LOCK);
	ReleaseWriteLock(&tvc->lock);
	afs_PutVCache(tvc);
	return NULL;
    }
    ObtainWriteLock(&afs_xcbhash, 466);
    if (origCBs == afs_allCBs) {
	if (CallBack.ExpirationTime) {
	    tvc->callback = serverp;
	    tvc->cbExpires = CallBack.ExpirationTime + now;
	    tvc->f.states |= CStatd | CUnique;
	    tvc->f.states &= ~CBulkFetching;
	    afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
	} else if (tvc->f.states & CRO) {
	    /* adapt gives us an hour. */
	    tvc->cbExpires = 3600 + osi_Time();
	    /*XXX*/ tvc->f.states |= CStatd | CUnique;
	    tvc->f.states &= ~CBulkFetching;
	    afs_QueueCallback(tvc, CBHash(3600), tvp);
	} else {
	    tvc->callback = NULL;
	    afs_DequeueCallback(tvc);
	    tvc->f.states &= ~(CStatd | CUnique);
	    if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
		osi_dnlc_purgedp(tvc);	/* if it (could be) a directory */
	}
    } else {
	afs_DequeueCallback(tvc);
	tvc->f.states &= ~CStatd;
	tvc->f.states &= ~CUnique;
	tvc->callback = NULL;
	if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
	    osi_dnlc_purgedp(tvc);	/* if it (could be) a directory */
    }
    ReleaseWriteLock(&afs_xcbhash);
    if (tvp)
	afs_PutVolume(tvp, READ_LOCK);
    afs_ProcessFS(tvc, &OutStatus, areq);

    ReleaseWriteLock(&tvc->lock);
    return tvc;
}
2326 afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
2327 afs_int32 * cached, struct volume *tvolp)
2329 afs_int32 code = 0, i, newvcache = 0, haveStatus = 0;
2330 afs_int32 getNewFid = 0;
2332 struct VenusFid nfid;
2333 register struct vcache *tvc;
2334 struct server *serverp = 0;
2335 struct AFSFetchStatus OutStatus;
2336 struct AFSCallBack CallBack;
2337 struct AFSVolSync tsync;
2342 #ifdef AFS_DARWIN80_ENV
2349 if (!tvolp->rootVnode || getNewFid) {
2350 struct VenusFid tfid;
2353 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2354 origCBs = afs_allCBs; /* ignore InitCallBackState */
2356 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2361 /* ReleaseReadLock(&tvolp->lock); */
2362 ObtainWriteLock(&tvolp->lock, 56);
2363 tvolp->rootVnode = afid->Fid.Vnode = nfid.Fid.Vnode;
2364 tvolp->rootUnique = afid->Fid.Unique = nfid.Fid.Unique;
2365 ReleaseWriteLock(&tvolp->lock);
2366 /* ObtainReadLock(&tvolp->lock);*/
2369 afid->Fid.Vnode = tvolp->rootVnode;
2370 afid->Fid.Unique = tvolp->rootUnique;
2374 ObtainSharedLock(&afs_xvcache, 7);
2376 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2377 if (!FidCmp(&(tvc->f.fid), afid)) {
2378 if (tvc->f.states & CVInit) {
2379 ReleaseSharedLock(&afs_xvcache);
2380 afs_osi_Sleep(&tvc->f.states);
2384 /* Grab this vnode, possibly reactivating from the free list */
2385 /* for the present (95.05.25) everything on the hash table is
2386 * definitively NOT in the free list -- at least until afs_reclaim
2387 * can be safely implemented */
2389 vg = vget(AFSTOV(tvc)); /* this bumps ref count */
2393 #endif /* AFS_OSF_ENV */
2394 #ifdef AFS_DARWIN80_ENV
2395 if (tvc->f.states & CDeadVnode) {
2396 ReleaseSharedLock(&afs_xvcache);
2397 afs_osi_Sleep(&tvc->f.states);
2401 if (vnode_get(tvp)) /* this bumps ref count */
2403 if (vnode_ref(tvp)) {
2405 /* AFSTOV(tvc) may be NULL */
2415 if (!haveStatus && (!tvc || !(tvc->f.states & CStatd))) {
2416 /* Mount point no longer stat'd or unknown. FID may have changed. */
2419 AFS_RELE(AFSTOV(tvc));
2422 ReleaseSharedLock(&afs_xvcache);
2423 #ifdef AFS_DARWIN80_ENV
2426 vnode_put(AFSTOV(tvc));
2427 vnode_rele(AFSTOV(tvc));
2436 UpgradeSToWLock(&afs_xvcache, 23);
2437 /* no cache entry, better grab one */
2438 tvc = afs_NewVCache(afid, NULL);
2441 ReleaseWriteLock(&afs_xvcache);
2445 afs_stats_cmperf.vcacheMisses++;
2449 afs_stats_cmperf.vcacheHits++;
2450 #if defined(AFS_OSF_ENV) || defined(AFS_DARWIN80_ENV)
2451 /* we already bumped the ref count in the for loop above */
2452 #else /* AFS_OSF_ENV */
2455 UpgradeSToWLock(&afs_xvcache, 24);
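/* The refpanic checks below (and again after the requeue) verify that
 * each neighbour's back pointer leads back to the node in front of it,
 * i.e. that the doubly linked VLRU is still consistently threaded; any
 * mismatch means the LRU has been corrupted and it is safer to panic
 * than to keep dereferencing it. */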
2456 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2457 refpanic("GRVC VLRU inconsistent0");
2459 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2460 refpanic("GRVC VLRU inconsistent1");
2462 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2463 refpanic("GRVC VLRU inconsistent2");
2465 QRemove(&tvc->vlruq); /* move to lruq head */
2466 QAdd(&VLRU, &tvc->vlruq);
2467 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2468 refpanic("GRVC VLRU inconsistent3");
2470 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2471 refpanic("GRVC VLRU inconsistent4");
2473 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2474 refpanic("GRVC VLRU inconsistent5");
2479 ReleaseWriteLock(&afs_xvcache);
2481 if (tvc->f.states & CStatd) {
2485 ObtainReadLock(&tvc->lock);
2486 tvc->f.states &= ~CUnique;
2487 tvc->callback = NULL; /* redundant, perhaps */
2488 ReleaseReadLock(&tvc->lock);
2491 ObtainWriteLock(&tvc->lock, 57);
2493 /* Is it always appropriate to throw away all the access rights? */
2494 afs_FreeAllAxs(&(tvc->Access));
2497 tvc->f.states |= CForeign;
2498 if (tvolp->states & VRO)
2499 tvc->f.states |= CRO;
2500 if (tvolp->states & VBackup)
2501 tvc->f.states |= CBackup;
2502 /* now copy ".." entry back out of volume structure, if necessary */
2503 if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
2504 && (tvolp->rootUnique == afid->Fid.Unique)) {
2507 if (tvc->mvstat == 2 && tvolp->dotdot.Fid.Volume != 0) {
2509 tvc->mvid = (struct VenusFid *)
2510 osi_AllocSmallSpace(sizeof(struct VenusFid));
2511 *tvc->mvid = tvolp->dotdot;
2515 afs_RemoveVCB(afid);
2518 struct VenusFid tfid;
2521 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2522 origCBs = afs_allCBs; /* ignore InitCallBackState */
2524 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2529 ObtainWriteLock(&afs_xcbhash, 467);
2530 afs_DequeueCallback(tvc);
2531 tvc->callback = NULL;
2532 tvc->f.states &= ~(CStatd | CUnique);
2533 ReleaseWriteLock(&afs_xcbhash);
2534 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2535 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2536 ReleaseWriteLock(&tvc->lock);
2541 ObtainWriteLock(&afs_xcbhash, 468);
2542 if (origCBs == afs_allCBs) {
2543 tvc->f.states |= CTruth;
2544 tvc->callback = serverp;
2545 if (CallBack.ExpirationTime != 0) {
2546 tvc->cbExpires = CallBack.ExpirationTime + start;
2547 tvc->f.states |= CStatd;
2548 tvc->f.states &= ~CBulkFetching;
2549 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
2550 } else if (tvc->f.states & CRO) {
2551 /* no expiration returned; read-only volume, so default to an hour. */
2552 tvc->cbExpires = 3600 + osi_Time();
2553 /*XXX*/ tvc->f.states |= CStatd;
2554 tvc->f.states &= ~CBulkFetching;
2555 afs_QueueCallback(tvc, CBHash(3600), tvolp);
2558 afs_DequeueCallback(tvc);
2559 tvc->callback = NULL;
2560 tvc->f.states &= ~(CStatd | CUnique);
2561 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2562 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2564 ReleaseWriteLock(&afs_xcbhash);
2565 afs_ProcessFS(tvc, &OutStatus, areq);
2567 ReleaseWriteLock(&tvc->lock);
2573 * Update callback status and (sometimes) attributes of a vnode.
2574 * Called after doing a fetch status RPC. Whilst disconnected, attributes
2575 * shouldn't be written to the vcache here.
2580 * \param Outsp Server status after the RPC call.
2581 * \param acb Callback for this vnode.
2583 * \note The vcache must be write locked.
2586 afs_UpdateStatus(struct vcache *avc,
2587 struct VenusFid *afid,
2588 struct vrequest *areq,
2589 struct AFSFetchStatus *Outsp,
2590 struct AFSCallBack *acb,
2593 struct volume *volp;
2596 /* Don't write status in vcache if resyncing after a disconnection. */
2597 afs_ProcessFS(avc, Outsp, areq);
2599 volp = afs_GetVolume(afid, areq, READ_LOCK);
2600 ObtainWriteLock(&afs_xcbhash, 469);
2601 avc->f.states |= CTruth;
2602 if (avc->callback /* check for race */ ) {
2603 if (acb->ExpirationTime != 0) {
2604 avc->cbExpires = acb->ExpirationTime + start;
2605 avc->f.states |= CStatd;
2606 avc->f.states &= ~CBulkFetching;
2607 afs_QueueCallback(avc, CBHash(acb->ExpirationTime), volp);
2608 } else if (avc->f.states & CRO) {
2609 /* ordinary callback on a read-only volume -- AFS 3.2 style */
2610 avc->cbExpires = 3600 + start;
2611 avc->f.states |= CStatd;
2612 avc->f.states &= ~CBulkFetching;
2613 afs_QueueCallback(avc, CBHash(3600), volp);
2615 afs_DequeueCallback(avc);
2616 avc->callback = NULL;
2617 avc->f.states &= ~(CStatd | CUnique);
2618 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
2619 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2622 afs_DequeueCallback(avc);
2623 avc->callback = NULL;
2624 avc->f.states &= ~(CStatd | CUnique);
2625 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
2626 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2628 ReleaseWriteLock(&afs_xcbhash);
2630 afs_PutVolume(volp, READ_LOCK);
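/* A minimal caller sketch; it mirrors afs_FetchStatus() just below.
 * 'start' must be sampled before the RPC is issued so the callback
 * expiry is measured from no later than the moment the server granted
 * it:
 *
 *	start = osi_Time();
 *	code = RXAFS_FetchStatus(tc->id, (struct AFSFid *)&afid->Fid,
 *				 &OutStatus, &CallBack, &tsync);
 *	if (!code)
 *		afs_UpdateStatus(avc, afid, areq, &OutStatus, &CallBack,
 *				 start);
 */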
2634 * Must be called with avc write-locked
2635 * don't absolutely have to invalidate the hint unless the dv has
2636 * changed, but be sure to get it right, or else there will be consistency bugs.
2639 afs_FetchStatus(struct vcache * avc, struct VenusFid * afid,
2640 struct vrequest * areq, struct AFSFetchStatus * Outsp)
2643 afs_uint32 start = 0;
2644 register struct afs_conn *tc;
2645 struct AFSCallBack CallBack;
2646 struct AFSVolSync tsync;
2649 tc = afs_Conn(afid, areq, SHARED_LOCK);
2650 avc->dchint = NULL; /* invalidate hints */
2652 avc->callback = tc->srvr->server;
2654 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
2657 RXAFS_FetchStatus(tc->id, (struct AFSFid *)&afid->Fid, Outsp,
2665 } while (afs_Analyze
2666 (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
2667 SHARED_LOCK, NULL));
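/* Standard retry idiom in this module: afs_Analyze() disposes of the
 * connection and returns nonzero when the request should be retried
 * (for instance against another fileserver), so the RPC above repeats
 * until it either succeeds or fails for good. */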
2670 afs_UpdateStatus(avc, afid, areq, Outsp, &CallBack, start);
2672 /* used to undo the local callback, but that's too extreme.
2673 * There are plenty of good reasons that fetchstatus might return
2674 * an error, such as EPERM. If we have the vnode cached, statd,
2675 * with callback, might as well keep track of the fact that we
2676 * don't have access...
2678 if (code == EPERM || code == EACCES) {
2679 struct axscache *ac;
2680 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
2682 else /* not found, add a new one if possible */
2683 afs_AddAxs(avc->Access, areq->uid, 0);
2694 * Stuff some information into the vcache for the given file.
2697 * afid : File in question.
2698 * OutStatus : Fetch status on the file.
2699 * CallBack : Callback info.
2700 * tc : RPC connection involved.
2701 * areq : vrequest involved.
2704 * Nothing interesting.
2707 afs_StuffVcache(register struct VenusFid *afid,
2708 struct AFSFetchStatus *OutStatus,
2709 struct AFSCallBack *CallBack, register struct afs_conn *tc,
2710 struct vrequest *areq)
2712 register afs_int32 code, i, newvcache = 0;
2713 register struct vcache *tvc;
2714 struct AFSVolSync tsync;
2716 struct axscache *ac;
2719 AFS_STATCNT(afs_StuffVcache);
2720 #ifdef IFS_VCACHECOUNT
2725 ObtainSharedLock(&afs_xvcache, 8);
2727 tvc = afs_FindVCache(afid, &retry, DO_VLRU | IS_SLOCK /* no stats */);
2729 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2730 ReleaseSharedLock(&afs_xvcache);
2731 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2737 /* no cache entry, better grab one */
2738 UpgradeSToWLock(&afs_xvcache, 25);
2739 tvc = afs_NewVCache(afid, NULL);
2741 ConvertWToSLock(&afs_xvcache);
2744 ReleaseSharedLock(&afs_xvcache);
2749 ReleaseSharedLock(&afs_xvcache);
2750 ObtainWriteLock(&tvc->lock, 58);
2752 tvc->f.states &= ~CStatd;
2753 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2754 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2756 /* Is it always appropriate to throw away all the access rights? */
2757 afs_FreeAllAxs(&(tvc->Access));
2759 /*Copy useful per-volume info */
2760 tvp = afs_GetVolume(afid, areq, READ_LOCK);
2762 if (newvcache && (tvp->states & VForeign))
2763 tvc->f.states |= CForeign;
2764 if (tvp->states & VRO)
2765 tvc->f.states |= CRO;
2766 if (tvp->states & VBackup)
2767 tvc->f.states |= CBackup;
2769 * Now, copy ".." entry back out of volume structure, if
2772 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2774 tvc->mvid = (struct VenusFid *)
2775 osi_AllocSmallSpace(sizeof(struct VenusFid));
2776 *tvc->mvid = tvp->dotdot;
2779 /* store the stat on the file */
2780 afs_RemoveVCB(afid);
2781 afs_ProcessFS(tvc, OutStatus, areq);
2782 tvc->callback = tc->srvr->server;
2784 /* we use osi_Time twice below. Ideally, we would use the time at which
2785 * the FetchStatus call began, instead, but we don't have it here. So we
2786 * make do with "now". In the CRO case, it doesn't really matter. In
2787 * the other case, we hope that the difference between "now" and when the
2788 * call actually began execution on the server won't be larger than the
2789 * padding which the server keeps. Subtract 1 second anyway, to be on
2790 * the safe side. Can't subtract more because we don't know how big
2791 * ExpirationTime is. Consistency problems may arise if the call
2792 * timeout period becomes longer than the server's expiration padding. */
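/* Illustration with made-up numbers: if the RPC spent 10 seconds in
 * flight, an expiry computed from "now" lands up to 10 seconds past one
 * computed when the call began; the -1 below claws back only one of
 * those seconds, which is safe as long as the server's padding exceeds
 * the remaining gap. */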
2793 ObtainWriteLock(&afs_xcbhash, 470);
2794 if (CallBack->ExpirationTime != 0) {
2795 tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
2796 tvc->f.states |= CStatd;
2797 tvc->f.states &= ~CBulkFetching;
2798 afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
2799 } else if (tvc->f.states & CRO) {
2800 /* old-fashioned AFS 3.2 style */
2801 tvc->cbExpires = 3600 + osi_Time();
2802 /*XXX*/ tvc->f.states |= CStatd;
2803 tvc->f.states &= ~CBulkFetching;
2804 afs_QueueCallback(tvc, CBHash(3600), tvp);
2806 afs_DequeueCallback(tvc);
2807 tvc->callback = NULL;
2808 tvc->f.states &= ~(CStatd | CUnique);
2809 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2810 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2812 ReleaseWriteLock(&afs_xcbhash);
2814 afs_PutVolume(tvp, READ_LOCK);
2816 /* look in per-pag cache */
2817 if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
2818 ac->axess = OutStatus->CallerAccess; /* substitute pags */
2819 else /* not found, add a new one if possible */
2820 afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);
2822 ReleaseWriteLock(&tvc->lock);
2823 afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
2824 ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32,
2825 tvc->cbExpires, ICL_TYPE_INT32, tvc->cbExpires - osi_Time());
2827 * Release ref count... hope this guy stays around...
2830 } /*afs_StuffVcache */
2834 * Decrements the reference count on a cache entry.
2836 * \param avc Pointer to the cache entry to decrement.
2838 * \note Environment: Nothing interesting.
2841 afs_PutVCache(register struct vcache *avc)
2843 AFS_STATCNT(afs_PutVCache);
2844 #ifdef AFS_DARWIN80_ENV
2845 vnode_put(AFSTOV(avc));
2849 * Can we use a read lock here?
2851 ObtainReadLock(&afs_xvcache);
2853 ReleaseReadLock(&afs_xvcache);
2855 } /*afs_PutVCache */
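/* A minimal pairing sketch (assumptions: a non-SGI kernel, so the retry
 * argument may be NULL, and a caller holding no other vcache locks).
 * afs_FindVCache() returns the entry held, so each successful find is
 * balanced by a put:
 *
 *	ObtainReadLock(&afs_xvcache);
 *	tvc = afs_FindVCache(&afid, NULL, DO_STATS);
 *	ReleaseReadLock(&afs_xvcache);
 *	if (tvc) {
 *		... use tvc ...
 *		afs_PutVCache(tvc);
 *	}
 */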
2859 * Reset a vcache entry, so local contents are ignored, and the
2860 * server will be reconsulted next time the vcache is used
2862 * \param avc Pointer to the cache entry to reset
2865 * \note avc must be write locked on entry
2868 afs_ResetVCache(struct vcache *avc, struct AFS_UCRED *acred) {
2869 ObtainWriteLock(&afs_xcbhash, 456);
2870 afs_DequeueCallback(avc);
2871 avc->f.states &= ~(CStatd | CDirty); /* next reference will re-stat */
2872 ReleaseWriteLock(&afs_xcbhash);
2873 /* now find the disk cache entries */
2874 afs_TryToSmush(avc, acred, 1);
2875 osi_dnlc_purgedp(avc);
2876 if (avc->linkData && !(avc->f.states & CCore)) {
2877 afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
2878 avc->linkData = NULL;
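/* A hypothetical caller sketch (the write-lock id 999 is made up; the
 * ids in this file are just unique tags for lock tracing):
 *
 *	ObtainWriteLock(&avc->lock, 999);
 *	afs_ResetVCache(avc, acred);
 *	ReleaseWriteLock(&avc->lock);
 */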
2883 * Sleep when searching for a vcache. Releases all the pending locks,
2884 * sleeps, then reobtains the previously released locks.
2886 * \param avc The vcache entry we sleep on (waiting for its f.states to change).
2887 * \param flag Determines which afs_xvcache lock mode to release and reacquire.
2891 static void findvc_sleep(struct vcache *avc, int flag) {
2892 if (flag & IS_SLOCK) {
2893 ReleaseSharedLock(&afs_xvcache);
2895 if (flag & IS_WLOCK) {
2896 ReleaseWriteLock(&afs_xvcache);
2898 ReleaseReadLock(&afs_xvcache);
2901 afs_osi_Sleep(&avc->f.states);
2902 if (flag & IS_SLOCK) {
2903 ObtainSharedLock(&afs_xvcache, 341);
2905 if (flag & IS_WLOCK) {
2906 ObtainWriteLock(&afs_xvcache, 343);
2908 ObtainReadLock(&afs_xvcache);
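/* Since every lock was dropped while we slept, the hash chain may have
 * changed under us; callers therefore restart their scan of afs_vhashT
 * after findvc_sleep() returns instead of trusting a stale hnext
 * pointer. */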
2913 * Find a vcache entry given a fid.
2915 * \param afid Pointer to the fid whose cache entry we desire.
2916 * \param retry (SGI-specific) tell the caller to drop the lock on xvcache,
2917 * unlock the vnode, and try again.
2918 * \param flag Bit 1 to specify whether to compute hit statistics. Not
2919 * set if FindVCache is called as part of internal bookkeeping.
2921 * \note Environment: Must be called with the afs_xvcache lock at least held at
2922 * the read level. In order to do the VLRU adjustment, the xvcache lock
2923 * must be shared -- we upgrade it here.
2927 afs_FindVCache(struct VenusFid *afid, afs_int32 * retry, afs_int32 flag)
2930 register struct vcache *tvc;
2932 #if defined( AFS_OSF_ENV)
2935 #ifdef AFS_DARWIN80_ENV
2939 AFS_STATCNT(afs_FindVCache);
2943 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2944 if (FidMatches(afid, tvc)) {
2945 if (tvc->f.states & CVInit) {
2946 findvc_sleep(tvc, flag);
2950 /* Grab this vnode, possibly reactivating from the free list */
2952 vg = vget(AFSTOV(tvc));
2956 #endif /* AFS_OSF_ENV */
2957 #ifdef AFS_DARWIN80_ENV
2958 if (tvc->f.states & CDeadVnode) {
2959 findvc_sleep(tvc, flag);
2965 if (vnode_ref(tvp)) {
2967 /* AFSTOV(tvc) may be NULL */
2977 /* should I have a read lock on the vnode here? */
2981 #if !defined(AFS_OSF_ENV) && !defined(AFS_DARWIN80_ENV)
2982 osi_vnhold(tvc, retry); /* already held, above */
2983 if (retry && *retry)
2986 #if defined(AFS_DARWIN_ENV) && !defined(AFS_DARWIN80_ENV)
2987 tvc->f.states |= CUBCinit;
2989 if (UBCINFOMISSING(AFSTOV(tvc)) ||
2990 UBCINFORECLAIMED(AFSTOV(tvc))) {
2991 ubc_info_init(AFSTOV(tvc));
2994 tvc->f.states &= ~CUBCinit;
2997 * only move to front of vlru if we have proper vcache locking
2999 if (flag & DO_VLRU) {
3000 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
3001 refpanic("FindVC VLRU inconsistent1");
3003 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
3004 refpanic("FindVC VLRU inconsistent1");
3006 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
3007 refpanic("FindVC VLRU inconsistent2");
3009 UpgradeSToWLock(&afs_xvcache, 26);
3010 QRemove(&tvc->vlruq);
3011 QAdd(&VLRU, &tvc->vlruq);
3012 ConvertWToSLock(&afs_xvcache);
3013 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
3014 refpanic("FindVC VLRU inconsistent1");
3016 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
3017 refpanic("FindVC VLRU inconsistent2");
3019 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
3020 refpanic("FindVC VLRU inconsistent3");
3026 if (flag & DO_STATS) {
3028 afs_stats_cmperf.vcacheHits++;
3030 afs_stats_cmperf.vcacheMisses++;
3031 if (afs_IsPrimaryCellNum(afid->Cell))
3032 afs_stats_cmperf.vlocalAccesses++;
3034 afs_stats_cmperf.vremoteAccesses++;
3037 } /*afs_FindVCache */
3040 * Find a vcache entry given a fid. Does a wildcard match on what we
3041 * have for the fid. If more than one entry, don't return anything.
3043 * \param avcp Fill in pointer if we found one and only one.
3044 * \param afid Pointer to the fid whose cache entry we desire.
3050 * \note Environment: Unlike afs_FindVCache(), this routine obtains and
3051 * releases the afs_xvcache lock itself (shared, upgraded to write
3052 * internally for the VLRU adjustment); callers must not already hold it.
3054 * \return Number of matches found.
3057 int afs_duplicate_nfs_fids = 0;
3060 afs_NFSFindVCache(struct vcache **avcp, struct VenusFid *afid)
3062 register struct vcache *tvc;
3064 afs_int32 count = 0;
3065 struct vcache *found_tvc = NULL;
3069 #ifdef AFS_DARWIN80_ENV
3073 AFS_STATCNT(afs_FindVCache);
3077 ObtainSharedLock(&afs_xvcache, 331);
3080 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3081 /* Match only on what we have.... */
3082 if (((tvc->f.fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
3083 && (tvc->f.fid.Fid.Volume == afid->Fid.Volume)
3084 && ((tvc->f.fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
3085 && (tvc->f.fid.Cell == afid->Cell)) {
3086 if (tvc->f.states & CVInit) {
3087 ReleaseSharedLock(&afs_xvcache);
3088 afs_osi_Sleep(&tvc->f.states);
3092 /* Grab this vnode, possibly reactivating from the free list */
3094 vg = vget(AFSTOV(tvc));
3097 /* This vnode no longer exists. */
3100 #endif /* AFS_OSF_ENV */
3101 #ifdef AFS_DARWIN80_ENV
3102 if (tvc->f.states & CDeadVnode) {
3103 ReleaseSharedLock(&afs_xvcache);
3104 afs_osi_Sleep(&tvc->f.states);
3108 if (vnode_get(tvp)) {
3109 /* This vnode no longer exists. */
3112 if (vnode_ref(tvp)) {
3113 /* This vnode no longer exists. */
3115 /* AFSTOV(tvc) may be NULL */
3120 #endif /* AFS_DARWIN80_ENV */
3125 /* Drop our reference counts. */
3127 vrele(AFSTOV(found_tvc));
3129 afs_duplicate_nfs_fids++;
3130 ReleaseSharedLock(&afs_xvcache);
3131 #ifdef AFS_DARWIN80_ENV
3132 /* Drop our reference counts. */
3133 vnode_put(AFSTOV(tvc));
3134 vnode_put(AFSTOV(found_tvc));
3143 /* should I have a read lock on the vnode here? */
3145 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
3146 afs_int32 retry = 0;
3147 osi_vnhold(tvc, &retry);
3150 found_tvc = (struct vcache *)0;
3151 ReleaseSharedLock(&afs_xvcache);
3152 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
3156 #if !defined(AFS_OSF_ENV)
3157 osi_vnhold(tvc, (int *)0); /* already held, above */
3161 * We obtained the xvcache lock above.
3163 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
3164 refpanic("FindVC VLRU inconsistent1");
3166 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
3167 refpanic("FindVC VLRU inconsistent1");
3169 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
3170 refpanic("FindVC VLRU inconsistent2");
3172 UpgradeSToWLock(&afs_xvcache, 568);
3173 QRemove(&tvc->vlruq);
3174 QAdd(&VLRU, &tvc->vlruq);
3175 ConvertWToSLock(&afs_xvcache);
3176 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
3177 refpanic("FindVC VLRU inconsistent1");
3179 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
3180 refpanic("FindVC VLRU inconsistent2");
3182 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
3183 refpanic("FindVC VLRU inconsistent3");
3189 afs_stats_cmperf.vcacheHits++;
3191 afs_stats_cmperf.vcacheMisses++;
3192 if (afs_IsPrimaryCellNum(afid->Cell))
3193 afs_stats_cmperf.vlocalAccesses++;
3195 afs_stats_cmperf.vremoteAccesses++;
3197 *avcp = tvc; /* May be null */
3199 ReleaseSharedLock(&afs_xvcache);
3200 return (tvc ? 1 : 0);
3202 } /*afs_NFSFindVCache */
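/* Worked example of the wildcard match above: the NFS translator keeps
 * only the low 16 bits of the vnode and 24 bits of the uniquifier, so
 * cached entries with Fid.Vnode 0x2345 and 0x12345 both match an afid
 * whose truncated vnode is 0x2345.  If two live entries collide this
 * way we bump afs_duplicate_nfs_fids and report no match rather than
 * guess between them. */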
3208 * Initialize vcache-related variables
3213 afs_vcacheInit(int astatSize)
3215 #if (!defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)) || defined(AFS_SGI_ENV)
3216 register struct vcache *tvp;
3219 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
3220 if (!afs_maxvcount) {
3221 #if defined(AFS_LINUX22_ENV)
3222 afs_maxvcount = astatSize; /* no particular limit on linux? */
3223 #elif defined(AFS_OSF30_ENV)
3224 afs_maxvcount = max_vnodes / 2; /* limit ourselves to half the total */
3226 afs_maxvcount = nvnode / 2; /* limit ourselves to half the total */
3228 if (astatSize < afs_maxvcount) {
3229 afs_maxvcount = astatSize;
3232 #else /* AFS_OSF_ENV */
3236 AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
3237 LOCK_INIT(&afs_xvcb, "afs_xvcb");
3239 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
3240 /* Allocate and thread the struct vcache entries */
3241 tvp = (struct vcache *)afs_osi_Alloc(astatSize * sizeof(struct vcache));
3242 memset((char *)tvp, 0, sizeof(struct vcache) * astatSize);
3244 Initial_freeVCList = tvp;
3245 freeVCList = &(tvp[0]);
3246 for (i = 0; i < astatSize - 1; i++) {
3247 tvp[i].nextfree = &(tvp[i + 1]);
3249 tvp[astatSize - 1].nextfree = NULL;
3250 #ifdef KERNEL_HAVE_PIN
3251 pin((char *)tvp, astatSize * sizeof(struct vcache)); /* XXX */
3255 #if defined(AFS_SGI_ENV)
3256 for (i = 0; i < astatSize; i++) {
3257 char name[METER_NAMSZ];
3258 struct vcache *tvc = &tvp[i];
3260 tvc->v.v_number = ++afsvnumbers;
3261 tvc->vc_rwlockid = OSI_NO_LOCKID;
3262 initnsema(&tvc->vc_rwlock, 1,
3263 makesname(name, "vrw", tvc->v.v_number));
3264 #ifndef AFS_SGI53_ENV
3265 initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
3267 #ifndef AFS_SGI62_ENV
3268 initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
3269 #endif /* AFS_SGI62_ENV */
3273 for(i = 0; i < VCSIZE; ++i)
3274 QInit(&afs_vhashTV[i]);
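/* Sketch of how the free list threaded above is consumed (simplified;
 * on kernels where freeVCList exists, afs_NewVCache() pops the head):
 *
 *	tvc = freeVCList;
 *	freeVCList = tvc->nextfree;
 *	tvc->nextfree = NULL;
 */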
3281 shutdown_vcache(void)
3284 struct afs_cbr *tsp;
3286 * XXX We may potentially miss some of the vcaches because, when there
3287 * are no free vcache entries and all the vcache entries are active,
3288 * we allocate an additional one - admittedly we almost never
3293 register struct afs_q *tq, *uq = NULL;
3294 register struct vcache *tvc;
3295 for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
3299 osi_FreeSmallSpace(tvc->mvid);
3300 tvc->mvid = (struct VenusFid *)0;
3303 aix_gnode_rele(AFSTOV(tvc));
3305 if (tvc->linkData) {
3306 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
3311 * Also free the remaining ones in the Cache
3313 for (i = 0; i < VCSIZE; i++) {
3314 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3316 osi_FreeSmallSpace(tvc->mvid);
3317 tvc->mvid = (struct VenusFid *)0;
3321 afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
3322 #ifdef AFS_AIX32_ENV
3325 vms_delete(tvc->segid);
3327 tvc->segid = tvc->vmh = NULL;
3328 if (VREFCOUNT_GT(tvc,0))
3329 osi_Panic("flushVcache: vm race");
3337 #if defined(AFS_SUN5_ENV)
3343 if (tvc->linkData) {
3344 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
3349 afs_FreeAllAxs(&(tvc->Access));
3355 * Free any leftover callback queue
3357 for (i = 0; i < afs_stats_cmperf.CallBackAlloced; i++) {
3358 tsp = afs_cbrHeads[i];
3359 afs_cbrHeads[i] = 0;
3360 afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
3364 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
3365 afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3367 #ifdef KERNEL_HAVE_PIN
3368 unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3371 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
3372 freeVCList = Initial_freeVCList = 0;
3374 AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
3375 LOCK_INIT(&afs_xvcb, "afs_xvcb");
3377 for(i = 0; i < VCSIZE; ++i)
3378 QInit(&afs_vhashTV[i]);
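/*!
 * Give up all callbacks held on non-read-only vcaches: each held
 * callback is cleared locally and queued so the fileserver can be
 * told.  Used when dropping into disconnected mode.
 */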
3381 void afs_DisconGiveUpCallbacks(void) {
3386 ObtainWriteLock(&afs_xvcache, 1002); /* XXX - should be a unique number */
3388 /* Walk the set of vcaches, with each one coming out as tvc */
3389 for (i = 0; i < VCSIZE; i++) {
3390 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3391 if ((tvc->f.states & CRO) == 0 && tvc->callback) {
3393 tvc->callback = NULL;
3398 /*printf("%d callbacks to be discarded. queued ... ", nq);*/
3401 ReleaseWriteLock(&afs_xvcache);
3402 /*printf("gone\n");*/
3407 * Clear the Statd flag from all vcaches
3409 * This function removes the Statd flag from all vcaches. It's used by
3410 * disconnected mode to tidy up during reconnection.
3413 void afs_ClearAllStatdFlag(void) {
3417 ObtainWriteLock(&afs_xvcache, 715);
3419 for (i = 0; i < VCSIZE; i++) {
3420 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3421 tvc->f.states &= ~(CStatd|CUnique);
3424 ReleaseWriteLock(&afs_xvcache);