2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 * afs_FlushActiveVcaches
22 * afs_WriteVCacheDiscon
40 #include <afsconfig.h>
41 #include "afs/param.h"
43 #include "afs/sysincludes.h" /*Standard vendor system headers */
44 #include "afsincludes.h" /*AFS-based standard headers */
45 #include "afs/afs_stats.h"
46 #include "afs/afs_cbqueue.h"
47 #include "afs/afs_osidnlc.h"
49 afs_int32 afs_maxvcount = 0; /* max number of vcache entries */
50 afs_int32 afs_vcount = 0; /* number of vcache in use now */
58 #endif /* AFS_SGI64_ENV */
60 /* Exported variables */
61 afs_rwlock_t afs_xvcdirty; /*Lock: discon vcache dirty list mgmt */
62 afs_rwlock_t afs_xvcache; /*Lock: alloc new stat cache entries */
63 afs_rwlock_t afs_xvreclaim; /*Lock: entries reclaimed, not on free list */
64 afs_lock_t afs_xvcb; /*Lock: fids on which there are callbacks */
65 #if !defined(AFS_LINUX22_ENV)
66 static struct vcache *freeVCList; /*Free list for stat cache entries */
67 struct vcache *ReclaimedVCList; /*Reclaimed list for stat entries */
68 static struct vcache *Initial_freeVCList; /*Initial list for above */
70 struct afs_q VLRU; /*vcache LRU */
71 afs_int32 vcachegen = 0;
72 unsigned int afs_paniconwarn = 0;
73 struct vcache *afs_vhashT[VCSIZE];
74 struct afs_q afs_vhashTV[VCSIZE];
75 static struct afs_cbr *afs_cbrHashT[CBRSIZE];
76 afs_int32 afs_bulkStatsLost;
77 int afs_norefpanic = 0;
80 /* Disk backed vcache definitions
81 * Both protected by xvcache */
82 static int afs_nextVcacheSlot = 0;
83 static struct afs_slotlist *afs_freeSlotList = NULL;
85 /* Forward declarations */
86 static afs_int32 afs_QueueVCB(struct vcache *avc, int *slept);
89 * Generate an index into the hash table for a given Fid.
91 * \return The hash value.
94 afs_HashCBRFid(struct AFSFid *fid)
96 return (fid->Volume + fid->Vnode + fid->Unique) % CBRSIZE;
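/* For example, a fid with Volume 0x20000001, Vnode 2, Unique 1 (values
 * chosen purely for illustration) hashes to (0x20000001 + 2 + 1) % CBRSIZE,
 * so every callback queued for that fid lands on the same afs_cbrHashT
 * chain. */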
100 * Insert a CBR entry into the hash table.
101 * Must be called with afs_xvcb held.
106 afs_InsertHashCBR(struct afs_cbr *cbr)
108 int slot = afs_HashCBRFid(&cbr->fid);
110 cbr->hash_next = afs_cbrHashT[slot];
111 if (afs_cbrHashT[slot])
112 afs_cbrHashT[slot]->hash_pprev = &cbr->hash_next;
114 cbr->hash_pprev = &afs_cbrHashT[slot];
115 afs_cbrHashT[slot] = cbr;
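/* hash_pprev always points at whatever pointer currently references this
 * entry (either the bucket head or the previous entry's hash_next), so an
 * entry can be unlinked in O(1) without walking the chain; see
 * afs_FreeCBR() below. */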
120 * Flush the given vcache entry.
123 * afs_xvcache lock must be held for writing upon entry to
124 * prevent people from changing the vrefCount field, and to
125 * protect the lruq and hnext fields.
126 * LOCK: afs_FlushVCache afs_xvcache W
127 * REFCNT: vcache ref count must be zero on entry except for osf1
128 * RACE: lock is dropped and reobtained, permitting race in caller
130 * \param avc Pointer to vcache entry to flush.
131 * \param slept Pointer to int to set 1 if we sleep/drop locks, 0 if we don't.
135 afs_FlushVCache(struct vcache *avc, int *slept)
136 { /*afs_FlushVCache */
139 struct vcache **uvc, *wvc;
142 AFS_STATCNT(afs_FlushVCache);
143 afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
144 ICL_TYPE_INT32, avc->f.states);
146 code = osi_VM_FlushVCache(avc, slept);
150 if (avc->f.states & CVFlushed) {
154 #if !defined(AFS_LINUX22_ENV)
155 if (avc->nextfree || !avc->vlruq.prev || !avc->vlruq.next) { /* qv afs.h */
156 refpanic("LRU vs. Free inconsistency");
159 avc->f.states |= CVFlushed;
160 /* pull the entry out of the lruq and put it on the free list */
161 QRemove(&avc->vlruq);
163 /* keep track of # of files that we bulk stat'd, but never used
164 * before they got recycled.
166 if (avc->f.states & CBulkStat)
169 /* remove entry from the hash chain */
170 i = VCHash(&avc->f.fid);
171 uvc = &afs_vhashT[i];
172 for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
180 /* remove entry from the volume hash table */
181 QRemove(&avc->vhashq);
184 osi_FreeSmallSpace(avc->mvid);
185 avc->mvid = (struct VenusFid *)0;
187 afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
188 avc->linkData = NULL;
190 #if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
191 /* OK, there are no internal vrefCounts, so there shouldn't
192 * be any more refs here. */
194 #ifdef AFS_DARWIN80_ENV
195 vnode_clearfsnode(AFSTOV(avc));
196 vnode_removefsref(AFSTOV(avc));
198 avc->v->v_data = NULL; /* remove from vnode */
200 AFSTOV(avc) = NULL; /* also drop the ptr to vnode */
203 #ifdef AFS_SUN510_ENV
204 /* As we use private vnodes, cleanup is up to us */
205 vn_reinit(AFSTOV(avc));
207 afs_FreeAllAxs(&(avc->Access));
208 ObtainWriteLock(&afs_xcbhash, 460);
209 afs_DequeueCallback(avc); /* remove it from queued callbacks list */
210 avc->f.states &= ~(CStatd | CUnique);
211 ReleaseWriteLock(&afs_xcbhash);
212 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
213 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
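/* By AFS convention, directories are assigned odd vnode numbers and plain
 * files even ones, so (Vnode & 1) is a cheap "might be a directory" test;
 * CForeign entries are purged as well, presumably because that parity
 * convention need not hold for foreign cells. */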
215 osi_dnlc_purgevp(avc);
217 if (!afs_shuttingdown)
218 afs_QueueVCB(avc, slept);
221 * Next, keep track of which vnodes we've deleted for create's
222 * optimistic synchronization algorithm
225 if (avc->f.fid.Fid.Vnode & 1)
231 #if !defined(AFS_LINUX22_ENV)
232 /* put the entry in the free list */
233 avc->nextfree = freeVCList;
235 if (avc->vlruq.prev || avc->vlruq.next) {
236 refpanic("LRU vs. Free inconsistency");
238 avc->f.states |= CVFlushed;
240 /* This should put it back on the vnode free list since usecount is 1 */
242 if (VREFCOUNT_GT(avc,0)) {
243 AFS_RELE(AFSTOV(avc));
244 afs_stats_cmperf.vcacheXAllocs--;
246 if (afs_norefpanic) {
247 afs_warn("flush vc refcnt < 1");
250 osi_Panic("flush vc refcnt < 1");
252 #endif /* AFS_LINUX22_ENV */
257 } /*afs_FlushVCache */
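/* A hypothetical caller (illustrative sketch only; the lock ID 999 is made
 * up) holds afs_xvcache for writing and restarts its scan whenever the
 * flush reports that locks were dropped:
 *
 *     int slept = 0;
 *     ObtainWriteLock(&afs_xvcache, 999);
 *     code = afs_FlushVCache(tvc, &slept);
 *     if (slept)
 *         goto retry;    // hash chains may have changed under us
 *     ReleaseWriteLock(&afs_xvcache);
 */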
261 * The core of the inactive vnode op for all but IRIX.
267 afs_InactiveVCache(struct vcache *avc, afs_ucred_t *acred)
269 AFS_STATCNT(afs_inactive);
270 if (avc->f.states & CDirty) {
271 /* we can't keep trying to push back dirty data forever. Give up. */
272 afs_InvalidateAllSegments(avc); /* turns off dirty bit */
274 avc->f.states &= ~CMAPPED; /* mainly used by SunOS 4.0.x */
275 avc->f.states &= ~CDirty; /* Turn it off */
276 if (avc->f.states & CUnlinked) {
277 if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
278 avc->f.states |= CUnlinkedDel;
281 afs_remunlink(avc, 1); /* ignore any return code */
288 * Allocate a callback return structure from the
289 * free list and return it.
291 * Environment: The alloc and free routines are both called with the afs_xvcb lock
292 * held, so we don't have to worry about blocking in osi_Alloc.
294 * \return The allocated afs_cbr.
296 static struct afs_cbr *afs_cbrSpace = 0;
297 /* if alloc limit below changes, fix me! */
298 static struct afs_cbr *afs_cbrHeads[16];
305 while (!afs_cbrSpace) {
306 if (afs_stats_cmperf.CallBackAlloced >= sizeof(afs_cbrHeads)/sizeof(afs_cbrHeads[0])) {
307 /* don't allocate more than 16 * AFS_NCBRS for now */
309 afs_stats_cmperf.CallBackFlushes++;
312 tsp = afs_osi_Alloc(AFS_NCBRS * sizeof(struct afs_cbr));
313 osi_Assert(tsp != NULL);
314 for (i = 0; i < AFS_NCBRS - 1; i++) {
315 tsp[i].next = &tsp[i + 1];
317 tsp[AFS_NCBRS - 1].next = 0;
319 afs_cbrHeads[afs_stats_cmperf.CallBackAlloced] = tsp;
320 afs_stats_cmperf.CallBackAlloced++;
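/* Each slab of AFS_NCBRS entries is remembered in afs_cbrHeads so the
 * memory can be located again later (e.g. to free it at shutdown), while
 * the individual entries are threaded onto afs_cbrSpace as a singly
 * linked free list. */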
324 afs_cbrSpace = tsp->next;
329 * Free a callback return structure, removing it from all lists.
331 * Environment: the xvcb lock is held over these calls.
333 * \param asp The address of the structure to free.
338 afs_FreeCBR(struct afs_cbr *asp)
340 *(asp->pprev) = asp->next;
342 asp->next->pprev = asp->pprev;
344 *(asp->hash_pprev) = asp->hash_next;
346 asp->hash_next->hash_pprev = asp->hash_pprev;
348 asp->next = afs_cbrSpace;
354 FlushAllVCBs(int nconns, struct rx_connection **rxconns,
355 struct afs_conn **conns)
360 results = afs_osi_Alloc(nconns * sizeof (afs_int32));
361 osi_Assert(results != NULL);
364 multi_Rx(rxconns,nconns)
366 multi_RXAFS_GiveUpAllCallBacks();
367 results[multi_i] = multi_error;
372 * Freeing the CBR will unlink it from the server's CBR list;
373 * do it here, not in the loop, because a dynamic CBR will call
374 * into the memory management routines.
376 for ( i = 0 ; i < nconns ; i++ ) {
377 if (results[i] == 0) {
378 /* Unchain all of them */
379 while (conns[i]->parent->srvr->server->cbrs)
380 afs_FreeCBR(conns[i]->parent->srvr->server->cbrs);
383 afs_osi_Free(results, nconns * sizeof(afs_int32));
387 * Flush all queued callbacks to all servers.
389 * Environment: holds xvcb lock over RPC to guard against race conditions
390 * when a new callback is granted for the same file later on.
392 * \return 0 for success.
395 afs_FlushVCBs(afs_int32 lockit)
397 struct AFSFid *tfids;
398 struct AFSCallBack callBacks[1];
399 struct AFSCBFids fidArray;
400 struct AFSCBs cbArray;
402 struct afs_cbr *tcbrp;
406 struct vrequest treq;
408 int safety1, safety2, safety3;
411 if (AFS_IS_DISCONNECTED)
414 if ((code = afs_InitReq(&treq, afs_osi_credp)))
416 treq.flags |= O_NONBLOCK;
417 tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
418 osi_Assert(tfids != NULL);
421 ObtainWriteLock(&afs_xvcb, 273);
424 * First, attempt a multi across everything, all addresses
425 * for all servers we know of.
429 afs_LoopServers(AFS_LS_ALL, NULL, 0, FlushAllVCBs, NULL);
431 ObtainReadLock(&afs_xserver);
432 for (i = 0; i < NSERVERS; i++) {
433 for (safety1 = 0, tsp = afs_servers[i];
434 tsp && safety1 < afs_totalServers + 10;
435 tsp = tsp->next, safety1++) {
437 if (tsp->cbrs == (struct afs_cbr *)0)
440 /* otherwise, grab a block of AFS_MAXCBRSCALL from the list
441 * and make an RPC, over and over again.
443 tcount = 0; /* number found so far */
444 for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
445 if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
446 struct rx_connection *rxconn;
447 /* if buffer is full, or we've queued all we're going
448 * to from this server, we should flush out the callbacks. */
451 fidArray.AFSCBFids_len = tcount;
452 fidArray.AFSCBFids_val = (struct AFSFid *)tfids;
453 cbArray.AFSCBs_len = 1;
454 cbArray.AFSCBs_val = callBacks;
455 memset(&callBacks[0], 0, sizeof(callBacks[0]));
456 callBacks[0].CallBackType = CB_EXCLUSIVE;
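/* Note the asymmetry: up to AFS_MAXCBRSCALL fids go out per RPC but only
 * one AFSCallBack element; for RXAFS_GiveUpCallBacks the server only
 * really needs the list of fids being returned. */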
457 for (safety3 = 0; safety3 < AFS_MAXHOSTS * 2; safety3++) {
458 tc = afs_ConnByHost(tsp, tsp->cell->fsport,
459 tsp->cell->cellNum, &treq, 0,
460 SHARED_LOCK, 0, &rxconn);
463 (AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
466 RXAFS_GiveUpCallBacks(rxconn, &fidArray,
473 (tc, rxconn, code, 0, &treq,
474 AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS, SHARED_LOCK,
479 /* ignore return code, since callbacks may have
480 * been returned anyway; we shouldn't leave them
481 * around to be returned again.
483 * Next, see if we are done with this server, and if so,
484 * break to deal with the next one.
490 /* end if: flushed a full buffer */
491 /* if we make it here, we have an entry at the head of cbrs,
492 * which we should copy to the file ID array and then free.
495 tfids[tcount++] = tcbrp->fid;
497 /* Freeing the CBR will unlink it from the server's CBR list */
499 } /* while loop for this one server */
500 if (safety2 > afs_cacheStats) {
501 afs_warn("possible internal error afs_flushVCBs (%d)\n",
504 } /* for loop for this hash chain */
505 } /* loop through all hash chains */
506 if (safety1 > afs_totalServers + 2) {
508 ("AFS internal error (afs_flushVCBs) (%d > %d), continuing...\n",
509 safety1, afs_totalServers + 2);
511 osi_Panic("afs_flushVCBS safety1");
514 ReleaseReadLock(&afs_xserver);
516 ReleaseWriteLock(&afs_xvcb);
517 afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
522 * Queue a callback on the given fid.
525 * Locks the xvcb lock.
526 * Called when the xvcache lock is already held.
527 * RACE: afs_xvcache may be dropped and reacquired
529 * \param avc vcache entry
530 * \param slept Set to 1 if we dropped afs_xvcache
531 * \return 1 if queued, 0 otherwise
535 afs_QueueVCB(struct vcache *avc, int *slept)
539 struct afs_cbr *tcbp;
542 AFS_STATCNT(afs_QueueVCB);
544 ObtainWriteLock(&afs_xvcb, 274);
546 /* we can't really give back callbacks on RO files, since the
547 * server only tracks them on a per-volume basis, and we don't
548 * know whether we still have some other files from the same volume hanging around anyway. */
550 if (!((avc->f.states & CRO) == 0 && avc->callback)) {
554 /* The callback is really just a struct server ptr. */
555 tsp = (struct server *)(avc->callback);
558 /* If we don't have CBR space, AllocCBR may block or hit the net for
559 * clearing up CBRs. Hitting the net may involve a fileserver
560 * needing to contact us, so we must drop xvcache so we don't block
561 * those requests from going through. */
562 reacquire = *slept = 1;
563 ReleaseWriteLock(&afs_xvcache);
566 /* we now have a pointer to the server, so we just allocate
567 * a queue entry and queue it.
569 tcbp = afs_AllocCBR();
570 tcbp->fid = avc->f.fid.Fid;
572 tcbp->next = tsp->cbrs;
574 tsp->cbrs->pprev = &tcbp->next;
577 tcbp->pprev = &tsp->cbrs;
579 afs_InsertHashCBR(tcbp);
583 /* now release locks and return */
584 ReleaseWriteLock(&afs_xvcb);
587 /* make sure this is after dropping xvcb, for locking order */
588 ObtainWriteLock(&afs_xvcache, 279);
595 * Remove a queued callback for a given Fid.
598 * Locks xvcb and xserver locks.
599 * Typically called with xdcache, xvcache and/or individual vcache
602 * \param afid The fid we want cleansed of queued callbacks.
607 afs_RemoveVCB(struct VenusFid *afid)
610 struct afs_cbr *cbr, *ncbr;
612 AFS_STATCNT(afs_RemoveVCB);
613 ObtainWriteLock(&afs_xvcb, 275);
615 slot = afs_HashCBRFid(&afid->Fid);
616 ncbr = afs_cbrHashT[slot];
620 ncbr = cbr->hash_next;
622 if (afid->Fid.Volume == cbr->fid.Volume &&
623 afid->Fid.Vnode == cbr->fid.Vnode &&
624 afid->Fid.Unique == cbr->fid.Unique) {
629 ReleaseWriteLock(&afs_xvcb);
633 afs_FlushReclaimedVcaches(void)
635 #if !defined(AFS_LINUX22_ENV)
638 struct vcache *tmpReclaimedVCList = NULL;
640 ObtainWriteLock(&afs_xvreclaim, 76);
641 while (ReclaimedVCList) {
642 tvc = ReclaimedVCList; /* take from free list */
643 ReclaimedVCList = tvc->nextfree;
644 tvc->nextfree = NULL;
645 code = afs_FlushVCache(tvc, &fv_slept);
647 /* If the flush failed (code != 0), we must not leak the entry: build a
648 * temporary list and put everything back on ReclaimedVCList once we
649 * reach the end of the list. This is crude; a smarter recovery
650 * strategy would be preferable. */
652 tvc->nextfree = tmpReclaimedVCList;
653 tmpReclaimedVCList = tvc;
654 /* printf("Reclaim list flush %lx failed: %d\n", (unsigned long) tvc, code); */
656 if (tvc->f.states & (CVInit
657 #ifdef AFS_DARWIN80_ENV
661 tvc->f.states &= ~(CVInit
662 #ifdef AFS_DARWIN80_ENV
666 afs_osi_Wakeup(&tvc->f.states);
669 if (tmpReclaimedVCList)
670 ReclaimedVCList = tmpReclaimedVCList;
672 ReleaseWriteLock(&afs_xvreclaim);
677 afs_PostPopulateVCache(struct vcache *avc, struct VenusFid *afid, int seq)
680 * The proper value for mvstat (for root fids) is set up by the caller.
683 if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
686 if (afs_globalVFS == 0)
687 osi_Panic("afs globalvfs");
689 osi_PostPopulateVCache(avc);
692 osi_dnlc_purgedp(avc); /* this may be overkill */
693 memset(&(avc->callsort), 0, sizeof(struct afs_q));
695 avc->f.states &=~ CVInit;
697 avc->f.states |= CBulkFetching;
698 avc->f.m.Length = seq;
700 afs_osi_Wakeup(&avc->f.states);
704 afs_ShakeLooseVCaches(afs_int32 anumber)
708 struct afs_q *tq, *uq;
709 int fv_slept, defersleep = 0;
710 afs_int32 target = anumber;
716 for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
719 if (tvc->f.states & CVFlushed) {
720 refpanic("CVFlushed on VLRU");
721 } else if (i++ > afs_vcount) {
722 refpanic("Found too many AFS vnodes on VLRU (VLRU cycle?)");
723 } else if (QNext(uq) != tq) {
724 refpanic("VLRU inconsistent");
725 } else if (tvc->f.states & CVInit) {
730 if (osi_TryEvictVCache(tvc, &fv_slept, defersleep))
736 goto retry; /* start over - may have raced. */
739 if (anumber && !defersleep) {
746 if (!afsd_dynamic_vcaches && anumber == target) {
747 afs_warn("afs_ShakeLooseVCaches: warning none freed, using %d of %d\n",
748 afs_vcount, afs_maxvcount);
754 /* Alloc new vnode. */
756 static struct vcache *
757 afs_AllocVCache(void)
761 tvc = osi_NewVnode();
766 if (afsd_dynamic_vcaches && afs_maxvcount < afs_vcount) {
767 afs_maxvcount = afs_vcount;
768 /*printf("peak vnodes: %d\n", afs_maxvcount);*/
771 afs_stats_cmperf.vcacheXAllocs++; /* count in case we have a leak */
773 /* If we create a new inode, we either give it a new slot number,
774 * or if one's available, use a slot number from the slot free list
776 if (afs_freeSlotList != NULL) {
777 struct afs_slotlist *tmp;
779 tvc->diskSlot = afs_freeSlotList->slot;
780 tmp = afs_freeSlotList;
781 afs_freeSlotList = tmp->next;
782 afs_osi_Free(tmp, sizeof(struct afs_slotlist));
784 tvc->diskSlot = afs_nextVcacheSlot++;
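/* Slots released by flushed vcaches are recycled from afs_freeSlotList
 * first; only when that list is empty is a brand-new slot number minted,
 * which keeps the on-disk slot space dense. */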
790 /* Pre-populate a newly allocated vcache. On platforms where the actual
791 * vnode is attached to the vcache, this function is called before attachment;
792 * therefore it cannot perform any actions on the vnode itself. */
795 afs_PrePopulateVCache(struct vcache *avc, struct VenusFid *afid,
796 struct server *serverp) {
799 slot = avc->diskSlot;
801 osi_PrePopulateVCache(avc);
803 avc->diskSlot = slot;
804 QZero(&avc->metadirty);
806 AFS_RWLOCK_INIT(&avc->lock, "vcache lock");
809 avc->linkData = NULL;
812 avc->execsOrWriters = 0;
814 avc->f.states = CVInit;
815 avc->last_looker = 0;
817 avc->asynchrony = -1;
821 avc->f.truncPos = AFS_NOTRUNC; /* don't truncate until we need to */
822 hzero(avc->f.m.DataVersion); /* in case we copy it into flushDV */
824 avc->callback = serverp; /* to minimize chance that clear request will lose */
827 #if defined(AFS_CACHE_BYPASS)
828 avc->cachingStates = 0;
829 avc->cachingTransitions = 0;
834 afs_FlushAllVCaches(void)
837 struct vcache *tvc, *nvc;
839 ObtainWriteLock(&afs_xvcache, 867);
842 for (i = 0; i < VCSIZE; i++) {
843 for (tvc = afs_vhashT[i]; tvc; tvc = nvc) {
847 if (afs_FlushVCache(tvc, &slept)) {
848 afs_warn("Failed to flush vcache 0x%lx\n", (unsigned long)(uintptrsz)tvc);
856 ReleaseWriteLock(&afs_xvcache);
860 * This routine is responsible for allocating a new cache entry
861 * from the free list. It formats the cache entry and inserts it
862 * into the appropriate hash tables. It must be called with
863 * afs_xvcache write-locked so as to prevent several processes from
864 * trying to create a new cache entry simultaneously.
866 * LOCK: afs_NewVCache afs_xvcache W
868 * \param afid The file id of the file whose cache entry is being created.
870 * \return The new vcache struct.
873 static_inline struct vcache *
874 afs_NewVCache_int(struct VenusFid *afid, struct server *serverp, int seq)
878 afs_int32 anumber = VCACHE_FREE;
880 AFS_STATCNT(afs_NewVCache);
882 afs_FlushReclaimedVcaches();
884 #if defined(AFS_LINUX22_ENV)
885 if(!afsd_dynamic_vcaches && afs_vcount >= afs_maxvcount) {
886 afs_ShakeLooseVCaches(anumber);
887 if (afs_vcount >= afs_maxvcount) {
888 afs_warn("afs_NewVCache - none freed\n");
892 tvc = afs_AllocVCache();
893 #else /* AFS_LINUX22_ENV */
894 /* pull out a free cache entry */
896 afs_ShakeLooseVCaches(anumber);
900 tvc = afs_AllocVCache();
902 tvc = freeVCList; /* take from free list */
903 freeVCList = tvc->nextfree;
904 tvc->nextfree = NULL;
905 afs_vcount++; /* balanced by FlushVCache */
906 } /* end of if (!freeVCList) */
908 #endif /* AFS_LINUX22_ENV */
910 #if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
912 panic("afs_NewVCache(): free vcache with vnode attached");
915 /* Populate the vcache with as much as we can. */
916 afs_PrePopulateVCache(tvc, afid, serverp);
918 /* Thread the vcache onto the VLRU */
923 tvc->hnext = afs_vhashT[i];
925 QAdd(&afs_vhashTV[j], &tvc->vhashq);
927 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
928 refpanic("NewVCache VLRU inconsistent");
930 QAdd(&VLRU, &tvc->vlruq); /* put in lruq */
931 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
932 refpanic("NewVCache VLRU inconsistent2");
934 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
935 refpanic("NewVCache VLRU inconsistent3");
937 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
938 refpanic("NewVCache VLRU inconsistent4");
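/* These refpanic checks all assert the circular doubly linked list
 * invariant: for every node q on the VLRU, including the &VLRU sentinel
 * itself, q->next->prev == q and q->prev->next == q. */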
942 /* it should now be safe to drop the xvcache lock - so attach an inode
943 * to this vcache, where necessary */
944 osi_AttachVnode(tvc, seq);
946 /* Get a reference count to hold this vcache for the VLRUQ. Note that
947 * we have to do this after attaching the vnode, because the reference
948 * count may be held in the vnode itself */
950 #if defined(AFS_LINUX22_ENV)
951 /* Hold it for the LRU (should make count 2) */
953 #elif !(defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV))
954 VREFCOUNT_SET(tvc, 1); /* us */
957 #if defined (AFS_FBSD_ENV)
958 if (tvc->f.states & CVInit)
960 afs_PostPopulateVCache(tvc, afid, seq);
967 afs_NewVCache(struct VenusFid *afid, struct server *serverp)
969 return afs_NewVCache_int(afid, serverp, 0);
973 afs_NewBulkVCache(struct VenusFid *afid, struct server *serverp, int seq)
975 return afs_NewVCache_int(afid, serverp, seq);
981 * LOCK: afs_FlushActiveVcaches afs_xvcache N
983 * \param doflocks : Do we handle flocks?
986 afs_FlushActiveVcaches(afs_int32 doflocks)
992 afs_ucred_t *cred = NULL;
993 struct vrequest treq, ureq;
994 struct AFSVolSync tsync;
997 AFS_STATCNT(afs_FlushActiveVcaches);
998 ObtainReadLock(&afs_xvcache);
999 for (i = 0; i < VCSIZE; i++) {
1000 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
1001 if (tvc->f.states & CVInit) continue;
1002 #ifdef AFS_DARWIN80_ENV
1003 if (tvc->f.states & CDeadVnode &&
1004 (tvc->f.states & (CCore|CUnlinkedDel) ||
1005 tvc->flockCount)) panic("Dead vnode has core/unlinkedel/flock");
1007 if (doflocks && tvc->flockCount != 0) {
1008 struct rx_connection *rxconn;
1009 /* if this entry has a flock, send a keep-alive call out */
1011 ReleaseReadLock(&afs_xvcache);
1012 ObtainWriteLock(&tvc->lock, 51);
1014 afs_InitReq(&treq, afs_osi_credp);
1015 treq.flags |= O_NONBLOCK;
1017 tc = afs_Conn(&tvc->f.fid, &treq, SHARED_LOCK, &rxconn);
1019 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
1022 RXAFS_ExtendLock(rxconn,
1023 (struct AFSFid *)&tvc->f.fid.Fid,
1029 } while (afs_Analyze
1030 (tc, rxconn, code, &tvc->f.fid, &treq,
1031 AFS_STATS_FS_RPCIDX_EXTENDLOCK, SHARED_LOCK, NULL));
1033 ReleaseWriteLock(&tvc->lock);
1034 #ifdef AFS_DARWIN80_ENV
1036 ObtainReadLock(&afs_xvcache);
1038 ObtainReadLock(&afs_xvcache);
1043 if ((tvc->f.states & CCore) || (tvc->f.states & CUnlinkedDel)) {
1045 * Don't let it evaporate in case someone else is in
1046 * this code. Also, drop the afs_xvcache lock while
1047 * getting vcache locks.
1050 ReleaseReadLock(&afs_xvcache);
1051 #ifdef AFS_BOZONLOCK_ENV
1052 afs_BozonLock(&tvc->pvnLock, tvc);
1054 #if defined(AFS_SGI_ENV)
1056 * That's because if we come in via the CUnlinkedDel bit state path we'll have a 0 refcnt
1058 osi_Assert(VREFCOUNT_GT(tvc,0));
1059 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1061 ObtainWriteLock(&tvc->lock, 52);
1062 if (tvc->f.states & CCore) {
1063 tvc->f.states &= ~CCore;
1064 /* XXXX Find better place-holder for cred XXXX */
1065 cred = (afs_ucred_t *)tvc->linkData;
1066 tvc->linkData = NULL; /* XXX */
1067 afs_InitReq(&ureq, cred);
1068 afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
1069 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
1070 tvc->execsOrWriters);
1071 code = afs_StoreOnLastReference(tvc, &ureq);
1072 ReleaseWriteLock(&tvc->lock);
1073 #ifdef AFS_BOZONLOCK_ENV
1074 afs_BozonUnlock(&tvc->pvnLock, tvc);
1076 hzero(tvc->flushDV);
1079 if (code && code != VNOVNODE) {
1080 afs_StoreWarn(code, tvc->f.fid.Fid.Volume,
1081 /* /dev/console */ 1);
1083 } else if (tvc->f.states & CUnlinkedDel) {
1087 ReleaseWriteLock(&tvc->lock);
1088 #ifdef AFS_BOZONLOCK_ENV
1089 afs_BozonUnlock(&tvc->pvnLock, tvc);
1091 #if defined(AFS_SGI_ENV)
1092 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1094 afs_remunlink(tvc, 0);
1095 #if defined(AFS_SGI_ENV)
1096 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1099 /* lost (or won, perhaps) the race condition */
1100 ReleaseWriteLock(&tvc->lock);
1101 #ifdef AFS_BOZONLOCK_ENV
1102 afs_BozonUnlock(&tvc->pvnLock, tvc);
1105 #if defined(AFS_SGI_ENV)
1106 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1108 #ifdef AFS_DARWIN80_ENV
1111 AFS_RELE(AFSTOV(tvc));
1112 /* Matches write code setting CCore flag */
1115 ObtainReadLock(&afs_xvcache);
1117 ObtainReadLock(&afs_xvcache);
1120 AFS_RELE(AFSTOV(tvc));
1121 /* Matches write code setting CCore flag */
1128 ReleaseReadLock(&afs_xvcache);
1149 * Make sure a cache entry is up-to-date status-wise.
1151 * NOTE: everywhere that calls this can potentially be sped up
1152 * by checking CStatd first, and avoiding doing the InitReq
1153 * if this is up-to-date.
1155 * Nowadays, the only places that call this already KNOW that the
1156 * vcache is not up-to-date, so we don't screw around.
1158 * \param avc Pointer to vcache entry to verify.
1161 * \return 0 for success or other error codes.
1164 afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
1168 AFS_STATCNT(afs_VerifyVCache);
1170 /* otherwise we must fetch the status info */
1172 ObtainWriteLock(&avc->lock, 53);
1173 if (avc->f.states & CStatd) {
1174 ReleaseWriteLock(&avc->lock);
1177 ObtainWriteLock(&afs_xcbhash, 461);
1178 avc->f.states &= ~(CStatd | CUnique);
1179 avc->callback = NULL;
1180 afs_DequeueCallback(avc);
1181 ReleaseWriteLock(&afs_xcbhash);
1182 ReleaseWriteLock(&avc->lock);
1184 /* since we've been called back, or the callback has expired,
1185 * it's possible that the contents of this directory, or this
1186 * file's name have changed, thus invalidating the dnlc contents.
1188 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
1189 osi_dnlc_purgedp(avc);
1191 osi_dnlc_purgevp(avc);
1193 /* fetch the status info */
1194 tvc = afs_GetVCache(&avc->f.fid, areq, NULL, avc);
1197 /* Put it back; caller has already incremented vrefCount */
1201 } /*afs_VerifyVCache */
1205 * Simple copy of stat info into cache.
1207 * Callers: as of 1992-04-29, only called by WriteVCache.
1209 * \param avc Ptr to vcache entry involved.
1210 * \param astat Ptr to stat info to copy.
1214 afs_SimpleVStat(struct vcache *avc,
1215 struct AFSFetchStatus *astat, struct vrequest *areq)
1218 AFS_STATCNT(afs_SimpleVStat);
1220 #ifdef AFS_64BIT_CLIENT
1221 FillInt64(length, astat->Length_hi, astat->Length);
1222 #else /* AFS_64BIT_CLIENT */
1223 length = astat->Length;
1224 #endif /* AFS_64BIT_CLIENT */
1226 #if defined(AFS_SGI_ENV)
1227 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1228 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1229 osi_Assert((valusema(&avc->vc_rwlock) <= 0)
1230 && (OSI_GET_LOCKID() == avc->vc_rwlockid));
1231 if (length < avc->f.m.Length) {
1232 vnode_t *vp = (vnode_t *) avc;
1234 osi_Assert(WriteLocked(&avc->lock));
1235 ReleaseWriteLock(&avc->lock);
1237 PTOSSVP(vp, (off_t) length, (off_t) MAXLONG);
1239 ObtainWriteLock(&avc->lock, 67);
1244 if (!afs_DirtyPages(avc)) {
1245 /* if actively writing the file, don't fetch over this value */
1246 afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT, ICL_TYPE_POINTER, avc,
1247 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
1248 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1249 avc->f.m.Length = length;
1250 avc->f.m.Date = astat->ClientModTime;
1252 avc->f.m.Owner = astat->Owner;
1253 avc->f.m.Group = astat->Group;
1254 avc->f.m.Mode = astat->UnixModeBits;
1255 if (vType(avc) == VREG) {
1256 avc->f.m.Mode |= S_IFREG;
1257 } else if (vType(avc) == VDIR) {
1258 avc->f.m.Mode |= S_IFDIR;
1259 } else if (vType(avc) == VLNK) {
1260 avc->f.m.Mode |= S_IFLNK;
1261 if ((avc->f.m.Mode & 0111) == 0)
1264 if (avc->f.states & CForeign) {
1265 struct axscache *ac;
1266 avc->f.anyAccess = astat->AnonymousAccess;
1268 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1270 * Caller has at least one bit not covered by anonymous, and
1271 * thus may have interesting rights.
1273 * HOWEVER, this is a really bad idea, because any access query
1274 * for bits which aren't covered by anonymous, on behalf of a user
1275 * who doesn't have any special rights, will result in an answer of
1276 * the form "I don't know, let's make a FetchStatus RPC and find out!"
1277 * It's an especially bad idea under Ultrix, since (due to the lack of
1278 * a proper access() call) it must perform several afs_access() calls
1279 * in order to create magic mode bits that vary according to who makes
1280 * the call. In other words, _every_ stat() generates a test for
1283 #endif /* badidea */
1284 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1285 ac->axess = astat->CallerAccess;
1286 else /* not found, add a new one if possible */
1287 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1290 } /*afs_SimpleVStat */
1294 * Store the status info *only* back to the server for a
1297 * Environment: Must be called with a shared lock held on the vnode.
1299 * \param avc Ptr to the vcache entry.
1300 * \param astatus Ptr to the status info to store.
1301 * \param areq Ptr to the associated vrequest.
1303 * \return Operation status.
1307 afs_WriteVCache(struct vcache *avc,
1308 struct AFSStoreStatus *astatus,
1309 struct vrequest *areq)
1312 struct afs_conn *tc;
1313 struct AFSFetchStatus OutStatus;
1314 struct AFSVolSync tsync;
1315 struct rx_connection *rxconn;
1317 AFS_STATCNT(afs_WriteVCache);
1318 afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
1319 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
1321 tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
1323 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
1326 RXAFS_StoreStatus(rxconn, (struct AFSFid *)&avc->f.fid.Fid,
1327 astatus, &OutStatus, &tsync);
1332 } while (afs_Analyze
1333 (tc, rxconn, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
1334 SHARED_LOCK, NULL));
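/* This do { RPC } while (afs_Analyze(...)) idiom recurs throughout the
 * file: afs_Analyze inspects the completion code, handles marking servers
 * up or down, and returns nonzero when the request should be retried
 * against another equivalent site. */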
1336 UpgradeSToWLock(&avc->lock, 20);
1338 /* success, do the changes locally */
1339 afs_SimpleVStat(avc, &OutStatus, areq);
1341 * Update the date, too. SimpleVStat didn't do this, since
1342 * it thought we were doing this after fetching new status
1343 * over a file being written.
1345 avc->f.m.Date = OutStatus.ClientModTime;
1347 /* failure, set up to check with server next time */
1348 ObtainWriteLock(&afs_xcbhash, 462);
1349 afs_DequeueCallback(avc);
1350 avc->f.states &= ~(CStatd | CUnique); /* turn off stat valid flag */
1351 ReleaseWriteLock(&afs_xcbhash);
1352 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
1353 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
1355 ConvertWToSLock(&avc->lock);
1358 } /*afs_WriteVCache */
1361 * Store status info only locally, set the proper disconnection flags
1362 * and add to dirty list.
1364 * \param avc The vcache to be written locally.
1365 * \param astatus Get attr fields from local store.
1366 * \param attrs Only the va_size field is used here.
1368 * \note Must be called with a shared lock on the vnode
1371 afs_WriteVCacheDiscon(struct vcache *avc,
1372 struct AFSStoreStatus *astatus,
1373 struct vattr *attrs)
1376 afs_int32 flags = 0;
1378 UpgradeSToWLock(&avc->lock, 700);
1380 if (!astatus->Mask) {
1386 /* Set attributes. */
1387 if (astatus->Mask & AFS_SETMODTIME) {
1388 avc->f.m.Date = astatus->ClientModTime;
1389 flags |= VDisconSetTime;
1392 if (astatus->Mask & AFS_SETOWNER) {
1393 /* printf("Not allowed yet. \n"); */
1394 /*avc->f.m.Owner = astatus->Owner;*/
1397 if (astatus->Mask & AFS_SETGROUP) {
1398 /* printf("Not allowed yet. \n"); */
1399 /*avc->f.m.Group = astatus->Group;*/
1402 if (astatus->Mask & AFS_SETMODE) {
1403 avc->f.m.Mode = astatus->UnixModeBits;
1405 #if 0 /* XXX: Leaving this out, so it doesn't mess up the file type flag.*/
1407 if (vType(avc) == VREG) {
1408 avc->f.m.Mode |= S_IFREG;
1409 } else if (vType(avc) == VDIR) {
1410 avc->f.m.Mode |= S_IFDIR;
1411 } else if (vType(avc) == VLNK) {
1412 avc->f.m.Mode |= S_IFLNK;
1413 if ((avc->f.m.Mode & 0111) == 0)
1417 flags |= VDisconSetMode;
1418 } /* if(astatus.Mask & AFS_SETMODE) */
1420 } /* if (!astatus->Mask) */
1422 if (attrs->va_size > 0) {
1423 /* XXX: Do I need more checks? */
1424 /* Truncation operation. */
1425 flags |= VDisconTrunc;
1429 afs_DisconAddDirty(avc, flags, 1);
1431 /* XXX: How about the rest of the fields? */
1433 ConvertWToSLock(&avc->lock);
1439 * Copy astat block into vcache info
1441 * \note This code may get dataversion and length out of sync if the file has
1442 * been modified. This is less than ideal. I haven't thought about it sufficiently
1443 * to be certain that it is adequate.
1445 * \note Environment: Must be called under a write lock
1447 * \param avc Ptr to vcache entry.
1448 * \param astat Ptr to stat block to copy in.
1449 * \param areq Ptr to associated request.
1452 afs_ProcessFS(struct vcache *avc,
1453 struct AFSFetchStatus *astat, struct vrequest *areq)
1456 AFS_STATCNT(afs_ProcessFS);
1458 #ifdef AFS_64BIT_CLIENT
1459 FillInt64(length, astat->Length_hi, astat->Length);
1460 #else /* AFS_64BIT_CLIENT */
1461 length = astat->Length;
1462 #endif /* AFS_64BIT_CLIENT */
1463 /* WARNING: afs_DoBulkStat uses the Length field to store a sequence
1464 * number for each bulk status request. Under no circumstances
1465 * should afs_DoBulkStat store a sequence number if the new
1466 * length will be ignored when afs_ProcessFS is called with
1467 * new stats. If you change the following conditional then you
1468 * also need to change the conditional in afs_DoBulkStat. */
1470 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1471 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1473 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
1475 /* if we're writing or mapping this file, don't fetch over these values. */
1478 afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
1479 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
1480 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1481 avc->f.m.Length = length;
1482 avc->f.m.Date = astat->ClientModTime;
1484 hset64(avc->f.m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
1485 avc->f.m.Owner = astat->Owner;
1486 avc->f.m.Mode = astat->UnixModeBits;
1487 avc->f.m.Group = astat->Group;
1488 avc->f.m.LinkCount = astat->LinkCount;
1489 if (astat->FileType == File) {
1490 vSetType(avc, VREG);
1491 avc->f.m.Mode |= S_IFREG;
1492 } else if (astat->FileType == Directory) {
1493 vSetType(avc, VDIR);
1494 avc->f.m.Mode |= S_IFDIR;
1495 } else if (astat->FileType == SymbolicLink) {
1496 if (afs_fakestat_enable && (avc->f.m.Mode & 0111) == 0) {
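/* A symlink with no execute bits is the AFS convention for a mount
 * point; with fakestat enabled it is presented as a directory instead
 * of being resolved. */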
1497 vSetType(avc, VDIR);
1498 avc->f.m.Mode |= S_IFDIR;
1500 vSetType(avc, VLNK);
1501 avc->f.m.Mode |= S_IFLNK;
1503 if ((avc->f.m.Mode & 0111) == 0) {
1507 avc->f.anyAccess = astat->AnonymousAccess;
1509 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1511 * Caller has at least one bit not covered by anonymous, and
1512 * thus may have interesting rights.
1514 * HOWEVER, this is a really bad idea, because any access query
1515 * for bits which aren't covered by anonymous, on behalf of a user
1516 * who doesn't have any special rights, will result in an answer of
1517 * the form "I don't know, let's make a FetchStatus RPC and find out!"
1518 * It's an especially bad idea under Ultrix, since (due to the lack of
1519 * a proper access() call) it must perform several afs_access() calls
1520 * in order to create magic mode bits that vary according to who makes
1521 * the call. In other words, _every_ stat() generates a test for
1524 #endif /* badidea */
1526 struct axscache *ac;
1527 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1528 ac->axess = astat->CallerAccess;
1529 else /* not found, add a new one if possible */
1530 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1532 } /*afs_ProcessFS */
1536 * Get fid from server.
1539 * \param areq Request to be passed on.
1540 * \param name Name of the directory entry to look up.
1541 * \param OutStatus Fetch status.
1546 * \return Success status of operation.
1549 afs_RemoteLookup(struct VenusFid *afid, struct vrequest *areq,
1550 char *name, struct VenusFid *nfid,
1551 struct AFSFetchStatus *OutStatusp,
1552 struct AFSCallBack *CallBackp, struct server **serverp,
1553 struct AFSVolSync *tsyncp)
1556 struct afs_conn *tc;
1557 struct rx_connection *rxconn;
1558 struct AFSFetchStatus OutDirStatus;
1561 name = ""; /* XXX */
1563 tc = afs_Conn(afid, areq, SHARED_LOCK, &rxconn);
1566 *serverp = tc->parent->srvr->server;
1567 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
1570 RXAFS_Lookup(rxconn, (struct AFSFid *)&afid->Fid, name,
1571 (struct AFSFid *)&nfid->Fid, OutStatusp,
1572 &OutDirStatus, CallBackp, tsyncp);
1577 } while (afs_Analyze
1578 (tc, rxconn, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
1588 * Given a file id and a vrequest structure, fetch the status
1589 * information associated with the file.
1591 * \param afid File ID.
1592 * \param areq Ptr to associated vrequest structure, specifying the
1593 * user whose authentication tokens will be used.
1594 * \param avc Caller may already have a vcache for this file, which is already held.
1597 * \note Environment:
1598 * The cache entry is returned with an increased vrefCount field.
1599 * The entry must be discarded by calling afs_PutVCache when you
1600 * are through using the pointer to the cache entry.
1602 * You should not hold any locks when calling this function, except
1603 * locks on other vcache entries. If you lock more than one vcache
1604 * entry simultaneously, you should lock them in this order:
1606 * 1. Lock all files first, then directories.
1607 * 2. Within a particular type, lock entries in Fid.Vnode order.
1609 * This locking hierarchy is convenient because it allows locking
1610 * of a parent dir cache entry, given a file (to check its access
1611 * control list). It also allows renames to be handled easily by
1612 * locking directories in a constant order.
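 *
 * A hypothetical comparator capturing this order (illustrative only),
 * using the convention that directory vnode numbers are odd:
 *
 *     int lock_order(struct vcache *a, struct vcache *b) {
 *         int adir = a->f.fid.Fid.Vnode & 1;
 *         int bdir = b->f.fid.Fid.Vnode & 1;
 *         if (adir != bdir)
 *             return adir ? 1 : -1;   // files (even vnodes) lock first
 *         return (a->f.fid.Fid.Vnode < b->f.fid.Fid.Vnode) ? -1 : 1;
 *     }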
1614 * \note NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
1616 * \note Might have a vcache structure already, which must
1617 * already be held by the caller
1620 afs_GetVCache(struct VenusFid *afid, struct vrequest *areq,
1621 afs_int32 * cached, struct vcache *avc)
1624 afs_int32 code, newvcache = 0;
1629 AFS_STATCNT(afs_GetVCache);
1632 *cached = 0; /* Init just in case */
1634 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1638 ObtainSharedLock(&afs_xvcache, 5);
1640 tvc = afs_FindVCache(afid, &retry, DO_STATS | DO_VLRU | IS_SLOCK);
1642 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1643 ReleaseSharedLock(&afs_xvcache);
1644 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1651 osi_Assert((tvc->f.states & CVInit) == 0);
1652 /* If we are in readdir, return the vnode even if not statd */
1653 if ((tvc->f.states & CStatd) || afs_InReadDir(tvc)) {
1654 ReleaseSharedLock(&afs_xvcache);
1658 UpgradeSToWLock(&afs_xvcache, 21);
1660 /* no cache entry, better grab one */
1661 tvc = afs_NewVCache(afid, NULL);
1664 ConvertWToSLock(&afs_xvcache);
1667 ReleaseSharedLock(&afs_xvcache);
1671 afs_stats_cmperf.vcacheMisses++;
1674 ReleaseSharedLock(&afs_xvcache);
1676 ObtainWriteLock(&tvc->lock, 54);
1678 if (tvc->f.states & CStatd) {
1679 ReleaseWriteLock(&tvc->lock);
1682 #ifdef AFS_DARWIN80_ENV
1683 /* Darwin 8.0 only has bufs in nfs, so we shouldn't have to worry about them.
1686 #if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
1688 * XXX - I really don't like this. Should try to understand better.
1689 * It seems that sometimes, when we get called, we already hold the
1690 * lock on the vnode (e.g., from afs_getattr via afs_VerifyVCache).
1691 * We can't drop the vnode lock, because that could result in a race.
1692 * Sometimes, though, we get here and don't hold the vnode lock.
1693 * I hate code paths that sometimes hold locks and sometimes don't.
1694 * In any event, the dodge we use here is to check whether the vnode
1695 * is locked, and if it isn't, then we gain and drop it around the call
1696 * to vinvalbuf; otherwise, we leave it alone.
1699 struct vnode *vp = AFSTOV(tvc);
1702 #if defined(AFS_DARWIN_ENV)
1703 iheldthelock = VOP_ISLOCKED(vp);
1705 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, current_proc());
1706 /* this is messy. we can call fsync which will try to reobtain this */
1707 if (VTOAFS(vp) == tvc)
1708 ReleaseWriteLock(&tvc->lock);
1709 if (UBCINFOEXISTS(vp)) {
1710 vinvalbuf(vp, V_SAVE, &afs_osi_cred, current_proc(), PINOD, 0);
1712 if (VTOAFS(vp) == tvc)
1713 ObtainWriteLock(&tvc->lock, 954);
1715 VOP_UNLOCK(vp, LK_EXCLUSIVE, current_proc());
1716 #elif defined(AFS_FBSD80_ENV)
1717 iheldthelock = VOP_ISLOCKED(vp);
1718 if (!iheldthelock) {
1719 /* nosleep/sleep lock order reversal */
1720 int glocked = ISAFS_GLOCK();
1723 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1727 vinvalbuf(vp, V_SAVE, PINOD, 0); /* changed late in 8.0-CURRENT */
1730 #elif defined(AFS_FBSD60_ENV)
1731 iheldthelock = VOP_ISLOCKED(vp, curthread);
1733 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
1735 vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
1738 VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
1739 #elif defined(AFS_FBSD_ENV)
1740 iheldthelock = VOP_ISLOCKED(vp, curthread);
1742 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
1743 vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
1745 VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
1746 #elif defined(AFS_OBSD_ENV)
1747 iheldthelock = VOP_ISLOCKED(vp, curproc);
1749 VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
1750 uvm_vnp_uncache(vp);
1752 VOP_UNLOCK(vp, 0, curproc);
1753 #elif defined(AFS_NBSD40_ENV)
1754 iheldthelock = VOP_ISLOCKED(vp);
1755 if (!iheldthelock) {
1756 VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
1758 uvm_vnp_uncache(vp);
1766 ObtainWriteLock(&afs_xcbhash, 464);
1767 tvc->f.states &= ~CUnique;
1769 afs_DequeueCallback(tvc);
1770 ReleaseWriteLock(&afs_xcbhash);
1772 /* Is it always appropriate to throw away all the access rights? */
1773 afs_FreeAllAxs(&(tvc->Access));
1774 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-volume info */
1776 if ((tvp->states & VForeign)) {
1778 tvc->f.states |= CForeign;
1779 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
1780 && (tvp->rootUnique == afid->Fid.Unique)) {
1784 if (tvp->states & VRO)
1785 tvc->f.states |= CRO;
1786 if (tvp->states & VBackup)
1787 tvc->f.states |= CBackup;
1788 /* now copy ".." entry back out of volume structure, if necessary */
1789 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
1791 tvc->mvid = (struct VenusFid *)
1792 osi_AllocSmallSpace(sizeof(struct VenusFid));
1793 *tvc->mvid = tvp->dotdot;
1795 afs_PutVolume(tvp, READ_LOCK);
1799 afs_RemoveVCB(afid);
1801 struct AFSFetchStatus OutStatus;
1803 if (afs_DynrootNewVnode(tvc, &OutStatus)) {
1804 afs_ProcessFS(tvc, &OutStatus, areq);
1805 tvc->f.states |= CStatd | CUnique;
1806 tvc->f.parent.vnode = OutStatus.ParentVnode;
1807 tvc->f.parent.unique = OutStatus.ParentUnique;
1811 if (AFS_IS_DISCONNECTED) {
1812 /* Nothing to do otherwise...*/
1814 /* printf("Network is down in afs_GetCache"); */
1816 code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
1818 /* For the NFS translator's benefit, make sure
1819 * non-directory vnodes always have their parent FID set
1820 * correctly, even when created as a result of decoding an
1821 * NFS filehandle. It would be nice to also do this for
1822 * directories, but we can't because the fileserver fills
1823 * in the FID of the directory itself instead of that of its parent. */
1826 if (!code && OutStatus.FileType != Directory &&
1827 !tvc->f.parent.vnode) {
1828 tvc->f.parent.vnode = OutStatus.ParentVnode;
1829 tvc->f.parent.unique = OutStatus.ParentUnique;
1830 /* XXX - SXW - It's conceivable we should mark ourselves
1831 * as dirty again here, in case we've been raced
1832 * out of the FetchStatus call.
1839 ReleaseWriteLock(&tvc->lock);
1845 ReleaseWriteLock(&tvc->lock);
1848 } /*afs_GetVCache */
1853 * Look up a vcache by fid. Look inside the cache first; if not
1854 * there, look up the file on the server, and then get its fresh
1859 * \param cached Is element cached? If NULL, don't answer.
1863 * \return The found element or NULL.
1866 afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
1867 afs_int32 * cached, struct vcache *adp, char *aname)
1869 afs_int32 code, now, newvcache = 0;
1870 struct VenusFid nfid;
1873 struct AFSFetchStatus OutStatus;
1874 struct AFSCallBack CallBack;
1875 struct AFSVolSync tsync;
1876 struct server *serverp = 0;
1880 AFS_STATCNT(afs_GetVCache);
1882 *cached = 0; /* Init just in case */
1884 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1888 ObtainReadLock(&afs_xvcache);
1889 tvc = afs_FindVCache(afid, &retry, DO_STATS /* no vlru */ );
1892 ReleaseReadLock(&afs_xvcache);
1894 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1895 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1899 ObtainReadLock(&tvc->lock);
1901 if (tvc->f.states & CStatd) {
1905 ReleaseReadLock(&tvc->lock);
1908 tvc->f.states &= ~CUnique;
1910 ReleaseReadLock(&tvc->lock);
1912 ObtainReadLock(&afs_xvcache);
1915 ReleaseReadLock(&afs_xvcache);
1917 /* lookup the file */
1920 origCBs = afs_allCBs; /* if anything changes, we don't have a cb */
1922 if (AFS_IS_DISCONNECTED) {
1923 /* printf("Network is down in afs_LookupVcache\n"); */
1927 afs_RemoteLookup(&adp->f.fid, areq, aname, &nfid, &OutStatus,
1928 &CallBack, &serverp, &tsync);
1930 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1934 ObtainSharedLock(&afs_xvcache, 6);
1935 tvc = afs_FindVCache(&nfid, &retry, DO_VLRU | IS_SLOCK/* no xstats now */ );
1937 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1938 ReleaseSharedLock(&afs_xvcache);
1939 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1945 /* no cache entry, better grab one */
1946 UpgradeSToWLock(&afs_xvcache, 22);
1947 tvc = afs_NewVCache(&nfid, serverp);
1949 ConvertWToSLock(&afs_xvcache);
1952 ReleaseSharedLock(&afs_xvcache);
1957 ReleaseSharedLock(&afs_xvcache);
1958 ObtainWriteLock(&tvc->lock, 55);
1960 /* Is it always appropriate to throw away all the access rights? */
1961 afs_FreeAllAxs(&(tvc->Access));
1962 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-vol info */
1964 if ((tvp->states & VForeign)) {
1966 tvc->f.states |= CForeign;
1967 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
1968 && (tvp->rootUnique == afid->Fid.Unique))
1971 if (tvp->states & VRO)
1972 tvc->f.states |= CRO;
1973 if (tvp->states & VBackup)
1974 tvc->f.states |= CBackup;
1975 /* now copy ".." entry back out of volume structure, if necessary */
1976 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
1978 tvc->mvid = (struct VenusFid *)
1979 osi_AllocSmallSpace(sizeof(struct VenusFid));
1980 *tvc->mvid = tvp->dotdot;
1985 ObtainWriteLock(&afs_xcbhash, 465);
1986 afs_DequeueCallback(tvc);
1987 tvc->f.states &= ~(CStatd | CUnique);
1988 ReleaseWriteLock(&afs_xcbhash);
1989 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
1990 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
1992 afs_PutVolume(tvp, READ_LOCK);
1993 ReleaseWriteLock(&tvc->lock);
1998 ObtainWriteLock(&afs_xcbhash, 466);
1999 if (origCBs == afs_allCBs) {
2000 if (CallBack.ExpirationTime) {
2001 tvc->callback = serverp;
2002 tvc->cbExpires = CallBack.ExpirationTime + now;
2003 tvc->f.states |= CStatd | CUnique;
2004 tvc->f.states &= ~CBulkFetching;
2005 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
2006 } else if (tvc->f.states & CRO) {
2007 /* adapt gives us an hour. */
2008 tvc->cbExpires = 3600 + osi_Time();
2009 /*XXX*/ tvc->f.states |= CStatd | CUnique;
2010 tvc->f.states &= ~CBulkFetching;
2011 afs_QueueCallback(tvc, CBHash(3600), tvp);
2013 tvc->callback = NULL;
2014 afs_DequeueCallback(tvc);
2015 tvc->f.states &= ~(CStatd | CUnique);
2016 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2017 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2020 afs_DequeueCallback(tvc);
2021 tvc->f.states &= ~CStatd;
2022 tvc->f.states &= ~CUnique;
2023 tvc->callback = NULL;
2024 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2025 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2027 ReleaseWriteLock(&afs_xcbhash);
2029 afs_PutVolume(tvp, READ_LOCK);
2030 afs_ProcessFS(tvc, &OutStatus, areq);
2032 ReleaseWriteLock(&tvc->lock);
2038 afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
2039 afs_int32 * cached, struct volume *tvolp)
2041 afs_int32 code = 0, i, newvcache = 0, haveStatus = 0;
2042 afs_int32 getNewFid = 0;
2044 struct VenusFid nfid;
2046 struct server *serverp = 0;
2047 struct AFSFetchStatus OutStatus;
2048 struct AFSCallBack CallBack;
2049 struct AFSVolSync tsync;
2051 #ifdef AFS_DARWIN80_ENV
2058 if (!tvolp->rootVnode || getNewFid) {
2059 struct VenusFid tfid;
2062 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2063 origCBs = afs_allCBs; /* ignore InitCallBackState */
2065 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2070 /* ReleaseReadLock(&tvolp->lock); */
2071 ObtainWriteLock(&tvolp->lock, 56);
2072 tvolp->rootVnode = afid->Fid.Vnode = nfid.Fid.Vnode;
2073 tvolp->rootUnique = afid->Fid.Unique = nfid.Fid.Unique;
2074 ReleaseWriteLock(&tvolp->lock);
2075 /* ObtainReadLock(&tvolp->lock);*/
2078 afid->Fid.Vnode = tvolp->rootVnode;
2079 afid->Fid.Unique = tvolp->rootUnique;
2083 ObtainSharedLock(&afs_xvcache, 7);
2085 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2086 if (!FidCmp(&(tvc->f.fid), afid)) {
2087 if (tvc->f.states & CVInit) {
2088 ReleaseSharedLock(&afs_xvcache);
2089 afs_osi_Sleep(&tvc->f.states);
2092 #ifdef AFS_DARWIN80_ENV
2093 if (tvc->f.states & CDeadVnode) {
2094 ReleaseSharedLock(&afs_xvcache);
2095 afs_osi_Sleep(&tvc->f.states);
2099 if (vnode_get(tvp)) /* this bumps ref count */
2101 if (vnode_ref(tvp)) {
2103 /* AFSTOV(tvc) may be NULL */
2113 if (!haveStatus && (!tvc || !(tvc->f.states & CStatd))) {
2114 /* Mount point no longer stat'd or unknown. FID may have changed. */
2116 ReleaseSharedLock(&afs_xvcache);
2117 #ifdef AFS_DARWIN80_ENV
2120 vnode_put(AFSTOV(tvc));
2121 vnode_rele(AFSTOV(tvc));
2130 UpgradeSToWLock(&afs_xvcache, 23);
2131 /* no cache entry, better grab one */
2132 tvc = afs_NewVCache(afid, NULL);
2135 ReleaseWriteLock(&afs_xvcache);
2139 afs_stats_cmperf.vcacheMisses++;
2143 afs_stats_cmperf.vcacheHits++;
2144 #if defined(AFS_DARWIN80_ENV)
2145 /* we already bumped the ref count in the for loop above */
2146 #else /* AFS_DARWIN80_ENV */
2149 UpgradeSToWLock(&afs_xvcache, 24);
2150 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2151 refpanic("GRVC VLRU inconsistent0");
2153 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2154 refpanic("GRVC VLRU inconsistent1");
2156 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2157 refpanic("GRVC VLRU inconsistent2");
2159 QRemove(&tvc->vlruq); /* move to lruq head */
2160 QAdd(&VLRU, &tvc->vlruq);
2161 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2162 refpanic("GRVC VLRU inconsistent3");
2164 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2165 refpanic("GRVC VLRU inconsistent4");
2167 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2168 refpanic("GRVC VLRU inconsistent5");
2173 ReleaseWriteLock(&afs_xvcache);
2175 if (tvc->f.states & CStatd) {
2179 ObtainReadLock(&tvc->lock);
2180 tvc->f.states &= ~CUnique;
2181 tvc->callback = NULL; /* redundant, perhaps */
2182 ReleaseReadLock(&tvc->lock);
2185 ObtainWriteLock(&tvc->lock, 57);
2187 /* Is it always appropriate to throw away all the access rights? */
2188 afs_FreeAllAxs(&(tvc->Access));
2191 tvc->f.states |= CForeign;
2192 if (tvolp->states & VRO)
2193 tvc->f.states |= CRO;
2194 if (tvolp->states & VBackup)
2195 tvc->f.states |= CBackup;
2196 /* now copy ".." entry back out of volume structure, if necessary */
2197 if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
2198 && (tvolp->rootUnique == afid->Fid.Unique)) {
2201 if (tvc->mvstat == 2 && tvolp->dotdot.Fid.Volume != 0) {
2203 tvc->mvid = (struct VenusFid *)
2204 osi_AllocSmallSpace(sizeof(struct VenusFid));
2205 *tvc->mvid = tvolp->dotdot;
2209 afs_RemoveVCB(afid);
2212 struct VenusFid tfid;
2215 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2216 origCBs = afs_allCBs; /* ignore InitCallBackState */
2218 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2223 ObtainWriteLock(&afs_xcbhash, 467);
2224 afs_DequeueCallback(tvc);
2225 tvc->callback = NULL;
2226 tvc->f.states &= ~(CStatd | CUnique);
2227 ReleaseWriteLock(&afs_xcbhash);
2228 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2229 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2230 ReleaseWriteLock(&tvc->lock);
2235 ObtainWriteLock(&afs_xcbhash, 468);
2236 if (origCBs == afs_allCBs) {
2237 tvc->f.states |= CTruth;
2238 tvc->callback = serverp;
2239 if (CallBack.ExpirationTime != 0) {
2240 tvc->cbExpires = CallBack.ExpirationTime + start;
2241 tvc->f.states |= CStatd;
2242 tvc->f.states &= ~CBulkFetching;
2243 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
2244 } else if (tvc->f.states & CRO) {
2245 /* adapt gives us an hour. */
2246 tvc->cbExpires = 3600 + osi_Time();
2247 /*XXX*/ tvc->f.states |= CStatd;
2248 tvc->f.states &= ~CBulkFetching;
2249 afs_QueueCallback(tvc, CBHash(3600), tvolp);
2252 afs_DequeueCallback(tvc);
2253 tvc->callback = NULL;
2254 tvc->f.states &= ~(CStatd | CUnique);
2255 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2256 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2258 ReleaseWriteLock(&afs_xcbhash);
2259 afs_ProcessFS(tvc, &OutStatus, areq);
2261 ReleaseWriteLock(&tvc->lock);
2267 * Update callback status and (sometimes) attributes of a vnode.
2268 * Called after doing a fetch status RPC. Whilst disconnected, attributes
2269 * shouldn't be written to the vcache here.
2274 * \param Outsp Server status after rpc call.
2275 * \param acb Callback for this vnode.
2277 * \note The vcache must be write locked.
2280 afs_UpdateStatus(struct vcache *avc, struct VenusFid *afid,
2281 struct vrequest *areq, struct AFSFetchStatus *Outsp,
2282 struct AFSCallBack *acb, afs_uint32 start)
2284 struct volume *volp;
2287 /* Don't write status in vcache if resyncing after a disconnection. */
2288 afs_ProcessFS(avc, Outsp, areq);
2290 volp = afs_GetVolume(afid, areq, READ_LOCK);
2291 ObtainWriteLock(&afs_xcbhash, 469);
2292 avc->f.states |= CTruth;
2293 if (avc->callback /* check for race */ ) {
2294 if (acb->ExpirationTime != 0) {
2295 avc->cbExpires = acb->ExpirationTime + start;
2296 avc->f.states |= CStatd;
2297 avc->f.states &= ~CBulkFetching;
2298 afs_QueueCallback(avc, CBHash(acb->ExpirationTime), volp);
2299 } else if (avc->f.states & CRO) {
2300 /* ordinary callback on a read-only volume -- AFS 3.2 style */
2301 avc->cbExpires = 3600 + start;
2302 avc->f.states |= CStatd;
2303 avc->f.states &= ~CBulkFetching;
2304 afs_QueueCallback(avc, CBHash(3600), volp);
2306 afs_DequeueCallback(avc);
2307 avc->callback = NULL;
2308 avc->f.states &= ~(CStatd | CUnique);
2309 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
2310 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2313 afs_DequeueCallback(avc);
2314 avc->callback = NULL;
2315 avc->f.states &= ~(CStatd | CUnique);
2316 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
2317 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2319 ReleaseWriteLock(&afs_xcbhash);
2321 afs_PutVolume(volp, READ_LOCK);
2325 afs_BadFetchStatus(struct afs_conn *tc)
2327 int addr = ntohl(tc->parent->srvr->sa_ip);
2328 afs_warn("afs: Invalid AFSFetchStatus from server %u.%u.%u.%u\n",
2329 (addr >> 24) & 0xff, (addr >> 16) & 0xff, (addr >> 8) & 0xff,
2331 afs_warn("afs: This suggests the server may be sending bad data that "
2332 "can lead to availability issues or data corruption. The "
2333 "issue has been avoided for now, but it may not always be "
2334 "detectable. Please upgrade the server if possible.\n");
2338 * Check if a given AFSFetchStatus structure is sane.
2340 * @param[in] tc The server from which we received the status
2341 * @param[in] status The status we received
2343 * @return whether the given structure is valid or not
2344 * @retval 0 the structure is fine
2345 * @retval nonzero the structure looks like garbage; act as if we received
2346 * the returned error code from the server
int
afs_CheckFetchStatus(struct afs_conn *tc, struct AFSFetchStatus *status)
{
    if (status->errorCode ||
	status->InterfaceVersion != 1 ||
	!(status->FileType > Invalid && status->FileType <= SymbolicLink) ||
	status->ParentVnode == 0 || status->ParentUnique == 0) {

	afs_warn("afs: FetchStatus ec %u iv %u ft %u pv %u pu %u\n",
		 (unsigned)status->errorCode, (unsigned)status->InterfaceVersion,
		 (unsigned)status->FileType, (unsigned)status->ParentVnode,
		 (unsigned)status->ParentUnique);
	afs_BadFetchStatus(tc);

	return VBUSY;
    }
    return 0;
}
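
/*
 * Typical use, mirroring afs_FetchStatus() below: validate the status
 * immediately after a successful RPC, so garbage from a misbehaving
 * fileserver is treated as an error code rather than cached:
 *
 *	code = RXAFS_FetchStatus(rxconn, &afid->Fid, Outsp, &CallBack, &tsync);
 *	if (code == 0)
 *	    code = afs_CheckFetchStatus(tc, Outsp);
 */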
/*
 * afs_FetchStatus
 *
 * Environment:
 *	Must be called with avc write-locked;
 *	don't absolutely have to invalidate the hint unless the dv has
 *	changed, but be sure to get it right else there will be consistency bugs.
 */
int
afs_FetchStatus(struct vcache *avc, struct VenusFid *afid,
		struct vrequest *areq, struct AFSFetchStatus *Outsp)
{
    int code;
    afs_uint32 start = 0;
    struct afs_conn *tc;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    struct rx_connection *rxconn;
    XSTATS_DECLS;
    do {
	tc = afs_Conn(afid, areq, SHARED_LOCK, &rxconn);
	avc->dchint = NULL;	/* invalidate hints */
	if (tc) {
	    avc->callback = tc->parent->srvr->server;
	    start = osi_Time();
	    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
	    RX_AFS_GUNLOCK();
	    code =
		RXAFS_FetchStatus(rxconn, (struct AFSFid *)&afid->Fid, Outsp,
				  &CallBack, &tsync);
	    RX_AFS_GLOCK();

	    XSTATS_END_TIME;

	    if (code == 0) {
		code = afs_CheckFetchStatus(tc, Outsp);
	    }
	} else
	    code = -1;
    } while (afs_Analyze
	     (tc, rxconn, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
	      SHARED_LOCK, NULL));

    if (!code) {
	afs_UpdateStatus(avc, afid, areq, Outsp, &CallBack, start);
    } else {
	/* used to undo the local callback, but that's too extreme.
	 * There are plenty of good reasons that fetchstatus might return
	 * an error, such as EPERM.  If we have the vnode cached, statd,
	 * with callback, might as well keep track of the fact that we
	 * don't have access...
	 */
	if (code == EPERM || code == EACCES) {
	    struct axscache *ac;
	    if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
		ac->axess = 0;
	    else		/* not found, add a new one if possible */
		afs_AddAxs(avc->Access, areq->uid, 0);
	}
    }
    return code;
}
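
/*
 * A note on the EPERM/EACCES path above (a sketch of the rationale, not a
 * guarantee of the full access-check flow): recording an axscache entry
 * with zero rights for this uid lets later access checks fail locally
 * without another round trip, until a subsequent successful fetch replaces
 * the cached rights.
 */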
/*!
 * afs_StuffVcache
 *
 * Description:
 *	Stuff some information into the vcache for the given file.
 *
 * Parameters:
 *	afid : File in question.
 *	OutStatus : Fetch status on the file.
 *	CallBack : Callback info.
 *	tc : RPC connection involved.
 *	areq : vrequest involved.
 *
 * Environment:
 *	Nothing interesting.
 */
void
afs_StuffVcache(struct VenusFid *afid,
		struct AFSFetchStatus *OutStatus,
		struct AFSCallBack *CallBack, struct afs_conn *tc,
		struct vrequest *areq)
{
    afs_int32 code, i, newvcache = 0;
    struct vcache *tvc;
    struct AFSVolSync tsync;
    struct volume *tvp;
    struct axscache *ac;
    afs_int32 retry;

    AFS_STATCNT(afs_StuffVcache);
#ifdef IFS_VCACHECOUNT
    ifs_gvcachecall++;
#endif

  loop:
    ObtainSharedLock(&afs_xvcache, 8);

    tvc = afs_FindVCache(afid, &retry, DO_VLRU | IS_SLOCK /* no stats */ );
    if (tvc && retry) {
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
	ReleaseSharedLock(&afs_xvcache);
	spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
	goto loop;
#endif
    }

    if (!tvc) {
	/* no cache entry, better grab one */
	UpgradeSToWLock(&afs_xvcache, 25);
	tvc = afs_NewVCache(afid, NULL);
	newvcache = 1;
	ConvertWToSLock(&afs_xvcache);
	if (!tvc) {
	    ReleaseSharedLock(&afs_xvcache);
	    return;
	}
    }

    ReleaseSharedLock(&afs_xvcache);
    ObtainWriteLock(&tvc->lock, 58);

    tvc->f.states &= ~CStatd;
    if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
	osi_dnlc_purgedp(tvc);	/* if it (could be) a directory */

    /* Is it always appropriate to throw away all the access rights? */
    afs_FreeAllAxs(&(tvc->Access));

    /* Copy useful per-volume info */
    tvp = afs_GetVolume(afid, areq, READ_LOCK);
    if (tvp) {
	if (newvcache && (tvp->states & VForeign))
	    tvc->f.states |= CForeign;
	if (tvp->states & VRO)
	    tvc->f.states |= CRO;
	if (tvp->states & VBackup)
	    tvc->f.states |= CBackup;
	/*
	 * Now, copy ".." entry back out of volume structure, if
	 * necessary
	 */
	if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
	    if (!tvc->mvid)
		tvc->mvid = (struct VenusFid *)
		    osi_AllocSmallSpace(sizeof(struct VenusFid));
	    *tvc->mvid = tvp->dotdot;
	}
    }

    /* store the stat on the file */
    afs_RemoveVCB(afid);
    afs_ProcessFS(tvc, OutStatus, areq);
    tvc->callback = tc->srvr->server;
    /* we use osi_Time twice below.  Ideally, we would use the time at which
     * the FetchStatus call began, instead, but we don't have it here.  So we
     * make do with "now".  In the CRO case, it doesn't really matter. In
     * the other case, we hope that the difference between "now" and when the
     * call actually began execution on the server won't be larger than the
     * padding which the server keeps.  Subtract 1 second anyway, to be on
     * the safe side.  Can't subtract more because we don't know how big
     * ExpirationTime is.  Possible consistency problems may arise if the call
     * timeout period becomes longer than the server's expiration padding. */
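    /*
     * Worked example of the race described above (values invented): if the
     * server issued a 3600-second callback at server time T and this code
     * runs delta seconds later, cbExpires becomes (T + delta) + 3600 - 1,
     * so the entry is treated as valid locally for up to delta - 1 seconds
     * after the server has already expired it; the server's expiration
     * padding is what is being relied on to cover that window.
     */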
    ObtainWriteLock(&afs_xcbhash, 470);
    if (CallBack->ExpirationTime != 0) {
	tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
	tvc->f.states |= CStatd;
	tvc->f.states &= ~CBulkFetching;
	afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
    } else if (tvc->f.states & CRO) {
	/* old-fashioned AFS 3.2 style */
	tvc->cbExpires = 3600 + osi_Time();
	/*XXX*/ tvc->f.states |= CStatd;
	tvc->f.states &= ~CBulkFetching;
	afs_QueueCallback(tvc, CBHash(3600), tvp);
    } else {
	afs_DequeueCallback(tvc);
	tvc->callback = NULL;
	tvc->f.states &= ~(CStatd | CUnique);
	if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
	    osi_dnlc_purgedp(tvc);	/* if it (could be) a directory */
    }
    ReleaseWriteLock(&afs_xcbhash);
    if (tvp)
	afs_PutVolume(tvp, READ_LOCK);

    /* look in per-pag cache */
    if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
	ac->axess = OutStatus->CallerAccess;	/* substitute pags */
    else			/* not found, add a new one if possible */
	afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);

    ReleaseWriteLock(&tvc->lock);
    afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
	       ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32,
	       tvc->cbExpires, ICL_TYPE_INT32, tvc->cbExpires - osi_Time());
    /*
     * Release ref count... hope this guy stays around...
     */
    afs_PutVCache(tvc);
}				/*afs_StuffVcache */
/*!
 * Decrements the reference count on a cache entry.
 *
 * \param avc Pointer to the cache entry to decrement.
 *
 * \note Environment: Nothing interesting.
 */
void
afs_PutVCache(struct vcache *avc)
{
    AFS_STATCNT(afs_PutVCache);
#ifdef AFS_DARWIN80_ENV
    vnode_put(AFSTOV(avc));
    AFS_FAST_RELE(avc);
#else
    /*
     * Can we use a read lock here?
     */
    ObtainReadLock(&afs_xvcache);
    AFS_FAST_RELE(avc);
    ReleaseReadLock(&afs_xvcache);
#endif
}				/*afs_PutVCache */
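
/*
 * afs_PutVCache() is the release half of the reference protocol: every
 * reference obtained through afs_FindVCache()/afs_RefVCache() (or
 * afs_GetVCache(), elsewhere in this file) must eventually be dropped this
 * way.  Illustrative caller pattern only:
 *
 *	tvc = afs_FindVCache(&fid, &retry, DO_STATS);
 *	if (tvc) {
 *		... use tvc ...
 *		afs_PutVCache(tvc);
 *	}
 */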
/*!
 * Reset a vcache entry, so local contents are ignored, and the
 * server will be reconsulted next time the vcache is used
 *
 * \param avc Pointer to the cache entry to reset
 * \param acred
 * \param skipdnlc skip the dnlc purge for this vnode
 *
 * \note avc must be write locked on entry
 *
 * \note The caller should purge the dnlc when skipdnlc is set.
 */
void
afs_ResetVCache(struct vcache *avc, afs_ucred_t *acred, afs_int32 skipdnlc)
{
    ObtainWriteLock(&afs_xcbhash, 456);
    afs_DequeueCallback(avc);
    avc->f.states &= ~(CStatd | CDirty);	/* next reference will re-stat */
    ReleaseWriteLock(&afs_xcbhash);
    /* now find the disk cache entries */
    afs_TryToSmush(avc, acred, 1);
    if (!skipdnlc) {
	osi_dnlc_purgedp(avc);
    }
    if (avc->linkData && !(avc->f.states & CCore)) {
	afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
	avc->linkData = NULL;
    }
}
/*!
 * Sleep when searching for a vcache. Releases all the pending locks,
 * sleeps, then obtains the previously released locks.
 *
 * \param avc Vcache entry to sleep on.
 * \param flag Determines which locks to release and reobtain.
 */
static void
findvc_sleep(struct vcache *avc, int flag)
{
    if (flag & IS_SLOCK) {
	ReleaseSharedLock(&afs_xvcache);
    } else {
	if (flag & IS_WLOCK) {
	    ReleaseWriteLock(&afs_xvcache);
	} else {
	    ReleaseReadLock(&afs_xvcache);
	}
    }
    afs_osi_Sleep(&avc->f.states);
    if (flag & IS_SLOCK) {
	ObtainSharedLock(&afs_xvcache, 341);
    } else {
	if (flag & IS_WLOCK) {
	    ObtainWriteLock(&afs_xvcache, 343);
	} else {
	    ObtainReadLock(&afs_xvcache);
	}
    }
}
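
/*
 * Callers treat findvc_sleep() as a retry point: after it returns, the
 * hash chain may have changed while every flavor of the afs_xvcache lock
 * was dropped, so the search is restarted from the top; see the findloop
 * label in afs_FindVCache() below.
 */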
/*!
 * Add a reference on an existing vcache entry.
 *
 * \param tvc Pointer to the vcache.
 *
 * \note Environment: Must be called with at least one reference from
 * elsewhere on the vcache, even if that reference will be dropped.
 * The global lock is required.
 *
 * \return 0 on success, -1 on failure.
 */
int
afs_RefVCache(struct vcache *tvc)
{
#ifdef AFS_DARWIN80_ENV
    vnode_t tvp;
#endif

    /* AFS_STATCNT(afs_RefVCache); */

#ifdef AFS_DARWIN80_ENV
    tvp = AFSTOV(tvc);
    if (vnode_get(tvp))
	return -1;
    if (vnode_ref(tvp)) {
	AFS_GUNLOCK();
	/* AFSTOV(tvc) may be NULL */
	vnode_put(tvp);
	AFS_GLOCK();
	return -1;
    }
#else
    osi_vnhold(tvc, 0);
#endif
    return 0;
}				/*afs_RefVCache */
/*!
 * Find a vcache entry given a fid.
 *
 * \param afid Pointer to the fid whose cache entry we desire.
 * \param retry (SGI-specific) tell the caller to drop the lock on xvcache,
 *  unlock the vnode, and try again.
 * \param flag Bit 1 to specify whether to compute hit statistics.  Not
 *  set if FindVCache is called as part of internal bookkeeping.
 *
 * \note Environment: Must be called with the afs_xvcache lock at least held
 * at the read level.  In order to do the VLRU adjustment, the xvcache lock
 * must be shared -- we upgrade it here.
 */
struct vcache *
afs_FindVCache(struct VenusFid *afid, afs_int32 *retry, afs_int32 flag)
{
    struct vcache *tvc;
    afs_int32 i;
#ifdef AFS_DARWIN80_ENV
    struct vcache *deadvc = NULL, *livevc = NULL;
    vnode_t tvp;
#endif

    AFS_STATCNT(afs_FindVCache);

  findloop:
    i = VCHash(afid);
    for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
	if (FidMatches(afid, tvc)) {
	    if (tvc->f.states & CVInit) {
		findvc_sleep(tvc, flag);
		goto findloop;
	    }
#ifdef AFS_DARWIN80_ENV
	    if (tvc->f.states & CDeadVnode) {
		findvc_sleep(tvc, flag);
		goto findloop;
	    }
#endif
	    break;
	}
    }

    /* should I have a read lock on the vnode here? */
    if (tvc) {
	if (retry)
	    *retry = 0;
#if defined(AFS_DARWIN80_ENV)
	tvp = AFSTOV(tvc);
	if (vnode_get(tvp))
	    tvp = NULL;
	if (tvp && vnode_ref(tvp)) {
	    AFS_GUNLOCK();
	    /* AFSTOV(tvc) may be NULL */
	    vnode_put(tvp);
	    AFS_GLOCK();
	    tvp = NULL;
	}
	if (!tvp) {
	    tvc = NULL;
	    return tvc;
	}
#elif defined(AFS_DARWIN_ENV)
	tvc->f.states |= CUBCinit;
	AFS_GUNLOCK();
	if (UBCINFOMISSING(AFSTOV(tvc)) ||
	    UBCINFORECLAIMED(AFSTOV(tvc))) {
	    ubc_info_init(AFSTOV(tvc));
	}
	AFS_GLOCK();
	tvc->f.states &= ~CUBCinit;
#else
	osi_vnhold(tvc, retry);	/* already held, above */
	if (retry && *retry)
	    return 0;
#endif
	/*
	 * only move to front of vlru if we have proper vcache locking
	 */
	if (flag & DO_VLRU) {
	    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
		refpanic("FindVC VLRU inconsistent1");
	    }
	    if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
		refpanic("FindVC VLRU inconsistent1");
	    }
	    if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
		refpanic("FindVC VLRU inconsistent2");
	    }
	    UpgradeSToWLock(&afs_xvcache, 26);
	    QRemove(&tvc->vlruq);
	    QAdd(&VLRU, &tvc->vlruq);
	    ConvertWToSLock(&afs_xvcache);
	    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
		refpanic("FindVC VLRU inconsistent1");
	    }
	    if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
		refpanic("FindVC VLRU inconsistent2");
	    }
	    if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
		refpanic("FindVC VLRU inconsistent3");
	    }
	    vcachegen++;
	}

	if (flag & DO_STATS) {
	    if (tvc->callback)
		afs_stats_cmperf.vcacheHits++;
	    else
		afs_stats_cmperf.vcacheMisses++;
	    if (afs_IsPrimaryCellNum(afid->Cell))
		afs_stats_cmperf.vlocalAccesses++;
	    else
		afs_stats_cmperf.vremoteAccesses++;
	}
    }
    return tvc;
}				/*afs_FindVCache */
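
/*
 * Example call, as used by afs_StuffVcache() above: find the entry while
 * holding afs_xvcache shared, moving it to the front of the VLRU but
 * skipping the hit statistics:
 *
 *	tvc = afs_FindVCache(afid, &retry, DO_VLRU | IS_SLOCK);
 */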
/*!
 * Find a vcache entry given a fid. Does a wildcard match on what we
 * have for the fid. If more than one entry, don't return anything.
 *
 * \param avcp Fill in pointer if we found one and only one.
 * \param afid Pointer to the fid whose cache entry we desire.
 * \param retry (SGI-specific) tell the caller to drop the lock on xvcache,
 *  unlock the vnode, and try again.
 * \param flags bit 1 to specify whether to compute hit statistics.  Not
 *  set if FindVCache is called as part of internal bookkeeping.
 *
 * \note Environment: Must be called with the afs_xvcache lock at least held
 * at the read level.  In order to do the VLRU adjustment, the xvcache lock
 * must be shared -- we upgrade it here.
 *
 * \return Number of matches found.
 */
int afs_duplicate_nfs_fids = 0;

afs_int32
afs_NFSFindVCache(struct vcache **avcp, struct VenusFid *afid)
{
    struct vcache *tvc;
    afs_int32 i;
    afs_int32 count = 0;
    struct vcache *found_tvc = NULL;
#ifdef AFS_DARWIN80_ENV
    vnode_t tvp;
#endif

    AFS_STATCNT(afs_FindVCache);

  loop:

    ObtainSharedLock(&afs_xvcache, 331);

    i = VCHash(afid);
    for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
	/* Match only on what we have.... */
	if (((tvc->f.fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
	    && (tvc->f.fid.Fid.Volume == afid->Fid.Volume)
	    && ((tvc->f.fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
	    && (tvc->f.fid.Cell == afid->Cell)) {
	    if (tvc->f.states & CVInit) {
		ReleaseSharedLock(&afs_xvcache);
		afs_osi_Sleep(&tvc->f.states);
		goto loop;
	    }
#ifdef AFS_DARWIN80_ENV
	    if (tvc->f.states & CDeadVnode) {
		ReleaseSharedLock(&afs_xvcache);
		afs_osi_Sleep(&tvc->f.states);
		goto loop;
	    }
	    tvp = AFSTOV(tvc);
	    if (vnode_get(tvp)) {
		/* This vnode no longer exists. */
		continue;
	    }
	    if (vnode_ref(tvp)) {
		/* This vnode no longer exists. */
		AFS_GUNLOCK();
		/* AFSTOV(tvc) may be NULL */
		vnode_put(tvp);
		AFS_GLOCK();
		continue;
	    }
#endif /* AFS_DARWIN80_ENV */
	    count++;
	    if (found_tvc) {
		/* Duplicates */
		afs_duplicate_nfs_fids++;
		ReleaseSharedLock(&afs_xvcache);
#ifdef AFS_DARWIN80_ENV
		/* Drop our reference counts. */
		vnode_put(AFSTOV(tvc));
		vnode_put(AFSTOV(found_tvc));
#endif
		return count;
	    }
	    found_tvc = tvc;
	}
    }

    tvc = found_tvc;
    /* should I have a read lock on the vnode here? */
    if (tvc) {
#ifndef AFS_DARWIN80_ENV
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
	afs_int32 retry = 0;
	osi_vnhold(tvc, &retry);
	if (retry) {
	    count = 0;
	    found_tvc = (struct vcache *)0;
	    ReleaseSharedLock(&afs_xvcache);
	    spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
	    goto loop;
	}
#else
	osi_vnhold(tvc, (int *)0);	/* already held, above */
#endif
#endif
	/*
	 * We obtained the xvcache lock above.
	 */
	if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	    refpanic("FindVC VLRU inconsistent1");
	}
	if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
	    refpanic("FindVC VLRU inconsistent1");
	}
	if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
	    refpanic("FindVC VLRU inconsistent2");
	}
	UpgradeSToWLock(&afs_xvcache, 568);
	QRemove(&tvc->vlruq);
	QAdd(&VLRU, &tvc->vlruq);
	ConvertWToSLock(&afs_xvcache);
	if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	    refpanic("FindVC VLRU inconsistent1");
	}
	if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
	    refpanic("FindVC VLRU inconsistent2");
	}
	if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
	    refpanic("FindVC VLRU inconsistent3");
	}
    }

    vcachegen++;

    if (tvc && (tvc->f.states & CStatd))
	afs_stats_cmperf.vcacheHits++;
    else
	afs_stats_cmperf.vcacheMisses++;
    if (afs_IsPrimaryCellNum(afid->Cell))
	afs_stats_cmperf.vlocalAccesses++;
    else
	afs_stats_cmperf.vremoteAccesses++;

    *avcp = tvc;		/* May be null */

    ReleaseSharedLock(&afs_xvcache);
    return (tvc ? 1 : 0);
}				/*afs_NFSFindVCache */
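
/*
 * The wildcard match above is for the NFS translator, which (as best this
 * comment can assert) can only reconstruct a truncated fid from an NFS
 * file handle: roughly the low 16 bits of the Vnode and 24 bits of the
 * Unique.  If two cached entries collide on that truncated key, the
 * routine refuses to guess, bumps afs_duplicate_nfs_fids, and returns the
 * match count rather than a vcache.
 */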
/*!
 * Initialize vcache related variables
 */
void
afs_vcacheInit(int astatSize)
{
#if !defined(AFS_LINUX22_ENV)
    struct vcache *tvp;
#endif
    int i;

    if (!afs_maxvcount) {
	afs_maxvcount = astatSize;	/* no particular limit on linux? */
    }
#if !defined(AFS_LINUX22_ENV)
    freeVCList = NULL;
#endif

    AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
    LOCK_INIT(&afs_xvcb, "afs_xvcb");

#if !defined(AFS_LINUX22_ENV)
    /* Allocate and thread the struct vcache entries */
    tvp = afs_osi_Alloc(astatSize * sizeof(struct vcache));
    osi_Assert(tvp != NULL);
    memset(tvp, 0, sizeof(struct vcache) * astatSize);

    Initial_freeVCList = tvp;
    freeVCList = &(tvp[0]);
    for (i = 0; i < astatSize - 1; i++) {
	tvp[i].nextfree = &(tvp[i + 1]);
    }
    tvp[astatSize - 1].nextfree = NULL;
# ifdef KERNEL_HAVE_PIN
    pin((char *)tvp, astatSize * sizeof(struct vcache));	/* XXX */
# endif
#endif

#if defined(AFS_SGI_ENV)
    for (i = 0; i < astatSize; i++) {
	char name[METER_NAMSZ];
	struct vcache *tvc = &tvp[i];

	tvc->v.v_number = ++afsvnumbers;
	tvc->vc_rwlockid = OSI_NO_LOCKID;
	initnsema(&tvc->vc_rwlock, 1,
		  makesname(name, "vrw", tvc->v.v_number));
#ifndef AFS_SGI53_ENV
	initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
#endif
#ifndef AFS_SGI62_ENV
	initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
#endif /* AFS_SGI62_ENV */
    }
#endif
    QInit(&VLRU);
    for (i = 0; i < VCSIZE; ++i)
	QInit(&afs_vhashTV[i]);
}
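
/*
 * After initialization the free list is a singly-linked chain threaded
 * through nextfree:
 *
 *	freeVCList -> tvp[0] -> tvp[1] -> ... -> tvp[astatSize - 1] -> NULL
 *
 * On platforms that preallocate vcaches this is the pool that
 * afs_NewVCache() draws from.
 */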
/*!
 * Shutdown vcache.
 */
void
shutdown_vcache(void)
{
    int i;
    struct afs_cbr *tsp;
    /*
     * XXX We may potentially miss some of the vcaches because if when
     * there are no free vcache entries and all the vcache entries are active
     * ones then we allocate an additional one - admittedly we almost never
     * had that occur.
     */

    AFS_STATCNT(shutdown_vcache);
    {
	struct afs_q *tq, *uq = NULL;
	struct vcache *tvc;

	for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
	    tvc = QTOV(tq);
	    uq = QPrev(tq);
	    if (tvc->mvid) {
		osi_FreeSmallSpace(tvc->mvid);
		tvc->mvid = (struct VenusFid *)0;
	    }
#ifdef AFS_AIX_ENV
	    aix_gnode_rele(AFSTOV(tvc));
#endif
	    if (tvc->linkData) {
		afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
		tvc->linkData = 0;
	    }
	}
	/*
	 * Also free the remaining ones in the Cache
	 */
	for (i = 0; i < VCSIZE; i++) {
	    for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
		if (tvc->mvid) {
		    osi_FreeSmallSpace(tvc->mvid);
		    tvc->mvid = (struct VenusFid *)0;
		}
#ifdef AFS_AIX_ENV
		if (tvc->v.v_gnode)
		    afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
#ifdef AFS_AIX32_ENV
		if (tvc->segid) {
		    AFS_GUNLOCK();
		    vms_delete(tvc->segid);
		    AFS_GLOCK();
		    tvc->segid = tvc->vmh = NULL;
		    if (VREFCOUNT_GT(tvc, 0))
			osi_Panic("flushVcache: vm race");
		}
#endif
#endif
#if defined(AFS_SUN5_ENV)
		if (tvc->credp) {
		    crfree(tvc->credp);
		    tvc->credp = NULL;
		}
#endif
		if (tvc->linkData) {
		    afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
		    tvc->linkData = 0;
		}

		afs_FreeAllAxs(&(tvc->Access));
	    }
	    afs_vhashT[i] = 0;
	}
    }
    /*
     * Free any leftover callback queue
     */
    for (i = 0; i < afs_stats_cmperf.CallBackAlloced; i++) {
	tsp = afs_cbrHeads[i];
	afs_cbrHeads[i] = 0;
	afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
    }

#if !defined(AFS_LINUX22_ENV)
    afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));

# ifdef KERNEL_HAVE_PIN
    unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
# endif

    freeVCList = Initial_freeVCList = 0;
#endif

    AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
    LOCK_INIT(&afs_xvcb, "afs_xvcb");
    QInit(&VLRU);
    for (i = 0; i < VCSIZE; ++i)
	QInit(&afs_vhashTV[i]);
}
void
afs_DisconGiveUpCallbacks(void)
{
    int i;
    struct vcache *tvc;
    int nq = 0;

    ObtainWriteLock(&afs_xvcache, 1002);	/* XXX - should be a unique number */

  retry:
    /* Somehow, walk the set of vcaches, with each one coming out as tvc */
    for (i = 0; i < VCSIZE; i++) {
	for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
	    int slept = 0;
	    if (afs_QueueVCB(tvc, &slept)) {
		tvc->callback = NULL;
		nq++;
	    }
	    if (slept) {
		goto retry;
	    }
	}
    }

    ReleaseWriteLock(&afs_xvcache);

    afs_FlushVCBs(2);
}
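
/*
 * A sketch of the intent above: while disconnected we cannot tell the
 * fileserver to break our callbacks, so each vcache's callback is queued
 * locally via afs_QueueVCB() and the server pointer cleared; the queued
 * "give up callbacks" RPCs are then pushed out by afs_FlushVCBs() once the
 * write lock has been dropped.
 */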
/*!
 * Clear the Statd flag from all vcaches
 *
 * This function removes the Statd flag from all vcaches. It's used by
 * disconnected mode to tidy up during reconnection.
 */
void
afs_ClearAllStatdFlag(void)
{
    int i;
    struct vcache *tvc;

    ObtainWriteLock(&afs_xvcache, 715);

    for (i = 0; i < VCSIZE; i++) {
	for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
	    tvc->f.states &= ~(CStatd | CUnique);
	}
    }
    ReleaseWriteLock(&afs_xvcache);
}