2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 * afs_FlushActiveVcaches
22 * afs_WriteVCacheDiscon
40 #include <afsconfig.h>
41 #include "afs/param.h"
43 #include "afs/sysincludes.h" /*Standard vendor system headers */
44 #include "afsincludes.h" /*AFS-based standard headers */
45 #include "afs/afs_stats.h"
46 #include "afs/afs_cbqueue.h"
47 #include "afs/afs_osidnlc.h"
49 afs_int32 afs_maxvcount = 0; /* max number of vcache entries */
50 afs_int32 afs_vcount = 0; /* number of vcache entries in use now */
58 #endif /* AFS_SGI64_ENV */
60 /* Exported variables */
61 afs_rwlock_t afs_xvcdirty; /*Lock: discon vcache dirty list mgmt */
62 afs_rwlock_t afs_xvcache; /*Lock: alloc new stat cache entries */
63 afs_rwlock_t afs_xvreclaim; /*Lock: entries reclaimed, not on free list */
64 afs_lock_t afs_xvcb; /*Lock: fids on which there are callbacks */
65 #if !defined(AFS_LINUX22_ENV)
66 static struct vcache *freeVCList; /*Free list for stat cache entries */
67 struct vcache *ReclaimedVCList; /*Reclaimed list for stat entries */
68 static struct vcache *Initial_freeVCList; /*Initial list for above */
70 struct afs_q VLRU; /*vcache LRU */
71 afs_int32 vcachegen = 0;
72 unsigned int afs_paniconwarn = 0;
73 struct vcache *afs_vhashT[VCSIZE];
74 struct afs_q afs_vhashTV[VCSIZE];
75 static struct afs_cbr *afs_cbrHashT[CBRSIZE];
76 afs_int32 afs_bulkStatsLost;
77 int afs_norefpanic = 0;
80 /* Disk-backed vcache definitions
81 * Both protected by xvcache */
82 static int afs_nextVcacheSlot = 0;
83 static struct afs_slotlist *afs_freeSlotList = NULL;
85 /* Forward declarations */
86 static afs_int32 afs_QueueVCB(struct vcache *avc, int *slept);
89 * Generate an index into the hash table for a given Fid.
91 * \return The hash value.
94 afs_HashCBRFid(struct AFSFid *fid)
96 return (fid->Volume + fid->Vnode + fid->Unique) % CBRSIZE;
100 * Insert a CBR entry into the hash table.
101 * Must be called with afs_xvcb held.
106 afs_InsertHashCBR(struct afs_cbr *cbr)
108 int slot = afs_HashCBRFid(&cbr->fid);
110 cbr->hash_next = afs_cbrHashT[slot];
111 if (afs_cbrHashT[slot])
112 afs_cbrHashT[slot]->hash_pprev = &cbr->hash_next;
114 cbr->hash_pprev = &afs_cbrHashT[slot];
115 afs_cbrHashT[slot] = cbr;
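/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * helper showing how the CBR hash chain built by afs_InsertHashCBR() above
 * is meant to be searched.  The name afs_FindHashCBR is made up for this
 * example; a real caller would hold the afs_xvcb lock, as afs_RemoveVCB()
 * does when it walks the same chain.
 */
#if 0
static struct afs_cbr *
afs_FindHashCBR(struct AFSFid *fid)
{
    struct afs_cbr *cbr;
    int slot = afs_HashCBRFid(fid);

    /* walk the hash chain for this slot, matching on the full fid */
    for (cbr = afs_cbrHashT[slot]; cbr; cbr = cbr->hash_next) {
	if (cbr->fid.Volume == fid->Volume
	    && cbr->fid.Vnode == fid->Vnode
	    && cbr->fid.Unique == fid->Unique)
	    return cbr;
    }
    return NULL;
}
#endif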
120 * Flush the given vcache entry.
123 * afs_xvcache lock must be held for writing upon entry to
124 * prevent people from changing the vrefCount field, and to
125 * protect the lruq and hnext fields.
126 * LOCK: afs_FlushVCache afs_xvcache W
127 * REFCNT: vcache ref count must be zero on entry except for osf1
128 * RACE: lock is dropped and reobtained, permitting race in caller
130 * \param avc Pointer to vcache entry to flush.
131 * \param slept Pointer to int to set 1 if we sleep/drop locks, 0 if we don't.
135 afs_FlushVCache(struct vcache *avc, int *slept)
136 { /*afs_FlushVCache */
139 struct vcache **uvc, *wvc;
142 AFS_STATCNT(afs_FlushVCache);
143 afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
144 ICL_TYPE_INT32, avc->f.states);
146 code = osi_VM_FlushVCache(avc, slept);
150 if (avc->f.states & CVFlushed) {
154 #if !defined(AFS_LINUX22_ENV)
155 if (avc->nextfree || !avc->vlruq.prev || !avc->vlruq.next) { /* qv afs.h */
156 refpanic("LRU vs. Free inconsistency");
159 avc->f.states |= CVFlushed;
160 /* pull the entry out of the lruq and put it on the free list */
161 QRemove(&avc->vlruq);
163 /* keep track of # of files that we bulk stat'd, but never used
164 * before they got recycled.
166 if (avc->f.states & CBulkStat)
169 /* remove entry from the hash chain */
170 i = VCHash(&avc->f.fid);
171 uvc = &afs_vhashT[i];
172 for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
180 /* remove entry from the volume hash table */
181 QRemove(&avc->vhashq);
184 osi_FreeSmallSpace(avc->mvid);
185 avc->mvid = (struct VenusFid *)0;
187 afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
188 avc->linkData = NULL;
190 #if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
191 /* OK, there are no internal vrefCounts, so there shouldn't
192 * be any more refs here. */
194 #ifdef AFS_DARWIN80_ENV
195 vnode_clearfsnode(AFSTOV(avc));
196 vnode_removefsref(AFSTOV(avc));
198 avc->v->v_data = NULL; /* remove from vnode */
200 AFSTOV(avc) = NULL; /* also drop the ptr to vnode */
203 #ifdef AFS_SUN510_ENV
204 /* As we use private vnodes, cleanup is up to us */
205 vn_reinit(AFSTOV(avc));
207 afs_FreeAllAxs(&(avc->Access));
208 ObtainWriteLock(&afs_xcbhash, 460);
209 afs_DequeueCallback(avc); /* remove it from queued callbacks list */
210 avc->f.states &= ~(CStatd | CUnique);
211 ReleaseWriteLock(&afs_xcbhash);
212 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
213 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
215 osi_dnlc_purgevp(avc);
217 if (!afs_shuttingdown)
218 afs_QueueVCB(avc, slept);
221 * Next, keep track of which vnodes we've deleted for create's
222 * optimistic synchronization algorithm
225 if (avc->f.fid.Fid.Vnode & 1)
231 #if !defined(AFS_LINUX22_ENV)
232 /* put the entry in the free list */
233 avc->nextfree = freeVCList;
235 if (avc->vlruq.prev || avc->vlruq.next) {
236 refpanic("LRU vs. Free inconsistency");
238 avc->f.states |= CVFlushed;
240 /* This should put it back on the vnode free list since usecount is 1 */
242 if (VREFCOUNT_GT(avc,0)) {
243 AFS_RELE(AFSTOV(avc));
244 afs_stats_cmperf.vcacheXAllocs--;
246 if (afs_norefpanic) {
247 afs_warn("flush vc refcnt < 1");
250 osi_Panic("flush vc refcnt < 1");
252 #endif /* AFS_LINUX22_ENV */
257 } /*afs_FlushVCache */
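/*
 * Illustrative sketch (assumption, not original code) of the calling
 * convention described above: afs_xvcache must be write-locked, and the
 * slept flag tells the caller whether the lock was dropped and re-obtained,
 * in which case any walk of afs_vhashT or the VLRU must be restarted.  The
 * lock-history number below is made up for this example.
 */
#if 0
    int slept = 0;

    ObtainWriteLock(&afs_xvcache, 999);
    if (afs_FlushVCache(tvc, &slept) == 0 && slept) {
	/* the lock was dropped during the flush; restart the traversal */
    }
    ReleaseWriteLock(&afs_xvcache);
#endif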
261 * The core of the inactive vnode op for all but IRIX.
267 afs_InactiveVCache(struct vcache *avc, afs_ucred_t *acred)
269 AFS_STATCNT(afs_inactive);
270 if (avc->f.states & CDirty) {
271 /* we can't keep trying to push back dirty data forever. Give up. */
272 afs_InvalidateAllSegments(avc); /* turns off dirty bit */
274 avc->f.states &= ~CMAPPED; /* mainly used by SunOS 4.0.x */
275 avc->f.states &= ~CDirty; /* Turn it off */
276 if (avc->f.states & CUnlinked) {
277 if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
278 avc->f.states |= CUnlinkedDel;
281 afs_remunlink(avc, 1); /* ignore any return code */
288 * Allocate a callback return structure from the
289 * free list and return it.
291 * Environment: The alloc and free routines are both called with the afs_xvcb lock
292 * held, so we don't have to worry about blocking in osi_Alloc.
294 * \return The allocated afs_cbr.
296 static struct afs_cbr *afs_cbrSpace = 0;
297 /* if alloc limit below changes, fix me! */
298 static struct afs_cbr *afs_cbrHeads[16];
305 while (!afs_cbrSpace) {
306 if (afs_stats_cmperf.CallBackAlloced >= sizeof(afs_cbrHeads)/sizeof(afs_cbrHeads[0])) {
307 /* don't allocate more than 16 * AFS_NCBRS for now */
309 afs_stats_cmperf.CallBackFlushes++;
312 tsp = afs_osi_Alloc(AFS_NCBRS * sizeof(struct afs_cbr));
313 osi_Assert(tsp != NULL);
314 for (i = 0; i < AFS_NCBRS - 1; i++) {
315 tsp[i].next = &tsp[i + 1];
317 tsp[AFS_NCBRS - 1].next = 0;
319 afs_cbrHeads[afs_stats_cmperf.CallBackAlloced] = tsp;
320 afs_stats_cmperf.CallBackAlloced++;
324 afs_cbrSpace = tsp->next;
329 * Free a callback return structure, removing it from all lists.
331 * Environment: the xvcb lock is held over these calls.
333 * \param asp The address of the structure to free.
338 afs_FreeCBR(struct afs_cbr *asp)
340 *(asp->pprev) = asp->next;
342 asp->next->pprev = asp->pprev;
344 *(asp->hash_pprev) = asp->hash_next;
346 asp->hash_next->hash_pprev = asp->hash_pprev;
348 asp->next = afs_cbrSpace;
354 FlushAllVCBs(int nconns, struct rx_connection **rxconns,
355 struct afs_conn **conns)
360 results = afs_osi_Alloc(nconns * sizeof (afs_int32));
361 osi_Assert(results != NULL);
364 multi_Rx(rxconns,nconns)
366 multi_RXAFS_GiveUpAllCallBacks();
367 results[multi_i] = multi_error;
372 * Freeing the CBR will unlink it from the server's CBR list;
373 * do it here, not in the loop, because a dynamic CBR will call
374 * into the memory management routines.
376 for ( i = 0 ; i < nconns ; i++ ) {
377 if (results[i] == 0) {
378 /* Unchain all of them */
379 while (conns[i]->parent->srvr->server->cbrs)
380 afs_FreeCBR(conns[i]->parent->srvr->server->cbrs);
383 afs_osi_Free(results, nconns * sizeof(afs_int32));
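/*
 * Illustrative sketch (assumption, not original code): the complete shape
 * of the multi_Rx idiom used in FlushAllVCBs above.  multi_i and
 * multi_error are provided by the multi_Rx machinery, and multi_End
 * finalizes the batched calls.
 */
#if 0
    multi_Rx(rxconns, nconns)
    {
	multi_RXAFS_GiveUpAllCallBacks();
	results[multi_i] = multi_error;
    }
    multi_End;
#endif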
387 * Flush all queued callbacks to all servers.
389 * Environment: holds xvcb lock over RPC to guard against race conditions
390 * when a new callback is granted for the same file later on.
392 * \return 0 for success.
395 afs_FlushVCBs(afs_int32 lockit)
397 struct AFSFid *tfids;
398 struct AFSCallBack callBacks[1];
399 struct AFSCBFids fidArray;
400 struct AFSCBs cbArray;
402 struct afs_cbr *tcbrp;
406 struct vrequest *treq = NULL;
408 int safety1, safety2, safety3;
411 if (AFS_IS_DISCONNECTED)
414 if ((code = afs_CreateReq(&treq, afs_osi_credp)))
416 treq->flags |= O_NONBLOCK;
417 tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
418 osi_Assert(tfids != NULL);
421 ObtainWriteLock(&afs_xvcb, 273);
424 * First, attempt a multi across everything, all addresses
425 * for all servers we know of.
429 afs_LoopServers(AFS_LS_ALL, NULL, 0, FlushAllVCBs, NULL);
431 ObtainReadLock(&afs_xserver);
432 for (i = 0; i < NSERVERS; i++) {
433 for (safety1 = 0, tsp = afs_servers[i];
434 tsp && safety1 < afs_totalServers + 10;
435 tsp = tsp->next, safety1++) {
437 if (tsp->cbrs == (struct afs_cbr *)0)
440 /* otherwise, grab a block of AFS_MAXCBRSCALL from the list
441 * and make an RPC, over and over again.
443 tcount = 0; /* number found so far */
444 for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
445 if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
446 struct rx_connection *rxconn;
447 /* if buffer is full, or we've queued all we're going
448 * to from this server, we should flush out the
451 fidArray.AFSCBFids_len = tcount;
452 fidArray.AFSCBFids_val = (struct AFSFid *)tfids;
453 cbArray.AFSCBs_len = 1;
454 cbArray.AFSCBs_val = callBacks;
455 memset(&callBacks[0], 0, sizeof(callBacks[0]));
456 callBacks[0].CallBackType = CB_EXCLUSIVE;
457 for (safety3 = 0; safety3 < AFS_MAXHOSTS * 2; safety3++) {
458 tc = afs_ConnByHost(tsp, tsp->cell->fsport,
459 tsp->cell->cellNum, treq, 0,
460 SHARED_LOCK, 0, &rxconn);
463 (AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
466 RXAFS_GiveUpCallBacks(rxconn, &fidArray,
473 (tc, rxconn, code, 0, treq,
474 AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS, SHARED_LOCK,
479 /* ignore the return code; since the callbacks may have
480 * been returned anyway, we shouldn't leave them
481 * around to be returned again.
483 * Next, see if we are done with this server, and if so,
484 * break to deal with the next one.
490 /* end if: flushed a full buffer */
491 /* if we make it here, we have an entry at the head of cbrs,
492 * which we should copy to the file ID array and then free.
495 tfids[tcount++] = tcbrp->fid;
497 /* Freeing the CBR will unlink it from the server's CBR list */
499 } /* while loop for this one server */
500 if (safety2 > afs_cacheStats) {
501 afs_warn("possible internal error afs_flushVCBs (%d)\n",
504 } /* for loop for this hash chain */
505 } /* loop through all hash chains */
506 if (safety1 > afs_totalServers + 2) {
508 ("AFS internal error (afs_flushVCBs) (%d > %d), continuing...\n",
509 safety1, afs_totalServers + 2);
511 osi_Panic("afs_flushVCBS safety1");
514 ReleaseReadLock(&afs_xserver);
516 ReleaseWriteLock(&afs_xvcb);
517 afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
518 afs_DestroyReq(treq);
523 * Queue a callback on the given fid.
526 * Locks the xvcb lock.
527 * Called when the xvcache lock is already held.
528 * RACE: afs_xvcache may be dropped and reacquired
530 * \param avc vcache entry
531 * \param slept Set to 1 if we dropped afs_xvcache
532 * \return 1 if queued, 0 otherwise
536 afs_QueueVCB(struct vcache *avc, int *slept)
540 struct afs_cbr *tcbp;
543 AFS_STATCNT(afs_QueueVCB);
545 ObtainWriteLock(&afs_xvcb, 274);
547 /* we can't really give back callbacks on RO files, since the
548 * server only tracks them on a per-volume basis, and we don't
549 * know whether we still have some other files from the same
551 if (!((avc->f.states & CRO) == 0 && avc->callback)) {
555 /* The callback is really just a struct server ptr. */
556 tsp = (struct server *)(avc->callback);
559 /* If we don't have CBR space, AllocCBR may block or hit the net for
560 * clearing up CBRs. Hitting the net may involve a fileserver
561 * needing to contact us, so we must drop xvcache so we don't block
562 * those requests from going through. */
563 reacquire = *slept = 1;
564 ReleaseWriteLock(&afs_xvcache);
567 /* we now have a pointer to the server, so we just allocate
568 * a queue entry and queue it.
570 tcbp = afs_AllocCBR();
571 tcbp->fid = avc->f.fid.Fid;
573 tcbp->next = tsp->cbrs;
575 tsp->cbrs->pprev = &tcbp->next;
578 tcbp->pprev = &tsp->cbrs;
580 afs_InsertHashCBR(tcbp);
584 /* now release locks and return */
585 ReleaseWriteLock(&afs_xvcb);
588 /* make sure this is after dropping xvcb, for locking order */
589 ObtainWriteLock(&afs_xvcache, 279);
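/*
 * Illustrative sketch (not original code) of the lock ordering used by
 * afs_QueueVCB above: afs_xvcb nests inside afs_xvcache, so afs_xvcache is
 * released before afs_AllocCBR() (which may block or generate fileserver
 * traffic) and is re-obtained only after afs_xvcb has been dropped.
 */
#if 0
    ObtainWriteLock(&afs_xvcb, 274);	/* caller already holds afs_xvcache */
    ReleaseWriteLock(&afs_xvcache);	/* don't block incoming fileserver requests */
    tcbp = afs_AllocCBR();		/* may sleep or hit the net */
    /* ... thread tcbp onto tsp->cbrs and the CBR hash ... */
    ReleaseWriteLock(&afs_xvcb);
    ObtainWriteLock(&afs_xvcache, 279);	/* only after xvcb is dropped */
#endif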
596 * Remove a queued callback for a given Fid.
599 * Locks xvcb and xserver locks.
600 * Typically called with xdcache, xvcache and/or individual vcache
603 * \param afid The fid we want cleansed of queued callbacks.
608 afs_RemoveVCB(struct VenusFid *afid)
611 struct afs_cbr *cbr, *ncbr;
613 AFS_STATCNT(afs_RemoveVCB);
614 ObtainWriteLock(&afs_xvcb, 275);
616 slot = afs_HashCBRFid(&afid->Fid);
617 ncbr = afs_cbrHashT[slot];
621 ncbr = cbr->hash_next;
623 if (afid->Fid.Volume == cbr->fid.Volume &&
624 afid->Fid.Vnode == cbr->fid.Vnode &&
625 afid->Fid.Unique == cbr->fid.Unique) {
630 ReleaseWriteLock(&afs_xvcb);
634 afs_FlushReclaimedVcaches(void)
636 #if !defined(AFS_LINUX22_ENV)
639 struct vcache *tmpReclaimedVCList = NULL;
641 ObtainWriteLock(&afs_xvreclaim, 76);
642 while (ReclaimedVCList) {
643 tvc = ReclaimedVCList; /* take from free list */
644 ReclaimedVCList = tvc->nextfree;
645 tvc->nextfree = NULL;
646 code = afs_FlushVCache(tvc, &fv_slept);
648 /* If the flush failed, we can't simply drop the entry. */
649 /* Instead, build a temporary list and put everything back on the
650 reclaimed list when we reach the end, so nothing is leaked. */
651 /* This is not a great recovery strategy; we probably need a way
652 to be smarter about this. */
653 tvc->nextfree = tmpReclaimedVCList;
654 tmpReclaimedVCList = tvc;
655 /* printf("Reclaim list flush %lx failed: %d\n", (unsigned long) tvc, code); */
657 if (tvc->f.states & (CVInit
658 #ifdef AFS_DARWIN80_ENV
662 tvc->f.states &= ~(CVInit
663 #ifdef AFS_DARWIN80_ENV
667 afs_osi_Wakeup(&tvc->f.states);
670 if (tmpReclaimedVCList)
671 ReclaimedVCList = tmpReclaimedVCList;
673 ReleaseWriteLock(&afs_xvreclaim);
678 afs_PostPopulateVCache(struct vcache *avc, struct VenusFid *afid, int seq)
681 * The proper value for mvstat (for root fids) is set up by the caller.
684 if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
687 if (afs_globalVFS == 0)
688 osi_Panic("afs globalvfs");
690 osi_PostPopulateVCache(avc);
693 osi_dnlc_purgedp(avc); /* this may be overkill */
694 memset(&(avc->callsort), 0, sizeof(struct afs_q));
696 avc->f.states &= ~CVInit;
698 avc->f.states |= CBulkFetching;
699 avc->f.m.Length = seq;
701 afs_osi_Wakeup(&avc->f.states);
705 afs_ShakeLooseVCaches(afs_int32 anumber)
709 struct afs_q *tq, *uq;
710 int fv_slept, defersleep = 0;
712 afs_int32 target = anumber;
719 for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
722 if (tvc->f.states & CVFlushed) {
723 refpanic("CVFlushed on VLRU");
724 } else if (i++ > limit) {
725 afs_warn("afs_ShakeLooseVCaches: i %d limit %d afs_vcount %d afs_maxvcount %d\n",
726 (int)i, limit, (int)afs_vcount, (int)afs_maxvcount);
727 refpanic("Found too many AFS vnodes on VLRU (VLRU cycle?)");
728 } else if (QNext(uq) != tq) {
729 refpanic("VLRU inconsistent");
730 } else if (tvc->f.states & CVInit) {
735 if (osi_TryEvictVCache(tvc, &fv_slept, defersleep))
741 goto retry; /* start over - may have raced. */
744 if (anumber && !defersleep) {
751 if (!afsd_dynamic_vcaches && anumber == target) {
752 afs_warn("afs_ShakeLooseVCaches: warning none freed, using %d of %d\n",
753 afs_vcount, afs_maxvcount);
759 /* Alloc new vnode. */
761 static struct vcache *
762 afs_AllocVCache(void)
766 tvc = osi_NewVnode();
771 if (afsd_dynamic_vcaches && afs_maxvcount < afs_vcount) {
772 afs_maxvcount = afs_vcount;
773 /*printf("peak vnodes: %d\n", afs_maxvcount);*/
776 afs_stats_cmperf.vcacheXAllocs++; /* count in case we have a leak */
778 /* If we create a new vcache, we either give it a new slot number,
779 * or, if one is available, use a slot number from the slot free list
781 if (afs_freeSlotList != NULL) {
782 struct afs_slotlist *tmp;
784 tvc->diskSlot = afs_freeSlotList->slot;
785 tmp = afs_freeSlotList;
786 afs_freeSlotList = tmp->next;
787 afs_osi_Free(tmp, sizeof(struct afs_slotlist));
789 tvc->diskSlot = afs_nextVcacheSlot++;
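/*
 * Illustrative sketch (hypothetical, not original code): the reverse of the
 * slot allocation above, returning a vcache's disk slot to afs_freeSlotList.
 * The helper name is made up for this example; as noted where they are
 * defined, both the free list and afs_nextVcacheSlot are protected by
 * afs_xvcache.
 */
#if 0
static void
afs_FreeVcacheSlot(afs_int32 slot)
{
    struct afs_slotlist *tmp;

    tmp = afs_osi_Alloc(sizeof(struct afs_slotlist));
    osi_Assert(tmp != NULL);
    tmp->slot = slot;
    tmp->next = afs_freeSlotList;	/* push onto the head of the free list */
    afs_freeSlotList = tmp;
}
#endif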
795 /* Pre-populate a newly allocated vcache. On platforms where the actual
796 * vnode is attached to the vcache, this function is called before attachment,
797 * therefore it cannot perform any actions on the vnode itself */
800 afs_PrePopulateVCache(struct vcache *avc, struct VenusFid *afid,
801 struct server *serverp) {
804 slot = avc->diskSlot;
806 osi_PrePopulateVCache(avc);
808 avc->diskSlot = slot;
809 QZero(&avc->metadirty);
811 AFS_RWLOCK_INIT(&avc->lock, "vcache lock");
814 avc->linkData = NULL;
817 avc->execsOrWriters = 0;
819 avc->f.states = CVInit;
820 avc->last_looker = 0;
822 avc->asynchrony = -1;
826 avc->f.truncPos = AFS_NOTRUNC; /* don't truncate until we need to */
827 hzero(avc->f.m.DataVersion); /* in case we copy it into flushDV */
829 avc->callback = serverp; /* to minimize chance that clear
832 #if defined(AFS_CACHE_BYPASS)
833 avc->cachingStates = 0;
834 avc->cachingTransitions = 0;
839 afs_FlushAllVCaches(void)
842 struct vcache *tvc, *nvc;
844 ObtainWriteLock(&afs_xvcache, 867);
847 for (i = 0; i < VCSIZE; i++) {
848 for (tvc = afs_vhashT[i]; tvc; tvc = nvc) {
852 if (afs_FlushVCache(tvc, &slept)) {
853 afs_warn("Failed to flush vcache 0x%lx\n", (unsigned long)(uintptrsz)tvc);
861 ReleaseWriteLock(&afs_xvcache);
865 * This routine is responsible for allocating a new cache entry
866 * from the free list. It formats the cache entry and inserts it
867 * into the appropriate hash tables. It must be called with
868 * afs_xvcache write-locked so as to prevent several processes from
869 * trying to create a new cache entry simultaneously.
871 * LOCK: afs_NewVCache afs_xvcache W
873 * \param afid The file id of the file whose cache entry is being created.
875 * \return The new vcache struct.
878 static_inline struct vcache *
879 afs_NewVCache_int(struct VenusFid *afid, struct server *serverp, int seq)
883 afs_int32 anumber = VCACHE_FREE;
885 AFS_STATCNT(afs_NewVCache);
887 afs_FlushReclaimedVcaches();
889 #if defined(AFS_LINUX22_ENV)
890 if(!afsd_dynamic_vcaches && afs_vcount >= afs_maxvcount) {
891 afs_ShakeLooseVCaches(anumber);
892 if (afs_vcount >= afs_maxvcount) {
893 afs_warn("afs_NewVCache - none freed\n");
897 tvc = afs_AllocVCache();
898 #else /* AFS_LINUX22_ENV */
899 /* pull out a free cache entry */
901 afs_ShakeLooseVCaches(anumber);
905 tvc = afs_AllocVCache();
907 tvc = freeVCList; /* take from free list */
908 freeVCList = tvc->nextfree;
909 tvc->nextfree = NULL;
910 afs_vcount++; /* balanced by FlushVCache */
911 } /* end of if (!freeVCList) */
913 #endif /* AFS_LINUX22_ENV */
915 #if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
917 panic("afs_NewVCache(): free vcache with vnode attached");
920 /* Populate the vcache with as much as we can. */
921 afs_PrePopulateVCache(tvc, afid, serverp);
923 /* Thread the vcache onto the VLRU */
928 tvc->hnext = afs_vhashT[i];
930 QAdd(&afs_vhashTV[j], &tvc->vhashq);
932 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
933 refpanic("NewVCache VLRU inconsistent");
935 QAdd(&VLRU, &tvc->vlruq); /* put in lruq */
936 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
937 refpanic("NewVCache VLRU inconsistent2");
939 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
940 refpanic("NewVCache VLRU inconsistent3");
942 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
943 refpanic("NewVCache VLRU inconsistent4");
947 /* it should now be safe to drop the xvcache lock - so attach an inode
948 * to this vcache, where necessary */
949 osi_AttachVnode(tvc, seq);
951 /* Get a reference count to hold this vcache for the VLRUQ. Note that
952 * we have to do this after attaching the vnode, because the reference
953 * count may be held in the vnode itself */
955 #if defined(AFS_LINUX22_ENV)
956 /* Hold it for the LRU (should make count 2) */
958 #elif !(defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV))
959 VREFCOUNT_SET(tvc, 1); /* us */
962 #if defined (AFS_FBSD_ENV)
963 if (tvc->f.states & CVInit)
965 afs_PostPopulateVCache(tvc, afid, seq);
972 afs_NewVCache(struct VenusFid *afid, struct server *serverp)
974 return afs_NewVCache_int(afid, serverp, 0);
978 afs_NewBulkVCache(struct VenusFid *afid, struct server *serverp, int seq)
980 return afs_NewVCache_int(afid, serverp, seq);
986 * LOCK: afs_FlushActiveVcaches afs_xvcache N
988 * \param doflocks : Do we handle flocks?
991 afs_FlushActiveVcaches(afs_int32 doflocks)
997 afs_ucred_t *cred = NULL;
998 struct vrequest *treq = NULL;
999 struct AFSVolSync tsync;
1002 AFS_STATCNT(afs_FlushActiveVcaches);
1004 code = afs_CreateReq(&treq, NULL);
1006 afs_warn("unable to alloc treq\n");
1010 ObtainReadLock(&afs_xvcache);
1011 for (i = 0; i < VCSIZE; i++) {
1012 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
1013 if (tvc->f.states & CVInit) continue;
1014 #ifdef AFS_DARWIN80_ENV
1015 if (tvc->f.states & CDeadVnode &&
1016 (tvc->f.states & (CCore|CUnlinkedDel) ||
1017 tvc->flockCount)) panic("Dead vnode has core/unlinkedel/flock");
1019 if (doflocks && tvc->flockCount != 0) {
1020 struct rx_connection *rxconn;
1021 /* if this entry has an flock, send a keep-alive call out */
1023 ReleaseReadLock(&afs_xvcache);
1024 ObtainWriteLock(&tvc->lock, 51);
1026 code = afs_InitReq(treq, afs_osi_credp);
1029 break; /* shutting down: do not try to extend the lock */
1031 treq->flags |= O_NONBLOCK;
1033 tc = afs_Conn(&tvc->f.fid, treq, SHARED_LOCK, &rxconn);
1035 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
1038 RXAFS_ExtendLock(rxconn,
1039 (struct AFSFid *)&tvc->f.fid.Fid,
1045 } while (afs_Analyze
1046 (tc, rxconn, code, &tvc->f.fid, treq,
1047 AFS_STATS_FS_RPCIDX_EXTENDLOCK, SHARED_LOCK, NULL));
1049 ReleaseWriteLock(&tvc->lock);
1050 #ifdef AFS_DARWIN80_ENV
1052 ObtainReadLock(&afs_xvcache);
1054 ObtainReadLock(&afs_xvcache);
1059 if ((tvc->f.states & CCore) || (tvc->f.states & CUnlinkedDel)) {
1061 * Don't let it evaporate in case someone else is in
1062 * this code. Also, drop the afs_xvcache lock while
1063 * getting vcache locks.
1066 ReleaseReadLock(&afs_xvcache);
1067 #ifdef AFS_BOZONLOCK_ENV
1068 afs_BozonLock(&tvc->pvnLock, tvc);
1070 #if defined(AFS_SGI_ENV)
1072 * That's because if we come in via the CUnlinkedDel bit state path we'll have a 0 refcnt
1074 osi_Assert(VREFCOUNT_GT(tvc,0));
1075 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1077 ObtainWriteLock(&tvc->lock, 52);
1078 if (tvc->f.states & CCore) {
1079 tvc->f.states &= ~CCore;
1080 /* XXXX Find better place-holder for cred XXXX */
1081 cred = (afs_ucred_t *)tvc->linkData;
1082 tvc->linkData = NULL; /* XXX */
1083 code = afs_InitReq(treq, cred);
1084 afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
1085 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
1086 tvc->execsOrWriters);
1087 if (!code) { /* avoid store when shutting down */
1088 code = afs_StoreOnLastReference(tvc, treq);
1090 ReleaseWriteLock(&tvc->lock);
1091 #ifdef AFS_BOZONLOCK_ENV
1092 afs_BozonUnlock(&tvc->pvnLock, tvc);
1094 hzero(tvc->flushDV);
1097 if (code && code != VNOVNODE) {
1098 afs_StoreWarn(code, tvc->f.fid.Fid.Volume,
1099 /* /dev/console */ 1);
1101 } else if (tvc->f.states & CUnlinkedDel) {
1105 ReleaseWriteLock(&tvc->lock);
1106 #ifdef AFS_BOZONLOCK_ENV
1107 afs_BozonUnlock(&tvc->pvnLock, tvc);
1109 #if defined(AFS_SGI_ENV)
1110 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1112 afs_remunlink(tvc, 0);
1113 #if defined(AFS_SGI_ENV)
1114 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1117 /* lost (or won, perhaps) the race condition */
1118 ReleaseWriteLock(&tvc->lock);
1119 #ifdef AFS_BOZONLOCK_ENV
1120 afs_BozonUnlock(&tvc->pvnLock, tvc);
1123 #if defined(AFS_SGI_ENV)
1124 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1126 #ifdef AFS_DARWIN80_ENV
1129 AFS_RELE(AFSTOV(tvc));
1130 /* Matches write code setting CCore flag */
1133 ObtainReadLock(&afs_xvcache);
1135 ObtainReadLock(&afs_xvcache);
1138 AFS_RELE(AFSTOV(tvc));
1139 /* Matches write code setting CCore flag */
1146 ReleaseReadLock(&afs_xvcache);
1147 afs_DestroyReq(treq);
1168 * Make sure a cache entry is up-to-date status-wise.
1170 * NOTE: everywhere that calls this can potentially be sped up
1171 * by checking CStatd first, and avoiding doing the InitReq
1172 * if this is up-to-date.
1174 * These days, the only places that call this already KNOW that the
1175 * vcache is not up-to-date, so we don't screw around.
1177 * \param avc Pointer to vcache entry to verify.
1180 * \return 0 for success or other error codes.
1183 afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
1187 AFS_STATCNT(afs_VerifyVCache);
1189 /* otherwise we must fetch the status info */
1191 ObtainWriteLock(&avc->lock, 53);
1192 if (avc->f.states & CStatd) {
1193 ReleaseWriteLock(&avc->lock);
1196 ObtainWriteLock(&afs_xcbhash, 461);
1197 avc->f.states &= ~(CStatd | CUnique);
1198 avc->callback = NULL;
1199 afs_DequeueCallback(avc);
1200 ReleaseWriteLock(&afs_xcbhash);
1201 ReleaseWriteLock(&avc->lock);
1203 /* since we've been called back, or the callback has expired,
1204 * it's possible that the contents of this directory, or this
1205 * file's name have changed, thus invalidating the dnlc contents.
1207 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
1208 osi_dnlc_purgedp(avc);
1210 osi_dnlc_purgevp(avc);
1212 /* fetch the status info */
1213 tvc = afs_GetVCache(&avc->f.fid, areq, NULL, avc);
1216 /* Put it back; caller has already incremented vrefCount */
1220 } /*afs_VerifyVCache */
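/*
 * Illustrative sketch (assumption, not original code) of the speed-up the
 * note above suggests: a caller can test CStatd itself and skip the
 * vrequest setup entirely when the entry is already valid.
 */
#if 0
    if (!(avc->f.states & CStatd)) {
	/* only pay for afs_InitReq() and the verification path when needed */
	code = afs_VerifyVCache2(avc, areq);
	if (code)
	    return code;
    }
#endif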
1224 * Simple copy of stat info into cache.
1226 * Callers: as of 1992-04-29, only called by WriteVCache
1228 * \param avc Ptr to vcache entry involved.
1229 * \param astat Ptr to stat info to copy.
1233 afs_SimpleVStat(struct vcache *avc,
1234 struct AFSFetchStatus *astat, struct vrequest *areq)
1237 AFS_STATCNT(afs_SimpleVStat);
1239 #ifdef AFS_64BIT_CLIENT
1240 FillInt64(length, astat->Length_hi, astat->Length);
1241 #else /* AFS_64BIT_CLIENT */
1242 length = astat->Length;
1243 #endif /* AFS_64BIT_CLIENT */
1245 #if defined(AFS_SGI_ENV)
1246 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1247 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1248 osi_Assert((valusema(&avc->vc_rwlock) <= 0)
1249 && (OSI_GET_LOCKID() == avc->vc_rwlockid));
1250 if (length < avc->f.m.Length) {
1251 vnode_t *vp = (vnode_t *) avc;
1253 osi_Assert(WriteLocked(&avc->lock));
1254 ReleaseWriteLock(&avc->lock);
1256 PTOSSVP(vp, (off_t) length, (off_t) MAXLONG);
1258 ObtainWriteLock(&avc->lock, 67);
1263 if (!afs_DirtyPages(avc)) {
1264 /* if actively writing the file, don't fetch over this value */
1265 afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT, ICL_TYPE_POINTER, avc,
1266 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
1267 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1268 avc->f.m.Length = length;
1269 avc->f.m.Date = astat->ClientModTime;
1271 avc->f.m.Owner = astat->Owner;
1272 avc->f.m.Group = astat->Group;
1273 avc->f.m.Mode = astat->UnixModeBits;
1274 if (vType(avc) == VREG) {
1275 avc->f.m.Mode |= S_IFREG;
1276 } else if (vType(avc) == VDIR) {
1277 avc->f.m.Mode |= S_IFDIR;
1278 } else if (vType(avc) == VLNK) {
1279 avc->f.m.Mode |= S_IFLNK;
1280 if ((avc->f.m.Mode & 0111) == 0)
1283 if (avc->f.states & CForeign) {
1284 struct axscache *ac;
1285 avc->f.anyAccess = astat->AnonymousAccess;
1287 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1289 * Caller has at least one bit not covered by anonymous, and
1290 * thus may have interesting rights.
1292 * HOWEVER, this is a really bad idea, because any access query
1293 * for bits which aren't covered by anonymous, on behalf of a user
1294 * who doesn't have any special rights, will result in an answer of
1295 * the form "I don't know, let's make a FetchStatus RPC and find out!"
1296 * It's an especially bad idea under Ultrix, since (due to the lack of
1297 * a proper access() call) it must perform several afs_access() calls
1298 * in order to create magic mode bits that vary according to who makes
1299 * the call. In other words, _every_ stat() generates a test for
1302 #endif /* badidea */
1303 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1304 ac->axess = astat->CallerAccess;
1305 else /* not found, add a new one if possible */
1306 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1309 } /*afs_SimpleVStat */
1313 * Store the status info *only* back to the server for a
1316 * Environment: Must be called with a shared lock held on the vnode.
1318 * \param avc Ptr to the vcache entry.
1319 * \param astatus Ptr to the status info to store.
1320 * \param areq Ptr to the associated vrequest.
1322 * \return Operation status.
1326 afs_WriteVCache(struct vcache *avc,
1327 struct AFSStoreStatus *astatus,
1328 struct vrequest *areq)
1331 struct afs_conn *tc;
1332 struct AFSFetchStatus OutStatus;
1333 struct AFSVolSync tsync;
1334 struct rx_connection *rxconn;
1336 AFS_STATCNT(afs_WriteVCache);
1337 afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
1338 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
1340 tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
1342 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
1345 RXAFS_StoreStatus(rxconn, (struct AFSFid *)&avc->f.fid.Fid,
1346 astatus, &OutStatus, &tsync);
1351 } while (afs_Analyze
1352 (tc, rxconn, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
1353 SHARED_LOCK, NULL));
1355 UpgradeSToWLock(&avc->lock, 20);
1357 /* success, do the changes locally */
1358 afs_SimpleVStat(avc, &OutStatus, areq);
1360 * Update the date, too. SimpleVStat didn't do this, since
1361 * it thought we were doing this after fetching new status
1362 * over a file being written.
1364 avc->f.m.Date = OutStatus.ClientModTime;
1366 /* failure, set up to check with server next time */
1367 ObtainWriteLock(&afs_xcbhash, 462);
1368 afs_DequeueCallback(avc);
1369 avc->f.states &= ~(CStatd | CUnique); /* turn off stat valid flag */
1370 ReleaseWriteLock(&afs_xcbhash);
1371 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
1372 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
1374 ConvertWToSLock(&avc->lock);
1377 } /*afs_WriteVCache */
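/*
 * Illustrative sketch (assumption, not original code) of the RPC retry
 * idiom used in afs_WriteVCache above and throughout this file: pick a
 * connection with afs_Conn(), make the call, and let afs_Analyze() decide
 * whether to retry against another server or give up.  Statistics timing
 * is omitted here for brevity.
 */
#if 0
    do {
	tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
	if (tc) {
	    code = RXAFS_StoreStatus(rxconn, (struct AFSFid *)&avc->f.fid.Fid,
				     astatus, &OutStatus, &tsync);
	} else
	    code = -1;		/* no usable connection; let afs_Analyze decide */
    } while (afs_Analyze(tc, rxconn, code, &avc->f.fid, areq,
			 AFS_STATS_FS_RPCIDX_STORESTATUS, SHARED_LOCK, NULL));
#endif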
1380 * Store status info only locally, set the proper disconnection flags
1381 * and add to dirty list.
1383 * \param avc The vcache to be written locally.
1384 * \param astatus Get attr fields from local store.
1385 * \param attrs Only the va_size field of this is used.
1387 * \note Must be called with a shared lock on the vnode
1390 afs_WriteVCacheDiscon(struct vcache *avc,
1391 struct AFSStoreStatus *astatus,
1392 struct vattr *attrs)
1395 afs_int32 flags = 0;
1397 UpgradeSToWLock(&avc->lock, 700);
1399 if (!astatus->Mask) {
1405 /* Set attributes. */
1406 if (astatus->Mask & AFS_SETMODTIME) {
1407 avc->f.m.Date = astatus->ClientModTime;
1408 flags |= VDisconSetTime;
1411 if (astatus->Mask & AFS_SETOWNER) {
1412 /* printf("Not allowed yet. \n"); */
1413 /*avc->f.m.Owner = astatus->Owner;*/
1416 if (astatus->Mask & AFS_SETGROUP) {
1417 /* printf("Not allowed yet. \n"); */
1418 /*avc->f.m.Group = astatus->Group;*/
1421 if (astatus->Mask & AFS_SETMODE) {
1422 avc->f.m.Mode = astatus->UnixModeBits;
1424 #if 0 /* XXX: Leaving this out, so it doesn't mess up the file type flag.*/
1426 if (vType(avc) == VREG) {
1427 avc->f.m.Mode |= S_IFREG;
1428 } else if (vType(avc) == VDIR) {
1429 avc->f.m.Mode |= S_IFDIR;
1430 } else if (vType(avc) == VLNK) {
1431 avc->f.m.Mode |= S_IFLNK;
1432 if ((avc->f.m.Mode & 0111) == 0)
1436 flags |= VDisconSetMode;
1437 } /* if(astatus.Mask & AFS_SETMODE) */
1439 } /* if (!astatus->Mask) */
1441 if (attrs->va_size > 0) {
1442 /* XXX: Do I need more checks? */
1443 /* Truncation operation. */
1444 flags |= VDisconTrunc;
1448 afs_DisconAddDirty(avc, flags, 1);
1450 /* XXX: How about the rest of the fields? */
1452 ConvertWToSLock(&avc->lock);
1458 * Copy astat block into vcache info
1460 * \note This code may get dataversion and length out of sync if the file has
1461 * been modified. This is less than ideal. I haven't thought about it sufficiently
1462 * to be certain that it is adequate.
1464 * \note Environment: Must be called under a write lock
1466 * \param avc Ptr to vcache entry.
1467 * \param astat Ptr to stat block to copy in.
1468 * \param areq Ptr to associated request.
1471 afs_ProcessFS(struct vcache *avc,
1472 struct AFSFetchStatus *astat, struct vrequest *areq)
1475 AFS_STATCNT(afs_ProcessFS);
1477 #ifdef AFS_64BIT_CLIENT
1478 FillInt64(length, astat->Length_hi, astat->Length);
1479 #else /* AFS_64BIT_CLIENT */
1480 length = astat->Length;
1481 #endif /* AFS_64BIT_CLIENT */
1482 /* WARNING: afs_DoBulkStat uses the Length field to store a sequence
1483 * number for each bulk status request. Under no circumstances
1484 * should afs_DoBulkStat store a sequence number if the new
1485 * length will be ignored when afs_ProcessFS is called with
1486 * new stats. If you change the following conditional then you
1487 * also need to change the conditional in afs_DoBulkStat. */
1489 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1490 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1492 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
1494 /* if we're writing or mapping this file, don't fetch over these
1497 afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
1498 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
1499 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1500 avc->f.m.Length = length;
1501 avc->f.m.Date = astat->ClientModTime;
1503 hset64(avc->f.m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
1504 avc->f.m.Owner = astat->Owner;
1505 avc->f.m.Mode = astat->UnixModeBits;
1506 avc->f.m.Group = astat->Group;
1507 avc->f.m.LinkCount = astat->LinkCount;
1508 if (astat->FileType == File) {
1509 vSetType(avc, VREG);
1510 avc->f.m.Mode |= S_IFREG;
1511 } else if (astat->FileType == Directory) {
1512 vSetType(avc, VDIR);
1513 avc->f.m.Mode |= S_IFDIR;
1514 } else if (astat->FileType == SymbolicLink) {
1515 if (afs_fakestat_enable && (avc->f.m.Mode & 0111) == 0) {
1516 vSetType(avc, VDIR);
1517 avc->f.m.Mode |= S_IFDIR;
1519 vSetType(avc, VLNK);
1520 avc->f.m.Mode |= S_IFLNK;
1522 if ((avc->f.m.Mode & 0111) == 0) {
1526 avc->f.anyAccess = astat->AnonymousAccess;
1528 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1530 * Caller has at least one bit not covered by anonymous, and
1531 * thus may have interesting rights.
1533 * HOWEVER, this is a really bad idea, because any access query
1534 * for bits which aren't covered by anonymous, on behalf of a user
1535 * who doesn't have any special rights, will result in an answer of
1536 * the form "I don't know, let's make a FetchStatus RPC and find out!"
1537 * It's an especially bad idea under Ultrix, since (due to the lack of
1538 * a proper access() call) it must perform several afs_access() calls
1539 * in order to create magic mode bits that vary according to who makes
1540 * the call. In other words, _every_ stat() generates a test for
1543 #endif /* badidea */
1545 struct axscache *ac;
1546 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1547 ac->axess = astat->CallerAccess;
1548 else /* not found, add a new one if possible */
1549 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1551 } /*afs_ProcessFS */
1555 * Get fid from server.
1558 * \param areq Request to be passed on.
1559 * \param name Name of ?? to look up.
1560 * \param OutStatus Fetch status.
1565 * \return Success status of operation.
1568 afs_RemoteLookup(struct VenusFid *afid, struct vrequest *areq,
1569 char *name, struct VenusFid *nfid,
1570 struct AFSFetchStatus *OutStatusp,
1571 struct AFSCallBack *CallBackp, struct server **serverp,
1572 struct AFSVolSync *tsyncp)
1575 struct afs_conn *tc;
1576 struct rx_connection *rxconn;
1577 struct AFSFetchStatus OutDirStatus;
1580 name = ""; /* XXX */
1582 tc = afs_Conn(afid, areq, SHARED_LOCK, &rxconn);
1585 *serverp = tc->parent->srvr->server;
1586 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
1589 RXAFS_Lookup(rxconn, (struct AFSFid *)&afid->Fid, name,
1590 (struct AFSFid *)&nfid->Fid, OutStatusp,
1591 &OutDirStatus, CallBackp, tsyncp);
1596 } while (afs_Analyze
1597 (tc, rxconn, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
1607 * Given a file id and a vrequest structure, fetch the status
1608 * information associated with the file.
1610 * \param afid File ID.
1611 * \param areq Ptr to associated vrequest structure, specifying the
1612 * user whose authentication tokens will be used.
1613 * \param avc Caller may already have a vcache for this file, which is
1616 * \note Environment:
1617 * The cache entry is returned with an increased vrefCount field.
1618 * The entry must be discarded by calling afs_PutVCache when you
1619 * are through using the pointer to the cache entry.
1621 * You should not hold any locks when calling this function, except
1622 * locks on other vcache entries. If you lock more than one vcache
1623 * entry simultaneously, you should lock them in this order:
1625 * 1. Lock all files first, then directories.
1626 * 2. Within a particular type, lock entries in Fid.Vnode order.
1628 * This locking hierarchy is convenient because it allows locking
1629 * of a parent dir cache entry, given a file (to check its access
1630 * control list). It also allows renames to be handled easily by
1631 * locking directories in a constant order.
1633 * \note NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
1635 * \note Might have a vcache structure already, which must
1636 * already be held by the caller
1639 afs_GetVCache(struct VenusFid *afid, struct vrequest *areq,
1640 afs_int32 * cached, struct vcache *avc)
1643 afs_int32 code, newvcache = 0;
1648 AFS_STATCNT(afs_GetVCache);
1651 *cached = 0; /* Init just in case */
1653 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1657 ObtainSharedLock(&afs_xvcache, 5);
1659 tvc = afs_FindVCache(afid, &retry, DO_STATS | DO_VLRU | IS_SLOCK);
1661 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1662 ReleaseSharedLock(&afs_xvcache);
1663 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1670 osi_Assert((tvc->f.states & CVInit) == 0);
1671 /* If we are in readdir, return the vnode even if not statd */
1672 if ((tvc->f.states & CStatd) || afs_InReadDir(tvc)) {
1673 ReleaseSharedLock(&afs_xvcache);
1677 UpgradeSToWLock(&afs_xvcache, 21);
1679 /* no cache entry, better grab one */
1680 tvc = afs_NewVCache(afid, NULL);
1683 ConvertWToSLock(&afs_xvcache);
1686 ReleaseSharedLock(&afs_xvcache);
1690 afs_stats_cmperf.vcacheMisses++;
1693 ReleaseSharedLock(&afs_xvcache);
1695 ObtainWriteLock(&tvc->lock, 54);
1697 if (tvc->f.states & CStatd) {
1698 ReleaseWriteLock(&tvc->lock);
1701 #ifdef AFS_DARWIN80_ENV
1702 /* Darwin 8.0 only has bufs in nfs, so we shouldn't have to worry about them.
1705 #if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
1707 * XXX - I really don't like this. Should try to understand better.
1708 * It seems that sometimes, when we get called, we already hold the
1709 * lock on the vnode (e.g., from afs_getattr via afs_VerifyVCache).
1710 * We can't drop the vnode lock, because that could result in a race.
1711 * Sometimes, though, we get here and don't hold the vnode lock.
1712 * I hate code paths that sometimes hold locks and sometimes don't.
1713 * In any event, the dodge we use here is to check whether the vnode
1714 * is locked, and if it isn't, then we gain and drop it around the call
1715 * to vinvalbuf; otherwise, we leave it alone.
1718 struct vnode *vp = AFSTOV(tvc);
1721 #if defined(AFS_DARWIN_ENV)
1722 iheldthelock = VOP_ISLOCKED(vp);
1724 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, current_proc());
1725 /* this is messy. we can call fsync which will try to reobtain this */
1726 if (VTOAFS(vp) == tvc)
1727 ReleaseWriteLock(&tvc->lock);
1728 if (UBCINFOEXISTS(vp)) {
1729 vinvalbuf(vp, V_SAVE, &afs_osi_cred, current_proc(), PINOD, 0);
1731 if (VTOAFS(vp) == tvc)
1732 ObtainWriteLock(&tvc->lock, 954);
1734 VOP_UNLOCK(vp, LK_EXCLUSIVE, current_proc());
1735 #elif defined(AFS_FBSD80_ENV)
1736 iheldthelock = VOP_ISLOCKED(vp);
1737 if (!iheldthelock) {
1738 /* nosleep/sleep lock order reversal */
1739 int glocked = ISAFS_GLOCK();
1742 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1746 vinvalbuf(vp, V_SAVE, PINOD, 0); /* changed late in 8.0-CURRENT */
1749 #elif defined(AFS_FBSD60_ENV)
1750 iheldthelock = VOP_ISLOCKED(vp, curthread);
1752 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
1754 vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
1757 VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
1758 #elif defined(AFS_FBSD_ENV)
1759 iheldthelock = VOP_ISLOCKED(vp, curthread);
1761 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
1762 vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
1764 VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
1765 #elif defined(AFS_OBSD_ENV)
1766 iheldthelock = VOP_ISLOCKED(vp, curproc);
1768 VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
1769 uvm_vnp_uncache(vp);
1771 VOP_UNLOCK(vp, 0, curproc);
1772 #elif defined(AFS_NBSD40_ENV)
1773 iheldthelock = VOP_ISLOCKED(vp);
1774 if (!iheldthelock) {
1775 VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
1777 uvm_vnp_uncache(vp);
1785 ObtainWriteLock(&afs_xcbhash, 464);
1786 tvc->f.states &= ~CUnique;
1788 afs_DequeueCallback(tvc);
1789 ReleaseWriteLock(&afs_xcbhash);
1791 /* Is it always appropriate to throw away all the access rights? */
1792 afs_FreeAllAxs(&(tvc->Access));
1793 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-volume info */
1795 if ((tvp->states & VForeign)) {
1797 tvc->f.states |= CForeign;
1798 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
1799 && (tvp->rootUnique == afid->Fid.Unique)) {
1803 if (tvp->states & VRO)
1804 tvc->f.states |= CRO;
1805 if (tvp->states & VBackup)
1806 tvc->f.states |= CBackup;
1807 /* now copy ".." entry back out of volume structure, if necessary */
1808 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
1810 tvc->mvid = (struct VenusFid *)
1811 osi_AllocSmallSpace(sizeof(struct VenusFid));
1812 *tvc->mvid = tvp->dotdot;
1814 afs_PutVolume(tvp, READ_LOCK);
1818 afs_RemoveVCB(afid);
1820 struct AFSFetchStatus OutStatus;
1822 if (afs_DynrootNewVnode(tvc, &OutStatus)) {
1823 afs_ProcessFS(tvc, &OutStatus, areq);
1824 tvc->f.states |= CStatd | CUnique;
1825 tvc->f.parent.vnode = OutStatus.ParentVnode;
1826 tvc->f.parent.unique = OutStatus.ParentUnique;
1830 if (AFS_IS_DISCONNECTED) {
1831 /* Nothing to do otherwise...*/
1833 /* printf("Network is down in afs_GetCache"); */
1835 code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
1837 /* For the NFS translator's benefit, make sure
1838 * non-directory vnodes always have their parent FID set
1839 * correctly, even when created as a result of decoding an
1840 * NFS filehandle. It would be nice to also do this for
1841 * directories, but we can't because the fileserver fills
1842 * in the FID of the directory itself instead of that of
1845 if (!code && OutStatus.FileType != Directory &&
1846 !tvc->f.parent.vnode) {
1847 tvc->f.parent.vnode = OutStatus.ParentVnode;
1848 tvc->f.parent.unique = OutStatus.ParentUnique;
1849 /* XXX - SXW - It's conceivable we should mark ourselves
1850 * as dirty again here, in case we've been raced
1851 * out of the FetchStatus call.
1858 ReleaseWriteLock(&tvc->lock);
1864 ReleaseWriteLock(&tvc->lock);
1867 } /*afs_GetVCache */
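/*
 * Illustrative sketch (hypothetical, not original code) of the locking
 * hierarchy described in the afs_GetVCache comment above: lock files before
 * directories, and within a type lock entries in ascending Fid.Vnode order.
 * The helper name is made up for this example.
 */
#if 0
static int
afs_LockOrderBefore(struct vcache *a, struct vcache *b)
{
    int adir = (vType(a) == VDIR);
    int bdir = (vType(b) == VDIR);

    if (adir != bdir)
	return !adir;		/* non-directories (files) are locked first */
    return a->f.fid.Fid.Vnode < b->f.fid.Fid.Vnode;
}
#endif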
1872 * Look up a vcache by fid. Look inside the cache first; if it is not
1873 * there, look up the file on the server, and then get its fresh
1878 * \param cached Is element cached? If NULL, don't answer.
1882 * \return The found element or NULL.
1885 afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
1886 afs_int32 * cached, struct vcache *adp, char *aname)
1888 afs_int32 code, now, newvcache = 0;
1889 struct VenusFid nfid;
1892 struct AFSFetchStatus OutStatus;
1893 struct AFSCallBack CallBack;
1894 struct AFSVolSync tsync;
1895 struct server *serverp = 0;
1899 AFS_STATCNT(afs_GetVCache);
1901 *cached = 0; /* Init just in case */
1903 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1907 ObtainReadLock(&afs_xvcache);
1908 tvc = afs_FindVCache(afid, &retry, DO_STATS /* no vlru */ );
1911 ReleaseReadLock(&afs_xvcache);
1913 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1914 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1918 ObtainReadLock(&tvc->lock);
1920 if (tvc->f.states & CStatd) {
1924 ReleaseReadLock(&tvc->lock);
1927 tvc->f.states &= ~CUnique;
1929 ReleaseReadLock(&tvc->lock);
1931 ObtainReadLock(&afs_xvcache);
1934 ReleaseReadLock(&afs_xvcache);
1936 /* lookup the file */
1939 origCBs = afs_allCBs; /* if anything changes, we don't have a cb */
1941 if (AFS_IS_DISCONNECTED) {
1942 /* printf("Network is down in afs_LookupVcache\n"); */
1946 afs_RemoteLookup(&adp->f.fid, areq, aname, &nfid, &OutStatus,
1947 &CallBack, &serverp, &tsync);
1949 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1953 ObtainSharedLock(&afs_xvcache, 6);
1954 tvc = afs_FindVCache(&nfid, &retry, DO_VLRU | IS_SLOCK/* no xstats now */ );
1956 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1957 ReleaseSharedLock(&afs_xvcache);
1958 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1964 /* no cache entry, better grab one */
1965 UpgradeSToWLock(&afs_xvcache, 22);
1966 tvc = afs_NewVCache(&nfid, serverp);
1968 ConvertWToSLock(&afs_xvcache);
1971 ReleaseSharedLock(&afs_xvcache);
1976 ReleaseSharedLock(&afs_xvcache);
1977 ObtainWriteLock(&tvc->lock, 55);
1979 /* Is it always appropriate to throw away all the access rights? */
1980 afs_FreeAllAxs(&(tvc->Access));
1981 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-vol info */
1983 if ((tvp->states & VForeign)) {
1985 tvc->f.states |= CForeign;
1986 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
1987 && (tvp->rootUnique == afid->Fid.Unique))
1990 if (tvp->states & VRO)
1991 tvc->f.states |= CRO;
1992 if (tvp->states & VBackup)
1993 tvc->f.states |= CBackup;
1994 /* now copy ".." entry back out of volume structure, if necessary */
1995 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
1997 tvc->mvid = (struct VenusFid *)
1998 osi_AllocSmallSpace(sizeof(struct VenusFid));
1999 *tvc->mvid = tvp->dotdot;
2004 ObtainWriteLock(&afs_xcbhash, 465);
2005 afs_DequeueCallback(tvc);
2006 tvc->f.states &= ~(CStatd | CUnique);
2007 ReleaseWriteLock(&afs_xcbhash);
2008 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2009 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2011 afs_PutVolume(tvp, READ_LOCK);
2012 ReleaseWriteLock(&tvc->lock);
2017 ObtainWriteLock(&afs_xcbhash, 466);
2018 if (origCBs == afs_allCBs) {
2019 if (CallBack.ExpirationTime) {
2020 tvc->callback = serverp;
2021 tvc->cbExpires = CallBack.ExpirationTime + now;
2022 tvc->f.states |= CStatd | CUnique;
2023 tvc->f.states &= ~CBulkFetching;
2024 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
2025 } else if (tvc->f.states & CRO) {
2026 /* adapt gives us an hour. */
2027 tvc->cbExpires = 3600 + osi_Time();
2028 /*XXX*/ tvc->f.states |= CStatd | CUnique;
2029 tvc->f.states &= ~CBulkFetching;
2030 afs_QueueCallback(tvc, CBHash(3600), tvp);
2032 tvc->callback = NULL;
2033 afs_DequeueCallback(tvc);
2034 tvc->f.states &= ~(CStatd | CUnique);
2035 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2036 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2039 afs_DequeueCallback(tvc);
2040 tvc->f.states &= ~CStatd;
2041 tvc->f.states &= ~CUnique;
2042 tvc->callback = NULL;
2043 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2044 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2046 ReleaseWriteLock(&afs_xcbhash);
2048 afs_PutVolume(tvp, READ_LOCK);
2049 afs_ProcessFS(tvc, &OutStatus, areq);
2051 ReleaseWriteLock(&tvc->lock);
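/*
 * Illustrative sketch (not original code) of the callback race check used
 * in afs_LookupVCache above and in afs_GetRootVCache below: afs_allCBs is
 * sampled before the RPC, and if it changed while the RPC was in flight a
 * callback may have been broken for this file, so the new status is not
 * marked CStatd.
 */
#if 0
    origCBs = afs_allCBs;	/* sample before issuing the RPC */
    /* ... RPC that returns a callback promise ... */
    ObtainWriteLock(&afs_xcbhash, 466);
    if (origCBs == afs_allCBs) {
	/* no callbacks broken meanwhile: safe to believe the promise */
	tvc->f.states |= CStatd | CUnique;
    } else {
	/* raced with a callback break: leave the entry not statd */
	tvc->f.states &= ~(CStatd | CUnique);
    }
    ReleaseWriteLock(&afs_xcbhash);
#endif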
2057 afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
2058 afs_int32 * cached, struct volume *tvolp)
2060 afs_int32 code = 0, i, newvcache = 0, haveStatus = 0;
2061 afs_int32 getNewFid = 0;
2063 struct VenusFid nfid;
2065 struct server *serverp = 0;
2066 struct AFSFetchStatus OutStatus;
2067 struct AFSCallBack CallBack;
2068 struct AFSVolSync tsync;
2070 #ifdef AFS_DARWIN80_ENV
2077 if (!tvolp->rootVnode || getNewFid) {
2078 struct VenusFid tfid;
2081 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2082 origCBs = afs_allCBs; /* ignore InitCallBackState */
2084 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2089 /* ReleaseReadLock(&tvolp->lock); */
2090 ObtainWriteLock(&tvolp->lock, 56);
2091 tvolp->rootVnode = afid->Fid.Vnode = nfid.Fid.Vnode;
2092 tvolp->rootUnique = afid->Fid.Unique = nfid.Fid.Unique;
2093 ReleaseWriteLock(&tvolp->lock);
2094 /* ObtainReadLock(&tvolp->lock);*/
2097 afid->Fid.Vnode = tvolp->rootVnode;
2098 afid->Fid.Unique = tvolp->rootUnique;
2102 ObtainSharedLock(&afs_xvcache, 7);
2104 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2105 if (!FidCmp(&(tvc->f.fid), afid)) {
2106 if (tvc->f.states & CVInit) {
2107 ReleaseSharedLock(&afs_xvcache);
2108 afs_osi_Sleep(&tvc->f.states);
2111 #ifdef AFS_DARWIN80_ENV
2112 if (tvc->f.states & CDeadVnode) {
2113 ReleaseSharedLock(&afs_xvcache);
2114 afs_osi_Sleep(&tvc->f.states);
2118 if (vnode_get(tvp)) /* this bumps ref count */
2120 if (vnode_ref(tvp)) {
2122 /* AFSTOV(tvc) may be NULL */
2132 if (!haveStatus && (!tvc || !(tvc->f.states & CStatd))) {
2133 /* Mount point no longer stat'd or unknown. FID may have changed. */
2135 ReleaseSharedLock(&afs_xvcache);
2136 #ifdef AFS_DARWIN80_ENV
2139 vnode_put(AFSTOV(tvc));
2140 vnode_rele(AFSTOV(tvc));
2149 UpgradeSToWLock(&afs_xvcache, 23);
2150 /* no cache entry, better grab one */
2151 tvc = afs_NewVCache(afid, NULL);
2154 ReleaseWriteLock(&afs_xvcache);
2158 afs_stats_cmperf.vcacheMisses++;
2162 afs_stats_cmperf.vcacheHits++;
2163 #if defined(AFS_DARWIN80_ENV)
2164 /* we already bumped the ref count in the for loop above */
2165 #else /* AFS_DARWIN80_ENV */
2168 UpgradeSToWLock(&afs_xvcache, 24);
2169 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2170 refpanic("GRVC VLRU inconsistent0");
2172 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2173 refpanic("GRVC VLRU inconsistent1");
2175 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2176 refpanic("GRVC VLRU inconsistent2");
2178 QRemove(&tvc->vlruq); /* move to lruq head */
2179 QAdd(&VLRU, &tvc->vlruq);
2180 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2181 refpanic("GRVC VLRU inconsistent3");
2183 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2184 refpanic("GRVC VLRU inconsistent4");
2186 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2187 refpanic("GRVC VLRU inconsistent5");
2192 ReleaseWriteLock(&afs_xvcache);
2194 if (tvc->f.states & CStatd) {
2198 ObtainReadLock(&tvc->lock);
2199 tvc->f.states &= ~CUnique;
2200 tvc->callback = NULL; /* redundant, perhaps */
2201 ReleaseReadLock(&tvc->lock);
2204 ObtainWriteLock(&tvc->lock, 57);
2206 /* Is it always appropriate to throw away all the access rights? */
2207 afs_FreeAllAxs(&(tvc->Access));
2210 tvc->f.states |= CForeign;
2211 if (tvolp->states & VRO)
2212 tvc->f.states |= CRO;
2213 if (tvolp->states & VBackup)
2214 tvc->f.states |= CBackup;
2215 /* now copy ".." entry back out of volume structure, if necessary */
2216 if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
2217 && (tvolp->rootUnique == afid->Fid.Unique)) {
2220 if (tvc->mvstat == 2 && tvolp->dotdot.Fid.Volume != 0) {
2222 tvc->mvid = (struct VenusFid *)
2223 osi_AllocSmallSpace(sizeof(struct VenusFid));
2224 *tvc->mvid = tvolp->dotdot;
2228 afs_RemoveVCB(afid);
2231 struct VenusFid tfid;
2234 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2235 origCBs = afs_allCBs; /* ignore InitCallBackState */
2237 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2242 ObtainWriteLock(&afs_xcbhash, 467);
2243 afs_DequeueCallback(tvc);
2244 tvc->callback = NULL;
2245 tvc->f.states &= ~(CStatd | CUnique);
2246 ReleaseWriteLock(&afs_xcbhash);
2247 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2248 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2249 ReleaseWriteLock(&tvc->lock);
2254 ObtainWriteLock(&afs_xcbhash, 468);
2255 if (origCBs == afs_allCBs) {
2256 tvc->f.states |= CTruth;
2257 tvc->callback = serverp;
2258 if (CallBack.ExpirationTime != 0) {
2259 tvc->cbExpires = CallBack.ExpirationTime + start;
2260 tvc->f.states |= CStatd;
2261 tvc->f.states &= ~CBulkFetching;
2262 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
2263 } else if (tvc->f.states & CRO) {
2264 /* adapt gives us an hour. */
2265 tvc->cbExpires = 3600 + osi_Time();
2266 /*XXX*/ tvc->f.states |= CStatd;
2267 tvc->f.states &= ~CBulkFetching;
2268 afs_QueueCallback(tvc, CBHash(3600), tvolp);
2271 afs_DequeueCallback(tvc);
2272 tvc->callback = NULL;
2273 tvc->f.states &= ~(CStatd | CUnique);
2274 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2275 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2277 ReleaseWriteLock(&afs_xcbhash);
2278 afs_ProcessFS(tvc, &OutStatus, areq);
2280 ReleaseWriteLock(&tvc->lock);
2286 * Update callback status and (sometimes) attributes of a vnode.
2287 * Called after doing a fetch status RPC. Whilst disconnected, attributes
2288 * shouldn't be written to the vcache here.
2293 * \param Outsp Server status after rpc call.
2294 * \param acb Callback for this vnode.
2296 * \note The vcache must be write locked.
2299 afs_UpdateStatus(struct vcache *avc, struct VenusFid *afid,
2300 struct vrequest *areq, struct AFSFetchStatus *Outsp,
2301 struct AFSCallBack *acb, afs_uint32 start)
2303 struct volume *volp;
2306 /* Don't write status in vcache if resyncing after a disconnection. */
2307 afs_ProcessFS(avc, Outsp, areq);
2309 volp = afs_GetVolume(afid, areq, READ_LOCK);
2310 ObtainWriteLock(&afs_xcbhash, 469);
2311 avc->f.states |= CTruth;
2312 if (avc->callback /* check for race */ ) {
2313 if (acb->ExpirationTime != 0) {
2314 avc->cbExpires = acb->ExpirationTime + start;
2315 avc->f.states |= CStatd;
2316 avc->f.states &= ~CBulkFetching;
2317 afs_QueueCallback(avc, CBHash(acb->ExpirationTime), volp);
2318 } else if (avc->f.states & CRO) {
2319 /* ordinary callback on a read-only volume -- AFS 3.2 style */
2320 avc->cbExpires = 3600 + start;
2321 avc->f.states |= CStatd;
2322 avc->f.states &= ~CBulkFetching;
2323 afs_QueueCallback(avc, CBHash(3600), volp);
2325 afs_DequeueCallback(avc);
2326 avc->callback = NULL;
2327 avc->f.states &= ~(CStatd | CUnique);
2328 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
2329 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2332 afs_DequeueCallback(avc);
2333 avc->callback = NULL;
2334 avc->f.states &= ~(CStatd | CUnique);
2335 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
2336 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2338 ReleaseWriteLock(&afs_xcbhash);
2340 afs_PutVolume(volp, READ_LOCK);
2344 afs_BadFetchStatus(struct afs_conn *tc)
2346 int addr = ntohl(tc->parent->srvr->sa_ip);
2347 afs_warn("afs: Invalid AFSFetchStatus from server %u.%u.%u.%u\n",
2348 (addr >> 24) & 0xff, (addr >> 16) & 0xff, (addr >> 8) & 0xff,
2350 afs_warn("afs: This suggests the server may be sending bad data that "
2351 "can lead to availability issues or data corruption. The "
2352 "issue has been avoided for now, but it may not always be "
2353 "detectable. Please upgrade the server if possible.\n");
2357 * Check if a given AFSFetchStatus structure is sane.
2359 * @param[in] tc The server from which we received the status
2360 * @param[in] status The status we received
2362 * @return whether the given structure is valid or not
2363 * @retval 0 the structure is fine
2364 * @retval nonzero the structure looks like garbage; act as if we received
2365 * the returned error code from the server
2368 afs_CheckFetchStatus(struct afs_conn *tc, struct AFSFetchStatus *status)
2370 if (status->errorCode ||
2371 status->InterfaceVersion != 1 ||
2372 !(status->FileType > Invalid && status->FileType <= SymbolicLink) ||
2373 status->ParentVnode == 0 || status->ParentUnique == 0) {
2375 afs_warn("afs: FetchStatus ec %u iv %u ft %u pv %u pu %u\n",
2376 (unsigned)status->errorCode, (unsigned)status->InterfaceVersion,
2377 (unsigned)status->FileType, (unsigned)status->ParentVnode,
2378 (unsigned)status->ParentUnique);
2379 afs_BadFetchStatus(tc);
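/*
 * Editor's note: a minimal sketch (not built) of how a fetch-status reply is
 * vetted before being trusted; it mirrors the call made in afs_FetchStatus()
 * below. The rxconn/afid/Outsp/tsync variables are assumed from that context.
 */
#if 0
    code = RXAFS_FetchStatus(rxconn, (struct AFSFid *)&afid->Fid, Outsp,
			     &CallBack, &tsync);
    if (code == 0)
	code = afs_CheckFetchStatus(tc, Outsp);	/* nonzero: treat the reply as a server error */
#endif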
2387 * Must be called with avc write-locked
2388 * We don't absolutely have to invalidate the hint unless the data version
2389 * (dv) has changed, but be sure to get it right, or there will be consistency bugs.
2392 afs_FetchStatus(struct vcache * avc, struct VenusFid * afid,
2393 struct vrequest * areq, struct AFSFetchStatus * Outsp)
2396 afs_uint32 start = 0;
2397 struct afs_conn *tc;
2398 struct AFSCallBack CallBack;
2399 struct AFSVolSync tsync;
2400 struct rx_connection *rxconn;
2403 tc = afs_Conn(afid, areq, SHARED_LOCK, &rxconn);
2404 avc->dchint = NULL; /* invalidate hints */
2406 avc->callback = tc->parent->srvr->server;
2408 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
2411 RXAFS_FetchStatus(rxconn, (struct AFSFid *)&afid->Fid, Outsp,
2418 code = afs_CheckFetchStatus(tc, Outsp);
2423 } while (afs_Analyze
2424 (tc, rxconn, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
2425 SHARED_LOCK, NULL));
2428 afs_UpdateStatus(avc, afid, areq, Outsp, &CallBack, start);
2430 /* We used to undo the local callback here, but that's too extreme.
2431 * There are plenty of good reasons that FetchStatus might return
2432 * an error, such as EPERM. If we have the vnode cached, statd,
2433 * and with a callback, we might as well keep track of the fact that
2434 * we don't have access...
2436 if (code == EPERM || code == EACCES) {
2437 struct axscache *ac;
2438 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
2440 else /* not found, add a new one if possible */
2441 afs_AddAxs(avc->Access, areq->uid, 0);
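/*
 * Editor's note: a minimal sketch (not built) of refreshing a vcache with
 * afs_FetchStatus(). The vcache write lock is required on entry; the
 * lock-order id is a placeholder.
 */
#if 0
{
    struct AFSFetchStatus OutStatus;
    afs_int32 code;

    ObtainWriteLock(&avc->lock, 998);	/* placeholder lock-order id */
    code = afs_FetchStatus(avc, &avc->f.fid, areq, &OutStatus);
    ReleaseWriteLock(&avc->lock);
    if (code) {
	/* status (and callback) could not be refreshed; take the error path */
    }
}
#endif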
2452 * Stuff some information into the vcache for the given file.
2455 * afid : File in question.
2456 * OutStatus : Fetch status on the file.
2457 * CallBack : Callback info.
2458 * tc : RPC connection involved.
2459 * areq : vrequest involved.
2462 * Nothing interesting.
2465 afs_StuffVcache(struct VenusFid *afid,
2466 struct AFSFetchStatus *OutStatus,
2467 struct AFSCallBack *CallBack, struct afs_conn *tc,
2468 struct vrequest *areq)
2470 afs_int32 code, i, newvcache = 0;
2472 struct AFSVolSync tsync;
2474 struct axscache *ac;
2477 AFS_STATCNT(afs_StuffVcache);
2478 #ifdef IFS_VCACHECOUNT
2483 ObtainSharedLock(&afs_xvcache, 8);
2485 tvc = afs_FindVCache(afid, &retry, DO_VLRU| IS_SLOCK /* no stats */ );
2487 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2488 ReleaseSharedLock(&afs_xvcache);
2489 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2495 /* no cache entry, better grab one */
2496 UpgradeSToWLock(&afs_xvcache, 25);
2497 tvc = afs_NewVCache(afid, NULL);
2499 ConvertWToSLock(&afs_xvcache);
2502 ReleaseSharedLock(&afs_xvcache);
2507 ReleaseSharedLock(&afs_xvcache);
2508 ObtainWriteLock(&tvc->lock, 58);
2510 tvc->f.states &= ~CStatd;
2511 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2512 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2514 /* Is it always appropriate to throw away all the access rights? */
2515 afs_FreeAllAxs(&(tvc->Access));
2517 /*Copy useful per-volume info */
2518 tvp = afs_GetVolume(afid, areq, READ_LOCK);
2520 if (newvcache && (tvp->states & VForeign))
2521 tvc->f.states |= CForeign;
2522 if (tvp->states & VRO)
2523 tvc->f.states |= CRO;
2524 if (tvp->states & VBackup)
2525 tvc->f.states |= CBackup;
2527 * Now, copy the ".." entry back out of the volume structure, if necessary.
2530 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2532 tvc->mvid = (struct VenusFid *)
2533 osi_AllocSmallSpace(sizeof(struct VenusFid));
2534 *tvc->mvid = tvp->dotdot;
2537 /* store the stat on the file */
2538 afs_RemoveVCB(afid);
2539 afs_ProcessFS(tvc, OutStatus, areq);
2540 tvc->callback = tc->srvr->server;
2542 /* we use osi_Time twice below. Ideally, we would use the time at which
2543 * the FetchStatus call began, instead, but we don't have it here. So we
2544 * make do with "now". In the CRO case, it doesn't really matter. In
2545 * the other case, we hope that the difference between "now" and when the
2546 * call actually began execution on the server won't be larger than the
2547 * padding which the server keeps. Subtract 1 second anyway, to be on
2548 * the safe side. Can't subtract more because we don't know how big
2549 * ExpirationTime is. Possible consistency problems may arise if the call
2550 * timeout period becomes longer than the server's expiration padding. */
2551 ObtainWriteLock(&afs_xcbhash, 470);
2552 if (CallBack->ExpirationTime != 0) {
2553 tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
2554 tvc->f.states |= CStatd;
2555 tvc->f.states &= ~CBulkFetching;
2556 afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
2557 } else if (tvc->f.states & CRO) {
2558 /* old-fashioned AFS 3.2 style */
2559 tvc->cbExpires = 3600 + osi_Time();
2560 /*XXX*/ tvc->f.states |= CStatd;
2561 tvc->f.states &= ~CBulkFetching;
2562 afs_QueueCallback(tvc, CBHash(3600), tvp);
2564 afs_DequeueCallback(tvc);
2565 tvc->callback = NULL;
2566 tvc->f.states &= ~(CStatd | CUnique);
2567 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2568 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2570 ReleaseWriteLock(&afs_xcbhash);
2572 afs_PutVolume(tvp, READ_LOCK);
2574 /* look in per-pag cache */
2575 if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
2576 ac->axess = OutStatus->CallerAccess; /* substitute pags */
2577 else /* not found, add a new one if possible */
2578 afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);
2580 ReleaseWriteLock(&tvc->lock);
2581 afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
2582 ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32,
2583 tvc->cbExpires, ICL_TYPE_INT32, tvc->cbExpires - osi_Time());
2585 * Release ref count... hope this guy stays around...
2588 } /*afs_StuffVcache */
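/*
 * Editor's note: the two expiry computations side by side (illustrative only,
 * not built). afs_UpdateStatus() knows when the RPC started, so it anchors the
 * callback expiry there; afs_StuffVcache() only has "now", so it shaves a
 * second off as the comment above explains.
 */
#if 0
    avc->cbExpires = acb->ExpirationTime + start;		/* afs_UpdateStatus() */
    tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;	/* afs_StuffVcache() */
#endif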
2592 * Decrements the reference count on a cache entry.
2594 * \param avc Pointer to the cache entry to decrement.
2596 * \note Environment: Nothing interesting.
2599 afs_PutVCache(struct vcache *avc)
2601 AFS_STATCNT(afs_PutVCache);
2602 #ifdef AFS_DARWIN80_ENV
2603 vnode_put(AFSTOV(avc));
2607 * Can we use a read lock here?
2609 ObtainReadLock(&afs_xvcache);
2611 ReleaseReadLock(&afs_xvcache);
2613 } /*afs_PutVCache */
2617 * Reset a vcache entry, so local contents are ignored, and the
2618 * server will be reconsulted next time the vcache is used
2620 * \param avc Pointer to the cache entry to reset
2622 * \param skipdnlc skip the dnlc purge for this vnode
2624 * \note avc must be write locked on entry
2626 * \note The caller should purge the dnlc when skipdnlc is set.
2629 afs_ResetVCache(struct vcache *avc, afs_ucred_t *acred, afs_int32 skipdnlc)
2631 ObtainWriteLock(&afs_xcbhash, 456);
2632 afs_DequeueCallback(avc);
2633 avc->f.states &= ~(CStatd | CDirty); /* next reference will re-stat */
2634 ReleaseWriteLock(&afs_xcbhash);
2635 /* now find the disk cache entries */
2636 afs_TryToSmush(avc, acred, 1);
2638 osi_dnlc_purgedp(avc);
2640 if (avc->linkData && !(avc->f.states & CCore)) {
2641 afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
2642 avc->linkData = NULL;
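/*
 * Editor's note: a minimal sketch (not built) of invalidating an entry so the
 * fileserver is reconsulted on the next use. The credential variable and
 * lock-order id are placeholders.
 */
#if 0
    ObtainWriteLock(&avc->lock, 997);	/* placeholder lock-order id */
    afs_ResetVCache(avc, acred, 0);	/* skipdnlc == 0: let it purge the dnlc */
    ReleaseWriteLock(&avc->lock);
#endif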
2647 * Sleep while searching for a vcache. Releases all the pending locks,
2648 * sleeps, then reobtains the previously released locks.
2650 * \param avc The vcache entry to wait on.
2651 * \param flag Determines what locks to use.
2656 findvc_sleep(struct vcache *avc, int flag)
2658 if (flag & IS_SLOCK) {
2659 ReleaseSharedLock(&afs_xvcache);
2661 if (flag & IS_WLOCK) {
2662 ReleaseWriteLock(&afs_xvcache);
2664 ReleaseReadLock(&afs_xvcache);
2667 afs_osi_Sleep(&avc->f.states);
2668 if (flag & IS_SLOCK) {
2669 ObtainSharedLock(&afs_xvcache, 341);
2671 if (flag & IS_WLOCK) {
2672 ObtainWriteLock(&afs_xvcache, 343);
2674 ObtainReadLock(&afs_xvcache);
2680 * Add a reference on an existing vcache entry.
2682 * \param tvc Pointer to the vcache.
2684 * \note Environment: Must be called with at least one reference from
2685 * elsewhere on the vcache, even if that reference will be dropped.
2686 * The global lock is required.
2688 * \return 0 on success, -1 on failure.
2692 afs_RefVCache(struct vcache *tvc)
2694 #ifdef AFS_DARWIN80_ENV
2698 /* AFS_STATCNT(afs_RefVCache); */
2700 #ifdef AFS_DARWIN80_ENV
2704 if (vnode_ref(tvp)) {
2706 /* AFSTOV(tvc) may be NULL */
2715 } /*afs_RefVCache */
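/*
 * Editor's note: a minimal sketch (not built) of taking an extra reference on
 * an entry the caller already holds, per the environment note above. The
 * surrounding locking context is assumed.
 */
#if 0
    /* caller already holds a reference on tvc and the global lock */
    if (afs_RefVCache(tvc) == 0) {
	/* ... use the additional reference ... */
	afs_PutVCache(tvc);	/* the usual way to balance it when done */
    }
#endif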
2718 * Find a vcache entry given a fid.
2720 * \param afid Pointer to the fid whose cache entry we desire.
2721 * \param retry (SGI-specific) tell the caller to drop the lock on xvcache,
2722 * unlock the vnode, and try again.
2723 * \param flag Bit 1 to specify whether to compute hit statistics. Not
2724 * set if FindVCache is called as part of internal bookkeeping.
2726 * \note Environment: Must be called with the afs_xvcache lock at least held at
2727 * the read level. In order to do the VLRU adjustment, the xvcache lock
2728 * must be shared-- we upgrade it here.
2732 afs_FindVCache(struct VenusFid *afid, afs_int32 * retry, afs_int32 flag)
2737 #ifdef AFS_DARWIN80_ENV
2738 struct vcache *deadvc = NULL, *livevc = NULL;
2742 AFS_STATCNT(afs_FindVCache);
2746 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2747 if (FidMatches(afid, tvc)) {
2748 if (tvc->f.states & CVInit) {
2749 findvc_sleep(tvc, flag);
2752 #ifdef AFS_DARWIN80_ENV
2753 if (tvc->f.states & CDeadVnode) {
2754 findvc_sleep(tvc, flag);
2762 /* should I have a read lock on the vnode here? */
2766 #if defined(AFS_DARWIN80_ENV)
2770 if (tvp && vnode_ref(tvp)) {
2772 /* AFSTOV(tvc) may be NULL */
2781 #elif defined(AFS_DARWIN_ENV)
2782 tvc->f.states |= CUBCinit;
2784 if (UBCINFOMISSING(AFSTOV(tvc)) ||
2785 UBCINFORECLAIMED(AFSTOV(tvc))) {
2786 ubc_info_init(AFSTOV(tvc));
2789 tvc->f.states &= ~CUBCinit;
2791 osi_vnhold(tvc, retry); /* already held, above */
2792 if (retry && *retry)
2796 * Only move to the front of the VLRU if we have proper vcache locking.
2798 if (flag & DO_VLRU) {
2799 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2800 refpanic("FindVC VLRU inconsistent1");
2802 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2803 refpanic("FindVC VLRU inconsistent1");
2805 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2806 refpanic("FindVC VLRU inconsistent2");
2808 UpgradeSToWLock(&afs_xvcache, 26);
2809 QRemove(&tvc->vlruq);
2810 QAdd(&VLRU, &tvc->vlruq);
2811 ConvertWToSLock(&afs_xvcache);
2812 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2813 refpanic("FindVC VLRU inconsistent1");
2815 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2816 refpanic("FindVC VLRU inconsistent2");
2818 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2819 refpanic("FindVC VLRU inconsistent3");
2825 if (flag & DO_STATS) {
2827 afs_stats_cmperf.vcacheHits++;
2829 afs_stats_cmperf.vcacheMisses++;
2830 if (afs_IsPrimaryCellNum(afid->Cell))
2831 afs_stats_cmperf.vlocalAccesses++;
2833 afs_stats_cmperf.vremoteAccesses++;
2836 } /*afs_FindVCache */
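/*
 * Editor's note: a minimal sketch (not built) of the usual lookup pattern.
 * A shared xvcache lock is taken so the DO_VLRU adjustment can upgrade it;
 * the fid variable and lock-order id are placeholders.
 */
#if 0
{
    struct vcache *tvc;

    ObtainSharedLock(&afs_xvcache, 996);	/* placeholder lock-order id */
    tvc = afs_FindVCache(&afid, NULL, DO_STATS | DO_VLRU | IS_SLOCK);
    ReleaseSharedLock(&afs_xvcache);
    if (tvc) {
	/* ... use the entry; the lookup took a reference for us ... */
	afs_PutVCache(tvc);
    }
}
#endif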
2839 * Find a vcache entry given a fid. Does a wildcard match on what we
2840 * have for the fid. If more than one entry matches, don't return anything.
2842 * \param avcp Fill in pointer if we found one and only one.
2843 * \param afid Pointer to the fid whose cache entry we desire.
2844 * \param retry (SGI-specific) tell the caller to drop the lock on xvcache,
2845 * unlock the vnode, and try again.
2846 * \param flags bit 1 to specify whether to compute hit statistics. Not
2847 * set if FindVCache is called as part of internal bookkeeping.
2849 * \note Environment: Must be called with the afs_xvcache lock at least held at
2850 * the read level. In order to do the VLRU adjustment, the xvcache lock
2851 * must be shared-- we upgrade it here.
2853 * \return Number of matches found.
2856 int afs_duplicate_nfs_fids = 0;
2859 afs_NFSFindVCache(struct vcache **avcp, struct VenusFid *afid)
2863 afs_int32 count = 0;
2864 struct vcache *found_tvc = NULL;
2865 #ifdef AFS_DARWIN80_ENV
2869 AFS_STATCNT(afs_FindVCache);
2873 ObtainSharedLock(&afs_xvcache, 331);
2876 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2877 /* Match only on what we have.... */
2878 if (((tvc->f.fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
2879 && (tvc->f.fid.Fid.Volume == afid->Fid.Volume)
2880 && ((tvc->f.fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
2881 && (tvc->f.fid.Cell == afid->Cell)) {
2882 if (tvc->f.states & CVInit) {
2883 ReleaseSharedLock(&afs_xvcache);
2884 afs_osi_Sleep(&tvc->f.states);
2887 #ifdef AFS_DARWIN80_ENV
2888 if (tvc->f.states & CDeadVnode) {
2889 ReleaseSharedLock(&afs_xvcache);
2890 afs_osi_Sleep(&tvc->f.states);
2894 if (vnode_get(tvp)) {
2895 /* This vnode no longer exists. */
2898 if (vnode_ref(tvp)) {
2899 /* This vnode no longer exists. */
2901 /* AFSTOV(tvc) may be NULL */
2906 #endif /* AFS_DARWIN80_ENV */
2910 afs_duplicate_nfs_fids++;
2911 ReleaseSharedLock(&afs_xvcache);
2912 #ifdef AFS_DARWIN80_ENV
2913 /* Drop our reference counts. */
2914 vnode_put(AFSTOV(tvc));
2915 vnode_put(AFSTOV(found_tvc));
2924 /* should I have a read lock on the vnode here? */
2926 #ifndef AFS_DARWIN80_ENV
2927 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2928 afs_int32 retry = 0;
2929 osi_vnhold(tvc, &retry);
2932 found_tvc = (struct vcache *)0;
2933 ReleaseSharedLock(&afs_xvcache);
2934 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2938 osi_vnhold(tvc, (int *)0); /* already held, above */
2942 * We obtained the xvcache lock above.
2944 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2945 refpanic("FindVC VLRU inconsistent1");
2947 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2948 refpanic("FindVC VLRU inconsistent1");
2950 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2951 refpanic("FindVC VLRU inconsistent2");
2953 UpgradeSToWLock(&afs_xvcache, 568);
2954 QRemove(&tvc->vlruq);
2955 QAdd(&VLRU, &tvc->vlruq);
2956 ConvertWToSLock(&afs_xvcache);
2957 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2958 refpanic("FindVC VLRU inconsistent1");
2960 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2961 refpanic("FindVC VLRU inconsistent2");
2963 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2964 refpanic("FindVC VLRU inconsistent3");
2970 afs_stats_cmperf.vcacheHits++;
2972 afs_stats_cmperf.vcacheMisses++;
2973 if (afs_IsPrimaryCellNum(afid->Cell))
2974 afs_stats_cmperf.vlocalAccesses++;
2976 afs_stats_cmperf.vremoteAccesses++;
2978 *avcp = tvc; /* May be null */
2980 ReleaseSharedLock(&afs_xvcache);
2981 return (tvc ? 1 : 0);
2983 } /*afs_NFSFindVCache */
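/*
 * Editor's note: a minimal sketch (not built) of the NFS-translator style
 * lookup, where only part of the fid is known. The return value is the number
 * of unique matches (0 or 1), and a reference is taken on the entry returned.
 */
#if 0
{
    struct vcache *tvc = NULL;

    if (afs_NFSFindVCache(&tvc, &afid) && tvc) {
	/* exactly one cached entry matched the partial fid */
	afs_PutVCache(tvc);	/* drop the reference taken for us */
    }
}
#endif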
2989 * Initialize vcache related variables
2994 afs_vcacheInit(int astatSize)
2996 #if !defined(AFS_LINUX22_ENV)
3000 if (!afs_maxvcount) {
3001 afs_maxvcount = astatSize; /* no particular limit on linux? */
3003 #if !defined(AFS_LINUX22_ENV)
3007 AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
3008 LOCK_INIT(&afs_xvcb, "afs_xvcb");
3010 #if !defined(AFS_LINUX22_ENV)
3011 /* Allocate and thread the struct vcache entries */
3012 tvp = afs_osi_Alloc(astatSize * sizeof(struct vcache));
3013 osi_Assert(tvp != NULL);
3014 memset(tvp, 0, sizeof(struct vcache) * astatSize);
3016 Initial_freeVCList = tvp;
3017 freeVCList = &(tvp[0]);
3018 for (i = 0; i < astatSize - 1; i++) {
3019 tvp[i].nextfree = &(tvp[i + 1]);
3021 tvp[astatSize - 1].nextfree = NULL;
3022 # ifdef KERNEL_HAVE_PIN
3023 pin((char *)tvp, astatSize * sizeof(struct vcache)); /* XXX */
3027 #if defined(AFS_SGI_ENV)
3028 for (i = 0; i < astatSize; i++) {
3029 char name[METER_NAMSZ];
3030 struct vcache *tvc = &tvp[i];
3032 tvc->v.v_number = ++afsvnumbers;
3033 tvc->vc_rwlockid = OSI_NO_LOCKID;
3034 initnsema(&tvc->vc_rwlock, 1,
3035 makesname(name, "vrw", tvc->v.v_number));
3036 #ifndef AFS_SGI53_ENV
3037 initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
3039 #ifndef AFS_SGI62_ENV
3040 initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
3041 #endif /* AFS_SGI62_ENV */
3045 for(i = 0; i < VCSIZE; ++i)
3046 QInit(&afs_vhashTV[i]);
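/*
 * Editor's note: afs_vcacheInit() is called once during client start-up with
 * the number of stat cache entries to manage; the value and call site shown
 * here are assumptions for illustration only (not built).
 */
#if 0
    afs_vcacheInit(300);	/* e.g. the afsd -stat value */
#endif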
3053 shutdown_vcache(void)
3056 struct afs_cbr *tsp;
3058 * XXX We may potentially miss some of the vcaches because, when
3059 * there are no free vcache entries and all the vcache entries are active,
3060 * we allocate an additional one - admittedly we almost never see that occur.
3065 struct afs_q *tq, *uq = NULL;
3067 for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
3071 osi_FreeSmallSpace(tvc->mvid);
3072 tvc->mvid = (struct VenusFid *)0;
3075 aix_gnode_rele(AFSTOV(tvc));
3077 if (tvc->linkData) {
3078 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
3083 * Also free the remaining ones in the Cache
3085 for (i = 0; i < VCSIZE; i++) {
3086 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3088 osi_FreeSmallSpace(tvc->mvid);
3089 tvc->mvid = (struct VenusFid *)0;
3093 afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
3094 #ifdef AFS_AIX32_ENV
3097 vms_delete(tvc->segid);
3099 tvc->segid = tvc->vmh = NULL;
3100 if (VREFCOUNT_GT(tvc,0))
3101 osi_Panic("flushVcache: vm race");
3109 #if defined(AFS_SUN5_ENV)
3115 if (tvc->linkData) {
3116 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
3121 afs_FreeAllAxs(&(tvc->Access));
3127 * Free any leftover callback queue
3129 for (i = 0; i < afs_stats_cmperf.CallBackAlloced; i++) {
3130 tsp = afs_cbrHeads[i];
3131 afs_cbrHeads[i] = 0;
3132 afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
3136 #if !defined(AFS_LINUX22_ENV)
3137 afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3139 # ifdef KERNEL_HAVE_PIN
3140 unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3143 freeVCList = Initial_freeVCList = 0;
3146 AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
3147 LOCK_INIT(&afs_xvcb, "afs_xvcb");
3149 for(i = 0; i < VCSIZE; ++i)
3150 QInit(&afs_vhashTV[i]);
3154 afs_DisconGiveUpCallbacks(void)
3160 ObtainWriteLock(&afs_xvcache, 1002); /* XXX - should be a unique number */
3163 /* Somehow, walk the set of vcaches, with each one coming out as tvc */
3164 for (i = 0; i < VCSIZE; i++) {
3165 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3167 if (afs_QueueVCB(tvc, &slept)) {
3168 tvc->callback = NULL;
3177 ReleaseWriteLock(&afs_xvcache);
3184 * Clear the Statd flag from all vcaches
3186 * This function removes the Statd flag from all vcaches. It is used by
3187 * disconnected mode to tidy up during reconnection.
3191 afs_ClearAllStatdFlag(void)
3196 ObtainWriteLock(&afs_xvcache, 715);
3198 for (i = 0; i < VCSIZE; i++) {
3199 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3200 tvc->f.states &= ~(CStatd|CUnique);
3203 ReleaseWriteLock(&afs_xvcache);
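/*
 * Editor's note: a minimal sketch (not built). After reconnection,
 * disconnected mode can drop every cached status so the next access goes back
 * to the fileserver (via afs_FetchStatus()/afs_UpdateStatus() above); whether
 * it is paired with afs_DisconGiveUpCallbacks() is an assumption here.
 */
#if 0
    afs_ClearAllStatdFlag();	/* every vcache loses CStatd | CUnique */
#endif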