2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 * afs_FlushActiveVcaches
22 * afs_WriteVCacheDiscon
40 #include <afsconfig.h>
41 #include "afs/param.h"
43 #include "afs/sysincludes.h" /*Standard vendor system headers */
44 #include "afsincludes.h" /*AFS-based standard headers */
45 #include "afs/afs_stats.h"
46 #include "afs/afs_cbqueue.h"
47 #include "afs/afs_osidnlc.h"
49 afs_int32 afs_maxvcount = 0; /* max number of vcache entries */
50 afs_int32 afs_vcount = 0;		/* number of vcaches in use now */
58 #endif /* AFS_SGI64_ENV */
60 /* Exported variables */
61 afs_rwlock_t afs_xvcdirty; /*Lock: discon vcache dirty list mgmt */
62 afs_rwlock_t afs_xvcache; /*Lock: alloc new stat cache entries */
63 afs_rwlock_t afs_xvreclaim; /*Lock: entries reclaimed, not on free list */
64 afs_lock_t afs_xvcb; /*Lock: fids on which there are callbacks */
65 #if !defined(AFS_LINUX22_ENV)
66 static struct vcache *freeVCList; /*Free list for stat cache entries */
67 struct vcache *ReclaimedVCList; /*Reclaimed list for stat entries */
68 static struct vcache *Initial_freeVCList; /*Initial list for above */
70 struct afs_q VLRU; /*vcache LRU */
71 afs_int32 vcachegen = 0;
72 unsigned int afs_paniconwarn = 0;
73 struct vcache *afs_vhashT[VCSIZE];
74 struct afs_q afs_vhashTV[VCSIZE];
75 static struct afs_cbr *afs_cbrHashT[CBRSIZE];
76 afs_int32 afs_bulkStatsLost;
77 int afs_norefpanic = 0;
80 /* Disk backed vcache definitions
81 * Both protected by xvcache */
82 static int afs_nextVcacheSlot = 0;
83 static struct afs_slotlist *afs_freeSlotList = NULL;
85 /* Forward declarations */
86 static afs_int32 afs_QueueVCB(struct vcache *avc);
89 * Generate an index into the hash table for a given Fid.
91 * \return The hash value.
94 afs_HashCBRFid(struct AFSFid *fid)
96 return (fid->Volume + fid->Vnode + fid->Unique) % CBRSIZE;
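/* Worked example (illustrative numbers only, not taken from any real volume):
 * a fid with Volume 536870918, Vnode 42 and Unique 7 hashes to
 * (536870918 + 42 + 7) % CBRSIZE, so every CBR queued for that fid lands on
 * the same afs_cbrHashT chain. */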
100 * Insert a CBR entry into the hash table.
101 * Must be called with afs_xvcb held.
106 afs_InsertHashCBR(struct afs_cbr *cbr)
108 int slot = afs_HashCBRFid(&cbr->fid);
110 cbr->hash_next = afs_cbrHashT[slot];
111 if (afs_cbrHashT[slot])
112 afs_cbrHashT[slot]->hash_pprev = &cbr->hash_next;
114 cbr->hash_pprev = &afs_cbrHashT[slot];
115 afs_cbrHashT[slot] = cbr;
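/* A minimal lookup sketch (hypothetical; the real traversal is in
 * afs_RemoveVCB below): walk the chain selected by afs_HashCBRFid and
 * compare all three fid components.
 *
 *   struct afs_cbr *cbr;
 *   for (cbr = afs_cbrHashT[afs_HashCBRFid(&fid)]; cbr; cbr = cbr->hash_next)
 *       if (cbr->fid.Volume == fid.Volume && cbr->fid.Vnode == fid.Vnode
 *           && cbr->fid.Unique == fid.Unique)
 *           break;    (found the queued callback for this fid)
 */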
120 * Flush the given vcache entry.
123 * afs_xvcache lock must be held for writing upon entry to
124 * prevent people from changing the vrefCount field, and to
125 * protect the lruq and hnext fields.
126 * LOCK: afs_FlushVCache afs_xvcache W
127 * REFCNT: vcache ref count must be zero on entry except for osf1
128 * RACE: lock is dropped and reobtained, permitting race in caller
130 * \param avc Pointer to vcache entry to flush.
131 * \param slept Pointer to int to set 1 if we sleep/drop locks, 0 if we don't.
135 afs_FlushVCache(struct vcache *avc, int *slept)
136 { /*afs_FlushVCache */
139 struct vcache **uvc, *wvc;
142 AFS_STATCNT(afs_FlushVCache);
143 afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
144 ICL_TYPE_INT32, avc->f.states);
146 code = osi_VM_FlushVCache(avc, slept);
150 if (avc->f.states & CVFlushed) {
154 #if !defined(AFS_LINUX22_ENV)
155 if (avc->nextfree || !avc->vlruq.prev || !avc->vlruq.next) { /* qv afs.h */
156 refpanic("LRU vs. Free inconsistency");
159 avc->f.states |= CVFlushed;
160 /* pull the entry out of the lruq and put it on the free list */
161 QRemove(&avc->vlruq);
163 /* keep track of # of files that we bulk stat'd, but never used
164 * before they got recycled.
166 if (avc->f.states & CBulkStat)
169 /* remove entry from the hash chain */
170 i = VCHash(&avc->f.fid);
171 uvc = &afs_vhashT[i];
172 for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
175 avc->hnext = (struct vcache *)NULL;
180 /* remove entry from the volume hash table */
181 QRemove(&avc->vhashq);
184 osi_FreeSmallSpace(avc->mvid);
185 avc->mvid = (struct VenusFid *)0;
187 afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
188 avc->linkData = NULL;
190 #if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
191 /* OK, there are no internal vrefCounts, so there shouldn't
192 * be any more refs here. */
194 #ifdef AFS_DARWIN80_ENV
195 vnode_clearfsnode(AFSTOV(avc));
196 vnode_removefsref(AFSTOV(avc));
198 avc->v->v_data = NULL; /* remove from vnode */
200 AFSTOV(avc) = NULL; /* also drop the ptr to vnode */
203 #ifdef AFS_SUN510_ENV
204 /* As we use private vnodes, cleanup is up to us */
205 vn_reinit(AFSTOV(avc));
207 afs_FreeAllAxs(&(avc->Access));
208 if (!afs_shuttingdown)
210 ObtainWriteLock(&afs_xcbhash, 460);
211 afs_DequeueCallback(avc); /* remove it from queued callbacks list */
212 avc->f.states &= ~(CStatd | CUnique);
213 ReleaseWriteLock(&afs_xcbhash);
214 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
215 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
217 osi_dnlc_purgevp(avc);
220 * Next, keep track of which vnodes we've deleted for create's
221 * optimistic synchronization algorithm
224 if (avc->f.fid.Fid.Vnode & 1)
230 #if !defined(AFS_LINUX22_ENV)
231 /* put the entry in the free list */
232 avc->nextfree = freeVCList;
234 if (avc->vlruq.prev || avc->vlruq.next) {
235 refpanic("LRU vs. Free inconsistency");
237 avc->f.states |= CVFlushed;
239 /* This should put it back on the vnode free list since usecount is 1 */
241 if (VREFCOUNT_GT(avc,0)) {
242 AFS_RELE(AFSTOV(avc));
243 afs_stats_cmperf.vcacheXAllocs--;
245 if (afs_norefpanic) {
246 afs_warn("flush vc refcnt < 1");
249 osi_Panic("flush vc refcnt < 1");
251 #endif /* AFS_LINUX22_ENV */
256 } /*afs_FlushVCache */
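/* A minimal caller sketch (hypothetical caller, made-up lock id), honoring
 * the locking contract documented above; note the RACE comment: the routine
 * may drop and reobtain afs_xvcache while it runs.
 *
 *   int slept = 0;
 *   ObtainWriteLock(&afs_xvcache, 999);
 *   code = afs_FlushVCache(tvc, &slept);
 *   ReleaseWriteLock(&afs_xvcache);
 */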
260 * The core of the inactive vnode op for all but IRIX.
266 afs_InactiveVCache(struct vcache *avc, afs_ucred_t *acred)
268 AFS_STATCNT(afs_inactive);
269 if (avc->f.states & CDirty) {
270 /* we can't keep trying to push back dirty data forever. Give up. */
271 afs_InvalidateAllSegments(avc); /* turns off dirty bit */
273 avc->f.states &= ~CMAPPED; /* mainly used by SunOS 4.0.x */
274 avc->f.states &= ~CDirty; /* Turn it off */
275 if (avc->f.states & CUnlinked) {
276 if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
277 avc->f.states |= CUnlinkedDel;
280 afs_remunlink(avc, 1); /* ignore any return code */
287 * Allocate a callback return structure from the
288 * free list and return it.
290 * Environment: The alloc and free routines are both called with the afs_xvcb lock
291 * held, so we don't have to worry about blocking in osi_Alloc.
293 * \return The allocated afs_cbr.
295 static struct afs_cbr *afs_cbrSpace = 0;
296 /* if alloc limit below changes, fix me! */
297 static struct afs_cbr *afs_cbrHeads[16];
305 afs_osi_CancelWait(&AFS_WaitHandler); /* trigger FlushVCBs asap */
307 if (afs_stats_cmperf.CallBackAlloced >= sizeof(afs_cbrHeads)/sizeof(afs_cbrHeads[0])) {
308 /* don't allocate more than 16 * AFS_NCBRS for now */
309 tsp = (struct afs_cbr *)osi_AllocSmallSpace(sizeof(*tsp));
312 afs_stats_cmperf.CallBackFlushes++;
315 tsp = afs_osi_Alloc(AFS_NCBRS * sizeof(struct afs_cbr));
316 osi_Assert(tsp != NULL);
317 for (i = 0; i < AFS_NCBRS - 1; i++) {
318 tsp[i].next = &tsp[i + 1];
321 tsp[AFS_NCBRS - 1].next = 0;
322 tsp[AFS_NCBRS - 1].dynalloc = 0;
323 afs_cbrSpace = tsp->next;
324 afs_cbrHeads[afs_stats_cmperf.CallBackAlloced] = tsp;
325 afs_stats_cmperf.CallBackAlloced++;
329 afs_cbrSpace = tsp->next;
335 * Free a callback return structure, removing it from all lists.
337 * Environment: the xvcb lock is held over these calls.
339 * \param asp The address of the structure to free.
344 afs_FreeCBR(struct afs_cbr *asp)
346 *(asp->pprev) = asp->next;
348 asp->next->pprev = asp->pprev;
350 *(asp->hash_pprev) = asp->hash_next;
352 asp->hash_next->hash_pprev = asp->hash_pprev;
355 osi_FreeSmallSpace(asp);
357 asp->next = afs_cbrSpace;
364 * Flush all queued callbacks to all servers.
366 * Environment: holds xvcb lock over RPC to guard against race conditions
367 * when a new callback is granted for the same file later on.
369 * \return 0 for success.
372 afs_FlushVCBs(afs_int32 lockit)
374 struct AFSFid *tfids;
375 struct AFSCallBack callBacks[1];
376 struct AFSCBFids fidArray;
377 struct AFSCBs cbArray;
379 struct afs_cbr *tcbrp;
383 struct vrequest treq;
385 int safety1, safety2, safety3;
387 if ((code = afs_InitReq(&treq, afs_osi_credp)))
389 treq.flags |= O_NONBLOCK;
390 tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
391 osi_Assert(tfids != NULL);
394 ObtainWriteLock(&afs_xvcb, 273);
395 ObtainReadLock(&afs_xserver);
396 for (i = 0; i < NSERVERS; i++) {
397 for (safety1 = 0, tsp = afs_servers[i];
398 tsp && safety1 < afs_totalServers + 10;
399 tsp = tsp->next, safety1++) {
401 if (tsp->cbrs == (struct afs_cbr *)0)
404 /* otherwise, grab a block of AFS_MAXCBRSCALL from the list
405 * and make an RPC, over and over again.
407 tcount = 0; /* number found so far */
408 for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
409 if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
410 /* if buffer is full, or we've queued all we're going
411 * to from this server, we should flush out the callbacks.
414 fidArray.AFSCBFids_len = tcount;
415 fidArray.AFSCBFids_val = (struct AFSFid *)tfids;
416 cbArray.AFSCBs_len = 1;
417 cbArray.AFSCBs_val = callBacks;
418 memset(&callBacks[0], 0, sizeof(callBacks[0]));
419 callBacks[0].CallBackType = CB_EXCLUSIVE;
420 for (safety3 = 0; safety3 < AFS_MAXHOSTS * 2; safety3++) {
421 tc = afs_ConnByHost(tsp, tsp->cell->fsport,
422 tsp->cell->cellNum, &treq, 0,
426 (AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
429 RXAFS_GiveUpCallBacks(tc->id, &fidArray,
437 AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS, SHARED_LOCK,
442 /* ignore return code, since callbacks may have
443 * been returned anyway; we shouldn't leave them
444 * around to be returned again.
446 * Next, see if we are done with this server, and if so,
447 * break to deal with the next one.
453 /* if to flush full buffer */
454 /* if we make it here, we have an entry at the head of cbrs,
455 * which we should copy to the file ID array and then free.
458 tfids[tcount++] = tcbrp->fid;
460 /* Freeing the CBR will unlink it from the server's CBR list */
462 } /* while loop for this one server */
463 if (safety2 > afs_cacheStats) {
464 afs_warn("possible internal error afs_flushVCBs (%d)\n",
467 } /* for loop for this hash chain */
468 } /* loop through all hash chains */
469 if (safety1 > afs_totalServers + 2) {
471 ("AFS internal error (afs_flushVCBs) (%d > %d), continuing...\n",
472 safety1, afs_totalServers + 2);
474 osi_Panic("afs_flushVCBS safety1");
477 ReleaseReadLock(&afs_xserver);
479 ReleaseWriteLock(&afs_xvcb);
480 afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
485 * Queue a callback on the given fid.
488 * Locks the xvcb lock.
489 * Called when the xvcache lock is already held.
491 * \param avc vcache entry
492 * \return 1 if queued, 0 otherwise
496 afs_QueueVCB(struct vcache *avc)
500 struct afs_cbr *tcbp;
502 AFS_STATCNT(afs_QueueVCB);
504 ObtainWriteLock(&afs_xvcb, 274);
506 /* we can't really give back callbacks on RO files, since the
507 * server only tracks them on a per-volume basis, and we don't
508 * know whether we still have some other files from the same volume.
510 if (!((avc->f.states & CRO) == 0 && avc->callback)) {
514 /* The callback is really just a struct server ptr. */
515 tsp = (struct server *)(avc->callback);
517 /* we now have a pointer to the server, so we just allocate
518 * a queue entry and queue it.
520 tcbp = afs_AllocCBR();
521 tcbp->fid = avc->f.fid.Fid;
523 tcbp->next = tsp->cbrs;
525 tsp->cbrs->pprev = &tcbp->next;
528 tcbp->pprev = &tsp->cbrs;
530 afs_InsertHashCBR(tcbp);
534 /* now release locks and return */
535 ReleaseWriteLock(&afs_xvcb);
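/* A minimal caller sketch (hypothetical): per the contract above, the caller
 * already holds afs_xvcache; afs_QueueVCB takes and drops afs_xvcb itself.
 *
 *   queued = afs_QueueVCB(avc);   (returns 1 if a CBR was queued, 0 otherwise)
 */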
541 * Remove a queued callback for a given Fid.
544 * Locks xvcb and xserver locks.
545 * Typically called with xdcache, xvcache and/or individual vcache entries locked.
548 * \param afid The fid we want cleansed of queued callbacks.
553 afs_RemoveVCB(struct VenusFid *afid)
556 struct afs_cbr *cbr, *ncbr;
558 AFS_STATCNT(afs_RemoveVCB);
559 ObtainWriteLock(&afs_xvcb, 275);
561 slot = afs_HashCBRFid(&afid->Fid);
562 ncbr = afs_cbrHashT[slot];
566 ncbr = cbr->hash_next;
568 if (afid->Fid.Volume == cbr->fid.Volume &&
569 afid->Fid.Vnode == cbr->fid.Vnode &&
570 afid->Fid.Unique == cbr->fid.Unique) {
575 ReleaseWriteLock(&afs_xvcb);
579 afs_FlushReclaimedVcaches(void)
581 #if !defined(AFS_LINUX22_ENV)
584 struct vcache *tmpReclaimedVCList = NULL;
586 ObtainWriteLock(&afs_xvreclaim, 76);
587 while (ReclaimedVCList) {
588 tvc = ReclaimedVCList; /* take from free list */
589 ReclaimedVCList = tvc->nextfree;
590 tvc->nextfree = NULL;
591 code = afs_FlushVCache(tvc, &fv_slept);
593 /* If the flush failed (code != 0), we cannot simply drop this entry. */
594 /* Instead, build a temporary list and put everything back on the
595 reclaimed list once we reach the end of the loop. */
596 /* This is crude, but it keeps these entries from leaking; a smarter
597 recovery strategy would be better. */
598 tvc->nextfree = tmpReclaimedVCList;
599 tmpReclaimedVCList = tvc;
600 /* printf("Reclaim list flush %lx failed: %d\n", (unsigned long) tvc, code); */
602 if (tvc->f.states & (CVInit
603 #ifdef AFS_DARWIN80_ENV
607 tvc->f.states &= ~(CVInit
608 #ifdef AFS_DARWIN80_ENV
612 afs_osi_Wakeup(&tvc->f.states);
615 if (tmpReclaimedVCList)
616 ReclaimedVCList = tmpReclaimedVCList;
618 ReleaseWriteLock(&afs_xvreclaim);
623 afs_PostPopulateVCache(struct vcache *avc, struct VenusFid *afid, int seq)
626 * The proper value for mvstat (for root fids) is set up by the caller.
629 if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
632 if (afs_globalVFS == 0)
633 osi_Panic("afs globalvfs");
635 osi_PostPopulateVCache(avc);
638 osi_dnlc_purgedp(avc); /* this may be overkill */
639 memset(&(avc->callsort), 0, sizeof(struct afs_q));
641 avc->f.states &=~ CVInit;
643 avc->f.states |= CBulkFetching;
644 avc->f.m.Length = seq;
646 afs_osi_Wakeup(&avc->f.states);
650 afs_ShakeLooseVCaches(afs_int32 anumber)
654 struct afs_q *tq, *uq;
656 afs_int32 target = anumber;
660 for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
663 if (tvc->f.states & CVFlushed) {
664 refpanic("CVFlushed on VLRU");
665 /* In the other path, this was 2 * afs_cacheStats */
666 } else if (!afsd_dynamic_vcaches && i++ > afs_maxvcount) {
667 refpanic("Exceeded pool of AFS vnodes(VLRU cycle?)");
668 } else if (QNext(uq) != tq) {
669 refpanic("VLRU inconsistent");
670 } else if (tvc->f.states & CVInit) {
675 if (osi_TryEvictVCache(tvc, &fv_slept))
683 continue; /* start over - may have raced. */
688 if (!afsd_dynamic_vcaches && anumber == target) {
689 afs_warn("afs_ShakeLooseVCaches: warning none freed, using %d of %d\n",
690 afs_vcount, afs_maxvcount);
696 /* Alloc new vnode. */
698 static struct vcache *
699 afs_AllocVCache(void)
703 tvc = osi_NewVnode();
708 if (afsd_dynamic_vcaches && afs_maxvcount < afs_vcount) {
709 afs_maxvcount = afs_vcount;
710 /*printf("peak vnodes: %d\n", afs_maxvcount);*/
713 afs_stats_cmperf.vcacheXAllocs++; /* count in case we have a leak */
715 /* If we create a new inode, we either give it a new slot number,
716 * or if one's available, use a slot number from the slot free list
718 if (afs_freeSlotList != NULL) {
719 struct afs_slotlist *tmp;
721 tvc->diskSlot = afs_freeSlotList->slot;
722 tmp = afs_freeSlotList;
723 afs_freeSlotList = tmp->next;
724 afs_osi_Free(tmp, sizeof(struct afs_slotlist));
726 tvc->diskSlot = afs_nextVcacheSlot++;
732 /* Pre-populate a newly allocated vcache. On platforms where the actual
733 * vnode is attached to the vcache, this function is called before attachment;
734 * therefore it cannot perform any actions on the vnode itself. */
737 afs_PrePopulateVCache(struct vcache *avc, struct VenusFid *afid,
738 struct server *serverp) {
741 slot = avc->diskSlot;
743 osi_PrePopulateVCache(avc);
745 avc->diskSlot = slot;
746 QZero(&avc->metadirty);
748 AFS_RWLOCK_INIT(&avc->lock, "vcache lock");
751 avc->linkData = NULL;
754 avc->execsOrWriters = 0;
756 avc->f.states = CVInit;
757 avc->last_looker = 0;
759 avc->asynchrony = -1;
763 avc->f.truncPos = AFS_NOTRUNC; /* don't truncate until we need to */
764 hzero(avc->f.m.DataVersion); /* in case we copy it into flushDV */
766 avc->callback = serverp; /* to minimize chance that clear
769 #if defined(AFS_CACHE_BYPASS)
770 avc->cachingStates = 0;
771 avc->cachingTransitions = 0;
776 * This routine is responsible for allocating a new cache entry
777 * from the free list. It formats the cache entry and inserts it
778 * into the appropriate hash tables. It must be called with
779 * afs_xvcache write-locked so as to prevent several processes from
780 * trying to create a new cache entry simultaneously.
782 * LOCK: afs_NewVCache afs_xvcache W
784 * \param afid The file id of the file whose cache entry is being created.
786 * \return The new vcache struct.
789 static_inline struct vcache *
790 afs_NewVCache_int(struct VenusFid *afid, struct server *serverp, int seq)
794 afs_int32 anumber = VCACHE_FREE;
796 AFS_STATCNT(afs_NewVCache);
798 afs_FlushReclaimedVcaches();
800 #if defined(AFS_LINUX22_ENV)
801 if(!afsd_dynamic_vcaches) {
802 afs_ShakeLooseVCaches(anumber);
803 if (afs_vcount >= afs_maxvcount) {
804 afs_warn("afs_NewVCache - none freed\n");
808 tvc = afs_AllocVCache();
809 #else /* AFS_LINUX22_ENV */
810 /* pull out a free cache entry */
812 afs_ShakeLooseVCaches(anumber);
816 tvc = afs_AllocVCache();
818 tvc = freeVCList; /* take from free list */
819 freeVCList = tvc->nextfree;
820 tvc->nextfree = NULL;
821 afs_vcount++; /* balanced by FlushVCache */
822 } /* end of if (!freeVCList) */
824 #endif /* AFS_LINUX22_ENV */
826 #if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
828 panic("afs_NewVCache(): free vcache with vnode attached");
831 /* Populate the vcache with as much as we can. */
832 afs_PrePopulateVCache(tvc, afid, serverp);
834 /* Thread the vcache onto the VLRU */
839 tvc->hnext = afs_vhashT[i];
841 QAdd(&afs_vhashTV[j], &tvc->vhashq);
843 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
844 refpanic("NewVCache VLRU inconsistent");
846 QAdd(&VLRU, &tvc->vlruq); /* put in lruq */
847 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
848 refpanic("NewVCache VLRU inconsistent2");
850 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
851 refpanic("NewVCache VLRU inconsistent3");
853 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
854 refpanic("NewVCache VLRU inconsistent4");
858 /* it should now be safe to drop the xvcache lock - so attach an inode
859 * to this vcache, where necessary */
860 osi_AttachVnode(tvc, seq);
862 /* Get a reference count to hold this vcache for the VLRUQ. Note that
863 * we have to do this after attaching the vnode, because the reference
864 * count may be held in the vnode itself */
866 #if defined(AFS_LINUX22_ENV)
867 /* Hold it for the LRU (should make count 2) */
869 #elif !(defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV))
870 VREFCOUNT_SET(tvc, 1); /* us */
873 #if defined (AFS_FBSD_ENV)
874 if (tvc->f.states & CVInit)
876 afs_PostPopulateVCache(tvc, afid, seq);
883 afs_NewVCache(struct VenusFid *afid, struct server *serverp)
885 return afs_NewVCache_int(afid, serverp, 0);
889 afs_NewBulkVCache(struct VenusFid *afid, struct server *serverp, int seq)
891 return afs_NewVCache_int(afid, serverp, seq);
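/* A minimal caller sketch (hypothetical caller, made-up lock id), honoring the
 * note above that afs_xvcache must be write-locked around allocation:
 *
 *   ObtainWriteLock(&afs_xvcache, 998);
 *   tvc = afs_NewVCache(&afid, NULL);
 *   ConvertWToSLock(&afs_xvcache);
 *   if (!tvc)
 *       ...handle the allocation failure...
 */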
897 * LOCK: afs_FlushActiveVcaches afs_xvcache N
899 * \param doflocks : Do we handle flocks?
902 afs_FlushActiveVcaches(afs_int32 doflocks)
908 afs_ucred_t *cred = NULL;
909 struct vrequest treq, ureq;
910 struct AFSVolSync tsync;
913 AFS_STATCNT(afs_FlushActiveVcaches);
914 ObtainReadLock(&afs_xvcache);
915 for (i = 0; i < VCSIZE; i++) {
916 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
917 if (tvc->f.states & CVInit) continue;
918 #ifdef AFS_DARWIN80_ENV
919 if (tvc->f.states & CDeadVnode &&
920 (tvc->f.states & (CCore|CUnlinkedDel) ||
921 tvc->flockCount)) panic("Dead vnode has core/unlinkedel/flock");
923 if (doflocks && tvc->flockCount != 0) {
924 /* if this entry has a flock, send a keep-alive call out */
926 ReleaseReadLock(&afs_xvcache);
927 ObtainWriteLock(&tvc->lock, 51);
929 afs_InitReq(&treq, afs_osi_credp);
930 treq.flags |= O_NONBLOCK;
932 tc = afs_Conn(&tvc->f.fid, &treq, SHARED_LOCK);
934 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
937 RXAFS_ExtendLock(tc->id,
938 (struct AFSFid *)&tvc->f.fid.Fid,
945 (tc, code, &tvc->f.fid, &treq,
946 AFS_STATS_FS_RPCIDX_EXTENDLOCK, SHARED_LOCK, NULL));
948 ReleaseWriteLock(&tvc->lock);
949 #ifdef AFS_DARWIN80_ENV
951 ObtainReadLock(&afs_xvcache);
953 ObtainReadLock(&afs_xvcache);
958 if ((tvc->f.states & CCore) || (tvc->f.states & CUnlinkedDel)) {
960 * Don't let it evaporate in case someone else is in
961 * this code. Also, drop the afs_xvcache lock while
962 * getting vcache locks.
965 ReleaseReadLock(&afs_xvcache);
966 #ifdef AFS_BOZONLOCK_ENV
967 afs_BozonLock(&tvc->pvnLock, tvc);
969 #if defined(AFS_SGI_ENV)
971 * That's because if we come in via the CUnlinkedDel bit state path we'll have a 0 refcnt
973 osi_Assert(VREFCOUNT_GT(tvc,0));
974 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
976 ObtainWriteLock(&tvc->lock, 52);
977 if (tvc->f.states & CCore) {
978 tvc->f.states &= ~CCore;
979 /* XXXX Find better place-holder for cred XXXX */
980 cred = (afs_ucred_t *)tvc->linkData;
981 tvc->linkData = NULL; /* XXX */
982 afs_InitReq(&ureq, cred);
983 afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
984 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
985 tvc->execsOrWriters);
986 code = afs_StoreOnLastReference(tvc, &ureq);
987 ReleaseWriteLock(&tvc->lock);
988 #ifdef AFS_BOZONLOCK_ENV
989 afs_BozonUnlock(&tvc->pvnLock, tvc);
994 if (code && code != VNOVNODE) {
995 afs_StoreWarn(code, tvc->f.fid.Fid.Volume,
996 /* /dev/console */ 1);
998 } else if (tvc->f.states & CUnlinkedDel) {
1002 ReleaseWriteLock(&tvc->lock);
1003 #ifdef AFS_BOZONLOCK_ENV
1004 afs_BozonUnlock(&tvc->pvnLock, tvc);
1006 #if defined(AFS_SGI_ENV)
1007 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1009 afs_remunlink(tvc, 0);
1010 #if defined(AFS_SGI_ENV)
1011 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1014 /* lost (or won, perhaps) the race condition */
1015 ReleaseWriteLock(&tvc->lock);
1016 #ifdef AFS_BOZONLOCK_ENV
1017 afs_BozonUnlock(&tvc->pvnLock, tvc);
1020 #if defined(AFS_SGI_ENV)
1021 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1023 #ifdef AFS_DARWIN80_ENV
1026 AFS_RELE(AFSTOV(tvc));
1027 /* Matches write code setting CCore flag */
1030 ObtainReadLock(&afs_xvcache);
1032 ObtainReadLock(&afs_xvcache);
1035 AFS_RELE(AFSTOV(tvc));
1036 /* Matches write code setting CCore flag */
1043 ReleaseReadLock(&afs_xvcache);
1064 * Make sure a cache entry is up-to-date status-wise.
1066 * NOTE: everywhere that calls this can potentially be sped up
1067 * by checking CStatd first, and avoiding doing the InitReq
1068 * if this is up-to-date.
1070 * These days, the only places that call this already KNOW that the
1071 * vcache is not up-to-date, so we don't bother checking CStatd first.
1073 * \param avc Pointer to vcache entry to verify.
1076 * \return 0 for success or other error codes.
1079 afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
1083 AFS_STATCNT(afs_VerifyVCache);
1085 /* otherwise we must fetch the status info */
1087 ObtainWriteLock(&avc->lock, 53);
1088 if (avc->f.states & CStatd) {
1089 ReleaseWriteLock(&avc->lock);
1092 ObtainWriteLock(&afs_xcbhash, 461);
1093 avc->f.states &= ~(CStatd | CUnique);
1094 avc->callback = NULL;
1095 afs_DequeueCallback(avc);
1096 ReleaseWriteLock(&afs_xcbhash);
1097 ReleaseWriteLock(&avc->lock);
1099 /* since we've been called back, or the callback has expired,
1100 * it's possible that the contents of this directory, or this
1101 * file's name have changed, thus invalidating the dnlc contents.
1103 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
1104 osi_dnlc_purgedp(avc);
1106 osi_dnlc_purgevp(avc);
1108 /* fetch the status info */
1109 tvc = afs_GetVCache(&avc->f.fid, areq, NULL, avc);
1112 /* Put it back; caller has already incremented vrefCount */
1116 } /*afs_VerifyVCache */
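/* The speed-up mentioned in the NOTE above, as a hypothetical caller sketch:
 *
 *   if (!(avc->f.states & CStatd))             (skip the RPC when already valid)
 *       code = afs_VerifyVCache2(avc, &treq);
 */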
1120 * Simple copy of stat info into cache.
1122 * Callers: as of 1992-04-29, only called by WriteVCache
1124 * \param avc Ptr to vcache entry involved.
1125 * \param astat Ptr to stat info to copy.
1129 afs_SimpleVStat(struct vcache *avc,
1130 struct AFSFetchStatus *astat, struct vrequest *areq)
1133 AFS_STATCNT(afs_SimpleVStat);
1135 #ifdef AFS_64BIT_CLIENT
1136 FillInt64(length, astat->Length_hi, astat->Length);
1137 #else /* AFS_64BIT_CLIENT */
1138 length = astat->Length;
1139 #endif /* AFS_64BIT_CLIENT */
1141 #if defined(AFS_SGI_ENV)
1142 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1143 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1144 osi_Assert((valusema(&avc->vc_rwlock) <= 0)
1145 && (OSI_GET_LOCKID() == avc->vc_rwlockid));
1146 if (length < avc->f.m.Length) {
1147 vnode_t *vp = (vnode_t *) avc;
1149 osi_Assert(WriteLocked(&avc->lock));
1150 ReleaseWriteLock(&avc->lock);
1152 PTOSSVP(vp, (off_t) length, (off_t) MAXLONG);
1154 ObtainWriteLock(&avc->lock, 67);
1159 if (!afs_DirtyPages(avc)) {
1160 /* if actively writing the file, don't fetch over this value */
1161 afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT, ICL_TYPE_POINTER, avc,
1162 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
1163 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1164 avc->f.m.Length = length;
1165 avc->f.m.Date = astat->ClientModTime;
1167 avc->f.m.Owner = astat->Owner;
1168 avc->f.m.Group = astat->Group;
1169 avc->f.m.Mode = astat->UnixModeBits;
1170 if (vType(avc) == VREG) {
1171 avc->f.m.Mode |= S_IFREG;
1172 } else if (vType(avc) == VDIR) {
1173 avc->f.m.Mode |= S_IFDIR;
1174 } else if (vType(avc) == VLNK) {
1175 avc->f.m.Mode |= S_IFLNK;
1176 if ((avc->f.m.Mode & 0111) == 0)
1179 if (avc->f.states & CForeign) {
1180 struct axscache *ac;
1181 avc->f.anyAccess = astat->AnonymousAccess;
1183 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1185 * Caller has at least one bit not covered by anonymous, and
1186 * thus may have interesting rights.
1188 * HOWEVER, this is a really bad idea, because any access query
1189 * for bits which aren't covered by anonymous, on behalf of a user
1190 * who doesn't have any special rights, will result in an answer of
1191 * the form "I don't know, let's make a FetchStatus RPC and find out!"
1192 * It's an especially bad idea under Ultrix, since (due to the lack of
1193 * a proper access() call) it must perform several afs_access() calls
1194 * in order to create magic mode bits that vary according to who makes
1195 * the call. In other words, _every_ stat() generates a test for
1198 #endif /* badidea */
1199 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1200 ac->axess = astat->CallerAccess;
1201 else /* not found, add a new one if possible */
1202 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1205 } /*afs_SimpleVStat */
1209 * Store the status info *only* back to the server for a
1212 * Environment: Must be called with a shared lock held on the vnode.
1214 * \param avc Ptr to the vcache entry.
1215 * \param astatus Ptr to the status info to store.
1216 * \param areq Ptr to the associated vrequest.
1218 * \return Operation status.
1222 afs_WriteVCache(struct vcache *avc,
1223 struct AFSStoreStatus *astatus,
1224 struct vrequest *areq)
1227 struct afs_conn *tc;
1228 struct AFSFetchStatus OutStatus;
1229 struct AFSVolSync tsync;
1231 AFS_STATCNT(afs_WriteVCache);
1232 afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
1233 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
1235 tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK);
1237 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
1240 RXAFS_StoreStatus(tc->id, (struct AFSFid *)&avc->f.fid.Fid,
1241 astatus, &OutStatus, &tsync);
1246 } while (afs_Analyze
1247 (tc, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
1248 SHARED_LOCK, NULL));
1250 UpgradeSToWLock(&avc->lock, 20);
1252 /* success, do the changes locally */
1253 afs_SimpleVStat(avc, &OutStatus, areq);
1255 * Update the date, too. SimpleVStat didn't do this, since
1256 * it thought we were doing this after fetching new status
1257 * over a file being written.
1259 avc->f.m.Date = OutStatus.ClientModTime;
1261 /* failure, set up to check with server next time */
1262 ObtainWriteLock(&afs_xcbhash, 462);
1263 afs_DequeueCallback(avc);
1264 avc->f.states &= ~(CStatd | CUnique); /* turn off stat valid flag */
1265 ReleaseWriteLock(&afs_xcbhash);
1266 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
1267 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
1269 ConvertWToSLock(&avc->lock);
1272 } /*afs_WriteVCache */
1275 * Store status info only locally, set the proper disconnection flags
1276 * and add to dirty list.
1278 * \param avc The vcache to be written locally.
1279 * \param astatus Get attr fields from local store.
1280 * \param attrs This one is only of the vs_size.
1282 * \note Must be called with a shared lock on the vnode
1285 afs_WriteVCacheDiscon(struct vcache *avc,
1286 struct AFSStoreStatus *astatus,
1287 struct vattr *attrs)
1290 afs_int32 flags = 0;
1292 UpgradeSToWLock(&avc->lock, 700);
1294 if (!astatus->Mask) {
1300 /* Set attributes. */
1301 if (astatus->Mask & AFS_SETMODTIME) {
1302 avc->f.m.Date = astatus->ClientModTime;
1303 flags |= VDisconSetTime;
1306 if (astatus->Mask & AFS_SETOWNER) {
1307 /* printf("Not allowed yet. \n"); */
1308 /*avc->f.m.Owner = astatus->Owner;*/
1311 if (astatus->Mask & AFS_SETGROUP) {
1312 /* printf("Not allowed yet. \n"); */
1313 /*avc->f.m.Group = astatus->Group;*/
1316 if (astatus->Mask & AFS_SETMODE) {
1317 avc->f.m.Mode = astatus->UnixModeBits;
1319 #if 0 /* XXX: Leaving this out, so it doesn't mess up the file type flag.*/
1321 if (vType(avc) == VREG) {
1322 avc->f.m.Mode |= S_IFREG;
1323 } else if (vType(avc) == VDIR) {
1324 avc->f.m.Mode |= S_IFDIR;
1325 } else if (vType(avc) == VLNK) {
1326 avc->f.m.Mode |= S_IFLNK;
1327 if ((avc->f.m.Mode & 0111) == 0)
1331 flags |= VDisconSetMode;
1332 } /* if(astatus.Mask & AFS_SETMODE) */
1334 } /* if (!astatus->Mask) */
1336 if (attrs->va_size > 0) {
1337 /* XXX: Do I need more checks? */
1338 /* Truncation operation. */
1339 flags |= VDisconTrunc;
1343 afs_DisconAddDirty(avc, flags, 1);
1345 /* XXX: How about the rest of the fields? */
1347 ConvertWToSLock(&avc->lock);
1353 * Copy astat block into vcache info
1355 * \note This code may get dataversion and length out of sync if the file has
1356 * been modified. This is less than ideal. I haven't thought about it sufficiently
1357 * to be certain that it is adequate.
1359 * \note Environment: Must be called under a write lock
1361 * \param avc Ptr to vcache entry.
1362 * \param astat Ptr to stat block to copy in.
1363 * \param areq Ptr to associated request.
1366 afs_ProcessFS(struct vcache *avc,
1367 struct AFSFetchStatus *astat, struct vrequest *areq)
1370 AFS_STATCNT(afs_ProcessFS);
1372 #ifdef AFS_64BIT_CLIENT
1373 FillInt64(length, astat->Length_hi, astat->Length);
1374 #else /* AFS_64BIT_CLIENT */
1375 length = astat->Length;
1376 #endif /* AFS_64BIT_CLIENT */
1377 /* WARNING: afs_DoBulkStat uses the Length field to store a sequence
1378 * number for each bulk status request. Under no circumstances
1379 * should afs_DoBulkStat store a sequence number if the new
1380 * length will be ignored when afs_ProcessFS is called with
1381 * new stats. If you change the following conditional then you
1382 * also need to change the conditional in afs_DoBulkStat. */
1384 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1385 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1387 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
1389 /* if we're writing or mapping this file, don't fetch over these
1392 afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
1393 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
1394 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1395 avc->f.m.Length = length;
1396 avc->f.m.Date = astat->ClientModTime;
1398 hset64(avc->f.m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
1399 avc->f.m.Owner = astat->Owner;
1400 avc->f.m.Mode = astat->UnixModeBits;
1401 avc->f.m.Group = astat->Group;
1402 avc->f.m.LinkCount = astat->LinkCount;
1403 if (astat->FileType == File) {
1404 vSetType(avc, VREG);
1405 avc->f.m.Mode |= S_IFREG;
1406 } else if (astat->FileType == Directory) {
1407 vSetType(avc, VDIR);
1408 avc->f.m.Mode |= S_IFDIR;
1409 } else if (astat->FileType == SymbolicLink) {
1410 if (afs_fakestat_enable && (avc->f.m.Mode & 0111) == 0) {
1411 vSetType(avc, VDIR);
1412 avc->f.m.Mode |= S_IFDIR;
1414 vSetType(avc, VLNK);
1415 avc->f.m.Mode |= S_IFLNK;
1417 if ((avc->f.m.Mode & 0111) == 0) {
1421 avc->f.anyAccess = astat->AnonymousAccess;
1423 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1425 * Caller has at least one bit not covered by anonymous, and
1426 * thus may have interesting rights.
1428 * HOWEVER, this is a really bad idea, because any access query
1429 * for bits which aren't covered by anonymous, on behalf of a user
1430 * who doesn't have any special rights, will result in an answer of
1431 * the form "I don't know, let's make a FetchStatus RPC and find out!"
1432 * It's an especially bad idea under Ultrix, since (due to the lack of
1433 * a proper access() call) it must perform several afs_access() calls
1434 * in order to create magic mode bits that vary according to who makes
1435 * the call. In other words, _every_ stat() generates a test for
1438 #endif /* badidea */
1440 struct axscache *ac;
1441 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1442 ac->axess = astat->CallerAccess;
1443 else /* not found, add a new one if possible */
1444 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1446 } /*afs_ProcessFS */
1450 * Get fid from server.
1453 * \param areq Request to be passed on.
1454 * \param name Name of ?? to look up.
1455 * \param OutStatus Fetch status.
1460 * \return Success status of operation.
1463 afs_RemoteLookup(struct VenusFid *afid, struct vrequest *areq,
1464 char *name, struct VenusFid *nfid,
1465 struct AFSFetchStatus *OutStatusp,
1466 struct AFSCallBack *CallBackp, struct server **serverp,
1467 struct AFSVolSync *tsyncp)
1471 struct afs_conn *tc;
1472 struct AFSFetchStatus OutDirStatus;
1475 name = ""; /* XXX */
1477 tc = afs_Conn(afid, areq, SHARED_LOCK);
1480 *serverp = tc->srvr->server;
1482 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
1485 RXAFS_Lookup(tc->id, (struct AFSFid *)&afid->Fid, name,
1486 (struct AFSFid *)&nfid->Fid, OutStatusp,
1487 &OutDirStatus, CallBackp, tsyncp);
1492 } while (afs_Analyze
1493 (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
1503 * Given a file id and a vrequest structure, fetch the status
1504 * information associated with the file.
1506 * \param afid File ID.
1507 * \param areq Ptr to associated vrequest structure, specifying the
1508 * user whose authentication tokens will be used.
1509 * \param avc Caller may already have a vcache for this file, which is
1512 * \note Environment:
1513 * The cache entry is returned with an increased vrefCount field.
1514 * The entry must be discarded by calling afs_PutVCache when you
1515 * are through using the pointer to the cache entry.
1517 * You should not hold any locks when calling this function, except
1518 * locks on other vcache entries. If you lock more than one vcache
1519 * entry simultaneously, you should lock them in this order:
1521 * 1. Lock all files first, then directories.
1522 * 2. Within a particular type, lock entries in Fid.Vnode order.
1524 * This locking hierarchy is convenient because it allows locking
1525 * of a parent dir cache entry, given a file (to check its access
1526 * control list). It also allows renames to be handled easily by
1527 * locking directories in a constant order.
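 * A hypothetical two-entry example of that ordering (made-up lock ids): lock
 * the file's entry before its parent directory's entry, and order same-type
 * entries by Fid.Vnode:
 *
 *     ObtainWriteLock(&file_vc->lock, 996);
 *     ObtainWriteLock(&dir_vc->lock, 997);
 *     ...
 *     ReleaseWriteLock(&dir_vc->lock);
 *     ReleaseWriteLock(&file_vc->lock);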
1529 * \note NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
1531 * \note Might have a vcache structure already, which must
1532 * already be held by the caller
1535 afs_GetVCache(struct VenusFid *afid, struct vrequest *areq,
1536 afs_int32 * cached, struct vcache *avc)
1539 afs_int32 code, newvcache = 0;
1544 AFS_STATCNT(afs_GetVCache);
1547 *cached = 0; /* Init just in case */
1549 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1553 ObtainSharedLock(&afs_xvcache, 5);
1555 tvc = afs_FindVCache(afid, &retry, DO_STATS | DO_VLRU | IS_SLOCK);
1557 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1558 ReleaseSharedLock(&afs_xvcache);
1559 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1566 osi_Assert((tvc->f.states & CVInit) == 0);
1567 /* If we are in readdir, return the vnode even if not statd */
1568 if ((tvc->f.states & CStatd) || afs_InReadDir(tvc)) {
1569 ReleaseSharedLock(&afs_xvcache);
1573 UpgradeSToWLock(&afs_xvcache, 21);
1575 /* no cache entry, better grab one */
1576 tvc = afs_NewVCache(afid, NULL);
1579 ConvertWToSLock(&afs_xvcache);
1582 ReleaseSharedLock(&afs_xvcache);
1586 afs_stats_cmperf.vcacheMisses++;
1589 ReleaseSharedLock(&afs_xvcache);
1591 ObtainWriteLock(&tvc->lock, 54);
1593 if (tvc->f.states & CStatd) {
1594 ReleaseWriteLock(&tvc->lock);
1597 #ifdef AFS_DARWIN80_ENV
1598 /* Darwin 8.0 only has bufs in nfs, so we shouldn't have to worry about them.
1601 #if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
1603 * XXX - I really don't like this. Should try to understand better.
1604 * It seems that sometimes, when we get called, we already hold the
1605 * lock on the vnode (e.g., from afs_getattr via afs_VerifyVCache).
1606 * We can't drop the vnode lock, because that could result in a race.
1607 * Sometimes, though, we get here and don't hold the vnode lock.
1608 * I hate code paths that sometimes hold locks and sometimes don't.
1609 * In any event, the dodge we use here is to check whether the vnode
1610 * is locked, and if it isn't, then we gain and drop it around the call
1611 * to vinvalbuf; otherwise, we leave it alone.
1614 struct vnode *vp = AFSTOV(tvc);
1617 #if defined(AFS_DARWIN_ENV)
1618 iheldthelock = VOP_ISLOCKED(vp);
1620 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, current_proc());
1621 /* this is messy. we can call fsync which will try to reobtain this */
1622 if (VTOAFS(vp) == tvc)
1623 ReleaseWriteLock(&tvc->lock);
1624 if (UBCINFOEXISTS(vp)) {
1625 vinvalbuf(vp, V_SAVE, &afs_osi_cred, current_proc(), PINOD, 0);
1627 if (VTOAFS(vp) == tvc)
1628 ObtainWriteLock(&tvc->lock, 954);
1630 VOP_UNLOCK(vp, LK_EXCLUSIVE, current_proc());
1631 #elif defined(AFS_FBSD80_ENV)
1632 iheldthelock = VOP_ISLOCKED(vp);
1633 if (!iheldthelock) {
1634 /* nosleep/sleep lock order reversal */
1635 int glocked = ISAFS_GLOCK();
1638 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1642 vinvalbuf(vp, V_SAVE, PINOD, 0); /* changed late in 8.0-CURRENT */
1645 #elif defined(AFS_FBSD60_ENV)
1646 iheldthelock = VOP_ISLOCKED(vp, curthread);
1648 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
1649 vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
1651 VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
1652 #elif defined(AFS_FBSD_ENV)
1653 iheldthelock = VOP_ISLOCKED(vp, curthread);
1655 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
1656 vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
1658 VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
1659 #elif defined(AFS_OBSD_ENV)
1660 iheldthelock = VOP_ISLOCKED(vp, curproc);
1662 VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
1663 uvm_vnp_uncache(vp);
1665 VOP_UNLOCK(vp, 0, curproc);
1666 #elif defined(AFS_NBSD40_ENV)
1667 iheldthelock = VOP_ISLOCKED(vp);
1668 if (!iheldthelock) {
1669 VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY);
1671 uvm_vnp_uncache(vp);
1679 ObtainWriteLock(&afs_xcbhash, 464);
1680 tvc->f.states &= ~CUnique;
1682 afs_DequeueCallback(tvc);
1683 ReleaseWriteLock(&afs_xcbhash);
1685 /* Is it always appropriate to throw away all the access rights? */
1686 afs_FreeAllAxs(&(tvc->Access));
1687 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-volume info */
1689 if ((tvp->states & VForeign)) {
1691 tvc->f.states |= CForeign;
1692 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
1693 && (tvp->rootUnique == afid->Fid.Unique)) {
1697 if (tvp->states & VRO)
1698 tvc->f.states |= CRO;
1699 if (tvp->states & VBackup)
1700 tvc->f.states |= CBackup;
1701 /* now copy ".." entry back out of volume structure, if necessary */
1702 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
1704 tvc->mvid = (struct VenusFid *)
1705 osi_AllocSmallSpace(sizeof(struct VenusFid));
1706 *tvc->mvid = tvp->dotdot;
1708 afs_PutVolume(tvp, READ_LOCK);
1712 afs_RemoveVCB(afid);
1714 struct AFSFetchStatus OutStatus;
1716 if (afs_DynrootNewVnode(tvc, &OutStatus)) {
1717 afs_ProcessFS(tvc, &OutStatus, areq);
1718 tvc->f.states |= CStatd | CUnique;
1719 tvc->f.parent.vnode = OutStatus.ParentVnode;
1720 tvc->f.parent.unique = OutStatus.ParentUnique;
1724 if (AFS_IS_DISCONNECTED) {
1725 /* Nothing to do otherwise...*/
1727 /* printf("Network is down in afs_GetCache"); */
1729 code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
1731 /* For the NFS translator's benefit, make sure
1732 * non-directory vnodes always have their parent FID set
1733 * correctly, even when created as a result of decoding an
1734 * NFS filehandle. It would be nice to also do this for
1735 * directories, but we can't because the fileserver fills
1736 * in the FID of the directory itself instead of that of its parent.
1739 if (!code && OutStatus.FileType != Directory &&
1740 !tvc->f.parent.vnode) {
1741 tvc->f.parent.vnode = OutStatus.ParentVnode;
1742 tvc->f.parent.unique = OutStatus.ParentUnique;
1743 /* XXX - SXW - It's conceivable we should mark ourselves
1744 * as dirty again here, in case we've been raced
1745 * out of the FetchStatus call.
1752 ReleaseWriteLock(&tvc->lock);
1758 ReleaseWriteLock(&tvc->lock);
1761 } /*afs_GetVCache */
1766 * Look up a vcache by fid. Look inside the cache first; if it is not
1767 * there, look up the file on the server, and then get its fresh
1772 * \param cached Is element cached? If NULL, don't answer.
1776 * \return The found element or NULL.
1779 afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
1780 afs_int32 * cached, struct vcache *adp, char *aname)
1782 afs_int32 code, now, newvcache = 0;
1783 struct VenusFid nfid;
1786 struct AFSFetchStatus OutStatus;
1787 struct AFSCallBack CallBack;
1788 struct AFSVolSync tsync;
1789 struct server *serverp = 0;
1793 AFS_STATCNT(afs_GetVCache);
1795 *cached = 0; /* Init just in case */
1797 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1801 ObtainReadLock(&afs_xvcache);
1802 tvc = afs_FindVCache(afid, &retry, DO_STATS /* no vlru */ );
1805 ReleaseReadLock(&afs_xvcache);
1807 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1808 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1812 ObtainReadLock(&tvc->lock);
1814 if (tvc->f.states & CStatd) {
1818 ReleaseReadLock(&tvc->lock);
1821 tvc->f.states &= ~CUnique;
1823 ReleaseReadLock(&tvc->lock);
1825 ObtainReadLock(&afs_xvcache);
1828 ReleaseReadLock(&afs_xvcache);
1830 /* lookup the file */
1833 origCBs = afs_allCBs; /* if anything changes, we don't have a cb */
1835 if (AFS_IS_DISCONNECTED) {
1836 /* printf("Network is down in afs_LookupVcache\n"); */
1840 afs_RemoteLookup(&adp->f.fid, areq, aname, &nfid, &OutStatus,
1841 &CallBack, &serverp, &tsync);
1843 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1847 ObtainSharedLock(&afs_xvcache, 6);
1848 tvc = afs_FindVCache(&nfid, &retry, DO_VLRU | IS_SLOCK/* no xstats now */ );
1850 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1851 ReleaseSharedLock(&afs_xvcache);
1852 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1858 /* no cache entry, better grab one */
1859 UpgradeSToWLock(&afs_xvcache, 22);
1860 tvc = afs_NewVCache(&nfid, serverp);
1862 ConvertWToSLock(&afs_xvcache);
1865 ReleaseSharedLock(&afs_xvcache);
1870 ReleaseSharedLock(&afs_xvcache);
1871 ObtainWriteLock(&tvc->lock, 55);
1873 /* Is it always appropriate to throw away all the access rights? */
1874 afs_FreeAllAxs(&(tvc->Access));
1875 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-vol info */
1877 if ((tvp->states & VForeign)) {
1879 tvc->f.states |= CForeign;
1880 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
1881 && (tvp->rootUnique == afid->Fid.Unique))
1884 if (tvp->states & VRO)
1885 tvc->f.states |= CRO;
1886 if (tvp->states & VBackup)
1887 tvc->f.states |= CBackup;
1888 /* now copy ".." entry back out of volume structure, if necessary */
1889 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
1891 tvc->mvid = (struct VenusFid *)
1892 osi_AllocSmallSpace(sizeof(struct VenusFid));
1893 *tvc->mvid = tvp->dotdot;
1898 ObtainWriteLock(&afs_xcbhash, 465);
1899 afs_DequeueCallback(tvc);
1900 tvc->f.states &= ~(CStatd | CUnique);
1901 ReleaseWriteLock(&afs_xcbhash);
1902 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
1903 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
1905 afs_PutVolume(tvp, READ_LOCK);
1906 ReleaseWriteLock(&tvc->lock);
1911 ObtainWriteLock(&afs_xcbhash, 466);
1912 if (origCBs == afs_allCBs) {
1913 if (CallBack.ExpirationTime) {
1914 tvc->callback = serverp;
1915 tvc->cbExpires = CallBack.ExpirationTime + now;
1916 tvc->f.states |= CStatd | CUnique;
1917 tvc->f.states &= ~CBulkFetching;
1918 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
1919 } else if (tvc->f.states & CRO) {
1920 /* adapt gives us an hour. */
1921 tvc->cbExpires = 3600 + osi_Time();
1922 /*XXX*/ tvc->f.states |= CStatd | CUnique;
1923 tvc->f.states &= ~CBulkFetching;
1924 afs_QueueCallback(tvc, CBHash(3600), tvp);
1926 tvc->callback = NULL;
1927 afs_DequeueCallback(tvc);
1928 tvc->f.states &= ~(CStatd | CUnique);
1929 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
1930 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
1933 afs_DequeueCallback(tvc);
1934 tvc->f.states &= ~CStatd;
1935 tvc->f.states &= ~CUnique;
1936 tvc->callback = NULL;
1937 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
1938 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
1940 ReleaseWriteLock(&afs_xcbhash);
1942 afs_PutVolume(tvp, READ_LOCK);
1943 afs_ProcessFS(tvc, &OutStatus, areq);
1945 ReleaseWriteLock(&tvc->lock);
1951 afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
1952 afs_int32 * cached, struct volume *tvolp)
1954 afs_int32 code = 0, i, newvcache = 0, haveStatus = 0;
1955 afs_int32 getNewFid = 0;
1957 struct VenusFid nfid;
1959 struct server *serverp = 0;
1960 struct AFSFetchStatus OutStatus;
1961 struct AFSCallBack CallBack;
1962 struct AFSVolSync tsync;
1964 #ifdef AFS_DARWIN80_ENV
1971 if (!tvolp->rootVnode || getNewFid) {
1972 struct VenusFid tfid;
1975 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
1976 origCBs = afs_allCBs; /* ignore InitCallBackState */
1978 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
1983 /* ReleaseReadLock(&tvolp->lock); */
1984 ObtainWriteLock(&tvolp->lock, 56);
1985 tvolp->rootVnode = afid->Fid.Vnode = nfid.Fid.Vnode;
1986 tvolp->rootUnique = afid->Fid.Unique = nfid.Fid.Unique;
1987 ReleaseWriteLock(&tvolp->lock);
1988 /* ObtainReadLock(&tvolp->lock);*/
1991 afid->Fid.Vnode = tvolp->rootVnode;
1992 afid->Fid.Unique = tvolp->rootUnique;
1996 ObtainSharedLock(&afs_xvcache, 7);
1998 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
1999 if (!FidCmp(&(tvc->f.fid), afid)) {
2000 if (tvc->f.states & CVInit) {
2001 ReleaseSharedLock(&afs_xvcache);
2002 afs_osi_Sleep(&tvc->f.states);
2005 #ifdef AFS_DARWIN80_ENV
2006 if (tvc->f.states & CDeadVnode) {
2007 if (!(tvc->f.states & CBulkFetching)) {
2008 ReleaseSharedLock(&afs_xvcache);
2009 afs_osi_Sleep(&tvc->f.states);
2014 if (vnode_get(tvp)) /* this bumps ref count */
2016 if (vnode_ref(tvp)) {
2018 /* AFSTOV(tvc) may be NULL */
2023 if (tvc->f.states & (CBulkFetching|CDeadVnode)) {
2025 vnode_recycle(AFSTOV(tvc));
2033 if (!haveStatus && (!tvc || !(tvc->f.states & CStatd))) {
2034 /* Mount point no longer stat'd or unknown. FID may have changed. */
2036 ReleaseSharedLock(&afs_xvcache);
2037 #ifdef AFS_DARWIN80_ENV
2040 vnode_put(AFSTOV(tvc));
2041 vnode_rele(AFSTOV(tvc));
2050 UpgradeSToWLock(&afs_xvcache, 23);
2051 /* no cache entry, better grab one */
2052 tvc = afs_NewVCache(afid, NULL);
2055 ReleaseWriteLock(&afs_xvcache);
2059 afs_stats_cmperf.vcacheMisses++;
2063 afs_stats_cmperf.vcacheHits++;
2064 #if defined(AFS_DARWIN80_ENV)
2065 /* we already bumped the ref count in the for loop above */
2066 #else /* AFS_DARWIN80_ENV */
2069 UpgradeSToWLock(&afs_xvcache, 24);
2070 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2071 refpanic("GRVC VLRU inconsistent0");
2073 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2074 refpanic("GRVC VLRU inconsistent1");
2076 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2077 refpanic("GRVC VLRU inconsistent2");
2079 QRemove(&tvc->vlruq); /* move to lruq head */
2080 QAdd(&VLRU, &tvc->vlruq);
2081 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2082 refpanic("GRVC VLRU inconsistent3");
2084 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2085 refpanic("GRVC VLRU inconsistent4");
2087 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2088 refpanic("GRVC VLRU inconsistent5");
2093 ReleaseWriteLock(&afs_xvcache);
2095 if (tvc->f.states & CStatd) {
2099 ObtainReadLock(&tvc->lock);
2100 tvc->f.states &= ~CUnique;
2101 tvc->callback = NULL; /* redundant, perhaps */
2102 ReleaseReadLock(&tvc->lock);
2105 ObtainWriteLock(&tvc->lock, 57);
2107 /* Is it always appropriate to throw away all the access rights? */
2108 afs_FreeAllAxs(&(tvc->Access));
2111 tvc->f.states |= CForeign;
2112 if (tvolp->states & VRO)
2113 tvc->f.states |= CRO;
2114 if (tvolp->states & VBackup)
2115 tvc->f.states |= CBackup;
2116 /* now copy ".." entry back out of volume structure, if necessary */
2117 if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
2118 && (tvolp->rootUnique == afid->Fid.Unique)) {
2121 if (tvc->mvstat == 2 && tvolp->dotdot.Fid.Volume != 0) {
2123 tvc->mvid = (struct VenusFid *)
2124 osi_AllocSmallSpace(sizeof(struct VenusFid));
2125 *tvc->mvid = tvolp->dotdot;
2129 afs_RemoveVCB(afid);
2132 struct VenusFid tfid;
2135 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2136 origCBs = afs_allCBs; /* ignore InitCallBackState */
2138 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2143 ObtainWriteLock(&afs_xcbhash, 467);
2144 afs_DequeueCallback(tvc);
2145 tvc->callback = NULL;
2146 tvc->f.states &= ~(CStatd | CUnique);
2147 ReleaseWriteLock(&afs_xcbhash);
2148 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2149 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2150 ReleaseWriteLock(&tvc->lock);
2155 ObtainWriteLock(&afs_xcbhash, 468);
2156 if (origCBs == afs_allCBs) {
2157 tvc->f.states |= CTruth;
2158 tvc->callback = serverp;
2159 if (CallBack.ExpirationTime != 0) {
2160 tvc->cbExpires = CallBack.ExpirationTime + start;
2161 tvc->f.states |= CStatd;
2162 tvc->f.states &= ~CBulkFetching;
2163 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
2164 } else if (tvc->f.states & CRO) {
2165 /* adapt gives us an hour. */
2166 tvc->cbExpires = 3600 + osi_Time();
2167 /*XXX*/ tvc->f.states |= CStatd;
2168 tvc->f.states &= ~CBulkFetching;
2169 afs_QueueCallback(tvc, CBHash(3600), tvolp);
2172 afs_DequeueCallback(tvc);
2173 tvc->callback = NULL;
2174 tvc->f.states &= ~(CStatd | CUnique);
2175 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2176 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2178 ReleaseWriteLock(&afs_xcbhash);
2179 afs_ProcessFS(tvc, &OutStatus, areq);
2181 ReleaseWriteLock(&tvc->lock);
2187 * Update callback status and (sometimes) attributes of a vnode.
2188 * Called after doing a fetch status RPC. Whilst disconnected, attributes
2189 * shouldn't be written to the vcache here.
2194 * \param Outsp Server status after rpc call.
2195 * \param acb Callback for this vnode.
2197 * \note The vcache must be write locked.
2200 afs_UpdateStatus(struct vcache *avc, struct VenusFid *afid,
2201 struct vrequest *areq, struct AFSFetchStatus *Outsp,
2202 struct AFSCallBack *acb, afs_uint32 start)
2204 struct volume *volp;
2207 /* Don't write status in vcache if resyncing after a disconnection. */
2208 afs_ProcessFS(avc, Outsp, areq);
2210 volp = afs_GetVolume(afid, areq, READ_LOCK);
2211 ObtainWriteLock(&afs_xcbhash, 469);
2212 avc->f.states |= CTruth;
2213 if (avc->callback /* check for race */ ) {
2214 if (acb->ExpirationTime != 0) {
2215 avc->cbExpires = acb->ExpirationTime + start;
2216 avc->f.states |= CStatd;
2217 avc->f.states &= ~CBulkFetching;
2218 afs_QueueCallback(avc, CBHash(acb->ExpirationTime), volp);
2219 } else if (avc->f.states & CRO) {
2220 /* ordinary callback on a read-only volume -- AFS 3.2 style */
2221 avc->cbExpires = 3600 + start;
2222 avc->f.states |= CStatd;
2223 avc->f.states &= ~CBulkFetching;
2224 afs_QueueCallback(avc, CBHash(3600), volp);
2226 afs_DequeueCallback(avc);
2227 avc->callback = NULL;
2228 avc->f.states &= ~(CStatd | CUnique);
2229 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
2230 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2233 afs_DequeueCallback(avc);
2234 avc->callback = NULL;
2235 avc->f.states &= ~(CStatd | CUnique);
2236 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
2237 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2239 ReleaseWriteLock(&afs_xcbhash);
2241 afs_PutVolume(volp, READ_LOCK);
2245 * Must be called with avc write-locked
2246 * We don't absolutely have to invalidate the hint unless the dv has
2247 * changed, but be sure to get it right, or else there will be consistency bugs.
2250 afs_FetchStatus(struct vcache * avc, struct VenusFid * afid,
2251 struct vrequest * areq, struct AFSFetchStatus * Outsp)
2254 afs_uint32 start = 0;
2255 struct afs_conn *tc;
2256 struct AFSCallBack CallBack;
2257 struct AFSVolSync tsync;
2260 tc = afs_Conn(afid, areq, SHARED_LOCK);
2261 avc->dchint = NULL; /* invalidate hints */
2263 avc->callback = tc->srvr->server;
2265 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
2268 RXAFS_FetchStatus(tc->id, (struct AFSFid *)&afid->Fid, Outsp,
2276 } while (afs_Analyze
2277 (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
2278 SHARED_LOCK, NULL));
2281 afs_UpdateStatus(avc, afid, areq, Outsp, &CallBack, start);
2283 /* used to undo the local callback, but that's too extreme.
2284 * There are plenty of good reasons that fetchstatus might return
2285 * an error, such as EPERM. If we have the vnode cached, statd,
2286 * with callback, might as well keep track of the fact that we
2287 * don't have access...
2289 if (code == EPERM || code == EACCES) {
2290 struct axscache *ac;
2291 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
2293 else /* not found, add a new one if possible */
2294 afs_AddAxs(avc->Access, areq->uid, 0);
2305 * Stuff some information into the vcache for the given file.
2308 * afid : File in question.
2309 * OutStatus : Fetch status on the file.
2310 * CallBack : Callback info.
2311 * tc : RPC connection involved.
2312 * areq : vrequest involved.
2315 * Nothing interesting.
2318 afs_StuffVcache(struct VenusFid *afid,
2319 struct AFSFetchStatus *OutStatus,
2320 struct AFSCallBack *CallBack, struct afs_conn *tc,
2321 struct vrequest *areq)
2323 afs_int32 code, i, newvcache = 0;
2325 struct AFSVolSync tsync;
2327 struct axscache *ac;
2330 AFS_STATCNT(afs_StuffVcache);
2331 #ifdef IFS_VCACHECOUNT
2336 ObtainSharedLock(&afs_xvcache, 8);
2338 tvc = afs_FindVCache(afid, &retry, DO_VLRU| IS_SLOCK /* no stats */ );
2340 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2341 ReleaseSharedLock(&afs_xvcache);
2342 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2348 /* no cache entry, better grab one */
2349 UpgradeSToWLock(&afs_xvcache, 25);
2350 tvc = afs_NewVCache(afid, NULL);
2352 ConvertWToSLock(&afs_xvcache);
2355 ReleaseSharedLock(&afs_xvcache);
2360 ReleaseSharedLock(&afs_xvcache);
2361 ObtainWriteLock(&tvc->lock, 58);
2363 tvc->f.states &= ~CStatd;
2364 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2365 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2367 /* Is it always appropriate to throw away all the access rights? */
2368 afs_FreeAllAxs(&(tvc->Access));
2370 /*Copy useful per-volume info */
2371 tvp = afs_GetVolume(afid, areq, READ_LOCK);
2373 if (newvcache && (tvp->states & VForeign))
2374 tvc->f.states |= CForeign;
2375 if (tvp->states & VRO)
2376 tvc->f.states |= CRO;
2377 if (tvp->states & VBackup)
2378 tvc->f.states |= CBackup;
2380 * Now, copy ".." entry back out of volume structure, if
2383 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2385 tvc->mvid = (struct VenusFid *)
2386 osi_AllocSmallSpace(sizeof(struct VenusFid));
2387 *tvc->mvid = tvp->dotdot;
2390 /* store the stat on the file */
2391 afs_RemoveVCB(afid);
2392 afs_ProcessFS(tvc, OutStatus, areq);
2393 tvc->callback = tc->srvr->server;
2395 /* we use osi_Time twice below. Ideally, we would use the time at which
2396 * the FetchStatus call began, instead, but we don't have it here. So we
2397 * make do with "now". In the CRO case, it doesn't really matter. In
2398 * the other case, we hope that the difference between "now" and when the
2399 * call actually began execution on the server won't be larger than the
2400 * padding which the server keeps. Subtract 1 second anyway, to be on
2401 * the safe side. Can't subtract more because we don't know how big
2402 * ExpirationTime is. Possible consistency problems may arise if the call
2403 * timeout period becomes longer than the server's expiration padding. */
2404 ObtainWriteLock(&afs_xcbhash, 470);
2405 if (CallBack->ExpirationTime != 0) {
2406 tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
2407 tvc->f.states |= CStatd;
2408 tvc->f.states &= ~CBulkFetching;
2409 afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
2410 } else if (tvc->f.states & CRO) {
2411 /* old-fashioned AFS 3.2 style */
2412 tvc->cbExpires = 3600 + osi_Time();
2413 /*XXX*/ tvc->f.states |= CStatd;
2414 tvc->f.states &= ~CBulkFetching;
2415 afs_QueueCallback(tvc, CBHash(3600), tvp);
2417 afs_DequeueCallback(tvc);
2418 tvc->callback = NULL;
2419 tvc->f.states &= ~(CStatd | CUnique);
2420 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2421 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2423 ReleaseWriteLock(&afs_xcbhash);
2425 afs_PutVolume(tvp, READ_LOCK);
2427 /* look in per-pag cache */
2428 if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
2429 ac->axess = OutStatus->CallerAccess; /* substitute pags */
2430 else /* not found, add a new one if possible */
2431 afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);
2433 ReleaseWriteLock(&tvc->lock);
2434 afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
2435 ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32,
2436 tvc->cbExpires, ICL_TYPE_INT32, tvc->cbExpires - osi_Time());
2438 * Release ref count... hope this guy stays around...
2441 } /*afs_StuffVcache */
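/*
 * Illustrative sketch (hypothetical helper, kept out of the build): the
 * callback bookkeeping above turns the server-relative ExpirationTime into
 * an absolute local expiry, padded by one second on the safe side, as the
 * long comment before it explains.
 */
#if 0
static afs_uint32
example_cb_expiry(afs_uint32 expiration_secs)
{
    /* osi_Time() is the current local time in seconds */
    return expiration_secs + osi_Time() - 1;
}
#endif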
2445 * Decrements the reference count on a cache entry.
2447 * \param avc Pointer to the cache entry to decrement.
2449 * \note Environment: Nothing interesting.
2452 afs_PutVCache(struct vcache *avc)
2454 AFS_STATCNT(afs_PutVCache);
2455 #ifdef AFS_DARWIN80_ENV
2456 vnode_put(AFSTOV(avc));
2460 * Can we use a read lock here?
2462 ObtainReadLock(&afs_xvcache);
2464 ReleaseReadLock(&afs_xvcache);
2466 } /*afs_PutVCache */
2470  * Reset a vcache entry, so that local contents are ignored and the
2471  * server will be reconsulted the next time the vcache is used.
2473 * \param avc Pointer to the cache entry to reset
2476 * \note avc must be write locked on entry
2479 afs_ResetVCache(struct vcache *avc, afs_ucred_t *acred)
2481 ObtainWriteLock(&afs_xcbhash, 456);
2482 afs_DequeueCallback(avc);
2483 avc->f.states &= ~(CStatd | CDirty); /* next reference will re-stat */
2484 ReleaseWriteLock(&afs_xcbhash);
2485 /* now find the disk cache entries */
2486 afs_TryToSmush(avc, acred, 1);
2487 osi_dnlc_purgedp(avc);
2488 if (avc->linkData && !(avc->f.states & CCore)) {
2489 afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
2490 avc->linkData = NULL;
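/*
 * Illustrative usage sketch (hypothetical caller, kept out of the build):
 * per the note above, afs_ResetVCache expects the vcache to be write-locked
 * on entry.  The lock id 999 is an arbitrary placeholder.
 */
#if 0
static void
example_reset_vcache(struct vcache *avc, afs_ucred_t *acred)
{
    ObtainWriteLock(&avc->lock, 999);
    afs_ResetVCache(avc, acred);	/* next reference will re-stat from the server */
    ReleaseWriteLock(&avc->lock);
}
#endif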
2495  * Sleep while searching for a vcache: release all pending locks,
2496  * sleep, then reobtain the previously released locks.
2498  * \param avc The vcache entry whose state change we wait for.
2499  * \param flag Determines which locks were held and must be reobtained.
2504 findvc_sleep(struct vcache *avc, int flag)
2506 int fstates = avc->f.states;
2507 if (flag & IS_SLOCK) {
2508 ReleaseSharedLock(&afs_xvcache);
2510 if (flag & IS_WLOCK) {
2511 ReleaseWriteLock(&afs_xvcache);
2513 ReleaseReadLock(&afs_xvcache);
2516 if (flag & FIND_CDEAD) {
2517 ObtainWriteLock(&afs_xvcache, 342);
2518 afs_FlushReclaimedVcaches();
2519 if (fstates == avc->f.states) {
2520 ReleaseWriteLock(&afs_xvcache);
2521 afs_osi_Sleep(&avc->f.states);
2523 ReleaseWriteLock(&afs_xvcache);
2525 afs_osi_Sleep(&avc->f.states);
2526 if (flag & IS_SLOCK) {
2527 ObtainSharedLock(&afs_xvcache, 341);
2529 if (flag & IS_WLOCK) {
2530 ObtainWriteLock(&afs_xvcache, 343);
2532 ObtainReadLock(&afs_xvcache);
2538 * Add a reference on an existing vcache entry.
2540 * \param tvc Pointer to the vcache.
2542 * \note Environment: Must be called with at least one reference from
2543 * elsewhere on the vcache, even if that reference will be dropped.
2544 * The global lock is required.
2546 * \return 0 on success, -1 on failure.
2550 afs_RefVCache(struct vcache *tvc)
2552 #ifdef AFS_DARWIN80_ENV
2556 /* AFS_STATCNT(afs_RefVCache); */
2558 #ifdef AFS_DARWIN80_ENV
2562 if (vnode_ref(tvp)) {
2564 /* AFSTOV(tvc) may be NULL */
2573 } /*afs_RefVCache */
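/*
 * Illustrative usage sketch (hypothetical caller, kept out of the build):
 * afs_RefVCache can fail (-1), e.g. on Darwin when the vnode is already
 * gone, so callers check the result and fall back to a fresh lookup; a
 * successful reference is later balanced with afs_PutVCache.
 */
#if 0
static int
example_take_extra_ref(struct vcache *tvc)
{
    if (afs_RefVCache(tvc) < 0)
	return -1;	/* could not add a reference; re-find the entry instead */
    return 0;		/* success; drop it later with afs_PutVCache(tvc) */
}
#endif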
2576 * Find a vcache entry given a fid.
2578 * \param afid Pointer to the fid whose cache entry we desire.
2579 * \param retry (SGI-specific) tell the caller to drop the lock on xvcache,
2580 * unlock the vnode, and try again.
2581 * \param flag Bit 1 to specify whether to compute hit statistics. Not
2582 * set if FindVCache is called as part of internal bookkeeping.
2584 * \note Environment: Must be called with the afs_xvcache lock at least held at
2585 * the read level. In order to do the VLRU adjustment, the xvcache lock
2586 * must be shared-- we upgrade it here.
2590 afs_FindVCache(struct VenusFid *afid, afs_int32 * retry, afs_int32 flag)
2595 #ifdef AFS_DARWIN80_ENV
2599 AFS_STATCNT(afs_FindVCache);
2603 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2604 if (FidMatches(afid, tvc)) {
2605 if (tvc->f.states & CVInit) {
2606 findvc_sleep(tvc, flag);
2609 #ifdef AFS_DARWIN80_ENV
2610 if (tvc->f.states & CDeadVnode) {
2611 if (!(flag & FIND_CDEAD)) {
2612 findvc_sleep(tvc, flag);
2619 if (vnode_ref(tvp)) {
2621 /* AFSTOV(tvc) may be NULL */
2626 if (tvc->f.states & (CBulkFetching|CDeadVnode)) {
2628 vnode_recycle(AFSTOV(tvc));
2636 /* should I have a read lock on the vnode here? */
2640 #if !defined(AFS_DARWIN80_ENV)
2641 osi_vnhold(tvc, retry); /* already held, above */
2642 if (retry && *retry)
2645 #if defined(AFS_DARWIN_ENV) && !defined(AFS_DARWIN80_ENV)
2646 tvc->f.states |= CUBCinit;
2648 if (UBCINFOMISSING(AFSTOV(tvc)) ||
2649 UBCINFORECLAIMED(AFSTOV(tvc))) {
2650 ubc_info_init(AFSTOV(tvc));
2653 tvc->f.states &= ~CUBCinit;
2656  * Only move to the front of the VLRU if we have proper vcache locking.
2658 if (flag & DO_VLRU) {
2659 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2660 refpanic("FindVC VLRU inconsistent1");
2662 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2663 refpanic("FindVC VLRU inconsistent2");
2665 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2666 refpanic("FindVC VLRU inconsistent3");
2668 UpgradeSToWLock(&afs_xvcache, 26);
2669 QRemove(&tvc->vlruq);
2670 QAdd(&VLRU, &tvc->vlruq);
2671 ConvertWToSLock(&afs_xvcache);
2672 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2673 refpanic("FindVC VLRU inconsistent1");
2675 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2676 refpanic("FindVC VLRU inconsistent2");
2678 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2679 refpanic("FindVC VLRU inconsistent3");
2685 if (flag & DO_STATS) {
2687 afs_stats_cmperf.vcacheHits++;
2689 afs_stats_cmperf.vcacheMisses++;
2690 if (afs_IsPrimaryCellNum(afid->Cell))
2691 afs_stats_cmperf.vlocalAccesses++;
2693 afs_stats_cmperf.vremoteAccesses++;
2696 } /*afs_FindVCache */
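/*
 * Illustrative usage sketch (hypothetical caller, kept out of the build):
 * per the environment note above, afs_FindVCache is called with afs_xvcache
 * held at least at read level; read level is enough as long as DO_VLRU is
 * not requested.  The reference it adds is dropped later with afs_PutVCache.
 */
#if 0
static struct vcache *
example_lookup(struct VenusFid *afid)
{
    struct vcache *tvc;

    ObtainReadLock(&afs_xvcache);
    tvc = afs_FindVCache(afid, NULL, DO_STATS);	/* NULL: no SGI-style retry */
    ReleaseReadLock(&afs_xvcache);
    return tvc;		/* may be NULL; otherwise put with afs_PutVCache(tvc) */
}
#endif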
2699 * Find a vcache entry given a fid. Does a wildcard match on what we
2700 * have for the fid. If more than one entry, don't return anything.
2702 * \param avcp Fill in pointer if we found one and only one.
2703 * \param afid Pointer to the fid whose cache entry we desire.
2704 * \param retry (SGI-specific) tell the caller to drop the lock on xvcache,
2705 * unlock the vnode, and try again.
2706 * \param flags bit 1 to specify whether to compute hit statistics. Not
2707 * set if FindVCache is called as part of internal bookkeeping.
2709 * \note Environment: Must be called with the afs_xvcache lock at least held at
2710 * the read level. In order to do the VLRU adjustment, the xvcache lock
2711 * must be shared-- we upgrade it here.
2713 * \return Number of matches found.
2716 int afs_duplicate_nfs_fids = 0;
2719 afs_NFSFindVCache(struct vcache **avcp, struct VenusFid *afid)
2723 afs_int32 count = 0;
2724 struct vcache *found_tvc = NULL;
2725 #ifdef AFS_DARWIN80_ENV
2729 AFS_STATCNT(afs_FindVCache);
2733 ObtainSharedLock(&afs_xvcache, 331);
2736 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2737 /* Match only on what we have.... */
2738 if (((tvc->f.fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
2739 && (tvc->f.fid.Fid.Volume == afid->Fid.Volume)
2740 && ((tvc->f.fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
2741 && (tvc->f.fid.Cell == afid->Cell)) {
2742 if (tvc->f.states & CVInit) {
2743 ReleaseSharedLock(&afs_xvcache);
2744 afs_osi_Sleep(&tvc->f.states);
2747 #ifdef AFS_DARWIN80_ENV
2748 if (tvc->f.states & CDeadVnode) {
2749 if (!(tvc->f.states & CBulkFetching)) {
2750 ReleaseSharedLock(&afs_xvcache);
2751 afs_osi_Sleep(&tvc->f.states);
2756 if (vnode_get(tvp)) {
2757 /* This vnode no longer exists. */
2760 if (vnode_ref(tvp)) {
2761 /* This vnode no longer exists. */
2763 /* AFSTOV(tvc) may be NULL */
2768 if (tvc->f.states & (CBulkFetching|CDeadVnode)) {
2770 vnode_recycle(AFSTOV(tvc));
2773 #endif /* AFS_DARWIN80_ENV */
2777 afs_duplicate_nfs_fids++;
2778 ReleaseSharedLock(&afs_xvcache);
2779 #ifdef AFS_DARWIN80_ENV
2780 /* Drop our reference counts. */
2781 vnode_put(AFSTOV(tvc));
2782 vnode_put(AFSTOV(found_tvc));
2791 /* should I have a read lock on the vnode here? */
2793 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2794 afs_int32 retry = 0;
2795 osi_vnhold(tvc, &retry);
2798 found_tvc = (struct vcache *)0;
2799 ReleaseSharedLock(&afs_xvcache);
2800 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2804 osi_vnhold(tvc, (int *)0); /* already held, above */
2807 * We obtained the xvcache lock above.
2809 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2810 refpanic("FindVC VLRU inconsistent1");
2812 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2813 refpanic("FindVC VLRU inconsistent2");
2815 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2816 refpanic("FindVC VLRU inconsistent3");
2818 UpgradeSToWLock(&afs_xvcache, 568);
2819 QRemove(&tvc->vlruq);
2820 QAdd(&VLRU, &tvc->vlruq);
2821 ConvertWToSLock(&afs_xvcache);
2822 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2823 refpanic("FindVC VLRU inconsistent1");
2825 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2826 refpanic("FindVC VLRU inconsistent2");
2828 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2829 refpanic("FindVC VLRU inconsistent3");
2835 afs_stats_cmperf.vcacheHits++;
2837 afs_stats_cmperf.vcacheMisses++;
2838 if (afs_IsPrimaryCellNum(afid->Cell))
2839 afs_stats_cmperf.vlocalAccesses++;
2841 afs_stats_cmperf.vremoteAccesses++;
2843 *avcp = tvc; /* May be null */
2845 ReleaseSharedLock(&afs_xvcache);
2846 return (tvc ? 1 : 0);
2848 } /*afs_NFSFindVCache */
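/*
 * Illustrative sketch (hypothetical helper, kept out of the build): the
 * wildcard match above only compares the low 16 bits of the vnode and the
 * low 24 bits of the uniquifier, so a caller holding only a truncated fid
 * (as the NFS translator does) can still locate the entry.  The masks below
 * simply mirror the ones used in the match.
 */
#if 0
static int
example_nfs_lookup(struct vcache **avcp, afs_int32 cell, afs_uint32 volume,
		   afs_uint32 vnode, afs_uint32 unique)
{
    struct VenusFid afid;

    afid.Cell = cell;
    afid.Fid.Volume = volume;
    afid.Fid.Vnode = vnode & 0xffff;		/* only 16 bits are compared */
    afid.Fid.Unique = unique & 0xffffff;	/* only 24 bits are compared */
    return afs_NFSFindVCache(avcp, &afid);	/* 1 if a unique match was found */
}
#endif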
2854 * Initialize vcache related variables
2859 afs_vcacheInit(int astatSize)
2861 #if !defined(AFS_LINUX22_ENV)
2865 if (!afs_maxvcount) {
2866 afs_maxvcount = astatSize; /* no particular limit on linux? */
2868 #if !defined(AFS_LINUX22_ENV)
2872 AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
2873 LOCK_INIT(&afs_xvcb, "afs_xvcb");
2875 #if !defined(AFS_LINUX22_ENV)
2876 /* Allocate and thread the struct vcache entries */
2877 tvp = afs_osi_Alloc(astatSize * sizeof(struct vcache));
2878 osi_Assert(tvp != NULL);
2879 memset(tvp, 0, sizeof(struct vcache) * astatSize);
2881 Initial_freeVCList = tvp;
2882 freeVCList = &(tvp[0]);
2883 for (i = 0; i < astatSize - 1; i++) {
2884 tvp[i].nextfree = &(tvp[i + 1]);
2886 tvp[astatSize - 1].nextfree = NULL;
2887 # ifdef KERNEL_HAVE_PIN
2888 pin((char *)tvp, astatSize * sizeof(struct vcache)); /* XXX */
2892 #if defined(AFS_SGI_ENV)
2893 for (i = 0; i < astatSize; i++) {
2894 char name[METER_NAMSZ];
2895 struct vcache *tvc = &tvp[i];
2897 tvc->v.v_number = ++afsvnumbers;
2898 tvc->vc_rwlockid = OSI_NO_LOCKID;
2899 initnsema(&tvc->vc_rwlock, 1,
2900 makesname(name, "vrw", tvc->v.v_number));
2901 #ifndef AFS_SGI53_ENV
2902 initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
2904 #ifndef AFS_SGI62_ENV
2905 initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
2906 #endif /* AFS_SGI62_ENV */
2910 for(i = 0; i < VCSIZE; ++i)
2911 QInit(&afs_vhashTV[i]);
2918 shutdown_vcache(void)
2921 struct afs_cbr *tsp;
2923  * XXX We may potentially miss some of the vcaches because, when
2924  * there are no free vcache entries and all the vcache entries are active,
2925  * we allocate an additional one - admittedly we almost never
2930 struct afs_q *tq, *uq = NULL;
2932 for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
2936 osi_FreeSmallSpace(tvc->mvid);
2937 tvc->mvid = (struct VenusFid *)0;
2940 aix_gnode_rele(AFSTOV(tvc));
2942 if (tvc->linkData) {
2943 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
2948 * Also free the remaining ones in the Cache
2950 for (i = 0; i < VCSIZE; i++) {
2951 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2953 osi_FreeSmallSpace(tvc->mvid);
2954 tvc->mvid = (struct VenusFid *)0;
2958 afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
2959 #ifdef AFS_AIX32_ENV
2962 vms_delete(tvc->segid);
2964 tvc->segid = tvc->vmh = NULL;
2965 if (VREFCOUNT_GT(tvc,0))
2966 osi_Panic("flushVcache: vm race");
2974 #if defined(AFS_SUN5_ENV)
2980 if (tvc->linkData) {
2981 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
2986 afs_FreeAllAxs(&(tvc->Access));
2992 * Free any leftover callback queue
2994 for (i = 0; i < afs_stats_cmperf.CallBackAlloced; i++) {
2995 tsp = afs_cbrHeads[i];
2996 afs_cbrHeads[i] = 0;
2997 afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
3001 #if !defined(AFS_LINUX22_ENV)
3002 afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3004 # ifdef KERNEL_HAVE_PIN
3005 unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3008 freeVCList = Initial_freeVCList = 0;
3011 AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
3012 LOCK_INIT(&afs_xvcb, "afs_xvcb");
3014 for(i = 0; i < VCSIZE; ++i)
3015 QInit(&afs_vhashTV[i]);
3019 afs_DisconGiveUpCallbacks(void)
3025 ObtainWriteLock(&afs_xvcache, 1002); /* XXX - should be a unique number */
3027 /* Somehow, walk the set of vcaches, with each one coming out as tvc */
3028 for (i = 0; i < VCSIZE; i++) {
3029 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3030 if (afs_QueueVCB(tvc)) {
3031 tvc->callback = NULL;
3037 ReleaseWriteLock(&afs_xvcache);
3044  * Clear the Statd flag from all vcaches.
3046  * This function removes the Statd flag from all vcaches. It's used by
3047  * disconnected mode to tidy up during reconnection.
3051 afs_ClearAllStatdFlag(void)
3056 ObtainWriteLock(&afs_xvcache, 715);
3058 for (i = 0; i < VCSIZE; i++) {
3059 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3060 tvc->f.states &= ~(CStatd|CUnique);
3063 ReleaseWriteLock(&afs_xvcache);