2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 * afs_FlushActiveVcaches
38 #include <afsconfig.h>
39 #include "afs/param.h"
44 #include "afs/sysincludes.h" /*Standard vendor system headers */
45 #include "afsincludes.h" /*AFS-based standard headers */
46 #include "afs/afs_stats.h"
47 #include "afs/afs_cbqueue.h"
48 #include "afs/afs_osidnlc.h"
50 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
51 afs_int32 afs_maxvcount = 0; /* max number of vcache entries */
52 afs_int32 afs_vcount = 0;	/* number of vcache entries in use now */
53 #endif /* AFS_OSF_ENV || AFS_LINUX22_ENV */
61 #endif /* AFS_SGI64_ENV */
63 /* Exported variables */
64 afs_rwlock_t afs_xvcache; /*Lock: alloc new stat cache entries */
65 afs_rwlock_t afs_xvreclaim; /*Lock: entries reclaimed, not on free list */
66 afs_lock_t afs_xvcb; /*Lock: fids on which there are callbacks */
67 #if !defined(AFS_LINUX22_ENV)
68 static struct vcache *freeVCList; /*Free list for stat cache entries */
69 struct vcache *ReclaimedVCList; /*Reclaimed list for stat entries */
70 static struct vcache *Initial_freeVCList; /*Initial list for above */
72 struct afs_q VLRU; /*vcache LRU */
73 afs_int32 vcachegen = 0;
74 unsigned int afs_paniconwarn = 0;
75 struct vcache *afs_vhashT[VCSIZE];
76 struct afs_q afs_vhashTV[VCSIZE];
77 static struct afs_cbr *afs_cbrHashT[CBRSIZE];
78 afs_int32 afs_bulkStatsLost;
79 int afs_norefpanic = 0;
81 /* Forward declarations */
82 static afs_int32 afs_QueueVCB(struct vcache *avc);
87 * Generate an index into the hash table for a given Fid.
90 afs_HashCBRFid(struct AFSFid *fid)
92 return (fid->Volume + fid->Vnode + fid->Unique) % CBRSIZE;
98 * Insert a CBR entry into the hash table.
99 * Must be called with afs_xvcb held.
102 afs_InsertHashCBR(struct afs_cbr *cbr)
104 int slot = afs_HashCBRFid(&cbr->fid);
106 cbr->hash_next = afs_cbrHashT[slot];
107 if (afs_cbrHashT[slot])
108 afs_cbrHashT[slot]->hash_pprev = &cbr->hash_next;
110 cbr->hash_pprev = &afs_cbrHashT[slot];
111 afs_cbrHashT[slot] = cbr;
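/*
 * Note (illustrative, not compiled): hash_pprev always points at whatever
 * pointer currently references this entry -- afs_cbrHashT[slot] for the
 * chain head, or the previous entry's hash_next otherwise.  That is what
 * lets afs_FreeCBR() below unlink an entry in O(1) without re-walking the
 * chain; the unlink amounts to:
 */
#if 0
*(cbr->hash_pprev) = cbr->hash_next;	/* bypass this entry */
if (cbr->hash_next)
    cbr->hash_next->hash_pprev = cbr->hash_pprev;	/* fix successor's back pointer */
#endif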
118 * Flush the given vcache entry.
121 * avc : Pointer to vcache entry to flush.
122 * slept : Pointer to an int, set to 1 if we slept (dropped locks), 0 otherwise.
125 * afs_xvcache lock must be held for writing upon entry to
126 * prevent people from changing the vrefCount field, and to
127 * protect the lruq and hnext fields.
128 * LOCK: afs_FlushVCache afs_xvcache W
129 * REFCNT: vcache ref count must be zero on entry except for osf1
130 * RACE: lock is dropped and reobtained, permitting race in caller
134 afs_FlushVCache(struct vcache *avc, int *slept)
135 { /*afs_FlushVCache */
138 struct vcache **uvc, *wvc;
141 AFS_STATCNT(afs_FlushVCache);
142 afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
143 ICL_TYPE_INT32, avc->states);
146 VN_LOCK(AFSTOV(avc));
150 code = osi_VM_FlushVCache(avc, slept);
154 if (avc->states & CVFlushed) {
158 #if !defined(AFS_LINUX22_ENV)
159 if (avc->nextfree || !avc->vlruq.prev || !avc->vlruq.next) { /* qv afs.h */
160 refpanic("LRU vs. Free inconsistency");
163 avc->states |= CVFlushed;
164 /* pull the entry out of the lruq and put it on the free list */
165 QRemove(&avc->vlruq);
166 avc->vlruq.prev = avc->vlruq.next = (struct afs_q *)0;
168 /* keep track of # of files that we bulk stat'd, but never used
169 * before they got recycled.
171 if (avc->states & CBulkStat)
174 /* remove entry from the hash chain */
175 i = VCHash(&avc->fid);
176 uvc = &afs_vhashT[i];
177 for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
180 avc->hnext = (struct vcache *)NULL;
185 /* remove entry from the volume hash table */
186 QRemove(&avc->vhashq);
189 osi_FreeSmallSpace(avc->mvid);
190 avc->mvid = (struct VenusFid *)0;
192 afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
193 avc->linkData = NULL;
195 #if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
196 /* OK, there are no internal vrefCounts, so there shouldn't
197 * be any more refs here. */
199 #ifdef AFS_DARWIN80_ENV
200 vnode_clearfsnode(AFSTOV(avc));
201 vnode_removefsref(AFSTOV(avc));
203 avc->v->v_data = NULL; /* remove from vnode */
205 AFSTOV(avc) = NULL; /* also drop the ptr to vnode */
208 afs_FreeAllAxs(&(avc->Access));
210 /* we can't really give back callbacks on RO files, since the
211 * server only tracks them on a per-volume basis, and we don't
212 * know whether we still have some other files from the same
214 if ((avc->states & CRO) == 0 && avc->callback) {
217 ObtainWriteLock(&afs_xcbhash, 460);
218 afs_DequeueCallback(avc); /* remove it from queued callbacks list */
219 avc->states &= ~(CStatd | CUnique);
220 ReleaseWriteLock(&afs_xcbhash);
221 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
222 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
224 osi_dnlc_purgevp(avc);
227 * Next, keep track of which vnodes we've deleted for create's
228 * optimistic synchronization algorithm
231 if (avc->fid.Fid.Vnode & 1)
236 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
237 /* put the entry in the free list */
238 avc->nextfree = freeVCList;
240 if (avc->vlruq.prev || avc->vlruq.next) {
241 refpanic("LRU vs. Free inconsistency");
243 avc->states |= CVFlushed;
245 /* This should put it back on the vnode free list since usecount is 1 */
248 if (VREFCOUNT_GT(avc,0)) {
249 #if defined(AFS_OSF_ENV)
250 VN_UNLOCK(AFSTOV(avc));
252 AFS_RELE(AFSTOV(avc));
254 if (afs_norefpanic) {
255 printf("flush vc refcnt < 1");
257 #if defined(AFS_OSF_ENV)
258 (void)vgone(avc, VX_NOSLEEP, NULL);
260 VN_UNLOCK(AFSTOV(avc));
263 osi_Panic("flush vc refcnt < 1");
265 #endif /* AFS_OSF_ENV */
270 VN_UNLOCK(AFSTOV(avc));
274 } /*afs_FlushVCache */
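/*
 * Usage sketch (illustrative only, not compiled): per the locking notes
 * above, callers enter with afs_xvcache write-locked and a zero ref count
 * on the entry (except on osf1); *slept reports whether the lock was
 * dropped, in which case any list scan in the caller must restart.
 * The lock ID below is made up for the example.
 */
#if 0
{
    int fv_slept;
    afs_int32 code;

    ObtainWriteLock(&afs_xvcache, 999);
    code = afs_FlushVCache(tvc, &fv_slept);	/* tvc: some unreferenced vcache */
    if (fv_slept) {
	/* afs_xvcache was dropped and reacquired; restart any scan */
    }
    ReleaseWriteLock(&afs_xvcache);
}
#endif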
280 * The core of the inactive vnode op for all but IRIX.
283 afs_InactiveVCache(struct vcache *avc, struct AFS_UCRED *acred)
285 AFS_STATCNT(afs_inactive);
286 if (avc->states & CDirty) {
287 /* we can't keep trying to push back dirty data forever. Give up. */
288 afs_InvalidateAllSegments(avc); /* turns off dirty bit */
290 avc->states &= ~CMAPPED; /* mainly used by SunOS 4.0.x */
291 avc->states &= ~CDirty; /* Turn it off */
292 if (avc->states & CUnlinked) {
293 if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
294 avc->states |= CUnlinkedDel;
297 afs_remunlink(avc, 1); /* ignore any return code */
306 * Description: allocate a callback return structure from the
307 * free list and return it.
309 * Env: The alloc and free routines are both called with the afs_xvcb lock
310 * held, so we don't have to worry about blocking in osi_Alloc.
312 static struct afs_cbr *afs_cbrSpace = 0;
316 register struct afs_cbr *tsp;
319 while (!afs_cbrSpace) {
320 if (afs_stats_cmperf.CallBackAlloced >= 2) {
321 /* don't allocate more than 2 * AFS_NCBRS for now */
323 afs_stats_cmperf.CallBackFlushes++;
327 (struct afs_cbr *)afs_osi_Alloc(AFS_NCBRS *
328 sizeof(struct afs_cbr));
329 for (i = 0; i < AFS_NCBRS - 1; i++) {
330 tsp[i].next = &tsp[i + 1];
332 tsp[AFS_NCBRS - 1].next = 0;
334 afs_stats_cmperf.CallBackAlloced++;
338 afs_cbrSpace = tsp->next;
345 * Description: free a callback return structure, removing it from all lists.
348 * asp -- the address of the structure to free.
350 * Environment: the xvcb lock is held over these calls.
353 afs_FreeCBR(register struct afs_cbr *asp)
355 *(asp->pprev) = asp->next;
357 asp->next->pprev = asp->pprev;
359 *(asp->hash_pprev) = asp->hash_next;
361 asp->hash_next->hash_pprev = asp->hash_pprev;
363 asp->next = afs_cbrSpace;
371 * Description: flush all queued callbacks to all servers.
375 * Environment: holds xvcb lock over RPC to guard against race conditions
376 * when a new callback is granted for the same file later on.
379 afs_FlushVCBs(afs_int32 lockit)
381 struct AFSFid *tfids;
382 struct AFSCallBack callBacks[1];
383 struct AFSCBFids fidArray;
384 struct AFSCBs cbArray;
386 struct afs_cbr *tcbrp;
390 struct vrequest treq;
392 int safety1, safety2, safety3;
394 if ((code = afs_InitReq(&treq, afs_osi_credp)))
396 treq.flags |= O_NONBLOCK;
397 tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
400 MObtainWriteLock(&afs_xvcb, 273);
401 ObtainReadLock(&afs_xserver);
402 for (i = 0; i < NSERVERS; i++) {
403 for (safety1 = 0, tsp = afs_servers[i];
404 tsp && safety1 < afs_totalServers + 10;
405 tsp = tsp->next, safety1++) {
407 if (tsp->cbrs == (struct afs_cbr *)0)
410 /* otherwise, grab a block of AFS_MAXCBRSCALL from the list
411 * and make an RPC, over and over again.
413 tcount = 0; /* number found so far */
414 for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
415 if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
416 /* if buffer is full, or we've queued all we're going
417 * to from this server, we should flush out the
420 fidArray.AFSCBFids_len = tcount;
421 fidArray.AFSCBFids_val = (struct AFSFid *)tfids;
422 cbArray.AFSCBs_len = 1;
423 cbArray.AFSCBs_val = callBacks;
424 memset(&callBacks[0], 0, sizeof(callBacks[0]));
425 callBacks[0].CallBackType = CB_EXCLUSIVE;
426 for (safety3 = 0; safety3 < MAXHOSTS * 2; safety3++) {
427 tc = afs_ConnByHost(tsp, tsp->cell->fsport,
428 tsp->cell->cellNum, &treq, 0,
432 (AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
435 RXAFS_GiveUpCallBacks(tc->id, &fidArray,
443 AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS, SHARED_LOCK,
448 /* Ignore the return code: since the callbacks may have
449 * been returned anyway, we shouldn't leave them
450 * around to be returned again.
452 * Next, see if we are done with this server, and if so,
453 * break to deal with the next one.
459 /* if to flush full buffer */
460 /* if we make it here, we have an entry at the head of cbrs,
461 * which we should copy to the file ID array and then free.
464 tfids[tcount++] = tcbrp->fid;
466 /* Freeing the CBR will unlink it from the server's CBR list */
468 } /* while loop for this one server */
469 if (safety2 > afs_cacheStats) {
470 afs_warn("possible internal error afs_flushVCBs (%d)\n",
473 } /* for loop for this hash chain */
474 } /* loop through all hash chains */
475 if (safety1 > afs_totalServers + 2) {
477 ("AFS internal error (afs_flushVCBs) (%d > %d), continuing...\n",
478 safety1, afs_totalServers + 2);
480 osi_Panic("afs_flushVCBS safety1");
483 ReleaseReadLock(&afs_xserver);
485 MReleaseWriteLock(&afs_xvcb);
486 afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
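/*
 * Relationship sketch (illustrative only, not compiled): afs_QueueVCB()
 * below hangs one struct afs_cbr off the owning server's cbrs list (and
 * off the fid hash), and afs_FlushVCBs() above drains each server's list
 * in batches of at most AFS_MAXCBRSCALL fids per GiveUpCallBacks RPC,
 * roughly:
 */
#if 0
while (tsp->cbrs) {
    for (tcount = 0; tcount < AFS_MAXCBRSCALL && tsp->cbrs; tcount++) {
	tcbrp = tsp->cbrs;
	tfids[tcount] = tcbrp->fid;
	afs_FreeCBR(tcbrp);	/* unlinks it from tsp->cbrs as well */
    }
    /* ... RXAFS_GiveUpCallBacks(tc->id, &fidArray, &cbArray, ...) ... */
}
#endif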
494 * Queue a callback on the given fid.
500 * Locks the xvcb lock.
501 * Called when the xvcache lock is already held.
505 afs_QueueVCB(struct vcache *avc)
508 struct afs_cbr *tcbp;
510 AFS_STATCNT(afs_QueueVCB);
511 /* The callback is really just a struct server ptr. */
512 tsp = (struct server *)(avc->callback);
514 /* we now have a pointer to the server, so we just allocate
515 * a queue entry and queue it.
517 MObtainWriteLock(&afs_xvcb, 274);
518 tcbp = afs_AllocCBR();
519 tcbp->fid = avc->fid.Fid;
521 tcbp->next = tsp->cbrs;
523 tsp->cbrs->pprev = &tcbp->next;
526 tcbp->pprev = &tsp->cbrs;
528 afs_InsertHashCBR(tcbp);
530 /* now release locks and return */
531 MReleaseWriteLock(&afs_xvcb);
540 * Remove a queued callback for a given Fid.
543 * afid: The fid we want cleansed of queued callbacks.
546 * Locks xvcb and xserver locks.
547 * Typically called with xdcache, xvcache and/or individual vcache
552 afs_RemoveVCB(struct VenusFid *afid)
555 struct afs_cbr *cbr, *ncbr;
557 AFS_STATCNT(afs_RemoveVCB);
558 MObtainWriteLock(&afs_xvcb, 275);
560 slot = afs_HashCBRFid(&afid->Fid);
561 ncbr = afs_cbrHashT[slot];
565 ncbr = cbr->hash_next;
567 if (afid->Fid.Volume == cbr->fid.Volume &&
568 afid->Fid.Vnode == cbr->fid.Vnode &&
569 afid->Fid.Unique == cbr->fid.Unique) {
574 MReleaseWriteLock(&afs_xvcb);
578 afs_FlushReclaimedVcaches(void)
580 #if !defined(AFS_LINUX22_ENV)
583 struct vcache *tmpReclaimedVCList = NULL;
585 ObtainWriteLock(&afs_xvreclaim, 76);
586 while (ReclaimedVCList) {
587 tvc = ReclaimedVCList; /* take from free list */
588 ReclaimedVCList = tvc->nextfree;
589 tvc->nextfree = NULL;
590 code = afs_FlushVCache(tvc, &fv_slept);
592 /* If afs_FlushVCache failed, we must not leak this entry.
593 * Stash it on a temporary list and splice that list back onto
594 * ReclaimedVCList below so the flush can be retried later.
595 * This is crude; a smarter recovery strategy would be
596 * preferable. */
597 tvc->nextfree = tmpReclaimedVCList;
598 tmpReclaimedVCList = tvc;
599 printf("Reclaim list flush %lx failed: %d\n", (unsigned long) tvc, code);
602 if (tmpReclaimedVCList)
603 ReclaimedVCList = tmpReclaimedVCList;
605 ReleaseWriteLock(&afs_xvreclaim);
613 * This routine is responsible for allocating a new cache entry
614 * from the free list. It formats the cache entry and inserts it
615 * into the appropriate hash tables. It must be called with
616 * afs_xvcache write-locked so as to prevent several processes from
617 * trying to create a new cache entry simultaneously.
620 * afid : The file id of the file whose cache entry is being
623 /* LOCK: afs_NewVCache afs_xvcache W */
625 afs_NewVCache(struct VenusFid *afid, struct server *serverp)
629 afs_int32 anumber = VCACHE_FREE;
631 struct gnode *gnodepnt;
635 #endif /* AFS_OSF_ENV */
636 struct afs_q *tq, *uq;
639 AFS_STATCNT(afs_NewVCache);
641 afs_FlushReclaimedVcaches();
643 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
644 #if defined(AFS_OSF30_ENV) || defined(AFS_LINUX22_ENV)
645 if (afs_vcount >= afs_maxvcount)
648 * If we are using > 33 % of the total system vnodes for AFS vcache
649 * entries or we are using the maximum number of vcache entries,
650 * then free some. (if our usage is > 33% we should free some, if
651 * our usage is > afs_maxvcount, set elsewhere to 0.5*nvnode,
652 * we _must_ free some -- no choice).
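 * (For example, with nvnode = 30000, the 33% test triggers as soon as
 * more than 10000 vcache entries are in use.)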
654 if (((3 * afs_vcount) > nvnode) || (afs_vcount >= afs_maxvcount))
661 for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
664 if (tvc->states & CVFlushed) {
665 refpanic("CVFlushed on VLRU");
666 } else if (i++ > afs_maxvcount) {
667 refpanic("Exceeded pool of AFS vnodes (VLRU cycle?)");
668 } else if (QNext(uq) != tq) {
669 refpanic("VLRU inconsistent");
670 } else if (!VREFCOUNT_GT(tvc,0)) {
671 refpanic("refcnt 0 on VLRU");
674 #if defined(AFS_LINUX22_ENV)
675 if (tvc != afs_globalVp && VREFCOUNT(tvc) > 1 && tvc->opens == 0) {
676 struct dentry *dentry;
677 struct list_head *cur, *head;
680 #if defined(AFS_LINUX24_ENV)
681 spin_lock(&dcache_lock);
683 head = &(AFSTOV(tvc))->i_dentry;
687 while ((cur = cur->next) != head) {
688 dentry = list_entry(cur, struct dentry, d_alias);
690 if (d_unhashed(dentry))
695 #if defined(AFS_LINUX24_ENV)
696 spin_unlock(&dcache_lock);
698 if (d_invalidate(dentry) == -EBUSY) {
700 /* perhaps lock and try to continue? (use cur as head?) */
704 #if defined(AFS_LINUX24_ENV)
705 spin_lock(&dcache_lock);
709 #if defined(AFS_LINUX24_ENV)
710 spin_unlock(&dcache_lock);
718 if (VREFCOUNT_GT(tvc,0) && !VREFCOUNT_GT(tvc,1) &&
720 && (tvc->states & CUnlinkedDel) == 0) {
721 code = afs_FlushVCache(tvc, &fv_slept);
728 continue; /* start over - may have raced. */
734 if (anumber == VCACHE_FREE) {
735 printf("afs_NewVCache: warning none freed, using %d of %d\n",
736 afs_vcount, afs_maxvcount);
737 if (afs_vcount >= afs_maxvcount) {
738 printf("afs_NewVCache - none freed\n");
744 #if defined(AFS_LINUX22_ENV)
749 ip = new_inode(afs_globalVFS);
751 osi_Panic("afs_NewVCache: no more inodes");
753 #if defined(STRUCT_SUPER_HAS_ALLOC_INODE)
756 tvc = afs_osi_Alloc(sizeof(struct vcache));
757 ip->u.generic_ip = tvc;
763 if (getnewvnode(MOUNT_AFS, &Afs_vnodeops, &nvc)) {
764 /* What should we do ???? */
765 osi_Panic("afs_NewVCache: no more vnodes");
770 tvc->nextfree = NULL;
773 #else /* AFS_OSF_ENV */
774 /* pull out a free cache entry */
777 for (tq = VLRU.prev; (anumber > 0) && (tq != &VLRU); tq = uq) {
781 if (tvc->states & CVFlushed) {
782 refpanic("CVFlushed on VLRU");
783 } else if (i++ > 2 * afs_cacheStats) { /* even allowing for a few xallocs... */
784 refpanic("Increase -stat parameter of afsd (VLRU cycle?)");
785 } else if (QNext(uq) != tq) {
786 refpanic("VLRU inconsistent");
787 } else if (tvc->states & CVInit) {
791 if (!VREFCOUNT_GT(tvc,0)
792 #if defined(AFS_DARWIN_ENV) && !defined(UKERNEL) && !defined(AFS_DARWIN80_ENV)
793 || ((VREFCOUNT(tvc) == 1) &&
794 (UBCINFOEXISTS(AFSTOV(tvc))))
796 && tvc->opens == 0 && (tvc->states & CUnlinkedDel) == 0) {
797 #if defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
798 #ifdef AFS_DARWIN80_ENV
799 vnode_t tvp = AFSTOV(tvc);
800 /* VREFCOUNT_GT only sees usecounts, not iocounts */
801 /* so this may fail to actually recycle the vnode now */
802 /* must call vnode_get to avoid races. */
803 if (vnode_get(tvp) == 0) {
805 /* must release lock, since vnode_put will immediately
806 reclaim if there are no other users */
807 ReleaseWriteLock(&afs_xvcache);
812 ObtainWriteLock(&afs_xvcache, 336);
814 /* we can't use the vnode_recycle return value to figure
815 * this out, since the iocount we have to hold makes it
817 if (AFSTOV(tvc) == tvp)
823 * vgone() reclaims the vnode, which calls afs_FlushVCache(),
824 * then it puts the vnode on the free list.
825 * If we don't do this we end up with a cleaned vnode that's
826 * not on the free list.
827 * XXX assume FreeBSD is the same for now.
836 code = afs_FlushVCache(tvc, &fv_slept);
844 continue; /* start over - may have raced. */
852 /* none free, making one is better than a panic */
853 afs_stats_cmperf.vcacheXAllocs++; /* count in case we have a leak */
854 tvc = (struct vcache *)afs_osi_Alloc(sizeof(struct vcache));
855 #if defined(AFS_DARWIN_ENV) && !defined(UKERNEL)
856 tvc->v = NULL; /* important to clear this, or memset the struct to 0 */
858 #ifdef KERNEL_HAVE_PIN
859 pin((char *)tvc, sizeof(struct vcache)); /* XXX */
861 #if defined(AFS_SGI_ENV)
863 char name[METER_NAMSZ];
864 memset(tvc, 0, sizeof(struct vcache));
865 tvc->v.v_number = ++afsvnumbers;
866 tvc->vc_rwlockid = OSI_NO_LOCKID;
867 initnsema(&tvc->vc_rwlock, 1,
868 makesname(name, "vrw", tvc->v.v_number));
869 #ifndef AFS_SGI53_ENV
870 initnsema(&tvc->v.v_sync, 0,
871 makesname(name, "vsy", tvc->v.v_number));
873 #ifndef AFS_SGI62_ENV
874 initnlock(&tvc->v.v_lock,
875 makesname(name, "vlk", tvc->v.v_number));
878 #endif /* AFS_SGI_ENV */
880 tvc = freeVCList; /* take from free list */
881 freeVCList = tvc->nextfree;
882 tvc->nextfree = NULL;
884 #endif /* AFS_OSF_ENV */
886 #if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
888 panic("afs_NewVCache(): free vcache with vnode attached");
891 #if !defined(AFS_SGI_ENV) && !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
892 memset((char *)tvc, 0, sizeof(struct vcache));
897 RWLOCK_INIT(&tvc->lock, "vcache lock");
898 #if defined(AFS_SUN5_ENV)
899 RWLOCK_INIT(&tvc->vlock, "vcache vlock");
900 #endif /* defined(AFS_SUN5_ENV) */
902 tvc->parentVnode = 0;
904 tvc->linkData = NULL;
907 tvc->execsOrWriters = 0;
910 tvc->states = CVInit;
911 tvc->last_looker = 0;
913 tvc->asynchrony = -1;
916 tvc->flushDV.low = tvc->flushDV.high = AFS_MAXDV;
919 tvc->truncPos = AFS_NOTRUNC; /* don't truncate until we need to */
920 hzero(tvc->m.DataVersion); /* in case we copy it into flushDV */
922 tvc->callback = serverp; /* to minimize chance that clear
928 tvc->hnext = afs_vhashT[i];
930 QAdd(&afs_vhashTV[j], &tvc->vhashq);
932 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
933 refpanic("NewVCache VLRU inconsistent");
935 QAdd(&VLRU, &tvc->vlruq); /* put in lruq */
936 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
937 refpanic("NewVCache VLRU inconsistent2");
939 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
940 refpanic("NewVCache VLRU inconsistent3");
942 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
943 refpanic("NewVCache VLRU inconsistent4");
946 /* it should now be safe to drop the xvcache lock */
948 ReleaseWriteLock(&afs_xvcache);
950 afs_nbsd_getnewvnode(tvc); /* includes one refcount */
952 ObtainWriteLock(&afs_xvcache,337);
953 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
955 #ifdef AFS_DARWIN_ENV
956 ReleaseWriteLock(&afs_xvcache);
958 afs_darwin_getnewvnode(tvc); /* includes one refcount */
960 ObtainWriteLock(&afs_xvcache,338);
961 #ifdef AFS_DARWIN80_ENV
962 LOCKINIT(tvc->rwlock);
964 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
971 ReleaseWriteLock(&afs_xvcache);
973 #if defined(AFS_FBSD60_ENV)
974 if (getnewvnode(MOUNT_AFS, afs_globalVFS, &afs_vnodeops, &vp))
975 #elif defined(AFS_FBSD50_ENV)
976 if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
978 if (getnewvnode(VT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
980 panic("afs getnewvnode"); /* can't happen */
982 ObtainWriteLock(&afs_xvcache,339);
983 if (tvc->v != NULL) {
984 /* I'd like to know if this ever happens...
985 * We don't drop global for the rest of this function,
986 * so if we do lose the race, the other thread should
987 * have found the same vnode and finished initializing
988 * the vcache entry. Is it conceivable that this vcache
989 * entry could be recycled during this interval? If so,
990 * then there probably needs to be some sort of additional
991 * mutual exclusion (an Embryonic flag would suffice).
993 printf("afs_NewVCache: lost the race\n");
997 tvc->v->v_data = tvc;
998 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
1002 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
1003 /* Hold it for the LRU (should make count 2) */
1004 VN_HOLD(AFSTOV(tvc));
1005 #else /* AFS_OSF_ENV */
1006 #if !(defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV))
1007 VREFCOUNT_SET(tvc, 1); /* us */
1008 #endif /* AFS_XBSD_ENV */
1009 #endif /* AFS_OSF_ENV */
1010 #ifdef AFS_AIX32_ENV
1011 LOCK_INIT(&tvc->pvmlock, "vcache pvmlock");
1012 tvc->vmh = tvc->segid = NULL;
1015 #ifdef AFS_BOZONLOCK_ENV
1016 #if defined(AFS_SUN5_ENV)
1017 rw_init(&tvc->rwlock, "vcache rwlock", RW_DEFAULT, NULL);
1019 #if defined(AFS_SUN55_ENV)
1020 /* This is required if the kaio (kernel asynchronous io)
1021 ** module is installed. Inside the kernel, the function
1022 ** check_vp (common/os/aio.c) checks to see if the kernel has
1023 ** to provide asynchronous io for this vnode. This
1024 ** function extracts the device number by following the
1025 ** v_data field of the vnode. If we do not set this field
1026 ** then the system panics. The value of the v_data field
1027 ** is not really important for AFS vnodes because the kernel
1028 ** does not do asynchronous io for regular files. Hence,
1029 ** for the time being, we fill up the v_data field with the
1030 ** vnode pointer itself. */
1031 tvc->v.v_data = (char *)tvc;
1032 #endif /* AFS_SUN55_ENV */
1034 afs_BozonInit(&tvc->pvnLock, tvc);
1037 /* initialize vnode data, note vrefCount is v.v_count */
1039 /* Don't forget to free the gnode space */
1040 tvc->v.v_gnode = gnodepnt =
1041 (struct gnode *)osi_AllocSmallSpace(sizeof(struct gnode));
1042 memset((char *)gnodepnt, 0, sizeof(struct gnode));
1044 #ifdef AFS_SGI64_ENV
1045 memset((void *)&(tvc->vc_bhv_desc), 0, sizeof(tvc->vc_bhv_desc));
1046 bhv_desc_init(&(tvc->vc_bhv_desc), tvc, tvc, &Afs_vnodeops);
1047 #ifdef AFS_SGI65_ENV
1048 vn_bhv_head_init(&(tvc->v.v_bh), "afsvp");
1049 vn_bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
1051 bhv_head_init(&(tvc->v.v_bh));
1052 bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
1054 #ifdef AFS_SGI65_ENV
1055 tvc->v.v_mreg = tvc->v.v_mregb = (struct pregion *)tvc;
1056 #ifdef VNODE_TRACING
1057 tvc->v.v_trace = ktrace_alloc(VNODE_TRACE_SIZE, 0);
1059 init_bitlock(&tvc->v.v_pcacheflag, VNODE_PCACHE_LOCKBIT, "afs_pcache",
1061 init_mutex(&tvc->v.v_filocksem, MUTEX_DEFAULT, "afsvfl", (long)tvc);
1062 init_mutex(&tvc->v.v_buf_lock, MUTEX_DEFAULT, "afsvnbuf", (long)tvc);
1064 vnode_pcache_init(&tvc->v);
1065 #if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
1066 /* Above define is never true except in SGI test kernels. */
1067 init_bitlock(&(tvc->v.v_flag), VLOCK, "vnode", tvc->v.v_number);
1069 #ifdef INTR_KTHREADS
1070 AFS_VN_INIT_BUF_LOCK(&(tvc->v));
1073 SetAfsVnode(AFSTOV(tvc));
1074 #endif /* AFS_SGI64_ENV */
1076 * The proper value for mvstat (for root fids) is set up by the caller.
1079 if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
1081 if (afs_globalVFS == 0)
1082 osi_Panic("afs globalvfs");
1083 #if !defined(AFS_LINUX22_ENV)
1084 vSetVfsp(tvc, afs_globalVFS);
1086 vSetType(tvc, VREG);
1088 tvc->v.v_vfsnext = afs_globalVFS->vfs_vnodes; /* link off vfs */
1089 tvc->v.v_vfsprev = NULL;
1090 afs_globalVFS->vfs_vnodes = &tvc->v;
1091 if (tvc->v.v_vfsnext != NULL)
1092 tvc->v.v_vfsnext->v_vfsprev = &tvc->v;
1093 tvc->v.v_next = gnodepnt->gn_vnode; /*Single vnode per gnode for us! */
1094 gnodepnt->gn_vnode = &tvc->v;
1096 #if defined(AFS_DUX40_ENV)
1097 insmntque(tvc, afs_globalVFS, &afs_ubcops);
1100 /* Is this needed??? */
1101 insmntque(tvc, afs_globalVFS);
1102 #endif /* AFS_OSF_ENV */
1103 #endif /* AFS_DUX40_ENV */
1104 #if defined(AFS_SGI_ENV)
1105 VN_SET_DPAGES(&(tvc->v), (struct pfdat *)NULL);
1106 osi_Assert((tvc->v.v_flag & VINACT) == 0);
1108 osi_Assert(VN_GET_PGCNT(&(tvc->v)) == 0);
1109 osi_Assert(tvc->mapcnt == 0 && tvc->vc_locktrips == 0);
1110 osi_Assert(tvc->vc_rwlockid == OSI_NO_LOCKID);
1111 osi_Assert(tvc->v.v_filocks == NULL);
1112 #if !defined(AFS_SGI65_ENV)
1113 osi_Assert(tvc->v.v_filocksem == NULL);
1115 osi_Assert(tvc->cred == NULL);
1116 #ifdef AFS_SGI64_ENV
1117 vnode_pcache_reinit(&tvc->v);
1118 tvc->v.v_rdev = NODEV;
1120 vn_initlist((struct vnlist *)&tvc->v);
1122 #endif /* AFS_SGI_ENV */
1124 osi_dnlc_purgedp(tvc); /* this may be overkill */
1125 memset((char *)&(tvc->callsort), 0, sizeof(struct afs_q));
1127 tvc->states &= ~CVInit;
1128 afs_osi_Wakeup(&tvc->states);
1132 } /*afs_NewVCache */
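/*
 * Caller sketch (illustrative only, not compiled), mirroring afs_GetVCache
 * below: afs_NewVCache must be entered with afs_xvcache write-locked; the
 * new entry is hashed and put on the VLRU before the lock is downgraded.
 */
#if 0
UpgradeSToWLock(&afs_xvcache, 21);
tvc = afs_NewVCache(afid, NULL);
ConvertWToSLock(&afs_xvcache);
#endif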
1136 * afs_FlushActiveVcaches
1142 * doflocks : Do we handle flocks?
1144 /* LOCK: afs_FlushActiveVcaches afs_xvcache N */
1146 afs_FlushActiveVcaches(register afs_int32 doflocks)
1148 register struct vcache *tvc;
1150 register struct conn *tc;
1151 register afs_int32 code;
1152 register struct AFS_UCRED *cred = NULL;
1153 struct vrequest treq, ureq;
1154 struct AFSVolSync tsync;
1157 AFS_STATCNT(afs_FlushActiveVcaches);
1158 ObtainReadLock(&afs_xvcache);
1159 for (i = 0; i < VCSIZE; i++) {
1160 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
1161 if (tvc->states & CVInit) continue;
1162 #ifdef AFS_DARWIN80_ENV
1163 if (tvc->states & CDeadVnode &&
1164 (tvc->states & (CCore|CUnlinkedDel) ||
1165 tvc->flockCount)) panic("Dead vnode has core/unlinkedel/flock");
1167 if (doflocks && tvc->flockCount != 0) {
1168 /* if this entry has an flock, send a keep-alive call out */
1170 ReleaseReadLock(&afs_xvcache);
1171 ObtainWriteLock(&tvc->lock, 51);
1173 afs_InitReq(&treq, afs_osi_credp);
1174 treq.flags |= O_NONBLOCK;
1176 tc = afs_Conn(&tvc->fid, &treq, SHARED_LOCK);
1178 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
1181 RXAFS_ExtendLock(tc->id,
1182 (struct AFSFid *)&tvc->fid.Fid,
1188 } while (afs_Analyze
1189 (tc, code, &tvc->fid, &treq,
1190 AFS_STATS_FS_RPCIDX_EXTENDLOCK, SHARED_LOCK, NULL));
1192 ReleaseWriteLock(&tvc->lock);
1193 #ifdef AFS_DARWIN80_ENV
1195 ObtainReadLock(&afs_xvcache);
1197 ObtainReadLock(&afs_xvcache);
1202 if ((tvc->states & CCore) || (tvc->states & CUnlinkedDel)) {
1204 * Don't let it evaporate in case someone else is in
1205 * this code. Also, drop the afs_xvcache lock while
1206 * getting vcache locks.
1209 ReleaseReadLock(&afs_xvcache);
1210 #ifdef AFS_BOZONLOCK_ENV
1211 afs_BozonLock(&tvc->pvnLock, tvc);
1213 #if defined(AFS_SGI_ENV)
1215 * That's because if we come in via the CUnlinkedDel bit state path we'll have a 0 refcnt
1217 osi_Assert(VREFCOUNT_GT(tvc,0));
1218 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1220 ObtainWriteLock(&tvc->lock, 52);
1221 if (tvc->states & CCore) {
1222 tvc->states &= ~CCore;
1223 /* XXXX Find better place-holder for cred XXXX */
1224 cred = (struct AFS_UCRED *)tvc->linkData;
1225 tvc->linkData = NULL; /* XXX */
1226 afs_InitReq(&ureq, cred);
1227 afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
1228 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
1229 tvc->execsOrWriters);
1230 code = afs_StoreOnLastReference(tvc, &ureq);
1231 ReleaseWriteLock(&tvc->lock);
1232 #ifdef AFS_BOZONLOCK_ENV
1233 afs_BozonUnlock(&tvc->pvnLock, tvc);
1235 hzero(tvc->flushDV);
1238 if (code && code != VNOVNODE) {
1239 afs_StoreWarn(code, tvc->fid.Fid.Volume,
1240 /* /dev/console */ 1);
1242 } else if (tvc->states & CUnlinkedDel) {
1246 ReleaseWriteLock(&tvc->lock);
1247 #ifdef AFS_BOZONLOCK_ENV
1248 afs_BozonUnlock(&tvc->pvnLock, tvc);
1250 #if defined(AFS_SGI_ENV)
1251 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1253 afs_remunlink(tvc, 0);
1254 #if defined(AFS_SGI_ENV)
1255 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1258 /* lost (or won, perhaps) the race condition */
1259 ReleaseWriteLock(&tvc->lock);
1260 #ifdef AFS_BOZONLOCK_ENV
1261 afs_BozonUnlock(&tvc->pvnLock, tvc);
1264 #if defined(AFS_SGI_ENV)
1265 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1267 #ifdef AFS_DARWIN80_ENV
1270 AFS_RELE(AFSTOV(tvc));
1271 /* Matches write code setting CCore flag */
1274 ObtainReadLock(&afs_xvcache);
1276 ObtainReadLock(&afs_xvcache);
1279 AFS_RELE(AFSTOV(tvc));
1280 /* Matches write code setting CCore flag */
1287 ReleaseReadLock(&afs_xvcache);
1296 * Make sure a cache entry is up-to-date status-wise.
1298 * NOTE: everywhere that calls this can potentially be sped up
1299 * by checking CStatd first, and avoiding doing the InitReq
1300 * if this is up-to-date.
1302 * These days, the only places that call this already KNOW that the
1303 * vcache is not up-to-date, so we don't bother with that check here.
1306 * avc : Ptr to vcache entry to verify.
1311 afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
1313 register struct vcache *tvc;
1315 AFS_STATCNT(afs_VerifyVCache);
1317 #if defined(AFS_OSF_ENV)
1318 ObtainReadLock(&avc->lock);
1319 if (afs_IsWired(avc)) {
1320 ReleaseReadLock(&avc->lock);
1323 ReleaseReadLock(&avc->lock);
1324 #endif /* AFS_OSF_ENV */
1325 /* otherwise we must fetch the status info */
1327 ObtainWriteLock(&avc->lock, 53);
1328 if (avc->states & CStatd) {
1329 ReleaseWriteLock(&avc->lock);
1332 ObtainWriteLock(&afs_xcbhash, 461);
1333 avc->states &= ~(CStatd | CUnique);
1334 avc->callback = NULL;
1335 afs_DequeueCallback(avc);
1336 ReleaseWriteLock(&afs_xcbhash);
1337 ReleaseWriteLock(&avc->lock);
1339 /* since we've been called back, or the callback has expired,
1340 * it's possible that the contents of this directory, or this
1341 * file's name have changed, thus invalidating the dnlc contents.
1343 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
1344 osi_dnlc_purgedp(avc);
1346 osi_dnlc_purgevp(avc);
1348 /* fetch the status info */
1349 tvc = afs_GetVCache(&avc->fid, areq, NULL, avc);
1352 /* Put it back; caller has already incremented vrefCount */
1356 } /*afs_VerifyVCache */
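/*
 * Fast-path sketch (illustrative only, not compiled), per the NOTE in the
 * header comment above: a caller that checks CStatd first can skip the
 * vrequest setup entirely when the entry is already valid.  "acred" here
 * is just a placeholder for the caller's credentials.
 */
#if 0
if (!(avc->states & CStatd)) {
    if ((code = afs_InitReq(&treq, acred)) == 0)
	code = afs_VerifyVCache2(avc, &treq);
}
#endif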
1363 * Simple copy of stat info into cache.
1366 * avc : Ptr to vcache entry involved.
1367 * astat : Ptr to stat info to copy.
1370 * Nothing interesting.
1372 * Callers: as of 1992-04-29, only called by WriteVCache
1375 afs_SimpleVStat(register struct vcache *avc,
1376 register struct AFSFetchStatus *astat, struct vrequest *areq)
1379 AFS_STATCNT(afs_SimpleVStat);
1382 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1383 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1385 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
1387 #ifdef AFS_64BIT_CLIENT
1388 FillInt64(length, astat->Length_hi, astat->Length);
1389 #else /* AFS_64BIT_CLIENT */
1390 length = astat->Length;
1391 #endif /* AFS_64BIT_CLIENT */
1392 #if defined(AFS_SGI_ENV)
1393 osi_Assert((valusema(&avc->vc_rwlock) <= 0)
1394 && (OSI_GET_LOCKID() == avc->vc_rwlockid));
1395 if (length < avc->m.Length) {
1396 vnode_t *vp = (vnode_t *) avc;
1398 osi_Assert(WriteLocked(&avc->lock));
1399 ReleaseWriteLock(&avc->lock);
1401 PTOSSVP(vp, (off_t) length, (off_t) MAXLONG);
1403 ObtainWriteLock(&avc->lock, 67);
1406 /* if writing the file, don't fetch over this value */
1407 afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT, ICL_TYPE_POINTER, avc,
1408 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
1409 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1410 avc->m.Length = length;
1411 avc->m.Date = astat->ClientModTime;
1413 avc->m.Owner = astat->Owner;
1414 avc->m.Group = astat->Group;
1415 avc->m.Mode = astat->UnixModeBits;
1416 if (vType(avc) == VREG) {
1417 avc->m.Mode |= S_IFREG;
1418 } else if (vType(avc) == VDIR) {
1419 avc->m.Mode |= S_IFDIR;
1420 } else if (vType(avc) == VLNK) {
1421 avc->m.Mode |= S_IFLNK;
1422 if ((avc->m.Mode & 0111) == 0)
1425 if (avc->states & CForeign) {
1426 struct axscache *ac;
1427 avc->anyAccess = astat->AnonymousAccess;
1429 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1431 * Caller has at least one bit not covered by anonymous, and
1432 * thus may have interesting rights.
1434 * HOWEVER, this is a really bad idea, because any access query
1435 * for bits which aren't covered by anonymous, on behalf of a user
1436 * who doesn't have any special rights, will result in an answer of
1437 * the form "I don't know, let's make a FetchStatus RPC and find out!"
1438 * It's an especially bad idea under Ultrix, since (due to the lack of
1439 * a proper access() call) it must perform several afs_access() calls
1440 * in order to create magic mode bits that vary according to who makes
1441 * the call. In other words, _every_ stat() generates a test for
1444 #endif /* badidea */
1445 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1446 ac->axess = astat->CallerAccess;
1447 else /* not found, add a new one if possible */
1448 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1452 } /*afs_SimpleVStat */
1459 * Store the status info *only* back to the server for a
1463 * avc : Ptr to the vcache entry.
1464 * astatus : Ptr to the status info to store.
1465 * areq : Ptr to the associated vrequest.
1468 * Must be called with a shared lock held on the vnode.
1472 afs_WriteVCache(register struct vcache *avc,
1473 register struct AFSStoreStatus *astatus,
1474 struct vrequest *areq)
1478 struct AFSFetchStatus OutStatus;
1479 struct AFSVolSync tsync;
1481 AFS_STATCNT(afs_WriteVCache);
1482 afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
1483 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
1486 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
1488 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
1491 RXAFS_StoreStatus(tc->id, (struct AFSFid *)&avc->fid.Fid,
1492 astatus, &OutStatus, &tsync);
1497 } while (afs_Analyze
1498 (tc, code, &avc->fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
1499 SHARED_LOCK, NULL));
1501 UpgradeSToWLock(&avc->lock, 20);
1503 /* success, do the changes locally */
1504 afs_SimpleVStat(avc, &OutStatus, areq);
1506 * Update the date, too. SimpleVStat didn't do this, since
1507 * it thought we were doing this after fetching new status
1508 * over a file being written.
1510 avc->m.Date = OutStatus.ClientModTime;
1512 /* failure, set up to check with server next time */
1513 ObtainWriteLock(&afs_xcbhash, 462);
1514 afs_DequeueCallback(avc);
1515 avc->states &= ~(CStatd | CUnique); /* turn off stat valid flag */
1516 ReleaseWriteLock(&afs_xcbhash);
1517 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
1518 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
1520 ConvertWToSLock(&avc->lock);
1523 } /*afs_WriteVCache */
1529 * Copy astat block into vcache info
1532 * avc : Ptr to vcache entry.
1533 * astat : Ptr to stat block to copy in.
1534 * areq : Ptr to associated request.
1537 * Must be called under a write lock
1539 * Note: this code may get dataversion and length out of sync if the file has
1540 * been modified. This is less than ideal. I haven't thought about
1541 * it sufficiently to be certain that it is adequate.
1544 afs_ProcessFS(register struct vcache *avc,
1545 register struct AFSFetchStatus *astat, struct vrequest *areq)
1548 AFS_STATCNT(afs_ProcessFS);
1550 #ifdef AFS_64BIT_CLIENT
1551 FillInt64(length, astat->Length_hi, astat->Length);
1552 #else /* AFS_64BIT_CLIENT */
1553 length = astat->Length;
1554 #endif /* AFS_64BIT_CLIENT */
1555 /* WARNING: afs_DoBulkStat uses the Length field to store a sequence
1556 * number for each bulk status request. Under no circumstances
1557 * should afs_DoBulkStat store a sequence number if the new
1558 * length will be ignored when afs_ProcessFS is called with
1559 * new stats. If you change the following conditional then you
1560 * also need to change the conditional in afs_DoBulkStat. */
1562 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1563 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1565 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
1567 /* if we're writing or mapping this file, don't fetch over these
1570 afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
1571 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
1572 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1573 avc->m.Length = length;
1574 avc->m.Date = astat->ClientModTime;
1576 hset64(avc->m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
1577 avc->m.Owner = astat->Owner;
1578 avc->m.Mode = astat->UnixModeBits;
1579 avc->m.Group = astat->Group;
1580 avc->m.LinkCount = astat->LinkCount;
1581 if (astat->FileType == File) {
1582 vSetType(avc, VREG);
1583 avc->m.Mode |= S_IFREG;
1584 } else if (astat->FileType == Directory) {
1585 vSetType(avc, VDIR);
1586 avc->m.Mode |= S_IFDIR;
1587 } else if (astat->FileType == SymbolicLink) {
1588 if (afs_fakestat_enable && (avc->m.Mode & 0111) == 0) {
1589 vSetType(avc, VDIR);
1590 avc->m.Mode |= S_IFDIR;
1592 vSetType(avc, VLNK);
1593 avc->m.Mode |= S_IFLNK;
1595 if ((avc->m.Mode & 0111) == 0) {
1599 avc->anyAccess = astat->AnonymousAccess;
1601 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1603 * Caller has at least one bit not covered by anonymous, and
1604 * thus may have interesting rights.
1606 * HOWEVER, this is a really bad idea, because any access query
1607 * for bits which aren't covered by anonymous, on behalf of a user
1608 * who doesn't have any special rights, will result in an answer of
1609 * the form "I don't know, let's make a FetchStatus RPC and find out!"
1610 * It's an especially bad idea under Ultrix, since (due to the lack of
1611 * a proper access() call) it must perform several afs_access() calls
1612 * in order to create magic mode bits that vary according to who makes
1613 * the call. In other words, _every_ stat() generates a test for
1616 #endif /* badidea */
1618 struct axscache *ac;
1619 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1620 ac->axess = astat->CallerAccess;
1621 else /* not found, add a new one if possible */
1622 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1624 } /*afs_ProcessFS */
1628 afs_RemoteLookup(register struct VenusFid *afid, struct vrequest *areq,
1629 char *name, struct VenusFid *nfid,
1630 struct AFSFetchStatus *OutStatusp,
1631 struct AFSCallBack *CallBackp, struct server **serverp,
1632 struct AFSVolSync *tsyncp)
1636 register struct conn *tc;
1637 struct AFSFetchStatus OutDirStatus;
1640 name = ""; /* XXX */
1642 tc = afs_Conn(afid, areq, SHARED_LOCK);
1645 *serverp = tc->srvr->server;
1647 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
1650 RXAFS_Lookup(tc->id, (struct AFSFid *)&afid->Fid, name,
1651 (struct AFSFid *)&nfid->Fid, OutStatusp,
1652 &OutDirStatus, CallBackp, tsyncp);
1657 } while (afs_Analyze
1658 (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
1669 * Given a file id and a vrequest structure, fetch the status
1670 * information associated with the file.
1674 * areq : Ptr to associated vrequest structure, specifying the
1675 * user whose authentication tokens will be used.
1676 * avc : caller may already have a vcache for this file, which is
1680 * The cache entry is returned with an increased vrefCount field.
1681 * The entry must be discarded by calling afs_PutVCache when you
1682 * are through using the pointer to the cache entry.
1684 * You should not hold any locks when calling this function, except
1685 * locks on other vcache entries. If you lock more than one vcache
1686 * entry simultaneously, you should lock them in this order:
1688 * 1. Lock all files first, then directories.
1689 * 2. Within a particular type, lock entries in Fid.Vnode order.
1691 * This locking hierarchy is convenient because it allows locking
1692 * of a parent dir cache entry, given a file (to check its access
1693 * control list). It also allows renames to be handled easily by
1694 * locking directories in a constant order.
1695 * NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
1697 /* might have a vcache structure already, which must
1698 * already be held by the caller */
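/*
 * Worked example of the ordering rule above (illustrative only, not
 * compiled): to hold two directory vcaches at once, lock the one with the
 * smaller Fid.Vnode first; a file vcache needed at the same time would be
 * locked before either directory.  The lock IDs are made up.
 */
#if 0
first = (tvcA->fid.Fid.Vnode < tvcB->fid.Fid.Vnode) ? tvcA : tvcB;
second = (first == tvcA) ? tvcB : tvcA;
ObtainWriteLock(&first->lock, 998);
ObtainWriteLock(&second->lock, 999);
#endif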
1701 afs_GetVCache(register struct VenusFid *afid, struct vrequest *areq,
1702 afs_int32 * cached, struct vcache *avc)
1705 afs_int32 code, newvcache = 0;
1706 register struct vcache *tvc;
1710 AFS_STATCNT(afs_GetVCache);
1713 *cached = 0; /* Init just in case */
1715 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1719 ObtainSharedLock(&afs_xvcache, 5);
1721 tvc = afs_FindVCache(afid, &retry, DO_STATS | DO_VLRU | IS_SLOCK);
1723 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1724 ReleaseSharedLock(&afs_xvcache);
1725 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1733 osi_Assert((tvc->states & CVInit) == 0);
1734 if (tvc->states & CStatd) {
1735 ReleaseSharedLock(&afs_xvcache);
1739 UpgradeSToWLock(&afs_xvcache, 21);
1741 /* no cache entry, better grab one */
1742 tvc = afs_NewVCache(afid, NULL);
1745 ConvertWToSLock(&afs_xvcache);
1748 ReleaseSharedLock(&afs_xvcache);
1752 afs_stats_cmperf.vcacheMisses++;
1755 ReleaseSharedLock(&afs_xvcache);
1757 ObtainWriteLock(&tvc->lock, 54);
1759 if (tvc->states & CStatd) {
1760 ReleaseWriteLock(&tvc->lock);
1763 #if defined(AFS_OSF_ENV)
1764 if (afs_IsWired(tvc)) {
1765 ReleaseWriteLock(&tvc->lock);
1768 #endif /* AFS_OSF_ENV */
1769 #ifdef AFS_DARWIN80_ENV
1770 /* Darwin 8.0 only has bufs in nfs, so we shouldn't have to worry about them.
1773 #if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
1775 * XXX - I really don't like this. Should try to understand better.
1776 * It seems that sometimes, when we get called, we already hold the
1777 * lock on the vnode (e.g., from afs_getattr via afs_VerifyVCache).
1778 * We can't drop the vnode lock, because that could result in a race.
1779 * Sometimes, though, we get here and don't hold the vnode lock.
1780 * I hate code paths that sometimes hold locks and sometimes don't.
1781 * In any event, the dodge we use here is to check whether the vnode
1782 * is locked, and if it isn't, then we gain and drop it around the call
1783 * to vinvalbuf; otherwise, we leave it alone.
1786 struct vnode *vp = AFSTOV(tvc);
1789 #if defined(AFS_DARWIN_ENV)
1790 iheldthelock = VOP_ISLOCKED(vp);
1792 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, current_proc());
1793 /* this is messy. we can call fsync which will try to reobtain this */
1794 if (VTOAFS(vp) == tvc)
1795 ReleaseWriteLock(&tvc->lock);
1796 if (UBCINFOEXISTS(vp)) {
1797 vinvalbuf(vp, V_SAVE, &afs_osi_cred, current_proc(), PINOD, 0);
1799 if (VTOAFS(vp) == tvc)
1800 ObtainWriteLock(&tvc->lock, 954);
1802 VOP_UNLOCK(vp, LK_EXCLUSIVE, current_proc());
1803 #elif defined(AFS_FBSD60_ENV)
1804 iheldthelock = VOP_ISLOCKED(vp, curthread);
1806 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
1807 vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
1809 VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
1810 #elif defined(AFS_FBSD50_ENV)
1811 iheldthelock = VOP_ISLOCKED(vp, curthread);
1813 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
1814 vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
1816 VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
1817 #elif defined(AFS_FBSD40_ENV)
1818 iheldthelock = VOP_ISLOCKED(vp, curproc);
1820 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
1821 vinvalbuf(vp, V_SAVE, osi_curcred(), curproc, PINOD, 0);
1823 VOP_UNLOCK(vp, LK_EXCLUSIVE, curproc);
1824 #elif defined(AFS_OBSD_ENV)
1825 iheldthelock = VOP_ISLOCKED(vp, curproc);
1827 VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
1828 uvm_vnp_uncache(vp);
1830 VOP_UNLOCK(vp, 0, curproc);
1836 ObtainWriteLock(&afs_xcbhash, 464);
1837 tvc->states &= ~CUnique;
1839 afs_DequeueCallback(tvc);
1840 ReleaseWriteLock(&afs_xcbhash);
1842 /* Is it always appropriate to throw away all the access rights? */
1843 afs_FreeAllAxs(&(tvc->Access));
1844 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-volume info */
1846 if ((tvp->states & VForeign)) {
1848 tvc->states |= CForeign;
1849 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
1850 && (tvp->rootUnique == afid->Fid.Unique)) {
1854 if (tvp->states & VRO)
1856 if (tvp->states & VBackup)
1857 tvc->states |= CBackup;
1858 /* now copy ".." entry back out of volume structure, if necessary */
1859 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
1861 tvc->mvid = (struct VenusFid *)
1862 osi_AllocSmallSpace(sizeof(struct VenusFid));
1863 *tvc->mvid = tvp->dotdot;
1865 afs_PutVolume(tvp, READ_LOCK);
1869 afs_RemoveVCB(afid);
1871 struct AFSFetchStatus OutStatus;
1873 if (afs_DynrootNewVnode(tvc, &OutStatus)) {
1874 afs_ProcessFS(tvc, &OutStatus, areq);
1875 tvc->states |= CStatd | CUnique;
1878 code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
1883 ReleaseWriteLock(&tvc->lock);
1889 ReleaseWriteLock(&tvc->lock);
1892 } /*afs_GetVCache */
1897 afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
1898 afs_int32 * cached, struct vcache *adp, char *aname)
1900 afs_int32 code, now, newvcache = 0;
1901 struct VenusFid nfid;
1902 register struct vcache *tvc;
1904 struct AFSFetchStatus OutStatus;
1905 struct AFSCallBack CallBack;
1906 struct AFSVolSync tsync;
1907 struct server *serverp = 0;
1911 AFS_STATCNT(afs_GetVCache);
1913 *cached = 0; /* Init just in case */
1915 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1919 ObtainReadLock(&afs_xvcache);
1920 tvc = afs_FindVCache(afid, &retry, DO_STATS /* no vlru */ );
1923 ReleaseReadLock(&afs_xvcache);
1925 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1926 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1930 ObtainReadLock(&tvc->lock);
1932 if (tvc->states & CStatd) {
1936 ReleaseReadLock(&tvc->lock);
1939 tvc->states &= ~CUnique;
1941 ReleaseReadLock(&tvc->lock);
1943 ObtainReadLock(&afs_xvcache);
1946 ReleaseReadLock(&afs_xvcache);
1948 /* lookup the file */
1951 origCBs = afs_allCBs; /* if anything changes, we don't have a cb */
1953 afs_RemoteLookup(&adp->fid, areq, aname, &nfid, &OutStatus, &CallBack,
1956 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1960 ObtainSharedLock(&afs_xvcache, 6);
1961 tvc = afs_FindVCache(&nfid, &retry, DO_VLRU | IS_SLOCK/* no xstats now */ );
1963 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1964 ReleaseSharedLock(&afs_xvcache);
1965 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1971 /* no cache entry, better grab one */
1972 UpgradeSToWLock(&afs_xvcache, 22);
1973 tvc = afs_NewVCache(&nfid, serverp);
1975 ConvertWToSLock(&afs_xvcache);
1978 ReleaseSharedLock(&afs_xvcache);
1983 ReleaseSharedLock(&afs_xvcache);
1984 ObtainWriteLock(&tvc->lock, 55);
1986 /* Is it always appropriate to throw away all the access rights? */
1987 afs_FreeAllAxs(&(tvc->Access));
1988 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-vol info */
1990 if ((tvp->states & VForeign)) {
1992 tvc->states |= CForeign;
1993 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
1994 && (tvp->rootUnique == afid->Fid.Unique))
1997 if (tvp->states & VRO)
1999 if (tvp->states & VBackup)
2000 tvc->states |= CBackup;
2001 /* now copy ".." entry back out of volume structure, if necessary */
2002 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2004 tvc->mvid = (struct VenusFid *)
2005 osi_AllocSmallSpace(sizeof(struct VenusFid));
2006 *tvc->mvid = tvp->dotdot;
2011 ObtainWriteLock(&afs_xcbhash, 465);
2012 afs_DequeueCallback(tvc);
2013 tvc->states &= ~(CStatd | CUnique);
2014 ReleaseWriteLock(&afs_xcbhash);
2015 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2016 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2018 afs_PutVolume(tvp, READ_LOCK);
2019 ReleaseWriteLock(&tvc->lock);
2024 ObtainWriteLock(&afs_xcbhash, 466);
2025 if (origCBs == afs_allCBs) {
2026 if (CallBack.ExpirationTime) {
2027 tvc->callback = serverp;
2028 tvc->cbExpires = CallBack.ExpirationTime + now;
2029 tvc->states |= CStatd | CUnique;
2030 tvc->states &= ~CBulkFetching;
2031 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
2032 } else if (tvc->states & CRO) {
2033 /* adapt gives us an hour. */
2034 tvc->cbExpires = 3600 + osi_Time();
2035 /*XXX*/ tvc->states |= CStatd | CUnique;
2036 tvc->states &= ~CBulkFetching;
2037 afs_QueueCallback(tvc, CBHash(3600), tvp);
2039 tvc->callback = NULL;
2040 afs_DequeueCallback(tvc);
2041 tvc->states &= ~(CStatd | CUnique);
2042 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2043 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2046 afs_DequeueCallback(tvc);
2047 tvc->states &= ~CStatd;
2048 tvc->states &= ~CUnique;
2049 tvc->callback = NULL;
2050 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2051 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2053 ReleaseWriteLock(&afs_xcbhash);
2055 afs_PutVolume(tvp, READ_LOCK);
2056 afs_ProcessFS(tvc, &OutStatus, areq);
2058 ReleaseWriteLock(&tvc->lock);
2064 afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
2065 afs_int32 * cached, struct volume *tvolp)
2067 afs_int32 code = 0, i, newvcache = 0, haveStatus = 0;
2068 afs_int32 getNewFid = 0;
2070 struct VenusFid nfid;
2071 register struct vcache *tvc;
2072 struct server *serverp = 0;
2073 struct AFSFetchStatus OutStatus;
2074 struct AFSCallBack CallBack;
2075 struct AFSVolSync tsync;
2084 if (!tvolp->rootVnode || getNewFid) {
2085 struct VenusFid tfid;
2088 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2089 origCBs = afs_allCBs; /* ignore InitCallBackState */
2091 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2096 /* ReleaseReadLock(&tvolp->lock); */
2097 ObtainWriteLock(&tvolp->lock, 56);
2098 tvolp->rootVnode = afid->Fid.Vnode = nfid.Fid.Vnode;
2099 tvolp->rootUnique = afid->Fid.Unique = nfid.Fid.Unique;
2100 ReleaseWriteLock(&tvolp->lock);
2101 /* ObtainReadLock(&tvolp->lock);*/
2104 afid->Fid.Vnode = tvolp->rootVnode;
2105 afid->Fid.Unique = tvolp->rootUnique;
2109 ObtainSharedLock(&afs_xvcache, 7);
2111 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2112 if (!FidCmp(&(tvc->fid), afid)) {
2113 if (tvc->states & CVInit) {
2114 ReleaseSharedLock(&afs_xvcache);
2115 afs_osi_Sleep(&tvc->states);
2119 /* Grab this vnode, possibly reactivating from the free list */
2120 /* for the present (95.05.25) everything on the hash table is
2121 * definitively NOT in the free list -- at least until afs_reclaim
2122 * can be safely implemented */
2124 vg = vget(AFSTOV(tvc)); /* this bumps ref count */
2128 #endif /* AFS_OSF_ENV */
2129 #ifdef AFS_DARWIN80_ENV
2130 if (tvc->states & CDeadVnode) {
2131 ReleaseSharedLock(&afs_xvcache);
2132 afs_osi_Sleep(&tvc->states);
2135 if (vnode_get(AFSTOV(tvc))) /* this bumps ref count */
2142 if (!haveStatus && (!tvc || !(tvc->states & CStatd))) {
2143 /* Mount point no longer stat'd or unknown. FID may have changed. */
2146 AFS_RELE(AFSTOV(tvc));
2149 ReleaseSharedLock(&afs_xvcache);
2150 #ifdef AFS_DARWIN80_ENV
2153 vnode_put(AFSTOV(tvc));
2162 UpgradeSToWLock(&afs_xvcache, 23);
2163 /* no cache entry, better grab one */
2164 tvc = afs_NewVCache(afid, NULL);
2167 ReleaseWriteLock(&afs_xvcache);
2171 afs_stats_cmperf.vcacheMisses++;
2175 afs_stats_cmperf.vcacheHits++;
2177 /* we already bumped the ref count in the for loop above */
2178 #else /* AFS_OSF_ENV */
2181 UpgradeSToWLock(&afs_xvcache, 24);
2182 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2183 refpanic("GRVC VLRU inconsistent0");
2185 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2186 refpanic("GRVC VLRU inconsistent1");
2188 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2189 refpanic("GRVC VLRU inconsistent2");
2191 QRemove(&tvc->vlruq); /* move to lruq head */
2192 QAdd(&VLRU, &tvc->vlruq);
2193 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2194 refpanic("GRVC VLRU inconsistent3");
2196 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2197 refpanic("GRVC VLRU inconsistent4");
2199 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2200 refpanic("GRVC VLRU inconsistent5");
2205 ReleaseWriteLock(&afs_xvcache);
2207 if (tvc->states & CStatd) {
2211 ObtainReadLock(&tvc->lock);
2212 tvc->states &= ~CUnique;
2213 tvc->callback = NULL; /* redundant, perhaps */
2214 ReleaseReadLock(&tvc->lock);
2217 ObtainWriteLock(&tvc->lock, 57);
2219 /* Is it always appropriate to throw away all the access rights? */
2220 afs_FreeAllAxs(&(tvc->Access));
2223 tvc->states |= CForeign;
2224 if (tvolp->states & VRO)
2226 if (tvolp->states & VBackup)
2227 tvc->states |= CBackup;
2228 /* now copy ".." entry back out of volume structure, if necessary */
2229 if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
2230 && (tvolp->rootUnique == afid->Fid.Unique)) {
2233 if (tvc->mvstat == 2 && tvolp->dotdot.Fid.Volume != 0) {
2235 tvc->mvid = (struct VenusFid *)
2236 osi_AllocSmallSpace(sizeof(struct VenusFid));
2237 *tvc->mvid = tvolp->dotdot;
2241 afs_RemoveVCB(afid);
2244 struct VenusFid tfid;
2247 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2248 origCBs = afs_allCBs; /* ignore InitCallBackState */
2250 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2255 ObtainWriteLock(&afs_xcbhash, 467);
2256 afs_DequeueCallback(tvc);
2257 tvc->callback = NULL;
2258 tvc->states &= ~(CStatd | CUnique);
2259 ReleaseWriteLock(&afs_xcbhash);
2260 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2261 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2262 ReleaseWriteLock(&tvc->lock);
2267 ObtainWriteLock(&afs_xcbhash, 468);
2268 if (origCBs == afs_allCBs) {
2269 tvc->states |= CTruth;
2270 tvc->callback = serverp;
2271 if (CallBack.ExpirationTime != 0) {
2272 tvc->cbExpires = CallBack.ExpirationTime + start;
2273 tvc->states |= CStatd;
2274 tvc->states &= ~CBulkFetching;
2275 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
2276 } else if (tvc->states & CRO) {
2277 /* adapt gives us an hour. */
2278 tvc->cbExpires = 3600 + osi_Time();
2279 /*XXX*/ tvc->states |= CStatd;
2280 tvc->states &= ~CBulkFetching;
2281 afs_QueueCallback(tvc, CBHash(3600), tvolp);
2284 afs_DequeueCallback(tvc);
2285 tvc->callback = NULL;
2286 tvc->states &= ~(CStatd | CUnique);
2287 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2288 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2290 ReleaseWriteLock(&afs_xcbhash);
2291 afs_ProcessFS(tvc, &OutStatus, areq);
2293 ReleaseWriteLock(&tvc->lock);
2300 * must be called with avc write-locked
2301 * don't absolutely have to invalidate the hint unless the dv has
2302 * changed, but be sure to get it right else there will be consistency bugs.
2305 afs_FetchStatus(struct vcache * avc, struct VenusFid * afid,
2306 struct vrequest * areq, struct AFSFetchStatus * Outsp)
2309 afs_uint32 start = 0;
2310 register struct conn *tc;
2311 struct AFSCallBack CallBack;
2312 struct AFSVolSync tsync;
2313 struct volume *volp;
2316 tc = afs_Conn(afid, areq, SHARED_LOCK);
2317 avc->dchint = NULL; /* invalidate hints */
2319 avc->callback = tc->srvr->server;
2321 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
2324 RXAFS_FetchStatus(tc->id, (struct AFSFid *)&afid->Fid, Outsp,
2332 } while (afs_Analyze
2333 (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
2334 SHARED_LOCK, NULL));
2337 afs_ProcessFS(avc, Outsp, areq);
2338 volp = afs_GetVolume(afid, areq, READ_LOCK);
2339 ObtainWriteLock(&afs_xcbhash, 469);
2340 avc->states |= CTruth;
2341 if (avc->callback /* check for race */ ) {
2342 if (CallBack.ExpirationTime != 0) {
2343 avc->cbExpires = CallBack.ExpirationTime + start;
2344 avc->states |= CStatd;
2345 avc->states &= ~CBulkFetching;
2346 afs_QueueCallback(avc, CBHash(CallBack.ExpirationTime), volp);
2347 } else if (avc->states & CRO) { /* ordinary callback on a read-only volume -- AFS 3.2 style */
2348 avc->cbExpires = 3600 + start;
2349 avc->states |= CStatd;
2350 avc->states &= ~CBulkFetching;
2351 afs_QueueCallback(avc, CBHash(3600), volp);
2353 afs_DequeueCallback(avc);
2354 avc->callback = NULL;
2355 avc->states &= ~(CStatd | CUnique);
2356 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
2357 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2360 afs_DequeueCallback(avc);
2361 avc->callback = NULL;
2362 avc->states &= ~(CStatd | CUnique);
2363 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
2364 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2366 ReleaseWriteLock(&afs_xcbhash);
2368 afs_PutVolume(volp, READ_LOCK);
2370 /* We used to undo the local callback here, but that is too extreme.
2371 * There are plenty of legitimate reasons for FetchStatus to return an
2372 * error, such as EPERM.  If we have the vnode cached and marked CStatd
2373 * with a callback, we might as well keep track of the fact that this
2374 * user doesn't have access...
2376 if (code == EPERM || code == EACCES) {
2377 struct axscache *ac;
2378 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
2380 else /* not found, add a new one if possible */
2381 afs_AddAxs(avc->Access, areq->uid, 0);
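/*
 * Illustrative sketch, not part of the original source: on EPERM/EACCES the
 * code above caches an empty rights mask for the requesting user in the
 * per-vcache axscache list, so later access checks can fail locally instead
 * of repeating the RPC.  The branch for an already-cached entry presumably
 * clears its rights to zero as well; the helper name is hypothetical.
 */
static void
cache_negative_access_sketch(struct vcache *avc, afs_int32 uid)
{
    struct axscache *ac;

    if (avc->Access && (ac = afs_FindAxs(avc->Access, uid)))
        ac->axess = 0;                          /* existing entry: forget cached rights */
    else
        afs_AddAxs(avc->Access, uid, 0);        /* no entry: add one with no rights */
}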
2392 * Stuff some information into the vcache for the given file.
2395 * afid : File in question.
2396 * OutStatus : Fetch status on the file.
2397 * CallBack : Callback info.
2398 * tc : RPC connection involved.
2399 * areq : vrequest involved.
2402 * Nothing interesting.
2405 afs_StuffVcache(register struct VenusFid *afid,
2406 struct AFSFetchStatus *OutStatus,
2407 struct AFSCallBack *CallBack, register struct conn *tc,
2408 struct vrequest *areq)
2410 register afs_int32 code, i, newvcache = 0;
2411 register struct vcache *tvc;
2412 struct AFSVolSync tsync;
2414 struct axscache *ac;
2417 AFS_STATCNT(afs_StuffVcache);
2418 #ifdef IFS_VCACHECOUNT
2423 ObtainSharedLock(&afs_xvcache, 8);
2425 tvc = afs_FindVCache(afid, &retry, DO_VLRU | IS_SLOCK /* no stats */ );
2427 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2428 ReleaseSharedLock(&afs_xvcache);
2429 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2435 /* no cache entry, better grab one */
2436 UpgradeSToWLock(&afs_xvcache, 25);
2437 tvc = afs_NewVCache(afid, NULL);
2439 ConvertWToSLock(&afs_xvcache);
2442 ReleaseSharedLock(&afs_xvcache);
2447 ReleaseSharedLock(&afs_xvcache);
2448 ObtainWriteLock(&tvc->lock, 58);
2450 tvc->states &= ~CStatd;
2451 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2452 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2454 /* Is it always appropriate to throw away all the access rights? */
2455 afs_FreeAllAxs(&(tvc->Access));
2457 /*Copy useful per-volume info */
2458 tvp = afs_GetVolume(afid, areq, READ_LOCK);
2460 if (newvcache && (tvp->states & VForeign))
2461 tvc->states |= CForeign;
2462 if (tvp->states & VRO)
2464 if (tvp->states & VBackup)
2465 tvc->states |= CBackup;
2467 * Now, copy ".." entry back out of volume structure, if necessary.
2470 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2472 tvc->mvid = (struct VenusFid *)
2473 osi_AllocSmallSpace(sizeof(struct VenusFid));
2474 *tvc->mvid = tvp->dotdot;
2477 /* store the stat on the file */
2478 afs_RemoveVCB(afid);
2479 afs_ProcessFS(tvc, OutStatus, areq);
2480 tvc->callback = tc->srvr->server;
2482 /* We use osi_Time twice below.  Ideally we would instead use the time at
2483 * which the FetchStatus call began, but we don't have it here, so we make
2484 * do with "now".  In the CRO case it doesn't really matter.  In the other
2485 * case, we hope that the difference between "now" and when the call
2486 * actually began executing on the server is no larger than the padding the
2487 * server keeps.  We subtract 1 second anyway, to be on the safe side; we
2488 * can't subtract more because we don't know how small ExpirationTime might
2489 * be.  Consistency problems can arise if the call timeout period becomes
2490 * longer than the server's expiration padding.
2491 ObtainWriteLock(&afs_xcbhash, 470);
2492 if (CallBack->ExpirationTime != 0) {
2493 tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
2494 tvc->states |= CStatd;
2495 tvc->states &= ~CBulkFetching;
2496 afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
2497 } else if (tvc->states & CRO) {
2498 /* old-fashioned AFS 3.2 style */
2499 tvc->cbExpires = 3600 + osi_Time();
2500 /*XXX*/ tvc->states |= CStatd;
2501 tvc->states &= ~CBulkFetching;
2502 afs_QueueCallback(tvc, CBHash(3600), tvp);
2504 afs_DequeueCallback(tvc);
2505 tvc->callback = NULL;
2506 tvc->states &= ~(CStatd | CUnique);
2507 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2508 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2510 ReleaseWriteLock(&afs_xcbhash);
2512 afs_PutVolume(tvp, READ_LOCK);
2514 /* look in per-pag cache */
2515 if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
2516 ac->axess = OutStatus->CallerAccess; /* substitute pags */
2517 else /* not found, add a new one if possible */
2518 afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);
2520 ReleaseWriteLock(&tvc->lock);
2521 afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
2522 ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32,
2523 tvc->cbExpires, ICL_TYPE_INT32, tvc->cbExpires - osi_Time());
2525 * Release ref count... hope this guy stays around...
2528 } /*afs_StuffVcache */
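/*
 * Worked sketch, not part of the original source, of the expiry arithmetic
 * used in afs_StuffVcache above (and in afs_FetchStatus): the server returns
 * ExpirationTime as a lifetime in seconds, which is converted to an absolute
 * time by adding the local clock.  Ideally the base would be the time the
 * RPC started; when only "now" is available, one second is shaved off to
 * stay on the safe side.  The helper and parameter names are hypothetical.
 */
static afs_int32
callback_expiry_sketch(afs_uint32 serverLifetime)
{
    if (serverLifetime != 0)
        return serverLifetime + osi_Time() - 1; /* normal callback */
    return 3600 + osi_Time();                   /* RO volume: assume one hour */
}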
2535 * Decrements the reference count on a cache entry.
2538 * avc : Pointer to the cache entry to decrement.
2541 * Nothing interesting.
2544 afs_PutVCache(register struct vcache *avc)
2546 AFS_STATCNT(afs_PutVCache);
2547 #ifdef AFS_DARWIN80_ENV
2548 vnode_put(AFSTOV(avc));
2552 * Can we use a read lock here?
2554 ObtainReadLock(&afs_xvcache);
2556 ReleaseReadLock(&afs_xvcache);
2558 } /*afs_PutVCache */
2561 static void findvc_sleep(struct vcache *avc, int flag) {
2562 if (flag & IS_SLOCK) {
2563 ReleaseSharedLock(&afs_xvcache);
2565 if (flag & IS_WLOCK) {
2566 ReleaseWriteLock(&afs_xvcache);
2568 ReleaseReadLock(&afs_xvcache);
2571 afs_osi_Sleep(&avc->states);
2572 if (flag & IS_SLOCK) {
2573 ObtainSharedLock(&afs_xvcache, 341);
2575 if (flag & IS_WLOCK) {
2576 ObtainWriteLock(&afs_xvcache, 343);
2578 ObtainReadLock(&afs_xvcache);
2586 * Find a vcache entry given a fid.
2589 * afid : Pointer to the fid whose cache entry we desire.
2590 * retry: (SGI-specific) tell the caller to drop the lock on xvcache,
2591 * unlock the vnode, and try again.
2592 * flags: bit 1 to specify whether to compute hit statistics. Not
2593 * set if FindVCache is called as part of internal bookkeeping.
2596 * Must be called with the afs_xvcache lock at least held at
2597 * the read level. In order to do the VLRU adjustment, the xvcache lock
2598 * must be shared-- we upgrade it here.
2602 afs_FindVCache(struct VenusFid *afid, afs_int32 * retry, afs_int32 flag)
2605 register struct vcache *tvc;
2607 #if defined(AFS_OSF_ENV)
2611 AFS_STATCNT(afs_FindVCache);
2615 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2616 if (FidMatches(afid, tvc)) {
2617 if (tvc->states & CVInit) {
2618 findvc_sleep(tvc, flag);
2622 /* Grab this vnode, possibly reactivating from the free list */
2624 vg = vget(AFSTOV(tvc));
2628 #endif /* AFS_OSF_ENV */
2629 #ifdef AFS_DARWIN80_ENV
2630 if (tvc->states & CDeadVnode) {
2631 findvc_sleep(tvc, flag);
2634 if (vnode_get(AFSTOV(tvc)))
2641 /* should I have a read lock on the vnode here? */
2645 #if !defined(AFS_OSF_ENV)
2646 osi_vnhold(tvc, retry); /* already held, above */
2647 if (retry && *retry)
2650 #if defined(AFS_DARWIN_ENV) && !defined(AFS_DARWIN80_ENV)
2651 tvc->states |= CUBCinit;
2653 if (UBCINFOMISSING(AFSTOV(tvc)) ||
2654 UBCINFORECLAIMED(AFSTOV(tvc))) {
2655 ubc_info_init(AFSTOV(tvc));
2658 tvc->states &= ~CUBCinit;
2661 * only move to front of vlru if we have proper vcache locking)
2663 if (flag & DO_VLRU) {
2664 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2665 refpanic("FindVC VLRU inconsistent1");
2667 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2668 refpanic("FindVC VLRU inconsistent1");
2670 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2671 refpanic("FindVC VLRU inconsistent2");
2673 UpgradeSToWLock(&afs_xvcache, 26);
2674 QRemove(&tvc->vlruq);
2675 QAdd(&VLRU, &tvc->vlruq);
2676 ConvertWToSLock(&afs_xvcache);
2677 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2678 refpanic("FindVC VLRU inconsistent1");
2680 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2681 refpanic("FindVC VLRU inconsistent2");
2683 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2684 refpanic("FindVC VLRU inconsistent3");
2690 if (flag & DO_STATS) {
2692 afs_stats_cmperf.vcacheHits++;
2694 afs_stats_cmperf.vcacheMisses++;
2695 if (afs_IsPrimaryCellNum(afid->Cell))
2696 afs_stats_cmperf.vlocalAccesses++;
2698 afs_stats_cmperf.vremoteAccesses++;
2701 } /*afs_FindVCache */
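/*
 * Illustrative caller sketch, not part of the original source, of the
 * locking and reference-count convention documented above: afs_xvcache is
 * held at least at read level around afs_FindVCache (a shared lock would be
 * needed if DO_VLRU were passed, since it is upgraded internally for the
 * VLRU move), and any reference the lookup takes is dropped again with
 * afs_PutVCache.  Assumes a non-SGI caller, so retry may be NULL; the
 * helper name is hypothetical.
 */
static void
lookup_and_release_sketch(struct VenusFid *afid)
{
    struct vcache *tvc;

    ObtainReadLock(&afs_xvcache);
    tvc = afs_FindVCache(afid, NULL, DO_STATS); /* count hit/miss statistics */
    ReleaseReadLock(&afs_xvcache);

    if (tvc) {
        /* ... use the cache entry ... */
        afs_PutVCache(tvc);     /* drop the reference taken by the lookup */
    }
}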
2707 * Find a vcache entry given a fid.  Does a wildcard match on the parts of
2708 * the fid we have.  If more than one entry matches, don't return anything.
2711 * avcp : Filled in with the entry if we found one and only one.
2712 * afid : Pointer to the fid whose cache entry we desire.
2713 * (Unlike afs_FindVCache, this routine takes no retry or flags arguments;
2714 * hit statistics are always computed.)
2719 * Locking: unlike afs_FindVCache, this routine acquires afs_xvcache itself
2720 * (shared, upgraded for the VLRU adjustment) and releases it before
2721 * returning, so the caller should not hold it.
2724 * number of matches found.
2727 int afs_duplicate_nfs_fids = 0;
2730 afs_NFSFindVCache(struct vcache **avcp, struct VenusFid *afid)
2732 register struct vcache *tvc;
2734 afs_int32 count = 0;
2735 struct vcache *found_tvc = NULL;
2740 AFS_STATCNT(afs_FindVCache);
2744 ObtainSharedLock(&afs_xvcache, 331);
2747 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2748 /* Match only on what we have.... */
2749 if (((tvc->fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
2750 && (tvc->fid.Fid.Volume == afid->Fid.Volume)
2751 && ((tvc->fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
2752 && (tvc->fid.Cell == afid->Cell)) {
2753 if (tvc->states & CVInit) {
2755 ReleaseSharedLock(&afs_xvcache);
2756 afs_osi_Sleep(&tvc->states);
2760 /* Grab this vnode, possibly reactivating from the free list */
2762 vg = vget(AFSTOV(tvc));
2765 /* This vnode no longer exists. */
2768 #endif /* AFS_OSF_ENV */
2769 #ifdef AFS_DARWIN80_ENV
2770 if (tvc->states & CDeadVnode) {
2771 ReleaseSharedLock(&afs_xvcache);
2772 afs_osi_Sleep(&tvc->states);
2775 if (vnode_get(AFSTOV(tvc))) {
2776 /* This vnode no longer exists. */
2779 #endif /* AFS_DARWIN80_ENV */
2784 /* Drop our reference counts. */
2786 vrele(AFSTOV(found_tvc));
2788 afs_duplicate_nfs_fids++;
2789 ReleaseSharedLock(&afs_xvcache);
2790 #ifdef AFS_DARWIN80_ENV
2791 /* Drop our reference counts. */
2792 vnode_put(AFSTOV(tvc));
2793 vnode_put(AFSTOV(found_tvc));
2802 /* should I have a read lock on the vnode here? */
2804 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2805 afs_int32 retry = 0;
2806 osi_vnhold(tvc, &retry);
2809 found_tvc = (struct vcache *)0;
2810 ReleaseSharedLock(&afs_xvcache);
2811 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2815 #if !defined(AFS_OSF_ENV)
2816 osi_vnhold(tvc, (int *)0); /* already held, above */
2820 * We obtained the xvcache lock above.
2822 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2823 refpanic("FindVC VLRU inconsistent1");
2825 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2826 refpanic("FindVC VLRU inconsistent1");
2828 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2829 refpanic("FindVC VLRU inconsistent2");
2831 UpgradeSToWLock(&afs_xvcache, 568);
2832 QRemove(&tvc->vlruq);
2833 QAdd(&VLRU, &tvc->vlruq);
2834 ConvertWToSLock(&afs_xvcache);
2835 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2836 refpanic("FindVC VLRU inconsistent1");
2838 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2839 refpanic("FindVC VLRU inconsistent2");
2841 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2842 refpanic("FindVC VLRU inconsistent3");
2848 afs_stats_cmperf.vcacheHits++;
2850 afs_stats_cmperf.vcacheMisses++;
2851 if (afs_IsPrimaryCellNum(afid->Cell))
2852 afs_stats_cmperf.vlocalAccesses++;
2854 afs_stats_cmperf.vremoteAccesses++;
2856 *avcp = tvc; /* May be null */
2858 ReleaseSharedLock(&afs_xvcache);
2859 return (tvc ? 1 : 0);
2861 } /*afs_NFSFindVCache */
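/*
 * Minimal sketch, not part of the original source, of the "wildcard" fid
 * match performed by afs_NFSFindVCache above.  Fids arriving through the
 * NFS translator apparently preserve only the low 16 bits of Vnode and the
 * low 24 bits of Unique, so the cached fid is masked before comparison; if
 * the masked fid matches more than one cache entry, the lookup is abandoned
 * and afs_duplicate_nfs_fids is bumped.  The helper name is hypothetical.
 */
static int
nfs_fid_matches_sketch(struct vcache *tvc, struct VenusFid *afid)
{
    return ((tvc->fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode
            && tvc->fid.Fid.Volume == afid->Fid.Volume
            && (tvc->fid.Fid.Unique & 0xffffff) == afid->Fid.Unique
            && tvc->fid.Cell == afid->Cell);
}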
2869 * Initialize vcache related variables
2872 afs_vcacheInit(int astatSize)
2874 register struct vcache *tvp;
2876 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
2877 if (!afs_maxvcount) {
2878 #if defined(AFS_LINUX22_ENV)
2879 afs_maxvcount = astatSize; /* no particular limit on linux? */
2880 #elif defined(AFS_OSF30_ENV)
2881 afs_maxvcount = max_vnodes / 2; /* limit ourselves to half the total */
2883 afs_maxvcount = nvnode / 2; /* limit ourselves to half the total */
2885 if (astatSize < afs_maxvcount) {
2886 afs_maxvcount = astatSize;
2889 #else /* AFS_OSF_ENV */
2893 RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
2894 LOCK_INIT(&afs_xvcb, "afs_xvcb");
2896 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
2897 /* Allocate and thread the struct vcache entries */
2898 tvp = (struct vcache *)afs_osi_Alloc(astatSize * sizeof(struct vcache));
2899 memset((char *)tvp, 0, sizeof(struct vcache) * astatSize);
2901 Initial_freeVCList = tvp;
2902 freeVCList = &(tvp[0]);
2903 for (i = 0; i < astatSize - 1; i++) {
2904 tvp[i].nextfree = &(tvp[i + 1]);
2906 tvp[astatSize - 1].nextfree = NULL;
2907 #ifdef KERNEL_HAVE_PIN
2908 pin((char *)tvp, astatSize * sizeof(struct vcache)); /* XXX */
2912 #if defined(AFS_SGI_ENV)
2913 for (i = 0; i < astatSize; i++) {
2914 char name[METER_NAMSZ];
2915 struct vcache *tvc = &tvp[i];
2917 tvc->v.v_number = ++afsvnumbers;
2918 tvc->vc_rwlockid = OSI_NO_LOCKID;
2919 initnsema(&tvc->vc_rwlock, 1,
2920 makesname(name, "vrw", tvc->v.v_number));
2921 #ifndef AFS_SGI53_ENV
2922 initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
2924 #ifndef AFS_SGI62_ENV
2925 initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
2926 #endif /* AFS_SGI62_ENV */
2930 for(i = 0; i < VCSIZE; ++i)
2931 QInit(&afs_vhashTV[i]);
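/*
 * Illustrative sketch, not part of the original source: on platforms that
 * use the static pool, afs_vcacheInit above threads the allocated entries
 * into freeVCList through their nextfree pointers.  A consumer
 * (afs_NewVCache, by assumption) would then pop entries from the head
 * roughly as below, with afs_xvcache write-locked; the helper name is
 * hypothetical.
 */
static struct vcache *
pop_free_vcache_sketch(void)
{
    struct vcache *tvc = freeVCList;

    if (tvc) {
        freeVCList = tvc->nextfree;
        tvc->nextfree = NULL;
    }
    return tvc;
}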
2939 shutdown_vcache(void)
2942 struct afs_cbr *tsp, *nsp;
2944 * XXX We may miss some of the vcaches because, when there are no free
2945 * vcache entries and all existing entries are active, we allocate an
2946 * additional one; admittedly, that almost never occurs.
2950 register struct afs_q *tq, *uq;
2951 register struct vcache *tvc;
2952 for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
2956 osi_FreeSmallSpace(tvc->mvid);
2957 tvc->mvid = (struct VenusFid *)0;
2960 aix_gnode_rele(AFSTOV(tvc));
2962 if (tvc->linkData) {
2963 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
2968 * Also free the remaining ones in the Cache
2970 for (i = 0; i < VCSIZE; i++) {
2971 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2973 osi_FreeSmallSpace(tvc->mvid);
2974 tvc->mvid = (struct VenusFid *)0;
2978 afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
2979 #ifdef AFS_AIX32_ENV
2982 vms_delete(tvc->segid);
2984 tvc->segid = tvc->vmh = NULL;
2985 if (VREFCOUNT_GT(tvc,0))
2986 osi_Panic("flushVcache: vm race");
2994 #if defined(AFS_SUN5_ENV)
3000 if (tvc->linkData) {
3001 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
3005 afs_FreeAllAxs(&(tvc->Access));
3011 * Free any leftover callback queue
3013 for (tsp = afs_cbrSpace; tsp; tsp = nsp) {
3015 afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
3019 #ifdef KERNEL_HAVE_PIN
3020 unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3022 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
3023 afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3026 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
3027 freeVCList = Initial_freeVCList = 0;
3029 RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
3030 LOCK_INIT(&afs_xvcb, "afs_xvcb");
3032 for(i = 0; i < VCSIZE; ++i)
3033 QInit(&afs_vhashTV[i]);