2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 * afs_FlushActiveVcaches
38 #include "../afs/param.h" /* Should always be first */
39 #include "../afs/sysincludes.h" /*Standard vendor system headers*/
40 #include "../afs/afsincludes.h" /*AFS-based standard headers*/
41 #include "../afs/afs_stats.h"
42 #include "../afs/afs_cbqueue.h"
43 #include "../afs/afs_osidnlc.h"
46 afs_int32 afs_maxvcount = 0; /* max number of vcache entries */
47 afs_int32 afs_vcount = 0; /* number of vcache in use now */
48 #if defined(AFS_OSF30_ENV)
49 extern int max_vnodes; /* number of total system vnodes */
51 extern int nvnode; /* number of total system vnodes */
54 extern int numvnodes; /* number vnodes in use now */
56 #endif /* AFS_OSF_ENV */
61 /* Imported variables */
62 extern struct server *afs_servers[NSERVERS];
63 extern afs_rwlock_t afs_xserver;
64 extern afs_rwlock_t afs_xcbhash;
65 extern struct vcache *afs_globalVp;
67 extern struct mount *afs_globalVFS;
68 extern struct vnodeops Afs_vnodeops;
70 extern struct vfs *afs_globalVFS;
71 #endif /* AFS_OSF_ENV */
72 #if defined(AFS_DUX40_ENV)
73 extern struct vfs_ubcops afs_ubcops;
76 extern struct vnodeops Afs_vnodeops;
80 #endif /* AFS_SGI64_ENV */
82 /* Exported variables */
83 afs_rwlock_t afs_xvcache; /*Lock: alloc new stat cache entries*/
84 afs_lock_t afs_xvcb; /*Lock: fids on which there are callbacks*/
85 struct vcache *freeVCList; /*Free list for stat cache entries*/
86 struct vcache *Initial_freeVCList; /*Initial list for above*/
87 struct afs_q VLRU; /*vcache LRU*/
88 afs_int32 vcachegen = 0;
89 unsigned int afs_paniconwarn = 0;
90 struct vcache *afs_vhashT[VCSIZE];
91 afs_int32 afs_bulkStatsLost;
92 int afs_norefpanic = 0;
94 /* Forward declarations */
95 static afs_int32 afs_QueueVCB(struct vcache *avc);
102 * Flush the given vcache entry.
105 * avc : Pointer to vcache entry to flush.
106 * slept : Pointer to int; set to 1 if we sleep/drop locks, 0 if we don't.
109 * afs_xvcache lock must be held for writing upon entry to
110 * prevent people from changing the vrefCount field, and to
111 * protect the lruq and hnext fields.
112 * LOCK: afs_FlushVCache afs_xvcache W
113 * REFCNT: vcache ref count must be zero on entry except for osf1
114 * RACE: lock is dropped and reobtained, permitting race in caller
117 int afs_FlushVCache(struct vcache *avc, int *slept)
118 { /*afs_FlushVCache*/
120 register afs_int32 i, code;
121 register struct vcache **uvc, *wvc, *tvc;
124 AFS_STATCNT(afs_FlushVCache);
125 afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
126 ICL_TYPE_INT32, avc->states);
129 VN_LOCK((struct vnode *)avc);
133 code = osi_VM_FlushVCache(avc, slept);
137 if (avc->states & CVFlushed) {
141 if (avc->nextfree || !avc->vlruq.prev || !avc->vlruq.next) { /* qv afs.h */
142 refpanic ("LRU vs. Free inconsistency");
144 avc->states |= CVFlushed;
145 /* pull the entry out of the lruq and put it on the free list */
146 QRemove(&avc->vlruq);
147 avc->vlruq.prev = avc->vlruq.next = (struct afs_q *) 0;
149 /* keep track of # of files that we bulk stat'd, but never used
150 * before they got recycled.
152 if (avc->states & CBulkStat)
155 /* remove entry from the hash chain */
156 i = VCHash(&avc->fid);
157 uvc = &afs_vhashT[i];
158 for(wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
161 avc->hnext = (struct vcache *) NULL;
165 if (!wvc) osi_Panic("flushvcache"); /* not in correct hash bucket */
166 if (avc->mvid) osi_FreeSmallSpace(avc->mvid);
167 avc->mvid = (struct VenusFid*)0;
169 afs_osi_Free(avc->linkData, strlen(avc->linkData)+1);
170 avc->linkData = NULL;
172 afs_FreeAllAxs(&(avc->Access));
174 /* we can't really give back callbacks on RO files, since the
175 * server only tracks them on a per-volume basis, and we don't
176 * know whether we still have some other files from the same
178 if ((avc->states & CRO) == 0 && avc->callback) {
181 ObtainWriteLock(&afs_xcbhash, 460);
182 afs_DequeueCallback(avc); /* remove it from queued callbacks list */
183 avc->states &= ~(CStatd | CUnique);
184 ReleaseWriteLock(&afs_xcbhash);
185 afs_symhint_inval(avc);
186 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
187 osi_dnlc_purgedp (avc); /* if it (could be) a directory */
189 osi_dnlc_purgevp (avc);
192 * Next, keep track of which vnodes we've deleted for create's
193 * optimistic synchronization algorithm
196 if (avc->fid.Fid.Vnode & 1) afs_oddZaps++;
199 #if !defined(AFS_OSF_ENV)
200 /* put the entry in the free list */
201 avc->nextfree = freeVCList;
203 if (avc->vlruq.prev || avc->vlruq.next) {
204 refpanic ("LRU vs. Free inconsistency");
207 /* This should put it back on the vnode free list since usecount is 1 */
210 if (avc->vrefCount > 0) {
211 VN_UNLOCK((struct vnode *)avc);
212 AFS_RELE((struct vnode *)avc);
214 if (afs_norefpanic) {
215 printf ("flush vc refcnt < 1");
217 (void) vgone(avc, VX_NOSLEEP, (struct vnodeops *) 0);
219 VN_UNLOCK((struct vnode *)avc);
221 else osi_Panic ("flush vc refcnt < 1");
223 #endif /* AFS_OSF_ENV */
224 avc->states |= CVFlushed;
229 VN_UNLOCK((struct vnode *)avc);
233 } /*afs_FlushVCache*/
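/*
 * Illustrative sketch (not part of the original source): how a caller is
 * expected to use afs_FlushVCache under the locking/refcount contract
 * documented above.  It is modeled on the VLRU reclaim loop in
 * afs_NewVCache below; the function name is an example, not from this file.
 */
#if 0	/* example only, never compiled */
static void
example_reclaim_one(struct vcache *tvc)
{
    int fv_slept;
    afs_int32 code;

    /* the caller already holds afs_xvcache for writing at this point */
    if (tvc->vrefCount == 0 && tvc->opens == 0
	&& (tvc->states & CUnlinkedDel) == 0) {
	code = afs_FlushVCache(tvc, &fv_slept);
	if (code == 0 && fv_slept) {
	    /* the lock was dropped and reobtained inside the call, so any
	     * cached VLRU pointers may be stale; restart the scan */
	}
    }
}
#endif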
239 * The core of the inactive vnode op for all but IRIX.
241 void afs_InactiveVCache(struct vcache *avc, struct AFS_UCRED *acred)
243 extern afs_rwlock_t afs_xdcache, afs_xvcache;
245 AFS_STATCNT(afs_inactive);
246 if (avc->states & CDirty) {
247 /* we can't keep trying to push back dirty data forever. Give up. */
248 afs_InvalidateAllSegments(avc, 1/*set lock*/); /* turns off dirty bit */
250 avc->states &= ~CMAPPED; /* mainly used by SunOS 4.0.x */
251 avc->states &= ~CDirty; /* Turn it off */
252 if (avc->states & CUnlinked) {
253 if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
254 avc->states |= CUnlinkedDel;
257 afs_remunlink(avc, 1); /* ignore any return code */
266 * Description: allocate a callback return structure from the
267 * free list and return it.
269 * Env: The alloc and free routines are both called with the afs_xvcb lock
270 * held, so we don't have to worry about blocking in osi_Alloc.
272 static struct afs_cbr *afs_cbrSpace = 0;
273 struct afs_cbr *afs_AllocCBR() {
274 register struct afs_cbr *tsp;
277 while (!afs_cbrSpace) {
278 if (afs_stats_cmperf.CallBackAlloced >= 2) {
279 /* don't allocate more than 2 * AFS_NCBRS for now */
281 afs_stats_cmperf.CallBackFlushes++;
285 tsp = (struct afs_cbr *) afs_osi_Alloc(AFS_NCBRS * sizeof(struct afs_cbr));
286 for(i=0; i < AFS_NCBRS-1; i++) {
287 tsp[i].next = &tsp[i+1];
289 tsp[AFS_NCBRS-1].next = 0;
291 afs_stats_cmperf.CallBackAlloced++;
295 afs_cbrSpace = tsp->next;
302 * Description: free a callback return structure.
305 * asp -- the address of the structure to free.
307 * Environment: the xvcb lock is held over these calls.
310 register struct afs_cbr *asp; {
311 asp->next = afs_cbrSpace;
319 * Description: flush all queued callbacks to all servers.
323 * Environment: holds xvcb lock over RPC to guard against race conditions
324 * when a new callback is granted for the same file later on.
326 afs_int32 afs_FlushVCBs (afs_int32 lockit)
328 struct AFSFid tfids[AFS_MAXCBRSCALL];
329 struct AFSCallBack callBacks[1];
330 struct AFSCBFids fidArray;
331 struct AFSCBs cbArray;
333 struct afs_cbr *tcbrp;
337 struct vrequest treq;
339 int safety1, safety2, safety3;
340 extern int afs_totalServers;
343 if (code = afs_InitReq(&treq, &afs_osi_cred)) return code;
344 treq.flags |= O_NONBLOCK;
346 if (lockit) MObtainWriteLock(&afs_xvcb,273);
347 ObtainReadLock(&afs_xserver);
348 for(i=0; i<NSERVERS; i++) {
349 for(safety1 = 0, tsp = afs_servers[i];
350 tsp && safety1 < afs_totalServers+10; tsp=tsp->next, safety1++) {
352 if (tsp->cbrs == (struct afs_cbr *) 0) continue;
354 /* otherwise, grab a block of AFS_MAXCBRSCALL from the list
355 * and make an RPC, over and over again.
357 tcount = 0; /* number found so far */
358 for (safety2 = 0; safety2 < afs_cacheStats ; safety2++) {
359 if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
360 /* if buffer is full, or we've queued all we're going
361 * to from this server, we should flush out the
364 fidArray.AFSCBFids_len = tcount;
365 fidArray.AFSCBFids_val = (struct AFSFid *) tfids;
366 cbArray.AFSCBs_len = 1;
367 cbArray.AFSCBs_val = callBacks;
368 callBacks[0].CallBackType = CB_EXCLUSIVE;
369 for (safety3 = 0; safety3 < MAXHOSTS*2; safety3++) {
370 tc = afs_ConnByHost(tsp, tsp->cell->fsport,
371 tsp->cell->cell, &treq, 0,
374 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
375 #ifdef RX_ENABLE_LOCKS
377 #endif /* RX_ENABLE_LOCKS */
378 code = RXAFS_GiveUpCallBacks(tc->id, &fidArray,
380 #ifdef RX_ENABLE_LOCKS
382 #endif /* RX_ENABLE_LOCKS */
386 if (!afs_Analyze(tc, code, 0, &treq,
387 AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS,
388 SHARED_LOCK, tsp->cell)) {
392 /* ignore return code, since callbacks may have
393 * been returned anyway, we shouldn't leave them
394 * around to be returned again.
396 * Next, see if we are done with this server, and if so,
397 * break to deal with the next one.
399 if (!tsp->cbrs) break;
401 } /* if to flush full buffer */
402 /* if we make it here, we have an entry at the head of cbrs,
403 * which we should copy to the file ID array and then free.
406 tfids[tcount++] = tcbrp->fid;
407 tsp->cbrs = tcbrp->next;
409 } /* while loop for this one server */
410 if (safety2 > afs_cacheStats) {
411 afs_warn("possible internal error afs_flushVCBs (%d)\n", safety2);
413 } /* for loop for this hash chain */
414 } /* loop through all hash chains */
415 if (safety1 > afs_totalServers+2) {
416 afs_warn("AFS internal error (afs_flushVCBs) (%d > %d), continuing...\n", safety1, afs_totalServers+2);
418 osi_Panic("afs_flushVCBS safety1");
421 ReleaseReadLock(&afs_xserver);
422 if (lockit) MReleaseWriteLock(&afs_xvcb);
430 * Queue a callback on the given fid.
436 * Locks the xvcb lock.
437 * Called when the xvcache lock is already held.
440 static afs_int32 afs_QueueVCB(struct vcache *avc)
442 register struct server *tsp;
443 register struct afs_cbr *tcbp;
445 AFS_STATCNT(afs_QueueVCB);
446 /* The callback is really just a struct server ptr. */
447 tsp = (struct server *)(avc->callback);
449 /* we now have a pointer to the server, so we just allocate
450 * a queue entry and queue it.
452 MObtainWriteLock(&afs_xvcb,274);
453 tcbp = afs_AllocCBR();
454 tcbp->fid = avc->fid.Fid;
455 tcbp->next = tsp->cbrs;
458 /* now release locks and return */
459 MReleaseWriteLock(&afs_xvcb);
468 * Remove a queued callback by looking through all the servers
469 * to see if any have this callback queued.
472 * afid: The fid we want cleansed of queued callbacks.
475 * Locks xvcb and xserver locks.
476 * Typically called with xdcache, xvcache and/or individual vcache
481 register struct VenusFid *afid;
486 register struct server *tsp;
487 register struct afs_cbr *tcbrp;
488 struct afs_cbr **lcbrpp;
490 AFS_STATCNT(afs_RemoveVCB);
491 MObtainWriteLock(&afs_xvcb,275);
492 ObtainReadLock(&afs_xserver);
493 for(i=0;i<NSERVERS;i++) {
494 for(tsp=afs_servers[i]; tsp; tsp=tsp->next) {
495 /* if cell is known, and is wrong, then skip this server */
496 if (tsp->cell && tsp->cell->cell != afid->Cell) continue;
499 * Otherwise, iterate through file IDs we're sending to the
502 lcbrpp = &tsp->cbrs; /* first queued return callback */
503 for(tcbrp = *lcbrpp; tcbrp; lcbrpp = &tcbrp->next, tcbrp = *lcbrpp) {
504 if (afid->Fid.Volume == tcbrp->fid.Volume &&
505 afid->Fid.Unique == tcbrp->fid.Unique &&
506 afid->Fid.Vnode == tcbrp->fid.Vnode) {
507 *lcbrpp = tcbrp->next; /* unthread from list */
515 ReleaseReadLock(&afs_xserver);
516 MReleaseWriteLock(&afs_xvcb);
527 * This routine is responsible for allocating a new cache entry
528 * from the free list. It formats the cache entry and inserts it
529 * into the appropriate hash tables. It must be called with
530 * afs_xvcache write-locked so as to prevent several processes from
531 * trying to create a new cache entry simultaneously.
534 * afid : The file id of the file whose cache entry is being
537 /* LOCK: afs_NewVCache afs_xvcache W */
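/*
 * Illustrative usage (not in the original): afs_GetVCache below creates a
 * missing entry roughly like this, upgrading its shared hold on afs_xvcache
 * to a write lock around the call (the lock-site number is the one it uses):
 *
 *	UpgradeSToWLock(&afs_xvcache, 21);
 *	tvc = afs_NewVCache(afid, (struct server *)0, 1, WRITE_LOCK);
 *	ConvertWToSLock(&afs_xvcache);
 */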
538 struct vcache *afs_NewVCache(struct VenusFid *afid, struct server *serverp,
539 afs_int32 lockit, afs_int32 locktype)
543 afs_int32 anumber = VCACHE_FREE;
545 struct gnode *gnodepnt;
548 struct vm_info * vm_info_ptr;
549 #endif /* AFS_MACH_ENV */
552 #endif /* AFS_OSF_ENV */
553 struct afs_q *tq, *uq;
556 AFS_STATCNT(afs_NewVCache);
557 #ifdef AFS_LINUX22_ENV
559 /* Free some if possible. */
560 struct afs_q *tq, *uq;
561 int i; char *panicstr;
562 int vmax = 2 * afs_cacheStats;
563 int vn = VCACHE_FREE;
566 for(tq = VLRU.prev; tq != &VLRU && vn > 0; tq = uq) {
569 if (tvc->states & CVFlushed)
570 refpanic ("CVFlushed on VLRU");
572 refpanic ("Exceeded pool of AFS vnodes(VLRU cycle?)");
573 else if (QNext(uq) != tq)
574 refpanic ("VLRU inconsistent");
576 if (tvc == afs_globalVp)
579 if ( tvc->vrefCount && tvc->opens == 0 ) {
580 struct inode *ip = (struct inode*)tvc;
581 if (list_empty(&ip->i_dentry)) {
585 struct list_head *cur;
586 struct list_head *head = &ip->i_dentry;
589 #if defined(AFS_LINUX24_ENV)
590 spin_lock(&dcache_lock);
593 while ((cur = cur->next) != head) {
594 struct dentry *dentry = list_entry(cur, struct dentry, d_alias);
595 #if defined(AFS_LINUX24_ENV)
596 if (!atomic_read(&dentry->d_count)) {
598 if (!dentry->d_count) {
601 #if defined(AFS_LINUX24_ENV)
603 spin_unlock(&dcache_lock);
616 #if defined(AFS_LINUX24_ENV)
617 spin_unlock(&dcache_lock);
625 #endif /* AFS_LINUX22_ENV */
628 if (afs_vcount >= afs_maxvcount)
631 * If we are using > 33 % of the total system vnodes for AFS vcache
632 * entries or we are using the maximum number of vcache entries,
633 * then free some. (if our usage is > 33% we should free some, if
634 * our usage is > afs_maxvcount, set elsewhere to 0.5*nvnode,
635 * we _must_ free some -- no choice).
637 if ( (( 3 * afs_vcount ) > nvnode) || ( afs_vcount >= afs_maxvcount ))
640 struct afs_q *tq, *uq;
641 int i; char *panicstr;
644 for(tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
647 if (tvc->states & CVFlushed)
648 refpanic ("CVFlushed on VLRU");
649 else if (i++ > afs_maxvcount)
650 refpanic ("Exceeded pool of AFS vnodes(VLRU cycle?)");
651 else if (QNext(uq) != tq)
652 refpanic ("VLRU inconsistent");
653 else if (tvc->vrefCount < 1)
654 refpanic ("refcnt 0 on VLRU");
656 if ( tvc->vrefCount == 1 && tvc->opens == 0
657 && (tvc->states & CUnlinkedDel) == 0) {
658 code = afs_FlushVCache(tvc, &fv_slept);
665 continue; /* start over - may have raced. */
670 if (anumber == VCACHE_FREE) {
671 printf("NewVCache: warning none freed, using %d of %d\n",
672 afs_vcount, afs_maxvcount);
673 if (afs_vcount >= afs_maxvcount) {
674 osi_Panic("NewVCache - none freed");
675 /* XXX instead of panicking, should do afs_maxvcount++
676 and magic up another one */
682 if (getnewvnode(MOUNT_AFS, &Afs_vnodeops, &nvc)) {
683 /* What should we do ???? */
684 osi_Panic("afs_NewVCache: no more vnodes");
689 tvc->nextfree = (struct vcache *)0;
691 #else /* AFS_OSF_ENV */
692 /* pull out a free cache entry */
695 for(tq = VLRU.prev; (anumber > 0) && (tq != &VLRU); tq = uq) {
699 if (tvc->states & CVFlushed)
700 refpanic("CVFlushed on VLRU");
701 else if (i++ > 2*afs_cacheStats) /* even allowing for a few xallocs...*/
702 refpanic("Increase -stat parameter of afsd(VLRU cycle?)");
703 else if (QNext(uq) != tq)
704 refpanic("VLRU inconsistent");
706 if (tvc->vrefCount == 0 && tvc->opens == 0
707 && (tvc->states & CUnlinkedDel) == 0) {
708 code = afs_FlushVCache(tvc, &fv_slept);
715 continue; /* start over - may have raced. */
718 if (tq == uq ) break;
722 /* none free, making one is better than a panic */
723 afs_stats_cmperf.vcacheXAllocs++; /* count in case we have a leak */
724 tvc = (struct vcache *) afs_osi_Alloc(sizeof (struct vcache));
726 pin((char *)tvc, sizeof(struct vcache)); /* XXX */
729 /* In case it still comes here, we need to fill this in */
730 tvc->v.v_vm_info = VM_INFO_NULL;
731 vm_info_init(tvc->v.v_vm_info);
732 /* perhaps we should also do close_flush on non-NeXT mach systems;
733 * who knows; we don't currently have the sources.
735 #endif /* AFS_MACH_ENV */
736 #if defined(AFS_SGI_ENV)
737 { char name[METER_NAMSZ];
738 bzero(tvc, sizeof(struct vcache));
739 tvc->v.v_number = ++afsvnumbers;
740 tvc->vc_rwlockid = OSI_NO_LOCKID;
741 initnsema(&tvc->vc_rwlock, 1, makesname(name, "vrw", tvc->v.v_number));
742 #ifndef AFS_SGI53_ENV
743 initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
745 #ifndef AFS_SGI62_ENV
746 initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
749 #endif /* AFS_SGI_ENV */
752 tvc = freeVCList; /* take from free list */
753 freeVCList = tvc->nextfree;
754 tvc->nextfree = (struct vcache *)0;
756 #endif /* AFS_OSF_ENV */
759 vm_info_ptr = tvc->v.v_vm_info;
760 #endif /* AFS_MACH_ENV */
762 #if !defined(AFS_SGI_ENV) && !defined(AFS_OSF_ENV)
763 bzero((char *)tvc, sizeof(struct vcache));
768 RWLOCK_INIT(&tvc->lock, "vcache lock");
769 #if defined(AFS_SUN5_ENV)
770 RWLOCK_INIT(&tvc->vlock, "vcache vlock");
771 #endif /* defined(AFS_SUN5_ENV) */
774 tvc->v.v_vm_info = vm_info_ptr;
775 tvc->v.v_vm_info->pager = MEMORY_OBJECT_NULL;
776 #endif /* AFS_MACH_ENV */
777 tvc->parentVnode = 0;
778 tvc->mvid = (struct VenusFid *) 0;
779 tvc->linkData = (char *) 0;
782 tvc->execsOrWriters = 0;
786 tvc->last_looker = 0;
788 tvc->asynchrony = -1;
790 afs_symhint_inval(tvc);
792 tvc->flushDV.low = tvc->flushDV.high = AFS_MAXDV;
795 tvc->truncPos = AFS_NOTRUNC; /* don't truncate until we need to */
796 hzero(tvc->m.DataVersion); /* in case we copy it into flushDV */
798 /* Hold it for the LRU (should make count 2) */
799 VN_HOLD((struct vnode *)tvc);
800 #else /* AFS_OSF_ENV */
801 tvc->vrefCount = 1; /* us */
802 #endif /* AFS_OSF_ENV */
804 LOCK_INIT(&tvc->pvmlock, "vcache pvmlock");
805 tvc->vmh = tvc->segid = NULL;
808 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV) || defined(AFS_SUN5_ENV)
809 #if defined(AFS_SUN5_ENV)
810 rw_init(&tvc->rwlock, "vcache rwlock", RW_DEFAULT, NULL);
812 #if defined(AFS_SUN55_ENV)
813 /* This is required if the kaio (kernel asynchronous io)
814 ** module is installed. Inside the kernel, the function
815 ** check_vp( common/os/aio.c) checks to see if the kernel has
816 ** to provide asynchronous io for this vnode. This
817 ** function extracts the device number by following the
818 ** v_data field of the vnode. If we do not set this field
819 ** then the system panics. The value of the v_data field
820 ** is not really important for AFS vnodes because the kernel
821 ** does not do asynchronous io for regular files. Hence,
822 ** for the time being, we fill up the v_data field with the
823 ** vnode pointer itself. */
824 tvc->v.v_data = (char *)tvc;
825 #endif /* AFS_SUN55_ENV */
827 afs_BozonInit(&tvc->pvnLock, tvc);
831 tvc->callback = serverp; /* to minimize chance that clear
833 /* initialize vnode data, note vrefCount is v.v_count */
835 /* Don't forget to free the gnode space */
836 tvc->v.v_gnode = gnodepnt = (struct gnode *) osi_AllocSmallSpace(sizeof(struct gnode));
837 bzero((char *)gnodepnt, sizeof(struct gnode));
840 bzero((void*)&(tvc->vc_bhv_desc), sizeof(tvc->vc_bhv_desc));
841 bhv_desc_init(&(tvc->vc_bhv_desc), tvc, tvc, &Afs_vnodeops);
843 vn_bhv_head_init(&(tvc->v.v_bh), "afsvp");
844 vn_bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
846 bhv_head_init(&(tvc->v.v_bh));
847 bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
850 tvc->v.v_mreg = tvc->v.v_mregb = (struct pregion*)tvc;
852 tvc->v.v_trace = ktrace_alloc(VNODE_TRACE_SIZE, 0);
854 init_bitlock(&tvc->v.v_pcacheflag, VNODE_PCACHE_LOCKBIT, "afs_pcache",
856 init_mutex(&tvc->v.v_filocksem, MUTEX_DEFAULT, "afsvfl", (long)tvc);
857 init_mutex(&tvc->v.v_buf_lock, MUTEX_DEFAULT, "afsvnbuf", (long)tvc);
859 vnode_pcache_init(&tvc->v);
860 #if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
861 /* Above define is never true except in SGI test kernels. */
862 init_bitlock(&(tvc->v.v_flag), VLOCK, "vnode", tvc->v.v_number);
865 AFS_VN_INIT_BUF_LOCK(&(tvc->v));
868 SetAfsVnode((struct vnode *)tvc);
869 #endif /* AFS_SGI64_ENV */
871 * The proper value for mvstat (for root fids) is setup by the caller.
874 if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
876 if (afs_globalVFS == 0) osi_Panic("afs globalvfs");
877 vSetVfsp(tvc, afs_globalVFS);
880 tvc->v.v_vfsnext = afs_globalVFS->vfs_vnodes; /* link off vfs */
881 tvc->v.v_vfsprev = NULL;
882 afs_globalVFS->vfs_vnodes = &tvc->v;
883 if (tvc->v.v_vfsnext != NULL)
884 tvc->v.v_vfsnext->v_vfsprev = &tvc->v;
885 tvc->v.v_next = gnodepnt->gn_vnode; /*Single vnode per gnode for us!*/
886 gnodepnt->gn_vnode = &tvc->v;
889 tvc->v.g_dev = ((struct mount *)afs_globalVFS->vfs_data)->m_dev;
891 #if defined(AFS_DUX40_ENV)
892 insmntque(tvc, afs_globalVFS, &afs_ubcops);
895 /* Is this needed??? */
896 insmntque(tvc, afs_globalVFS);
897 #endif /* AFS_OSF_ENV */
898 #endif /* AFS_DUX40_ENV */
899 #if defined(AFS_SGI_ENV)
900 VN_SET_DPAGES(&(tvc->v), (struct pfdat*)NULL);
901 osi_Assert((tvc->v.v_flag & VINACT) == 0);
903 osi_Assert(VN_GET_PGCNT(&(tvc->v)) == 0);
904 osi_Assert(tvc->mapcnt == 0 && tvc->vc_locktrips == 0);
905 osi_Assert(tvc->vc_rwlockid == OSI_NO_LOCKID);
906 osi_Assert(tvc->v.v_filocks == NULL);
907 #if !defined(AFS_SGI65_ENV)
908 osi_Assert(tvc->v.v_filocksem == NULL);
910 osi_Assert(tvc->cred == NULL);
912 vnode_pcache_reinit(&tvc->v);
913 tvc->v.v_rdev = NODEV;
915 vn_initlist((struct vnlist *)&tvc->v);
917 #endif /* AFS_SGI_ENV */
918 #if defined(AFS_LINUX22_ENV)
920 struct inode *ip = (struct inode*)tvc;
921 sema_init(&ip->i_sem, 1);
922 #if defined(AFS_LINUX24_ENV)
923 sema_init(&ip->i_zombie, 1);
924 init_waitqueue_head(&ip->i_wait);
925 spin_lock_init(&ip->i_data.i_shared_lock);
926 INIT_LIST_HEAD(&ip->i_data.pages);
927 ip->i_data.host = (void*) ip;
928 ip->i_mapping = &ip->i_data;
930 sema_init(&ip->i_atomic_write, 1);
931 init_waitqueue(&ip->i_wait);
933 INIT_LIST_HEAD(&ip->i_hash);
934 INIT_LIST_HEAD(&ip->i_dentry);
936 ip->i_dev = afs_globalVFS->s_dev;
937 ip->i_sb = afs_globalVFS;
942 osi_dnlc_purgedp(tvc); /* this may be overkill */
943 bzero((char *)&(tvc->quick),sizeof(struct vtodc));
944 bzero((char *)&(tvc->callsort),sizeof(struct afs_q));
945 tvc->slocks = (struct SimpleLocks *)0;
948 tvc->hnext = afs_vhashT[i];
950 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
951 refpanic ("NewVCache VLRU inconsistent");
953 QAdd(&VLRU, &tvc->vlruq); /* put in lruq */
954 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
955 refpanic ("NewVCache VLRU inconsistent2");
957 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
958 refpanic ("NewVCache VLRU inconsistent3");
960 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
961 refpanic ("NewVCache VLRU inconsistent4");
971 * afs_FlushActiveVcaches
977 * doflocks : Do we handle flocks?
979 /* LOCK: afs_FlushActiveVcaches afs_xvcache N */
981 afs_FlushActiveVcaches(doflocks)
982 register afs_int32 doflocks;
984 { /*afs_FlushActiveVcaches*/
986 register struct vcache *tvc;
988 register struct conn *tc;
989 register afs_int32 code;
990 register struct AFS_UCRED *cred;
991 struct vrequest treq, ureq;
992 struct AFSVolSync tsync;
996 AFS_STATCNT(afs_FlushActiveVcaches);
997 ObtainReadLock(&afs_xvcache);
998 for(i=0;i<VCSIZE;i++) {
999 for(tvc = afs_vhashT[i]; tvc; tvc=tvc->hnext) {
1000 if (doflocks && tvc->flockCount != 0) {
1001 /* if this entry has an flock, send a keep-alive call out */
1003 ReleaseReadLock(&afs_xvcache);
1004 ObtainWriteLock(&tvc->lock,51);
1006 afs_InitReq(&treq, &afs_osi_cred);
1007 treq.flags |= O_NONBLOCK;
1009 tc = afs_Conn(&tvc->fid, &treq, SHARED_LOCK);
1011 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
1012 #ifdef RX_ENABLE_LOCKS
1014 #endif /* RX_ENABLE_LOCKS */
1016 RXAFS_ExtendLock(tc->id,
1017 (struct AFSFid *) &tvc->fid.Fid,
1019 #ifdef RX_ENABLE_LOCKS
1021 #endif /* RX_ENABLE_LOCKS */
1026 (afs_Analyze(tc, code, &tvc->fid, &treq,
1027 AFS_STATS_FS_RPCIDX_EXTENDLOCK,
1028 SHARED_LOCK, (struct cell *)0));
1030 ReleaseWriteLock(&tvc->lock);
1031 ObtainReadLock(&afs_xvcache);
1035 if ((tvc->states & CCore) || (tvc->states & CUnlinkedDel)) {
1037 * Don't let it evaporate in case someone else is in
1038 * this code. Also, drop the afs_xvcache lock while
1039 * getting vcache locks.
1042 ReleaseReadLock(&afs_xvcache);
1043 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
1044 afs_BozonLock(&tvc->pvnLock, tvc);
1046 #if defined(AFS_SGI_ENV)
1048 * That's because if we come in via the CUnlinkedDel bit state path we'll have a 0 refcnt
1050 osi_Assert(tvc->vrefCount > 0);
1051 AFS_RWLOCK((vnode_t *)tvc, VRWLOCK_WRITE);
1053 ObtainWriteLock(&tvc->lock,52);
1054 if (tvc->states & CCore) {
1055 tvc->states &= ~CCore;
1056 /* XXXX Find better place-holder for cred XXXX */
1057 cred = (struct AFS_UCRED *) tvc->linkData;
1058 tvc->linkData = (char *) 0; /* XXX */
1059 afs_InitReq(&ureq, cred);
1060 afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
1061 ICL_TYPE_POINTER, tvc,
1062 ICL_TYPE_INT32, tvc->execsOrWriters);
1063 code = afs_StoreOnLastReference(tvc, &ureq);
1064 ReleaseWriteLock(&tvc->lock);
1065 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
1066 afs_BozonUnlock(&tvc->pvnLock, tvc);
1068 hzero(tvc->flushDV);
1071 if (code && code != VNOVNODE) {
1072 afs_StoreWarn(code, tvc->fid.Fid.Volume,
1073 /* /dev/console */ 1);
1075 } else if (tvc->states & CUnlinkedDel) {
1079 ReleaseWriteLock(&tvc->lock);
1080 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
1081 afs_BozonUnlock(&tvc->pvnLock, tvc);
1083 #if defined(AFS_SGI_ENV)
1084 AFS_RWUNLOCK((vnode_t *)tvc, VRWLOCK_WRITE);
1086 afs_remunlink(tvc, 0);
1087 #if defined(AFS_SGI_ENV)
1088 AFS_RWLOCK((vnode_t *)tvc, VRWLOCK_WRITE);
1092 /* lost (or won, perhaps) the race condition */
1093 ReleaseWriteLock(&tvc->lock);
1094 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
1095 afs_BozonUnlock(&tvc->pvnLock, tvc);
1098 #if defined(AFS_SGI_ENV)
1099 AFS_RWUNLOCK((vnode_t *)tvc, VRWLOCK_WRITE);
1101 ObtainReadLock(&afs_xvcache);
1107 AFS_RELE((struct vnode *)tvc);
1109 /* Matches write code setting CCore flag */
1115 ReleaseReadLock(&afs_xvcache);
1117 } /*afs_FlushActiveVcaches*/
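/*
 * Illustrative sketch (not part of the original source): a background
 * daemon would call afs_FlushActiveVcaches periodically, passing a nonzero
 * doflocks only when it is time to send flock keep-alives.  The function
 * name and the one-minute interval below are assumptions made for the
 * example, not values taken from this file.
 */
#if 0	/* example only, never compiled */
static void
example_daemon_tick(afs_int32 now, afs_int32 *lastFlockCheck)
{
    int doflocks = 0;

    if (now - *lastFlockCheck >= 60) {	/* assumed keep-alive interval */
	doflocks = 1;
	*lastFlockCheck = now;
    }
    /* push back CCore data, finish deferred unlinks, and (when doflocks
     * is set) extend any byte-range locks this client is holding */
    afs_FlushActiveVcaches(doflocks);
}
#endif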
1124 * Make sure a cache entry is up-to-date status-wise.
1126 * NOTE: everywhere that calls this can potentially be sped up
1127 * by checking CStatd first, and avoiding doing the InitReq
1128 * if this is up-to-date.
1130 * These days, the only places that call this already KNOW that the
1131 * vcache is not up-to-date, so we don't screw around.
1134 * avc : Ptr to vcache entry to verify.
1138 int afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
1140 register struct vcache *tvc;
1142 AFS_STATCNT(afs_VerifyVCache);
1144 #if defined(AFS_OSF_ENV)
1145 ObtainReadLock(&avc->lock);
1146 if (afs_IsWired(avc)) {
1147 ReleaseReadLock(&avc->lock);
1150 ReleaseReadLock(&avc->lock);
1151 #endif /* AFS_OSF_ENV */
1152 /* otherwise we must fetch the status info */
1154 ObtainWriteLock(&avc->lock,53);
1155 if (avc->states & CStatd) {
1156 ReleaseWriteLock(&avc->lock);
1159 ObtainWriteLock(&afs_xcbhash, 461);
1160 avc->states &= ~( CStatd | CUnique );
1161 avc->callback = (struct server *)0;
1162 afs_DequeueCallback(avc);
1163 ReleaseWriteLock(&afs_xcbhash);
1164 ReleaseWriteLock(&avc->lock);
1166 /* since we've been called back, or the callback has expired,
1167 * it's possible that the contents of this directory, or this
1168 * file's name have changed, thus invalidating the dnlc contents.
1170 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
1171 osi_dnlc_purgedp (avc);
1173 osi_dnlc_purgevp (avc);
1175 /* fetch the status info */
1176 tvc = afs_GetVCache(&avc->fid, areq, (afs_int32*)0, avc, READ_LOCK);
1177 if (!tvc) return ENOENT;
1178 /* Put it back; caller has already incremented vrefCount */
1179 afs_PutVCache(tvc, READ_LOCK);
1182 } /*afs_VerifyVCache*/
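/*
 * Illustrative sketch (not part of the original source): the NOTE in the
 * header above suggests callers test CStatd first and only fall into
 * afs_VerifyVCache2 when the status is actually stale.  A wrapper along
 * these lines captures that fast path; the wrapper name is an assumption,
 * not something defined in this file.
 */
#if 0	/* example only, never compiled */
static int
example_VerifyVCache(struct vcache *avc, struct vrequest *areq)
{
    if (avc->states & CStatd)
	return 0;		/* already valid: skip the InitReq/RPC path */
    return afs_VerifyVCache2(avc, areq);
}
#endif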
1189 * Simple copy of stat info into cache.
1192 * avc : Ptr to vcache entry involved.
1193 * astat : Ptr to stat info to copy.
1196 * Nothing interesting.
1198 * Callers: as of 1992-04-29, only called by WriteVCache
1201 afs_SimpleVStat(avc, astat, areq)
1202 register struct vcache *avc;
1203 register struct AFSFetchStatus *astat;
1204 struct vrequest *areq;
1205 { /*afs_SimpleVStat*/
1207 AFS_STATCNT(afs_SimpleVStat);
1210 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1211 && !AFS_VN_MAPPED((vnode_t*)avc))
1213 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc))
1217 #if defined(AFS_SGI_ENV)
1218 osi_Assert((valusema(&avc->vc_rwlock) <= 0) &&
1219 (OSI_GET_LOCKID() == avc->vc_rwlockid));
1220 if (astat->Length < avc->m.Length) {
1221 vnode_t *vp = (vnode_t *)avc;
1223 osi_Assert(WriteLocked(&avc->lock));
1224 ReleaseWriteLock(&avc->lock);
1226 PTOSSVP(vp, (off_t)astat->Length, (off_t)MAXLONG);
1228 ObtainWriteLock(&avc->lock,67);
1231 /* if writing the file, don't fetch over this value */
1232 afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT,
1233 ICL_TYPE_POINTER, avc,
1234 ICL_TYPE_INT32, avc->m.Length,
1235 ICL_TYPE_INT32, astat->Length);
1236 avc->m.Length = astat->Length;
1237 avc->m.Date = astat->ClientModTime;
1239 avc->m.Owner = astat->Owner;
1240 avc->m.Group = astat->Group;
1241 avc->m.Mode = astat->UnixModeBits;
1242 if (vType(avc) == VREG) {
1243 avc->m.Mode |= S_IFREG;
1245 else if (vType(avc) == VDIR) {
1246 avc->m.Mode |= S_IFDIR;
1248 else if (vType(avc) == VLNK) {
1252 avc->m.Mode |= S_IFLNK;
1253 if ((avc->m.Mode & 0111) == 0) avc->mvstat = 1;
1255 if (avc->states & CForeign) {
1256 struct axscache *ac;
1257 avc->anyAccess = astat->AnonymousAccess;
1259 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1261 * Caller has at least one bit not covered by anonymous, and
1262 * thus may have interesting rights.
1264 * HOWEVER, this is a really bad idea, because any access query
1265 * for bits which aren't covered by anonymous, on behalf of a user
1266 * who doesn't have any special rights, will result in an answer of
1267 * the form "I don't know, let's make a FetchStatus RPC and find out!"
1268 * It's an especially bad idea under Ultrix, since (due to the lack of
1269 * a proper access() call) it must perform several afs_access() calls
1270 * in order to create magic mode bits that vary according to who makes
1271 * the call. In other words, _every_ stat() generates a test for
1274 #endif /* badidea */
1275 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1276 ac->axess = astat->CallerAccess;
1277 else /* not found, add a new one if possible */
1278 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1282 } /*afs_SimpleVStat*/
1289 * Store the status info *only* back to the server for a
1293 * avc : Ptr to the vcache entry.
1294 * astatus : Ptr to the status info to store.
1295 * areq : Ptr to the associated vrequest.
1298 * Must be called with a shared lock held on the vnode.
1301 afs_WriteVCache(avc, astatus, areq)
1302 register struct vcache *avc;
1303 register struct AFSStoreStatus *astatus;
1304 struct vrequest *areq;
1306 { /*afs_WriteVCache*/
1309 struct AFSFetchStatus OutStatus;
1310 struct AFSVolSync tsync;
1313 AFS_STATCNT(afs_WriteVCache);
1314 afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
1315 ICL_TYPE_INT32, avc->m.Length);
1318 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
1320 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
1321 #ifdef RX_ENABLE_LOCKS
1323 #endif /* RX_ENABLE_LOCKS */
1324 code = RXAFS_StoreStatus(tc->id,
1325 (struct AFSFid *) &avc->fid.Fid,
1326 astatus, &OutStatus, &tsync);
1327 #ifdef RX_ENABLE_LOCKS
1329 #endif /* RX_ENABLE_LOCKS */
1334 (afs_Analyze(tc, code, &avc->fid, areq,
1335 AFS_STATS_FS_RPCIDX_STORESTATUS,
1336 SHARED_LOCK, (struct cell *)0));
1338 UpgradeSToWLock(&avc->lock,20);
1340 /* success, do the changes locally */
1341 afs_SimpleVStat(avc, &OutStatus, areq);
1343 * Update the date, too. SimpleVStat didn't do this, since
1344 * it thought we were doing this after fetching new status
1345 * over a file being written.
1347 avc->m.Date = OutStatus.ClientModTime;
1350 /* failure, set up to check with server next time */
1351 ObtainWriteLock(&afs_xcbhash, 462);
1352 afs_DequeueCallback(avc);
1353 avc->states &= ~( CStatd | CUnique); /* turn off stat valid flag */
1354 ReleaseWriteLock(&afs_xcbhash);
1355 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
1356 osi_dnlc_purgedp (avc); /* if it (could be) a directory */
1358 ConvertWToSLock(&avc->lock);
1361 } /*afs_WriteVCache*/
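/*
 * Illustrative sketch (not part of the original source): a status-changing
 * path (a chmod-style operation, say) would hold avc->lock shared around
 * the call, as the header above requires; afs_WriteVCache itself upgrades
 * to a write lock while it applies the results and converts back before
 * returning.  The function name, lock-site number, and the way attrs is
 * filled in are placeholders.
 */
#if 0	/* example only, never compiled */
static void
example_change_status(struct vcache *avc, struct vrequest *areq)
{
    struct AFSStoreStatus attrs;
    afs_int32 code;

    ObtainSharedLock(&avc->lock, 1000);	/* placeholder lock-site id */
    /* ... fill in only the fields of attrs the caller wants changed ... */
    code = afs_WriteVCache(avc, &attrs, areq);
    ReleaseSharedLock(&avc->lock);
}
#endif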
1367 * Copy astat block into vcache info
1370 * avc : Ptr to vcache entry.
1371 * astat : Ptr to stat block to copy in.
1372 * areq : Ptr to associated request.
1375 * Must be called under a write lock
1377 * Note: this code may get dataversion and length out of sync if the file has
1378 * been modified. This is less than ideal. I haven't thought about
1379 * it sufficiently to be certain that it is adequate.
1382 afs_ProcessFS(avc, astat, areq)
1383 register struct vcache *avc;
1384 struct vrequest *areq;
1385 register struct AFSFetchStatus *astat;
1390 AFS_STATCNT(afs_ProcessFS);
1392 /* WARNING: afs_DoBulkStat uses the Length field to store a sequence
1393 * number for each bulk status request. Under no circumstances
1394 * should afs_DoBulkStat store a sequence number if the new
1395 * length will be ignored when afs_ProcessFS is called with
1396 * new stats. If you change the following conditional then you
1397 * also need to change the conditional in afs_DoBulkStat. */
1399 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1400 && !AFS_VN_MAPPED((vnode_t*)avc))
1402 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc))
1405 /* if we're writing or mapping this file, don't fetch over these
1408 afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
1409 ICL_TYPE_INT32, avc->m.Length,
1410 ICL_TYPE_INT32, astat->Length);
1411 avc->m.Length = astat->Length;
1412 avc->m.Date = astat->ClientModTime;
1414 hset64(avc->m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
1415 avc->m.Owner = astat->Owner;
1416 avc->m.Mode = astat->UnixModeBits;
1417 avc->m.Group = astat->Group;
1418 avc->m.LinkCount = astat->LinkCount;
1419 if (astat->FileType == File) {
1420 vSetType(avc, VREG);
1421 avc->m.Mode |= S_IFREG;
1423 else if (astat->FileType == Directory) {
1424 vSetType(avc, VDIR);
1425 avc->m.Mode |= S_IFDIR;
1427 else if (astat->FileType == SymbolicLink) {
1428 vSetType(avc, VLNK);
1429 avc->m.Mode |= S_IFLNK;
1430 if ((avc->m.Mode & 0111) == 0) avc->mvstat = 1;
1432 avc->anyAccess = astat->AnonymousAccess;
1434 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1436 * Caller has at least one bit not covered by anonymous, and
1437 * thus may have interesting rights.
1439 * HOWEVER, this is a really bad idea, because any access query
1440 * for bits which aren't covered by anonymous, on behalf of a user
1441 * who doesn't have any special rights, will result in an answer of
1442 * the form "I don't know, let's make a FetchStatus RPC and find out!"
1443 * It's an especially bad idea under Ultrix, since (due to the lack of
1444 * a proper access() call) it must perform several afs_access() calls
1445 * in order to create magic mode bits that vary according to who makes
1446 * the call. In other words, _every_ stat() generates a test for
1449 #endif /* badidea */
1451 struct axscache *ac;
1452 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1453 ac->axess = astat->CallerAccess;
1454 else /* not found, add a new one if possible */
1455 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1458 #ifdef AFS_LINUX22_ENV
1459 vcache2inode(avc); /* Set the inode attr cache */
1465 afs_RemoteLookup(afid, areq, name, nfid, OutStatusp, CallBackp, serverp, tsyncp)
1466 register struct VenusFid *afid;
1467 struct vrequest *areq;
1469 struct VenusFid *nfid;
1470 struct AFSFetchStatus *OutStatusp;
1471 struct AFSCallBack *CallBackp;
1472 struct server **serverp;
1473 struct AFSVolSync *tsyncp;
1476 register struct vcache *tvc;
1479 register struct conn *tc;
1480 struct AFSFetchStatus OutDirStatus;
1483 if (!name) name = ""; /* XXX */
1485 tc = afs_Conn(afid, areq, SHARED_LOCK);
1487 if (serverp) *serverp = tc->srvr->server;
1489 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
1490 #ifdef RX_ENABLE_LOCKS
1492 #endif /* RX_ENABLE_LOCKS */
1493 code = RXAFS_Lookup(tc->id, (struct AFSFid *) &afid->Fid, name,
1494 (struct AFSFid *) &nfid->Fid,
1495 OutStatusp, &OutDirStatus, CallBackp, tsyncp);
1496 #ifdef RX_ENABLE_LOCKS
1498 #endif /* RX_ENABLE_LOCKS */
1503 (afs_Analyze(tc, code, afid, areq,
1504 AFS_STATS_FS_RPCIDX_XLOOKUP,
1505 SHARED_LOCK, (struct cell *)0));
1515 * Given a file id and a vrequest structure, fetch the status
1516 * information associated with the file.
1520 * areq : Ptr to associated vrequest structure, specifying the
1521 * user whose authentication tokens will be used.
1522 * avc : caller may already have a vcache for this file, which is
1526 * The cache entry is returned with an increased vrefCount field.
1527 * The entry must be discarded by calling afs_PutVCache when you
1528 * are through using the pointer to the cache entry.
1530 * You should not hold any locks when calling this function, except
1531 * locks on other vcache entries. If you lock more than one vcache
1532 * entry simultaneously, you should lock them in this order:
1534 * 1. Lock all files first, then directories.
1535 * 2. Within a particular type, lock entries in Fid.Vnode order.
1537 * This locking hierarchy is convenient because it allows locking
1538 * of a parent dir cache entry, given a file (to check its access
1539 * control list). It also allows renames to be handled easily by
1540 * locking directories in a constant order.
1541 * NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
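 *
 * Illustrative example (not in the original): under this hierarchy, a rename
 * touching files fA, fB and directories dA, dB (with fA's Fid.Vnode lower
 * than fB's, and dA's lower than dB's) acquires the vcache locks as
 *
 *	lock fA; lock fB;	files first, in Fid.Vnode order
 *	lock dA; lock dB;	then directories, in Fid.Vnode order
 *
 * so every thread takes the same entries in the same constant order and two
 * concurrent renames cannot deadlock against each other.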
1543 struct vcache *afs_GetVCache(afid, areq, cached, avc, locktype)
1544 register struct VenusFid *afid;
1545 struct vrequest *areq;
1548 struct vcache *avc; /* might have a vcache structure already, which must
1549 * already be held by the caller */
1552 afs_int32 code, i, newvcache=0;
1553 register struct vcache *tvc;
1557 AFS_STATCNT(afs_GetVCache);
1559 if (cached) *cached = 0; /* Init just in case */
1562 ObtainSharedLock(&afs_xvcache,5);
1564 tvc = afs_FindVCache(afid, 0, 0, &retry, DO_STATS | DO_VLRU );
1566 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1567 ReleaseSharedLock(&afs_xvcache);
1568 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1576 if (tvc->states & CStatd) {
1577 ReleaseSharedLock(&afs_xvcache);
1582 UpgradeSToWLock(&afs_xvcache,21);
1584 /* no cache entry, better grab one */
1585 tvc = afs_NewVCache(afid, (struct server *)0, 1, WRITE_LOCK);
1588 ConvertWToSLock(&afs_xvcache);
1589 afs_stats_cmperf.vcacheMisses++;
1592 ReleaseSharedLock(&afs_xvcache);
1594 ObtainWriteLock(&tvc->lock,54);
1596 if (tvc->states & CStatd) {
1597 #ifdef AFS_LINUX22_ENV
1600 ReleaseWriteLock(&tvc->lock);
1604 #if defined(AFS_OSF_ENV)
1605 if (afs_IsWired(tvc)) {
1606 ReleaseWriteLock(&tvc->lock);
1609 #endif /* AFS_OSF_ENV */
1611 ObtainWriteLock(&afs_xcbhash, 464);
1612 tvc->states &= ~CUnique;
1614 afs_DequeueCallback(tvc);
1615 ReleaseWriteLock(&afs_xcbhash);
1617 /* Is it always appropriate to throw away all the access rights? */
1618 afs_FreeAllAxs(&(tvc->Access));
1619 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-volume info */
1621 if ((tvp->states & VForeign)) {
1622 if (newvcache) tvc->states |= CForeign;
1623 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
1624 && (tvp->rootUnique == afid->Fid.Unique)) {
1628 if (tvp->states & VRO) tvc->states |= CRO;
1629 if (tvp->states & VBackup) tvc->states |= CBackup;
1630 /* now copy ".." entry back out of volume structure, if necessary */
1631 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
1633 tvc->mvid = (struct VenusFid *)
1634 osi_AllocSmallSpace(sizeof(struct VenusFid));
1635 *tvc->mvid = tvp->dotdot;
1637 afs_PutVolume(tvp, READ_LOCK);
1641 afs_RemoveVCB(afid);
1643 struct AFSFetchStatus OutStatus;
1644 code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
1648 ReleaseWriteLock(&tvc->lock);
1650 ObtainReadLock(&afs_xvcache);
1652 ReleaseReadLock(&afs_xvcache);
1653 return (struct vcache *) 0;
1656 ReleaseWriteLock(&tvc->lock);
1663 struct vcache *afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
1664 afs_int32 *cached, afs_int32 locktype,
1665 struct vcache *adp, char *aname)
1667 afs_int32 code, now, newvcache=0, hash;
1668 struct VenusFid nfid;
1669 register struct vcache *tvc;
1671 struct AFSFetchStatus OutStatus;
1672 struct AFSCallBack CallBack;
1673 struct AFSVolSync tsync;
1674 struct server *serverp = 0;
1678 AFS_STATCNT(afs_GetVCache);
1679 if (cached) *cached = 0; /* Init just in case */
1682 ObtainReadLock(&afs_xvcache);
1683 tvc = afs_FindVCache(afid, 0, 0, &retry, DO_STATS /* no vlru */);
1686 ReleaseReadLock(&afs_xvcache);
1688 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1689 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1693 ObtainReadLock(&tvc->lock);
1695 if (tvc->states & CStatd) {
1699 ReleaseReadLock(&tvc->lock);
1702 tvc->states &= ~CUnique;
1704 ReleaseReadLock(&tvc->lock);
1705 ObtainReadLock(&afs_xvcache);
1709 ReleaseReadLock(&afs_xvcache);
1711 /* lookup the file */
1714 origCBs = afs_allCBs; /* if anything changes, we don't have a cb */
1715 code = afs_RemoteLookup(&adp->fid, areq, aname, &nfid, &OutStatus, &CallBack,
1719 ObtainSharedLock(&afs_xvcache,6);
1720 tvc = afs_FindVCache(&nfid, 0, 0, &retry, DO_VLRU /* no xstats now*/);
1722 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1723 ReleaseSharedLock(&afs_xvcache);
1724 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1730 /* no cache entry, better grab one */
1731 UpgradeSToWLock(&afs_xvcache,22);
1732 tvc = afs_NewVCache(&nfid, (struct server *)0, 1, WRITE_LOCK);
1734 ConvertWToSLock(&afs_xvcache);
1737 ReleaseSharedLock(&afs_xvcache);
1738 ObtainWriteLock(&tvc->lock,55);
1740 /* Is it always appropriate to throw away all the access rights? */
1741 afs_FreeAllAxs(&(tvc->Access));
1742 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-vol info */
1744 if ((tvp->states & VForeign)) {
1745 if (newvcache) tvc->states |= CForeign;
1746 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
1747 && (tvp->rootUnique == afid->Fid.Unique))
1750 if (tvp->states & VRO) tvc->states |= CRO;
1751 if (tvp->states & VBackup) tvc->states |= CBackup;
1752 /* now copy ".." entry back out of volume structure, if necessary */
1753 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
1755 tvc->mvid = (struct VenusFid *)
1756 osi_AllocSmallSpace(sizeof(struct VenusFid));
1757 *tvc->mvid = tvp->dotdot;
1762 ObtainWriteLock(&afs_xcbhash, 465);
1763 afs_DequeueCallback(tvc);
1764 tvc->states &= ~( CStatd | CUnique );
1765 ReleaseWriteLock(&afs_xcbhash);
1766 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
1767 osi_dnlc_purgedp (tvc); /* if it (could be) a directory */
1769 afs_PutVolume(tvp, READ_LOCK);
1770 ReleaseWriteLock(&tvc->lock);
1771 ObtainReadLock(&afs_xvcache);
1773 ReleaseReadLock(&afs_xvcache);
1774 return (struct vcache *) 0;
1777 ObtainWriteLock(&afs_xcbhash, 466);
1778 if (origCBs == afs_allCBs) {
1779 if (CallBack.ExpirationTime) {
1780 tvc->callback = serverp;
1781 tvc->cbExpires = CallBack.ExpirationTime+now;
1782 tvc->states |= CStatd | CUnique;
1783 tvc->states &= ~CBulkFetching;
1784 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
1785 } else if (tvc->states & CRO) {
1786 /* adapt gives us an hour. */
1787 tvc->cbExpires = 3600+osi_Time(); /*XXX*/
1788 tvc->states |= CStatd | CUnique;
1789 tvc->states &= ~CBulkFetching;
1790 afs_QueueCallback(tvc, CBHash(3600), tvp);
1792 tvc->callback = (struct server *)0;
1793 afs_DequeueCallback(tvc);
1794 tvc->states &= ~(CStatd | CUnique);
1795 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
1796 osi_dnlc_purgedp (tvc); /* if it (could be) a directory */
1799 afs_DequeueCallback(tvc);
1800 tvc->states &= ~CStatd;
1801 tvc->states &= ~CUnique;
1802 tvc->callback = (struct server *)0;
1803 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
1804 osi_dnlc_purgedp (tvc); /* if it (could be) a directory */
1806 ReleaseWriteLock(&afs_xcbhash);
1808 afs_PutVolume(tvp, READ_LOCK);
1809 afs_ProcessFS(tvc, &OutStatus, areq);
1811 ReleaseWriteLock(&tvc->lock);
1816 struct vcache *afs_GetRootVCache(struct VenusFid *afid,
1817 struct vrequest *areq, afs_int32 *cached,
1818 struct volume *tvolp, afs_int32 locktype)
1820 afs_int32 code, i, newvcache = 0, haveStatus = 0;
1821 afs_int32 getNewFid = 0;
1823 struct VenusFid nfid;
1824 register struct vcache *tvc;
1825 struct server *serverp = 0;
1826 struct AFSFetchStatus OutStatus;
1827 struct AFSCallBack CallBack;
1828 struct AFSVolSync tsync;
1834 if (!tvolp->rootVnode || getNewFid) {
1835 struct VenusFid tfid;
1838 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
1839 origCBs = afs_allCBs; /* ignore InitCallBackState */
1840 code = afs_RemoteLookup(&tfid, areq, (char *)0, &nfid,
1841 &OutStatus, &CallBack, &serverp, &tsync);
1843 return (struct vcache *)0;
1845 /* ReleaseReadLock(&tvolp->lock); */
1846 ObtainWriteLock(&tvolp->lock,56);
1847 tvolp->rootVnode = afid->Fid.Vnode = nfid.Fid.Vnode;
1848 tvolp->rootUnique = afid->Fid.Unique = nfid.Fid.Unique;
1849 ReleaseWriteLock(&tvolp->lock);
1850 /* ObtainReadLock(&tvolp->lock);*/
1853 afid->Fid.Vnode = tvolp->rootVnode;
1854 afid->Fid.Unique = tvolp->rootUnique;
1857 ObtainSharedLock(&afs_xvcache,7);
1859 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
1860 if (!FidCmp(&(tvc->fid), afid)) {
1862 /* Grab this vnode, possibly reactivating from the free list */
1863 /* for the present (95.05.25) everything on the hash table is
1864 * definitively NOT in the free list -- at least until afs_reclaim
1865 * can be safely implemented */
1868 vg = vget((struct vnode *)tvc); /* this bumps ref count */
1872 #endif /* AFS_OSF_ENV */
1877 if (!haveStatus && (!tvc || !(tvc->states & CStatd))) {
1878 /* Mount point no longer stat'd or unknown. FID may have changed. */
1883 tvc = (struct vcache*)0;
1885 ReleaseSharedLock(&afs_xvcache);
1890 UpgradeSToWLock(&afs_xvcache,23);
1891 /* no cache entry, better grab one */
1892 tvc = afs_NewVCache(afid, (struct server *)0, 1, WRITE_LOCK);
1894 afs_stats_cmperf.vcacheMisses++;
1897 if (cached) *cached = 1;
1898 afs_stats_cmperf.vcacheHits++;
1900 /* we already bumped the ref count in the for loop above */
1901 #else /* AFS_OSF_ENV */
1904 UpgradeSToWLock(&afs_xvcache,24);
1905 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
1906 refpanic ("GRVC VLRU inconsistent0");
1908 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
1909 refpanic ("GRVC VLRU inconsistent1");
1911 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
1912 refpanic ("GRVC VLRU inconsistent2");
1914 QRemove(&tvc->vlruq); /* move to lruq head */
1915 QAdd(&VLRU, &tvc->vlruq);
1916 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
1917 refpanic ("GRVC VLRU inconsistent3");
1919 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
1920 refpanic ("GRVC VLRU inconsistent4");
1922 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
1923 refpanic ("GRVC VLRU inconsistent5");
1928 ReleaseWriteLock(&afs_xvcache);
1930 if (tvc->states & CStatd) {
1934 ObtainReadLock(&tvc->lock);
1935 tvc->states &= ~CUnique;
1936 tvc->callback = (struct server *)0; /* redundant, perhaps */
1937 ReleaseReadLock(&tvc->lock);
1940 ObtainWriteLock(&tvc->lock,57);
1942 /* Is it always appropriate to throw away all the access rights? */
1943 afs_FreeAllAxs(&(tvc->Access));
1945 if (newvcache) tvc->states |= CForeign;
1946 if (tvolp->states & VRO) tvc->states |= CRO;
1947 if (tvolp->states & VBackup) tvc->states |= CBackup;
1948 /* now copy ".." entry back out of volume structure, if necessary */
1949 if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
1950 && (tvolp->rootUnique == afid->Fid.Unique)) {
1953 if (tvc->mvstat == 2 && tvolp->dotdot.Fid.Volume != 0) {
1955 tvc->mvid = (struct VenusFid *)osi_AllocSmallSpace(sizeof(struct VenusFid));
1956 *tvc->mvid = tvolp->dotdot;
1960 afs_RemoveVCB(afid);
1963 struct VenusFid tfid;
1966 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
1967 origCBs = afs_allCBs; /* ignore InitCallBackState */
1968 code = afs_RemoteLookup(&tfid, areq, (char *)0, &nfid, &OutStatus,
1969 &CallBack, &serverp, &tsync);
1973 ObtainWriteLock(&afs_xcbhash, 467);
1974 afs_DequeueCallback(tvc);
1975 tvc->callback = (struct server *)0;
1976 tvc->states &= ~(CStatd|CUnique);
1977 ReleaseWriteLock(&afs_xcbhash);
1978 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
1979 osi_dnlc_purgedp (tvc); /* if it (could be) a directory */
1980 ReleaseWriteLock(&tvc->lock);
1981 ObtainReadLock(&afs_xvcache);
1983 ReleaseReadLock(&afs_xvcache);
1984 return (struct vcache *) 0;
1987 ObtainWriteLock(&afs_xcbhash, 468);
1988 if (origCBs == afs_allCBs) {
1989 tvc->states |= CTruth;
1990 tvc->callback = serverp;
1991 if (CallBack.ExpirationTime != 0) {
1992 tvc->cbExpires = CallBack.ExpirationTime+start;
1993 tvc->states |= CStatd;
1994 tvc->states &= ~CBulkFetching;
1995 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
1996 } else if (tvc->states & CRO) {
1997 /* adapt gives us an hour. */
1998 tvc->cbExpires = 3600+osi_Time(); /*XXX*/
1999 tvc->states |= CStatd;
2000 tvc->states &= ~CBulkFetching;
2001 afs_QueueCallback(tvc, CBHash(3600), tvolp);
2004 afs_DequeueCallback(tvc);
2005 tvc->callback = (struct server *)0;
2006 tvc->states &= ~(CStatd | CUnique);
2007 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2008 osi_dnlc_purgedp (tvc); /* if it (could be) a directory */
2010 ReleaseWriteLock(&afs_xcbhash);
2011 afs_ProcessFS(tvc, &OutStatus, areq);
2013 ReleaseWriteLock(&tvc->lock);
2020 * must be called with avc write-locked
2021 * we don't absolutely have to invalidate the hint unless the dv has
2022 * changed, but be sure to get it right, or there will be consistency bugs.
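 *
 * Illustrative note (not in the original): afs_GetVCache above follows this
 * rule; it takes ObtainWriteLock(&tvc->lock, 54), calls
 * afs_FetchStatus(tvc, afid, areq, &OutStatus), and only drops the write
 * lock once the returned status has been dealt with.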
2024 afs_int32 afs_FetchStatus(struct vcache *avc, struct VenusFid *afid,
2025 struct vrequest *areq, struct AFSFetchStatus *Outsp)
2029 register struct conn *tc;
2030 struct AFSCallBack CallBack;
2031 struct AFSVolSync tsync;
2032 struct volume* volp;
2036 tc = afs_Conn(afid, areq, SHARED_LOCK);
2037 avc->quick.stamp = 0; avc->h1.dchint = NULL; /* invalidate hints */
2039 avc->callback = tc->srvr->server;
2041 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
2042 #ifdef RX_ENABLE_LOCKS
2044 #endif /* RX_ENABLE_LOCKS */
2045 code = RXAFS_FetchStatus(tc->id,
2046 (struct AFSFid *) &afid->Fid,
2047 Outsp, &CallBack, &tsync);
2048 #ifdef RX_ENABLE_LOCKS
2050 #endif /* RX_ENABLE_LOCKS */
2057 (afs_Analyze(tc, code, afid, areq,
2058 AFS_STATS_FS_RPCIDX_FETCHSTATUS,
2059 SHARED_LOCK, (struct cell *)0));
2062 afs_ProcessFS(avc, Outsp, areq);
2063 volp = afs_GetVolume(afid, areq, READ_LOCK);
2064 ObtainWriteLock(&afs_xcbhash, 469);
2065 avc->states |= CTruth;
2066 if (avc->callback /* check for race */) {
2067 if (CallBack.ExpirationTime != 0) {
2068 avc->cbExpires = CallBack.ExpirationTime+start;
2069 avc->states |= CStatd;
2070 avc->states &= ~CBulkFetching;
2071 afs_QueueCallback(avc, CBHash(CallBack.ExpirationTime), volp);
2073 else if (avc->states & CRO)
2074 { /* ordinary callback on a read-only volume -- AFS 3.2 style */
2075 avc->cbExpires = 3600+start;
2076 avc->states |= CStatd;
2077 avc->states &= ~CBulkFetching;
2078 afs_QueueCallback(avc, CBHash(3600), volp);
2081 afs_DequeueCallback(avc);
2082 avc->callback = (struct server *)0;
2083 avc->states &= ~(CStatd|CUnique);
2084 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
2085 osi_dnlc_purgedp (avc); /* if it (could be) a directory */
2089 afs_DequeueCallback(avc);
2090 avc->callback = (struct server *)0;
2091 avc->states &= ~(CStatd|CUnique);
2092 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
2093 osi_dnlc_purgedp (avc); /* if it (could be) a directory */
2095 ReleaseWriteLock(&afs_xcbhash);
2097 afs_PutVolume(volp, READ_LOCK);
2099 else { /* used to undo the local callback, but that's too extreme.
2100 * There are plenty of good reasons that fetchstatus might return
2101 * an error, such as EPERM. If we have the vnode cached, statd,
2102 * with callback, might as well keep track of the fact that we
2103 * don't have access...
2105 if (code == EPERM || code == EACCES) {
2106 struct axscache *ac;
2107 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
2109 else /* not found, add a new one if possible */
2110 afs_AddAxs(avc->Access, areq->uid, 0);
2120 * Stuff some information into the vcache for the given file.
2123 * afid : File in question.
2124 * OutStatus : Fetch status on the file.
2125 * CallBack : Callback info.
2126 * tc : RPC connection involved.
2127 * areq : vrequest involved.
2130 * Nothing interesting.
2133 afs_StuffVcache(afid, OutStatus, CallBack, tc, areq)
2134 register struct VenusFid *afid;
2135 struct AFSFetchStatus *OutStatus;
2136 struct AFSCallBack *CallBack;
2137 register struct conn *tc;
2138 struct vrequest *areq;
2140 { /*afs_StuffVcache*/
2142 register afs_int32 code, i, newvcache=0;
2143 register struct vcache *tvc;
2144 struct AFSVolSync tsync;
2146 struct axscache *ac;
2149 AFS_STATCNT(afs_StuffVcache);
2150 #ifdef IFS_VCACHECOUNT
2155 ObtainSharedLock(&afs_xvcache,8);
2157 tvc = afs_FindVCache(afid, 0, 0, &retry, DO_VLRU /* no stats */);
2159 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2160 ReleaseSharedLock(&afs_xvcache);
2161 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2167 /* no cache entry, better grab one */
2168 UpgradeSToWLock(&afs_xvcache,25);
2169 tvc = afs_NewVCache(afid, (struct server *)0, 1, WRITE_LOCK);
2171 ConvertWToSLock(&afs_xvcache);
2174 ReleaseSharedLock(&afs_xvcache);
2175 ObtainWriteLock(&tvc->lock,58);
2177 tvc->states &= ~CStatd;
2178 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2179 osi_dnlc_purgedp (tvc); /* if it (could be) a directory */
2181 /* Is it always appropriate to throw away all the access rights? */
2182 afs_FreeAllAxs(&(tvc->Access));
2184 /*Copy useful per-volume info*/
2185 tvp = afs_GetVolume(afid, areq, READ_LOCK);
2187 if (newvcache && (tvp->states & VForeign)) tvc->states |= CForeign;
2188 if (tvp->states & VRO) tvc->states |= CRO;
2189 if (tvp->states & VBackup) tvc->states |= CBackup;
2191 * Now, copy ".." entry back out of volume structure, if
2194 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2195 if (!tvc->mvid) tvc->mvid =
2196 (struct VenusFid *) osi_AllocSmallSpace(sizeof(struct VenusFid));
2197 *tvc->mvid = tvp->dotdot;
2200 /* store the stat on the file */
2201 afs_RemoveVCB(afid);
2202 afs_ProcessFS(tvc, OutStatus, areq);
2203 tvc->callback = tc->srvr->server;
2205 /* we use osi_Time twice below. Ideally, we would use the time at which
2206 * the FetchStatus call began, instead, but we don't have it here. So we
2207 * make do with "now". In the CRO case, it doesn't really matter. In
2208 * the other case, we hope that the difference between "now" and when the
2209 * call actually began execution on the server won't be larger than the
2210 * padding which the server keeps. Subtract 1 second anyway, to be on
2211 * the safe side. Can't subtract more because we don't know how big
2212 * ExpirationTime is. Possible consistency problems may arise if the call
2213 * timeout period becomes longer than the server's expiration padding. */
2214 ObtainWriteLock(&afs_xcbhash, 470);
2215 if (CallBack->ExpirationTime != 0) {
2216 tvc->cbExpires = CallBack->ExpirationTime+osi_Time()-1;
2217 tvc->states |= CStatd;
2218 tvc->states &= ~CBulkFetching;
2219 afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
2221 else if (tvc->states & CRO) {
2222 /* old-fashioned AFS 3.2 style */
2223 tvc->cbExpires = 3600+osi_Time(); /*XXX*/
2224 tvc->states |= CStatd;
2225 tvc->states &= ~CBulkFetching;
2226 afs_QueueCallback(tvc, CBHash(3600), tvp);
2229 afs_DequeueCallback(tvc);
2230 tvc->callback = (struct server *)0;
2231 tvc->states &= ~(CStatd|CUnique);
2232 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2233 osi_dnlc_purgedp (tvc); /* if it (could be) a directory */
2235 ReleaseWriteLock(&afs_xcbhash);
2237 afs_PutVolume(tvp, READ_LOCK);
2239 /* look in per-pag cache */
2240 if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
2241 ac->axess = OutStatus->CallerAccess; /* substitute pags */
2242 else /* not found, add a new one if possible */
2243 afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);
2245 ReleaseWriteLock(&tvc->lock);
2246 afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
2247 ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32, tvc->cbExpires,
2248 ICL_TYPE_INT32, tvc->cbExpires-osi_Time());
2250 * Release ref count... hope this guy stays around...
2252 afs_PutVCache(tvc, WRITE_LOCK);
2253 } /*afs_StuffVcache*/
2259 * Decrements the reference count on a cache entry.
2262 * avc : Pointer to the cache entry to decrement.
2265 * Nothing interesting.
2268 afs_PutVCache(avc, locktype)
2269 register struct vcache *avc;
2273 AFS_STATCNT(afs_PutVCache);
2275 * Can we use a read lock here?
2277 ObtainReadLock(&afs_xvcache);
2279 ReleaseReadLock(&afs_xvcache);
2286 * Find a vcache entry given a fid.
2289 * afid : Pointer to the fid whose cache entry we desire.
2290 * retry: (SGI-specific) tell the caller to drop the lock on xvcache,
2291 * unlock the vnode, and try again.
2292 * flags: bit 1 to specify whether to compute hit statistics. Not
2293 * set if FindVCache is called as part of internal bookkeeping.
2296 * Must be called with the afs_xvcache lock at least held at
2297 * the read level. In order to do the VLRU adjustment, the xvcache lock
2298 * must be shared-- we upgrade it here.
2301 struct vcache *afs_FindVCache(struct VenusFid *afid, afs_int32 lockit,
2302 afs_int32 locktype, afs_int32 *retry, afs_int32 flag)
2305 register struct vcache *tvc;
2308 AFS_STATCNT(afs_FindVCache);
2311 for(tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2312 if (FidMatches(afid, tvc)) {
2314 /* Grab this vnode, possibly reactivating from the free list */
2317 vg = vget((struct vnode *)tvc);
2321 #endif /* AFS_OSF_ENV */
2326 /* should I have a read lock on the vnode here? */
2328 if (retry) *retry = 0;
2329 #if !defined(AFS_OSF_ENV)
2330 osi_vnhold(tvc, retry); /* already held, above */
2331 if (retry && *retry)
2335 * only move to front of the VLRU if we have proper vcache locking
2337 if (flag & DO_VLRU) {
2338 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2339 refpanic ("FindVC VLRU inconsistent1");
2341 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2342 refpanic ("FindVC VLRU inconsistent1");
2344 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2345 refpanic ("FindVC VLRU inconsistent2");
2347 UpgradeSToWLock(&afs_xvcache,26);
2348 QRemove(&tvc->vlruq);
2349 QAdd(&VLRU, &tvc->vlruq);
2350 ConvertWToSLock(&afs_xvcache);
2351 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2352 refpanic ("FindVC VLRU inconsistent1");
2354 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2355 refpanic ("FindVC VLRU inconsistent2");
2357 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2358 refpanic ("FindVC VLRU inconsistent3");
2364 if (flag & DO_STATS) {
2365 if (tvc) afs_stats_cmperf.vcacheHits++;
2366 else afs_stats_cmperf.vcacheMisses++;
2367 if (afid->Cell == LOCALCELL)
2368 afs_stats_cmperf.vlocalAccesses++;
2370 afs_stats_cmperf.vremoteAccesses++;
2373 #ifdef AFS_LINUX22_ENV
2374 if (tvc && (tvc->states & CStatd))
2375 vcache2inode(tvc); /* mainly to reset i_nlink */
2378 } /*afs_FindVCache*/
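/*
 * Editorial sketch (not part of the original source): a minimal caller of
 * afs_FindVCache following the environment described above, i.e. holding
 * afs_xvcache at least read-locked across the lookup and balancing the
 * reference with afs_PutVCache.  "afid" stands in for whatever fid pointer
 * the real caller has; the lockit/locktype arguments of 0 mirror the call in
 * afs_StuffVcache above, and READ_LOCK for afs_PutVCache is an assumption.
 * DO_STATS only updates hit statistics, so a read lock suffices (the VLRU
 * move done under DO_VLRU needs the lock held shared).
 *
 *     afs_int32 retry = 0;
 *     struct vcache *tvc;
 *
 *     ObtainReadLock(&afs_xvcache);
 *     tvc = afs_FindVCache(afid, 0, 0, &retry, DO_STATS);
 *     ReleaseReadLock(&afs_xvcache);
 *     if (tvc) {
 *         ... use the entry ...
 *         afs_PutVCache(tvc, READ_LOCK);
 *     }
 */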
2384 * Find a vcache entry given a fid. Does a wildcard match on what we
2385 * have for the fid. If more than one entry matches, don't return anything.
2388 * avcp : Fill in pointer if we found one and only one.
2389 * afid : Pointer to the fid whose cache entry we desire.
2390 * retry: (SGI-specific) tell the caller to drop the lock on xvcache,
2391 * unlock the vnode, and try again.
2392 * flags: bit 1 to specify whether to compute hit statistics. Not
2393 * set if FindVCache is called as part of internal bookkeeping.
2396 * Must be called with the afs_xvcache lock at least held at
2397 * the read level. In order to do the VLRU adjustment, the xvcache lock
2398 * must be shared-- we upgrade it here.
2401 * number of matches found.
2404 int afs_duplicate_nfs_fids=0;
2406 afs_int32 afs_NFSFindVCache(avcp, afid, lockit)
2407 struct vcache **avcp;
2408 struct VenusFid *afid;
2410 { /*afs_NFSFindVCache*/
2412 register struct vcache *tvc;
2414 afs_int32 retry = 0;
2415 afs_int32 count = 0;
2416 struct vcache *found_tvc = NULL;
2418 AFS_STATCNT(afs_FindVCache);
2422 ObtainSharedLock(&afs_xvcache,331);
2425 for(tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2426 /* Match only on what we have.... */
2427 if (((tvc->fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
2428 && (tvc->fid.Fid.Volume == afid->Fid.Volume)
2429 && ((tvc->fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
2430 && (tvc->fid.Cell == afid->Cell)) {
2432 /* Grab this vnode, possibly reactivating from the free list */
2435 vg = vget((struct vnode *)tvc);
2438 /* This vnode no longer exists. */
2441 #endif /* AFS_OSF_ENV */
2446 /* Drop our reference counts. */
2447 vrele((struct vnode *)tvc);
2448 vrele((struct vnode *)found_tvc);
2450 afs_duplicate_nfs_fids++;
2451 ReleaseSharedLock(&afs_xvcache);
2459 /* should I have a read lock on the vnode here? */
2461 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2462 osi_vnhold(tvc, &retry);
2465 found_tvc = (struct vcache*)0;
2466 ReleaseSharedLock(&afs_xvcache);
2467 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2471 #if !defined(AFS_OSF_ENV)
2472 osi_vnhold(tvc, (int*)0); /* already held, above */
2476 * We obtained the xvcache lock above.
2478 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2479 refpanic ("FindVC VLRU inconsistent1");
2481 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2482 refpanic ("FindVC VLRU inconsistent1");
2484 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2485 refpanic ("FindVC VLRU inconsistent2");
2487 UpgradeSToWLock(&afs_xvcache,568);
2488 QRemove(&tvc->vlruq);
2489 QAdd(&VLRU, &tvc->vlruq);
2490 ConvertWToSLock(&afs_xvcache);
2491 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2492 refpanic ("FindVC VLRU inconsistent1");
2494 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2495 refpanic ("FindVC VLRU inconsistent2");
2497 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2498 refpanic ("FindVC VLRU inconsistent3");
2503 if (tvc) afs_stats_cmperf.vcacheHits++;
2504 else afs_stats_cmperf.vcacheMisses++;
2505 if (afid->Cell == LOCALCELL)
2506 afs_stats_cmperf.vlocalAccesses++;
2508 afs_stats_cmperf.vremoteAccesses++;
2510 *avcp = tvc; /* May be null */
2512 ReleaseSharedLock(&afs_xvcache);
2513 return (tvc ? 1 : 0);
2515 } /*afs_NFSFindVCache*/
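/*
 * Editorial note on the partial-fid match above: the Vnode field is compared
 * through a 0xffff mask and the Unique field through a 0xffffff mask, so two
 * different AFS fids that agree in those low-order bits are indistinguishable
 * here.  For example (hypothetical values, other fid fields assumed equal),
 * cached entries with Vnode 0x12345 and Vnode 0x02345 both satisfy
 * (Vnode & 0xffff) == 0x2345; when that happens afs_duplicate_nfs_fids is
 * bumped and, as documented above, no single entry is returned.
 */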
2523 * Initialize vcache related variables
2525 void afs_vcacheInit(int astatSize)
2527 register struct vcache *tvp;
2529 #if defined(AFS_OSF_ENV)
2530 if (!afs_maxvcount) {
2531 #if defined(AFS_OSF30_ENV)
2532 afs_maxvcount = max_vnodes/2; /* limit ourselves to half the total */
2534 afs_maxvcount = nvnode/2; /* limit ourselves to half the total */
2536 if (astatSize < afs_maxvcount) {
2537 afs_maxvcount = astatSize;
2540 #else /* AFS_OSF_ENV */
2541 freeVCList = (struct vcache *)0;
2544 RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
2545 LOCK_INIT(&afs_xvcb, "afs_xvcb");
2547 #if !defined(AFS_OSF_ENV)
2548 /* Allocate and thread the struct vcache entries */
2549 tvp = (struct vcache *) afs_osi_Alloc(astatSize * sizeof(struct vcache));
2550 bzero((char *)tvp, sizeof(struct vcache)*astatSize);
2552 Initial_freeVCList = tvp;
2553 freeVCList = &(tvp[0]);
2554 for(i=0; i < astatSize-1; i++) {
2555 tvp[i].nextfree = &(tvp[i+1]);
2557 tvp[astatSize-1].nextfree = (struct vcache *) 0;
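/* At this point the preallocated entries form a singly linked free list
 * threaded through the nextfree field:
 *   freeVCList -> tvp[0] -> tvp[1] -> ... -> tvp[astatSize-1] -> NULL  */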
2558 #ifdef AFS_AIX32_ENV
2559 pin((char *)tvp, astatSize * sizeof(struct vcache)); /* XXX */
2564 #if defined(AFS_SGI_ENV)
2565 for(i=0; i < astatSize; i++) {
2566 char name[METER_NAMSZ];
2567 struct vcache *tvc = &tvp[i];
2569 tvc->v.v_number = ++afsvnumbers;
2570 tvc->vc_rwlockid = OSI_NO_LOCKID;
2571 initnsema(&tvc->vc_rwlock, 1, makesname(name, "vrw", tvc->v.v_number));
2572 #ifndef AFS_SGI53_ENV
2573 initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
2575 #ifndef AFS_SGI62_ENV
2576 initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
2577 #endif /* AFS_SGI62_ENV */
2590 void shutdown_vcache(void)
2593 struct afs_cbr *tsp, *nsp;
2595 * XXX We may miss some of the vcaches: when there are no free vcache
2596 * entries and all the vcache entries are active, we allocate an additional
2597 * one; admittedly that almost never occurs.
2599 #if !defined(AFS_OSF_ENV)
2600 afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
2602 #ifdef AFS_AIX32_ENV
2603 unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
2607 register struct afs_q *tq, *uq;
2608 register struct vcache *tvc;
2609 for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
2613 osi_FreeSmallSpace(tvc->mvid);
2614 tvc->mvid = (struct VenusFid*)0;
2617 aix_gnode_rele((struct vnode *)tvc);
2619 if (tvc->linkData) {
2620 afs_osi_Free(tvc->linkData, strlen(tvc->linkData)+1);
2625 * Also free the remaining ones in the Cache
2627 for (i=0; i < VCSIZE; i++) {
2628 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2630 osi_FreeSmallSpace(tvc->mvid);
2631 tvc->mvid = (struct VenusFid*)0;
2635 afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
2636 #ifdef AFS_AIX32_ENV
2639 vms_delete(tvc->segid);
2641 tvc->segid = tvc->vmh = NULL;
2642 if (tvc->vrefCount) osi_Panic("flushVcache: vm race");
2650 #if defined(AFS_SUN5_ENV)
2656 if (tvc->linkData) {
2657 afs_osi_Free(tvc->linkData, strlen(tvc->linkData)+1);
2661 afs_FreeAllAxs(&(tvc->Access));
2667 * Free any leftover callback queue
2669 for (tsp = afs_cbrSpace; tsp; tsp = nsp ) {
2671 afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
2675 #if !defined(AFS_OSF_ENV)
2676 freeVCList = Initial_freeVCList = 0;
2678 RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
2679 LOCK_INIT(&afs_xvcb, "afs_xvcb");