 * Copyright 2000, International Business Machines Corporation and others.
 * This software has been released under the terms of the IBM Public
 * License. For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 * afs_FlushActiveVcaches
#include <afsconfig.h>
#include "../afs/param.h"
#include "../afs/sysincludes.h"	/* Standard vendor system headers */
#include "../afs/afsincludes.h"	/* AFS-based standard headers */
#include "../afs/afs_stats.h"
#include "../afs/afs_cbqueue.h"
#include "../afs/afs_osidnlc.h"
afs_int32 afs_maxvcount = 0;	/* max number of vcache entries */
afs_int32 afs_vcount = 0;	/* number of vcache entries in use now */
#if defined(AFS_OSF30_ENV)
extern int max_vnodes;		/* number of total system vnodes */
extern int nvnode;		/* number of total system vnodes */
extern int numvnodes;		/* number of vnodes in use now */
#endif /* AFS_OSF_ENV */
/* Imported variables */
extern struct server *afs_servers[NSERVERS];
extern afs_rwlock_t afs_xserver;
extern afs_rwlock_t afs_xcbhash;
extern struct vcache *afs_globalVp;
extern struct mount *afs_globalVFS;
extern struct vnodeops Afs_vnodeops;
#elif defined(AFS_DARWIN_ENV)
extern struct mount *afs_globalVFS;
extern struct vfs *afs_globalVFS;
#endif /* AFS_OSF_ENV */
#if defined(AFS_DUX40_ENV)
extern struct vfs_ubcops afs_ubcops;
extern struct vnodeops Afs_vnodeops;
#endif /* AFS_SGI64_ENV */
/* Exported variables */
afs_rwlock_t afs_xvcache;		/* Lock: alloc new stat cache entries */
afs_lock_t afs_xvcb;			/* Lock: fids on which there are callbacks */
struct vcache *freeVCList;		/* Free list for stat cache entries */
struct vcache *Initial_freeVCList;	/* Initial list for above */
struct afs_q VLRU;			/* vcache LRU */
afs_int32 vcachegen = 0;
unsigned int afs_paniconwarn = 0;
struct vcache *afs_vhashT[VCSIZE];
afs_int32 afs_bulkStatsLost;
int afs_norefpanic = 0;

/* Forward declarations */
static afs_int32 afs_QueueVCB(struct vcache *avc);
 * Flush the given vcache entry.
 *	avc : Pointer to vcache entry to flush.
 *	slept : Pointer to int to set 1 if we sleep/drop locks, 0 if we don't.
 *	afs_xvcache lock must be held for writing upon entry to
 *	prevent people from changing the vrefCount field, and to
 *	protect the lruq and hnext fields.
 * LOCK: afs_FlushVCache afs_xvcache W
 * REFCNT: vcache ref count must be zero on entry except for osf1
 * RACE: lock is dropped and reobtained, permitting race in caller
int afs_FlushVCache(struct vcache *avc, int *slept)
{ /*afs_FlushVCache*/
    register afs_int32 i, code;
    register struct vcache **uvc, *wvc, *tvc;

    AFS_STATCNT(afs_FlushVCache);
    afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
               ICL_TYPE_INT32, avc->states);
    VN_LOCK((struct vnode *)avc);
    code = osi_VM_FlushVCache(avc, slept);
    if (avc->states & CVFlushed) {
    if (avc->nextfree || !avc->vlruq.prev || !avc->vlruq.next) {  /* qv afs.h */
        refpanic ("LRU vs. Free inconsistency");
    avc->states |= CVFlushed;
    /* pull the entry out of the lruq and put it on the free list */
    QRemove(&avc->vlruq);
    avc->vlruq.prev = avc->vlruq.next = (struct afs_q *) 0;

    /* keep track of # of files that we bulk stat'd, but never used
     * before they got recycled.
     */
    if (avc->states & CBulkStat)

    /* remove entry from the hash chain */
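    /* note: the unlink scan below uses a pointer-to-pointer (uvc) so the
     * matching entry can be spliced out with a single store, with no
     * separate "previous" variable to maintain */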
    i = VCHash(&avc->fid);
    uvc = &afs_vhashT[i];
    for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
    avc->hnext = (struct vcache *) NULL;
    if (!wvc) osi_Panic("flushvcache");  /* not in correct hash bucket */
    if (avc->mvid) osi_FreeSmallSpace(avc->mvid);
    avc->mvid = (struct VenusFid *)0;
        afs_osi_Free(avc->linkData, strlen(avc->linkData)+1);
        avc->linkData = NULL;
    afs_FreeAllAxs(&(avc->Access));

    /* we can't really give back callbacks on RO files, since the
     * server only tracks them on a per-volume basis, and we don't
     * know whether we still have some other files from the same
     * volume. */
    if ((avc->states & CRO) == 0 && avc->callback) {
    ObtainWriteLock(&afs_xcbhash, 460);
    afs_DequeueCallback(avc);  /* remove it from queued callbacks list */
    avc->states &= ~(CStatd | CUnique);
    ReleaseWriteLock(&afs_xcbhash);
    afs_symhint_inval(avc);
    if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
        osi_dnlc_purgedp(avc);  /* if it (could be) a directory */
        osi_dnlc_purgevp(avc);

    /*
     * Next, keep track of which vnodes we've deleted for create's
     * optimistic synchronization algorithm
     */
    if (avc->fid.Fid.Vnode & 1) afs_oddZaps++;
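    /* AFS assigns odd vnode numbers to directories and even ones to plain
     * files, so the parity test above counts directory zaps separately */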
#if !defined(AFS_OSF_ENV)
    /* put the entry in the free list */
    avc->nextfree = freeVCList;
    if (avc->vlruq.prev || avc->vlruq.next) {
        refpanic ("LRU vs. Free inconsistency");
    /* This should put it back on the vnode free list since usecount is 1 */
    if (VREFCOUNT(avc) > 0) {
        VN_UNLOCK((struct vnode *)avc);
        AFS_RELE((struct vnode *)avc);
        if (afs_norefpanic) {
            printf ("flush vc refcnt < 1");
            (void) vgone(avc, VX_NOSLEEP, (struct vnodeops *) 0);
            VN_UNLOCK((struct vnode *)avc);
        else osi_Panic ("flush vc refcnt < 1");
#endif /* AFS_OSF_ENV */
    avc->states |= CVFlushed;
    VN_UNLOCK((struct vnode *)avc);
} /*afs_FlushVCache*/
 * The core of the inactive vnode op for all but IRIX.
void afs_InactiveVCache(struct vcache *avc, struct AFS_UCRED *acred)
    extern afs_rwlock_t afs_xdcache, afs_xvcache;

    AFS_STATCNT(afs_inactive);
    if (avc->states & CDirty) {
        /* we can't keep trying to push back dirty data forever. Give up. */
        afs_InvalidateAllSegments(avc, 1/*set lock*/);  /* turns off dirty bit */
    avc->states &= ~CMAPPED;  /* mainly used by SunOS 4.0.x */
    avc->states &= ~CDirty;   /* Turn it off */
    if (avc->states & CUnlinked) {
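        /* if xvcache or xdcache is already held we can't safely finish the
         * unlink here, so flag the entry for deferred deletion instead */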
        if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
            avc->states |= CUnlinkedDel;
        afs_remunlink(avc, 1);  /* ignore any return code */
 * Description: allocate a callback return structure from the
 * free list and return it.
 *
 * Env: The alloc and free routines are both called with the afs_xvcb lock
 * held, so we don't have to worry about blocking in osi_Alloc.
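 *
 * Records are carved out of blocks of AFS_NCBRS entries and kept on a
 * simple LIFO free list (afs_cbrSpace); since afs_xvcb serializes all
 * callers, the list itself needs no locking.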
static struct afs_cbr *afs_cbrSpace = 0;

struct afs_cbr *afs_AllocCBR() {
    register struct afs_cbr *tsp;

    while (!afs_cbrSpace) {
        if (afs_stats_cmperf.CallBackAlloced >= 2) {
            /* don't allocate more than 2 * AFS_NCBRS for now */
            afs_stats_cmperf.CallBackFlushes++;
        tsp = (struct afs_cbr *) afs_osi_Alloc(AFS_NCBRS * sizeof(struct afs_cbr));
        for(i=0; i < AFS_NCBRS-1; i++) {
            tsp[i].next = &tsp[i+1];
        tsp[AFS_NCBRS-1].next = 0;
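        /* the new block is now threaded into a NULL-terminated, singly
         * linked free list, ready to be handed out one record at a time */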
        afs_stats_cmperf.CallBackAlloced++;
    afs_cbrSpace = tsp->next;

 * Description: free a callback return structure.
 *	asp -- the address of the structure to free.
 *
 * Environment: the xvcb lock is held over these calls.
register struct afs_cbr *asp; {
    asp->next = afs_cbrSpace;

 * Description: flush all queued callbacks to all servers.
 *
 * Environment: holds xvcb lock over RPC to guard against race conditions
 * when a new callback is granted for the same file later on.
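 *
 * Fids are drained from each server's cbrs list into a local array and
 * shipped in batches of at most AFS_MAXCBRSCALL per GiveUpCallBacks RPC.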
afs_int32 afs_FlushVCBs (afs_int32 lockit)
    struct AFSFid tfids[AFS_MAXCBRSCALL];
    struct AFSCallBack callBacks[1];
    struct AFSCBFids fidArray;
    struct AFSCBs cbArray;
    struct afs_cbr *tcbrp;
    struct vrequest treq;
    int safety1, safety2, safety3;
    extern int afs_totalServers;

    if (code = afs_InitReq(&treq, &afs_osi_cred)) return code;
    treq.flags |= O_NONBLOCK;
    if (lockit) MObtainWriteLock(&afs_xvcb,273);
    ObtainReadLock(&afs_xserver);
    for(i=0; i<NSERVERS; i++) {
        for(safety1 = 0, tsp = afs_servers[i];
            tsp && safety1 < afs_totalServers+10; tsp=tsp->next, safety1++) {
            if (tsp->cbrs == (struct afs_cbr *) 0) continue;

            /* otherwise, grab a block of AFS_MAXCBRSCALL from the list
             * and make an RPC, over and over again.
             */
            tcount = 0;  /* number found so far */
            for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
                if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
                    /* if buffer is full, or we've queued all we're going
                     * to from this server, we should flush out the
                     * callbacks.
                     */
                    fidArray.AFSCBFids_len = tcount;
                    fidArray.AFSCBFids_val = (struct AFSFid *) tfids;
                    cbArray.AFSCBs_len = 1;
                    cbArray.AFSCBs_val = callBacks;
                    callBacks[0].CallBackType = CB_EXCLUSIVE;
                    for (safety3 = 0; safety3 < MAXHOSTS*2; safety3++) {
                        tc = afs_ConnByHost(tsp, tsp->cell->fsport,
                                            tsp->cell->cell, &treq, 0,
                            XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
                            code = RXAFS_GiveUpCallBacks(tc->id, &fidArray,
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
                        if (!afs_Analyze(tc, code, 0, &treq,
                                         AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS,
                                         SHARED_LOCK, tsp->cell)) {

                    /* ignore return code, since callbacks may have
                     * been returned anyway, we shouldn't leave them
                     * around to be returned again.
                     *
                     * Next, see if we are done with this server, and if so,
                     * break to deal with the next one.
                     */
                    if (!tsp->cbrs) break;
                } /* if to flush full buffer */
                /* if we make it here, we have an entry at the head of cbrs,
                 * which we should copy to the file ID array and then free.
                 */
                tfids[tcount++] = tcbrp->fid;
                tsp->cbrs = tcbrp->next;
            } /* while loop for this one server */
            if (safety2 > afs_cacheStats) {
                afs_warn("possible internal error afs_flushVCBs (%d)\n", safety2);
        } /* for loop for this hash chain */
    } /* loop through all hash chains */
    if (safety1 > afs_totalServers+2) {
        afs_warn("AFS internal error (afs_flushVCBs) (%d > %d), continuing...\n", safety1, afs_totalServers+2);
        osi_Panic("afs_flushVCBS safety1");
    ReleaseReadLock(&afs_xserver);
    if (lockit) MReleaseWriteLock(&afs_xvcb);

 * Queue a callback on the given fid.
 *	Locks the xvcb lock.
 *	Called when the xvcache lock is already held.
static afs_int32 afs_QueueVCB(struct vcache *avc)
    register struct server *tsp;
    register struct afs_cbr *tcbp;

    AFS_STATCNT(afs_QueueVCB);
    /* The callback is really just a struct server ptr. */
    tsp = (struct server *)(avc->callback);

    /* we now have a pointer to the server, so we just allocate
     * a queue entry and queue it.
     */
    MObtainWriteLock(&afs_xvcb,274);
    tcbp = afs_AllocCBR();
    tcbp->fid = avc->fid.Fid;
    tcbp->next = tsp->cbrs;

    /* now release locks and return */
    MReleaseWriteLock(&afs_xvcb);
 * Remove a queued callback by looking through all the servers
 * to see if any have this callback queued.
 *
 *	afid: The fid we want cleansed of queued callbacks.
 *
 *	Locks xvcb and xserver locks.
 *	Typically called with xdcache, xvcache and/or individual vcache
register struct VenusFid *afid;
    register struct server *tsp;
    register struct afs_cbr *tcbrp;
    struct afs_cbr **lcbrpp;

    AFS_STATCNT(afs_RemoveVCB);
    MObtainWriteLock(&afs_xvcb,275);
    ObtainReadLock(&afs_xserver);
    for(i=0;i<NSERVERS;i++) {
        for(tsp=afs_servers[i]; tsp; tsp=tsp->next) {
            /* if cell is known, and is wrong, then skip this server */
            if (tsp->cell && tsp->cell->cell != afid->Cell) continue;

            /*
             * Otherwise, iterate through file IDs we're sending to the
             * server.
             */
            lcbrpp = &tsp->cbrs;  /* first queued return callback */
            for(tcbrp = *lcbrpp; tcbrp; lcbrpp = &tcbrp->next, tcbrp = *lcbrpp) {
                if (afid->Fid.Volume == tcbrp->fid.Volume &&
                    afid->Fid.Unique == tcbrp->fid.Unique &&
                    afid->Fid.Vnode == tcbrp->fid.Vnode) {
                    *lcbrpp = tcbrp->next;  /* unthread from list */
    ReleaseReadLock(&afs_xserver);
    MReleaseWriteLock(&afs_xvcb);
 * This routine is responsible for allocating a new cache entry
 * from the free list. It formats the cache entry and inserts it
 * into the appropriate hash tables. It must be called with
 * afs_xvcache write-locked so as to prevent several processes from
 * trying to create a new cache entry simultaneously.
 *
 *	afid : The file id of the file whose cache entry is being
/* LOCK: afs_NewVCache afs_xvcache W */
struct vcache *afs_NewVCache(struct VenusFid *afid, struct server *serverp,
                             afs_int32 lockit, afs_int32 locktype)
    afs_int32 anumber = VCACHE_FREE;
    struct gnode *gnodepnt;
    struct vm_info *vm_info_ptr;
#endif /* AFS_MACH_ENV */
#endif /* AFS_OSF_ENV */
    struct afs_q *tq, *uq;

    AFS_STATCNT(afs_NewVCache);
#ifdef AFS_LINUX22_ENV
    /* Free some if possible. */
    struct afs_q *tq, *uq;
    int i; char *panicstr;
    int vmax = 2 * afs_cacheStats;
    int vn = VCACHE_FREE;

    shrink_dcache_sb(afs_globalVFS);

    for(tq = VLRU.prev; tq != &VLRU && vn > 0; tq = uq) {
        if (tvc->states & CVFlushed)
            refpanic ("CVFlushed on VLRU");
            refpanic ("Exceeded pool of AFS vnodes (VLRU cycle?)");
        else if (QNext(uq) != tq)
            refpanic ("VLRU inconsistent");

        if (tvc == afs_globalVp)

        if ( VREFCOUNT(tvc) && tvc->opens == 0 ) {
            struct inode *ip = (struct inode*)tvc;
            if (list_empty(&ip->i_dentry)) {
                struct list_head *cur;
                struct list_head *head = &ip->i_dentry;
#if defined(AFS_LINUX24_ENV)
                spin_lock(&dcache_lock);
                while ((cur = cur->next) != head) {
                    struct dentry *dentry = list_entry(cur, struct dentry, d_alias);
#if defined(AFS_LINUX24_ENV)
                    if (!atomic_read(&dentry->d_count)) {
                    if (!dentry->d_count) {
#if defined(AFS_LINUX24_ENV)
                    spin_unlock(&dcache_lock);
#if defined(AFS_LINUX24_ENV)
                spin_unlock(&dcache_lock);
#endif /* AFS_LINUX22_ENV */

    if (afs_vcount >= afs_maxvcount)
    /*
     * If we are using > 33% of the total system vnodes for AFS vcache
     * entries or we are using the maximum number of vcache entries,
     * then free some. (if our usage is > 33% we should free some, if
     * our usage is > afs_maxvcount, set elsewhere to 0.5*nvnode,
     * we _must_ free some -- no choice).
     */
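    /* Illustrative numbers (not from the source): with nvnode == 3000 the
     * 33% rule starts reclaiming once afs_vcount passes 1000, while
     * afs_maxvcount (0.5 * nvnode == 1500) acts as the hard ceiling. */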
    if (((3 * afs_vcount) > nvnode) || (afs_vcount >= afs_maxvcount))
        struct afs_q *tq, *uq;
        int i; char *panicstr;

        for(tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
            if (tvc->states & CVFlushed)
                refpanic ("CVFlushed on VLRU");
            else if (i++ > afs_maxvcount)
                refpanic ("Exceeded pool of AFS vnodes (VLRU cycle?)");
            else if (QNext(uq) != tq)
                refpanic ("VLRU inconsistent");
            else if (VREFCOUNT(tvc) < 1)
                refpanic ("refcnt 0 on VLRU");

            if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
                && (tvc->states & CUnlinkedDel) == 0) {
                code = afs_FlushVCache(tvc, &fv_slept);
                continue;  /* start over - may have raced. */

        if (anumber == VCACHE_FREE) {
            printf("NewVCache: warning none freed, using %d of %d\n",
                   afs_vcount, afs_maxvcount);
            if (afs_vcount >= afs_maxvcount) {
                osi_Panic("NewVCache - none freed");
                /* XXX instead of panicking, should do afs_maxvcount++
                   and magic up another one */
    if (getnewvnode(MOUNT_AFS, &Afs_vnodeops, &nvc)) {
        /* What should we do ???? */
        osi_Panic("afs_NewVCache: no more vnodes");
    tvc->nextfree = (struct vcache *)0;
#else /* AFS_OSF_ENV */
    /* pull out a free cache entry */
        for(tq = VLRU.prev; (anumber > 0) && (tq != &VLRU); tq = uq) {
            if (tvc->states & CVFlushed)
                refpanic("CVFlushed on VLRU");
            else if (i++ > 2*afs_cacheStats)  /* even allowing for a few xallocs... */
                refpanic("Increase -stat parameter of afsd (VLRU cycle?)");
            else if (QNext(uq) != tq)
                refpanic("VLRU inconsistent");
#ifdef AFS_DARWIN_ENV
            if (tvc->opens == 0 && ((tvc->states & CUnlinkedDel) == 0) &&
                VREFCOUNT(tvc) == 1 && UBCINFOEXISTS(&tvc->v)) {
                osi_VM_TryReclaim(tvc, &fv_slept);
                continue;  /* start over - may have raced. */

            if (VREFCOUNT(tvc) == 0 && tvc->opens == 0
                && (tvc->states & CUnlinkedDel) == 0) {
                code = afs_FlushVCache(tvc, &fv_slept);
                continue;  /* start over - may have raced. */
            if (tq == uq) break;
        /* none free, making one is better than a panic */
        afs_stats_cmperf.vcacheXAllocs++;  /* count in case we have a leak */
        tvc = (struct vcache *) afs_osi_Alloc(sizeof (struct vcache));
        pin((char *)tvc, sizeof(struct vcache));  /* XXX */
        /* In case it still comes here we need to fill this */
        tvc->v.v_vm_info = VM_INFO_NULL;
        vm_info_init(tvc->v.v_vm_info);
        /* perhaps we should also do close_flush on non-NeXT mach systems;
         * who knows; we don't currently have the sources.
         */
#endif /* AFS_MACH_ENV */
#if defined(AFS_SGI_ENV)
    { char name[METER_NAMSZ];
        memset(tvc, 0, sizeof(struct vcache));
        tvc->v.v_number = ++afsvnumbers;
        tvc->vc_rwlockid = OSI_NO_LOCKID;
        initnsema(&tvc->vc_rwlock, 1, makesname(name, "vrw", tvc->v.v_number));
#ifndef AFS_SGI53_ENV
        initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
#ifndef AFS_SGI62_ENV
        initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
#endif /* AFS_SGI_ENV */
        tvc = freeVCList;  /* take from free list */
        freeVCList = tvc->nextfree;
        tvc->nextfree = (struct vcache *)0;
#endif /* AFS_OSF_ENV */
        vm_info_ptr = tvc->v.v_vm_info;
#endif /* AFS_MACH_ENV */

#if !defined(AFS_SGI_ENV) && !defined(AFS_OSF_ENV)
    memset((char *)tvc, 0, sizeof(struct vcache));
    RWLOCK_INIT(&tvc->lock, "vcache lock");
#if defined(AFS_SUN5_ENV)
    RWLOCK_INIT(&tvc->vlock, "vcache vlock");
#endif /* defined(AFS_SUN5_ENV) */
    tvc->v.v_vm_info = vm_info_ptr;
    tvc->v.v_vm_info->pager = MEMORY_OBJECT_NULL;
#endif /* AFS_MACH_ENV */
    tvc->parentVnode = 0;
    tvc->mvid = (struct VenusFid *) 0;
    tvc->linkData = (char *) 0;
    tvc->execsOrWriters = 0;
    tvc->last_looker = 0;
    tvc->asynchrony = -1;
    afs_symhint_inval(tvc);
    tvc->flushDV.low = tvc->flushDV.high = AFS_MAXDV;
    tvc->truncPos = AFS_NOTRUNC;  /* don't truncate until we need to */
    hzero(tvc->m.DataVersion);    /* in case we copy it into flushDV */
    /* Hold it for the LRU (should make count 2) */
    VN_HOLD((struct vnode *)tvc);
#else /* AFS_OSF_ENV */
    VREFCOUNT_SET(tvc, 1);  /* us */
#endif /* AFS_OSF_ENV */
    LOCK_INIT(&tvc->pvmlock, "vcache pvmlock");
    tvc->vmh = tvc->segid = NULL;
#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV) || defined(AFS_SUN5_ENV)
#if defined(AFS_SUN5_ENV)
    rw_init(&tvc->rwlock, "vcache rwlock", RW_DEFAULT, NULL);
#if defined(AFS_SUN55_ENV)
    /* This is required if the kaio (kernel asynchronous io)
    ** module is installed. Inside the kernel, the function
    ** check_vp( common/os/aio.c) checks to see if the kernel has
    ** to provide asynchronous io for this vnode. This
    ** function extracts the device number by following the
    ** v_data field of the vnode. If we do not set this field
    ** then the system panics. The value of the v_data field
    ** is not really important for AFS vnodes because the kernel
    ** does not do asynchronous io for regular files. Hence,
    ** for the time being, we fill up the v_data field with the
    ** vnode pointer itself. */
    tvc->v.v_data = (char *)tvc;
#endif /* AFS_SUN55_ENV */
    afs_BozonInit(&tvc->pvnLock, tvc);
    tvc->callback = serverp;  /* to minimize chance that clear
                               * request rpc gets lost */
    /* initialize vnode data, note vrefCount is v.v_count */
    /* Don't forget to free the gnode space */
    tvc->v.v_gnode = gnodepnt = (struct gnode *) osi_AllocSmallSpace(sizeof(struct gnode));
    memset((char *)gnodepnt, 0, sizeof(struct gnode));
    memset((void*)&(tvc->vc_bhv_desc), 0, sizeof(tvc->vc_bhv_desc));
    bhv_desc_init(&(tvc->vc_bhv_desc), tvc, tvc, &Afs_vnodeops);
    vn_bhv_head_init(&(tvc->v.v_bh), "afsvp");
    vn_bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
    bhv_head_init(&(tvc->v.v_bh));
    bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
    tvc->v.v_mreg = tvc->v.v_mregb = (struct pregion*)tvc;
    tvc->v.v_trace = ktrace_alloc(VNODE_TRACE_SIZE, 0);
    init_bitlock(&tvc->v.v_pcacheflag, VNODE_PCACHE_LOCKBIT, "afs_pcache",
    init_mutex(&tvc->v.v_filocksem, MUTEX_DEFAULT, "afsvfl", (long)tvc);
    init_mutex(&tvc->v.v_buf_lock, MUTEX_DEFAULT, "afsvnbuf", (long)tvc);
    vnode_pcache_init(&tvc->v);
#if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
    /* Above define is never true except in SGI test kernels. */
    init_bitlock(&(tvc->v.v_flag), VLOCK, "vnode", tvc->v.v_number);
    AFS_VN_INIT_BUF_LOCK(&(tvc->v));
    SetAfsVnode((struct vnode *)tvc);
#endif /* AFS_SGI64_ENV */
#ifdef AFS_DARWIN_ENV
    tvc->v.v_ubcinfo = UBC_INFO_NULL;
    lockinit(&tvc->rwlock, PINOD, "vcache rwlock", 0, 0);
    cache_purge((struct vnode *)tvc);
    /* VLISTNONE(&tvc->v); */
    tvc->v.v_freelist.tqe_next=0;
    tvc->v.v_freelist.tqe_prev=(struct vnode **)0xdeadb;
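    /* 0xdeadb appears to be the BSD "not on any free list" poison value
     * for tqe_prev; setting it by hand mimics VLISTNONE() without touching
     * the global vnode free list. */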
    /*tvc->vrefCount++;*/

    /*
     * The proper value for mvstat (for root fids) is set up by the caller.
     */
    if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
    if (afs_globalVFS == 0) osi_Panic("afs globalvfs");
    vSetVfsp(tvc, afs_globalVFS);
    tvc->v.v_vfsnext = afs_globalVFS->vfs_vnodes;  /* link off vfs */
    tvc->v.v_vfsprev = NULL;
    afs_globalVFS->vfs_vnodes = &tvc->v;
    if (tvc->v.v_vfsnext != NULL)
        tvc->v.v_vfsnext->v_vfsprev = &tvc->v;
    tvc->v.v_next = gnodepnt->gn_vnode;  /* Single vnode per gnode for us! */
    gnodepnt->gn_vnode = &tvc->v;
    tvc->v.g_dev = ((struct mount *)afs_globalVFS->vfs_data)->m_dev;
#if defined(AFS_DUX40_ENV)
    insmntque(tvc, afs_globalVFS, &afs_ubcops);
    /* Is this needed??? */
    insmntque(tvc, afs_globalVFS);
#endif /* AFS_OSF_ENV */
#endif /* AFS_DUX40_ENV */
#if defined(AFS_SGI_ENV)
    VN_SET_DPAGES(&(tvc->v), (struct pfdat*)NULL);
    osi_Assert((tvc->v.v_flag & VINACT) == 0);
    osi_Assert(VN_GET_PGCNT(&(tvc->v)) == 0);
    osi_Assert(tvc->mapcnt == 0 && tvc->vc_locktrips == 0);
    osi_Assert(tvc->vc_rwlockid == OSI_NO_LOCKID);
    osi_Assert(tvc->v.v_filocks == NULL);
#if !defined(AFS_SGI65_ENV)
    osi_Assert(tvc->v.v_filocksem == NULL);
    osi_Assert(tvc->cred == NULL);
    vnode_pcache_reinit(&tvc->v);
    tvc->v.v_rdev = NODEV;
    vn_initlist((struct vnlist *)&tvc->v);
#endif /* AFS_SGI_ENV */
#if defined(AFS_LINUX22_ENV)
    struct inode *ip = (struct inode*)tvc;
    sema_init(&ip->i_sem, 1);
#if defined(AFS_LINUX24_ENV)
    sema_init(&ip->i_zombie, 1);
    init_waitqueue_head(&ip->i_wait);
    spin_lock_init(&ip->i_data.i_shared_lock);
#ifdef STRUCT_ADDRESS_SPACE_HAS_PAGE_LOCK
    spin_lock_init(&ip->i_data.page_lock);
    INIT_LIST_HEAD(&ip->i_data.clean_pages);
    INIT_LIST_HEAD(&ip->i_data.dirty_pages);
    INIT_LIST_HEAD(&ip->i_data.locked_pages);
    INIT_LIST_HEAD(&ip->i_dirty_buffers);
    ip->i_data.host = (void*) ip;
    ip->i_mapping = &ip->i_data;
    sema_init(&ip->i_atomic_write, 1);
    init_waitqueue(&ip->i_wait);
    INIT_LIST_HEAD(&ip->i_hash);
    INIT_LIST_HEAD(&ip->i_dentry);
    ip->i_dev = afs_globalVFS->s_dev;
    ip->i_sb = afs_globalVFS;
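    /* the vcache doubles as the Linux struct inode here, so fields that
     * iget() would normally fill in must be initialized by hand */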
    osi_dnlc_purgedp(tvc);  /* this may be overkill */
    memset((char *)&(tvc->quick), 0, sizeof(struct vtodc));
    memset((char *)&(tvc->callsort), 0, sizeof(struct afs_q));
    tvc->slocks = (struct SimpleLocks *)0;
    tvc->hnext = afs_vhashT[i];
    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
        refpanic ("NewVCache VLRU inconsistent");
    QAdd(&VLRU, &tvc->vlruq);  /* put in lruq */
    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
        refpanic ("NewVCache VLRU inconsistent2");
    if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
        refpanic ("NewVCache VLRU inconsistent3");
    if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
        refpanic ("NewVCache VLRU inconsistent4");
 * afs_FlushActiveVcaches
 *
 *	doflocks : Do we handle flocks?
/* LOCK: afs_FlushActiveVcaches afs_xvcache N */
afs_FlushActiveVcaches(doflocks)
    register afs_int32 doflocks;
{ /*afs_FlushActiveVcaches*/
    register struct vcache *tvc;
    register struct conn *tc;
    register afs_int32 code;
    register struct AFS_UCRED *cred;
    struct vrequest treq, ureq;
    struct AFSVolSync tsync;

    AFS_STATCNT(afs_FlushActiveVcaches);
    ObtainReadLock(&afs_xvcache);
    for(i=0;i<VCSIZE;i++) {
        for(tvc = afs_vhashT[i]; tvc; tvc=tvc->hnext) {
            if (doflocks && tvc->flockCount != 0) {
                /* if this entry has an flock, send a keep-alive call out */
                ReleaseReadLock(&afs_xvcache);
                ObtainWriteLock(&tvc->lock,51);
                afs_InitReq(&treq, &afs_osi_cred);
                treq.flags |= O_NONBLOCK;
                tc = afs_Conn(&tvc->fid, &treq, SHARED_LOCK);
                    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
                    RXAFS_ExtendLock(tc->id,
                                     (struct AFSFid *) &tvc->fid.Fid,
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
                (afs_Analyze(tc, code, &tvc->fid, &treq,
                             AFS_STATS_FS_RPCIDX_EXTENDLOCK,
                             SHARED_LOCK, (struct cell *)0));
                ReleaseWriteLock(&tvc->lock);
                ObtainReadLock(&afs_xvcache);
            if ((tvc->states & CCore) || (tvc->states & CUnlinkedDel)) {
                /*
                 * Don't let it evaporate in case someone else is in
                 * this code. Also, drop the afs_xvcache lock while
                 * getting vcache locks.
                 */
                ReleaseReadLock(&afs_xvcache);
#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
                afs_BozonLock(&tvc->pvnLock, tvc);
#if defined(AFS_SGI_ENV)
                /*
                 * That's because if we come in via the CUnlinkedDel bit state path we'll have a 0 refcnt
                 */
                osi_Assert(VREFCOUNT(tvc) > 0);
                AFS_RWLOCK((vnode_t *)tvc, VRWLOCK_WRITE);
                ObtainWriteLock(&tvc->lock,52);
                if (tvc->states & CCore) {
                    tvc->states &= ~CCore;
                    /* XXXX Find better place-holder for cred XXXX */
                    cred = (struct AFS_UCRED *) tvc->linkData;
                    tvc->linkData = (char *) 0;  /* XXX */
                    afs_InitReq(&ureq, cred);
                    afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
                               ICL_TYPE_POINTER, tvc,
                               ICL_TYPE_INT32, tvc->execsOrWriters);
                    code = afs_StoreOnLastReference(tvc, &ureq);
                    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
                    afs_BozonUnlock(&tvc->pvnLock, tvc);
                    hzero(tvc->flushDV);
                    if (code && code != VNOVNODE) {
                        afs_StoreWarn(code, tvc->fid.Fid.Volume,
                                      /* /dev/console */ 1);
                } else if (tvc->states & CUnlinkedDel) {
                    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
                    afs_BozonUnlock(&tvc->pvnLock, tvc);
#if defined(AFS_SGI_ENV)
                    AFS_RWUNLOCK((vnode_t *)tvc, VRWLOCK_WRITE);
                    afs_remunlink(tvc, 0);
#if defined(AFS_SGI_ENV)
                    AFS_RWLOCK((vnode_t *)tvc, VRWLOCK_WRITE);
                    /* lost (or won, perhaps) the race condition */
                    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
                    afs_BozonUnlock(&tvc->pvnLock, tvc);
#if defined(AFS_SGI_ENV)
                    AFS_RWUNLOCK((vnode_t *)tvc, VRWLOCK_WRITE);
                ObtainReadLock(&afs_xvcache);
                AFS_RELE((struct vnode *)tvc);
                /* Matches write code setting CCore flag */
#ifdef AFS_DARWIN_ENV
            if (VREFCOUNT(tvc) == 1 && UBCINFOEXISTS(&tvc->v)) {
                if (tvc->opens) panic("flushactive open, hasubc, but refcnt 1");
                osi_VM_TryReclaim(tvc,0);
    ReleaseReadLock(&afs_xvcache);
} /*afs_FlushActiveVcaches*/

 * Make sure a cache entry is up-to-date status-wise.
 *
 * NOTE: everywhere that calls this can potentially be sped up
 *	by checking CStatd first, and avoiding doing the InitReq
 *	if this is up-to-date.
 *
 *	These days, the only places that call this KNOW already that the
 *	vcache is not up-to-date, so we don't screw around.
 *
 *	avc : Ptr to vcache entry to verify.
int afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
    register struct vcache *tvc;

    AFS_STATCNT(afs_VerifyVCache);

#if defined(AFS_OSF_ENV)
    ObtainReadLock(&avc->lock);
    if (afs_IsWired(avc)) {
        ReleaseReadLock(&avc->lock);
    ReleaseReadLock(&avc->lock);
#endif /* AFS_OSF_ENV */
    /* otherwise we must fetch the status info */
    ObtainWriteLock(&avc->lock,53);
    if (avc->states & CStatd) {
        ReleaseWriteLock(&avc->lock);
    ObtainWriteLock(&afs_xcbhash, 461);
    avc->states &= ~( CStatd | CUnique );
    avc->callback = (struct server *)0;
    afs_DequeueCallback(avc);
    ReleaseWriteLock(&afs_xcbhash);
    ReleaseWriteLock(&avc->lock);

    /* since we've been called back, or the callback has expired,
     * it's possible that the contents of this directory, or this
     * file's name have changed, thus invalidating the dnlc contents.
     */
    if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
        osi_dnlc_purgedp (avc);
        osi_dnlc_purgevp (avc);

    /* fetch the status info */
    tvc = afs_GetVCache(&avc->fid, areq, (afs_int32*)0, avc, READ_LOCK);
    if (!tvc) return ENOENT;
    /* Put it back; caller has already incremented vrefCount */
    afs_PutVCache(tvc, READ_LOCK);
} /*afs_VerifyVCache*/
 * Simple copy of stat info into cache.
 *
 *	avc   : Ptr to vcache entry involved.
 *	astat : Ptr to stat info to copy.
 *
 *	Nothing interesting.
 *
 * Callers: as of 1992-04-29, only called by WriteVCache
afs_SimpleVStat(avc, astat, areq)
    register struct vcache *avc;
    register struct AFSFetchStatus *astat;
    struct vrequest *areq;
{ /*afs_SimpleVStat*/

    AFS_STATCNT(afs_SimpleVStat);

    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
        && !AFS_VN_MAPPED((vnode_t*)avc))
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc))
#if defined(AFS_SGI_ENV)
        osi_Assert((valusema(&avc->vc_rwlock) <= 0) &&
                   (OSI_GET_LOCKID() == avc->vc_rwlockid));
        if (astat->Length < avc->m.Length) {
            vnode_t *vp = (vnode_t *)avc;
            osi_Assert(WriteLocked(&avc->lock));
            ReleaseWriteLock(&avc->lock);
            PTOSSVP(vp, (off_t)astat->Length, (off_t)MAXLONG);
            ObtainWriteLock(&avc->lock,67);
        /* if writing the file, don't fetch over this value */
        afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT,
                   ICL_TYPE_POINTER, avc,
                   ICL_TYPE_INT32, avc->m.Length,
                   ICL_TYPE_INT32, astat->Length);
    avc->m.Length = astat->Length;
    avc->m.Date = astat->ClientModTime;
    avc->m.Owner = astat->Owner;
    avc->m.Group = astat->Group;
    avc->m.Mode = astat->UnixModeBits;
    if (vType(avc) == VREG) {
        avc->m.Mode |= S_IFREG;
    else if (vType(avc) == VDIR) {
        avc->m.Mode |= S_IFDIR;
    else if (vType(avc) == VLNK) {
        avc->m.Mode |= S_IFLNK;
        if ((avc->m.Mode & 0111) == 0) avc->mvstat = 1;
    if (avc->states & CForeign) {
        struct axscache *ac;
        avc->anyAccess = astat->AnonymousAccess;
        if ((astat->CallerAccess & ~astat->AnonymousAccess))
            /*
             * Caller has at least one bit not covered by anonymous, and
             * thus may have interesting rights.
             *
             * HOWEVER, this is a really bad idea, because any access query
             * for bits which aren't covered by anonymous, on behalf of a user
             * who doesn't have any special rights, will result in an answer of
             * the form "I don't know, let's make a FetchStatus RPC and find out!"
             * It's an especially bad idea under Ultrix, since (due to the lack of
             * a proper access() call) it must perform several afs_access() calls
             * in order to create magic mode bits that vary according to who makes
             * the call. In other words, _every_ stat() generates a test for
#endif /* badidea */
        if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
            ac->axess = astat->CallerAccess;
        else  /* not found, add a new one if possible */
            afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
} /*afs_SimpleVStat*/
 * Store the status info *only* back to the server for a
 *
 *	avc     : Ptr to the vcache entry.
 *	astatus : Ptr to the status info to store.
 *	areq    : Ptr to the associated vrequest.
 *
 *	Must be called with a shared lock held on the vnode.
afs_WriteVCache(avc, astatus, areq)
    register struct vcache *avc;
    register struct AFSStoreStatus *astatus;
    struct vrequest *areq;
{ /*afs_WriteVCache*/
    struct AFSFetchStatus OutStatus;
    struct AFSVolSync tsync;

    AFS_STATCNT(afs_WriteVCache);
    afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
               ICL_TYPE_INT32, avc->m.Length);
        tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
            XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
            code = RXAFS_StoreStatus(tc->id,
                                     (struct AFSFid *) &avc->fid.Fid,
                                     astatus, &OutStatus, &tsync);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
        (afs_Analyze(tc, code, &avc->fid, areq,
                     AFS_STATS_FS_RPCIDX_STORESTATUS,
                     SHARED_LOCK, (struct cell *)0));
    UpgradeSToWLock(&avc->lock,20);
        /* success, do the changes locally */
        afs_SimpleVStat(avc, &OutStatus, areq);
        /*
         * Update the date, too. SimpleVStat didn't do this, since
         * it thought we were doing this after fetching new status
         * over a file being written.
         */
        avc->m.Date = OutStatus.ClientModTime;
        /* failure, set up to check with server next time */
        ObtainWriteLock(&afs_xcbhash, 462);
        afs_DequeueCallback(avc);
        avc->states &= ~( CStatd | CUnique );  /* turn off stat valid flag */
        ReleaseWriteLock(&afs_xcbhash);
        if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
            osi_dnlc_purgedp (avc);  /* if it (could be) a directory */
    ConvertWToSLock(&avc->lock);
} /*afs_WriteVCache*/
 * Copy astat block into vcache info
 *
 *	avc   : Ptr to vcache entry.
 *	astat : Ptr to stat block to copy in.
 *	areq  : Ptr to associated request.
 *
 *	Must be called under a write lock
 *
 * Note: this code may get dataversion and length out of sync if the file has
 *	been modified. This is less than ideal. I haven't thought about
 *	it sufficiently to be certain that it is adequate.
afs_ProcessFS(avc, astat, areq)
    register struct vcache *avc;
    struct vrequest *areq;
    register struct AFSFetchStatus *astat;

    AFS_STATCNT(afs_ProcessFS);

    /* WARNING: afs_DoBulkStat uses the Length field to store a sequence
     * number for each bulk status request. Under no circumstances
     * should afs_DoBulkStat store a sequence number if the new
     * length will be ignored when afs_ProcessFS is called with
     * new stats. If you change the following conditional then you
     * also need to change the conditional in afs_DoBulkStat. */
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
        && !AFS_VN_MAPPED((vnode_t*)avc))
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc))
        /* if we're writing or mapping this file, don't fetch over these
         * values.
         */
        afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
                   ICL_TYPE_INT32, avc->m.Length,
                   ICL_TYPE_INT32, astat->Length);
    avc->m.Length = astat->Length;
    avc->m.Date = astat->ClientModTime;
    hset64(avc->m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
    avc->m.Owner = astat->Owner;
    avc->m.Mode = astat->UnixModeBits;
    avc->m.Group = astat->Group;
    avc->m.LinkCount = astat->LinkCount;
    if (astat->FileType == File) {
        vSetType(avc, VREG);
        avc->m.Mode |= S_IFREG;
    else if (astat->FileType == Directory) {
        vSetType(avc, VDIR);
        avc->m.Mode |= S_IFDIR;
    else if (astat->FileType == SymbolicLink) {
        vSetType(avc, VLNK);
        avc->m.Mode |= S_IFLNK;
        if ((avc->m.Mode & 0111) == 0) avc->mvstat = 1;
    avc->anyAccess = astat->AnonymousAccess;
    if ((astat->CallerAccess & ~astat->AnonymousAccess))
        /*
         * Caller has at least one bit not covered by anonymous, and
         * thus may have interesting rights.
         *
         * HOWEVER, this is a really bad idea, because any access query
         * for bits which aren't covered by anonymous, on behalf of a user
         * who doesn't have any special rights, will result in an answer of
         * the form "I don't know, let's make a FetchStatus RPC and find out!"
         * It's an especially bad idea under Ultrix, since (due to the lack of
         * a proper access() call) it must perform several afs_access() calls
         * in order to create magic mode bits that vary according to who makes
         * the call. In other words, _every_ stat() generates a test for
#endif /* badidea */
        struct axscache *ac;
        if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
            ac->axess = astat->CallerAccess;
        else  /* not found, add a new one if possible */
            afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
#ifdef AFS_LINUX22_ENV
    vcache2inode(avc);  /* Set the inode attr cache */
afs_RemoteLookup(afid, areq, name, nfid, OutStatusp, CallBackp, serverp, tsyncp)
    register struct VenusFid *afid;
    struct vrequest *areq;
    struct VenusFid *nfid;
    struct AFSFetchStatus *OutStatusp;
    struct AFSCallBack *CallBackp;
    struct server **serverp;
    struct AFSVolSync *tsyncp;
    register struct vcache *tvc;
    register struct conn *tc;
    struct AFSFetchStatus OutDirStatus;

    if (!name) name = "";  /* XXX */
        tc = afs_Conn(afid, areq, SHARED_LOCK);
            if (serverp) *serverp = tc->srvr->server;
            XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
            code = RXAFS_Lookup(tc->id, (struct AFSFid *) &afid->Fid, name,
                                (struct AFSFid *) &nfid->Fid,
                                OutStatusp, &OutDirStatus, CallBackp, tsyncp);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
        (afs_Analyze(tc, code, afid, areq,
                     AFS_STATS_FS_RPCIDX_XLOOKUP,
                     SHARED_LOCK, (struct cell *)0));
 * Given a file id and a vrequest structure, fetch the status
 * information associated with the file.
 *
 *	areq : Ptr to associated vrequest structure, specifying the
 *		user whose authentication tokens will be used.
 *	avc  : caller may already have a vcache for this file, which is
 *
 *	The cache entry is returned with an increased vrefCount field.
 *	The entry must be discarded by calling afs_PutVCache when you
 *	are through using the pointer to the cache entry.
 *
 *	You should not hold any locks when calling this function, except
 *	locks on other vcache entries. If you lock more than one vcache
 *	entry simultaneously, you should lock them in this order:
 *
 *	    1. Lock all files first, then directories.
 *	    2. Within a particular type, lock entries in Fid.Vnode order.
 *
 *	This locking hierarchy is convenient because it allows locking
 *	of a parent dir cache entry, given a file (to check its access
 *	control list). It also allows renames to be handled easily by
 *	locking directories in a constant order.
 * NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
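 *	(For example, to check a file's ACL you may lock the file's vcache
 *	and then its parent directory's; and two renames over the same pair
 *	of directories always lock them in the same order, so they cannot
 *	deadlock each other.)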
struct vcache *afs_GetVCache(afid, areq, cached, avc, locktype)
    register struct VenusFid *afid;
    struct vrequest *areq;
    struct vcache *avc;  /* might have a vcache structure already, which must
                          * already be held by the caller */
    afs_int32 code, i, newvcache=0;
    register struct vcache *tvc;

    AFS_STATCNT(afs_GetVCache);

    if (cached) *cached = 0;  /* Init just in case */

    ObtainSharedLock(&afs_xvcache,5);

    tvc = afs_FindVCache(afid, 0, 0, &retry, DO_STATS | DO_VLRU);
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
        ReleaseSharedLock(&afs_xvcache);
        spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
        if (tvc->states & CStatd) {
            ReleaseSharedLock(&afs_xvcache);
        UpgradeSToWLock(&afs_xvcache,21);

        /* no cache entry, better grab one */
        tvc = afs_NewVCache(afid, (struct server *)0, 1, WRITE_LOCK);
        ConvertWToSLock(&afs_xvcache);
        afs_stats_cmperf.vcacheMisses++;

    ReleaseSharedLock(&afs_xvcache);

    ObtainWriteLock(&tvc->lock,54);

    if (tvc->states & CStatd) {
#ifdef AFS_LINUX22_ENV
        ReleaseWriteLock(&tvc->lock);
#ifdef AFS_DARWIN_ENV
#if defined(AFS_OSF_ENV)
    if (afs_IsWired(tvc)) {
        ReleaseWriteLock(&tvc->lock);
#endif /* AFS_OSF_ENV */

    ObtainWriteLock(&afs_xcbhash, 464);
    tvc->states &= ~CUnique;
    afs_DequeueCallback(tvc);
    ReleaseWriteLock(&afs_xcbhash);

    /* Is it always appropriate to throw away all the access rights? */
    afs_FreeAllAxs(&(tvc->Access));
    tvp = afs_GetVolume(afid, areq, READ_LOCK);  /* copy useful per-volume info */
        if ((tvp->states & VForeign)) {
            if (newvcache) tvc->states |= CForeign;
            if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
                && (tvp->rootUnique == afid->Fid.Unique)) {
        if (tvp->states & VRO) tvc->states |= CRO;
        if (tvp->states & VBackup) tvc->states |= CBackup;
        /* now copy ".." entry back out of volume structure, if necessary */
        if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
            tvc->mvid = (struct VenusFid *)
                osi_AllocSmallSpace(sizeof(struct VenusFid));
            *tvc->mvid = tvp->dotdot;
        afs_PutVolume(tvp, READ_LOCK);

    afs_RemoveVCB(afid);
        struct AFSFetchStatus OutStatus;
        code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
    ReleaseWriteLock(&tvc->lock);

    ObtainReadLock(&afs_xvcache);
    ReleaseReadLock(&afs_xvcache);
    return (struct vcache *) 0;
    ReleaseWriteLock(&tvc->lock);
#ifdef AFS_DARWIN_ENV
struct vcache *afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
                                afs_int32 *cached, afs_int32 locktype,
                                struct vcache *adp, char *aname)
    afs_int32 code, now, newvcache=0, hash;
    struct VenusFid nfid;
    register struct vcache *tvc;
    struct AFSFetchStatus OutStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    struct server *serverp = 0;

    AFS_STATCNT(afs_GetVCache);
    if (cached) *cached = 0;  /* Init just in case */

    ObtainReadLock(&afs_xvcache);
    tvc = afs_FindVCache(afid, 0, 0, &retry, DO_STATS /* no vlru */);
        ReleaseReadLock(&afs_xvcache);
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
        spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
        ObtainReadLock(&tvc->lock);
        if (tvc->states & CStatd) {
            ReleaseReadLock(&tvc->lock);
        tvc->states &= ~CUnique;
        ReleaseReadLock(&tvc->lock);
        ObtainReadLock(&afs_xvcache);
    ReleaseReadLock(&afs_xvcache);

    /* lookup the file */
    origCBs = afs_allCBs;  /* if anything changes, we don't have a cb */
    code = afs_RemoteLookup(&adp->fid, areq, aname, &nfid, &OutStatus, &CallBack,

    ObtainSharedLock(&afs_xvcache,6);
    tvc = afs_FindVCache(&nfid, 0, 0, &retry, DO_VLRU /* no xstats now */);
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
        ReleaseSharedLock(&afs_xvcache);
        spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
        /* no cache entry, better grab one */
        UpgradeSToWLock(&afs_xvcache,22);
        tvc = afs_NewVCache(&nfid, (struct server *)0, 1, WRITE_LOCK);
        ConvertWToSLock(&afs_xvcache);
    ReleaseSharedLock(&afs_xvcache);
    ObtainWriteLock(&tvc->lock,55);

    /* Is it always appropriate to throw away all the access rights? */
    afs_FreeAllAxs(&(tvc->Access));
    tvp = afs_GetVolume(afid, areq, READ_LOCK);  /* copy useful per-vol info */
        if ((tvp->states & VForeign)) {
            if (newvcache) tvc->states |= CForeign;
            if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
                && (tvp->rootUnique == afid->Fid.Unique))
        if (tvp->states & VRO) tvc->states |= CRO;
        if (tvp->states & VBackup) tvc->states |= CBackup;
        /* now copy ".." entry back out of volume structure, if necessary */
        if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
            tvc->mvid = (struct VenusFid *)
                osi_AllocSmallSpace(sizeof(struct VenusFid));
            *tvc->mvid = tvp->dotdot;

        ObtainWriteLock(&afs_xcbhash, 465);
        afs_DequeueCallback(tvc);
        tvc->states &= ~( CStatd | CUnique );
        ReleaseWriteLock(&afs_xcbhash);
        if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
            osi_dnlc_purgedp (tvc);  /* if it (could be) a directory */
        afs_PutVolume(tvp, READ_LOCK);
        ReleaseWriteLock(&tvc->lock);
        ObtainReadLock(&afs_xvcache);
        ReleaseReadLock(&afs_xvcache);
        return (struct vcache *) 0;

    ObtainWriteLock(&afs_xcbhash, 466);
    if (origCBs == afs_allCBs) {
        if (CallBack.ExpirationTime) {
            tvc->callback = serverp;
            tvc->cbExpires = CallBack.ExpirationTime+now;
            tvc->states |= CStatd | CUnique;
            tvc->states &= ~CBulkFetching;
            afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
        } else if (tvc->states & CRO) {
            /* adapt gives us an hour. */
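            /* (the fileserver issues no real callbacks for read-only
             * volumes, so the client synthesizes a one-hour expiry) */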
            tvc->cbExpires = 3600+osi_Time();  /*XXX*/
            tvc->states |= CStatd | CUnique;
            tvc->states &= ~CBulkFetching;
            afs_QueueCallback(tvc, CBHash(3600), tvp);
            tvc->callback = (struct server *)0;
            afs_DequeueCallback(tvc);
            tvc->states &= ~(CStatd | CUnique);
            if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
                osi_dnlc_purgedp (tvc);  /* if it (could be) a directory */
        afs_DequeueCallback(tvc);
        tvc->states &= ~CStatd;
        tvc->states &= ~CUnique;
        tvc->callback = (struct server *)0;
        if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
            osi_dnlc_purgedp (tvc);  /* if it (could be) a directory */
    ReleaseWriteLock(&afs_xcbhash);

    afs_PutVolume(tvp, READ_LOCK);
    afs_ProcessFS(tvc, &OutStatus, areq);

    ReleaseWriteLock(&tvc->lock);
#ifdef AFS_DARWIN_ENV
struct vcache *afs_GetRootVCache(struct VenusFid *afid,
                                 struct vrequest *areq, afs_int32 *cached,
                                 struct volume *tvolp, afs_int32 locktype)
    afs_int32 code, i, newvcache = 0, haveStatus = 0;
    afs_int32 getNewFid = 0;
    struct VenusFid nfid;
    register struct vcache *tvc;
    struct server *serverp = 0;
    struct AFSFetchStatus OutStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;

    if (!tvolp->rootVnode || getNewFid) {
        struct VenusFid tfid;

        tfid.Fid.Vnode = 0;    /* Means get rootfid of volume */
        origCBs = afs_allCBs;  /* ignore InitCallBackState */
        code = afs_RemoteLookup(&tfid, areq, (char *)0, &nfid,
                                &OutStatus, &CallBack, &serverp, &tsync);
            return (struct vcache *)0;
        /* ReleaseReadLock(&tvolp->lock); */
        ObtainWriteLock(&tvolp->lock,56);
        tvolp->rootVnode = afid->Fid.Vnode = nfid.Fid.Vnode;
        tvolp->rootUnique = afid->Fid.Unique = nfid.Fid.Unique;
        ReleaseWriteLock(&tvolp->lock);
        /* ObtainReadLock(&tvolp->lock); */
        afid->Fid.Vnode = tvolp->rootVnode;
        afid->Fid.Unique = tvolp->rootUnique;

    ObtainSharedLock(&afs_xvcache,7);
    for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
        if (!FidCmp(&(tvc->fid), afid)) {
            /* Grab this vnode, possibly reactivating from the free list */
            /* for the present (95.05.25) everything on the hash table is
             * definitively NOT in the free list -- at least until afs_reclaim
             * can be safely implemented */
            vg = vget((struct vnode *)tvc);  /* this bumps ref count */
#endif /* AFS_OSF_ENV */

    if (!haveStatus && (!tvc || !(tvc->states & CStatd))) {
        /* Mount point no longer stat'd or unknown. FID may have changed. */
        tvc = (struct vcache*)0;
        ReleaseSharedLock(&afs_xvcache);

        UpgradeSToWLock(&afs_xvcache,23);
        /* no cache entry, better grab one */
        tvc = afs_NewVCache(afid, (struct server *)0, 1, WRITE_LOCK);
        afs_stats_cmperf.vcacheMisses++;
        if (cached) *cached = 1;
        afs_stats_cmperf.vcacheHits++;
        /* we already bumped the ref count in the for loop above */
#else /* AFS_OSF_ENV */
    UpgradeSToWLock(&afs_xvcache,24);
    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
        refpanic ("GRVC VLRU inconsistent0");
    if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
        refpanic ("GRVC VLRU inconsistent1");
    if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
        refpanic ("GRVC VLRU inconsistent2");
    QRemove(&tvc->vlruq);  /* move to lruq head */
    QAdd(&VLRU, &tvc->vlruq);
    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
        refpanic ("GRVC VLRU inconsistent3");
    if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
        refpanic ("GRVC VLRU inconsistent4");
    if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
        refpanic ("GRVC VLRU inconsistent5");
    ReleaseWriteLock(&afs_xvcache);
    if (tvc->states & CStatd) {
        ObtainReadLock(&tvc->lock);
        tvc->states &= ~CUnique;
        tvc->callback = (struct server *)0;  /* redundant, perhaps */
        ReleaseReadLock(&tvc->lock);

    ObtainWriteLock(&tvc->lock,57);

    /* Is it always appropriate to throw away all the access rights? */
    afs_FreeAllAxs(&(tvc->Access));

    if (newvcache) tvc->states |= CForeign;
    if (tvolp->states & VRO) tvc->states |= CRO;
    if (tvolp->states & VBackup) tvc->states |= CBackup;
    /* now copy ".." entry back out of volume structure, if necessary */
    if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
        && (tvolp->rootUnique == afid->Fid.Unique)) {
    if (tvc->mvstat == 2 && tvolp->dotdot.Fid.Volume != 0) {
        tvc->mvid = (struct VenusFid *)osi_AllocSmallSpace(sizeof(struct VenusFid));
        *tvc->mvid = tvolp->dotdot;

    afs_RemoveVCB(afid);
        struct VenusFid tfid;

        tfid.Fid.Vnode = 0;    /* Means get rootfid of volume */
        origCBs = afs_allCBs;  /* ignore InitCallBackState */
        code = afs_RemoteLookup(&tfid, areq, (char *)0, &nfid, &OutStatus,
                                &CallBack, &serverp, &tsync);

        ObtainWriteLock(&afs_xcbhash, 467);
        afs_DequeueCallback(tvc);
        tvc->callback = (struct server *)0;
        tvc->states &= ~(CStatd|CUnique);
        ReleaseWriteLock(&afs_xcbhash);
        if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
            osi_dnlc_purgedp (tvc);  /* if it (could be) a directory */
        ReleaseWriteLock(&tvc->lock);
        ObtainReadLock(&afs_xvcache);
        ReleaseReadLock(&afs_xvcache);
        return (struct vcache *) 0;

    ObtainWriteLock(&afs_xcbhash, 468);
    if (origCBs == afs_allCBs) {
        tvc->states |= CTruth;
        tvc->callback = serverp;
        if (CallBack.ExpirationTime != 0) {
            tvc->cbExpires = CallBack.ExpirationTime+start;
            tvc->states |= CStatd;
            tvc->states &= ~CBulkFetching;
            afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
        } else if (tvc->states & CRO) {
            /* adapt gives us an hour. */
            tvc->cbExpires = 3600+osi_Time();  /*XXX*/
            tvc->states |= CStatd;
            tvc->states &= ~CBulkFetching;
            afs_QueueCallback(tvc, CBHash(3600), tvolp);
        afs_DequeueCallback(tvc);
        tvc->callback = (struct server *)0;
        tvc->states &= ~(CStatd | CUnique);
        if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
            osi_dnlc_purgedp (tvc);  /* if it (could be) a directory */
    ReleaseWriteLock(&afs_xcbhash);
    afs_ProcessFS(tvc, &OutStatus, areq);

    ReleaseWriteLock(&tvc->lock);
 * must be called with avc write-locked
 * don't absolutely have to invalidate the hint unless the dv has
 * changed, but be sure to get it right else there will be consistency bugs.
afs_int32 afs_FetchStatus(struct vcache *avc, struct VenusFid *afid,
                          struct vrequest *areq, struct AFSFetchStatus *Outsp)
    register struct conn *tc;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    struct volume *volp;

        tc = afs_Conn(afid, areq, SHARED_LOCK);
        avc->quick.stamp = 0; avc->h1.dchint = NULL;  /* invalidate hints */
            avc->callback = tc->srvr->server;
            XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
            code = RXAFS_FetchStatus(tc->id,
                                     (struct AFSFid *) &afid->Fid,
                                     Outsp, &CallBack, &tsync);
#ifdef RX_ENABLE_LOCKS
#endif /* RX_ENABLE_LOCKS */
        (afs_Analyze(tc, code, afid, areq,
                     AFS_STATS_FS_RPCIDX_FETCHSTATUS,
                     SHARED_LOCK, (struct cell *)0));

        afs_ProcessFS(avc, Outsp, areq);
        volp = afs_GetVolume(afid, areq, READ_LOCK);
        ObtainWriteLock(&afs_xcbhash, 469);
        avc->states |= CTruth;
        if (avc->callback /* check for race */) {
            if (CallBack.ExpirationTime != 0) {
                avc->cbExpires = CallBack.ExpirationTime+start;
                avc->states |= CStatd;
                avc->states &= ~CBulkFetching;
                afs_QueueCallback(avc, CBHash(CallBack.ExpirationTime), volp);
            else if (avc->states & CRO)
            {  /* ordinary callback on a read-only volume -- AFS 3.2 style */
                avc->cbExpires = 3600+start;
                avc->states |= CStatd;
                avc->states &= ~CBulkFetching;
                afs_QueueCallback(avc, CBHash(3600), volp);
                afs_DequeueCallback(avc);
                avc->callback = (struct server *)0;
                avc->states &= ~(CStatd|CUnique);
                if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
                    osi_dnlc_purgedp (avc);  /* if it (could be) a directory */
            afs_DequeueCallback(avc);
            avc->callback = (struct server *)0;
            avc->states &= ~(CStatd|CUnique);
            if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
                osi_dnlc_purgedp (avc);  /* if it (could be) a directory */
        ReleaseWriteLock(&afs_xcbhash);
        afs_PutVolume(volp, READ_LOCK);
    else {  /* used to undo the local callback, but that's too extreme.
             * There are plenty of good reasons that fetchstatus might return
             * an error, such as EPERM. If we have the vnode cached, statd,
             * with callback, might as well keep track of the fact that we
             * don't have access...
             */
        if (code == EPERM || code == EACCES) {
            struct axscache *ac;
            if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
            else  /* not found, add a new one if possible */
                afs_AddAxs(avc->Access, areq->uid, 0);
2174 * Stuff some information into the vcache for the given file.
2177 * afid : File in question.
2178 * OutStatus : Fetch status on the file.
2179 * CallBack : Callback info.
2180 * tc : RPC connection involved.
2181 * areq : vrequest involved.
2184 * Nothing interesting.
2187 afs_StuffVcache(afid, OutStatus, CallBack, tc, areq)
2188 register struct VenusFid *afid;
2189 struct AFSFetchStatus *OutStatus;
2190 struct AFSCallBack *CallBack;
2191 register struct conn *tc;
2192 struct vrequest *areq;
2194 { /*afs_StuffVcache*/
2196 register afs_int32 code, i, newvcache=0;
2197 register struct vcache *tvc;
2198 struct AFSVolSync tsync;
2200 struct axscache *ac;
2203 AFS_STATCNT(afs_StuffVcache);
2204 #ifdef IFS_VCACHECOUNT
2209 ObtainSharedLock(&afs_xvcache,8);
2211 tvc = afs_FindVCache(afid, 0, 0, &retry, DO_VLRU /* no stats */);
2213 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2214 ReleaseSharedLock(&afs_xvcache);
2215 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2221 /* no cache entry, better grab one */
2222 UpgradeSToWLock(&afs_xvcache,25);
2223 tvc = afs_NewVCache(afid, (struct server *)0, 1, WRITE_LOCK);
2225 ConvertWToSLock(&afs_xvcache);
2228 ReleaseSharedLock(&afs_xvcache);
2229 ObtainWriteLock(&tvc->lock,58);
2231 tvc->states &= ~CStatd;
2232 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2233 osi_dnlc_purgedp (tvc); /* if it (could be) a directory */
2235 /* Is it always appropriate to throw away all the access rights? */
2236 afs_FreeAllAxs(&(tvc->Access));
2238 /*Copy useful per-volume info*/
2239 tvp = afs_GetVolume(afid, areq, READ_LOCK);
2241 if (newvcache && (tvp->states & VForeign)) tvc->states |= CForeign;
2242 if (tvp->states & VRO) tvc->states |= CRO;
2243 if (tvp->states & VBackup) tvc->states |= CBackup;
2245 * Now, copy ".." entry back out of volume structure, if
2248 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2249 if (!tvc->mvid) tvc->mvid =
2250 (struct VenusFid *) osi_AllocSmallSpace(sizeof(struct VenusFid));
2251 *tvc->mvid = tvp->dotdot;
2254 /* store the stat on the file */
2255 afs_RemoveVCB(afid);
2256 afs_ProcessFS(tvc, OutStatus, areq);
2257 tvc->callback = tc->srvr->server;
2259 /* we use osi_Time twice below. Ideally, we would use the time at which
2260 * the FetchStatus call began, instead, but we don't have it here. So we
2261 * make do with "now". In the CRO case, it doesn't really matter. In
2262 * the other case, we hope that the difference between "now" and when the
2263 * call actually began execution on the server won't be larger than the
2264 * padding which the server keeps. Subtract 1 second anyway, to be on
2265 * the safe side. Can't subtract more because we don't know how big
2266 * ExpirationTime is. Possible consistency problems may arise if the call
2267 * timeout period becomes longer than the server's expiration padding. */
    ObtainWriteLock(&afs_xcbhash, 470);
    if (CallBack->ExpirationTime != 0) {
        tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
        tvc->states |= CStatd;
        tvc->states &= ~CBulkFetching;
        afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
    }
    else if (tvc->states & CRO) {
        /* old-fashioned AFS 3.2 style */
        tvc->cbExpires = 3600 + osi_Time();     /*XXX*/
        tvc->states |= CStatd;
        tvc->states &= ~CBulkFetching;
        afs_QueueCallback(tvc, CBHash(3600), tvp);
    }
    else {
        afs_DequeueCallback(tvc);
        tvc->callback = (struct server *)0;
        tvc->states &= ~(CStatd | CUnique);
        if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
            osi_dnlc_purgedp(tvc);      /* if it (could be) a directory */
    }
    ReleaseWriteLock(&afs_xcbhash);
    if (tvp)
        afs_PutVolume(tvp, READ_LOCK);
    /* look in per-pag cache */
    if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
        ac->axess = OutStatus->CallerAccess;    /* substitute pags */
    else                        /* not found, add a new one if possible */
        afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);

    ReleaseWriteLock(&tvc->lock);
    afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
               ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32, tvc->cbExpires,
               ICL_TYPE_INT32, tvc->cbExpires - osi_Time());
    /*
     * Release ref count... hope this guy stays around...
     */
    afs_PutVCache(tvc, WRITE_LOCK);
} /*afs_StuffVcache*/
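/*
 * Illustrative sketch (not part of the original source): afs_StuffVcache is
 * for callers that already hold fresh status and callback data from a
 * fileserver RPC and want it published into the stat cache without another
 * FetchStatus.  The helper name example_PublishFetchResults is an
 * assumption; compiled out.
 */
#if 0
static void example_PublishFetchResults(struct VenusFid *afid,
                                        struct AFSFetchStatus *OutStatus,
                                        struct AFSCallBack *CallBack,
                                        struct conn *tc,
                                        struct vrequest *areq)
{
    /* creates the vcache entry if needed, records the status and the
     * callback promise, and drops its own reference before returning */
    afs_StuffVcache(afid, OutStatus, CallBack, tc, areq);
}
#endif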
/*
 * afs_PutVCache
 *
 * Description:
 *      Decrements the reference count on a cache entry.
 *
 * Parameters:
 *      avc : Pointer to the cache entry to decrement.
 *
 * Environment:
 *      Nothing interesting.
 */
void afs_PutVCache(avc, locktype)
    register struct vcache *avc;
    afs_int32 locktype;
{ /*afs_PutVCache*/

    AFS_STATCNT(afs_PutVCache);
    /*
     * Can we use a read lock here?
     */
    ObtainReadLock(&afs_xvcache);
    AFS_FAST_RELE(avc);
    ReleaseReadLock(&afs_xvcache);
} /*afs_PutVCache*/
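/*
 * Illustrative sketch (not part of the original source): every successful
 * afs_FindVCache must be balanced by afs_PutVCache once the caller is done
 * with the entry, or the vcache can never be recycled.  The helper name and
 * the lock id 999 are assumptions; compiled out.
 */
#if 0
static void example_LookupAndRelease(struct VenusFid *afid)
{
    struct vcache *tvc;
    afs_int32 retry = 0;

    ObtainSharedLock(&afs_xvcache, 999);        /* FindVCache needs xvcache */
    tvc = afs_FindVCache(afid, 0, 0, &retry, DO_STATS);
    ReleaseSharedLock(&afs_xvcache);

    if (tvc) {
        /* ... use the entry: tvc->states, tvc->fid, ... */
        afs_PutVCache(tvc, READ_LOCK);  /* balance the hold FindVCache took */
    }
}
#endif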
/*
 * afs_FindVCache
 *
 * Description:
 *      Find a vcache entry given a fid.
 *
 * Parameters:
 *      afid : Pointer to the fid whose cache entry we desire.
 *      retry: (SGI-specific) tell the caller to drop the lock on xvcache,
 *             unlock the vnode, and try again.
 *      flags: bit 1 to specify whether to compute hit statistics.  Not
 *             set if FindVCache is called as part of internal bookkeeping.
 *
 * Environment:
 *      Must be called with the afs_xvcache lock at least held at
 *      the read level.  In order to do the VLRU adjustment, the xvcache lock
 *      must be shared-- we upgrade it here.
 */
struct vcache *afs_FindVCache(struct VenusFid *afid, afs_int32 lockit,
                              afs_int32 locktype, afs_int32 *retry, afs_int32 flag)
{ /*afs_FindVCache*/

    register struct vcache *tvc;
    afs_int32 i;

    AFS_STATCNT(afs_FindVCache);

    i = VCHash(afid);
    for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
        if (FidMatches(afid, tvc)) {
#ifdef AFS_OSF_ENV
            /* Grab this vnode, possibly reactivating from the free list */
            int vg;
            AFS_GUNLOCK();
            vg = vget((struct vnode *)tvc);
            AFS_GLOCK();
            if (vg)
                continue;
#endif /* AFS_OSF_ENV */
            break;
        }
    }

    /* should I have a read lock on the vnode here? */
    if (tvc) {
        if (retry) *retry = 0;
#if !defined(AFS_OSF_ENV)
        osi_vnhold(tvc, retry); /* already held, above */
        if (retry && *retry)
            return 0;
#endif
        /*
         * only move to front of vlru if we have proper vcache locking
         */
        if (flag & DO_VLRU) {
            if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
                refpanic("FindVC VLRU inconsistent1");
            }
            if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
                refpanic("FindVC VLRU inconsistent1");
            }
            if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
                refpanic("FindVC VLRU inconsistent2");
            }
            UpgradeSToWLock(&afs_xvcache, 26);
            QRemove(&tvc->vlruq);
            QAdd(&VLRU, &tvc->vlruq);
            ConvertWToSLock(&afs_xvcache);
            if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
                refpanic("FindVC VLRU inconsistent1");
            }
            if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
                refpanic("FindVC VLRU inconsistent2");
            }
            if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
                refpanic("FindVC VLRU inconsistent3");
            }
        }
        vcachegen++;
    }
    if (flag & DO_STATS) {
        if (tvc) afs_stats_cmperf.vcacheHits++;
        else afs_stats_cmperf.vcacheMisses++;
        if (afid->Cell == LOCALCELL)
            afs_stats_cmperf.vlocalAccesses++;
        else
            afs_stats_cmperf.vremoteAccesses++;
    }

#ifdef AFS_LINUX22_ENV
    if (tvc && (tvc->states & CStatd))
        vcache2inode(tvc);      /* mainly to reset i_nlink */
#endif
#ifdef AFS_DARWIN_ENV
    if (tvc)
        osi_VM_Setup(tvc);
#endif
    return tvc;
} /*afs_FindVCache*/
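/*
 * Illustrative sketch (not part of the original source): the refpanic
 * checks above test the invariant of the circular afs_q queue -- every
 * node's neighbors must point back at it.  A generic checker would look
 * like this; the helper name example_QCheck is an assumption; compiled out.
 */
#if 0
static int example_QCheck(struct afs_q *head)
{
    struct afs_q *tq;

    for (tq = head->next; tq != head; tq = tq->next) {
        if (tq->next->prev != tq || tq->prev->next != tq)
            return 0;           /* corrupt: a neighbor does not point back */
    }
    return 1;                   /* queue is self-consistent */
}
#endif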
/*
 * afs_NFSFindVCache
 *
 * Description:
 *      Find a vcache entry given a fid.  Does a wildcard match on what we
 *      have for the fid.  If more than one entry, don't return anything.
 *
 * Parameters:
 *      avcp : Fill in pointer if we found one and only one.
 *      afid : Pointer to the fid whose cache entry we desire.
 *      retry: (SGI-specific) tell the caller to drop the lock on xvcache,
 *             unlock the vnode, and try again.
 *      flags: bit 1 to specify whether to compute hit statistics.  Not
 *             set if FindVCache is called as part of internal bookkeeping.
 *
 * Environment:
 *      Must be called with the afs_xvcache lock at least held at
 *      the read level.  In order to do the VLRU adjustment, the xvcache lock
 *      must be shared-- we upgrade it here.
 *
 * Return value:
 *      number of matches found.
 */

int afs_duplicate_nfs_fids = 0;
afs_int32 afs_NFSFindVCache(avcp, afid, lockit)
    struct vcache **avcp;
    struct VenusFid *afid;
    afs_int32 lockit;
{ /*afs_NFSFindVCache*/

    register struct vcache *tvc;
    afs_int32 i;
    afs_int32 retry = 0;
    afs_int32 count = 0;
    struct vcache *found_tvc = NULL;

    AFS_STATCNT(afs_FindVCache);

#ifndef AFS_OSF_ENV
 loop:
#endif

    ObtainSharedLock(&afs_xvcache, 331);

    i = VCHash(afid);
    for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
        /* Match only on what we have.... */
        if (((tvc->fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
            && (tvc->fid.Fid.Volume == afid->Fid.Volume)
            && ((tvc->fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
            && (tvc->fid.Cell == afid->Cell)) {
#ifdef AFS_OSF_ENV
            /* Grab this vnode, possibly reactivating from the free list */
            int vg;
            AFS_GUNLOCK();
            vg = vget((struct vnode *)tvc);
            AFS_GLOCK();
            if (vg) {
                /* This vnode no longer exists. */
                continue;
            }
#endif /* AFS_OSF_ENV */
            count++;
            if (count > 1) {
                /* Duplicates */
#ifdef AFS_OSF_ENV
                /* Drop our reference counts. */
                vrele((struct vnode *)tvc);
                vrele((struct vnode *)found_tvc);
#endif
                afs_duplicate_nfs_fids++;
                ReleaseSharedLock(&afs_xvcache);
                return count;
            }
            found_tvc = tvc;
        }
    }

    tvc = found_tvc;
    /* should I have a read lock on the vnode here? */
    if (tvc) {
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
        osi_vnhold(tvc, &retry);
        if (retry) {
            count = 0;
            found_tvc = (struct vcache *)0;
            ReleaseSharedLock(&afs_xvcache);
            spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
            goto loop;
        }
#else
#if !defined(AFS_OSF_ENV)
        osi_vnhold(tvc, (int *)0);      /* already held, above */
#endif
#endif
        /*
         * We obtained the xvcache lock above.
         */
        if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
            refpanic("FindVC VLRU inconsistent1");
        }
        if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
            refpanic("FindVC VLRU inconsistent1");
        }
        if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
            refpanic("FindVC VLRU inconsistent2");
        }
        UpgradeSToWLock(&afs_xvcache, 568);
        QRemove(&tvc->vlruq);
        QAdd(&VLRU, &tvc->vlruq);
        ConvertWToSLock(&afs_xvcache);
        if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
            refpanic("FindVC VLRU inconsistent1");
        }
        if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
            refpanic("FindVC VLRU inconsistent2");
        }
        if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
            refpanic("FindVC VLRU inconsistent3");
        }
        vcachegen++;
    }

    if (tvc) afs_stats_cmperf.vcacheHits++;
    else afs_stats_cmperf.vcacheMisses++;
    if (afid->Cell == LOCALCELL)
        afs_stats_cmperf.vlocalAccesses++;
    else
        afs_stats_cmperf.vremoteAccesses++;

    *avcp = tvc;        /* May be null */

    ReleaseSharedLock(&afs_xvcache);
    return (tvc ? 1 : 0);
} /*afs_NFSFindVCache*/
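/*
 * Illustrative sketch (not part of the original source): an NFS file handle
 * apparently cannot carry a full-width AFS fid, so the translator truncates
 * Vnode to 16 bits and Unique to 24 bits; the masked comparison above undoes
 * that.  The helper name example_NFSFidMatches is an assumption; compiled out.
 */
#if 0
static int example_NFSFidMatches(struct vcache *tvc, struct VenusFid *afid)
{
    /* afid holds the truncated values recovered from the NFS handle */
    return ((tvc->fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode
            && tvc->fid.Fid.Volume == afid->Fid.Volume
            && (tvc->fid.Fid.Unique & 0xffffff) == afid->Fid.Unique
            && tvc->fid.Cell == afid->Cell);
}
#endif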
/*
 * afs_vcacheInit
 *
 * Initialize vcache related variables
 */
void afs_vcacheInit(int astatSize)
{
    register struct vcache *tvp;
    int i;
#if defined(AFS_OSF_ENV)
    if (!afs_maxvcount) {
#if defined(AFS_OSF30_ENV)
        afs_maxvcount = max_vnodes / 2; /* limit ourselves to half the total */
#else
        afs_maxvcount = nvnode / 2;     /* limit ourselves to half the total */
#endif
        if (astatSize < afs_maxvcount) {
            afs_maxvcount = astatSize;
        }
    }
#else /* AFS_OSF_ENV */
    freeVCList = (struct vcache *)0;
#endif

    RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
    LOCK_INIT(&afs_xvcb, "afs_xvcb");
#if !defined(AFS_OSF_ENV)
    /* Allocate and thread the struct vcache entries */
    tvp = (struct vcache *) afs_osi_Alloc(astatSize * sizeof(struct vcache));
    memset((char *)tvp, 0, sizeof(struct vcache) * astatSize);

    Initial_freeVCList = tvp;
    freeVCList = &(tvp[0]);
    for (i = 0; i < astatSize - 1; i++) {
        tvp[i].nextfree = &(tvp[i + 1]);
    }
    tvp[astatSize - 1].nextfree = (struct vcache *)0;
#ifdef AFS_AIX32_ENV
    pin((char *)tvp, astatSize * sizeof(struct vcache));        /* XXX */
#endif
#endif
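/*
 * Illustrative sketch (not part of the original source): threading the
 * array into a singly linked list, as above, makes allocation and release
 * O(1) pops and pushes on freeVCList.  The helper names are assumptions,
 * and callers are assumed to serialize on afs_xvcache; compiled out.
 */
#if 0
static struct vcache *example_AllocVCache(void)
{
    struct vcache *tvc = freeVCList;
    if (tvc)
        freeVCList = tvc->nextfree;     /* pop the head of the free list */
    return tvc;
}

static void example_FreeVCache(struct vcache *tvc)
{
    tvc->nextfree = freeVCList;         /* push back onto the free list */
    freeVCList = tvc;
}
#endif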
#if defined(AFS_SGI_ENV)
    for (i = 0; i < astatSize; i++) {
        char name[METER_NAMSZ];
        struct vcache *tvc = &tvp[i];

        tvc->v.v_number = ++afsvnumbers;
        tvc->vc_rwlockid = OSI_NO_LOCKID;
        initnsema(&tvc->vc_rwlock, 1, makesname(name, "vrw", tvc->v.v_number));
#ifndef AFS_SGI53_ENV
        initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
#endif
#ifndef AFS_SGI62_ENV
        initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
#endif /* AFS_SGI62_ENV */
    }
#endif

    QInit(&VLRU);
}

/*
 * shutdown_vcache
 */
void shutdown_vcache(void)
{
    int i;
    struct afs_cbr *tsp, *nsp;
    /*
     * XXX We may potentially miss some of the vcaches because if there are
     * no free vcache entries and all the vcache entries are active ones,
     * then we allocate an additional one - admittedly we almost never had
     * that occur.
     */
#if !defined(AFS_OSF_ENV)
    afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
#endif
#ifdef AFS_AIX32_ENV
    unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
#endif

    {
        register struct afs_q *tq, *uq;
        register struct vcache *tvc;
        for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
            tvc = QTOV(tq);
            uq = QPrev(tq);
            if (tvc->mvid) {
                osi_FreeSmallSpace(tvc->mvid);
                tvc->mvid = (struct VenusFid *)0;
            }
#ifdef AFS_AIX_ENV
            aix_gnode_rele((struct vnode *)tvc);
#endif
            if (tvc->linkData) {
                afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
                tvc->linkData = 0;
            }
        }
        /*
         * Also free the remaining ones in the Cache
         */
        for (i = 0; i < VCSIZE; i++) {
            for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
                if (tvc->mvid) {
                    osi_FreeSmallSpace(tvc->mvid);
                    tvc->mvid = (struct VenusFid *)0;
                }
#ifdef AFS_AIX_ENV
                if (tvc->v.v_gnode)
                    afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
#ifdef AFS_AIX32_ENV
                if (tvc->segid) {
                    AFS_GUNLOCK();
                    vms_delete(tvc->segid);
                    AFS_GLOCK();
                    tvc->segid = tvc->vmh = NULL;
                    if (VREFCOUNT(tvc)) osi_Panic("flushVcache: vm race");
                }
                if (tvc->credp) {
                    crfree(tvc->credp);
                    tvc->credp = NULL;
                }
#endif
#endif
#if defined(AFS_SUN5_ENV)
                if (tvc->credp) {
                    crfree(tvc->credp);
                    tvc->credp = NULL;
                }
#endif
                if (tvc->linkData) {
                    afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
                    tvc->linkData = 0;
                }

                afs_FreeAllAxs(&(tvc->Access));
            }
            afs_vhashT[i] = 0;
        }
    }

    /*
     * Free any leftover callback queue
     */
    for (tsp = afs_cbrSpace; tsp; tsp = nsp) {
        nsp = tsp->next;
        afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
    }
    afs_cbrSpace = 0;

#if !defined(AFS_OSF_ENV)
    freeVCList = Initial_freeVCList = 0;
#endif
    RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
    LOCK_INIT(&afs_xvcb, "afs_xvcb");
}