 * Copyright 2000, International Business Machines Corporation and others.
 * This software has been released under the terms of the IBM Public
 * License. For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 * afs_FlushActiveVcaches
#include <afsconfig.h>
#include "../afs/param.h"

#include "../afs/sysincludes.h"	/* Standard vendor system headers */
#include "../afs/afsincludes.h"	/* AFS-based standard headers */
#include "../afs/afs_stats.h"
#include "../afs/afs_cbqueue.h"
#include "../afs/afs_osidnlc.h"
afs_int32 afs_maxvcount = 0;	/* max number of vcache entries */
afs_int32 afs_vcount = 0;	/* number of vcache in use now */
#if defined(AFS_OSF30_ENV)
extern int max_vnodes;		/* number of total system vnodes */
extern int nvnode;		/* number of total system vnodes */
extern int numvnodes;		/* number vnodes in use now */
#endif /* AFS_OSF_ENV */

/* Imported variables */
extern struct server *afs_servers[NSERVERS];
extern afs_rwlock_t afs_xserver;
extern afs_rwlock_t afs_xcbhash;
extern struct vcache *afs_globalVp;
extern struct mount *afs_globalVFS;
extern struct vnodeops Afs_vnodeops;
#elif defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
extern struct mount *afs_globalVFS;
extern struct vfs *afs_globalVFS;
#endif /* AFS_OSF_ENV */
#if defined(AFS_DUX40_ENV)
extern struct vfs_ubcops afs_ubcops;
extern struct vnodeops Afs_vnodeops;
#endif /* AFS_SGI64_ENV */

/* Exported variables */
afs_rwlock_t afs_xvcache;		/* Lock: alloc new stat cache entries */
afs_lock_t afs_xvcb;			/* Lock: fids on which there are callbacks */
struct vcache *freeVCList;		/* Free list for stat cache entries */
struct vcache *Initial_freeVCList;	/* Initial list for above */
struct afs_q VLRU;			/* vcache LRU */
afs_int32 vcachegen = 0;
unsigned int afs_paniconwarn = 0;
struct vcache *afs_vhashT[VCSIZE];
afs_int32 afs_bulkStatsLost;
int afs_norefpanic = 0;

/* Forward declarations */
static afs_int32 afs_QueueVCB(struct vcache *avc);

/*
 * Flush the given vcache entry.
 *
 * avc   : Pointer to vcache entry to flush.
 * slept : Pointer to int; set to 1 if we slept/dropped locks, 0 if we didn't.
 *
 * The afs_xvcache lock must be held for writing upon entry to
 * prevent people from changing the vrefCount field, and to
 * protect the lruq and hnext fields.
 * LOCK: afs_FlushVCache afs_xvcache W
 * REFCNT: vcache ref count must be zero on entry except for osf1
 * RACE: lock is dropped and reobtained, permitting race in caller
 */
int afs_FlushVCache(struct vcache *avc, int *slept)
{ /*afs_FlushVCache*/
    register afs_int32 i, code;
    register struct vcache **uvc, *wvc, *tvc;

    AFS_STATCNT(afs_FlushVCache);
    afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_INT32, avc->states);
    VN_LOCK(AFSTOV(avc));
    code = osi_VM_FlushVCache(avc, slept);
    if (avc->states & CVFlushed) {
    if (avc->nextfree || !avc->vlruq.prev || !avc->vlruq.next) {	/* qv afs.h */
	refpanic("LRU vs. Free inconsistency");
    avc->states |= CVFlushed;

    /* pull the entry out of the lruq and put it on the free list */
    QRemove(&avc->vlruq);
    avc->vlruq.prev = avc->vlruq.next = (struct afs_q *)0;

    /* keep track of # of files that we bulk stat'd, but never used
     * before they got recycled.
     */
    if (avc->states & CBulkStat)
	afs_bulkStatsLost++;

    /* remove entry from the hash chain */
    i = VCHash(&avc->fid);
    uvc = &afs_vhashT[i];
    for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
    avc->hnext = (struct vcache *)NULL;
    if (!wvc) osi_Panic("flushvcache");	/* not in correct hash bucket */

    if (avc->mvid) osi_FreeSmallSpace(avc->mvid);
    avc->mvid = (struct VenusFid *)0;
	afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
	avc->linkData = NULL;
    afs_FreeAllAxs(&(avc->Access));

    /* we can't really give back callbacks on RO files, since the
     * server only tracks them on a per-volume basis, and we don't
     * know whether we still have some other files from the same
     * volume. */
    if ((avc->states & CRO) == 0 && avc->callback) {
    ObtainWriteLock(&afs_xcbhash, 460);
    afs_DequeueCallback(avc);		/* remove it from queued callbacks list */
    avc->states &= ~(CStatd | CUnique);
    ReleaseWriteLock(&afs_xcbhash);
    afs_symhint_inval(avc);
    if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
	osi_dnlc_purgedp(avc);		/* if it (could be) a directory */
    else
	osi_dnlc_purgevp(avc);

    /*
     * Next, keep track of which vnodes we've deleted for create's
     * optimistic synchronization algorithm
     */
    if (avc->fid.Fid.Vnode & 1) afs_oddZaps++;
#if !defined(AFS_OSF_ENV)
    /* put the entry in the free list */
    avc->nextfree = freeVCList;
    if (avc->vlruq.prev || avc->vlruq.next) {
	refpanic("LRU vs. Free inconsistency");
    }
    /* This should put it back on the vnode free list since usecount is 1 */
    if (VREFCOUNT(avc) > 0) {
	VN_UNLOCK(AFSTOV(avc));
	AFS_RELE(AFSTOV(avc));
    } else {
	if (afs_norefpanic) {
	    printf("flush vc refcnt < 1");
	    (void) vgone(avc, VX_NOSLEEP, (struct vnodeops *)0);
	    VN_UNLOCK(AFSTOV(avc));
	}
	else osi_Panic("flush vc refcnt < 1");
    }
#endif /* AFS_OSF_ENV */
    avc->states |= CVFlushed;
    VN_UNLOCK(AFSTOV(avc));
} /*afs_FlushVCache*/
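
/*
 * Illustrative sketch (not part of the original source) of the calling
 * convention afs_FlushVCache expects: the caller holds afs_xvcache for
 * writing, only hands in entries whose ref count permits flushing, and
 * restarts any scan when *slept reports that the lock was dropped.  The
 * lock number and the surrounding scan are hypothetical.
 */
#if 0
{
    int fv_slept;
    afs_int32 code;

    ObtainWriteLock(&afs_xvcache, 999);
    code = afs_FlushVCache(tvc, &fv_slept);
    if (fv_slept) {
	/* afs_xvcache was dropped and reobtained inside the call, so any
	 * list pointers gathered before it may be stale: rescan the VLRU. */
    }
    ReleaseWriteLock(&afs_xvcache);
}
#endif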

/*
 * The core of the inactive vnode op for all but IRIX.
 */
void afs_InactiveVCache(struct vcache *avc, struct AFS_UCRED *acred)
{
    extern afs_rwlock_t afs_xdcache, afs_xvcache;

    AFS_STATCNT(afs_inactive);
    if (avc->states & CDirty) {
	/* we can't keep trying to push back dirty data forever.  Give up. */
	afs_InvalidateAllSegments(avc);	/* turns off dirty bit */
    }
    avc->states &= ~CMAPPED;	/* mainly used by SunOS 4.0.x */
    avc->states &= ~CDirty;	/* Turn it off */
    if (avc->states & CUnlinked) {
	if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
	    avc->states |= CUnlinkedDel;
	    return;
	}
	afs_remunlink(avc, 1);	/* ignore any return code */
    }
}

/*
 * Description: allocate a callback return structure from the
 * free list and return it.
 *
 * Env: The alloc and free routines are both called with the afs_xvcb lock
 * held, so we don't have to worry about blocking in osi_Alloc.
 */
static struct afs_cbr *afs_cbrSpace = 0;

struct afs_cbr *afs_AllocCBR() {
    register struct afs_cbr *tsp;
    int i;

    while (!afs_cbrSpace) {
	if (afs_stats_cmperf.CallBackAlloced >= 2) {
	    /* don't allocate more than 2 * AFS_NCBRS for now */
	    afs_stats_cmperf.CallBackFlushes++;
	}
	tsp = (struct afs_cbr *) afs_osi_Alloc(AFS_NCBRS * sizeof(struct afs_cbr));
	for (i = 0; i < AFS_NCBRS - 1; i++) {
	    tsp[i].next = &tsp[i + 1];
	}
	tsp[AFS_NCBRS - 1].next = 0;
	afs_stats_cmperf.CallBackAlloced++;
    }
    tsp = afs_cbrSpace;
    afs_cbrSpace = tsp->next;
    return tsp;
}

/*
 * Description: free a callback return structure.
 *
 * asp -- the address of the structure to free.
 *
 * Environment: the xvcb lock is held over these calls.
 */
afs_FreeCBR(asp)
register struct afs_cbr *asp; {
    asp->next = afs_cbrSpace;
    afs_cbrSpace = asp;
}
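
/*
 * Illustrative sketch (not part of the original source): afs_AllocCBR and
 * afs_FreeCBR are only ever bracketed by the afs_xvcb lock, which is why
 * neither routine takes a lock of its own.  afs_QueueVCB and afs_FlushVCBs
 * below are the real callers; the lock number here is arbitrary.
 */
#if 0
{
    struct afs_cbr *tcbp;

    MObtainWriteLock(&afs_xvcb, 999);
    tcbp = afs_AllocCBR();
    /* ... fill in tcbp->fid and thread it onto a server's cbrs list,
     * or hand it back with afs_FreeCBR(tcbp) ... */
    MReleaseWriteLock(&afs_xvcb);
}
#endif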

/*
 * Description: flush all queued callbacks to all servers.
 *
 * Environment: holds xvcb lock over RPC to guard against race conditions
 * when a new callback is granted for the same file later on.
 */
afs_int32 afs_FlushVCBs(afs_int32 lockit)
{
    struct AFSFid tfids[AFS_MAXCBRSCALL];
    struct AFSCallBack callBacks[1];
    struct AFSCBFids fidArray;
    struct AFSCBs cbArray;
    afs_int32 code;
    struct afs_cbr *tcbrp;
    int tcount;
    struct server *tsp;
    int i;
    struct vrequest treq;
    struct conn *tc;
    int safety1, safety2, safety3;
    extern int afs_totalServers;

    if ((code = afs_InitReq(&treq, &afs_osi_cred))) return code;
    treq.flags |= O_NONBLOCK;

    if (lockit) MObtainWriteLock(&afs_xvcb, 273);
    ObtainReadLock(&afs_xserver);
    for (i = 0; i < NSERVERS; i++) {
	for (safety1 = 0, tsp = afs_servers[i];
	     tsp && safety1 < afs_totalServers + 10; tsp = tsp->next, safety1++) {
	    if (tsp->cbrs == (struct afs_cbr *)0) continue;

	    /* otherwise, grab a block of AFS_MAXCBRSCALL from the list
	     * and make an RPC, over and over again.
	     */
	    tcount = 0;	/* number found so far */
	    for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
		if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
		    /* if buffer is full, or we've queued all we're going
		     * to from this server, we should flush out the
		     * callbacks.
		     */
		    fidArray.AFSCBFids_len = tcount;
		    fidArray.AFSCBFids_val = (struct AFSFid *)tfids;
		    cbArray.AFSCBs_len = 1;
		    cbArray.AFSCBs_val = callBacks;
		    callBacks[0].CallBackType = CB_EXCLUSIVE;
		    for (safety3 = 0; safety3 < MAXHOSTS * 2; safety3++) {
			tc = afs_ConnByHost(tsp, tsp->cell->fsport,
					    tsp->cell->cell, &treq, 0,
					    SHARED_LOCK);
			if (tc) {
			    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
			    code = RXAFS_GiveUpCallBacks(tc->id, &fidArray,
							 &cbArray);
			}
			if (!afs_Analyze(tc, code, 0, &treq,
					 AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS,
					 SHARED_LOCK, tsp->cell)) {
			    break;
			}
		    }
		    /* ignore return code, since callbacks may have
		     * been returned anyway, we shouldn't leave them
		     * around to be returned again.
		     *
		     * Next, see if we are done with this server, and if so,
		     * break to deal with the next one.
		     */
		    if (!tsp->cbrs) break;
		    tcount = 0;
		} /* if to flush full buffer */
		/* if we make it here, we have an entry at the head of cbrs,
		 * which we should copy to the file ID array and then free.
		 */
		tcbrp = tsp->cbrs;
		tfids[tcount++] = tcbrp->fid;
		tsp->cbrs = tcbrp->next;
		afs_FreeCBR(tcbrp);
	    } /* while loop for this one server */
	    if (safety2 > afs_cacheStats) {
		afs_warn("possible internal error afs_flushVCBs (%d)\n", safety2);
	    }
	} /* for loop for this hash chain */
    } /* loop through all hash chains */
    if (safety1 > afs_totalServers + 2) {
	afs_warn("AFS internal error (afs_flushVCBs) (%d > %d), continuing...\n",
		 safety1, afs_totalServers + 2);
	if (afs_paniconwarn)
	    osi_Panic("afs_flushVCBS safety1");
    }
    ReleaseReadLock(&afs_xserver);
    if (lockit) MReleaseWriteLock(&afs_xvcb);
    return 0;
}

/*
 * Queue a callback on the given fid.
 *
 * Locks the xvcb lock.
 * Called when the xvcache lock is already held.
 */
static afs_int32 afs_QueueVCB(struct vcache *avc)
{
    register struct server *tsp;
    register struct afs_cbr *tcbp;

    AFS_STATCNT(afs_QueueVCB);
    /* The callback is really just a struct server ptr. */
    tsp = (struct server *)(avc->callback);

    /* we now have a pointer to the server, so we just allocate
     * a queue entry and queue it.
     */
    MObtainWriteLock(&afs_xvcb, 274);
    tcbp = afs_AllocCBR();
    tcbp->fid = avc->fid.Fid;
    tcbp->next = tsp->cbrs;
    tsp->cbrs = tcbp;

    /* now release locks and return */
    MReleaseWriteLock(&afs_xvcb);
    return 0;
}
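
/*
 * Illustrative sketch (not part of the original source): afs_QueueVCB is
 * called with afs_xvcache already held, typically just before an entry is
 * recycled, along the lines of the callback handling in afs_FlushVCache
 * above.
 */
#if 0
if ((avc->states & CRO) == 0 && avc->callback)
    afs_QueueVCB(avc);		/* schedule a GiveUpCallBacks for this fid */
#endif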

#ifdef AFS_LINUX22_ENV

static void __shrink_dcache_parent(struct dentry *parent)
{
    struct dentry *this_parent = parent;
    struct list_head *next;
    LIST_HEAD(afs_dentry_unused);

    next = this_parent->d_subdirs.next;
    while (next != &this_parent->d_subdirs) {
	struct list_head *tmp = next;
	struct dentry *dentry = list_entry(tmp, struct dentry, d_child);

	if (!atomic_read(&dentry->d_count)) {
	    list_del(&dentry->d_lru);
	    list_add(&dentry->d_lru, afs_dentry_unused.prev);
	}
	/*
	 * Descend a level if the d_subdirs list is non-empty.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
	    this_parent = dentry;
	}
    }
    /*
     * All done at this level ... ascend and resume the search.
     */
    if (this_parent != parent) {
	next = this_parent->d_child.next;
	this_parent = this_parent->d_parent;
    }

	struct dentry *dentry;
	struct list_head *tmp;

	tmp = afs_dentry_unused.prev;
	if (tmp == &afs_dentry_unused)
	    break;
	dentry = list_entry(tmp, struct dentry, d_lru);

	/* Unused dentry with a count? */
	if (atomic_read(&dentry->d_count))
	    BUG();

	list_del_init(&dentry->d_hash);	/* d_drop */
	spin_unlock(&dcache_lock);

/* afs_TryFlushDcacheChildren -- Shakes loose vcache references held by
 * children of the dentry
 *
 * LOCKS -- Called with afs_xvcache write locked.  Drops and reacquires
 *	AFS_GLOCK, so it can call dput, which may call iput, but
 *	keeps afs_xvcache exclusively.
 *
 * Tree traversal algorithm from fs/dcache.c: select_parent()
 */
static void afs_TryFlushDcacheChildren(struct vcache *tvc)
{
    struct inode *ip = AFSTOI(tvc);
    struct dentry *this_parent;
    struct list_head *next;
    struct list_head *cur;
    struct list_head *head = &ip->i_dentry;
    struct dentry *dentry;

#ifndef old_vcache_scheme
    cur = head;
    while ((cur = cur->next) != head) {
	dentry = list_entry(cur, struct dentry, d_alias);

	afs_Trace3(afs_iclSetp, CM_TRACE_TRYFLUSHDCACHECHILDREN,
		   ICL_TYPE_POINTER, ip,
		   ICL_TYPE_STRING, dentry->d_parent->d_name.name,
		   ICL_TYPE_STRING, dentry->d_name.name);

	if (!list_empty(&dentry->d_hash) && !list_empty(&dentry->d_subdirs))
	    __shrink_dcache_parent(dentry);

	if (!atomic_read(&dentry->d_count)) {
	    list_del_init(&dentry->d_hash);	/* d_drop */
    while ((cur = cur->next) != head) {
	dentry = list_entry(cur, struct dentry, d_alias);

	afs_Trace3(afs_iclSetp, CM_TRACE_TRYFLUSHDCACHECHILDREN,
		   ICL_TYPE_POINTER, ip,
		   ICL_TYPE_STRING, dentry->d_parent->d_name.name,
		   ICL_TYPE_STRING, dentry->d_name.name);

	if (!DCOUNT(dentry)) {
#endif /* AFS_LINUX22_ENV */

/*
 * Remove a queued callback by looking through all the servers
 * to see if any have this callback queued.
 *
 * afid: The fid we want cleansed of queued callbacks.
 *
 * Locks xvcb and xserver locks.
 * Typically called with xdcache, xvcache and/or individual vcache
 * entries locked.
 */
afs_RemoveVCB(afid)
register struct VenusFid *afid;
{
    register int i;
    register struct server *tsp;
    register struct afs_cbr *tcbrp;
    struct afs_cbr **lcbrpp;

    AFS_STATCNT(afs_RemoveVCB);
    MObtainWriteLock(&afs_xvcb, 275);
    ObtainReadLock(&afs_xserver);
    for (i = 0; i < NSERVERS; i++) {
	for (tsp = afs_servers[i]; tsp; tsp = tsp->next) {
	    /* if cell is known, and is wrong, then skip this server */
	    if (tsp->cell && tsp->cell->cell != afid->Cell) continue;

	    /*
	     * Otherwise, iterate through file IDs we're sending to the
	     * server.
	     */
	    lcbrpp = &tsp->cbrs;	/* first queued return callback */
	    for (tcbrp = *lcbrpp; tcbrp; lcbrpp = &tcbrp->next, tcbrp = *lcbrpp) {
		if (afid->Fid.Volume == tcbrp->fid.Volume &&
		    afid->Fid.Unique == tcbrp->fid.Unique &&
		    afid->Fid.Vnode == tcbrp->fid.Vnode) {
		    *lcbrpp = tcbrp->next;	/* unthread from list */
		    afs_FreeCBR(tcbrp);
		}
	    }
	}
    }
    ReleaseReadLock(&afs_xserver);
    MReleaseWriteLock(&afs_xvcb);
    return 0;
}

/*
 * This routine is responsible for allocating a new cache entry
 * from the free list.  It formats the cache entry and inserts it
 * into the appropriate hash tables.  It must be called with
 * afs_xvcache write-locked so as to prevent several processes from
 * trying to create a new cache entry simultaneously.
 *
 * afid : The file id of the file whose cache entry is being
 *	  created.
 */
/* LOCK: afs_NewVCache  afs_xvcache W */
struct vcache *afs_NewVCache(struct VenusFid *afid, struct server *serverp,
			     afs_int32 lockit, afs_int32 locktype)
{
    afs_int32 anumber = VCACHE_FREE;
    struct gnode *gnodepnt;
    struct vm_info *vm_info_ptr;
#endif /* AFS_MACH_ENV */
#endif /* AFS_OSF_ENV */
    struct afs_q *tq, *uq;

    AFS_STATCNT(afs_NewVCache);
    if (afs_vcount >= afs_maxvcount)
    /*
     * If we are using > 33% of the total system vnodes for AFS vcache
     * entries or we are using the maximum number of vcache entries,
     * then free some.  (If our usage is > 33% we should free some; if
     * our usage is > afs_maxvcount, set elsewhere to 0.5*nvnode,
     * we _must_ free some -- no choice.)
     */
    if (((3 * afs_vcount) > nvnode) || (afs_vcount >= afs_maxvcount))
    {
	struct afs_q *tq, *uq;
	int i; char *panicstr;

	for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
	    if (tvc->states & CVFlushed)
		refpanic("CVFlushed on VLRU");
	    else if (i++ > afs_maxvcount)
		refpanic("Exceeded pool of AFS vnodes (VLRU cycle?)");
	    else if (QNext(uq) != tq)
		refpanic("VLRU inconsistent");
	    else if (VREFCOUNT(tvc) < 1)
		refpanic("refcnt 0 on VLRU");

	    if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
		&& (tvc->states & CUnlinkedDel) == 0) {
		code = afs_FlushVCache(tvc, &fv_slept);
		    continue;	/* start over - may have raced. */
	    }
	}
	if (anumber == VCACHE_FREE) {
	    printf("NewVCache: warning none freed, using %d of %d\n",
		   afs_vcount, afs_maxvcount);
	    if (afs_vcount >= afs_maxvcount) {
		osi_Panic("NewVCache - none freed");
		/* XXX instead of panicking, should do afs_maxvcount++
		   and magic up another one */
	    }
	}
    }
    if (getnewvnode(MOUNT_AFS, &Afs_vnodeops, &nvc)) {
	/* What should we do ???? */
	osi_Panic("afs_NewVCache: no more vnodes");
    }
    tvc->nextfree = (struct vcache *)0;
#else /* AFS_OSF_ENV */
    /* pull out a free cache entry */
    for (tq = VLRU.prev; (anumber > 0) && (tq != &VLRU); tq = uq) {
	if (tvc->states & CVFlushed)
	    refpanic("CVFlushed on VLRU");
	else if (i++ > 2 * afs_cacheStats)	/* even allowing for a few xallocs... */
	    refpanic("Increase -stat parameter of afsd (VLRU cycle?)");
	else if (QNext(uq) != tq)
	    refpanic("VLRU inconsistent");

#ifdef AFS_DARWIN_ENV
	if (tvc->opens == 0 && ((tvc->states & CUnlinkedDel) == 0) &&
	    VREFCOUNT(tvc) == 1 && UBCINFOEXISTS(&tvc->v)) {
	    osi_VM_TryReclaim(tvc, &fv_slept);
		continue;	/* start over - may have raced. */
	}
#endif
#if defined(AFS_FBSD_ENV)
	if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
	    && (tvc->states & CUnlinkedDel) == 0) {
	    if (!(VOP_LOCK(&tvc->v, LK_EXCLUSIVE, curproc))) {
		if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
		    && (tvc->states & CUnlinkedDel) == 0) {
		    AFS_GUNLOCK();	/* perhaps inline inactive for locking */
		    VOP_INACTIVE(&tvc->v, curproc);
		}
		VOP_UNLOCK(&tvc->v, 0, curproc);
	    }
	}
#endif
#if defined(AFS_LINUX22_ENV)
	if (tvc != afs_globalVp && VREFCOUNT(tvc) && tvc->opens == 0)
	    afs_TryFlushDcacheChildren(tvc);
#endif
	if (VREFCOUNT(tvc) == 0 && tvc->opens == 0
	    && (tvc->states & CUnlinkedDel) == 0) {
	    code = afs_FlushVCache(tvc, &fv_slept);
		continue;	/* start over - may have raced. */
	}
	if (tq == uq) break;
    }

    /* none free, making one is better than a panic */
    afs_stats_cmperf.vcacheXAllocs++;	/* count in case we have a leak */
    tvc = (struct vcache *) afs_osi_Alloc(sizeof(struct vcache));
    pin((char *)tvc, sizeof(struct vcache));	/* XXX */
#ifdef AFS_MACH_ENV
    /* In case it still comes here we need to fill this */
    tvc->v.v_vm_info = VM_INFO_NULL;
    vm_info_init(tvc->v.v_vm_info);
    /* perhaps we should also do close_flush on non-NeXT mach systems;
     * who knows; we don't currently have the sources.
     */
#endif /* AFS_MACH_ENV */
#if defined(AFS_SGI_ENV)
    {
	char name[METER_NAMSZ];

	memset(tvc, 0, sizeof(struct vcache));
	tvc->v.v_number = ++afsvnumbers;
	tvc->vc_rwlockid = OSI_NO_LOCKID;
	initnsema(&tvc->vc_rwlock, 1, makesname(name, "vrw", tvc->v.v_number));
#ifndef AFS_SGI53_ENV
	initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
#endif
#ifndef AFS_SGI62_ENV
	initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
#endif
    }
#endif /* AFS_SGI_ENV */
    tvc = freeVCList;	/* take from free list */
    freeVCList = tvc->nextfree;
    tvc->nextfree = (struct vcache *)0;
#endif /* AFS_OSF_ENV */

#ifdef AFS_MACH_ENV
    vm_info_ptr = tvc->v.v_vm_info;
#endif /* AFS_MACH_ENV */

#if !defined(AFS_SGI_ENV) && !defined(AFS_OSF_ENV)
    memset((char *)tvc, 0, sizeof(struct vcache));
#endif

    RWLOCK_INIT(&tvc->lock, "vcache lock");
#if defined(AFS_SUN5_ENV)
    RWLOCK_INIT(&tvc->vlock, "vcache vlock");
#endif /* defined(AFS_SUN5_ENV) */

#ifdef AFS_MACH_ENV
    tvc->v.v_vm_info = vm_info_ptr;
    tvc->v.v_vm_info->pager = MEMORY_OBJECT_NULL;
#endif /* AFS_MACH_ENV */
    tvc->parentVnode = 0;
    tvc->mvid = (struct VenusFid *)0;
    tvc->linkData = (char *)0;
    tvc->execsOrWriters = 0;
    tvc->last_looker = 0;
    tvc->asynchrony = -1;
    afs_symhint_inval(tvc);
    tvc->flushDV.low = tvc->flushDV.high = AFS_MAXDV;
    tvc->truncPos = AFS_NOTRUNC;	/* don't truncate until we need to */
    hzero(tvc->m.DataVersion);		/* in case we copy it into flushDV */
#ifdef AFS_OSF_ENV
    /* Hold it for the LRU (should make count 2) */
    VN_HOLD(AFSTOV(tvc));
#else /* AFS_OSF_ENV */
    VREFCOUNT_SET(tvc, 1);	/* us */
#endif /* AFS_OSF_ENV */
    LOCK_INIT(&tvc->pvmlock, "vcache pvmlock");
    tvc->vmh = tvc->segid = NULL;

#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV) || defined(AFS_SUN5_ENV)
#if defined(AFS_SUN5_ENV)
    rw_init(&tvc->rwlock, "vcache rwlock", RW_DEFAULT, NULL);

#if defined(AFS_SUN55_ENV)
    /* This is required if the kaio (kernel asynchronous io)
     ** module is installed.  Inside the kernel, the function
     ** check_vp (common/os/aio.c) checks to see if the kernel has
     ** to provide asynchronous io for this vnode.  This
     ** function extracts the device number by following the
     ** v_data field of the vnode.  If we do not set this field
     ** then the system panics.  The value of the v_data field
     ** is not really important for AFS vnodes because the kernel
     ** does not do asynchronous io for regular files.  Hence,
     ** for the time being, we fill up the v_data field with the
     ** vnode pointer itself. */
    tvc->v.v_data = (char *)tvc;
#endif /* AFS_SUN55_ENV */
    afs_BozonInit(&tvc->pvnLock, tvc);
    tvc->callback = serverp;	/* to minimize chance that clear
				 * request is lost */
    /* initialize vnode data, note vrefCount is v.v_count */
    /* Don't forget to free the gnode space */
    tvc->v.v_gnode = gnodepnt = (struct gnode *) osi_AllocSmallSpace(sizeof(struct gnode));
    memset((char *)gnodepnt, 0, sizeof(struct gnode));
    memset((void *)&(tvc->vc_bhv_desc), 0, sizeof(tvc->vc_bhv_desc));
    bhv_desc_init(&(tvc->vc_bhv_desc), tvc, tvc, &Afs_vnodeops);
    vn_bhv_head_init(&(tvc->v.v_bh), "afsvp");
    vn_bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
    bhv_head_init(&(tvc->v.v_bh));
    bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
    tvc->v.v_mreg = tvc->v.v_mregb = (struct pregion *)tvc;
    tvc->v.v_trace = ktrace_alloc(VNODE_TRACE_SIZE, 0);
    init_bitlock(&tvc->v.v_pcacheflag, VNODE_PCACHE_LOCKBIT, "afs_pcache",
		 (long)tvc);
    init_mutex(&tvc->v.v_filocksem, MUTEX_DEFAULT, "afsvfl", (long)tvc);
    init_mutex(&tvc->v.v_buf_lock, MUTEX_DEFAULT, "afsvnbuf", (long)tvc);
    vnode_pcache_init(&tvc->v);
#if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
    /* Above define is never true except in SGI test kernels. */
    init_bitlock(&(tvc->v.v_flag), VLOCK, "vnode", tvc->v.v_number);
#endif
    AFS_VN_INIT_BUF_LOCK(&(tvc->v));
    SetAfsVnode(AFSTOV(tvc));
#endif /* AFS_SGI64_ENV */
#ifdef AFS_DARWIN_ENV
    tvc->v.v_ubcinfo = UBC_INFO_NULL;
    lockinit(&tvc->rwlock, PINOD, "vcache rwlock", 0, 0);
    cache_purge(AFSTOV(tvc));
    /* VLISTNONE(&tvc->v); */
    tvc->v.v_freelist.tqe_next = 0;
    tvc->v.v_freelist.tqe_prev = (struct vnode **)0xdeadb;
    /*tvc->vrefCount++;*/
#endif
    lockinit(&tvc->rwlock, PINOD, "vcache rwlock", 0, 0);
    cache_purge(AFSTOV(tvc));
    tvc->v.v_usecount++;	/* steal an extra ref for now so vfree never happens */
    /* This extra ref is dealt with above... */

    /*
     * The proper value for mvstat (for root fids) is set up by the caller.
     */
    if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
	tvc->mvstat = 2;
    if (afs_globalVFS == 0) osi_Panic("afs globalvfs");
    vSetVfsp(tvc, afs_globalVFS);
    tvc->v.v_vfsnext = afs_globalVFS->vfs_vnodes;	/* link off vfs */
    tvc->v.v_vfsprev = NULL;
    afs_globalVFS->vfs_vnodes = &tvc->v;
    if (tvc->v.v_vfsnext != NULL)
	tvc->v.v_vfsnext->v_vfsprev = &tvc->v;
    tvc->v.v_next = gnodepnt->gn_vnode;	/* Single vnode per gnode for us! */
    gnodepnt->gn_vnode = &tvc->v;
    tvc->v.g_dev = ((struct mount *)afs_globalVFS->vfs_data)->m_dev;
#if defined(AFS_DUX40_ENV)
    insmntque(tvc, afs_globalVFS, &afs_ubcops);
#else
#ifdef AFS_OSF_ENV
    /* Is this needed??? */
    insmntque(tvc, afs_globalVFS);
#endif /* AFS_OSF_ENV */
#endif /* AFS_DUX40_ENV */
#if defined(AFS_SGI_ENV)
    VN_SET_DPAGES(&(tvc->v), (struct pfdat *)NULL);
    osi_Assert((tvc->v.v_flag & VINACT) == 0);
    osi_Assert(VN_GET_PGCNT(&(tvc->v)) == 0);
    osi_Assert(tvc->mapcnt == 0 && tvc->vc_locktrips == 0);
    osi_Assert(tvc->vc_rwlockid == OSI_NO_LOCKID);
    osi_Assert(tvc->v.v_filocks == NULL);
#if !defined(AFS_SGI65_ENV)
    osi_Assert(tvc->v.v_filocksem == NULL);
#endif
    osi_Assert(tvc->cred == NULL);
#ifdef AFS_SGI64_ENV
    vnode_pcache_reinit(&tvc->v);
    tvc->v.v_rdev = NODEV;
#endif
    vn_initlist((struct vnlist *)&tvc->v);
#endif /* AFS_SGI_ENV */
#if defined(AFS_LINUX22_ENV)
    {
	struct inode *ip = AFSTOI(tvc);

	sema_init(&ip->i_sem, 1);
#if defined(AFS_LINUX24_ENV)
	sema_init(&ip->i_zombie, 1);
	init_waitqueue_head(&ip->i_wait);
	spin_lock_init(&ip->i_data.i_shared_lock);
#ifdef STRUCT_ADDRESS_SPACE_HAS_PAGE_LOCK
	spin_lock_init(&ip->i_data.page_lock);
#endif
	INIT_LIST_HEAD(&ip->i_data.clean_pages);
	INIT_LIST_HEAD(&ip->i_data.dirty_pages);
	INIT_LIST_HEAD(&ip->i_data.locked_pages);
	INIT_LIST_HEAD(&ip->i_dirty_buffers);
#ifdef STRUCT_INODE_HAS_I_DIRTY_DATA_BUFFERS
	INIT_LIST_HEAD(&ip->i_dirty_data_buffers);
#endif
#ifdef STRUCT_INODE_HAS_I_DEVICES
	INIT_LIST_HEAD(&ip->i_devices);
#endif
	ip->i_data.host = (void *)ip;
#ifdef STRUCT_ADDRESS_SPACE_HAS_GFP_MASK
	ip->i_data.gfp_mask = GFP_HIGHUSER;
#endif
	ip->i_mapping = &ip->i_data;
#ifdef STRUCT_INODE_HAS_I_TRUNCATE_SEM
	init_rwsem(&ip->i_truncate_sem);
#endif
#else
	sema_init(&ip->i_atomic_write, 1);
	init_waitqueue(&ip->i_wait);
#endif
	INIT_LIST_HEAD(&ip->i_hash);
	INIT_LIST_HEAD(&ip->i_dentry);
	if (afs_globalVFS) {
	    ip->i_dev = afs_globalVFS->s_dev;
	    ip->i_sb = afs_globalVFS;
	}
    }
#endif
    osi_dnlc_purgedp(tvc);	/* this may be overkill */
    memset((char *)&(tvc->quick), 0, sizeof(struct vtodc));
    memset((char *)&(tvc->callsort), 0, sizeof(struct afs_q));
    tvc->slocks = (struct SimpleLocks *)0;
    i = VCHash(afid);
    tvc->hnext = afs_vhashT[i];
    afs_vhashT[i] = tvc;
    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	refpanic("NewVCache VLRU inconsistent");
    }
    QAdd(&VLRU, &tvc->vlruq);	/* put in lruq */
    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	refpanic("NewVCache VLRU inconsistent2");
    }
    if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
	refpanic("NewVCache VLRU inconsistent3");
    }
    if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
	refpanic("NewVCache VLRU inconsistent4");
    }
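
/*
 * Illustrative sketch (not part of the original source) of how callers
 * obtain a new entry: afs_xvcache must be write-locked across the call,
 * and the usual pattern (see afs_GetVCache below) upgrades a shared lock
 * first and converts back afterwards.  The lock number is arbitrary.
 */
#if 0
{
    UpgradeSToWLock(&afs_xvcache, 999);
    tvc = afs_NewVCache(afid, (struct server *)0, 1, WRITE_LOCK);
    ConvertWToSLock(&afs_xvcache);
}
#endif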

/*
 * afs_FlushActiveVcaches
 *
 * doflocks : Do we handle flocks?
 */
/* LOCK: afs_FlushActiveVcaches afs_xvcache N */
afs_FlushActiveVcaches(doflocks)
register afs_int32 doflocks;
{ /*afs_FlushActiveVcaches*/
    register int i;
    register struct vcache *tvc;
    register struct conn *tc;
    register afs_int32 code;
    register struct AFS_UCRED *cred;
    struct vrequest treq, ureq;
    struct AFSVolSync tsync;

    AFS_STATCNT(afs_FlushActiveVcaches);
    ObtainReadLock(&afs_xvcache);
    for (i = 0; i < VCSIZE; i++) {
	for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
	    if (doflocks && tvc->flockCount != 0) {
		/* if this entry has a flock, send a keep-alive call out */
		ReleaseReadLock(&afs_xvcache);
		ObtainWriteLock(&tvc->lock, 51);
		afs_InitReq(&treq, &afs_osi_cred);
		treq.flags |= O_NONBLOCK;
		tc = afs_Conn(&tvc->fid, &treq, SHARED_LOCK);
		    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
		    RXAFS_ExtendLock(tc->id,
				     (struct AFSFid *)&tvc->fid.Fid,
				     &tsync);
		(afs_Analyze(tc, code, &tvc->fid, &treq,
			     AFS_STATS_FS_RPCIDX_EXTENDLOCK,
			     SHARED_LOCK, (struct cell *)0));
		ReleaseWriteLock(&tvc->lock);
		ObtainReadLock(&afs_xvcache);
	    if ((tvc->states & CCore) || (tvc->states & CUnlinkedDel)) {
		/*
		 * Don't let it evaporate in case someone else is in
		 * this code.  Also, drop the afs_xvcache lock while
		 * getting vcache locks.
		 */
		ReleaseReadLock(&afs_xvcache);
#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
		afs_BozonLock(&tvc->pvnLock, tvc);
#endif
#if defined(AFS_SGI_ENV)
		/*
		 * That's because if we come in via the CUnlinkedDel bit state path we'll have a 0 refcnt
		 */
		osi_Assert(VREFCOUNT(tvc) > 0);
		AFS_RWLOCK((vnode_t *)tvc, VRWLOCK_WRITE);
#endif
		ObtainWriteLock(&tvc->lock, 52);
		if (tvc->states & CCore) {
		    tvc->states &= ~CCore;
		    /* XXXX Find better place-holder for cred XXXX */
		    cred = (struct AFS_UCRED *)tvc->linkData;
		    tvc->linkData = (char *)0;	/* XXX */
		    afs_InitReq(&ureq, cred);
		    afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
			       ICL_TYPE_POINTER, tvc,
			       ICL_TYPE_INT32, tvc->execsOrWriters);
		    code = afs_StoreOnLastReference(tvc, &ureq);
		    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
		    afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
		    hzero(tvc->flushDV);
		    if (code && code != VNOVNODE) {
			afs_StoreWarn(code, tvc->fid.Fid.Volume,
				      /* /dev/console */ 1);
		    }
		} else if (tvc->states & CUnlinkedDel) {
		    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
		    afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
#if defined(AFS_SGI_ENV)
		    AFS_RWUNLOCK((vnode_t *)tvc, VRWLOCK_WRITE);
#endif
		    afs_remunlink(tvc, 0);
#if defined(AFS_SGI_ENV)
		    AFS_RWLOCK((vnode_t *)tvc, VRWLOCK_WRITE);
#endif
		} else {
		    /* lost (or won, perhaps) the race condition */
		    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
		    afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
		}
#if defined(AFS_SGI_ENV)
		AFS_RWUNLOCK((vnode_t *)tvc, VRWLOCK_WRITE);
#endif
		ObtainReadLock(&afs_xvcache);
		AFS_RELE(AFSTOV(tvc));
		/* Matches write code setting CCore flag */
#ifdef AFS_DARWIN_ENV
	    if (VREFCOUNT(tvc) == 1 && UBCINFOEXISTS(&tvc->v)) {
		if (tvc->opens) panic("flushactive open, hasubc, but refcnt 1");
		osi_VM_TryReclaim(tvc, 0);
	    }
#endif
	}
    }
    ReleaseReadLock(&afs_xvcache);
} /*afs_FlushActiveVcaches*/
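
/*
 * Illustrative sketch (not part of the original source): this routine is
 * intended to be driven periodically by a background daemon; doflocks
 * selects whether file-lock keep-alive (ExtendLock) calls are also sent
 * on that pass.
 */
#if 0
afs_FlushActiveVcaches(1);	/* extend flocks and push CCore/CUnlinkedDel work */
#endif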

/*
 * Make sure a cache entry is up-to-date status-wise.
 *
 * NOTE: everywhere that calls this can potentially be sped up
 * by checking CStatd first, and avoiding doing the InitReq
 * if this is up-to-date.  (See the sketch following this routine.)
 *
 * These days, the only places that call this KNOW already that the
 * vcache is not up-to-date, so we don't screw around.
 *
 * avc : Ptr to vcache entry to verify.
 */
int afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
{
    register struct vcache *tvc;

    AFS_STATCNT(afs_VerifyVCache);

#if defined(AFS_OSF_ENV)
    ObtainReadLock(&avc->lock);
    if (afs_IsWired(avc)) {
	ReleaseReadLock(&avc->lock);
	return 0;
    }
    ReleaseReadLock(&avc->lock);
#endif /* AFS_OSF_ENV */
    /* otherwise we must fetch the status info */
    ObtainWriteLock(&avc->lock, 53);
    if (avc->states & CStatd) {
	ReleaseWriteLock(&avc->lock);
	return 0;
    }
    ObtainWriteLock(&afs_xcbhash, 461);
    avc->states &= ~(CStatd | CUnique);
    avc->callback = (struct server *)0;
    afs_DequeueCallback(avc);
    ReleaseWriteLock(&afs_xcbhash);
    ReleaseWriteLock(&avc->lock);

    /* since we've been called back, or the callback has expired,
     * it's possible that the contents of this directory, or this
     * file's name have changed, thus invalidating the dnlc contents.
     */
    if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
	osi_dnlc_purgedp(avc);
    else
	osi_dnlc_purgevp(avc);

    /* fetch the status info */
    tvc = afs_GetVCache(&avc->fid, areq, (afs_int32 *)0, avc, READ_LOCK);
    if (!tvc) return ENOENT;
    /* Put it back; caller has already incremented vrefCount */
    afs_PutVCache(tvc, READ_LOCK);
    return 0;
} /*afs_VerifyVCache*/
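
/*
 * Minimal sketch of the speed-up suggested in the NOTE above (not part of
 * the original source): test CStatd before paying for afs_InitReq and the
 * full verification path.  The wrapper name and "acred" parameter are
 * hypothetical.
 */
#if 0
static int
afs_VerifyVCache_fast(struct vcache *avc, struct AFS_UCRED *acred)
{
    struct vrequest treq;
    afs_int32 code;

    if (avc->states & CStatd)
	return 0;		/* already up to date; skip the RPC machinery */
    if ((code = afs_InitReq(&treq, acred)))
	return code;
    return afs_VerifyVCache2(avc, &treq);
}
#endif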

/*
 * Simple copy of stat info into cache.
 *
 * avc   : Ptr to vcache entry involved.
 * astat : Ptr to stat info to copy.
 *
 * Nothing interesting.
 *
 * Callers: as of 1992-04-29, only called by WriteVCache
 */
afs_SimpleVStat(avc, astat, areq)
register struct vcache *avc;
register struct AFSFetchStatus *astat;
struct vrequest *areq;
{ /*afs_SimpleVStat*/
    afs_size_t length;

    AFS_STATCNT(afs_SimpleVStat);

#if defined(AFS_SGI_ENV)
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
	&& !AFS_VN_MAPPED((vnode_t *)avc))
#else
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc))
#endif
    {
#ifdef AFS_64BIT_CLIENT
	FillInt64(length, astat->Length_hi, astat->Length);
#else /* AFS_64BIT_CLIENT */
	length = astat->Length;
#endif /* AFS_64BIT_CLIENT */
#if defined(AFS_SGI_ENV)
	osi_Assert((valusema(&avc->vc_rwlock) <= 0) &&
		   (OSI_GET_LOCKID() == avc->vc_rwlockid));
	if (length < avc->m.Length) {
	    vnode_t *vp = (vnode_t *)avc;

	    osi_Assert(WriteLocked(&avc->lock));
	    ReleaseWriteLock(&avc->lock);
	    PTOSSVP(vp, (off_t)length, (off_t)MAXLONG);
	    ObtainWriteLock(&avc->lock, 67);
	}
#endif
	/* if writing the file, don't fetch over this value */
	afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT,
		   ICL_TYPE_POINTER, avc,
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
	avc->m.Length = length;
	avc->m.Date = astat->ClientModTime;
    }
    avc->m.Owner = astat->Owner;
    avc->m.Group = astat->Group;
    avc->m.Mode = astat->UnixModeBits;
    if (vType(avc) == VREG) {
	avc->m.Mode |= S_IFREG;
    }
    else if (vType(avc) == VDIR) {
	avc->m.Mode |= S_IFDIR;
    }
    else if (vType(avc) == VLNK) {
	avc->m.Mode |= S_IFLNK;
	if ((avc->m.Mode & 0111) == 0) avc->mvstat = 1;
    }
    if (avc->states & CForeign) {
	struct axscache *ac;

	avc->anyAccess = astat->AnonymousAccess;
#ifdef badidea
	if ((astat->CallerAccess & ~astat->AnonymousAccess))
	    /*
	     * Caller has at least one bit not covered by anonymous, and
	     * thus may have interesting rights.
	     *
	     * HOWEVER, this is a really bad idea, because any access query
	     * for bits which aren't covered by anonymous, on behalf of a user
	     * who doesn't have any special rights, will result in an answer of
	     * the form "I don't know, let's make a FetchStatus RPC and find out!"
	     * It's an especially bad idea under Ultrix, since (due to the lack of
	     * a proper access() call) it must perform several afs_access() calls
	     * in order to create magic mode bits that vary according to who makes
	     * the call.  In other words, _every_ stat() generates a test for
	     * FetchStatus.
	     */
#endif /* badidea */
	if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
	    ac->axess = astat->CallerAccess;
	else	/* not found, add a new one if possible */
	    afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
    }
} /*afs_SimpleVStat*/

/*
 * Store the status info *only* back to the server for a
 * given file.
 *
 * avc     : Ptr to the vcache entry.
 * astatus : Ptr to the status info to store.
 * areq    : Ptr to the associated vrequest.
 *
 * Must be called with a shared lock held on the vnode.
 */
afs_WriteVCache(avc, astatus, areq)
register struct vcache *avc;
register struct AFSStoreStatus *astatus;
struct vrequest *areq;
{ /*afs_WriteVCache*/
    afs_int32 code;
    struct conn *tc;
    struct AFSFetchStatus OutStatus;
    struct AFSVolSync tsync;

    AFS_STATCNT(afs_WriteVCache);
    afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));

	tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
	    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
	    code = RXAFS_StoreStatus(tc->id,
				     (struct AFSFid *)&avc->fid.Fid,
				     astatus, &OutStatus, &tsync);
	(afs_Analyze(tc, code, &avc->fid, areq,
		     AFS_STATS_FS_RPCIDX_STORESTATUS,
		     SHARED_LOCK, (struct cell *)0));

    UpgradeSToWLock(&avc->lock, 20);
    if (code == 0) {
	/* success, do the changes locally */
	afs_SimpleVStat(avc, &OutStatus, areq);
	/*
	 * Update the date, too.  SimpleVStat didn't do this, since
	 * it thought we were doing this after fetching new status
	 * over a file being written.
	 */
	avc->m.Date = OutStatus.ClientModTime;
    } else {
	/* failure, set up to check with server next time */
	ObtainWriteLock(&afs_xcbhash, 462);
	afs_DequeueCallback(avc);
	avc->states &= ~(CStatd | CUnique);	/* turn off stat valid flag */
	ReleaseWriteLock(&afs_xcbhash);
	if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
	    osi_dnlc_purgedp(avc);	/* if it (could be) a directory */
    }
    ConvertWToSLock(&avc->lock);
    return code;
} /*afs_WriteVCache*/
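
/*
 * Illustrative sketch (not part of the original source) of the shared-lock
 * contract above: the caller takes a shared lock on the vnode, and
 * afs_WriteVCache upgrades to a write lock for the local update and
 * converts back before returning, so the caller releases the same shared
 * lock it obtained.  The lock number is arbitrary.
 */
#if 0
{
    ObtainSharedLock(&avc->lock, 999);
    code = afs_WriteVCache(avc, &astatus, &treq);
    ReleaseSharedLock(&avc->lock);
}
#endif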

/*
 * Copy astat block into vcache info
 *
 * avc   : Ptr to vcache entry.
 * astat : Ptr to stat block to copy in.
 * areq  : Ptr to associated request.
 *
 * Must be called under a write lock
 *
 * Note: this code may get dataversion and length out of sync if the file has
 *	 been modified.  This is less than ideal.  I haven't thought about
 *	 it sufficiently to be certain that it is adequate.
 */
afs_ProcessFS(avc, astat, areq)
register struct vcache *avc;
struct vrequest *areq;
register struct AFSFetchStatus *astat;
{
    afs_size_t length;

    AFS_STATCNT(afs_ProcessFS);

#ifdef AFS_64BIT_CLIENT
    FillInt64(length, astat->Length_hi, astat->Length);
#else /* AFS_64BIT_CLIENT */
    length = astat->Length;
#endif /* AFS_64BIT_CLIENT */

    /* WARNING: afs_DoBulkStat uses the Length field to store a sequence
     * number for each bulk status request.  Under no circumstances
     * should afs_DoBulkStat store a sequence number if the new
     * length will be ignored when afs_ProcessFS is called with
     * new stats.  If you change the following conditional then you
     * also need to change the conditional in afs_DoBulkStat.  */
#if defined(AFS_SGI_ENV)
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
	&& !AFS_VN_MAPPED((vnode_t *)avc))
#else
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc))
#endif
    {
	/* if we're writing or mapping this file, don't fetch over these
	 * values.
	 */
	afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
	avc->m.Length = length;
	avc->m.Date = astat->ClientModTime;
    }
    hset64(avc->m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
    avc->m.Owner = astat->Owner;
    avc->m.Mode = astat->UnixModeBits;
    avc->m.Group = astat->Group;
    avc->m.LinkCount = astat->LinkCount;
    if (astat->FileType == File) {
	vSetType(avc, VREG);
	avc->m.Mode |= S_IFREG;
    }
    else if (astat->FileType == Directory) {
	vSetType(avc, VDIR);
	avc->m.Mode |= S_IFDIR;
    }
    else if (astat->FileType == SymbolicLink) {
	if (afs_fakestat_enable && (avc->m.Mode & 0111) == 0) {
	    vSetType(avc, VDIR);
	    avc->m.Mode |= S_IFDIR;
	} else {
	    vSetType(avc, VLNK);
	    avc->m.Mode |= S_IFLNK;
	    if ((avc->m.Mode & 0111) == 0) {
		avc->mvstat = 1;
	    }
	}
    }
    avc->anyAccess = astat->AnonymousAccess;
#ifdef badidea
    if ((astat->CallerAccess & ~astat->AnonymousAccess))
	/*
	 * Caller has at least one bit not covered by anonymous, and
	 * thus may have interesting rights.
	 *
	 * HOWEVER, this is a really bad idea, because any access query
	 * for bits which aren't covered by anonymous, on behalf of a user
	 * who doesn't have any special rights, will result in an answer of
	 * the form "I don't know, let's make a FetchStatus RPC and find out!"
	 * It's an especially bad idea under Ultrix, since (due to the lack of
	 * a proper access() call) it must perform several afs_access() calls
	 * in order to create magic mode bits that vary according to who makes
	 * the call.  In other words, _every_ stat() generates a test for
	 * FetchStatus.
	 */
#endif /* badidea */
    {
	struct axscache *ac;

	if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
	    ac->axess = astat->CallerAccess;
	else	/* not found, add a new one if possible */
	    afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
    }
#ifdef AFS_LINUX22_ENV
    vcache2inode(avc);	/* Set the inode attr cache */
#endif
}

afs_RemoteLookup(afid, areq, name, nfid, OutStatusp, CallBackp, serverp, tsyncp)
register struct VenusFid *afid;
struct vrequest *areq;
char *name;
struct VenusFid *nfid;
struct AFSFetchStatus *OutStatusp;
struct AFSCallBack *CallBackp;
struct server **serverp;
struct AFSVolSync *tsyncp;
{
    afs_int32 code;
    register struct vcache *tvc;
    register struct conn *tc;
    struct AFSFetchStatus OutDirStatus;

    if (!name) name = "";	/* XXX */
	tc = afs_Conn(afid, areq, SHARED_LOCK);
	    if (serverp) *serverp = tc->srvr->server;
	    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
	    code = RXAFS_Lookup(tc->id, (struct AFSFid *)&afid->Fid, name,
				(struct AFSFid *)&nfid->Fid,
				OutStatusp, &OutDirStatus, CallBackp, tsyncp);
	(afs_Analyze(tc, code, afid, areq,
		     AFS_STATS_FS_RPCIDX_XLOOKUP,
		     SHARED_LOCK, (struct cell *)0));
    return code;
}

/*
 * Given a file id and a vrequest structure, fetch the status
 * information associated with the file.
 *
 * areq : Ptr to associated vrequest structure, specifying the
 *	  user whose authentication tokens will be used.
 * avc  : caller may already have a vcache for this file, which is
 *	  already held.
 *
 * The cache entry is returned with an increased vrefCount field.
 * The entry must be discarded by calling afs_PutVCache when you
 * are through using the pointer to the cache entry.
 *
 * You should not hold any locks when calling this function, except
 * locks on other vcache entries.  If you lock more than one vcache
 * entry simultaneously, you should lock them in this order:
 *
 *	1. Lock all files first, then directories.
 *	2. Within a particular type, lock entries in Fid.Vnode order.
 *
 * This locking hierarchy is convenient because it allows locking
 * of a parent dir cache entry, given a file (to check its access
 * control list).  It also allows renames to be handled easily by
 * locking directories in a constant order.
 *
 * NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
 */
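
/*
 * Illustrative example of the ordering rules above (not part of the
 * original source; "filevc" and "dirvc" are hypothetical entries for a
 * file and its parent directory, and the lock numbers are arbitrary):
 *
 *	ObtainWriteLock(&filevc->lock, 700);	(file before directory)
 *	ObtainWriteLock(&dirvc->lock, 701);
 *	...
 *	ReleaseWriteLock(&dirvc->lock);
 *	ReleaseWriteLock(&filevc->lock);
 */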
struct vcache *afs_GetVCache(afid, areq, cached, avc, locktype)
register struct VenusFid *afid;
struct vrequest *areq;
afs_int32 *cached;
afs_int32 locktype;
struct vcache *avc;	/* might have a vcache structure already, which must
			 * already be held by the caller */
{
    afs_int32 code, i, newvcache = 0;
    register struct vcache *tvc;
    struct volume *tvp;
    afs_int32 retry = 0;

    AFS_STATCNT(afs_GetVCache);

    if (cached) *cached = 0;	/* Init just in case */

    ObtainSharedLock(&afs_xvcache, 5);

    tvc = afs_FindVCache(afid, 0, 0, &retry, DO_STATS | DO_VLRU);
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
    ReleaseSharedLock(&afs_xvcache);
    spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
#endif
    if (tvc->states & CStatd) {
	ReleaseSharedLock(&afs_xvcache);
	return tvc;
    }

    UpgradeSToWLock(&afs_xvcache, 21);

    /* no cache entry, better grab one */
    tvc = afs_NewVCache(afid, (struct server *)0, 1, WRITE_LOCK);
    newvcache = 1;
    ConvertWToSLock(&afs_xvcache);
    afs_stats_cmperf.vcacheMisses++;

    ReleaseSharedLock(&afs_xvcache);

    ObtainWriteLock(&tvc->lock, 54);

    if (tvc->states & CStatd) {
#ifdef AFS_LINUX22_ENV
	ReleaseWriteLock(&tvc->lock);
#ifdef AFS_DARWIN_ENV
#if defined(AFS_OSF_ENV)
    if (afs_IsWired(tvc)) {
	ReleaseWriteLock(&tvc->lock);
    }
#endif /* AFS_OSF_ENV */
    ObtainWriteLock(&afs_xcbhash, 464);
    tvc->states &= ~CUnique;
    afs_DequeueCallback(tvc);
    ReleaseWriteLock(&afs_xcbhash);

    /* Is it always appropriate to throw away all the access rights? */
    afs_FreeAllAxs(&(tvc->Access));

    tvp = afs_GetVolume(afid, areq, READ_LOCK);	/* copy useful per-volume info */
    if (tvp) {
	if ((tvp->states & VForeign)) {
	    if (newvcache) tvc->states |= CForeign;
	    if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
		&& (tvp->rootUnique == afid->Fid.Unique)) {
		tvc->mvstat = 2;
	    }
	}
	if (tvp->states & VRO) tvc->states |= CRO;
	if (tvp->states & VBackup) tvc->states |= CBackup;
	/* now copy ".." entry back out of volume structure, if necessary */
	if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
	    if (!tvc->mvid)
		tvc->mvid = (struct VenusFid *)
		    osi_AllocSmallSpace(sizeof(struct VenusFid));
	    *tvc->mvid = tvp->dotdot;
	}
	afs_PutVolume(tvp, READ_LOCK);
    }

    afs_RemoveVCB(afid);
    {
	struct AFSFetchStatus OutStatus;

	if (afs_DynrootNewVnode(tvc, &OutStatus)) {
	    afs_ProcessFS(tvc, &OutStatus, areq);
	    tvc->states |= CStatd | CUnique;
	} else {
	    code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
	}
    }

    if (code) {
	ReleaseWriteLock(&tvc->lock);

	ObtainReadLock(&afs_xvcache);
	afs_PutVCache(tvc, READ_LOCK);
	ReleaseReadLock(&afs_xvcache);
	return (struct vcache *)0;
    }

    ReleaseWriteLock(&tvc->lock);
#ifdef AFS_DARWIN_ENV

struct vcache *afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
				afs_int32 *cached, afs_int32 locktype,
				struct vcache *adp, char *aname)
{
    afs_int32 code, now, newvcache = 0, hash;
    struct VenusFid nfid;
    register struct vcache *tvc;
    struct volume *tvp;
    struct AFSFetchStatus OutStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    struct server *serverp = 0;
    afs_int32 origCBs, retry = 0;

    AFS_STATCNT(afs_GetVCache);
    if (cached) *cached = 0;	/* Init just in case */

    ObtainReadLock(&afs_xvcache);
    tvc = afs_FindVCache(afid, 0, 0, &retry, DO_STATS /* no vlru */);
    if (tvc) {
	ReleaseReadLock(&afs_xvcache);
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
	spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
#endif
	ObtainReadLock(&tvc->lock);
	if (tvc->states & CStatd) {
	    ReleaseReadLock(&tvc->lock);
	    return tvc;
	}
	tvc->states &= ~CUnique;
	ReleaseReadLock(&tvc->lock);
	ObtainReadLock(&afs_xvcache);
	afs_PutVCache(tvc, READ_LOCK);
	ReleaseReadLock(&afs_xvcache);
    }

    /* lookup the file */
    now = osi_Time();
    origCBs = afs_allCBs;	/* if anything changes, we don't have a cb */
    code = afs_RemoteLookup(&adp->fid, areq, aname, &nfid, &OutStatus, &CallBack,
			    &serverp, &tsync);
1880 tvc = afs_FindVCache(&nfid, 0, 0, &retry, DO_VLRU /* no xstats now*/);
1882 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1883 ReleaseSharedLock(&afs_xvcache);
1884 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1890 /* no cache entry, better grab one */
1891 UpgradeSToWLock(&afs_xvcache,22);
1892 tvc = afs_NewVCache(&nfid, (struct server *)0, 1, WRITE_LOCK);
1894 ConvertWToSLock(&afs_xvcache);
1897 ReleaseSharedLock(&afs_xvcache);
1898 ObtainWriteLock(&tvc->lock,55);
1900 /* It is always appropriate to throw away all the access rights? */
1901 afs_FreeAllAxs(&(tvc->Access));
1902 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-vol info */
1904 if ((tvp->states & VForeign)) {
1905 if (newvcache) tvc->states |= CForeign;
1906 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
1907 && (tvp->rootUnique == afid->Fid.Unique))
1910 if (tvp->states & VRO) tvc->states |= CRO;
1911 if (tvp->states & VBackup) tvc->states |= CBackup;
1912 /* now copy ".." entry back out of volume structure, if necessary */
1913 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
1915 tvc->mvid = (struct VenusFid *)
1916 osi_AllocSmallSpace(sizeof(struct VenusFid));
1917 *tvc->mvid = tvp->dotdot;
1922 ObtainWriteLock(&afs_xcbhash, 465);
1923 afs_DequeueCallback(tvc);
1924 tvc->states &= ~( CStatd | CUnique );
1925 ReleaseWriteLock(&afs_xcbhash);
1926 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
1927 osi_dnlc_purgedp (tvc); /* if it (could be) a directory */
1929 afs_PutVolume(tvp, READ_LOCK);
1930 ReleaseWriteLock(&tvc->lock);
1931 ObtainReadLock(&afs_xvcache);
1933 ReleaseReadLock(&afs_xvcache);
1934 return (struct vcache *) 0;
    ObtainWriteLock(&afs_xcbhash, 466);
    if (origCBs == afs_allCBs) {
	if (CallBack.ExpirationTime) {
	    tvc->callback = serverp;
	    tvc->cbExpires = CallBack.ExpirationTime + now;
	    tvc->states |= CStatd | CUnique;
	    tvc->states &= ~CBulkFetching;
	    afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
	} else if (tvc->states & CRO) {
	    /* adapt gives us an hour. */
	    tvc->cbExpires = 3600 + osi_Time();	/*XXX*/
	    tvc->states |= CStatd | CUnique;
	    tvc->states &= ~CBulkFetching;
	    afs_QueueCallback(tvc, CBHash(3600), tvp);
	} else {
	    tvc->callback = (struct server *)0;
	    afs_DequeueCallback(tvc);
	    tvc->states &= ~(CStatd | CUnique);
	    if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
		osi_dnlc_purgedp(tvc);	/* if it (could be) a directory */
	}
    } else {
	afs_DequeueCallback(tvc);
	tvc->states &= ~CStatd;
	tvc->states &= ~CUnique;
	tvc->callback = (struct server *)0;
	if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
	    osi_dnlc_purgedp(tvc);	/* if it (could be) a directory */
    }
    ReleaseWriteLock(&afs_xcbhash);
    afs_PutVolume(tvp, READ_LOCK);
    afs_ProcessFS(tvc, &OutStatus, areq);

    ReleaseWriteLock(&tvc->lock);
#ifdef AFS_DARWIN_ENV

struct vcache *afs_GetRootVCache(struct VenusFid *afid,
				 struct vrequest *areq, afs_int32 *cached,
				 struct volume *tvolp, afs_int32 locktype)
{
    afs_int32 code, i, newvcache = 0, haveStatus = 0;
    afs_int32 getNewFid = 0;
    struct VenusFid nfid;
    register struct vcache *tvc;
    struct server *serverp = 0;
    struct AFSFetchStatus OutStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    afs_int32 origCBs = 0;
    int start;

    if (!tvolp->rootVnode || getNewFid) {
	struct VenusFid tfid;

	tfid.Fid.Vnode = 0;	/* Means get rootfid of volume */
	origCBs = afs_allCBs;	/* ignore InitCallBackState */
	code = afs_RemoteLookup(&tfid, areq, (char *)0, &nfid,
				&OutStatus, &CallBack, &serverp, &tsync);
	if (code) {
	    return (struct vcache *)0;
	}
	/* ReleaseReadLock(&tvolp->lock); */
	ObtainWriteLock(&tvolp->lock, 56);
	tvolp->rootVnode = afid->Fid.Vnode = nfid.Fid.Vnode;
	tvolp->rootUnique = afid->Fid.Unique = nfid.Fid.Unique;
	ReleaseWriteLock(&tvolp->lock);
	/* ObtainReadLock(&tvolp->lock); */
    } else {
	afid->Fid.Vnode = tvolp->rootVnode;
	afid->Fid.Unique = tvolp->rootUnique;
    }

    ObtainSharedLock(&afs_xvcache, 7);
    i = VCHash(afid);
    for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
	if (!FidCmp(&(tvc->fid), afid)) {
	    /* Grab this vnode, possibly reactivating from the free list */
	    /* for the present (95.05.25) everything on the hash table is
	     * definitively NOT in the free list -- at least until afs_reclaim
	     * can be safely implemented */
#ifdef AFS_OSF_ENV
	    int vg;
	    vg = vget(AFSTOV(tvc));	/* this bumps ref count */
	    if (vg) continue;
#endif /* AFS_OSF_ENV */
	    break;
	}
    }

    if (!haveStatus && (!tvc || !(tvc->states & CStatd))) {
	/* Mount point no longer stat'd or unknown.  FID may have changed. */
	tvc = (struct vcache *)0;
	ReleaseSharedLock(&afs_xvcache);
    }

    if (!tvc) {
	UpgradeSToWLock(&afs_xvcache, 23);
	/* no cache entry, better grab one */
	tvc = afs_NewVCache(afid, (struct server *)0, 1, WRITE_LOCK);
	newvcache = 1;
	afs_stats_cmperf.vcacheMisses++;
    } else {
	if (cached) *cached = 1;
	afs_stats_cmperf.vcacheHits++;
#ifdef AFS_OSF_ENV
	/* we already bumped the ref count in the for loop above */
#else /* AFS_OSF_ENV */
	UpgradeSToWLock(&afs_xvcache, 24);
	if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	    refpanic("GRVC VLRU inconsistent0");
	}
	if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
	    refpanic("GRVC VLRU inconsistent1");
	}
	if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
	    refpanic("GRVC VLRU inconsistent2");
	}
	QRemove(&tvc->vlruq);	/* move to lruq head */
	QAdd(&VLRU, &tvc->vlruq);
	if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	    refpanic("GRVC VLRU inconsistent3");
	}
	if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
	    refpanic("GRVC VLRU inconsistent4");
	}
	if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
	    refpanic("GRVC VLRU inconsistent5");
	}
    }
    ReleaseWriteLock(&afs_xvcache);

    if (tvc->states & CStatd) {
	return tvc;
    }

    ObtainReadLock(&tvc->lock);
    tvc->states &= ~CUnique;
    tvc->callback = (struct server *)0;	/* redundant, perhaps */
    ReleaseReadLock(&tvc->lock);

    ObtainWriteLock(&tvc->lock, 57);

    /* Is it always appropriate to throw away all the access rights? */
    afs_FreeAllAxs(&(tvc->Access));

    if (newvcache) tvc->states |= CForeign;
    if (tvolp->states & VRO) tvc->states |= CRO;
    if (tvolp->states & VBackup) tvc->states |= CBackup;
    /* now copy ".." entry back out of volume structure, if necessary */
    if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
	&& (tvolp->rootUnique == afid->Fid.Unique)) {
	tvc->mvstat = 2;
    }
    if (tvc->mvstat == 2 && tvolp->dotdot.Fid.Volume != 0) {
	if (!tvc->mvid)
	    tvc->mvid = (struct VenusFid *)osi_AllocSmallSpace(sizeof(struct VenusFid));
	*tvc->mvid = tvolp->dotdot;
    }

    afs_RemoveVCB(afid);

    if (!haveStatus) {
	struct VenusFid tfid;

	tfid.Fid.Vnode = 0;	/* Means get rootfid of volume */
	origCBs = afs_allCBs;	/* ignore InitCallBackState */
	code = afs_RemoteLookup(&tfid, areq, (char *)0, &nfid, &OutStatus,
				&CallBack, &serverp, &tsync);
	if (code) {
	    ObtainWriteLock(&afs_xcbhash, 467);
	    afs_DequeueCallback(tvc);
	    tvc->callback = (struct server *)0;
	    tvc->states &= ~(CStatd | CUnique);
	    ReleaseWriteLock(&afs_xcbhash);
	    if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
		osi_dnlc_purgedp(tvc);	/* if it (could be) a directory */
	    ReleaseWriteLock(&tvc->lock);
	    ObtainReadLock(&afs_xvcache);
	    afs_PutVCache(tvc, READ_LOCK);
	    ReleaseReadLock(&afs_xvcache);
	    return (struct vcache *)0;
	}

	ObtainWriteLock(&afs_xcbhash, 468);
	if (origCBs == afs_allCBs) {
	    tvc->states |= CTruth;
	    tvc->callback = serverp;
	    if (CallBack.ExpirationTime != 0) {
		tvc->cbExpires = CallBack.ExpirationTime + start;
		tvc->states |= CStatd;
		tvc->states &= ~CBulkFetching;
		afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
	    } else if (tvc->states & CRO) {
		/* adapt gives us an hour. */
		tvc->cbExpires = 3600 + osi_Time();	/*XXX*/
		tvc->states |= CStatd;
		tvc->states &= ~CBulkFetching;
		afs_QueueCallback(tvc, CBHash(3600), tvolp);
	    }
	} else {
	    afs_DequeueCallback(tvc);
	    tvc->callback = (struct server *)0;
	    tvc->states &= ~(CStatd | CUnique);
	    if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
		osi_dnlc_purgedp(tvc);	/* if it (could be) a directory */
	}
	ReleaseWriteLock(&afs_xcbhash);
    }

    afs_ProcessFS(tvc, &OutStatus, areq);

    ReleaseWriteLock(&tvc->lock);
    return tvc;
}
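
/*
 * Illustrative sketch (not part of the original source) of the callback
 * race check used above and in afs_LookupVCache: snapshot the global
 * callback counter before the RPC; if it moved, a server-initiated
 * callback break raced with us, so the returned callback cannot be
 * trusted and the entry is left without CStatd.  The lock number is
 * arbitrary.
 */
#if 0
{
    origCBs = afs_allCBs;		/* snapshot before the RPC */
    code = afs_RemoteLookup(&tfid, areq, (char *)0, &nfid,
			    &OutStatus, &CallBack, &serverp, &tsync);
    ObtainWriteLock(&afs_xcbhash, 999);
    if (origCBs == afs_allCBs && CallBack.ExpirationTime) {
	tvc->callback = serverp;	/* no break raced us; keep it */
	tvc->states |= CStatd;
    } else {
	afs_DequeueCallback(tvc);
	tvc->states &= ~(CStatd | CUnique);
    }
    ReleaseWriteLock(&afs_xcbhash);
}
#endif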

/*
 * must be called with avc write-locked
 * don't absolutely have to invalidate the hint unless the dv has
 * changed, but be sure to get it right else there will be consistency bugs.
 */
afs_int32 afs_FetchStatus(struct vcache *avc, struct VenusFid *afid,
			  struct vrequest *areq, struct AFSFetchStatus *Outsp)
{
    int code;
    afs_uint32 start;
    register struct conn *tc;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    struct volume *volp;

	tc = afs_Conn(afid, areq, SHARED_LOCK);
	avc->quick.stamp = 0;
	avc->h1.dchint = NULL;	/* invalidate hints */
	if (tc) {
	    avc->callback = tc->srvr->server;
	    start = osi_Time();
	    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
	    code = RXAFS_FetchStatus(tc->id,
				     (struct AFSFid *)&afid->Fid,
				     Outsp, &CallBack, &tsync);
	}
	(afs_Analyze(tc, code, afid, areq,
		     AFS_STATS_FS_RPCIDX_FETCHSTATUS,
		     SHARED_LOCK, (struct cell *)0));
    if (!code) {
	afs_ProcessFS(avc, Outsp, areq);
	volp = afs_GetVolume(afid, areq, READ_LOCK);
	ObtainWriteLock(&afs_xcbhash, 469);
	avc->states |= CTruth;
	if (avc->callback /* check for race */) {
	    if (CallBack.ExpirationTime != 0) {
		avc->cbExpires = CallBack.ExpirationTime + start;
		avc->states |= CStatd;
		avc->states &= ~CBulkFetching;
		afs_QueueCallback(avc, CBHash(CallBack.ExpirationTime), volp);
	    }
	    else if (avc->states & CRO)
	    {	/* ordinary callback on a read-only volume -- AFS 3.2 style */
		avc->cbExpires = 3600 + start;
		avc->states |= CStatd;
		avc->states &= ~CBulkFetching;
		afs_QueueCallback(avc, CBHash(3600), volp);
	    }
	    else {
		afs_DequeueCallback(avc);
		avc->callback = (struct server *)0;
		avc->states &= ~(CStatd | CUnique);
		if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
		    osi_dnlc_purgedp(avc);	/* if it (could be) a directory */
	    }
	} else {
	    afs_DequeueCallback(avc);
	    avc->callback = (struct server *)0;
	    avc->states &= ~(CStatd | CUnique);
	    if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
		osi_dnlc_purgedp(avc);	/* if it (could be) a directory */
	}
	ReleaseWriteLock(&afs_xcbhash);
	if (volp)
	    afs_PutVolume(volp, READ_LOCK);
    }
    else {	/* used to undo the local callback, but that's too extreme.
		 * There are plenty of good reasons that fetchstatus might return
		 * an error, such as EPERM.  If we have the vnode cached, statd,
		 * with callback, might as well keep track of the fact that we
		 * don't have access...
		 */
	if (code == EPERM || code == EACCES) {
	    struct axscache *ac;

	    if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
		ac->axess = 0;
	    else	/* not found, add a new one if possible */
		afs_AddAxs(avc->Access, areq->uid, 0);
	}
    }
    return code;
}
2280 * Stuff some information into the vcache for the given file.
2283 * afid : File in question.
2284 * OutStatus : Fetch status on the file.
2285 * CallBack : Callback info.
2286 * tc : RPC connection involved.
2287 * areq : vrequest involved.
2290 * Nothing interesting.
void
afs_StuffVcache(afid, OutStatus, CallBack, tc, areq)
    register struct VenusFid *afid;
    struct AFSFetchStatus *OutStatus;
    struct AFSCallBack *CallBack;
    register struct conn *tc;
    struct vrequest *areq;
{ /*afs_StuffVcache*/

    register afs_int32 code, i, newvcache = 0;
    register struct vcache *tvc;
    struct AFSVolSync tsync;
    struct volume *tvp;
    struct axscache *ac;
    afs_int32 retry;

    AFS_STATCNT(afs_StuffVcache);
#ifdef IFS_VCACHECOUNT
    ifs_gvcachecall++;  /* assumed counter name; compiled only when IFS_VCACHECOUNT is defined */
#endif

  loop:
    ObtainSharedLock(&afs_xvcache, 8);

    tvc = afs_FindVCache(afid, 0, 0, &retry, DO_VLRU /* no stats */);
    if (tvc && retry) {
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
        ReleaseSharedLock(&afs_xvcache);
        spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
        goto loop;
#endif
    }
    if (!tvc) {
        /* no cache entry, better grab one */
        UpgradeSToWLock(&afs_xvcache, 25);
        tvc = afs_NewVCache(afid, (struct server *)0, 1, WRITE_LOCK);
        newvcache = 1;
        ConvertWToSLock(&afs_xvcache);
    }

    ReleaseSharedLock(&afs_xvcache);
    ObtainWriteLock(&tvc->lock, 58);

    tvc->states &= ~CStatd;
    if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
        osi_dnlc_purgedp(tvc);          /* if it (could be) a directory */

    /* Is it always appropriate to throw away all the access rights? */
    afs_FreeAllAxs(&(tvc->Access));

    /* Copy useful per-volume info */
    tvp = afs_GetVolume(afid, areq, READ_LOCK);
    if (tvp) {
        if (newvcache && (tvp->states & VForeign)) tvc->states |= CForeign;
        if (tvp->states & VRO) tvc->states |= CRO;
        if (tvp->states & VBackup) tvc->states |= CBackup;
        /*
         * Now, copy ".." entry back out of volume structure, if
         * necessary.
         */
        if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
            if (!tvc->mvid)
                tvc->mvid = (struct VenusFid *)
                    osi_AllocSmallSpace(sizeof(struct VenusFid));
            *tvc->mvid = tvp->dotdot;
        }
    }
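    /*
     * (For reference, not stated here: mvstat is 0 for an ordinary file,
     * 1 for a mount point, and 2 for the root of a volume, which is why
     * only mvstat == 2 entries inherit their ".." from the volume
     * structure above.)
     */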
    /* store the stat on the file */
    afs_RemoveVCB(afid);
    afs_ProcessFS(tvc, OutStatus, areq);
    tvc->callback = tc->srvr->server;

    /* we use osi_Time twice below.  Ideally, we would use the time at which
     * the FetchStatus call began, instead, but we don't have it here.  So we
     * make do with "now".  In the CRO case, it doesn't really matter.  In
     * the other case, we hope that the difference between "now" and when the
     * call actually began execution on the server won't be larger than the
     * padding which the server keeps.  Subtract 1 second anyway, to be on
     * the safe side.  We can't subtract more because we don't know how big
     * ExpirationTime is.  Possible consistency problems may arise if the call
     * timeout period becomes longer than the server's expiration padding. */
    ObtainWriteLock(&afs_xcbhash, 470);
    if (CallBack->ExpirationTime != 0) {
        tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
        tvc->states |= CStatd;
        tvc->states &= ~CBulkFetching;
        afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
    }
    else if (tvc->states & CRO) {
        /* old-fashioned AFS 3.2 style */
        tvc->cbExpires = 3600 + osi_Time(); /*XXX*/
        tvc->states |= CStatd;
        tvc->states &= ~CBulkFetching;
        afs_QueueCallback(tvc, CBHash(3600), tvp);
    }
    else {
        afs_DequeueCallback(tvc);
        tvc->callback = (struct server *)0;
        tvc->states &= ~(CStatd | CUnique);
        if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
            osi_dnlc_purgedp(tvc);      /* if it (could be) a directory */
    }
    ReleaseWriteLock(&afs_xcbhash);
    if (tvp)
        afs_PutVolume(tvp, READ_LOCK);

    /* look in per-pag cache */
    if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
        ac->axess = OutStatus->CallerAccess;    /* substitute pags */
    else                /* not found, add a new one if possible */
        afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);

    ReleaseWriteLock(&tvc->lock);
    afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
               ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32, tvc->cbExpires,
               ICL_TYPE_INT32, tvc->cbExpires - osi_Time());
    /*
     * Release ref count... hope this guy stays around...
     */
    afs_PutVCache(tvc, WRITE_LOCK);
} /*afs_StuffVcache*/
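/*
 * Note: afs_StuffVcache is, in effect, the no-RPC half of afs_FetchStatus;
 * the caller already holds status and callback information obtained from
 * some other RPC, and this routine merely records it in the vcache.
 */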
/*
 * afs_PutVCache
 *
 * Description:
 *	Decrements the reference count on a cache entry.
 *
 * Parameters:
 *	avc : Pointer to the cache entry to decrement.
 *
 * Environment:
 *	Nothing interesting.
 */
void
afs_PutVCache(avc, locktype)
    register struct vcache *avc;
    afs_int32 locktype;
{ /*afs_PutVCache*/

    AFS_STATCNT(afs_PutVCache);
    /*
     * Can we use a read lock here?
     */
    ObtainReadLock(&afs_xvcache);
    AFS_FAST_RELE(avc);
    ReleaseReadLock(&afs_xvcache);
} /*afs_PutVCache*/
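/*
 * (A read lock appears to suffice in afs_PutVCache because only the
 * reference count is manipulated; the hash chains and the VLRU are left
 * untouched, and paths that must see a stable reference count take
 * afs_xvcache at the write level instead.)
 */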
/*
 * afs_FindVCache
 *
 * Description:
 *	Find a vcache entry given a fid.
 *
 * Parameters:
 *	afid : Pointer to the fid whose cache entry we desire.
 *	retry: (SGI-specific) tell the caller to drop the lock on xvcache,
 *	       unlock the vnode, and try again.
 *	flags: bit 1 to specify whether to compute hit statistics.  Not
 *	       set if FindVCache is called as part of internal bookkeeping.
 *
 * Environment:
 *	Must be called with the afs_xvcache lock at least held at
 *	the read level.  In order to do the VLRU adjustment, the xvcache lock
 *	must be shared -- we upgrade it here.
 */
struct vcache *afs_FindVCache(struct VenusFid *afid, afs_int32 lockit,
                              afs_int32 locktype, afs_int32 *retry,
                              afs_int32 flag)
{
    register struct vcache *tvc;
    afs_int32 i;

    AFS_STATCNT(afs_FindVCache);

    i = VCHash(afid);
    for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
        if (FidMatches(afid, tvc)) {
#ifdef AFS_OSF_ENV
            /* Grab this vnode, possibly reactivating from the free list */
            int vg;
            AFS_GUNLOCK();
            vg = vget(AFSTOV(tvc));
            AFS_GLOCK();
            if (vg)
                continue;
#endif /* AFS_OSF_ENV */
            break;
        }
    }

    /* should I have a read lock on the vnode here? */
    if (tvc) {
        if (retry) *retry = 0;
#if !defined(AFS_OSF_ENV)
        osi_vnhold(tvc, retry);         /* already held, above */
        if (retry && *retry)
            return 0;
#endif
        /*
         * Only move to front of vlru if we have proper vcache locking.
         */
        if (flag & DO_VLRU) {
            if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
                refpanic("FindVC VLRU inconsistent1");
            }
            if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
                refpanic("FindVC VLRU inconsistent2");
            }
            if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
                refpanic("FindVC VLRU inconsistent3");
            }
            UpgradeSToWLock(&afs_xvcache, 26);
            QRemove(&tvc->vlruq);
            QAdd(&VLRU, &tvc->vlruq);
            ConvertWToSLock(&afs_xvcache);
            if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
                refpanic("FindVC VLRU inconsistent4");
            }
            if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
                refpanic("FindVC VLRU inconsistent5");
            }
            if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
                refpanic("FindVC VLRU inconsistent6");
            }
        }
    }
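    /*
     * The paired checks around QRemove/QAdd above are sanity checks, not
     * part of the algorithm: a hit moves the entry to the front of the
     * VLRU so the coldest entries drift toward the tail, and any torn
     * queue pointer seen before or after the splice indicates corruption,
     * which refpanic() reports (panicking unless afs_norefpanic is set).
     */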
    if (flag & DO_STATS) {
        if (tvc) afs_stats_cmperf.vcacheHits++;
        else afs_stats_cmperf.vcacheMisses++;
        if (afid->Cell == LOCALCELL)
            afs_stats_cmperf.vlocalAccesses++;
        else
            afs_stats_cmperf.vremoteAccesses++;
    }

#ifdef AFS_LINUX22_ENV
    if (tvc && (tvc->states & CStatd))
        vcache2inode(tvc);              /* mainly to reset i_nlink */
#endif

#ifdef AFS_DARWIN_ENV
    if (tvc)
        osi_VM_Setup(tvc);
#endif
    return tvc;
} /*afs_FindVCache*/
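/*
 * Illustrative caller pattern (a sketch; the lock-trace id 999 is made
 * up): afs_FindVCache must be entered with afs_xvcache held at least
 * shared, since a DO_VLRU hit upgrades to the write level internally:
 *
 *      ObtainSharedLock(&afs_xvcache, 999);
 *      tvc = afs_FindVCache(&afid, 0, 0, &retry, DO_STATS | DO_VLRU);
 *      ReleaseSharedLock(&afs_xvcache);
 */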
/*
 * afs_NFSFindVCache
 *
 * Description:
 *	Find a vcache entry given a fid.  Does a wildcard match on what we
 *	have for the fid.  If more than one entry, don't return anything.
 *
 * Parameters:
 *	avcp : Fill in pointer if we found one and only one.
 *	afid : Pointer to the fid whose cache entry we desire.
 *	retry: (SGI-specific) tell the caller to drop the lock on xvcache,
 *	       unlock the vnode, and try again.
 *	flags: bit 1 to specify whether to compute hit statistics.  Not
 *	       set if FindVCache is called as part of internal bookkeeping.
 *
 * Environment:
 *	Must be called with the afs_xvcache lock at least held at
 *	the read level.  In order to do the VLRU adjustment, the xvcache lock
 *	must be shared -- we upgrade it here.
 *
 * Return value:
 *	Number of matches found.
 */
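/*
 * (Background, not stated above: the NFS translator squeezes an AFS fid
 * into a fixed-size NFS file handle, so only the low-order bits of the
 * Vnode and Unique fields survive the round trip.  That is apparently
 * why the match below masks Vnode to 16 bits and Unique to 24 bits
 * instead of comparing the full values.)
 */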
int afs_duplicate_nfs_fids = 0;

afs_int32 afs_NFSFindVCache(avcp, afid, lockit)
    struct vcache **avcp;
    struct VenusFid *afid;
    afs_int32 lockit;
{ /*afs_NFSFindVCache*/

    register struct vcache *tvc;
    afs_int32 i;
    afs_int32 retry = 0;
    afs_int32 count = 0;
    struct vcache *found_tvc = NULL;

    AFS_STATCNT(afs_FindVCache);

  loop:
    ObtainSharedLock(&afs_xvcache, 331);

    i = VCHash(afid);
    for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
        /* Match only on what we have.... */
        if (((tvc->fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
            && (tvc->fid.Fid.Volume == afid->Fid.Volume)
            && ((tvc->fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
            && (tvc->fid.Cell == afid->Cell)) {
#ifdef AFS_OSF_ENV
            /* Grab this vnode, possibly reactivating from the free list */
            int vg;
            AFS_GUNLOCK();
            vg = vget(AFSTOV(tvc));
            AFS_GLOCK();
            if (vg) {
                /* This vnode no longer exists. */
                continue;
            }
#endif /* AFS_OSF_ENV */
            count++;
            if (found_tvc) {
                /* Drop our reference counts. */
#ifdef AFS_OSF_ENV
                vrele(AFSTOV(tvc));
                vrele(AFSTOV(found_tvc));
#endif
                afs_duplicate_nfs_fids++;
                ReleaseSharedLock(&afs_xvcache);
                *avcp = NULL;   /* more than one match: return nothing */
                return count;
            }
            found_tvc = tvc;
        }
    }

    tvc = found_tvc;
    /* should I have a read lock on the vnode here? */
    if (tvc) {
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
        osi_vnhold(tvc, &retry);
        if (retry) {
            count = 0;
            found_tvc = (struct vcache *)0;
            ReleaseSharedLock(&afs_xvcache);
            spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
            goto loop;
        }
#else
#if !defined(AFS_OSF_ENV)
        osi_vnhold(tvc, (int *)0);      /* already held, above */
#endif
#endif
        /*
         * We obtained the xvcache lock above.
         */
        if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
            refpanic("FindVC VLRU inconsistent1");
        }
        if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
            refpanic("FindVC VLRU inconsistent2");
        }
        if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
            refpanic("FindVC VLRU inconsistent3");
        }
        UpgradeSToWLock(&afs_xvcache, 568);
        QRemove(&tvc->vlruq);
        QAdd(&VLRU, &tvc->vlruq);
        ConvertWToSLock(&afs_xvcache);
        if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
            refpanic("FindVC VLRU inconsistent4");
        }
        if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
            refpanic("FindVC VLRU inconsistent5");
        }
        if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
            refpanic("FindVC VLRU inconsistent6");
        }
    }

    if (tvc) afs_stats_cmperf.vcacheHits++;
    else afs_stats_cmperf.vcacheMisses++;
    if (afid->Cell == LOCALCELL)
        afs_stats_cmperf.vlocalAccesses++;
    else
        afs_stats_cmperf.vremoteAccesses++;

    *avcp = tvc;        /* May be null */

    ReleaseSharedLock(&afs_xvcache);
    return (tvc ? 1 : 0);
} /*afs_NFSFindVCache*/
/*
 * Initialize vcache related variables
 */
void afs_vcacheInit(int astatSize)
{
    register struct vcache *tvp;
    int i;

#if defined(AFS_OSF_ENV)
    if (!afs_maxvcount) {
#if defined(AFS_OSF30_ENV)
        afs_maxvcount = max_vnodes / 2; /* limit ourselves to half the total */
#else
        afs_maxvcount = nvnode / 2;     /* limit ourselves to half the total */
#endif
        if (astatSize < afs_maxvcount) {
            afs_maxvcount = astatSize;
        }
    }
#else /* AFS_OSF_ENV */
    freeVCList = (struct vcache *)0;
#endif

    RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
    LOCK_INIT(&afs_xvcb, "afs_xvcb");

#if !defined(AFS_OSF_ENV)
    /* Allocate and thread the struct vcache entries */
    tvp = (struct vcache *) afs_osi_Alloc(astatSize * sizeof(struct vcache));
    memset((char *)tvp, 0, sizeof(struct vcache) * astatSize);

    Initial_freeVCList = tvp;
    freeVCList = &(tvp[0]);
    for (i = 0; i < astatSize - 1; i++) {
        tvp[i].nextfree = &(tvp[i+1]);
    }
    tvp[astatSize-1].nextfree = (struct vcache *)0;
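    /*
     * At this point the preallocated entries form a simple singly-linked
     * free list, tvp[0] -> tvp[1] -> ... -> tvp[astatSize-1] -> NULL,
     * with freeVCList pointing at the head; new vcaches are later popped
     * off this list rather than allocated one at a time.
     */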
#ifdef AFS_AIX32_ENV
    pin((char *)tvp, astatSize * sizeof(struct vcache));        /* XXX */
#endif
#endif /* !AFS_OSF_ENV */

#if defined(AFS_SGI_ENV)
    for (i = 0; i < astatSize; i++) {
        char name[METER_NAMSZ];
        struct vcache *tvc = &tvp[i];

        tvc->v.v_number = ++afsvnumbers;
        tvc->vc_rwlockid = OSI_NO_LOCKID;
        initnsema(&tvc->vc_rwlock, 1, makesname(name, "vrw", tvc->v.v_number));
#ifndef AFS_SGI53_ENV
        initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
#endif
#ifndef AFS_SGI62_ENV
        initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
#endif /* AFS_SGI62_ENV */
    }
#endif /* AFS_SGI_ENV */
}
void shutdown_vcache(void)
{
    int i;
    struct afs_cbr *tsp, *nsp;

    /*
     * XXX We may miss some of the vcaches: when there are no free vcache
     * entries and all the entries are active, we allocate an additional
     * one; admittedly, that almost never occurs.
     */
#if !defined(AFS_OSF_ENV)
    afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
#endif
#ifdef AFS_AIX32_ENV
    unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
#endif

    {
        register struct afs_q *tq, *uq;
        register struct vcache *tvc;
        for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
            tvc = QTOV(tq);
            uq = QPrev(tq);
            if (tvc->mvid) {
                osi_FreeSmallSpace(tvc->mvid);
                tvc->mvid = (struct VenusFid *)0;
            }
#ifdef AFS_AIX_ENV
            aix_gnode_rele(AFSTOV(tvc));
#endif
            if (tvc->linkData) {
                afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
                tvc->linkData = 0;
            }
        }
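        /*
         * The VLRU sweep above only reaches entries that are still queued;
         * the hash-table sweep below picks up everything else.
         */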
        /*
         * Also free the remaining ones in the Cache
         */
        for (i = 0; i < VCSIZE; i++) {
            for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
                if (tvc->mvid) {
                    osi_FreeSmallSpace(tvc->mvid);
                    tvc->mvid = (struct VenusFid *)0;
                }
#ifdef AFS_AIX_ENV
                if (tvc->v.v_gnode)
                    afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
#ifdef AFS_AIX32_ENV
                if (tvc->segid) {
                    AFS_GUNLOCK();
                    vms_delete(tvc->segid);
                    AFS_GLOCK();
                    tvc->segid = tvc->vmh = NULL;
                    if (VREFCOUNT(tvc)) osi_Panic("flushVcache: vm race");
                }
                if (tvc->credp) {
                    crfree(tvc->credp);
                    tvc->credp = NULL;
                }
#endif
#endif
#if defined(AFS_SUN5_ENV)
                if (tvc->credp) {
                    crfree(tvc->credp);
                    tvc->credp = NULL;
                }
#endif
                if (tvc->linkData) {
                    afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
                    tvc->linkData = 0;
                }

                afs_FreeAllAxs(&(tvc->Access));
            }
        }
    }

    /*
     * Free any leftover callback queue
     */
    for (tsp = afs_cbrSpace; tsp; tsp = nsp) {
        nsp = tsp->next;
        afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
    }
    afs_cbrSpace = 0;

#if !defined(AFS_OSF_ENV)
    freeVCList = Initial_freeVCList = 0;
#endif
    RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
    LOCK_INIT(&afs_xvcb, "afs_xvcb");
}
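/*
 * (The two locks are re-initialized at the end of shutdown_vcache,
 * presumably so that a subsequent afs_vcacheInit finds them in a clean
 * state even if the shutdown interrupted lock holders.)
 */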