/*
 * Copyright 2000, International Business Machines Corporation and others.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 *
 * Implements (among others):
 *	afs_FlushActiveVcaches
 */

#include <afsconfig.h>
#include "afs/param.h"

#include "afs/sysincludes.h"	/* Standard vendor system headers */
#include "afsincludes.h"	/* AFS-based standard headers */
#include "afs/afs_stats.h"
#include "afs/afs_cbqueue.h"
#include "afs/afs_osidnlc.h"

afs_int32 afs_maxvcount = 0;	/* max number of vcache entries */
afs_int32 afs_vcount = 0;	/* number of vcache entries in use now */
#endif /* AFS_OSF_ENV */
#endif /* AFS_SGI64_ENV */

/* Exported variables */
afs_rwlock_t afs_xvcache;	/* Lock: alloc new stat cache entries */
afs_lock_t afs_xvcb;		/* Lock: fids on which there are callbacks */
struct vcache *freeVCList;	/* Free list for stat cache entries */
struct vcache *Initial_freeVCList;	/* Initial list for above */
struct afs_q VLRU;		/* vcache LRU */
afs_int32 vcachegen = 0;
unsigned int afs_paniconwarn = 0;
struct vcache *afs_vhashT[VCSIZE];
static struct afs_cbr *afs_cbrHashT[CBRSIZE];
afs_int32 afs_bulkStatsLost;
int afs_norefpanic = 0;

/* Forward declarations */
static afs_int32 afs_QueueVCB(struct vcache *avc);

/*
 * Generate an index into the hash table for a given Fid.
 */
static afs_int32
afs_HashCBRFid(struct AFSFid *fid)
{
    return (fid->Volume + fid->Vnode + fid->Unique) % CBRSIZE;
}

/*
 * Insert a CBR entry into the hash table.
 * Must be called with afs_xvcb held.
 */
static void
afs_InsertHashCBR(struct afs_cbr *cbr)
{
    int slot = afs_HashCBRFid(&cbr->fid);

    cbr->hash_next = afs_cbrHashT[slot];
    if (afs_cbrHashT[slot])
	afs_cbrHashT[slot]->hash_pprev = &cbr->hash_next;
    cbr->hash_pprev = &afs_cbrHashT[slot];
    afs_cbrHashT[slot] = cbr;
}
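
/*
 * Illustrative sketch (not part of the build): how a CBR entry is hashed
 * and chained.  A fid of Volume 536870912, Vnode 17, Unique 3 lands in
 * slot (536870912 + 17 + 3) % CBRSIZE and is pushed on the front of that
 * chain.  hash_pprev points at whatever pointer points at us, so
 * unlinking (see afs_FreeCBR below) needs no scan of the chain:
 *
 *	*(cbr->hash_pprev) = cbr->hash_next;
 *	if (cbr->hash_next)
 *	    cbr->hash_next->hash_pprev = cbr->hash_pprev;
 */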
/*
 * afs_FlushVCache
 *
 * Description:
 *	Flush the given vcache entry.
 *
 * Parameters:
 *	avc   : Pointer to vcache entry to flush.
 *	slept : Pointer to int; set to 1 if we sleep/drop locks, 0 if we don't.
 *
 * Environment:
 *	afs_xvcache lock must be held for writing upon entry to
 *	prevent people from changing the vrefCount field, and to
 *	protect the lruq and hnext fields.
 * LOCK: afs_FlushVCache afs_xvcache W
 * REFCNT: vcache ref count must be zero on entry except for osf1
 * RACE: lock is dropped and reobtained, permitting race in caller
 */
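/*
 * Caller sketch (illustrative, mirroring the VLRU scans later in this
 * file): because afs_FlushVCache may drop and retake afs_xvcache, a
 * caller walking a list must restart the walk whenever *slept comes
 * back set:
 *
 *	int fv_slept;
 *	code = afs_FlushVCache(tvc, &fv_slept);
 *	if (fv_slept)
 *	    goto restart;	// re-scan; the list may have changed
 */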
int
afs_FlushVCache(struct vcache *avc, int *slept)
{				/*afs_FlushVCache */

    register afs_int32 i, code;
    register struct vcache **uvc, *wvc;

    AFS_STATCNT(afs_FlushVCache);
    afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_INT32, avc->states);

    VN_LOCK(AFSTOV(avc));

    code = osi_VM_FlushVCache(avc, slept);
    if (code)
	goto bad;

    if (avc->states & CVFlushed) {
	code = EBUSY;
	goto bad;
    }
    if (avc->nextfree || !avc->vlruq.prev || !avc->vlruq.next) {	/* qv afs.h */
	refpanic("LRU vs. Free inconsistency");
    }
    avc->states |= CVFlushed;
    /* pull the entry out of the lruq and put it on the free list */
    QRemove(&avc->vlruq);
    avc->vlruq.prev = avc->vlruq.next = (struct afs_q *)0;

    /* keep track of # of files that we bulk stat'd, but never used
     * before they got recycled.
     */
    if (avc->states & CBulkStat)
	afs_bulkStatsLost++;

    /* remove entry from the hash chain */
    i = VCHash(&avc->fid);
    uvc = &afs_vhashT[i];
    for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
	if (avc == wvc) {
	    *uvc = avc->hnext;
	    avc->hnext = (struct vcache *)NULL;
	    break;
	}
    }
    if (!wvc)
	osi_Panic("flushvcache");	/* not in correct hash bucket */

    if (avc->mvid)
	osi_FreeSmallSpace(avc->mvid);
    avc->mvid = (struct VenusFid *)0;
    if (avc->linkData) {
	afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
	avc->linkData = NULL;
    }
#if defined(AFS_XBSD_ENV)
    /* OK, there are no internal vrefCounts, so there shouldn't
     * be any more refs here. */
    avc->v->v_data = NULL;	/* remove from vnode */
    avc->v = NULL;		/* also drop the ptr to vnode */
#endif
    afs_FreeAllAxs(&(avc->Access));

    /* we can't really give back callbacks on RO files, since the
     * server only tracks them on a per-volume basis, and we don't
     * know whether we still have some other files from the same
     * volume.
     */
    if ((avc->states & CRO) == 0 && avc->callback) {
	afs_QueueVCB(avc);
    }
    ObtainWriteLock(&afs_xcbhash, 460);
    afs_DequeueCallback(avc);	/* remove it from queued callbacks list */
    avc->states &= ~(CStatd | CUnique);
    ReleaseWriteLock(&afs_xcbhash);
    afs_symhint_inval(avc);
    if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
	osi_dnlc_purgedp(avc);	/* if it (could be) a directory */
    else
	osi_dnlc_purgevp(avc);

    /*
     * Next, keep track of which vnodes we've deleted for create's
     * optimistic synchronization algorithm
     */
    afs_allZaps++;
    if (avc->fid.Fid.Vnode & 1)
	afs_oddZaps++;
    else
	afs_evenZaps++;

#if !defined(AFS_OSF_ENV)
    /* put the entry in the free list */
    avc->nextfree = freeVCList;
    freeVCList = avc;
    if (avc->vlruq.prev || avc->vlruq.next) {
	refpanic("LRU vs. Free inconsistency");
    }
#else
    /* This should put it back on the vnode free list since usecount is 1 */
    if (VREFCOUNT(avc) > 0) {
	VN_UNLOCK(AFSTOV(avc));
	AFS_RELE(AFSTOV(avc));
    } else {
	if (afs_norefpanic) {
	    printf("flush vc refcnt < 1");
	    afs_norefpanic++;
	    (void)vgone(avc, VX_NOSLEEP, NULL);
	    VN_UNLOCK(AFSTOV(avc));
	} else
	    osi_Panic("flush vc refcnt < 1");
    }
#endif /* AFS_OSF_ENV */
    avc->states |= CVFlushed;
    return 0;

  bad:
    VN_UNLOCK(AFSTOV(avc));
    return code;

}				/*afs_FlushVCache */

/*
 * afs_InactiveVCache
 *
 * The core of the inactive vnode op for all but IRIX.
 */
void
afs_InactiveVCache(struct vcache *avc, struct AFS_UCRED *acred)
{
    AFS_STATCNT(afs_inactive);
    if (avc->states & CDirty) {
	/* we can't keep trying to push back dirty data forever.  Give up. */
	afs_InvalidateAllSegments(avc);	/* turns off dirty bit */
    }
    avc->states &= ~CMAPPED;	/* mainly used by SunOS 4.0.x */
    avc->states &= ~CDirty;	/* Turn it off */
    if (avc->states & CUnlinked) {
	if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
	    avc->states |= CUnlinkedDel;
	    return;
	}
	afs_remunlink(avc, 1);	/* ignore any return code */
    }
}

/*
 * afs_AllocCBR
 *
 * Description: allocate a callback return structure from the
 * free list and return it.
 *
 * Env: The alloc and free routines are both called with the afs_xvcb lock
 * held, so we don't have to worry about blocking in osi_Alloc.
 */
static struct afs_cbr *afs_cbrSpace = 0;

static struct afs_cbr *
afs_AllocCBR(void)
{
    register struct afs_cbr *tsp;
    int i;

    while (!afs_cbrSpace) {
	if (afs_stats_cmperf.CallBackAlloced >= 2) {
	    /* don't allocate more than 2 * AFS_NCBRS for now */
	    afs_FlushVCBs(0);
	    afs_stats_cmperf.CallBackFlushes++;
	} else {
	    /* try allocating */
	    tsp =
		(struct afs_cbr *)afs_osi_Alloc(AFS_NCBRS *
						sizeof(struct afs_cbr));
	    for (i = 0; i < AFS_NCBRS - 1; i++) {
		tsp[i].next = &tsp[i + 1];
	    }
	    tsp[AFS_NCBRS - 1].next = 0;
	    afs_cbrSpace = tsp;
	    afs_stats_cmperf.CallBackAlloced++;
	}
    }
    tsp = afs_cbrSpace;
    afs_cbrSpace = tsp->next;
    return tsp;
}
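
/*
 * Illustrative pairing (not part of the build): both routines run under
 * the xvcb write lock, so a queue/dequeue cycle looks like:
 *
 *	MObtainWriteLock(&afs_xvcb, ...);
 *	tcbp = afs_AllocCBR();
 *	... fill in tcbp->fid, link it onto the server's cbrs list ...
 *	MReleaseWriteLock(&afs_xvcb);
 *
 * and later, still under the same lock, afs_FreeCBR(tcbp) unlinks it
 * from both the per-server list and the fid hash and returns it to
 * afs_cbrSpace.
 */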
/*
 * afs_FreeCBR
 *
 * Description: free a callback return structure, removing it from all
 * lists.
 *
 * Parameters:
 *	asp -- the address of the structure to free.
 *
 * Environment: the xvcb lock is held over these calls.
 */
static int
afs_FreeCBR(register struct afs_cbr *asp)
{
    *(asp->pprev) = asp->next;
    if (asp->next)
	asp->next->pprev = asp->pprev;

    *(asp->hash_pprev) = asp->hash_next;
    if (asp->hash_next)
	asp->hash_next->hash_pprev = asp->hash_pprev;

    asp->next = afs_cbrSpace;
    afs_cbrSpace = asp;
    return 0;
}

/*
 * afs_FlushVCBs
 *
 * Description: flush all queued callbacks to all servers.
 *
 * Parameters: none.
 *
 * Environment: holds xvcb lock over RPC to guard against race conditions
 *	when a new callback is granted for the same file later on.
 */
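/*
 * Batching sketch (illustrative): callbacks are returned to each server
 * in blocks of at most AFS_MAXCBRSCALL fids per RXAFS_GiveUpCallBacks
 * call.  A server with, say, 120 queued CBRs and AFS_MAXCBRSCALL == 50
 * (the real constant may differ) would be drained in three RPCs of 50,
 * 50, and 20 fids, each carrying the single CB_EXCLUSIVE callback
 * descriptor built below.
 */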
afs_int32
afs_FlushVCBs(afs_int32 lockit)
{
    struct AFSFid *tfids;
    struct AFSCallBack callBacks[1];
    struct AFSCBFids fidArray;
    struct AFSCBs cbArray;
    afs_int32 code;
    struct afs_cbr *tcbrp;
    int tcount;
    struct server *tsp;
    int i;
    struct vrequest treq;
    struct conn *tc;
    int safety1, safety2, safety3;
    XSTATS_DECLS if ((code = afs_InitReq(&treq, afs_osi_credp)))
	return code;
    treq.flags |= O_NONBLOCK;
    tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);

    if (lockit)
	MObtainWriteLock(&afs_xvcb, 273);
    ObtainReadLock(&afs_xserver);
    for (i = 0; i < NSERVERS; i++) {
	for (safety1 = 0, tsp = afs_servers[i];
	     tsp && safety1 < afs_totalServers + 10;
	     tsp = tsp->next, safety1++) {
	    /* don't have any data, so won't send any requests */
	    if (tsp->cbrs == (struct afs_cbr *)0)
		continue;

	    /* otherwise, grab a block of AFS_MAXCBRSCALL from the list
	     * and make an RPC, over and over again.
	     */
	    tcount = 0;		/* number found so far */
	    for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
		if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
		    /* if buffer is full, or we've queued all we're going
		     * to from this server, we should flush out the
		     * callbacks.
		     */
		    fidArray.AFSCBFids_len = tcount;
		    fidArray.AFSCBFids_val = (struct AFSFid *)tfids;
		    cbArray.AFSCBs_len = 1;
		    cbArray.AFSCBs_val = callBacks;
		    memset(&callBacks[0], 0, sizeof(callBacks[0]));
		    callBacks[0].CallBackType = CB_EXCLUSIVE;
		    for (safety3 = 0; safety3 < MAXHOSTS * 2; safety3++) {
			tc = afs_ConnByHost(tsp, tsp->cell->fsport,
					    tsp->cell->cellNum, &treq, 0,
					    SHARED_LOCK);
			if (tc) {
			    XSTATS_START_TIME
				(AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
			    RX_AFS_GUNLOCK();
			    code =
				RXAFS_GiveUpCallBacks(tc->id, &fidArray,
						      &cbArray);
			    RX_AFS_GLOCK();
			    XSTATS_END_TIME;
			} else
			    code = -1;
			if (!afs_Analyze
			    (tc, code, 0, &treq,
			     AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS, SHARED_LOCK,
			     NULL))
			    break;
		    }
		    /* ignore return code, since callbacks may have
		     * been returned anyway, we shouldn't leave them
		     * around to be returned again.
		     *
		     * Next, see if we are done with this server, and if so,
		     * break to deal with the next one.
		     */
		    if (!tsp->cbrs)
			break;
		    tcount = 0;
		}
		/* if to flush full buffer */
		/* if we make it here, we have an entry at the head of cbrs,
		 * which we should copy to the file ID array and then free.
		 */
		tcbrp = tsp->cbrs;
		tfids[tcount++] = tcbrp->fid;

		/* Freeing the CBR will unlink it from the server's CBR list */
		afs_FreeCBR(tcbrp);
	    }			/* while loop for this one server */
	    if (safety2 > afs_cacheStats) {
		afs_warn("possible internal error afs_flushVCBs (%d)\n",
			 safety2);
	    }
	}			/* for loop for this hash chain */
    }				/* loop through all hash chains */
    if (safety1 > afs_totalServers + 2) {
	afs_warn
	    ("AFS internal error (afs_flushVCBs) (%d > %d), continuing...\n",
	     safety1, afs_totalServers + 2);
	if (afs_paniconwarn)
	    osi_Panic("afs_flushVCBS safety1");
    }

    ReleaseReadLock(&afs_xserver);
    if (lockit)
	MReleaseWriteLock(&afs_xvcb);
    afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
    return 0;
}
/*
 * afs_QueueVCB
 *
 * Description:
 *	Queue a callback on the given fid.
 *
 * Parameters:
 *	avc : vcache entry to queue a callback for.
 *
 * Environment:
 *	Locks the xvcb lock.
 *	Called when the xvcache lock is already held.
 */
static afs_int32
afs_QueueVCB(struct vcache *avc)
{
    struct server *tsp;
    struct afs_cbr *tcbp;

    AFS_STATCNT(afs_QueueVCB);
    /* The callback is really just a struct server ptr. */
    tsp = (struct server *)(avc->callback);

    /* we now have a pointer to the server, so we just allocate
     * a queue entry and queue it.
     */
    MObtainWriteLock(&afs_xvcb, 274);
    tcbp = afs_AllocCBR();
    tcbp->fid = avc->fid.Fid;

    tcbp->next = tsp->cbrs;
    if (tsp->cbrs)
	tsp->cbrs->pprev = &tcbp->next;
    tsp->cbrs = tcbp;
    tcbp->pprev = &tsp->cbrs;

    afs_InsertHashCBR(tcbp);

    /* now release locks and return */
    MReleaseWriteLock(&afs_xvcb);
    return 0;
}
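
/*
 * Flow sketch (illustrative): afs_FlushVCache queues a CBR here when a
 * non-RO entry holding a callback is discarded.  The CBR then sits on
 * the owning server's cbrs list (and in the fid hash) until
 * afs_FlushVCBs batches it into a GiveUpCallBacks RPC, or afs_RemoveVCB
 * cancels it because the fid is being stat'd again.
 */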
/*
 * afs_RemoveVCB
 *
 * Description:
 *	Remove a queued callback for a given Fid.
 *
 * Parameters:
 *	afid: The fid we want cleansed of queued callbacks.
 *
 * Environment:
 *	Locks xvcb and xserver locks.
 *	Typically called with xdcache, xvcache and/or individual vcache
 *	entries locked.
 */
static void
afs_RemoveVCB(struct VenusFid *afid)
{
    int slot;
    struct afs_cbr *cbr, *ncbr;

    AFS_STATCNT(afs_RemoveVCB);
    MObtainWriteLock(&afs_xvcb, 275);

    slot = afs_HashCBRFid(&afid->Fid);
    ncbr = afs_cbrHashT[slot];

    while (ncbr) {
	cbr = ncbr;
	ncbr = cbr->hash_next;

	if (afid->Fid.Volume == cbr->fid.Volume &&
	    afid->Fid.Vnode == cbr->fid.Vnode &&
	    afid->Fid.Unique == cbr->fid.Unique) {
	    afs_FreeCBR(cbr);
	}
    }

    MReleaseWriteLock(&afs_xvcb);
}
#if defined(AFS_LINUX22_ENV) && !defined(AFS_LINUX26_ENV)

static void
__shrink_dcache_parent(struct dentry *parent)
{
    struct dentry *this_parent = parent;
    struct list_head *next;
    LIST_HEAD(afs_dentry_unused);

  repeat:
    next = this_parent->d_subdirs.next;
  resume:
    while (next != &this_parent->d_subdirs) {
	struct list_head *tmp = next;
	struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
	next = tmp->next;
	if (!DCOUNT(dentry)) {
	    list_del(&dentry->d_lru);
	    list_add(&dentry->d_lru, afs_dentry_unused.prev);
	}
	/*
	 * Descend a level if the d_subdirs list is non-empty.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
	    this_parent = dentry;
	    goto repeat;
	}
    }
    /*
     * All done at this level ... ascend and resume the search.
     */
    if (this_parent != parent) {
	next = this_parent->d_child.next;
	this_parent = this_parent->d_parent;
	goto resume;
    }

    for (;;) {
	struct dentry *dentry;
	struct list_head *tmp;

	tmp = afs_dentry_unused.prev;

	if (tmp == &afs_dentry_unused)
	    break;
#ifdef AFS_LINUX24_ENV
	list_del_init(tmp);
#endif /* AFS_LINUX24_ENV */
	dentry = list_entry(tmp, struct dentry, d_lru);

#ifdef AFS_LINUX24_ENV
	/* Unused dentry with a count? */
	if (DCOUNT(dentry))
	    BUG();
#endif
#ifdef AFS_LINUX24_ENV
	list_del_init(&dentry->d_hash);	/* d_drop */
#else
	list_del(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_hash);
#endif /* AFS_LINUX24_ENV */
	dput(dentry);
    }
}
/* afs_TryFlushDcacheChildren -- Shakes loose vcache references held by
 * children of the dentry.
 *
 * LOCKS -- Called with afs_xvcache write locked. Drops and reacquires
 * AFS_GLOCK, so it can call dput, which may call iput, but
 * keeps afs_xvcache exclusively.
 *
 * Tree traversal algorithm from fs/dcache.c: select_parent()
 */
static void
afs_TryFlushDcacheChildren(struct vcache *tvc)
{
    struct inode *ip = AFSTOI(tvc);
    struct dentry *this_parent;
    struct list_head *next;
    struct list_head *cur;
    struct list_head *head = &ip->i_dentry;
    struct dentry *dentry;

#ifndef old_vcache_scheme
  restart:
    cur = head;
    while ((cur = cur->next) != head) {
	dentry = list_entry(cur, struct dentry, d_alias);

	afs_Trace3(afs_iclSetp, CM_TRACE_TRYFLUSHDCACHECHILDREN,
		   ICL_TYPE_POINTER, ip, ICL_TYPE_STRING,
		   dentry->d_parent->d_name.name, ICL_TYPE_STRING,
		   dentry->d_name.name);

	if (!list_empty(&dentry->d_hash) && !list_empty(&dentry->d_subdirs))
	    __shrink_dcache_parent(dentry);

	if (!DCOUNT(dentry)) {
	    AFS_GUNLOCK();
	    DGET(dentry);
#ifdef AFS_LINUX24_ENV
	    list_del_init(&dentry->d_hash);	/* d_drop */
#else
	    list_del(&dentry->d_hash);
	    INIT_LIST_HEAD(&dentry->d_hash);
#endif /* AFS_LINUX24_ENV */
	    dput(dentry);
	    AFS_GLOCK();
	    goto restart;
	}
    }
#else
  restart:
    cur = head;
    while ((cur = cur->next) != head) {
	dentry = list_entry(cur, struct dentry, d_alias);

	afs_Trace3(afs_iclSetp, CM_TRACE_TRYFLUSHDCACHECHILDREN,
		   ICL_TYPE_POINTER, ip, ICL_TYPE_STRING,
		   dentry->d_parent->d_name.name, ICL_TYPE_STRING,
		   dentry->d_name.name);

	if (!DCOUNT(dentry)) {
	    AFS_GUNLOCK();
	    dput(dentry);
	    AFS_GLOCK();
	    goto restart;
	}
    }
#endif
}
#endif /* AFS_LINUX22_ENV && !AFS_LINUX26_ENV */
/*
 * afs_NewVCache
 *
 * Description:
 *	This routine is responsible for allocating a new cache entry
 *	from the free list.  It formats the cache entry and inserts it
 *	into the appropriate hash tables.  It must be called with
 *	afs_xvcache write-locked so as to prevent several processes from
 *	trying to create a new cache entry simultaneously.
 *
 * Parameters:
 *	afid : The file id of the file whose cache entry is being
 *		created.
 */
/* LOCK: afs_NewVCache  afs_xvcache W */
struct vcache *
afs_NewVCache(struct VenusFid *afid, struct server *serverp)
{
    struct vcache *tvc;
    afs_int32 i;
    afs_int32 anumber = VCACHE_FREE;
#ifdef AFS_AIX_ENV
    struct gnode *gnodepnt;
#endif
#ifdef AFS_MACH_ENV
    struct vm_info *vm_info_ptr;
#endif /* AFS_MACH_ENV */
#ifdef AFS_OSF_ENV
    struct vcache *nvc;
#endif /* AFS_OSF_ENV */
    struct afs_q *tq, *uq;
    int code, fv_slept;

    AFS_STATCNT(afs_NewVCache);
#ifdef AFS_OSF_ENV
#ifdef AFS_OSF30_ENV
    if (afs_vcount >= afs_maxvcount) {
#else
    /*
     * If we are using > 33 % of the total system vnodes for AFS vcache
     * entries or we are using the maximum number of vcache entries,
     * then free some.  (if our usage is > 33% we should free some, if
     * our usage is > afs_maxvcount, set elsewhere to 0.5*nvnode,
     * we _must_ free some -- no choice).
     */
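    /*
     * Worked example (illustrative): with nvnode == 3000 the 33% test
     * fires once afs_vcount exceeds 1000 (3 * 1001 > 3000), while
     * afs_maxvcount would have been set to 1500 (0.5 * nvnode); past
     * that second limit, freeing is mandatory rather than advisory.
     */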
    if (((3 * afs_vcount) > nvnode) || (afs_vcount >= afs_maxvcount)) {
#endif /* AFS_OSF30_ENV */
	struct afs_q *tq, *uq;
	int i;

	i = 0;
	for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
	    tvc = QTOV(tq);
	    uq = QPrev(tq);

	    if (tvc->states & CVFlushed)
		refpanic("CVFlushed on VLRU");
	    else if (i++ > afs_maxvcount)
		refpanic("Exceeded pool of AFS vnodes (VLRU cycle?)");
	    else if (QNext(uq) != tq)
		refpanic("VLRU inconsistent");
	    else if (VREFCOUNT(tvc) < 1)
		refpanic("refcnt 0 on VLRU");

	    if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
		&& (tvc->states & CUnlinkedDel) == 0) {
		code = afs_FlushVCache(tvc, &fv_slept);
		if (code == 0)
		    anumber--;
		if (fv_slept) {
		    uq = VLRU.prev;
		    i = 0;
		    continue;	/* start over - may have raced. */
		}
	    }
	}
	if (anumber == VCACHE_FREE) {
	    printf("NewVCache: warning none freed, using %d of %d\n",
		   afs_vcount, afs_maxvcount);
	    if (afs_vcount >= afs_maxvcount) {
		osi_Panic("NewVCache - none freed");
		/* XXX instead of panicking, should do afs_maxvcount++
		 * and magic up another one */
	    }
	}
    }

    if (getnewvnode(MOUNT_AFS, &Afs_vnodeops, &nvc)) {
	/* What should we do ???? */
	osi_Panic("afs_NewVCache: no more vnodes");
    }

    tvc = nvc;
    tvc->nextfree = NULL;
    afs_vcount++;
#else /* AFS_OSF_ENV */
    /* pull out a free cache entry */
    if (!freeVCList) {
	i = 0;
	for (tq = VLRU.prev; (anumber > 0) && (tq != &VLRU); tq = uq) {
	    tvc = QTOV(tq);
	    uq = QPrev(tq);

	    if (tvc->states & CVFlushed) {
		refpanic("CVFlushed on VLRU");
	    } else if (i++ > 2 * afs_cacheStats) {	/* even allowing for a few xallocs... */
		refpanic("Increase -stat parameter of afsd (VLRU cycle?)");
	    } else if (QNext(uq) != tq) {
		refpanic("VLRU inconsistent");
	    }
#ifdef AFS_DARWIN_ENV
	    if (tvc->opens == 0 && ((tvc->states & CUnlinkedDel) == 0)
		&& VREFCOUNT(tvc) == 1 && UBCINFOEXISTS(&tvc->v)) {
		osi_VM_TryReclaim(tvc, &fv_slept);
		if (fv_slept) {
		    uq = VLRU.prev;
		    i = 0;
		    continue;	/* start over - may have raced. */
		}
	    }
#elif defined(AFS_LINUX22_ENV)
	    if (tvc != afs_globalVp && VREFCOUNT(tvc) && tvc->opens == 0) {
#if defined(AFS_LINUX26_ENV)
		d_prune_aliases(AFSTOI(tvc));
#else
		afs_TryFlushDcacheChildren(tvc);
#endif
	    }
#endif

	    if (VREFCOUNT(tvc) == 0 && tvc->opens == 0
		&& (tvc->states & CUnlinkedDel) == 0) {
#if defined(AFS_XBSD_ENV)
		/*
		 * vgone() reclaims the vnode, which calls afs_FlushVCache(),
		 * then it puts the vnode on the free list.
		 * If we don't do this we end up with a cleaned vnode that's
		 * not on the free list.
		 * XXX assume FreeBSD is the same for now.
		 */
		vgone(AFSTOV(tvc));
#else
		code = afs_FlushVCache(tvc, &fv_slept);
		if (code == 0)
		    anumber--;
		if (fv_slept) {
		    uq = VLRU.prev;
		    i = 0;
		    continue;	/* start over - may have raced. */
		}
#endif
	    }
	}			/* for victims */
    }

    if (!freeVCList) {
	/* none free, making one is better than a panic */
	afs_stats_cmperf.vcacheXAllocs++;	/* count in case we have a leak */
	tvc = (struct vcache *)afs_osi_Alloc(sizeof(struct vcache));
#ifdef KERNEL_HAVE_PIN
	pin((char *)tvc, sizeof(struct vcache));	/* XXX */
#endif
#ifdef AFS_MACH_ENV
	/* In case it still comes here we need to fill this */
	tvc->v.v_vm_info = VM_INFO_NULL;
	vm_info_init(tvc->v.v_vm_info);
	/* perhaps we should also do close_flush on non-NeXT mach systems;
	 * who knows; we don't currently have the sources.
	 */
#endif /* AFS_MACH_ENV */
#if defined(AFS_SGI_ENV)
	{
	    char name[METER_NAMSZ];
	    memset(tvc, 0, sizeof(struct vcache));
	    tvc->v.v_number = ++afsvnumbers;
	    tvc->vc_rwlockid = OSI_NO_LOCKID;
	    initnsema(&tvc->vc_rwlock, 1,
		      makesname(name, "vrw", tvc->v.v_number));
#ifndef AFS_SGI53_ENV
	    initnsema(&tvc->v.v_sync, 0,
		      makesname(name, "vsy", tvc->v.v_number));
#endif
#ifndef AFS_SGI62_ENV
	    initnlock(&tvc->v.v_lock,
		      makesname(name, "vlk", tvc->v.v_number));
#endif
	}
#endif /* AFS_SGI_ENV */
    } else {
	tvc = freeVCList;	/* take from free list */
	freeVCList = tvc->nextfree;
	tvc->nextfree = NULL;
    }
#endif /* AFS_OSF_ENV */
#ifdef AFS_MACH_ENV
    vm_info_ptr = tvc->v.v_vm_info;
#endif /* AFS_MACH_ENV */

#if defined(AFS_XBSD_ENV)
    if (tvc->v)
	panic("afs_NewVCache(): free vcache with vnode attached");
#endif

#if !defined(AFS_SGI_ENV) && !defined(AFS_OSF_ENV)
    memset((char *)tvc, 0, sizeof(struct vcache));
#else
    tvc->uncred = 0;
#endif

    RWLOCK_INIT(&tvc->lock, "vcache lock");
#if defined(AFS_SUN5_ENV)
    RWLOCK_INIT(&tvc->vlock, "vcache vlock");
#endif /* defined(AFS_SUN5_ENV) */

#ifdef AFS_MACH_ENV
    tvc->v.v_vm_info = vm_info_ptr;
    tvc->v.v_vm_info->pager = MEMORY_OBJECT_NULL;
#endif /* AFS_MACH_ENV */
#ifdef AFS_OBSD_ENV
    AFS_GUNLOCK();
    afs_nbsd_getnewvnode(tvc);	/* includes one refcount */
    AFS_GLOCK();
    lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
#endif
#ifdef AFS_FBSD_ENV
    {
	struct vnode *vp;

	AFS_GUNLOCK();
#ifdef AFS_FBSD50_ENV
	if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
#else
	if (getnewvnode(VT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
#endif
	    panic("afs getnewvnode");	/* can't happen */
	AFS_GLOCK();
	if (tvc->v != NULL) {
	    /* I'd like to know if this ever happens...
	     * We don't drop global for the rest of this function,
	     * so if we do lose the race, the other thread should
	     * have found the same vnode and finished initializing
	     * the vcache entry.  Is it conceivable that this vcache
	     * entry could be recycled during this interval?  If so,
	     * then there probably needs to be some sort of additional
	     * mutual exclusion (an Embryonic flag would suffice).
	     */
	    printf("afs_NewVCache: lost the race\n");
	    return tvc;
	}
	tvc->v = vp;
	tvc->v->v_data = tvc;
	lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
    }
#endif
    tvc->parentVnode = 0;
    tvc->mvid = NULL;
    tvc->linkData = NULL;
    tvc->cbExpires = 0;
    tvc->opens = 0;
    tvc->execsOrWriters = 0;
    tvc->flockCount = 0;
    tvc->anyAccess = 0;
    tvc->states = 0;
    tvc->last_looker = 0;
    tvc->fid = *afid;
    tvc->asynchrony = -1;
    tvc->vc_error = 0;
    afs_symhint_inval(tvc);
    tvc->hnext = NULL;
    tvc->flushDV.low = tvc->flushDV.high = AFS_MAXDV;
    hzero(tvc->mapDV);
    tvc->truncPos = AFS_NOTRUNC;	/* don't truncate until we need to */
    hzero(tvc->m.DataVersion);	/* in case we copy it into flushDV */
#if defined(AFS_LINUX22_ENV)
    {
	struct inode *ip = AFSTOI(tvc);
	struct address_space *mapping = &ip->i_data;

#if defined(AFS_LINUX26_ENV)
	inode_init_once(ip);
#else
	sema_init(&ip->i_sem, 1);
	INIT_LIST_HEAD(&ip->i_hash);
	INIT_LIST_HEAD(&ip->i_dentry);
#if defined(AFS_LINUX24_ENV)
	sema_init(&ip->i_zombie, 1);
	init_waitqueue_head(&ip->i_wait);
	spin_lock_init(&ip->i_data.i_shared_lock);
#ifdef STRUCT_ADDRESS_SPACE_HAS_PAGE_LOCK
	spin_lock_init(&ip->i_data.page_lock);
#endif
	INIT_LIST_HEAD(&ip->i_data.clean_pages);
	INIT_LIST_HEAD(&ip->i_data.dirty_pages);
	INIT_LIST_HEAD(&ip->i_data.locked_pages);
	INIT_LIST_HEAD(&ip->i_dirty_buffers);
#ifdef STRUCT_INODE_HAS_I_DIRTY_DATA_BUFFERS
	INIT_LIST_HEAD(&ip->i_dirty_data_buffers);
#endif
#ifdef STRUCT_INODE_HAS_I_DEVICES
	INIT_LIST_HEAD(&ip->i_devices);
#endif
#ifdef STRUCT_INODE_HAS_I_TRUNCATE_SEM
	init_rwsem(&ip->i_truncate_sem);
#endif
#ifdef STRUCT_INODE_HAS_I_ALLOC_SEM
	init_rwsem(&ip->i_alloc_sem);
#endif
#else /* AFS_LINUX22_ENV */
	sema_init(&ip->i_atomic_write, 1);
	init_waitqueue(&ip->i_wait);
#endif
#endif

#if defined(AFS_LINUX24_ENV)
	mapping->host = ip;
	ip->i_mapping = mapping;
#ifdef STRUCT_ADDRESS_SPACE_HAS_GFP_MASK
	ip->i_data.gfp_mask = GFP_HIGHUSER;
#endif
#if defined(AFS_LINUX26_ENV)
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
	{
	    extern struct backing_dev_info afs_backing_dev_info;

	    mapping->backing_dev_info = &afs_backing_dev_info;
	}
#endif
#endif

#if !defined(AFS_LINUX26_ENV)
	if (afs_globalVFS)
	    ip->i_dev = afs_globalVFS->s_dev;
#else
#ifdef STRUCT_INODE_HAS_I_SECURITY
	ip->i_security = NULL;
	if (security_inode_alloc(ip))
	    panic("Cannot allocate inode security");
#endif
#endif
	ip->i_sb = afs_globalVFS;
	put_inode_on_dummy_list(ip);
    }
#endif

#ifdef AFS_OSF_ENV
    /* Hold it for the LRU (should make count 2) */
    VN_HOLD(AFSTOV(tvc));
#else /* AFS_OSF_ENV */
#if !defined(AFS_XBSD_ENV)
    VREFCOUNT_SET(tvc, 1);	/* us */
#endif /* AFS_XBSD_ENV */
#endif /* AFS_OSF_ENV */
#ifdef AFS_AIX32_ENV
    LOCK_INIT(&tvc->pvmlock, "vcache pvmlock");
    tvc->vmh = tvc->segid = NULL;
    tvc->credp = NULL;
#endif
#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV) || defined(AFS_SUN5_ENV)
#if defined(AFS_SUN5_ENV)
    rw_init(&tvc->rwlock, "vcache rwlock", RW_DEFAULT, NULL);

#if defined(AFS_SUN55_ENV)
    /* This is required if the kaio (kernel asynchronous io)
     ** module is installed. Inside the kernel, the function
     ** check_vp( common/os/aio.c) checks to see if the kernel has
     ** to provide asynchronous io for this vnode. This
     ** function extracts the device number by following the
     ** v_data field of the vnode. If we do not set this field
     ** then the system panics. The value of the v_data field
     ** is not really important for AFS vnodes because the kernel
     ** does not do asynchronous io for regular files. Hence,
     ** for the time being, we fill up the v_data field with the
     ** vnode pointer itself. */
    tvc->v.v_data = (char *)tvc;
#endif /* AFS_SUN55_ENV */
#endif
    afs_BozonInit(&tvc->pvnLock, tvc);
#endif

    tvc->Access = NULL;
    tvc->callback = serverp;	/* to minimize chance that clear
				 * request is lost */
    /* initialize vnode data, note vrefCount is v.v_count */
#ifdef AFS_AIX_ENV
    /* Don't forget to free the gnode space */
    tvc->v.v_gnode = gnodepnt =
	(struct gnode *)osi_AllocSmallSpace(sizeof(struct gnode));
    memset((char *)gnodepnt, 0, sizeof(struct gnode));
#endif
#ifdef AFS_SGI64_ENV
    memset((void *)&(tvc->vc_bhv_desc), 0, sizeof(tvc->vc_bhv_desc));
    bhv_desc_init(&(tvc->vc_bhv_desc), tvc, tvc, &Afs_vnodeops);
#ifdef AFS_SGI65_ENV
    vn_bhv_head_init(&(tvc->v.v_bh), "afsvp");
    vn_bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
#else
    bhv_head_init(&(tvc->v.v_bh));
    bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
#endif
#ifdef AFS_SGI65_ENV
    tvc->v.v_mreg = tvc->v.v_mregb = (struct pregion *)tvc;
#ifdef VNODE_TRACING
    tvc->v.v_trace = ktrace_alloc(VNODE_TRACE_SIZE, 0);
#endif
    init_bitlock(&tvc->v.v_pcacheflag, VNODE_PCACHE_LOCKBIT, "afs_pcache",
		 tvc->v.v_number);
    init_mutex(&tvc->v.v_filocksem, MUTEX_DEFAULT, "afsvfl", (long)tvc);
    init_mutex(&tvc->v.v_buf_lock, MUTEX_DEFAULT, "afsvnbuf", (long)tvc);
#endif
    vnode_pcache_init(&tvc->v);
#if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
    /* Above define is never true except in SGI test kernels. */
    init_bitlock(&tvc->v.v_flag, VLOCK, "vnode", tvc->v.v_number);
#endif
#ifdef INTR_KTHREADS
    AFS_VN_INIT_BUF_LOCK(&(tvc->v));
#endif
#else
    SetAfsVnode(AFSTOV(tvc));
#endif /* AFS_SGI64_ENV */
#ifdef AFS_DARWIN_ENV
    tvc->v.v_ubcinfo = UBC_INFO_NULL;
    lockinit(&tvc->rwlock, PINOD, "vcache rwlock", 0, 0);
    cache_purge(AFSTOV(tvc));
    tvc->v.v_data = tvc;
    tvc->v.v_tag = VT_AFS;
    /* VLISTNONE(&tvc->v); */
    tvc->v.v_freelist.tqe_next = 0;
    tvc->v.v_freelist.tqe_prev = (struct vnode **)0xdeadb;
    /*tvc->vrefCount++; */
#endif
    /*
     * The proper value for mvstat (for root fids) is setup by the caller.
     */
    tvc->mvstat = 0;
    if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
	tvc->mvstat = 2;
    if (afs_globalVFS == 0)
	osi_Panic("afs globalvfs");
    vSetVfsp(tvc, afs_globalVFS);
    vSetType(tvc, VREG);
#ifdef AFS_AIX_ENV
    tvc->v.v_vfsnext = afs_globalVFS->vfs_vnodes;	/* link off vfs */
    tvc->v.v_vfsprev = NULL;
    afs_globalVFS->vfs_vnodes = &tvc->v;
    if (tvc->v.v_vfsnext != NULL)
	tvc->v.v_vfsnext->v_vfsprev = &tvc->v;
    tvc->v.v_next = gnodepnt->gn_vnode;	/* Single vnode per gnode for us! */
    gnodepnt->gn_vnode = &tvc->v;
#endif
    tvc->v.g_dev = ((struct mount *)afs_globalVFS->vfs_data)->m_dev;
#if defined(AFS_DUX40_ENV)
    insmntque(tvc, afs_globalVFS, &afs_ubcops);
#else
#ifdef AFS_OSF_ENV
    /* Is this needed??? */
    insmntque(tvc, afs_globalVFS);
#endif /* AFS_OSF_ENV */
#endif /* AFS_DUX40_ENV */
#if defined(AFS_SGI_ENV)
    VN_SET_DPAGES(&(tvc->v), (struct pfdat *)NULL);
    osi_Assert((tvc->v.v_flag & VINACT) == 0);
    tvc->v.v_flag = 0;
    osi_Assert(VN_GET_PGCNT(&(tvc->v)) == 0);
    osi_Assert(tvc->mapcnt == 0 && tvc->vc_locktrips == 0);
    osi_Assert(tvc->vc_rwlockid == OSI_NO_LOCKID);
    osi_Assert(tvc->v.v_filocks == NULL);
#if !defined(AFS_SGI65_ENV)
    osi_Assert(tvc->v.v_filocksem == NULL);
#endif
    osi_Assert(tvc->cred == NULL);
#ifdef AFS_SGI64_ENV
    vnode_pcache_reinit(&tvc->v);
    tvc->v.v_rdev = NODEV;
#endif
    vn_initlist((struct vnlist *)&tvc->v);
#endif /* AFS_SGI_ENV */
    osi_dnlc_purgedp(tvc);	/* this may be overkill */
    memset((char *)&(tvc->quick), 0, sizeof(struct vtodc));
    memset((char *)&(tvc->callsort), 0, sizeof(struct afs_q));
    i = VCHash(afid);

    tvc->hnext = afs_vhashT[i];
    afs_vhashT[i] = tvc;
    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	refpanic("NewVCache VLRU inconsistent");
    }
    QAdd(&VLRU, &tvc->vlruq);	/* put in lruq */
    if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	refpanic("NewVCache VLRU inconsistent2");
    }
    if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
	refpanic("NewVCache VLRU inconsistent3");
    }
    if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
	refpanic("NewVCache VLRU inconsistent4");
    }
    vcachegen++;

    return tvc;

}				/*afs_NewVCache */
/*
 * afs_FlushActiveVcaches
 *
 * Parameters:
 *	doflocks : Do we handle flocks?
 */
/* LOCK: afs_FlushActiveVcaches afs_xvcache N */
void
afs_FlushActiveVcaches(register afs_int32 doflocks)
{
    register struct vcache *tvc;
    register int i;
    register struct conn *tc;
    register afs_int32 code;
    register struct AFS_UCRED *cred = NULL;
    struct vrequest treq, ureq;
    struct AFSVolSync tsync;
    int didCore;
    XSTATS_DECLS AFS_STATCNT(afs_FlushActiveVcaches);
    ObtainReadLock(&afs_xvcache);
    for (i = 0; i < VCSIZE; i++) {
	for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
	    if (doflocks && tvc->flockCount != 0) {
		/* if this entry has an flock, send a keep-alive call out */
		osi_vnhold(tvc, 0);
		ReleaseReadLock(&afs_xvcache);
		ObtainWriteLock(&tvc->lock, 51);
		do {
		    afs_InitReq(&treq, afs_osi_credp);
		    treq.flags |= O_NONBLOCK;

		    tc = afs_Conn(&tvc->fid, &treq, SHARED_LOCK);
		    if (tc) {
			XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
			RX_AFS_GUNLOCK();
			code =
			    RXAFS_ExtendLock(tc->id,
					     (struct AFSFid *)&tvc->fid.Fid,
					     &tsync);
			RX_AFS_GLOCK();
			XSTATS_END_TIME;
		    } else
			code = -1;
		} while (afs_Analyze
			 (tc, code, &tvc->fid, &treq,
			  AFS_STATS_FS_RPCIDX_EXTENDLOCK, SHARED_LOCK, NULL));

		ReleaseWriteLock(&tvc->lock);
		ObtainReadLock(&afs_xvcache);
		AFS_FAST_RELE(tvc);
	    }
	    didCore = 0;
	    if ((tvc->states & CCore) || (tvc->states & CUnlinkedDel)) {
		/*
		 * Don't let it evaporate in case someone else is in
		 * this code.  Also, drop the afs_xvcache lock while
		 * getting vcache locks.
		 */
		osi_vnhold(tvc, 0);
		ReleaseReadLock(&afs_xvcache);
#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
		afs_BozonLock(&tvc->pvnLock, tvc);
#endif
#if defined(AFS_SGI_ENV)
		/*
		 * That's because if we come in via the CUnlinkedDel bit
		 * state path we'll have 0 refcnt.
		 */
		osi_Assert(VREFCOUNT(tvc) > 0);
		AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
		ObtainWriteLock(&tvc->lock, 52);
		if (tvc->states & CCore) {
		    tvc->states &= ~CCore;
		    /* XXXX Find better place-holder for cred XXXX */
		    cred = (struct AFS_UCRED *)tvc->linkData;
		    tvc->linkData = NULL;	/* XXX */
		    afs_InitReq(&ureq, cred);
		    afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
			       ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
			       tvc->execsOrWriters);
		    code = afs_StoreOnLastReference(tvc, &ureq);
		    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
		    afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
		    hzero(tvc->flushDV);
		    didCore = 1;

		    if (code && code != VNOVNODE) {
			afs_StoreWarn(code, tvc->fid.Fid.Volume,
				      /* /dev/console */ 1);
		    }
		} else if (tvc->states & CUnlinkedDel) {
		    /*
		     * Ignore errors
		     */
		    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
		    afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
#if defined(AFS_SGI_ENV)
		    AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
		    afs_remunlink(tvc, 0);
#if defined(AFS_SGI_ENV)
		    AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
		} else {
		    /* lost (or won, perhaps) the race condition */
		    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
		    afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
		}
#if defined(AFS_SGI_ENV)
		AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
#endif
		ObtainReadLock(&afs_xvcache);
		AFS_FAST_RELE(tvc);
		if (didCore) {
		    AFS_RELE(AFSTOV(tvc));
		    /* Matches write code setting CCore flag */
		    crfree(cred);
		}
	    }
#ifdef AFS_DARWIN_ENV
	    if (VREFCOUNT(tvc) == 1 && UBCINFOEXISTS(&tvc->v)) {
		if (tvc->opens)
		    panic("flushactive open, hasubc, but refcnt 1");
		osi_VM_TryReclaim(tvc, 0);
	    }
#endif
	}
    }
    ReleaseReadLock(&afs_xvcache);
}
/*
 * afs_VerifyVCache2
 *
 * Description:
 *	Make sure a cache entry is up-to-date status-wise.
 *
 * NOTE: everywhere that calls this can potentially be sped up
 *	by checking CStatd first, and avoiding doing the InitReq
 *	if this is up-to-date.
 *
 *	Anymore, the only places that call this KNOW already that the
 *	vcache is not up-to-date, so we don't screw around.
 *
 * Parameters:
 *	avc  : Ptr to vcache entry to verify.
 *	areq : Ptr to associated vrequest.
 */
int
afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
{
    register struct vcache *tvc;

    AFS_STATCNT(afs_VerifyVCache);

#if defined(AFS_OSF_ENV)
    ObtainReadLock(&avc->lock);
    if (afs_IsWired(avc)) {
	ReleaseReadLock(&avc->lock);
	return 0;
    }
    ReleaseReadLock(&avc->lock);
#endif /* AFS_OSF_ENV */
    /* otherwise we must fetch the status info */

    ObtainWriteLock(&avc->lock, 53);
    if (avc->states & CStatd) {
	ReleaseWriteLock(&avc->lock);
	return 0;
    }
    ObtainWriteLock(&afs_xcbhash, 461);
    avc->states &= ~(CStatd | CUnique);
    avc->callback = NULL;
    afs_DequeueCallback(avc);
    ReleaseWriteLock(&afs_xcbhash);
    ReleaseWriteLock(&avc->lock);

    /* since we've been called back, or the callback has expired,
     * it's possible that the contents of this directory, or this
     * file's name have changed, thus invalidating the dnlc contents.
     */
    if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
	osi_dnlc_purgedp(avc);
    else
	osi_dnlc_purgevp(avc);

    /* fetch the status info */
    tvc = afs_GetVCache(&avc->fid, areq, NULL, avc);
    if (!tvc)
	return ENOENT;
    /* Put it back; caller has already incremented vrefCount */
    afs_PutVCache(tvc);
    return 0;

}				/*afs_VerifyVCache */
/*
 * afs_SimpleVStat
 *
 * Description:
 *	Simple copy of stat info into cache.
 *
 * Parameters:
 *	avc   : Ptr to vcache entry involved.
 *	astat : Ptr to stat info to copy.
 *
 * Environment:
 *	Nothing interesting.
 *
 * Callers: as of 1992-04-29, only called by WriteVCache
 */
static void
afs_SimpleVStat(register struct vcache *avc,
		register struct AFSFetchStatus *astat, struct vrequest *areq)
{
    afs_size_t length;
    AFS_STATCNT(afs_SimpleVStat);

#ifdef AFS_SGI_ENV
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
	&& !AFS_VN_MAPPED((vnode_t *) avc)) {
#else
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
#endif
#ifdef AFS_64BIT_CLIENT
	FillInt64(length, astat->Length_hi, astat->Length);
#else /* AFS_64BIT_CLIENT */
	length = astat->Length;
#endif /* AFS_64BIT_CLIENT */
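	/*
	 * Illustrative note: FillInt64 assembles a 64-bit length from the
	 * two 32-bit halves the fileserver returns, conceptually
	 * length = ((afs_size_t)astat->Length_hi << 32) | astat->Length.
	 * A 5 GB file thus arrives as Length_hi == 1, Length == 1073741824.
	 */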
#if defined(AFS_SGI_ENV)
	osi_Assert((valusema(&avc->vc_rwlock) <= 0)
		   && (OSI_GET_LOCKID() == avc->vc_rwlockid));
	if (length < avc->m.Length) {
	    vnode_t *vp = (vnode_t *) avc;

	    osi_Assert(WriteLocked(&avc->lock));
	    ReleaseWriteLock(&avc->lock);
	    AFS_GUNLOCK();
	    PTOSSVP(vp, (off_t) length, (off_t) MAXLONG);
	    AFS_GLOCK();
	    ObtainWriteLock(&avc->lock, 67);
	}
#endif
	/* if writing the file, don't fetch over this value */
	afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT, ICL_TYPE_POINTER, avc,
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
	avc->m.Length = length;
	avc->m.Date = astat->ClientModTime;
    }
    avc->m.Owner = astat->Owner;
    avc->m.Group = astat->Group;
    avc->m.Mode = astat->UnixModeBits;
    if (vType(avc) == VREG) {
	avc->m.Mode |= S_IFREG;
    } else if (vType(avc) == VDIR) {
	avc->m.Mode |= S_IFDIR;
    } else if (vType(avc) == VLNK) {
	avc->m.Mode |= S_IFLNK;
	if ((avc->m.Mode & 0111) == 0)
	    avc->mvstat = 1;
    }
    if (avc->states & CForeign) {
	struct axscache *ac;
	avc->anyAccess = astat->AnonymousAccess;
#ifdef badidea
	if ((astat->CallerAccess & ~astat->AnonymousAccess))
	    /*
	     * Caller has at least one bit not covered by anonymous, and
	     * thus may have interesting rights.
	     *
	     * HOWEVER, this is a really bad idea, because any access query
	     * for bits which aren't covered by anonymous, on behalf of a user
	     * who doesn't have any special rights, will result in an answer of
	     * the form "I don't know, let's make a FetchStatus RPC and find out!"
	     * It's an especially bad idea under Ultrix, since (due to the lack of
	     * a proper access() call) it must perform several afs_access() calls
	     * in order to create magic mode bits that vary according to who makes
	     * the call.  In other words, _every_ stat() generates a test for
	     * caller's access.
	     */
#endif /* badidea */
	if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
	    ac->axess = astat->CallerAccess;
	else			/* not found, add a new one if possible */
	    afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
    }

}				/*afs_SimpleVStat */
/*
 * afs_WriteVCache
 *
 * Description:
 *	Store the status info *only* back to the server for a
 *	given vcache entry.
 *
 * Parameters:
 *	avc     : Ptr to the vcache entry.
 *	astatus : Ptr to the status info to store.
 *	areq    : Ptr to the associated vrequest.
 *
 * Environment:
 *	Must be called with a shared lock held on the vnode.
 */
int
afs_WriteVCache(register struct vcache *avc,
		register struct AFSStoreStatus *astatus,
		struct vrequest *areq)
{
    afs_int32 code;
    struct conn *tc;
    struct AFSFetchStatus OutStatus;
    struct AFSVolSync tsync;
    XSTATS_DECLS AFS_STATCNT(afs_WriteVCache);
    afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));

    do {
	tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
	if (tc) {
	    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
	    RX_AFS_GUNLOCK();
	    code =
		RXAFS_StoreStatus(tc->id, (struct AFSFid *)&avc->fid.Fid,
				  astatus, &OutStatus, &tsync);
	    RX_AFS_GLOCK();
	    XSTATS_END_TIME;
	} else
	    code = -1;
    } while (afs_Analyze
	     (tc, code, &avc->fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
	      SHARED_LOCK, NULL));

    UpgradeSToWLock(&avc->lock, 20);
    if (code == 0) {
	/* success, do the changes locally */
	afs_SimpleVStat(avc, &OutStatus, areq);
	/*
	 * Update the date, too.  SimpleVStat didn't do this, since
	 * it thought we were doing this after fetching new status
	 * over a file being written.
	 */
	avc->m.Date = OutStatus.ClientModTime;
    } else {
	/* failure, set up to check with server next time */
	ObtainWriteLock(&afs_xcbhash, 462);
	afs_DequeueCallback(avc);
	avc->states &= ~(CStatd | CUnique);	/* turn off stat valid flag */
	ReleaseWriteLock(&afs_xcbhash);
	if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
	    osi_dnlc_purgedp(avc);	/* if it (could be) a directory */
    }
    ConvertWToSLock(&avc->lock);
    return code;

}				/*afs_WriteVCache */
/*
 * afs_ProcessFS
 *
 * Description:
 *	Copy astat block into vcache info
 *
 * Parameters:
 *	avc   : Ptr to vcache entry.
 *	astat : Ptr to stat block to copy in.
 *	areq  : Ptr to associated request.
 *
 * Environment:
 *	Must be called under a write lock
 *
 * Note: this code may get dataversion and length out of sync if the file has
 *	been modified.  This is less than ideal.  I haven't thought about
 *	it sufficiently to be certain that it is adequate.
 */
void
afs_ProcessFS(register struct vcache *avc,
	      register struct AFSFetchStatus *astat, struct vrequest *areq)
{
    afs_size_t length;
    AFS_STATCNT(afs_ProcessFS);

#ifdef AFS_64BIT_CLIENT
    FillInt64(length, astat->Length_hi, astat->Length);
#else /* AFS_64BIT_CLIENT */
    length = astat->Length;
#endif /* AFS_64BIT_CLIENT */
    /* WARNING: afs_DoBulkStat uses the Length field to store a sequence
     * number for each bulk status request. Under no circumstances
     * should afs_DoBulkStat store a sequence number if the new
     * length will be ignored when afs_ProcessFS is called with
     * new stats. If you change the following conditional then you
     * also need to change the conditional in afs_DoBulkStat.  */
#ifdef AFS_SGI_ENV
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
	&& !AFS_VN_MAPPED((vnode_t *) avc)) {
#else
    if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
#endif
	/* if we're writing or mapping this file, don't fetch over these
	 * values.
	 */
	afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
		   ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
	avc->m.Length = length;
	avc->m.Date = astat->ClientModTime;
    }
    hset64(avc->m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
    avc->m.Owner = astat->Owner;
    avc->m.Mode = astat->UnixModeBits;
    avc->m.Group = astat->Group;
    avc->m.LinkCount = astat->LinkCount;
    if (astat->FileType == File) {
	vSetType(avc, VREG);
	avc->m.Mode |= S_IFREG;
    } else if (astat->FileType == Directory) {
	vSetType(avc, VDIR);
	avc->m.Mode |= S_IFDIR;
    } else if (astat->FileType == SymbolicLink) {
	if (afs_fakestat_enable && (avc->m.Mode & 0111) == 0) {
	    vSetType(avc, VDIR);
	    avc->m.Mode |= S_IFDIR;
	} else {
	    vSetType(avc, VLNK);
	    avc->m.Mode |= S_IFLNK;
	    if ((avc->m.Mode & 0111) == 0) {
		avc->mvstat = 1;
	    }
	}
    }
    avc->anyAccess = astat->AnonymousAccess;
#ifdef badidea
    if ((astat->CallerAccess & ~astat->AnonymousAccess))
	/*
	 * Caller has at least one bit not covered by anonymous, and
	 * thus may have interesting rights.
	 *
	 * HOWEVER, this is a really bad idea, because any access query
	 * for bits which aren't covered by anonymous, on behalf of a user
	 * who doesn't have any special rights, will result in an answer of
	 * the form "I don't know, let's make a FetchStatus RPC and find out!"
	 * It's an especially bad idea under Ultrix, since (due to the lack of
	 * a proper access() call) it must perform several afs_access() calls
	 * in order to create magic mode bits that vary according to who makes
	 * the call.  In other words, _every_ stat() generates a test for
	 * caller's access.
	 */
#endif /* badidea */
    {
	struct axscache *ac;
	if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
	    ac->axess = astat->CallerAccess;
	else			/* not found, add a new one if possible */
	    afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
    }
#ifdef AFS_LINUX22_ENV
    vcache2inode(avc);		/* Set the inode attr cache */
#endif
#ifdef AFS_DARWIN_ENV
    osi_VM_Setup(avc, 1);
#endif

}				/*afs_ProcessFS */
int
afs_RemoteLookup(register struct VenusFid *afid, struct vrequest *areq,
		 char *name, struct VenusFid *nfid,
		 struct AFSFetchStatus *OutStatusp,
		 struct AFSCallBack *CallBackp, struct server **serverp,
		 struct AFSVolSync *tsyncp)
{
    afs_int32 code;
    register struct conn *tc;
    struct AFSFetchStatus OutDirStatus;
    XSTATS_DECLS if (!name)
	name = "";		/* XXX */
    do {
	tc = afs_Conn(afid, areq, SHARED_LOCK);
	if (tc) {
	    if (serverp)
		*serverp = tc->srvr->server;
	    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
	    RX_AFS_GUNLOCK();
	    code =
		RXAFS_Lookup(tc->id, (struct AFSFid *)&afid->Fid, name,
			     (struct AFSFid *)&nfid->Fid, OutStatusp,
			     &OutDirStatus, CallBackp, tsyncp);
	    RX_AFS_GLOCK();
	    XSTATS_END_TIME;
	} else
	    code = -1;
    } while (afs_Analyze
	     (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
	      NULL));

    return code;
}
/*
 * afs_GetVCache
 *
 * Description:
 *	Given a file id and a vrequest structure, fetch the status
 *	information associated with the file.
 *
 * Parameters:
 *	afid : File ID.
 *	areq : Ptr to associated vrequest structure, specifying the
 *		user whose authentication tokens will be used.
 *	avc  : caller may already have a vcache for this file, which is
 *		already held.
 *
 * Environment:
 *	The cache entry is returned with an increased vrefCount field.
 *	The entry must be discarded by calling afs_PutVCache when you
 *	are through using the pointer to the cache entry.
 *
 *	You should not hold any locks when calling this function, except
 *	locks on other vcache entries.  If you lock more than one vcache
 *	entry simultaneously, you should lock them in this order:
 *
 *	    1. Lock all files first, then directories.
 *	    2. Within a particular type, lock entries in Fid.Vnode order.
 *
 *	This locking hierarchy is convenient because it allows locking
 *	of a parent dir cache entry, given a file (to check its access
 *	control list).  It also allows renames to be handled easily by
 *	locking directories in a constant order.
 * NB.  NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
 */
   /* might have a vcache structure already, which must
    * already be held by the caller */
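
/*
 * Lock-ordering sketch (illustrative): to lock a file and its parent
 * directory together, take the file's vcache lock first (files before
 * directories); when locking two entries of the same type, take the one
 * with the smaller Fid.Vnode first:
 *
 *	ObtainWriteLock(&filevc->lock, ...);
 *	ObtainWriteLock(&dirvc->lock, ...);
 *
 * Renames, which must lock two directories, order them by Fid.Vnode.
 */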
struct vcache *
afs_GetVCache(register struct VenusFid *afid, struct vrequest *areq,
	      afs_int32 * cached, struct vcache *avc)
{

    afs_int32 code, newvcache = 0;
    register struct vcache *tvc;
    struct volume *tvp;
    afs_int32 retry;

    AFS_STATCNT(afs_GetVCache);

    if (cached)
	*cached = 0;		/* Init just in case */

#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
  loop:
#endif

    ObtainSharedLock(&afs_xvcache, 5);

    tvc = afs_FindVCache(afid, &retry, DO_STATS | DO_VLRU);
    if (tvc && retry) {
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
	ReleaseSharedLock(&afs_xvcache);
	spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
	goto loop;
#endif
    }

    if (tvc) {
	if (cached)
	    *cached = 1;
	if (tvc->states & CStatd) {
	    ReleaseSharedLock(&afs_xvcache);
	    return tvc;
	}
    } else {
	UpgradeSToWLock(&afs_xvcache, 21);

	/* no cache entry, better grab one */
	tvc = afs_NewVCache(afid, NULL);
	newvcache = 1;

	ConvertWToSLock(&afs_xvcache);
	afs_stats_cmperf.vcacheMisses++;
    }

    ReleaseSharedLock(&afs_xvcache);

    ObtainWriteLock(&tvc->lock, 54);

    if (tvc->states & CStatd) {
#ifdef AFS_LINUX22_ENV
	vcache2inode(tvc);
#endif
	ReleaseWriteLock(&tvc->lock);
#ifdef AFS_DARWIN_ENV
	osi_VM_Setup(tvc, 0);
#endif
	return tvc;
    }
#if defined(AFS_OSF_ENV)
    if (afs_IsWired(tvc)) {
	ReleaseWriteLock(&tvc->lock);
	return tvc;
    }
#endif /* AFS_OSF_ENV */
#ifdef AFS_OBSD_ENV
    VOP_LOCK(AFSTOV(tvc), LK_EXCLUSIVE | LK_RETRY, curproc);
    uvm_vnp_uncache(AFSTOV(tvc));
    VOP_UNLOCK(AFSTOV(tvc), 0, curproc);
#endif
#ifdef AFS_FBSD_ENV
    /*
     * XXX - I really don't like this.  Should try to understand better.
     * It seems that sometimes, when we get called, we already hold the
     * lock on the vnode (e.g., from afs_getattr via afs_VerifyVCache).
     * We can't drop the vnode lock, because that could result in a race.
     * Sometimes, though, we get here and don't hold the vnode lock.
     * I hate code paths that sometimes hold locks and sometimes don't.
     * In any event, the dodge we use here is to check whether the vnode
     * is locked, and if it isn't, then we gain and drop it around the call
     * to vinvalbuf; otherwise, we leave it alone.
     */
    {
	struct vnode *vp = AFSTOV(tvc);
	int iheldthelock;

#ifdef AFS_FBSD50_ENV
	iheldthelock = VOP_ISLOCKED(vp, curthread);
	if (!iheldthelock)
	    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
	vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
	if (!iheldthelock)
	    VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
#else
	iheldthelock = VOP_ISLOCKED(vp, curproc);
	if (!iheldthelock)
	    vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
	vinvalbuf(vp, V_SAVE, osi_curcred(), curproc, PINOD, 0);
	if (!iheldthelock)
	    VOP_UNLOCK(vp, LK_EXCLUSIVE, curproc);
#endif
    }
#endif

    ObtainWriteLock(&afs_xcbhash, 464);
    tvc->states &= ~CUnique;
    tvc->callback = NULL;
    afs_DequeueCallback(tvc);
    ReleaseWriteLock(&afs_xcbhash);

    /* Is it always appropriate to throw away all the access rights? */
    afs_FreeAllAxs(&(tvc->Access));
    tvp = afs_GetVolume(afid, areq, READ_LOCK);	/* copy useful per-volume info */
    if (tvp) {
	if ((tvp->states & VForeign)) {
	    if (newvcache)
		tvc->states |= CForeign;
	    if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
		&& (tvp->rootUnique == afid->Fid.Unique)) {
		tvc->mvstat = 2;
	    }
	}
	if (tvp->states & VRO)
	    tvc->states |= CRO;
	if (tvp->states & VBackup)
	    tvc->states |= CBackup;
	/* now copy ".." entry back out of volume structure, if necessary */
	if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
	    if (!tvc->mvid)
		tvc->mvid = (struct VenusFid *)
		    osi_AllocSmallSpace(sizeof(struct VenusFid));
	    *tvc->mvid = tvp->dotdot;
	}
	afs_PutVolume(tvp, READ_LOCK);
    }

    /* stat the file */
    afs_RemoveVCB(afid);
    {
	struct AFSFetchStatus OutStatus;

	if (afs_DynrootNewVnode(tvc, &OutStatus)) {
	    afs_ProcessFS(tvc, &OutStatus, areq);
	    tvc->states |= CStatd | CUnique;
	    code = 0;
	} else {
	    code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
	}
    }

    if (code) {
	ReleaseWriteLock(&tvc->lock);

	ObtainReadLock(&afs_xvcache);
	AFS_FAST_RELE(tvc);
	ReleaseReadLock(&afs_xvcache);
	return NULL;
    }

    ReleaseWriteLock(&tvc->lock);
    return tvc;

}				/*afs_GetVCache */
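
/*
 * Caller sketch (illustrative; exact afs_PutVCache argument list per
 * this tree): the entry comes back with vrefCount already bumped, so
 * every successful afs_GetVCache must be paired with afs_PutVCache:
 *
 *	tvc = afs_GetVCache(&fid, &treq, NULL, NULL);
 *	if (tvc) {
 *	    ... use tvc ...
 *	    afs_PutVCache(tvc);
 *	}
 */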
struct vcache *
afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
		 afs_int32 * cached, struct vcache *adp, char *aname)
{
    afs_int32 code, now, newvcache = 0;
    struct VenusFid nfid;
    register struct vcache *tvc;
    struct volume *tvp;
    struct AFSFetchStatus OutStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    struct server *serverp = 0;
    afs_int32 origCBs;
    afs_int32 retry;

    AFS_STATCNT(afs_GetVCache);
    if (cached)
	*cached = 0;		/* Init just in case */

#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
  loop1:
#endif

    ObtainReadLock(&afs_xvcache);
    tvc = afs_FindVCache(afid, &retry, DO_STATS /* no vlru */ );

    if (tvc) {
	ReleaseReadLock(&afs_xvcache);
	if (retry) {
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
	    spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
	    goto loop1;
#endif
	}
	ObtainReadLock(&tvc->lock);

	if (tvc->states & CStatd) {
	    if (cached) {
		*cached = 1;
	    }
	    ReleaseReadLock(&tvc->lock);
	    return tvc;
	}
	tvc->states &= ~CUnique;

	ReleaseReadLock(&tvc->lock);
	ObtainReadLock(&afs_xvcache);
	AFS_FAST_RELE(tvc);
    }
    /* if (tvc) */
    ReleaseReadLock(&afs_xvcache);

    /* lookup the file */
    nfid = *afid;
    now = osi_Time();
    origCBs = afs_allCBs;	/* if anything changes, we don't have a cb */
    code =
	afs_RemoteLookup(&adp->fid, areq, aname, &nfid, &OutStatus, &CallBack,
			 &serverp, &tsync);

#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
  loop2:
#endif

    ObtainSharedLock(&afs_xvcache, 6);
    tvc = afs_FindVCache(&nfid, &retry, DO_VLRU /* no xstats now */ );
    if (tvc && retry) {
#if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
	ReleaseSharedLock(&afs_xvcache);
	spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
	goto loop2;
#endif
    }

    if (!tvc) {
	/* no cache entry, better grab one */
	UpgradeSToWLock(&afs_xvcache, 22);
	tvc = afs_NewVCache(&nfid, serverp);
	newvcache = 1;
	ConvertWToSLock(&afs_xvcache);
    }

    ReleaseSharedLock(&afs_xvcache);
    ObtainWriteLock(&tvc->lock, 55);

    /* Is it always appropriate to throw away all the access rights? */
    afs_FreeAllAxs(&(tvc->Access));
    tvp = afs_GetVolume(afid, areq, READ_LOCK);	/* copy useful per-vol info */
    if (tvp) {
	if ((tvp->states & VForeign)) {
	    if (newvcache)
		tvc->states |= CForeign;
	    if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
		&& (tvp->rootUnique == afid->Fid.Unique))
		tvc->mvstat = 2;
	}
	if (tvp->states & VRO)
	    tvc->states |= CRO;
	if (tvp->states & VBackup)
	    tvc->states |= CBackup;
	/* now copy ".." entry back out of volume structure, if necessary */
	if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
	    if (!tvc->mvid)
		tvc->mvid = (struct VenusFid *)
		    osi_AllocSmallSpace(sizeof(struct VenusFid));
	    *tvc->mvid = tvp->dotdot;
	}
    }

    if (code) {
	ObtainWriteLock(&afs_xcbhash, 465);
	afs_DequeueCallback(tvc);
	tvc->states &= ~(CStatd | CUnique);
	ReleaseWriteLock(&afs_xcbhash);
	if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
	    osi_dnlc_purgedp(tvc);	/* if it (could be) a directory */
	if (tvp)
	    afs_PutVolume(tvp, READ_LOCK);
	ReleaseWriteLock(&tvc->lock);
	ObtainReadLock(&afs_xvcache);
	AFS_FAST_RELE(tvc);
	ReleaseReadLock(&afs_xvcache);
	return NULL;
    }

    ObtainWriteLock(&afs_xcbhash, 466);
    if (origCBs == afs_allCBs) {
	if (CallBack.ExpirationTime) {
	    tvc->callback = serverp;
	    tvc->cbExpires = CallBack.ExpirationTime + now;
	    tvc->states |= CStatd | CUnique;
	    tvc->states &= ~CBulkFetching;
	    afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
	} else if (tvc->states & CRO) {
	    /* adapt gives us an hour. */
	    tvc->cbExpires = 3600 + osi_Time();
	    /*XXX*/ tvc->states |= CStatd | CUnique;
	    tvc->states &= ~CBulkFetching;
	    afs_QueueCallback(tvc, CBHash(3600), tvp);
	} else {
	    tvc->callback = NULL;
	    afs_DequeueCallback(tvc);
	    tvc->states &= ~(CStatd | CUnique);
	    if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
		osi_dnlc_purgedp(tvc);	/* if it (could be) a directory */
	}
    } else {
	afs_DequeueCallback(tvc);
	tvc->states &= ~CStatd;
	tvc->states &= ~CUnique;
	tvc->callback = NULL;
	if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
	    osi_dnlc_purgedp(tvc);	/* if it (could be) a directory */
    }
    ReleaseWriteLock(&afs_xcbhash);
    if (tvp)
	afs_PutVolume(tvp, READ_LOCK);
    afs_ProcessFS(tvc, &OutStatus, areq);

    ReleaseWriteLock(&tvc->lock);
    return tvc;
}
struct vcache *
afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
		  afs_int32 * cached, struct volume *tvolp)
{
    afs_int32 code = 0, i, newvcache = 0, haveStatus = 0;
    afs_int32 getNewFid = 0;
    afs_uint32 start;
    struct VenusFid nfid;
    register struct vcache *tvc;
    struct server *serverp = 0;
    struct AFSFetchStatus OutStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    int origCBs = 0;

  newmtpt:
    if (!tvolp->rootVnode || getNewFid) {
	struct VenusFid tfid;

	tfid = *afid;
	tfid.Fid.Vnode = 0;	/* Means get rootfid of volume */
	origCBs = afs_allCBs;	/* ignore InitCallBackState */
	start = osi_Time();
	code =
	    afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
			     &serverp, &tsync);
	if (code) {
	    return NULL;
	}
	/* ReleaseReadLock(&tvolp->lock); */
	ObtainWriteLock(&tvolp->lock, 56);
	tvolp->rootVnode = afid->Fid.Vnode = nfid.Fid.Vnode;
	tvolp->rootUnique = afid->Fid.Unique = nfid.Fid.Unique;
	ReleaseWriteLock(&tvolp->lock);
	/* ObtainReadLock(&tvolp->lock);*/
	haveStatus = 1;
    } else {
	afid->Fid.Vnode = tvolp->rootVnode;
	afid->Fid.Unique = tvolp->rootUnique;
    }

    ObtainSharedLock(&afs_xvcache, 7);
    i = VCHash(afid);
    for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
	if (!FidCmp(&(tvc->fid), afid)) {
	    /* Grab this vnode, possibly reactivating from the free list */
	    /* for the present (95.05.25) everything on the hash table is
	     * definitively NOT in the free list -- at least until afs_reclaim
	     * can be safely implemented */
#ifdef AFS_OSF_ENV
	    int vg;
	    AFS_GUNLOCK();
	    vg = vget(AFSTOV(tvc));	/* this bumps ref count */
	    AFS_GLOCK();
	    if (vg)
		continue;
#endif /* AFS_OSF_ENV */
	    break;
	}
    }

    if (!haveStatus && (!tvc || !(tvc->states & CStatd))) {
	/* Mount point no longer stat'd or unknown. FID may have changed. */
#ifdef AFS_OSF_ENV
	if (tvc)
	    AFS_RELE(AFSTOV(tvc));
#endif
	tvc = NULL;
	getNewFid = 1;
	ReleaseSharedLock(&afs_xvcache);
	goto newmtpt;
    }

    if (!tvc) {
	UpgradeSToWLock(&afs_xvcache, 23);
	/* no cache entry, better grab one */
	tvc = afs_NewVCache(afid, NULL);
	newvcache = 1;
	afs_stats_cmperf.vcacheMisses++;
    } else {
	if (cached)
	    *cached = 1;
	afs_stats_cmperf.vcacheHits++;
#ifdef AFS_OSF_ENV
	/* we already bumped the ref count in the for loop above */
#else /* AFS_OSF_ENV */
	osi_vnhold(tvc, 0);
#endif
	UpgradeSToWLock(&afs_xvcache, 24);
	if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	    refpanic("GRVC VLRU inconsistent0");
	}
	if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
	    refpanic("GRVC VLRU inconsistent1");
	}
	if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
	    refpanic("GRVC VLRU inconsistent2");
	}
	QRemove(&tvc->vlruq);	/* move to lruq head */
	QAdd(&VLRU, &tvc->vlruq);
	if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
	    refpanic("GRVC VLRU inconsistent3");
	}
	if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
	    refpanic("GRVC VLRU inconsistent4");
	}
	if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
	    refpanic("GRVC VLRU inconsistent5");
	}
    }

    ReleaseWriteLock(&afs_xvcache);

    if (tvc->states & CStatd) {
	return tvc;
    } else {

	ObtainReadLock(&tvc->lock);
	tvc->states &= ~CUnique;
	tvc->callback = NULL;	/* redundant, perhaps */
	ReleaseReadLock(&tvc->lock);
    }

    ObtainWriteLock(&tvc->lock, 57);

    /* Is it always appropriate to throw away all the access rights? */
    afs_FreeAllAxs(&(tvc->Access));

    if (newvcache)
	tvc->states |= CForeign;
    if (tvolp->states & VRO)
	tvc->states |= CRO;
    if (tvolp->states & VBackup)
	tvc->states |= CBackup;
    /* now copy ".." entry back out of volume structure, if necessary */
    if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
	&& (tvolp->rootUnique == afid->Fid.Unique)) {
	tvc->mvstat = 2;
    }
    if (tvc->mvstat == 2 && tvolp->dotdot.Fid.Volume != 0) {
	if (!tvc->mvid)
	    tvc->mvid = (struct VenusFid *)
		osi_AllocSmallSpace(sizeof(struct VenusFid));
	*tvc->mvid = tvolp->dotdot;
    }

    /* stat the file */
    afs_RemoveVCB(afid);

    if (!haveStatus) {
	struct VenusFid tfid;

	tfid = *afid;
	tfid.Fid.Vnode = 0;	/* Means get rootfid of volume */
	origCBs = afs_allCBs;	/* ignore InitCallBackState */
	start = osi_Time();
	code =
	    afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
			     &serverp, &tsync);
    }

    if (code) {
	ObtainWriteLock(&afs_xcbhash, 467);
	afs_DequeueCallback(tvc);
	tvc->callback = NULL;
	tvc->states &= ~(CStatd | CUnique);
	ReleaseWriteLock(&afs_xcbhash);
	if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
	    osi_dnlc_purgedp(tvc);	/* if it (could be) a directory */
	ReleaseWriteLock(&tvc->lock);
	ObtainReadLock(&afs_xvcache);
	AFS_FAST_RELE(tvc);
	ReleaseReadLock(&afs_xvcache);
	return NULL;
    }

    ObtainWriteLock(&afs_xcbhash, 468);
    if (origCBs == afs_allCBs) {
	tvc->states |= CTruth;
	tvc->callback = serverp;
	if (CallBack.ExpirationTime != 0) {
	    tvc->cbExpires = CallBack.ExpirationTime + start;
	    tvc->states |= CStatd;
	    tvc->states &= ~CBulkFetching;
	    afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
	} else if (tvc->states & CRO) {
	    /* adapt gives us an hour. */
	    tvc->cbExpires = 3600 + osi_Time();
	    /*XXX*/ tvc->states |= CStatd;
	    tvc->states &= ~CBulkFetching;
	    afs_QueueCallback(tvc, CBHash(3600), tvolp);
	}
    } else {
	afs_DequeueCallback(tvc);
	tvc->callback = NULL;
	tvc->states &= ~(CStatd | CUnique);
	if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
	    osi_dnlc_purgedp(tvc);	/* if it (could be) a directory */
    }
    ReleaseWriteLock(&afs_xcbhash);
    afs_ProcessFS(tvc, &OutStatus, areq);

    ReleaseWriteLock(&tvc->lock);
    return tvc;
}
2333 * must be called with avc write-locked
2334 * don't absolutely have to invalidate the hint unless the dv has
2335 * changed, but be sure to get it right else there will be consistency bugs.
2338 afs_FetchStatus(struct vcache * avc, struct VenusFid * afid,
2339 struct vrequest * areq, struct AFSFetchStatus * Outsp)
2342 afs_uint32 start = 0;
2343 register struct conn *tc;
2344 struct AFSCallBack CallBack;
2345 struct AFSVolSync tsync;
2346 struct volume *volp;
2349 tc = afs_Conn(afid, areq, SHARED_LOCK);
2350 avc->quick.stamp = 0;
2351 avc->h1.dchint = NULL; /* invalidate hints */
2353 avc->callback = tc->srvr->server;
2355 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
2358 RXAFS_FetchStatus(tc->id, (struct AFSFid *)&afid->Fid, Outsp,
2366 } while (afs_Analyze
2367 (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
2368 SHARED_LOCK, NULL));
2371 afs_ProcessFS(avc, Outsp, areq);
2372 volp = afs_GetVolume(afid, areq, READ_LOCK);
2373 ObtainWriteLock(&afs_xcbhash, 469);
2374 avc->states |= CTruth;
2375 if (avc->callback /* check for race */ ) {
2376 if (CallBack.ExpirationTime != 0) {
2377 avc->cbExpires = CallBack.ExpirationTime + start;
2378 avc->states |= CStatd;
2379 avc->states &= ~CBulkFetching;
2380 afs_QueueCallback(avc, CBHash(CallBack.ExpirationTime), volp);
2381 } else if (avc->states & CRO) { /* ordinary callback on a read-only volume -- AFS 3.2 style */
2382 avc->cbExpires = 3600 + start;
2383 avc->states |= CStatd;
2384 avc->states &= ~CBulkFetching;
2385 afs_QueueCallback(avc, CBHash(3600), volp);
2387 afs_DequeueCallback(avc);
2388 avc->callback = NULL;
2389 avc->states &= ~(CStatd | CUnique);
2390 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
2391 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2394 afs_DequeueCallback(avc);
2395 avc->callback = NULL;
2396 avc->states &= ~(CStatd | CUnique);
2397 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
2398 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2400 ReleaseWriteLock(&afs_xcbhash);
2402 afs_PutVolume(volp, READ_LOCK);
2404 /* We used to undo the local callback here, but that's too extreme.
2405 * There are plenty of good reasons that fetchstatus might return
2406 * an error, such as EPERM. If we have the vnode cached, statd,
2407 * with callback, we might as well keep track of the fact that we
2408 * don't have access...
2410 if (code == EPERM || code == EACCES) {
2411 struct axscache *ac;
2412 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
2413 ac->axess = 0;
2414 else /* not found, add a new one if possible */
2415 afs_AddAxs(avc->Access, areq->uid, 0);
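/*
 * A condensed sketch (not part of the original source) of the
 * connection/retry idiom afs_FetchStatus uses above: afs_Analyze
 * inspects the RPC result and returns nonzero when the request should
 * be retried, typically against another server for the volume.
 */
#if 0
static afs_int32
example_FetchStatusLoop(struct VenusFid *afid, struct vrequest *areq,
			struct AFSFetchStatus *Outsp)
{
    register struct conn *tc;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    afs_int32 code;
    XSTATS_DECLS;

    do {
	tc = afs_Conn(afid, areq, SHARED_LOCK);
	if (tc) {
	    XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
	    RX_AFS_GUNLOCK();	/* drop the global lock around the RPC */
	    code = RXAFS_FetchStatus(tc->id, (struct AFSFid *)&afid->Fid,
				     Outsp, &CallBack, &tsync);
	    RX_AFS_GLOCK();
	    XSTATS_END_TIME;
	} else
	    code = -1;	/* no usable connection; let afs_Analyze decide */
    } while (afs_Analyze(tc, code, afid, areq,
			 AFS_STATS_FS_RPCIDX_FETCHSTATUS, SHARED_LOCK,
			 NULL));
    return code;
}
#endif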
2426 * Stuff some information into the vcache for the given file.
2429 * afid : File in question.
2430 * OutStatus : Fetch status on the file.
2431 * CallBack : Callback info.
2432 * tc : RPC connection involved.
2433 * areq : vrequest involved.
2436 * Nothing interesting.
2439 afs_StuffVcache(register struct VenusFid *afid,
2440 struct AFSFetchStatus *OutStatus,
2441 struct AFSCallBack *CallBack, register struct conn *tc,
2442 struct vrequest *areq)
2444 register afs_int32 code, i, newvcache = 0;
2445 register struct vcache *tvc;
2446 struct AFSVolSync tsync;
2448 struct axscache *ac;
2451 AFS_STATCNT(afs_StuffVcache);
2452 #ifdef IFS_VCACHECOUNT
2457 ObtainSharedLock(&afs_xvcache, 8);
2459 tvc = afs_FindVCache(afid, &retry, DO_VLRU /* no stats */ );
2461 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2462 ReleaseSharedLock(&afs_xvcache);
2463 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2469 /* no cache entry, better grab one */
2470 UpgradeSToWLock(&afs_xvcache, 25);
2471 tvc = afs_NewVCache(afid, NULL);
2473 ConvertWToSLock(&afs_xvcache);
2476 ReleaseSharedLock(&afs_xvcache);
2477 ObtainWriteLock(&tvc->lock, 58);
2479 tvc->states &= ~CStatd;
2480 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2481 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2483 /* Is it always appropriate to throw away all the access rights? */
2484 afs_FreeAllAxs(&(tvc->Access));
2486 /*Copy useful per-volume info */
2487 tvp = afs_GetVolume(afid, areq, READ_LOCK);
2489 if (newvcache && (tvp->states & VForeign))
2490 tvc->states |= CForeign;
2491 if (tvp->states & VRO)
2492 tvc->states |= CRO;
2493 if (tvp->states & VBackup)
2494 tvc->states |= CBackup;
2496 * Now, copy ".." entry back out of volume structure, if
2499 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2501 tvc->mvid = (struct VenusFid *)
2502 osi_AllocSmallSpace(sizeof(struct VenusFid));
2503 *tvc->mvid = tvp->dotdot;
2506 /* store the stat on the file */
2507 afs_RemoveVCB(afid);
2508 afs_ProcessFS(tvc, OutStatus, areq);
2509 tvc->callback = tc->srvr->server;
2511 /* we use osi_Time twice below. Ideally, we would use the time at which
2512 * the FetchStatus call began instead, but we don't have it here. So we
2513 * make do with "now". In the CRO case, it doesn't really matter. In
2514 * the other case, we hope that the difference between "now" and when the
2515 * call actually began execution on the server won't be larger than the
2516 * padding which the server keeps. Subtract 1 second anyway, to be on
2517 * the safe side. Can't subtract more because we don't know how big
2518 * ExpirationTime is. Possible consistency problems may arise if the call
2519 * timeout period becomes longer than the server's expiration padding. */
2520 ObtainWriteLock(&afs_xcbhash, 470);
2521 if (CallBack->ExpirationTime != 0) {
2522 tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
2523 tvc->states |= CStatd;
2524 tvc->states &= ~CBulkFetching;
2525 afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
2526 } else if (tvc->states & CRO) {
2527 /* old-fashioned AFS 3.2 style */
2528 tvc->cbExpires = 3600 + osi_Time();
2529 /*XXX*/ tvc->states |= CStatd;
2530 tvc->states &= ~CBulkFetching;
2531 afs_QueueCallback(tvc, CBHash(3600), tvp);
2533 afs_DequeueCallback(tvc);
2534 tvc->callback = NULL;
2535 tvc->states &= ~(CStatd | CUnique);
2536 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2537 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2539 ReleaseWriteLock(&afs_xcbhash);
2541 afs_PutVolume(tvp, READ_LOCK);
2543 /* look in per-pag cache */
2544 if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
2545 ac->axess = OutStatus->CallerAccess; /* substitute pags */
2546 else /* not found, add a new one if possible */
2547 afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);
2549 ReleaseWriteLock(&tvc->lock);
2550 afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
2551 ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32,
2552 tvc->cbExpires, ICL_TYPE_INT32, tvc->cbExpires - osi_Time());
2554 * Release ref count... hope this guy stays around...
2557 } /*afs_StuffVcache */
2564 * Decrements the reference count on a cache entry.
2567 * avc : Pointer to the cache entry to decrement.
2570 * Nothing interesting.
2573 afs_PutVCache(register struct vcache *avc)
2575 AFS_STATCNT(afs_PutVCache);
2577 * Can we use a read lock here?
2579 ObtainReadLock(&afs_xvcache);
2581 ReleaseReadLock(&afs_xvcache);
2582 } /*afs_PutVCache */
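/*
 * A minimal usage sketch (not part of the original source): every
 * reference handed out by a lookup such as afs_GetVCache (signature
 * assumed from its use elsewhere in this module) must eventually be
 * returned through afs_PutVCache.
 */
#if 0
{
    struct VenusFid fid;	/* assumed already filled in */
    struct vrequest *areq;	/* assumed already initialized */
    struct vcache *tvc;

    tvc = afs_GetVCache(&fid, areq, NULL, NULL);	/* takes a reference */
    if (tvc) {
	/* ... use the stat cache entry ... */
	afs_PutVCache(tvc);	/* drop the reference when done */
    }
}
#endif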
2588 * Find a vcache entry given a fid.
2591 * afid : Pointer to the fid whose cache entry we desire.
2592 * retry: (SGI-specific) tell the caller to drop the lock on xvcache,
2593 * unlock the vnode, and try again.
2594 * flags: bit 1 to specify whether to compute hit statistics. Not
2595 * set if FindVCache is called as part of internal bookkeeping.
2598 * Must be called with the afs_xvcache lock at least held at
2599 * the read level. In order to do the VLRU adjustment, the xvcache lock
2600 * must be shared-- we upgrade it here.
2604 afs_FindVCache(struct VenusFid *afid, afs_int32 * retry, afs_int32 flag)
2607 register struct vcache *tvc;
2610 AFS_STATCNT(afs_FindVCache);
2613 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2614 if (FidMatches(afid, tvc)) {
2616 /* Grab this vnode, possibly reactivating from the free list */
2619 vg = vget(AFSTOV(tvc));
2623 #endif /* AFS_OSF_ENV */
2628 /* should I have a read lock on the vnode here? */
2632 #if !defined(AFS_OSF_ENV)
2633 osi_vnhold(tvc, retry); /* already held, above */
2634 if (retry && *retry)
2638 * only move to front of the VLRU if we have proper vcache locking
2640 if (flag & DO_VLRU) {
2641 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2642 refpanic("FindVC VLRU inconsistent1");
2644 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2645 refpanic("FindVC VLRU inconsistent2");
2647 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2648 refpanic("FindVC VLRU inconsistent3");
2650 UpgradeSToWLock(&afs_xvcache, 26);
2651 QRemove(&tvc->vlruq);
2652 QAdd(&VLRU, &tvc->vlruq);
2653 ConvertWToSLock(&afs_xvcache);
2654 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2655 refpanic("FindVC VLRU inconsistent4");
2657 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2658 refpanic("FindVC VLRU inconsistent5");
2660 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2661 refpanic("FindVC VLRU inconsistent6");
2667 if (flag & DO_STATS) {
2669 afs_stats_cmperf.vcacheHits++;
2671 afs_stats_cmperf.vcacheMisses++;
2672 if (afs_IsPrimaryCellNum(afid->Cell))
2673 afs_stats_cmperf.vlocalAccesses++;
2675 afs_stats_cmperf.vremoteAccesses++;
2677 #ifdef AFS_LINUX22_ENV
2678 if (tvc && (tvc->states & CStatd))
2679 vcache2inode(tvc); /* mainly to reset i_nlink */
2681 #ifdef AFS_DARWIN_ENV
2683 osi_VM_Setup(tvc, 0);
2686 } /*afs_FindVCache */
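/*
 * A minimal caller sketch (not part of the original source) for the
 * locking discipline documented above: afs_xvcache is taken shared so
 * that the DO_VLRU path can upgrade to a write lock internally; the
 * lock rank number is hypothetical.
 */
#if 0
{
    struct VenusFid fid;	/* assumed already filled in */
    struct vcache *tvc;
    afs_int32 retry = 0;

    ObtainSharedLock(&afs_xvcache, 999);
    tvc = afs_FindVCache(&fid, &retry, DO_VLRU | DO_STATS);
    ReleaseSharedLock(&afs_xvcache);
    /* on SGI, a nonzero retry asks us to unlock the vnode and retry */
}
#endif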
2692 * Find a vcache entry given a fid. Does a wildcard match on what we
2693 * have for the fid. If more than one entry matches, don't return anything.
2696 * avcp : Fill in pointer if we found one and only one.
2697 * afid : Pointer to the fid whose cache entry we desire.
2704 * Must be called with the afs_xvcache lock at least held at
2705 * the read level. In order to do the VLRU adjustment, the xvcache lock
2706 * must be shared-- we upgrade it here.
2709 * number of matches found.
2712 int afs_duplicate_nfs_fids = 0;
2715 afs_NFSFindVCache(struct vcache **avcp, struct VenusFid *afid)
2717 register struct vcache *tvc;
2719 afs_int32 count = 0;
2720 struct vcache *found_tvc = NULL;
2722 AFS_STATCNT(afs_FindVCache); /* XXX: reuses afs_FindVCache's stat counter */
2724 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2728 ObtainSharedLock(&afs_xvcache, 331);
2731 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2732 /* Match only on what we have.... */
2733 if (((tvc->fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
2734 && (tvc->fid.Fid.Volume == afid->Fid.Volume)
2735 && ((tvc->fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
2736 && (tvc->fid.Cell == afid->Cell)) {
2738 /* Grab this vnode, possibly reactivating from the free list */
2741 vg = vget(AFSTOV(tvc));
2744 /* This vnode no longer exists. */
2747 #endif /* AFS_OSF_ENV */
2752 /* Drop our reference counts. */
2754 vrele(AFSTOV(found_tvc));
2756 afs_duplicate_nfs_fids++;
2757 ReleaseSharedLock(&afs_xvcache);
2765 /* should I have a read lock on the vnode here? */
2767 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2768 afs_int32 retry = 0;
2769 osi_vnhold(tvc, &retry);
2772 found_tvc = (struct vcache *)0;
2773 ReleaseSharedLock(&afs_xvcache);
2774 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2778 #if !defined(AFS_OSF_ENV)
2779 osi_vnhold(tvc, (int *)0); /* already held, above */
2783 * We obtained the xvcache lock above.
2785 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2786 refpanic("NFSFindVC VLRU inconsistent1");
2788 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2789 refpanic("NFSFindVC VLRU inconsistent2");
2791 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2792 refpanic("NFSFindVC VLRU inconsistent3");
2794 UpgradeSToWLock(&afs_xvcache, 568);
2795 QRemove(&tvc->vlruq);
2796 QAdd(&VLRU, &tvc->vlruq);
2797 ConvertWToSLock(&afs_xvcache);
2798 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2799 refpanic("NFSFindVC VLRU inconsistent4");
2801 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2802 refpanic("NFSFindVC VLRU inconsistent5");
2804 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2805 refpanic("NFSFindVC VLRU inconsistent6");
2811 afs_stats_cmperf.vcacheHits++;
2813 afs_stats_cmperf.vcacheMisses++;
2814 if (afs_IsPrimaryCellNum(afid->Cell))
2815 afs_stats_cmperf.vlocalAccesses++;
2817 afs_stats_cmperf.vremoteAccesses++;
2819 *avcp = tvc; /* May be null */
2821 ReleaseSharedLock(&afs_xvcache);
2822 return (tvc ? 1 : 0);
2824 } /*afs_NFSFindVCache */
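/*
 * A small sketch (not part of the original source) isolating the
 * wildcard comparison used above.  An NFS handle coming back through
 * the translator presumably preserves only the low 16 bits of Vnode
 * and the low 24 bits of Unique, so the cached fid is masked down to
 * the bits the handle could have carried before comparing.
 */
#if 0
static int
example_NFSFidMatches(struct vcache *tvc, struct VenusFid *afid)
{
    return ((tvc->fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode
	    && tvc->fid.Fid.Volume == afid->Fid.Volume
	    && (tvc->fid.Fid.Unique & 0xffffff) == afid->Fid.Unique
	    && tvc->fid.Cell == afid->Cell);
}
#endif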
2832 * Initialize vcache related variables
2835 afs_vcacheInit(int astatSize)
2837 register struct vcache *tvp;
2839 #if defined(AFS_OSF_ENV)
2840 if (!afs_maxvcount) {
2841 #if defined(AFS_OSF30_ENV)
2842 afs_maxvcount = max_vnodes / 2; /* limit ourselves to half the total */
2844 afs_maxvcount = nvnode / 2; /* limit ourselves to half the total */
2846 if (astatSize < afs_maxvcount) {
2847 afs_maxvcount = astatSize;
2850 #else /* AFS_OSF_ENV */
2854 RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
2855 LOCK_INIT(&afs_xvcb, "afs_xvcb");
2857 #if !defined(AFS_OSF_ENV)
2858 /* Allocate and thread the struct vcache entries */
2859 tvp = (struct vcache *)afs_osi_Alloc(astatSize * sizeof(struct vcache));
2860 memset((char *)tvp, 0, sizeof(struct vcache) * astatSize);
2862 Initial_freeVCList = tvp;
2863 freeVCList = &(tvp[0]);
2864 for (i = 0; i < astatSize - 1; i++) {
2865 tvp[i].nextfree = &(tvp[i + 1]);
2867 tvp[astatSize - 1].nextfree = NULL;
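/*
 * A minimal sketch (not part of the original source): once the entries
 * are threaded as above, allocating one is presumably just a pop from
 * the head of freeVCList.
 */
#if 0
{
    struct vcache *tvc = freeVCList;	/* take the head entry */
    if (tvc) {
	freeVCList = tvc->nextfree;	/* unlink from the free list */
	tvc->nextfree = NULL;
    }
}
#endif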
2868 #ifdef KERNEL_HAVE_PIN
2869 pin((char *)tvp, astatSize * sizeof(struct vcache)); /* XXX */
2874 #if defined(AFS_SGI_ENV)
2875 for (i = 0; i < astatSize; i++) {
2876 char name[METER_NAMSZ];
2877 struct vcache *tvc = &tvp[i];
2879 tvc->v.v_number = ++afsvnumbers;
2880 tvc->vc_rwlockid = OSI_NO_LOCKID;
2881 initnsema(&tvc->vc_rwlock, 1,
2882 makesname(name, "vrw", tvc->v.v_number));
2883 #ifndef AFS_SGI53_ENV
2884 initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
2886 #ifndef AFS_SGI62_ENV
2887 initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
2888 #endif /* AFS_SGI62_ENV */
2902 shutdown_vcache(void)
2905 struct afs_cbr *tsp, *nsp;
2907 * XXX We may miss some of the vcaches: when there are no free vcache
2908 * entries and all of them are active, we allocate an additional one --
2909 * admittedly, that almost never occurs.
2913 register struct afs_q *tq, *uq;
2914 register struct vcache *tvc;
2915 for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
2919 osi_FreeSmallSpace(tvc->mvid);
2920 tvc->mvid = (struct VenusFid *)0;
2923 aix_gnode_rele(AFSTOV(tvc));
2925 if (tvc->linkData) {
2926 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
2931 * Also free the remaining ones in the Cache
2933 for (i = 0; i < VCSIZE; i++) {
2934 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2936 osi_FreeSmallSpace(tvc->mvid);
2937 tvc->mvid = (struct VenusFid *)0;
2941 afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
2942 #ifdef AFS_AIX32_ENV
2945 vms_delete(tvc->segid);
2947 tvc->segid = tvc->vmh = NULL;
2949 osi_Panic("flushVcache: vm race");
2957 #if defined(AFS_SUN5_ENV)
2963 if (tvc->linkData) {
2964 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
2968 afs_FreeAllAxs(&(tvc->Access));
2974 * Free any leftover callback queue
2976 for (tsp = afs_cbrSpace; tsp; tsp = nsp) {
2978 afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
2982 #if !defined(AFS_OSF_ENV)
2983 afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
2985 #ifdef KERNEL_HAVE_PIN
2986 unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
2988 #if !defined(AFS_OSF_ENV)
2989 freeVCList = Initial_freeVCList = 0;
2991 RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
2992 LOCK_INIT(&afs_xvcb, "afs_xvcb");