2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 * afs_FlushActiveVcaches
38 #include <afsconfig.h>
39 #include "afs/param.h"
44 #include "afs/sysincludes.h" /*Standard vendor system headers */
45 #include "afsincludes.h" /*AFS-based standard headers */
46 #include "afs/afs_stats.h"
47 #include "afs/afs_cbqueue.h"
48 #include "afs/afs_osidnlc.h"
/* Module-scope state for the vcache (stat cache) subsystem. */
51 afs_int32 afs_maxvcount = 0;	/* max number of vcache entries */
52 afs_int32 afs_vcount = 0;	/* number of vcache in use now */
53 #endif /* AFS_OSF_ENV */
61 #endif /* AFS_SGI64_ENV */
63 /* Exported variables */
64 afs_rwlock_t afs_xvcache;	/*Lock: alloc new stat cache entries */
65 afs_lock_t afs_xvcb;	/*Lock: fids on which there are callbacks */
66 struct vcache *freeVCList;	/*Free list for stat cache entries */
67 struct vcache *Initial_freeVCList;	/*Initial list for above */
68 struct afs_q VLRU;	/*vcache LRU */
69 afs_int32 vcachegen = 0;
70 unsigned int afs_paniconwarn = 0;
/* afs_vhashT: vcache hash table, indexed by VCHash(&fid); chained via hnext. */
71 struct vcache *afs_vhashT[VCSIZE];
/* afs_cbrHashT: queued callback-return (CBR) entries, keyed by afs_HashCBRFid(). */
72 static struct afs_cbr *afs_cbrHashT[CBRSIZE];
/* NOTE(review): presumably counts bulk-stat'd files recycled before use
 * (see CBulkStat check in afs_FlushVCache) -- increment site not visible here. */
73 afs_int32 afs_bulkStatsLost;
/* afs_norefpanic: when nonzero, refcount underflow warns instead of panicking. */
74 int afs_norefpanic = 0;
76 /* Forward declarations */
77 static afs_int32 afs_QueueVCB(struct vcache *avc);
82 * Generate an index into the hash table for a given Fid.
/* Hash a fid into afs_cbrHashT by summing Volume+Vnode+Unique mod CBRSIZE. */
85 afs_HashCBRFid(struct AFSFid *fid) {
86 return (fid->Volume + fid->Vnode + fid->Unique) % CBRSIZE;
92 * Insert a CBR entry into the hash table.
93 * Must be called with afs_xvcb held.
/*
 * Push cbr onto the head of its hash chain.  The chain is doubly linked:
 * hash_next points forward, hash_pprev points at the slot pointer or at the
 * predecessor's hash_next, so unlink needs no chain walk (see afs_FreeCBR).
 * Caller holds afs_xvcb (per the header comment above).
 */
96 afs_InsertHashCBR(struct afs_cbr *cbr) {
97 int slot = afs_HashCBRFid(&cbr->fid);
99 cbr->hash_next = afs_cbrHashT[slot];
100 if (afs_cbrHashT[slot])
101 afs_cbrHashT[slot]->hash_pprev = &cbr->hash_next;
103 cbr->hash_pprev = &afs_cbrHashT[slot];
104 afs_cbrHashT[slot] = cbr;
111 * Flush the given vcache entry.
114 * avc : Pointer to vcache entry to flush.
115 * slept : Pointer to int to set 1 if we sleep/drop locks, 0 if we don't.
118 * afs_xvcache lock must be held for writing upon entry to
119 * prevent people from changing the vrefCount field, and to
120 * protect the lruq and hnext fields.
121 * LOCK: afs_FlushVCache afs_xvcache W
122 * REFCNT: vcache ref count must be zero on entry except for osf1
123 * RACE: lock is dropped and reobtained, permitting race in caller
/*
 * Tear down one vcache entry: flush its VM pages, pull it off the VLRU,
 * unlink it from the fid hash chain, release per-entry allocations, cancel
 * callback state, and (non-OSF) place it on freeVCList.  *slept reports
 * whether locks were dropped.  NOTE(review): this listing has interior
 * source lines missing; comments describe only the visible code.
 */
127 afs_FlushVCache(struct vcache *avc, int *slept)
128 { /*afs_FlushVCache */
130 register afs_int32 i, code;
131 register struct vcache **uvc, *wvc;
134 AFS_STATCNT(afs_FlushVCache);
135 afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
136 ICL_TYPE_INT32, avc->states);
139 VN_LOCK(AFSTOV(avc));
/* Let the platform VM layer discard cached pages first; may sleep. */
143 code = osi_VM_FlushVCache(avc, slept);
147 if (avc->states & CVFlushed) {
/* An entry on the free list (nextfree set) must not still be on the LRU. */
151 if (avc->nextfree || !avc->vlruq.prev || !avc->vlruq.next) { /* qv afs.h */
152 refpanic("LRU vs. Free inconsistency");
154 avc->states |= CVFlushed;
155 /* pull the entry out of the lruq and put it on the free list */
156 QRemove(&avc->vlruq);
157 avc->vlruq.prev = avc->vlruq.next = (struct afs_q *)0;
159 /* keep track of # of files that we bulk stat'd, but never used
160 * before they got recycled.
162 if (avc->states & CBulkStat)
165 /* remove entry from the hash chain */
166 i = VCHash(&avc->fid);
167 uvc = &afs_vhashT[i];
/* Walk the chain to find avc; the unlink assignment is in a missing line. */
168 for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
171 avc->hnext = (struct vcache *)NULL;
176 osi_Panic("flushvcache"); /* not in correct hash bucket */
/* Release per-entry allocations: mount-point fid, symlink contents, ACLs. */
178 osi_FreeSmallSpace(avc->mvid);
179 avc->mvid = (struct VenusFid *)0;
181 afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
182 avc->linkData = NULL;
184 #if defined(AFS_XBSD_ENV)
185 /* OK, there are no internal vrefCounts, so there shouldn't
186 * be any more refs here. */
188 avc->v->v_data = NULL; /* remove from vnode */
189 avc->v = NULL; /* also drop the ptr to vnode */
192 afs_FreeAllAxs(&(avc->Access));
194 /* we can't really give back callbacks on RO files, since the
195 * server only tracks them on a per-volume basis, and we don't
196 * know whether we still have some other files from the same
198 if ((avc->states & CRO) == 0 && avc->callback) {
201 ObtainWriteLock(&afs_xcbhash, 460);
202 afs_DequeueCallback(avc); /* remove it from queued callbacks list */
203 avc->states &= ~(CStatd | CUnique);
204 ReleaseWriteLock(&afs_xcbhash);
205 afs_symhint_inval(avc);
/* Odd vnode numbers (and CForeign entries) may be directories -- purge dnlc. */
206 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
207 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
209 osi_dnlc_purgevp(avc);
212 * Next, keep track of which vnodes we've deleted for create's
213 * optimistic synchronization algorithm
216 if (avc->fid.Fid.Vnode & 1)
221 #if !defined(AFS_OSF_ENV)
222 /* put the entry in the free list */
223 avc->nextfree = freeVCList;
225 if (avc->vlruq.prev || avc->vlruq.next) {
226 refpanic("LRU vs. Free inconsistency");
229 /* This should put it back on the vnode free list since usecount is 1 */
232 if (VREFCOUNT(avc) > 0) {
233 VN_UNLOCK(AFSTOV(avc));
234 AFS_RELE(AFSTOV(avc));
/* Refcount underflow: warn (afs_norefpanic) or panic outright. */
236 if (afs_norefpanic) {
237 printf("flush vc refcnt < 1");
239 (void)vgone(avc, VX_NOSLEEP, NULL);
241 VN_UNLOCK(AFSTOV(avc));
243 osi_Panic("flush vc refcnt < 1");
245 #endif /* AFS_OSF_ENV */
246 avc->states |= CVFlushed;
251 VN_UNLOCK(AFSTOV(avc));
255 } /*afs_FlushVCache */
261 * The core of the inactive vnode op for all but IRIX.
/*
 * Inactive-vnode handling (all platforms but IRIX, per the header above):
 * abandon any dirty data, and finish -- or defer -- the delayed unlink.
 */
264 afs_InactiveVCache(struct vcache *avc, struct AFS_UCRED *acred)
266 AFS_STATCNT(afs_inactive);
267 if (avc->states & CDirty) {
268 /* we can't keep trying to push back dirty data forever. Give up. */
269 afs_InvalidateAllSegments(avc); /* turns off dirty bit */
271 avc->states &= ~CMAPPED; /* mainly used by SunOS 4.0.x */
272 avc->states &= ~CDirty; /* Turn it off */
273 if (avc->states & CUnlinked) {
/* If xvcache/xdcache are held we can't unlink now; mark for later. */
274 if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
275 avc->states |= CUnlinkedDel;
278 afs_remunlink(avc, 1); /* ignore any return code */
287 * Description: allocate a callback return structure from the
288 * free list and return it.
290 * Env: The alloc and free routines are both called with the afs_xvcb lock
291 * held, so we don't have to worry about blocking in osi_Alloc.
/* Free list of callback-return structures; protected by afs_xvcb (see above).
 * NOTE(review): the function header lines are missing from this listing --
 * this is the body of the CBR allocator described in the comment above. */
293 static struct afs_cbr *afs_cbrSpace = 0;
297 register struct afs_cbr *tsp;
/* Refill the free list when empty; flush queued callbacks rather than
 * allocating beyond the cap. */
300 while (!afs_cbrSpace) {
301 if (afs_stats_cmperf.CallBackAlloced >= 2) {
302 /* don't allocate more than 2 * AFS_NCBRS for now */
304 afs_stats_cmperf.CallBackFlushes++;
/* Allocate a batch of AFS_NCBRS entries and thread them into a list. */
308 (struct afs_cbr *)afs_osi_Alloc(AFS_NCBRS *
309 sizeof(struct afs_cbr));
310 for (i = 0; i < AFS_NCBRS - 1; i++) {
311 tsp[i].next = &tsp[i + 1];
313 tsp[AFS_NCBRS - 1].next = 0;
315 afs_stats_cmperf.CallBackAlloced++;
/* Pop the head of the free list for the caller. */
319 afs_cbrSpace = tsp->next;
326 * Description: free a callback return structure, removing it from all lists.
329 * asp -- the address of the structure to free.
331 * Environment: the xvcb lock is held over these calls.
/*
 * Unlink asp from both the per-server CBR list (next/pprev) and the fid hash
 * chain (hash_next/hash_pprev), then return it to the free list.  The pprev
 * back-pointers make both unlinks O(1).  Caller holds afs_xvcb (see above).
 */
334 afs_FreeCBR(register struct afs_cbr *asp)
336 *(asp->pprev) = asp->next;
338 asp->next->pprev = asp->pprev;
340 *(asp->hash_pprev) = asp->hash_next;
342 asp->hash_next->hash_pprev = asp->hash_pprev;
344 asp->next = afs_cbrSpace;
352 * Description: flush all queued callbacks to all servers.
356 * Environment: holds xvcb lock over RPC to guard against race conditions
357 * when a new callback is granted for the same file later on.
/*
 * Return all queued callbacks to their servers, batching up to
 * AFS_MAXCBRSCALL fids per RXAFS_GiveUpCallBacks RPC.  lockit presumably
 * controls whether afs_xvcb is taken here -- TODO confirm (the conditional
 * around MObtainWriteLock is in missing lines).  safety1/2/3 counters guard
 * against corrupted-list cycles.  NOTE(review): interior lines missing.
 */
360 afs_FlushVCBs(afs_int32 lockit)
362 struct AFSFid *tfids;
363 struct AFSCallBack callBacks[1];
364 struct AFSCBFids fidArray;
365 struct AFSCBs cbArray;
367 struct afs_cbr *tcbrp;
371 struct vrequest treq;
373 int safety1, safety2, safety3;
374 XSTATS_DECLS if ((code = afs_InitReq(&treq, afs_osi_credp)))
376 treq.flags |= O_NONBLOCK;
377 tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
380 MObtainWriteLock(&afs_xvcb, 273);
381 ObtainReadLock(&afs_xserver);
/* Walk every server hash chain, draining each server's cbrs queue. */
382 for (i = 0; i < NSERVERS; i++) {
383 for (safety1 = 0, tsp = afs_servers[i];
384 tsp && safety1 < afs_totalServers + 10;
385 tsp = tsp->next, safety1++) {
387 if (tsp->cbrs == (struct afs_cbr *)0)
390 /* otherwise, grab a block of AFS_MAXCBRSCALL from the list
391 * and make an RPC, over and over again.
393 tcount = 0; /* number found so far */
394 for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
395 if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
396 /* if buffer is full, or we've queued all we're going
397 * to from this server, we should flush out the
400 fidArray.AFSCBFids_len = tcount;
401 fidArray.AFSCBFids_val = (struct AFSFid *)tfids;
/* One shared CB_EXCLUSIVE callback entry covers the whole fid array. */
402 cbArray.AFSCBs_len = 1;
403 cbArray.AFSCBs_val = callBacks;
404 memset(&callBacks[0], 0, sizeof(callBacks[0]));
405 callBacks[0].CallBackType = CB_EXCLUSIVE;
406 for (safety3 = 0; safety3 < MAXHOSTS * 2; safety3++) {
407 tc = afs_ConnByHost(tsp, tsp->cell->fsport,
408 tsp->cell->cellNum, &treq, 0,
412 (AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
415 RXAFS_GiveUpCallBacks(tc->id, &fidArray,
423 AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS, SHARED_LOCK,
428 /* ignore return code, since callbacks may have
429 * been returned anyway, we shouldn't leave them
430 * around to be returned again.
432 * Next, see if we are done with this server, and if so,
433 * break to deal with the next one.
439 /* if to flush full buffer */
440 /* if we make it here, we have an entry at the head of cbrs,
441 * which we should copy to the file ID array and then free.
444 tfids[tcount++] = tcbrp->fid;
446 /* Freeing the CBR will unlink it from the server's CBR list */
448 } /* while loop for this one server */
449 if (safety2 > afs_cacheStats) {
450 afs_warn("possible internal error afs_flushVCBs (%d)\n",
453 } /* for loop for this hash chain */
454 } /* loop through all hash chains */
455 if (safety1 > afs_totalServers + 2) {
457 ("AFS internal error (afs_flushVCBs) (%d > %d), continuing...\n",
458 safety1, afs_totalServers + 2);
460 osi_Panic("afs_flushVCBS safety1");
463 ReleaseReadLock(&afs_xserver);
465 MReleaseWriteLock(&afs_xvcb);
466 afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
474 * Queue a callback on the given fid.
480 * Locks the xvcb lock.
481 * Called when the xvcache lock is already held.
/*
 * Queue a callback-return entry for avc's fid on its server: allocate a CBR,
 * link it onto the server's doubly-linked cbrs list, and insert it into the
 * fid hash so afs_RemoveVCB can find it.  Takes afs_xvcb; caller already
 * holds afs_xvcache (see header comment above).
 */
485 afs_QueueVCB(struct vcache *avc)
488 struct afs_cbr *tcbp;
490 AFS_STATCNT(afs_QueueVCB);
491 /* The callback is really just a struct server ptr. */
492 tsp = (struct server *)(avc->callback);
494 /* we now have a pointer to the server, so we just allocate
495 * a queue entry and queue it.
497 MObtainWriteLock(&afs_xvcb, 274);
498 tcbp = afs_AllocCBR();
499 tcbp->fid = avc->fid.Fid;
/* Head-insert into the server's cbrs list, maintaining pprev back-links. */
501 tcbp->next = tsp->cbrs;
503 tsp->cbrs->pprev = &tcbp->next;
506 tcbp->pprev = &tsp->cbrs;
508 afs_InsertHashCBR(tcbp);
510 /* now release locks and return */
511 MReleaseWriteLock(&afs_xvcb);
520 * Remove a queued callback for a given Fid.
523 * afid: The fid we want cleansed of queued callbacks.
526 * Locks xvcb and xserver locks.
527 * Typically called with xdcache, xvcache and/or individual vcache
/*
 * Drop any queued callback-returns for afid: walk its hash chain (saving
 * hash_next before the entry can be freed) and match on all three fid
 * components.  Takes afs_xvcb (see header comment above).
 */
532 afs_RemoveVCB(struct VenusFid *afid)
535 struct afs_cbr *cbr, *ncbr;
537 AFS_STATCNT(afs_RemoveVCB);
538 MObtainWriteLock(&afs_xvcb, 275);
540 slot = afs_HashCBRFid(&afid->Fid);
541 ncbr = afs_cbrHashT[slot];
/* Capture the successor first so freeing cbr cannot invalidate the walk. */
545 ncbr = cbr->hash_next;
547 if (afid->Fid.Volume == cbr->fid.Volume &&
548 afid->Fid.Vnode == cbr->fid.Vnode &&
549 afid->Fid.Unique == cbr->fid.Unique) {
554 MReleaseWriteLock(&afs_xvcb);
557 #if defined(AFS_LINUX22_ENV) && !defined(AFS_LINUX26_ENV)
/*
 * Local copy of the Linux select_parent()/prune walk (pre-2.6 kernels only,
 * per the surrounding #if): depth-first traversal of parent's d_subdirs,
 * collecting zero-count dentries onto a private unused list, then unhashing
 * them (d_drop).  NOTE(review): interior lines are missing from this
 * listing; comments describe only the visible code.
 */
560 __shrink_dcache_parent(struct dentry *parent)
562 struct dentry *this_parent = parent;
563 struct list_head *next;
565 LIST_HEAD(afs_dentry_unused);
568 next = this_parent->d_subdirs.next;
570 while (next != &this_parent->d_subdirs) {
571 struct list_head *tmp = next;
572 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
/* Unreferenced dentry: move it to our private unused list for pruning. */
574 if (!DCOUNT(dentry)) {
575 list_del(&dentry->d_lru);
576 list_add(&dentry->d_lru, afs_dentry_unused.prev);
580 * Descend a level if the d_subdirs list is non-empty.
582 if (!list_empty(&dentry->d_subdirs)) {
583 this_parent = dentry;
588 * All done at this level ... ascend and resume the search.
590 if (this_parent != parent) {
591 next = this_parent->d_child.next;
592 this_parent = this_parent->d_parent;
/* Second phase: pop entries off the collected unused list and drop them. */
597 struct dentry *dentry;
598 struct list_head *tmp;
600 tmp = afs_dentry_unused.prev;
602 if (tmp == &afs_dentry_unused)
604 #ifdef AFS_LINUX24_ENV
609 #endif /* AFS_LINUX24_ENV */
610 dentry = list_entry(tmp, struct dentry, d_lru);
612 #ifdef AFS_LINUX24_ENV
613 /* Unused dentry with a count? */
618 #ifdef AFS_LINUX24_ENV
619 list_del_init(&dentry->d_hash); /* d_drop */
621 list_del(&dentry->d_hash);
622 INIT_LIST_HEAD(&dentry->d_hash);
623 #endif /* AFS_LINUX24_ENV */
632 /* afs_TryFlushDcacheChildren -- Shakes loose vcache references held by
633 * children of the dentry
635 * LOCKS -- Called with afs_xvcache write locked. Drops and reaquires
636 * AFS_GLOCK, so it can call dput, which may call iput, but
637 * keeps afs_xvcache exclusively.
639 * Tree traversal algorithm from fs/dcache.c: select_parent()
/*
 * Shake loose vcache references held by dentries aliasing tvc's inode (see
 * header comment above): for each alias on ip->i_dentry, shrink its subtree
 * via __shrink_dcache_parent() and unhash it when its count drops to zero.
 * Runs with afs_xvcache write-held; may drop/reacquire AFS_GLOCK (per the
 * LOCKS comment above).  NOTE(review): interior lines missing.
 */
642 afs_TryFlushDcacheChildren(struct vcache *tvc)
644 struct inode *ip = AFSTOI(tvc);
645 struct dentry *this_parent;
646 struct list_head *next;
647 struct list_head *cur;
648 struct list_head *head = &ip->i_dentry;
649 struct dentry *dentry;
653 #ifndef old_vcache_scheme
/* Walk all dentry aliases of this inode. */
656 while ((cur = cur->next) != head) {
657 dentry = list_entry(cur, struct dentry, d_alias);
659 if (ICL_SETACTIVE(afs_iclSetp)) {
661 afs_Trace3(afs_iclSetp, CM_TRACE_TRYFLUSHDCACHECHILDREN,
662 ICL_TYPE_POINTER, ip, ICL_TYPE_STRING,
663 dentry->d_parent->d_name.name, ICL_TYPE_STRING,
664 dentry->d_name.name);
/* Only shrink hashed dentries that actually have children. */
668 if (!list_empty(&dentry->d_hash) && !list_empty(&dentry->d_subdirs))
669 __shrink_dcache_parent(dentry);
671 if (!DCOUNT(dentry)) {
673 #ifdef AFS_LINUX24_ENV
674 list_del_init(&dentry->d_hash); /* d_drop */
676 list_del(&dentry->d_hash);
677 INIT_LIST_HEAD(&dentry->d_hash);
678 #endif /* AFS_LINUX24_ENV */
/* old_vcache_scheme variant of the same alias walk. */
690 while ((cur = cur->next) != head) {
691 dentry = list_entry(cur, struct dentry, d_alias);
693 afs_Trace3(afs_iclSetp, CM_TRACE_TRYFLUSHDCACHECHILDREN,
694 ICL_TYPE_POINTER, ip, ICL_TYPE_STRING,
695 dentry->d_parent->d_name.name, ICL_TYPE_STRING,
696 dentry->d_name.name);
698 if (!DCOUNT(dentry)) {
711 #endif /* AFS_LINUX22_ENV && !AFS_LINUX26_ENV */
717 * This routine is responsible for allocating a new cache entry
718 * from the free list. It formats the cache entry and inserts it
719 * into the appropriate hash tables. It must be called with
720 * afs_xvcache write-locked so as to prevent several processes from
721 * trying to create a new cache entry simultaneously.
724 * afid : The file id of the file whose cache entry is being
727 /* LOCK: afs_NewVCache afs_xvcache W */
/*
 * Allocate and initialize a new vcache entry for afid (see the header
 * comment above; afs_xvcache must be write-held).  Overall shape visible
 * here: (1) if no free entries, recycle from the VLRU tail via
 * afs_FlushVCache, or heap-allocate as a last resort; (2) run a long chain
 * of per-platform vnode/lock/VM initialization; (3) hash the entry into
 * afs_vhashT and add it to the VLRU, with consistency checks.
 * NOTE(review): many interior source lines are missing from this listing;
 * comments below describe only the visible code.
 */
729 afs_NewVCache(struct VenusFid *afid, struct server *serverp)
733 afs_int32 anumber = VCACHE_FREE;
735 struct gnode *gnodepnt;
738 struct vm_info *vm_info_ptr;
739 #endif /* AFS_MACH_ENV */
742 #endif /* AFS_OSF_ENV */
743 struct afs_q *tq, *uq;
746 AFS_STATCNT(afs_NewVCache);
749 if (afs_vcount >= afs_maxvcount) {
752 * If we are using > 33 % of the total system vnodes for AFS vcache
753 * entries or we are using the maximum number of vcache entries,
754 * then free some. (if our usage is > 33% we should free some, if
755 * our usage is > afs_maxvcount, set elsewhere to 0.5*nvnode,
756 * we _must_ free some -- no choice).
758 if (((3 * afs_vcount) > nvnode) || (afs_vcount >= afs_maxvcount)) {
760 struct afs_q *tq, *uq;
/* Scan the VLRU from the cold (tail) end, recycling idle entries. */
765 for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
768 if (tvc->states & CVFlushed)
769 refpanic("CVFlushed on VLRU");
770 else if (i++ > afs_maxvcount)
771 refpanic("Exceeded pool of AFS vnodes(VLRU cycle?)");
772 else if (QNext(uq) != tq)
773 refpanic("VLRU inconsistent");
774 else if (VREFCOUNT(tvc) < 1)
775 refpanic("refcnt 0 on VLRU");
/* Only recycle entries with no opens, sole reference, no pending unlink. */
777 if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
778 && (tvc->states & CUnlinkedDel) == 0) {
779 code = afs_FlushVCache(tvc, &fv_slept);
786 continue; /* start over - may have raced. */
792 if (anumber == VCACHE_FREE) {
793 printf("NewVCache: warning none freed, using %d of %d\n",
794 afs_vcount, afs_maxvcount);
795 if (afs_vcount >= afs_maxvcount) {
796 osi_Panic("NewVCache - none freed");
797 /* XXX instead of panicing, should do afs_maxvcount++
798 * and magic up another one */
804 if (getnewvnode(MOUNT_AFS, &Afs_vnodeops, &nvc)) {
805 /* What should we do ???? */
806 osi_Panic("afs_NewVCache: no more vnodes");
811 tvc->nextfree = NULL;
813 #else /* AFS_OSF_ENV */
814 /* pull out a free cache entry */
817 for (tq = VLRU.prev; (anumber > 0) && (tq != &VLRU); tq = uq) {
821 if (tvc->states & CVFlushed) {
822 refpanic("CVFlushed on VLRU");
823 } else if (i++ > 2 * afs_cacheStats) { /* even allowing for a few xallocs... */
824 refpanic("Increase -stat parameter of afsd(VLRU cycle?)");
825 } else if (QNext(uq) != tq) {
826 refpanic("VLRU inconsistent");
828 #ifdef AFS_DARWIN_ENV
/* Darwin keeps DARWIN_REFBASE extra refs, +1 more when UBC info exists. */
829 if ((VREFCOUNT(tvc) < DARWIN_REFBASE) ||
830 (VREFCOUNT(tvc) < 1+DARWIN_REFBASE &&
831 UBCINFOEXISTS(&tvc->v))) {
833 DARWIN_REFBASE + (UBCINFOEXISTS(&tvc->v) ? 1 : 0));
835 if (tvc->opens == 0 && ((tvc->states & CUnlinkedDel) == 0)
836 && VREFCOUNT(tvc) == DARWIN_REFBASE+1
837 && UBCINFOEXISTS(&tvc->v)) {
838 osi_VM_TryReclaim(tvc, &fv_slept);
842 continue; /* start over - may have raced. */
845 #elif defined(AFS_LINUX22_ENV)
/* On Linux, try to release dentry-held refs before judging the refcount. */
846 if (tvc != afs_globalVp && VREFCOUNT(tvc) && tvc->opens == 0) {
847 #if defined(AFS_LINUX26_ENV)
849 d_prune_aliases(AFSTOI(tvc));
852 afs_TryFlushDcacheChildren(tvc);
857 if (VREFCOUNT(tvc) ==
858 #ifdef AFS_DARWIN_ENV
864 && (tvc->states & CUnlinkedDel) == 0) {
865 #if defined(AFS_XBSD_ENV)
867 * vgone() reclaims the vnode, which calls afs_FlushVCache(),
868 * then it puts the vnode on the free list.
869 * If we don't do this we end up with a cleaned vnode that's
870 * not on the free list.
871 * XXX assume FreeBSD is the same for now.
876 code = afs_FlushVCache(tvc, &fv_slept);
884 continue; /* start over - may have raced. */
892 /* none free, making one is better than a panic */
893 afs_stats_cmperf.vcacheXAllocs++; /* count in case we have a leak */
894 tvc = (struct vcache *)afs_osi_Alloc(sizeof(struct vcache));
895 #ifdef KERNEL_HAVE_PIN
896 pin((char *)tvc, sizeof(struct vcache)); /* XXX */
899 /* In case it still comes here we need to fill this */
900 tvc->v.v_vm_info = VM_INFO_NULL;
901 vm_info_init(tvc->v.v_vm_info);
902 /* perhaps we should also do close_flush on non-NeXT mach systems;
903 * who knows; we don't currently have the sources.
905 #endif /* AFS_MACH_ENV */
906 #if defined(AFS_SGI_ENV)
908 char name[METER_NAMSZ];
909 memset(tvc, 0, sizeof(struct vcache));
910 tvc->v.v_number = ++afsvnumbers;
911 tvc->vc_rwlockid = OSI_NO_LOCKID;
912 initnsema(&tvc->vc_rwlock, 1,
913 makesname(name, "vrw", tvc->v.v_number));
914 #ifndef AFS_SGI53_ENV
915 initnsema(&tvc->v.v_sync, 0,
916 makesname(name, "vsy", tvc->v.v_number));
918 #ifndef AFS_SGI62_ENV
919 initnlock(&tvc->v.v_lock,
920 makesname(name, "vlk", tvc->v.v_number));
923 #endif /* AFS_SGI_ENV */
925 tvc = freeVCList; /* take from free list */
926 freeVCList = tvc->nextfree;
927 tvc->nextfree = NULL;
929 #endif /* AFS_OSF_ENV */
932 vm_info_ptr = tvc->v.v_vm_info;
933 #endif /* AFS_MACH_ENV */
935 #if defined(AFS_XBSD_ENV)
937 panic("afs_NewVCache(): free vcache with vnode attached");
940 #if !defined(AFS_SGI_ENV) && !defined(AFS_OSF_ENV)
941 memset((char *)tvc, 0, sizeof(struct vcache));
946 RWLOCK_INIT(&tvc->lock, "vcache lock");
947 #if defined(AFS_SUN5_ENV)
948 RWLOCK_INIT(&tvc->vlock, "vcache vlock");
949 #endif /* defined(AFS_SUN5_ENV) */
952 tvc->v.v_vm_info = vm_info_ptr;
953 tvc->v.v_vm_info->pager = MEMORY_OBJECT_NULL;
954 #endif /* AFS_MACH_ENV */
957 afs_nbsd_getnewvnode(tvc); /* includes one refcount */
959 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
966 #ifdef AFS_FBSD50_ENV
967 if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
969 if (getnewvnode(VT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
971 panic("afs getnewvnode"); /* can't happen */
973 if (tvc->v != NULL) {
974 /* I'd like to know if this ever happens...
975 We don't drop global for the rest of this function,
976 so if we do lose the race, the other thread should
977 have found the same vnode and finished initializing
978 the vcache entry. Is it conceivable that this vcache
979 entry could be recycled during this interval? If so,
980 then there probably needs to be some sort of additional
981 mutual exclusion (an Embryonic flag would suffice).
983 printf("afs_NewVCache: lost the race\n");
987 tvc->v->v_data = tvc;
988 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
/* Platform-independent field initialization. */
991 tvc->parentVnode = 0;
993 tvc->linkData = NULL;
996 tvc->execsOrWriters = 0;
1000 tvc->last_looker = 0;
1002 tvc->asynchrony = -1;
1004 afs_symhint_inval(tvc);
1006 tvc->flushDV.low = tvc->flushDV.high = AFS_MAXDV;
1009 tvc->truncPos = AFS_NOTRUNC; /* don't truncate until we need to */
1010 hzero(tvc->m.DataVersion); /* in case we copy it into flushDV */
1011 #if defined(AFS_LINUX22_ENV)
/* Linux: hand-initialize the embedded inode / address_space. */
1013 struct inode *ip = AFSTOI(tvc);
1014 struct address_space *mapping = &ip->i_data;
1016 #if defined(AFS_LINUX26_ENV)
1017 inode_init_once(ip);
1019 sema_init(&ip->i_sem, 1);
1020 INIT_LIST_HEAD(&ip->i_hash);
1021 INIT_LIST_HEAD(&ip->i_dentry);
1022 #if defined(AFS_LINUX24_ENV)
1023 sema_init(&ip->i_zombie, 1);
1024 init_waitqueue_head(&ip->i_wait);
1025 spin_lock_init(&ip->i_data.i_shared_lock);
1026 #ifdef STRUCT_ADDRESS_SPACE_HAS_PAGE_LOCK
1027 spin_lock_init(&ip->i_data.page_lock);
1029 INIT_LIST_HEAD(&ip->i_data.clean_pages);
1030 INIT_LIST_HEAD(&ip->i_data.dirty_pages);
1031 INIT_LIST_HEAD(&ip->i_data.locked_pages);
1032 INIT_LIST_HEAD(&ip->i_dirty_buffers);
1033 #ifdef STRUCT_INODE_HAS_I_DIRTY_DATA_BUFFERS
1034 INIT_LIST_HEAD(&ip->i_dirty_data_buffers);
1036 #ifdef STRUCT_INODE_HAS_I_DEVICES
1037 INIT_LIST_HEAD(&ip->i_devices);
1039 #ifdef STRUCT_INODE_HAS_I_TRUNCATE_SEM
1040 init_rwsem(&ip->i_truncate_sem);
1042 #ifdef STRUCT_INODE_HAS_I_ALLOC_SEM
1043 init_rwsem(&ip->i_alloc_sem);
1046 #else /* AFS_LINUX22_ENV */
1047 sema_init(&ip->i_atomic_write, 1);
1048 init_waitqueue(&ip->i_wait);
1052 #if defined(AFS_LINUX24_ENV)
1054 ip->i_mapping = mapping;
1055 #ifdef STRUCT_ADDRESS_SPACE_HAS_GFP_MASK
1056 ip->i_data.gfp_mask = GFP_HIGHUSER;
1058 #if defined(AFS_LINUX26_ENV)
1059 mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
1061 extern struct backing_dev_info afs_backing_dev_info;
1063 mapping->backing_dev_info = &afs_backing_dev_info;
1068 #if !defined(AFS_LINUX26_ENV)
1070 ip->i_dev = afs_globalVFS->s_dev;
1072 #ifdef STRUCT_INODE_HAS_I_SECURITY
1073 ip->i_security = NULL;
1074 if (security_inode_alloc(ip))
1075 panic("Cannot allocate inode security");
1078 ip->i_sb = afs_globalVFS;
1079 put_inode_on_dummy_list(ip);
1084 /* Hold it for the LRU (should make count 2) */
1085 VN_HOLD(AFSTOV(tvc));
1086 #else /* AFS_OSF_ENV */
1087 #if !defined(AFS_XBSD_ENV)
1088 VREFCOUNT_SET(tvc, 1); /* us */
1089 #endif /* AFS_XBSD_ENV */
1090 #endif /* AFS_OSF_ENV */
1091 #ifdef AFS_AIX32_ENV
1092 LOCK_INIT(&tvc->pvmlock, "vcache pvmlock");
1093 tvc->vmh = tvc->segid = NULL;
1096 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV) || defined(AFS_SUN5_ENV)
1097 #if defined(AFS_SUN5_ENV)
1098 rw_init(&tvc->rwlock, "vcache rwlock", RW_DEFAULT, NULL);
1100 #if defined(AFS_SUN55_ENV)
1101 /* This is required if the kaio (kernel aynchronous io)
1102 ** module is installed. Inside the kernel, the function
1103 ** check_vp( common/os/aio.c) checks to see if the kernel has
1104 ** to provide asynchronous io for this vnode. This
1105 ** function extracts the device number by following the
1106 ** v_data field of the vnode. If we do not set this field
1107 ** then the system panics. The value of the v_data field
1108 ** is not really important for AFS vnodes because the kernel
1109 ** does not do asynchronous io for regular files. Hence,
1110 ** for the time being, we fill up the v_data field with the
1111 ** vnode pointer itself. */
1112 tvc->v.v_data = (char *)tvc;
1113 #endif /* AFS_SUN55_ENV */
1115 afs_BozonInit(&tvc->pvnLock, tvc);
1119 tvc->callback = serverp; /* to minimize chance that clear
1120 * request is lost */
1121 /* initialize vnode data, note vrefCount is v.v_count */
1123 /* Don't forget to free the gnode space */
1124 tvc->v.v_gnode = gnodepnt =
1125 (struct gnode *)osi_AllocSmallSpace(sizeof(struct gnode));
1126 memset((char *)gnodepnt, 0, sizeof(struct gnode));
1128 #ifdef AFS_SGI64_ENV
1129 memset((void *)&(tvc->vc_bhv_desc), 0, sizeof(tvc->vc_bhv_desc));
1130 bhv_desc_init(&(tvc->vc_bhv_desc), tvc, tvc, &Afs_vnodeops);
1131 #ifdef AFS_SGI65_ENV
1132 vn_bhv_head_init(&(tvc->v.v_bh), "afsvp");
1133 vn_bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
1135 bhv_head_init(&(tvc->v.v_bh));
1136 bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
1138 #ifdef AFS_SGI65_ENV
1139 tvc->v.v_mreg = tvc->v.v_mregb = (struct pregion *)tvc;
1140 #ifdef VNODE_TRACING
1141 tvc->v.v_trace = ktrace_alloc(VNODE_TRACE_SIZE, 0);
1143 init_bitlock(&tvc->v.v_pcacheflag, VNODE_PCACHE_LOCKBIT, "afs_pcache",
1145 init_mutex(&tvc->v.v_filocksem, MUTEX_DEFAULT, "afsvfl", (long)tvc);
1146 init_mutex(&tvc->v.v_buf_lock, MUTEX_DEFAULT, "afsvnbuf", (long)tvc);
1148 vnode_pcache_init(&tvc->v);
1149 #if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
1150 /* Above define is never true except in SGI test kernels. */
/* NOTE(review): the line below has mismatched parentheses --
 * "&(tvc->v.v_flag, VLOCK, ..." cannot compile; it only survives because
 * the surrounding #if is never true outside SGI test kernels. */
1151 init_bitlock(&(tvc->v.v_flag, VLOCK, "vnode", tvc->v.v_number);
1153 #ifdef INTR_KTHREADS
1154 AFS_VN_INIT_BUF_LOCK(&(tvc->v));
1157 SetAfsVnode(AFSTOV(tvc));
1158 #endif /* AFS_SGI64_ENV */
1159 #ifdef AFS_DARWIN_ENV
1160 tvc->v.v_ubcinfo = UBC_INFO_NULL;
1161 lockinit(&tvc->rwlock, PINOD, "vcache rwlock", 0, 0);
1162 cache_purge(AFSTOV(tvc));
1163 tvc->v.v_data = tvc;
1164 tvc->v.v_tag = VT_AFS;
1165 /* VLISTNONE(&tvc->v); */
1166 tvc->v.v_freelist.tqe_next = 0;
1167 tvc->v.v_freelist.tqe_prev = (struct vnode **)0xdeadb;
1168 tvc->vrefCount+=DARWIN_REFBASE;
1171 * The proper value for mvstat (for root fids) is setup by the caller.
/* Vnode 1, unique 1 is a volume root by AFS convention. */
1174 if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
1176 if (afs_globalVFS == 0)
1177 osi_Panic("afs globalvfs");
1178 vSetVfsp(tvc, afs_globalVFS);
1179 vSetType(tvc, VREG);
1181 tvc->v.v_vfsnext = afs_globalVFS->vfs_vnodes; /* link off vfs */
1182 tvc->v.v_vfsprev = NULL;
1183 afs_globalVFS->vfs_vnodes = &tvc->v;
1184 if (tvc->v.v_vfsnext != NULL)
1185 tvc->v.v_vfsnext->v_vfsprev = &tvc->v;
1186 tvc->v.v_next = gnodepnt->gn_vnode; /*Single vnode per gnode for us! */
1187 gnodepnt->gn_vnode = &tvc->v;
1190 tvc->v.g_dev = ((struct mount *)afs_globalVFS->vfs_data)->m_dev;
1192 #if defined(AFS_DUX40_ENV)
1193 insmntque(tvc, afs_globalVFS, &afs_ubcops);
1196 /* Is this needed??? */
1197 insmntque(tvc, afs_globalVFS);
1198 #endif /* AFS_OSF_ENV */
1199 #endif /* AFS_DUX40_ENV */
1200 #if defined(AFS_SGI_ENV)
/* SGI: sanity-check that the recycled vnode carries no residual state. */
1201 VN_SET_DPAGES(&(tvc->v), (struct pfdat *)NULL);
1202 osi_Assert((tvc->v.v_flag & VINACT) == 0);
1204 osi_Assert(VN_GET_PGCNT(&(tvc->v)) == 0);
1205 osi_Assert(tvc->mapcnt == 0 && tvc->vc_locktrips == 0);
1206 osi_Assert(tvc->vc_rwlockid == OSI_NO_LOCKID);
1207 osi_Assert(tvc->v.v_filocks == NULL);
1208 #if !defined(AFS_SGI65_ENV)
1209 osi_Assert(tvc->v.v_filocksem == NULL);
1211 osi_Assert(tvc->cred == NULL);
1212 #ifdef AFS_SGI64_ENV
1213 vnode_pcache_reinit(&tvc->v);
1214 tvc->v.v_rdev = NODEV;
1216 vn_initlist((struct vnlist *)&tvc->v);
1218 #endif /* AFS_SGI_ENV */
1220 osi_dnlc_purgedp(tvc); /* this may be overkill */
1221 memset((char *)&(tvc->quick), 0, sizeof(struct vtodc));
1222 memset((char *)&(tvc->callsort), 0, sizeof(struct afs_q));
/* Finally: insert into the fid hash and the head (hot end) of the VLRU. */
1226 tvc->hnext = afs_vhashT[i];
1227 afs_vhashT[i] = tvc;
1228 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
1229 refpanic("NewVCache VLRU inconsistent");
1231 QAdd(&VLRU, &tvc->vlruq); /* put in lruq */
1232 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
1233 refpanic("NewVCache VLRU inconsistent2");
1235 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
1236 refpanic("NewVCache VLRU inconsistent3");
1238 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
1239 refpanic("NewVCache VLRU inconsistent4");
1245 } /*afs_NewVCache */
1249 * afs_FlushActiveVcaches
1255 * doflocks : Do we handle flocks?
1257 /* LOCK: afs_FlushActiveVcaches afs_xvcache N */
/*
 * Sweep every hash chain of active vcaches: (1) if doflocks, send an
 * ExtendLock keep-alive RPC for each entry holding a file lock; (2) finish
 * deferred work flagged on entries -- CCore (store-on-last-reference with
 * the stashed cred) and CUnlinkedDel (delayed unlink).  afs_xvcache is held
 * read-mode and dropped around the per-entry work (see lock comment above).
 * NOTE(review): interior lines missing; comments cover visible code only.
 */
1259 afs_FlushActiveVcaches(register afs_int32 doflocks)
1261 register struct vcache *tvc;
1263 register struct conn *tc;
1264 register afs_int32 code;
1265 register struct AFS_UCRED *cred = NULL;
1266 struct vrequest treq, ureq;
1267 struct AFSVolSync tsync;
1269 XSTATS_DECLS AFS_STATCNT(afs_FlushActiveVcaches);
1270 ObtainReadLock(&afs_xvcache);
1271 for (i = 0; i < VCSIZE; i++) {
1272 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
1273 if (doflocks && tvc->flockCount != 0) {
1274 /* if this entry has an flock, send a keep-alive call out */
1276 ReleaseReadLock(&afs_xvcache);
1277 ObtainWriteLock(&tvc->lock, 51);
1279 afs_InitReq(&treq, afs_osi_credp);
1280 treq.flags |= O_NONBLOCK;
1282 tc = afs_Conn(&tvc->fid, &treq, SHARED_LOCK);
1284 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
1287 RXAFS_ExtendLock(tc->id,
1288 (struct AFSFid *)&tvc->fid.Fid,
/* afs_Analyze retries the RPC on another connection when appropriate. */
1294 } while (afs_Analyze
1295 (tc, code, &tvc->fid, &treq,
1296 AFS_STATS_FS_RPCIDX_EXTENDLOCK, SHARED_LOCK, NULL));
1298 ReleaseWriteLock(&tvc->lock);
1299 ObtainReadLock(&afs_xvcache);
1303 if ((tvc->states & CCore) || (tvc->states & CUnlinkedDel)) {
1305 * Don't let it evaporate in case someone else is in
1306 * this code. Also, drop the afs_xvcache lock while
1307 * getting vcache locks.
1310 ReleaseReadLock(&afs_xvcache);
1311 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
1312 afs_BozonLock(&tvc->pvnLock, tvc);
1314 #if defined(AFS_SGI_ENV)
1316 * That's because if we come in via the CUnlinkedDel bit state path we'll be have 0 refcnt
1318 osi_Assert(VREFCOUNT(tvc) > 0);
1319 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1321 ObtainWriteLock(&tvc->lock, 52);
1322 if (tvc->states & CCore) {
1323 tvc->states &= ~CCore;
1324 /* XXXX Find better place-holder for cred XXXX */
1325 cred = (struct AFS_UCRED *)tvc->linkData;
1326 tvc->linkData = NULL; /* XXX */
1327 afs_InitReq(&ureq, cred);
1328 afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
1329 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
1330 tvc->execsOrWriters);
/* Push the dirty data back to the server under the stashed credentials. */
1331 code = afs_StoreOnLastReference(tvc, &ureq);
1332 ReleaseWriteLock(&tvc->lock);
1333 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
1334 afs_BozonUnlock(&tvc->pvnLock, tvc);
1336 hzero(tvc->flushDV);
1339 if (code && code != VNOVNODE) {
1340 afs_StoreWarn(code, tvc->fid.Fid.Volume,
1341 /* /dev/console */ 1);
1343 } else if (tvc->states & CUnlinkedDel) {
1347 ReleaseWriteLock(&tvc->lock);
1348 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
1349 afs_BozonUnlock(&tvc->pvnLock, tvc);
1351 #if defined(AFS_SGI_ENV)
1352 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
/* Complete the deferred unlink now that no conflicting locks are held. */
1354 afs_remunlink(tvc, 0);
1355 #if defined(AFS_SGI_ENV)
1356 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1359 /* lost (or won, perhaps) the race condition */
1360 ReleaseWriteLock(&tvc->lock);
1361 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
1362 afs_BozonUnlock(&tvc->pvnLock, tvc);
1365 #if defined(AFS_SGI_ENV)
1366 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1368 ObtainReadLock(&afs_xvcache);
1374 AFS_RELE(AFSTOV(tvc));
1376 /* Matches write code setting CCore flag */
1380 #ifdef AFS_DARWIN_ENV
1381 if (VREFCOUNT(tvc) == 1+DARWIN_REFBASE
1382 && UBCINFOEXISTS(&tvc->v)) {
1384 panic("flushactive open, hasubc, but refcnt 1");
1385 osi_VM_TryReclaim(tvc, 0);
1390 ReleaseReadLock(&afs_xvcache);
1398 * Make sure a cache entry is up-to-date status-wise.
1400 * NOTE: everywhere that calls this can potentially be sped up
1401 * by checking CStatd first, and avoiding doing the InitReq
1402 * if this is up-to-date.
1404 * Anymore, the only places that call this KNOW already that the
1405 * vcache is not up-to-date, so we don't screw around.
1408 * avc : Ptr to vcache entry to verify.
/*
 * Force-refresh avc's status (callers already know it is stale -- see the
 * header comment above): clear CStatd/CUnique and the callback, purge dnlc
 * entries that the change may have invalidated, then refetch status via
 * afs_GetVCache.  NOTE(review): interior lines missing from this listing.
 */
1413 afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
1415 register struct vcache *tvc;
1417 AFS_STATCNT(afs_VerifyVCache);
1419 #if defined(AFS_OSF_ENV)
/* OSF: a wired vnode's status must not be refetched; bail out early. */
1420 ObtainReadLock(&avc->lock);
1421 if (afs_IsWired(avc)) {
1422 ReleaseReadLock(&avc->lock);
1425 ReleaseReadLock(&avc->lock);
1426 #endif /* AFS_OSF_ENV */
1427 /* otherwise we must fetch the status info */
1429 ObtainWriteLock(&avc->lock, 53);
/* Someone else revalidated while we waited for the lock -- nothing to do. */
1430 if (avc->states & CStatd) {
1431 ReleaseWriteLock(&avc->lock);
1434 ObtainWriteLock(&afs_xcbhash, 461);
1435 avc->states &= ~(CStatd | CUnique);
1436 avc->callback = NULL;
1437 afs_DequeueCallback(avc);
1438 ReleaseWriteLock(&afs_xcbhash);
1439 ReleaseWriteLock(&avc->lock);
1441 /* since we've been called back, or the callback has expired,
1442 * it's possible that the contents of this directory, or this
1443 * file's name have changed, thus invalidating the dnlc contents.
1445 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
1446 osi_dnlc_purgedp(avc);
1448 osi_dnlc_purgevp(avc);
1450 /* fetch the status info */
1451 tvc = afs_GetVCache(&avc->fid, areq, NULL, avc);
1454 /* Put it back; caller has already incremented vrefCount */
1458 } /*afs_VerifyVCache */
1465 * Simple copy of stat info into cache.
1468 * avc : Ptr to vcache entry involved.
1469 * astat : Ptr to stat info to copy.
1472 * Nothing interesting.
1474 * Callers: as of 1992-04-29, only called by WriteVCache
/*
 * afs_SimpleVStat
 *
 * Simple copy of fetched status info into the vcache: length (only when the
 * file is not being written/mapped locally), mod time, owner/group/mode, and
 * per-user access rights for foreign cells.  Caller must hold avc->lock
 * write-locked (see the SGI osi_Assert below).
 *
 * avc   : vcache entry to update.
 * astat : fetched status to copy in.
 * areq  : request, used for areq->uid when caching access rights.
 *
 * NOTE(review): extraction dropped lines here (e.g. the `length` declaration
 * and several braces are missing); verify against upstream before editing.
 */
1477 afs_SimpleVStat(register struct vcache *avc,
1478 register struct AFSFetchStatus *astat, struct vrequest *areq)
1481     AFS_STATCNT(afs_SimpleVStat);
     /* only adopt the server's length if we aren't writing/mapping locally */
1484     if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1485 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1487     if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
1489 #ifdef AFS_64BIT_CLIENT
1490 FillInt64(length, astat->Length_hi, astat->Length);
1491 #else /* AFS_64BIT_CLIENT */
1492 length = astat->Length;
1493 #endif /* AFS_64BIT_CLIENT */
1494 #if defined(AFS_SGI_ENV)
     /* SGI: must hold the vnode rwlock; toss truncated pages with the
      * vcache lock temporarily dropped */
1495 osi_Assert((valusema(&avc->vc_rwlock) <= 0)
1496 && (OSI_GET_LOCKID() == avc->vc_rwlockid));
1497 if (length < avc->m.Length) {
1498 vnode_t *vp = (vnode_t *) avc;
1500 osi_Assert(WriteLocked(&avc->lock));
1501 ReleaseWriteLock(&avc->lock);
1503 PTOSSVP(vp, (off_t) length, (off_t) MAXLONG);
1505 ObtainWriteLock(&avc->lock, 67);
1508 /* if writing the file, don't fetch over this value */
1509 afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT, ICL_TYPE_POINTER, avc,
1510 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
1511 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1512 avc->m.Length = length;
1513     avc->m.Date = astat->ClientModTime;
1515     avc->m.Owner = astat->Owner;
1516     avc->m.Group = astat->Group;
1517     avc->m.Mode = astat->UnixModeBits;
     /* fold the vnode type into the mode bits */
1518     if (vType(avc) == VREG) {
1519 avc->m.Mode |= S_IFREG;
1520     } else if (vType(avc) == VDIR) {
1521 avc->m.Mode |= S_IFDIR;
1522     } else if (vType(avc) == VLNK) {
1523 avc->m.Mode |= S_IFLNK;
1524 if ((avc->m.Mode & 0111) == 0)
1527     if (avc->states & CForeign) {
1528 struct axscache *ac;
1529 avc->anyAccess = astat->AnonymousAccess;
1531 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1533      * Caller has at least one bit not covered by anonymous, and
1534      * thus may have interesting rights.
1536      * HOWEVER, this is a really bad idea, because any access query
1537      * for bits which aren't covered by anonymous, on behalf of a user
1538      * who doesn't have any special rights, will result in an answer of
1539      * the form "I don't know, lets make a FetchStatus RPC and find out!"
1540      * It's an especially bad idea under Ultrix, since (due to the lack of
1541      * a proper access() call) it must perform several afs_access() calls
1542      * in order to create magic mode bits that vary according to who makes
1543      * the call.  In other words, _every_ stat() generates a test for
1546 #endif /* badidea */
     /* cache the caller's rights, updating an existing axscache if present */
1547 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1548 ac->axess = astat->CallerAccess;
1549 else /* not found, add a new one if possible */
1550 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1554 }   /*afs_SimpleVStat */
1561 * Store the status info *only* back to the server for a
1565 * avc : Ptr to the vcache entry.
1566 * astatus : Ptr to the status info to store.
1567 * areq : Ptr to the associated vrequest.
1570 * Must be called with a shared lock held on the vnode.
/*
 * afs_WriteVCache
 *
 * Store status info *only* back to the server via RXAFS_StoreStatus, then
 * apply the returned status locally (afs_SimpleVStat) on success, or mark
 * the entry stale (clear CStatd|CUnique, dequeue callback) on failure so it
 * is rechecked with the server next time.
 *
 * avc     : vcache entry whose status is stored.
 * astatus : status to store.
 * areq    : associated request.
 *
 * Caller holds a shared lock on avc->lock; it is upgraded to write here and
 * converted back before return.
 *
 * NOTE(review): the retry `do {` header and error-path braces were dropped
 * by extraction; confirm against upstream before editing.
 */
1574 afs_WriteVCache(register struct vcache *avc,
1575 register struct AFSStoreStatus *astatus,
1576 struct vrequest *areq)
1580     struct AFSFetchStatus OutStatus;
1581     struct AFSVolSync tsync;
1582     XSTATS_DECLS AFS_STATCNT(afs_WriteVCache);
1583     afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
1584 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
     /* standard AFS RPC retry loop: afs_Conn + afs_Analyze decide retries */
1587 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
1589     XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
1592 RXAFS_StoreStatus(tc->id, (struct AFSFid *)&avc->fid.Fid,
1593 astatus, &OutStatus, &tsync);
1598     } while (afs_Analyze
1599      (tc, code, &avc->fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
1600       SHARED_LOCK, NULL));
1602     UpgradeSToWLock(&avc->lock, 20);
1604 /* success, do the changes locally */
1605 afs_SimpleVStat(avc, &OutStatus, areq);
1607  * Update the date, too.  SimpleVStat didn't do this, since
1608  * it thought we were doing this after fetching new status
1609  * over a file being written.
1611 avc->m.Date = OutStatus.ClientModTime;
1613 /* failure, set up to check with server next time */
1614 ObtainWriteLock(&afs_xcbhash, 462);
1615 afs_DequeueCallback(avc);
1616 avc->states &= ~(CStatd | CUnique);  /* turn off stat valid flag */
1617 ReleaseWriteLock(&afs_xcbhash);
1618 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
1619 osi_dnlc_purgedp(avc);  /* if it (could be) a directory */
1621     ConvertWToSLock(&avc->lock);
1624 }   /*afs_WriteVCache */
1630 * Copy astat block into vcache info
1633 * avc : Ptr to vcache entry.
1634 * astat : Ptr to stat block to copy in.
1635 * areq : Ptr to associated request.
1638 * Must be called under a write lock
1640 * Note: this code may get dataversion and length out of sync if the file has
1641 * been modified. This is less than ideal. I haven't thought about
1642 * it sufficiently to be certain that it is adequate.
/*
 * afs_ProcessFS
 *
 * Copy a fetched AFSFetchStatus block into the vcache: length (skipped if
 * the file is locally written/mapped -- see the bulkstat warning below),
 * mod time, data version, owner/group/mode/link count, vnode type derived
 * from FileType (with fakestat turning mount-point-style symlinks into
 * directories), anonymous access, and the caller's cached access rights.
 *
 * avc   : vcache entry to fill.
 * astat : fetched status block.
 * areq  : request; areq->uid keys the access-rights cache.
 *
 * Must be called under a write lock (per the original header comment).
 *
 * NOTE(review): extraction dropped lines here (e.g. the `length` declaration
 * and some braces); verify against upstream before editing.
 */
1645 afs_ProcessFS(register struct vcache *avc,
1646       register struct AFSFetchStatus *astat, struct vrequest *areq)
1649     AFS_STATCNT(afs_ProcessFS);
1651 #ifdef AFS_64BIT_CLIENT
1652     FillInt64(length, astat->Length_hi, astat->Length);
1653 #else /* AFS_64BIT_CLIENT */
1654     length = astat->Length;
1655 #endif /* AFS_64BIT_CLIENT */
1656     /* WARNING: afs_DoBulkStat uses the Length field to store a sequence
1657      * number for each bulk status request. Under no circumstances
1658      * should afs_DoBulkStat store a sequence number if the new
1659      * length will be ignored when afs_ProcessFS is called with
1660      * new stats. If you change the following conditional then you
1661      * also need to change the conditional in afs_DoBulkStat.  */
1663     if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1664 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1666     if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
1668 /* if we're writing or mapping this file, don't fetch over these
1671 afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
1672    ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
1673    ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1674 avc->m.Length = length;
1675 avc->m.Date = astat->ClientModTime;
1677     hset64(avc->m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
1678     avc->m.Owner = astat->Owner;
1679     avc->m.Mode = astat->UnixModeBits;
1680     avc->m.Group = astat->Group;
1681     avc->m.LinkCount = astat->LinkCount;
     /* derive vnode type + S_IF* bits from the server's FileType */
1682     if (astat->FileType == File) {
1683 vSetType(avc, VREG);
1684 avc->m.Mode |= S_IFREG;
1685     } else if (astat->FileType == Directory) {
1686 vSetType(avc, VDIR);
1687 avc->m.Mode |= S_IFDIR;
1688     } else if (astat->FileType == SymbolicLink) {
     /* fakestat: treat unexecutable symlinks (mount points) as dirs */
1689 if (afs_fakestat_enable && (avc->m.Mode & 0111) == 0) {
1690     vSetType(avc, VDIR);
1691     avc->m.Mode |= S_IFDIR;
1693     vSetType(avc, VLNK);
1694     avc->m.Mode |= S_IFLNK;
1696     if ((avc->m.Mode & 0111) == 0) {
1700     avc->anyAccess = astat->AnonymousAccess;
1702     if ((astat->CallerAccess & ~astat->AnonymousAccess))
1704  * Caller has at least one bit not covered by anonymous, and
1705  * thus may have interesting rights.
1707  * HOWEVER, this is a really bad idea, because any access query
1708  * for bits which aren't covered by anonymous, on behalf of a user
1709  * who doesn't have any special rights, will result in an answer of
1710  * the form "I don't know, lets make a FetchStatus RPC and find out!"
1711  * It's an especially bad idea under Ultrix, since (due to the lack of
1712  * a proper access() call) it must perform several afs_access() calls
1713  * in order to create magic mode bits that vary according to who makes
1714  * the call.  In other words, _every_ stat() generates a test for
1717 #endif /* badidea */
1719 struct axscache *ac;
     /* cache the caller's rights, updating an existing axscache if present */
1720 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1721     ac->axess = astat->CallerAccess;
1722 else /* not found, add a new one if possible */
1723     afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1725 #ifdef AFS_LINUX22_ENV
1726     vcache2inode(avc); /* Set the inode attr cache */
1728 #ifdef AFS_DARWIN_ENV
1729     osi_VM_Setup(avc, 1);
1732 }   /*afs_ProcessFS */
/*
 * afs_RemoteLookup
 *
 * Perform an RXAFS_Lookup RPC for `name` in the directory identified by
 * afid, using the standard afs_Conn/afs_Analyze retry loop.  On success the
 * out-parameters receive the new fid, its fetch status, the callback info,
 * the server used, and the volume sync data.  A NULL name is mapped to "".
 *
 * NOTE(review): the tail of this function (return path, closing brace) was
 * dropped by extraction; confirm against upstream before editing.
 */
1736 afs_RemoteLookup(register struct VenusFid *afid, struct vrequest *areq,
1737  char *name, struct VenusFid *nfid,
1738  struct AFSFetchStatus *OutStatusp,
1739  struct AFSCallBack *CallBackp, struct server **serverp,
1740  struct AFSVolSync *tsyncp)
1744     register struct conn *tc;
1745     struct AFSFetchStatus OutDirStatus;
1746     XSTATS_DECLS if (!name)
1747 name = ""; /* XXX */
     /* RPC retry loop; remember which server answered for the caller */
1749 tc = afs_Conn(afid, areq, SHARED_LOCK);
1752     *serverp = tc->srvr->server;
1754     XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
1757 RXAFS_Lookup(tc->id, (struct AFSFid *)&afid->Fid, name,
1758      (struct AFSFid *)&nfid->Fid, OutStatusp,
1759      &OutDirStatus, CallBackp, tsyncp);
1764     } while (afs_Analyze
1765      (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
1776 * Given a file id and a vrequest structure, fetch the status
1777 * information associated with the file.
1781 * areq : Ptr to associated vrequest structure, specifying the
1782 * user whose authentication tokens will be used.
1783 * avc : caller may already have a vcache for this file, which is
1787 * The cache entry is returned with an increased vrefCount field.
1788 * The entry must be discarded by calling afs_PutVCache when you
1789 * are through using the pointer to the cache entry.
1791 * You should not hold any locks when calling this function, except
1792 * locks on other vcache entries. If you lock more than one vcache
1793 * entry simultaneously, you should lock them in this order:
1795 * 1. Lock all files first, then directories.
1796 * 2. Within a particular type, lock entries in Fid.Vnode order.
1798 * This locking hierarchy is convenient because it allows locking
1799 * of a parent dir cache entry, given a file (to check its access
1800 * control list). It also allows renames to be handled easily by
1801 * locking directories in a constant order.
1802 * NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
1804 /* might have a vcache structure already, which must
1805 * already be held by the caller */
/*
 * afs_GetVCache
 *
 * Given a fid, return the (referenced) vcache entry for it, fetching status
 * from the server if the cached entry is not CStatd.  Looks up an existing
 * entry under afs_xvcache; creates one with afs_NewVCache on a miss; copies
 * per-volume info (CForeign/CRO/CBackup, ".." for mount points); and fills
 * status either from dynroot or via afs_FetchStatus.
 *
 * afid   : fid of the file.
 * areq   : request/credentials for RPCs.
 * cached : out, set nonzero if the entry was found cached (init'd to 0).
 * avc    : caller may already hold a vcache for this file (then held).
 *
 * The returned entry carries an extra reference; release with afs_PutVCache.
 * See the original header comment (above) for the locking hierarchy.
 *
 * NOTE(review): many lines (braces, returns, platform #endifs) were dropped
 * by extraction; confirm exact flow against upstream before editing.
 */
1808 afs_GetVCache(register struct VenusFid *afid, struct vrequest *areq,
1809       afs_int32 * cached, struct vcache *avc)
1812     afs_int32 code, newvcache = 0;
1813     register struct vcache *tvc;
1817     AFS_STATCNT(afs_GetVCache);
1820 *cached = 0; /* Init just in case */
1822 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1826     ObtainSharedLock(&afs_xvcache, 5);
1828     tvc = afs_FindVCache(afid, &retry, DO_STATS | DO_VLRU);
1830 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
     /* SGI: drop xvcache and retry if the vnode lock spun */
1831 ReleaseSharedLock(&afs_xvcache);
1832 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1840 if (tvc->states & CStatd) {
     /* cached and valid: fast path out */
1841     ReleaseSharedLock(&afs_xvcache);
1845 UpgradeSToWLock(&afs_xvcache, 21);
1847 /* no cache entry, better grab one */
1848 tvc = afs_NewVCache(afid, NULL);
1851 ConvertWToSLock(&afs_xvcache);
1852 afs_stats_cmperf.vcacheMisses++;
1855     ReleaseSharedLock(&afs_xvcache);
1857     ObtainWriteLock(&tvc->lock, 54);
1859     if (tvc->states & CStatd) {
1860 #ifdef AFS_LINUX22_ENV
1863 ReleaseWriteLock(&tvc->lock);
1864 #ifdef AFS_DARWIN_ENV
1865 osi_VM_Setup(tvc, 0);
1869 #if defined(AFS_OSF_ENV)
1870     if (afs_IsWired(tvc)) {
1871 ReleaseWriteLock(&tvc->lock);
1874 #endif /* AFS_OSF_ENV */
     /* BSD-family: flush the VM/buffer cache view of this vnode before
      * refetching status; see the lock-juggling comment below */
1876     VOP_LOCK(AFSTOV(tvc), LK_EXCLUSIVE | LK_RETRY, curproc);
1877     uvm_vnp_uncache(AFSTOV(tvc));
1878     VOP_UNLOCK(AFSTOV(tvc), 0, curproc);
1882      * XXX - I really don't like this.  Should try to understand better.
1883      * It seems that sometimes, when we get called, we already hold the
1884      * lock on the vnode (e.g., from afs_getattr via afs_VerifyVCache).
1885      * We can't drop the vnode lock, because that could result in a race.
1886      * Sometimes, though, we get here and don't hold the vnode lock.
1887      * I hate code paths that sometimes hold locks and sometimes don't.
1888      * In any event, the dodge we use here is to check whether the vnode
1889      * is locked, and if it isn't, then we gain and drop it around the call
1890      * to vinvalbuf; otherwise, we leave it alone.
1897 #ifdef AFS_FBSD50_ENV
1898     iheldthelock = VOP_ISLOCKED(vp, curthread);
1900 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
1901     vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
1903 VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
1905     iheldthelock = VOP_ISLOCKED(vp, curproc);
1907 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
1908     vinvalbuf(vp, V_SAVE, osi_curcred(), curproc, PINOD, 0);
1910 VOP_UNLOCK(vp, LK_EXCLUSIVE, curproc);
     /* mark status stale before refetching */
1915     ObtainWriteLock(&afs_xcbhash, 464);
1916     tvc->states &= ~CUnique;
1918     afs_DequeueCallback(tvc);
1919     ReleaseWriteLock(&afs_xcbhash);
1921     /* It is always appropriate to throw away all the access rights? */
1922     afs_FreeAllAxs(&(tvc->Access));
1923     tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-volume info */
1925 if ((tvp->states & VForeign)) {
1927 tvc->states |= CForeign;
1928     if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
1929 && (tvp->rootUnique == afid->Fid.Unique)) {
1933 if (tvp->states & VRO)
1935 if (tvp->states & VBackup)
1936     tvc->states |= CBackup;
1937 /* now copy ".." entry back out of volume structure, if necessary */
1938 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
1940 tvc->mvid = (struct VenusFid *)
1941     osi_AllocSmallSpace(sizeof(struct VenusFid));
1942     *tvc->mvid = tvp->dotdot;
1944 afs_PutVolume(tvp, READ_LOCK);
1948 afs_RemoveVCB(afid);
1950 struct AFSFetchStatus OutStatus;
     /* dynroot vnodes are synthesized locally; no server RPC needed */
1952 if (afs_DynrootNewVnode(tvc, &OutStatus)) {
1953     afs_ProcessFS(tvc, &OutStatus, areq);
1954     tvc->states |= CStatd | CUnique;
1957     code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
1962 ReleaseWriteLock(&tvc->lock);
1964 ObtainReadLock(&afs_xvcache);
1966 ReleaseReadLock(&afs_xvcache);
1970     ReleaseWriteLock(&tvc->lock);
1973 }   /*afs_GetVCache */
/*
 * afs_LookupVCache
 *
 * Resolve `aname` in directory adp via afs_RemoteLookup, then find or
 * create the vcache entry for the resulting fid and install its status and
 * callback.  The origCBs/afs_allCBs comparison detects callbacks broken
 * during the RPC: if anything changed, the new status is installed but the
 * entry is left !CStatd so it is revalidated later.
 *
 * afid   : fid of the parent-lookup context (also used for volume info).
 * areq   : request/credentials.
 * cached : out, set nonzero if the named file's entry was already cached.
 * adp    : parent directory vcache.
 * aname  : name to look up.
 *
 * NOTE(review): braces/returns and some platform #endifs were dropped by
 * extraction; confirm exact flow against upstream before editing.
 */
1978 afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
1979  afs_int32 * cached, struct vcache *adp, char *aname)
1981     afs_int32 code, now, newvcache = 0;
1982     struct VenusFid nfid;
1983     register struct vcache *tvc;
1985     struct AFSFetchStatus OutStatus;
1986     struct AFSCallBack CallBack;
1987     struct AFSVolSync tsync;
1988     struct server *serverp = 0;
1992     AFS_STATCNT(afs_GetVCache);
1994 *cached = 0; /* Init just in case */
1996 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
     /* first: is the target already cached and valid? */
2000     ObtainReadLock(&afs_xvcache);
2001     tvc = afs_FindVCache(afid, &retry, DO_STATS /* no vlru */ );
2004 ReleaseReadLock(&afs_xvcache);
2006 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2007 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2011 ObtainReadLock(&tvc->lock);
2013 if (tvc->states & CStatd) {
2017     ReleaseReadLock(&tvc->lock);
2020 tvc->states &= ~CUnique;
2022 ReleaseReadLock(&tvc->lock);
2023 ObtainReadLock(&afs_xvcache);
2027 ReleaseReadLock(&afs_xvcache);
2029     /* lookup the file */
     /* snapshot callback counter so we can tell if a callback was broken
      * while the RPC was in flight */
2032     origCBs = afs_allCBs; /* if anything changes, we don't have a cb */
2034 afs_RemoteLookup(&adp->fid, areq, aname, &nfid, &OutStatus, &CallBack,
2037 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
     /* now find/create the vcache for the looked-up fid */
2041     ObtainSharedLock(&afs_xvcache, 6);
2042     tvc = afs_FindVCache(&nfid, &retry, DO_VLRU /* no xstats now */ );
2044 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2045 ReleaseSharedLock(&afs_xvcache);
2046 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2052 /* no cache entry, better grab one */
2053 UpgradeSToWLock(&afs_xvcache, 22);
2054 tvc = afs_NewVCache(&nfid, serverp);
2056 ConvertWToSLock(&afs_xvcache);
2059     ReleaseSharedLock(&afs_xvcache);
2060     ObtainWriteLock(&tvc->lock, 55);
2062     /* It is always appropriate to throw away all the access rights? */
2063     afs_FreeAllAxs(&(tvc->Access));
2064     tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-vol info */
2066 if ((tvp->states & VForeign)) {
2068 tvc->states |= CForeign;
2069     if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
2070 && (tvp->rootUnique == afid->Fid.Unique))
2073 if (tvp->states & VRO)
2075 if (tvp->states & VBackup)
2076     tvc->states |= CBackup;
2077 /* now copy ".." entry back out of volume structure, if necessary */
2078 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2080 tvc->mvid = (struct VenusFid *)
2081     osi_AllocSmallSpace(sizeof(struct VenusFid));
2082     *tvc->mvid = tvp->dotdot;
     /* lookup RPC failed: invalidate the entry and bail out */
2087 ObtainWriteLock(&afs_xcbhash, 465);
2088 afs_DequeueCallback(tvc);
2089 tvc->states &= ~(CStatd | CUnique);
2090 ReleaseWriteLock(&afs_xcbhash);
2091 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2092     osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2094     afs_PutVolume(tvp, READ_LOCK);
2095 ReleaseWriteLock(&tvc->lock);
2096 ObtainReadLock(&afs_xvcache);
2098 ReleaseReadLock(&afs_xvcache);
2102     ObtainWriteLock(&afs_xcbhash, 466);
2103     if (origCBs == afs_allCBs) {
     /* no callbacks broken during the RPC: safe to trust the callback */
2104 if (CallBack.ExpirationTime) {
2105     tvc->callback = serverp;
2106     tvc->cbExpires = CallBack.ExpirationTime + now;
2107     tvc->states |= CStatd | CUnique;
2108     tvc->states &= ~CBulkFetching;
2109     afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
2110 } else if (tvc->states & CRO) {
2111     /* adapt gives us an hour. */
2112     tvc->cbExpires = 3600 + osi_Time();
2113  /*XXX*/ tvc->states |= CStatd | CUnique;
2114     tvc->states &= ~CBulkFetching;
2115     afs_QueueCallback(tvc, CBHash(3600), tvp);
2117     tvc->callback = NULL;
2118     afs_DequeueCallback(tvc);
2119     tvc->states &= ~(CStatd | CUnique);
2120     if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2121 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
     /* a callback WAS broken mid-RPC: leave the entry !CStatd */
2124 afs_DequeueCallback(tvc);
2125 tvc->states &= ~CStatd;
2126 tvc->states &= ~CUnique;
2127 tvc->callback = NULL;
2128 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2129     osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2131     ReleaseWriteLock(&afs_xcbhash);
2133 afs_PutVolume(tvp, READ_LOCK);
2134     afs_ProcessFS(tvc, &OutStatus, areq);
2136     ReleaseWriteLock(&tvc->lock);
/*
 * afs_GetRootVCache
 *
 * Return a referenced vcache for a volume's root vnode.  If the volume
 * structure doesn't yet know its root fid (or a retry forces it), fetch the
 * root fid via afs_RemoteLookup with Vnode==0, record it in tvolp, then
 * find/create the vcache, fill per-volume flags and ".." info, fetch status
 * if needed, and install the callback with the usual origCBs/afs_allCBs
 * broken-callback check.
 *
 * afid   : fid whose Vnode/Unique are filled in from the volume root.
 * areq   : request/credentials.
 * cached : out flag (per sibling functions; init not visible here).
 * tvolp  : volume structure for the root.
 *
 * NOTE(review): extraction dropped many lines (loop headers, braces,
 * returns, the getNewFid retry goto); confirm against upstream before
 * editing.
 */
2142 afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
2143   afs_int32 * cached, struct volume *tvolp)
2145     afs_int32 code = 0, i, newvcache = 0, haveStatus = 0;
2146     afs_int32 getNewFid = 0;
2148     struct VenusFid nfid;
2149     register struct vcache *tvc;
2150     struct server *serverp = 0;
2151     struct AFSFetchStatus OutStatus;
2152     struct AFSCallBack CallBack;
2153     struct AFSVolSync tsync;
     /* learn the volume's root fid from the server if we don't have it */
2159     if (!tvolp->rootVnode || getNewFid) {
2160 struct VenusFid tfid;
2163 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2164 origCBs = afs_allCBs; /* ignore InitCallBackState */
2166     afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2171 /* ReleaseReadLock(&tvolp->lock); */
2172 ObtainWriteLock(&tvolp->lock, 56);
2173 tvolp->rootVnode = afid->Fid.Vnode = nfid.Fid.Vnode;
2174 tvolp->rootUnique = afid->Fid.Unique = nfid.Fid.Unique;
2175 ReleaseWriteLock(&tvolp->lock);
2176 /* ObtainReadLock(&tvolp->lock);*/
2179 afid->Fid.Vnode = tvolp->rootVnode;
2180 afid->Fid.Unique = tvolp->rootUnique;
     /* hash-chain scan for an existing entry for this fid */
2183     ObtainSharedLock(&afs_xvcache, 7);
2185     for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2186 if (!FidCmp(&(tvc->fid), afid)) {
2188     /* Grab this vnode, possibly reactivating from the free list */
2189     /* for the present (95.05.25) everything on the hash table is
2190      * definitively NOT in the free list -- at least until afs_reclaim
2191      * can be safely implemented */
2194     vg = vget(AFSTOV(tvc)); /* this bumps ref count */
2198 #endif /* AFS_OSF_ENV */
2199 #ifdef AFS_DARWIN14_ENV
2200     /* It'd really suck if we allowed duplicate vcaches for the
2201        same fid to happen. Wonder if this will work? */
2202     struct vnode *vp = AFSTOV(tvc);
     /* Darwin: if the vnode is being reclaimed, wait for it to finish
      * rather than creating a duplicate vcache for the same fid */
2203     if (vp->v_flag & (VXLOCK|VORECLAIM|VTERMINATE)) {
2204 printf("precluded FindVCache on %x (%d:%d:%d)\n",
2205        vp, tvc->fid.Fid.Volume, tvc->fid.Fid.Vnode,
2206        tvc->fid.Fid.Unique);
2207 simple_lock(&vp->v_interlock);
2208 SET(vp->v_flag, VTERMWANT);
2209 simple_unlock(&vp->v_interlock);
2210 (void)tsleep((caddr_t)&vp->v_ubcinfo, PINOD, "vget1", 0);
2211 printf("VTERMWANT ended on %x\n", vp);
2219     if (!haveStatus && (!tvc || !(tvc->states & CStatd))) {
2220 /* Mount point no longer stat'd or unknown. FID may have changed. */
2223 AFS_RELE(AFSTOV(tvc));
2227 ReleaseSharedLock(&afs_xvcache);
2232 UpgradeSToWLock(&afs_xvcache, 23);
2233 /* no cache entry, better grab one */
2234 tvc = afs_NewVCache(afid, NULL);
2236 afs_stats_cmperf.vcacheMisses++;
2240 afs_stats_cmperf.vcacheHits++;
2242 /* we already bumped the ref count in the for loop above */
2243 #else /* AFS_OSF_ENV */
     /* move this entry to the head of the VLRU, sanity-checking the queue
      * before and after */
2246 UpgradeSToWLock(&afs_xvcache, 24);
2247 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2248     refpanic("GRVC VLRU inconsistent0");
2250 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2251     refpanic("GRVC VLRU inconsistent1");
2253 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2254     refpanic("GRVC VLRU inconsistent2");
2256 QRemove(&tvc->vlruq); /* move to lruq head */
2257 QAdd(&VLRU, &tvc->vlruq);
2258 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2259     refpanic("GRVC VLRU inconsistent3");
2261 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2262     refpanic("GRVC VLRU inconsistent4");
2264 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2265     refpanic("GRVC VLRU inconsistent5");
2270     ReleaseWriteLock(&afs_xvcache);
2272     if (tvc->states & CStatd) {
2276 ObtainReadLock(&tvc->lock);
2277 tvc->states &= ~CUnique;
2278 tvc->callback = NULL; /* redundant, perhaps */
2279 ReleaseReadLock(&tvc->lock);
2282     ObtainWriteLock(&tvc->lock, 57);
2284     /* It is always appropriate to throw away all the access rights? */
2285     afs_FreeAllAxs(&(tvc->Access));
2288 tvc->states |= CForeign;
2289     if (tvolp->states & VRO)
2291     if (tvolp->states & VBackup)
2292 tvc->states |= CBackup;
2293     /* now copy ".." entry back out of volume structure, if necessary */
2294     if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
2295 && (tvolp->rootUnique == afid->Fid.Unique)) {
2298     if (tvc->mvstat == 2 && tvolp->dotdot.Fid.Volume != 0) {
2300 tvc->mvid = (struct VenusFid *)
2301     osi_AllocSmallSpace(sizeof(struct VenusFid));
2302 *tvc->mvid = tvolp->dotdot;
2306     afs_RemoveVCB(afid);
     /* status not cached: fetch it (root fid again, Vnode==0) */
2309 struct VenusFid tfid;
2312 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2313 origCBs = afs_allCBs; /* ignore InitCallBackState */
2315     afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
     /* fetch failed: invalidate and bail out */
2320 ObtainWriteLock(&afs_xcbhash, 467);
2321 afs_DequeueCallback(tvc);
2322 tvc->callback = NULL;
2323 tvc->states &= ~(CStatd | CUnique);
2324 ReleaseWriteLock(&afs_xcbhash);
2325 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2326     osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2327 ReleaseWriteLock(&tvc->lock);
2328 ObtainReadLock(&afs_xvcache);
2330 ReleaseReadLock(&afs_xvcache);
2334     ObtainWriteLock(&afs_xcbhash, 468);
2335     if (origCBs == afs_allCBs) {
     /* no callbacks broken during the RPC: install the callback */
2336 tvc->states |= CTruth;
2337 tvc->callback = serverp;
2338 if (CallBack.ExpirationTime != 0) {
2339     tvc->cbExpires = CallBack.ExpirationTime + start;
2340     tvc->states |= CStatd;
2341     tvc->states &= ~CBulkFetching;
2342     afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
2343 } else if (tvc->states & CRO) {
2344     /* adapt gives us an hour. */
2345     tvc->cbExpires = 3600 + osi_Time();
2346  /*XXX*/ tvc->states |= CStatd;
2347     tvc->states &= ~CBulkFetching;
2348     afs_QueueCallback(tvc, CBHash(3600), tvolp);
2351 afs_DequeueCallback(tvc);
2352 tvc->callback = NULL;
2353 tvc->states &= ~(CStatd | CUnique);
2354 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2355     osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2357     ReleaseWriteLock(&afs_xcbhash);
2358     afs_ProcessFS(tvc, &OutStatus, areq);
2360     ReleaseWriteLock(&tvc->lock);
2367 * must be called with avc write-locked
2368 * don't absolutely have to invalidate the hint unless the dv has
2369 * changed, but be sure to get it right else there will be consistency bugs.
/*
 * afs_FetchStatus
 *
 * Fetch status for afid via RXAFS_FetchStatus (standard retry loop),
 * invalidating the dcache hint first (must be called with avc write-locked
 * per the original header comment).  On success, copy the status in with
 * afs_ProcessFS and install/refresh the callback; on failure, mark the
 * entry stale, and for EPERM/EACCES remember the zero access rights so we
 * don't keep re-asking the server.
 *
 * avc   : vcache entry being refreshed (write-locked by caller).
 * afid  : fid to fetch.
 * areq  : request/credentials.
 * Outsp : out, the fetched status block.
 *
 * NOTE(review): the `do {` header, some braces and the return were dropped
 * by extraction; confirm against upstream before editing.
 */
2372 afs_FetchStatus(struct vcache * avc, struct VenusFid * afid,
2373 struct vrequest * areq, struct AFSFetchStatus * Outsp)
2376     afs_uint32 start = 0;
2377     register struct conn *tc;
2378     struct AFSCallBack CallBack;
2379     struct AFSVolSync tsync;
2380     struct volume *volp;
2383 tc = afs_Conn(afid, areq, SHARED_LOCK);
     /* invalidate the dcache hints before refetching */
2384 avc->quick.stamp = 0;
2385 avc->h1.dchint = NULL; /* invalidate hints */
2387     avc->callback = tc->srvr->server;
2389     XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
2392 RXAFS_FetchStatus(tc->id, (struct AFSFid *)&afid->Fid, Outsp,
2400     } while (afs_Analyze
2401      (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
2402       SHARED_LOCK, NULL));
2405 afs_ProcessFS(avc, Outsp, areq);
2406 volp = afs_GetVolume(afid, areq, READ_LOCK);
2407 ObtainWriteLock(&afs_xcbhash, 469);
2408 avc->states |= CTruth;
2409 if (avc->callback /* check for race */ ) {
2410     if (CallBack.ExpirationTime != 0) {
2411 avc->cbExpires = CallBack.ExpirationTime + start;
2412 avc->states |= CStatd;
2413 avc->states &= ~CBulkFetching;
2414 afs_QueueCallback(avc, CBHash(CallBack.ExpirationTime), volp);
2415     } else if (avc->states & CRO) { /* ordinary callback on a read-only volume -- AFS 3.2 style */
2416 avc->cbExpires = 3600 + start;
2417 avc->states |= CStatd;
2418 avc->states &= ~CBulkFetching;
2419 afs_QueueCallback(avc, CBHash(3600), volp);
     /* lost the callback race: invalidate */
2421 afs_DequeueCallback(avc);
2422 avc->callback = NULL;
2423 avc->states &= ~(CStatd | CUnique);
2424 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
2425     osi_dnlc_purgedp(avc); /* if it (could be) a directory */
     /* RPC failed: invalidate the entry */
2428 afs_DequeueCallback(avc);
2429 avc->callback = NULL;
2430 avc->states &= ~(CStatd | CUnique);
2431 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
2432     osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2434     ReleaseWriteLock(&afs_xcbhash);
2436 afs_PutVolume(volp, READ_LOCK);
2438     /* used to undo the local callback, but that's too extreme.
2439      * There are plenty of good reasons that fetchstatus might return
2440      * an error, such as EPERM.  If we have the vnode cached, statd,
2441      * with callback, might as well keep track of the fact that we
2442      * don't have access...
2444     if (code == EPERM || code == EACCES) {
2445 struct axscache *ac;
2446 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
2448 else /* not found, add a new one if possible */
2449     afs_AddAxs(avc->Access, areq->uid, 0);
2460 * Stuff some information into the vcache for the given file.
2463 * afid : File in question.
2464 * OutStatus : Fetch status on the file.
2465 * CallBack : Callback info.
2466 * tc : RPC connection involved.
2467 * areq : vrequest involved.
2470 * Nothing interesting.
/*
 * afs_StuffVcache
 *
 * Stuff already-fetched status/callback info into the vcache for afid:
 * find/create the entry, copy per-volume flags and ".." info, install the
 * status via afs_ProcessFS, record the callback server, queue/expire the
 * callback (see the osi_Time() padding comment below), and cache the
 * caller's access rights.
 *
 * afid      : fid of the file.
 * OutStatus : fetched status for the file.
 * CallBack  : callback info from the same RPC.
 * tc        : connection used (tc->srvr->server becomes the callback host).
 * areq      : request; areq->uid keys the access-rights cache.
 *
 * NOTE(review): extraction dropped lines (braces, release path); confirm
 * against upstream before editing.
 */
2473 afs_StuffVcache(register struct VenusFid *afid,
2474 struct AFSFetchStatus *OutStatus,
2475 struct AFSCallBack *CallBack, register struct conn *tc,
2476 struct vrequest *areq)
2478     register afs_int32 code, i, newvcache = 0;
2479     register struct vcache *tvc;
2480     struct AFSVolSync tsync;
2482     struct axscache *ac;
2485     AFS_STATCNT(afs_StuffVcache);
2486 #ifdef IFS_VCACHECOUNT
2491     ObtainSharedLock(&afs_xvcache, 8);
2493     tvc = afs_FindVCache(afid, &retry, DO_VLRU /* no stats */ );
2495 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2496 ReleaseSharedLock(&afs_xvcache);
2497 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2503 /* no cache entry, better grab one */
2504 UpgradeSToWLock(&afs_xvcache, 25);
2505 tvc = afs_NewVCache(afid, NULL);
2507 ConvertWToSLock(&afs_xvcache);
2510     ReleaseSharedLock(&afs_xvcache);
2511     ObtainWriteLock(&tvc->lock, 58);
     /* entry is about to be overwritten: drop stat validity and dnlc */
2513     tvc->states &= ~CStatd;
2514     if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2515 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2517     /* Is it always appropriate to throw away all the access rights? */
2518     afs_FreeAllAxs(&(tvc->Access));
2520     /*Copy useful per-volume info */
2521     tvp = afs_GetVolume(afid, areq, READ_LOCK);
2523 if (newvcache && (tvp->states & VForeign))
2524     tvc->states |= CForeign;
2525 if (tvp->states & VRO)
2527 if (tvp->states & VBackup)
2528     tvc->states |= CBackup;
2530  * Now, copy ".." entry back out of volume structure, if
2533 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2535 tvc->mvid = (struct VenusFid *)
2536     osi_AllocSmallSpace(sizeof(struct VenusFid));
2537     *tvc->mvid = tvp->dotdot;
2540     /* store the stat on the file */
2541     afs_RemoveVCB(afid);
2542     afs_ProcessFS(tvc, OutStatus, areq);
2543     tvc->callback = tc->srvr->server;
2545     /* we use osi_Time twice below.  Ideally, we would use the time at which
2546      * the FetchStatus call began, instead, but we don't have it here.  So we
2547      * make do with "now".  In the CRO case, it doesn't really matter. In
2548      * the other case, we hope that the difference between "now" and when the
2549      * call actually began execution on the server won't be larger than the
2550      * padding which the server keeps.  Subtract 1 second anyway, to be on
2551      * the safe side.  Can't subtract more because we don't know how big
2552      * ExpirationTime is.  Possible consistency problems may arise if the call
2553      * timeout period becomes longer than the server's expiration padding.  */
2554     ObtainWriteLock(&afs_xcbhash, 470);
2555     if (CallBack->ExpirationTime != 0) {
2556 tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
2557 tvc->states |= CStatd;
2558 tvc->states &= ~CBulkFetching;
2559 afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
2560     } else if (tvc->states & CRO) {
2561 /* old-fashioned AFS 3.2 style */
2562 tvc->cbExpires = 3600 + osi_Time();
2563  /*XXX*/ tvc->states |= CStatd;
2564 tvc->states &= ~CBulkFetching;
2565 afs_QueueCallback(tvc, CBHash(3600), tvp);
2567 afs_DequeueCallback(tvc);
2568 tvc->callback = NULL;
2569 tvc->states &= ~(CStatd | CUnique);
2570 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2571     osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2573     ReleaseWriteLock(&afs_xcbhash);
2575 afs_PutVolume(tvp, READ_LOCK);
2577     /* look in per-pag cache */
2578     if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
2579 ac->axess = OutStatus->CallerAccess; /* substitute pags */
2580     else /* not found, add a new one if possible */
2581 afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);
2583     ReleaseWriteLock(&tvc->lock);
2584     afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
2585        ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32,
2586        tvc->cbExpires, ICL_TYPE_INT32, tvc->cbExpires - osi_Time());
2588      * Release ref count... hope this guy stays around...
2591 }   /*afs_StuffVcache */
2598 * Decrements the reference count on a cache entry.
2601 * avc : Pointer to the cache entry to decrement.
2604 * Nothing interesting.
/*
 * afs_PutVCache
 *
 * Drop one reference on a vcache entry.  The afs_xvcache read lock is held
 * around the release (the actual ref-count drop is on a line dropped by
 * extraction between lines 2613 and 2615 -- verify against upstream).
 *
 * avc : vcache entry to release (obtained from afs_GetVCache et al.).
 */
2607 afs_PutVCache(register struct vcache *avc)
2609     AFS_STATCNT(afs_PutVCache);
2611      * Can we use a read lock here?
2613     ObtainReadLock(&afs_xvcache);
2615     ReleaseReadLock(&afs_xvcache);
2616 }   /*afs_PutVCache */
2622 * Find a vcache entry given a fid.
2625 * afid : Pointer to the fid whose cache entry we desire.
2626 * retry: (SGI-specific) tell the caller to drop the lock on xvcache,
2627 * unlock the vnode, and try again.
2628 * flags: bit 1 to specify whether to compute hit statistics. Not
2629 * set if FindVCache is called as part of internal bookkeeping.
2632 * Must be called with the afs_xvcache lock at least held at
2633 * the read level. In order to do the VLRU adjustment, the xvcache lock
2634 * must be shared-- we upgrade it here.
/*
 * afs_FindVCache
 *
 * Look up an existing vcache entry by fid; never creates one.  Walks the
 * afs_vhashT chain for the fid, takes a hold on a match, optionally moves
 * the entry to the head of the VLRU (flag & DO_VLRU) and maintains
 * hit/miss statistics (flag & DO_STATS).
 *
 * Caller must hold afs_xvcache at least shared; the VLRU adjustment
 * upgrades to write and converts back to shared.
 *
 * NOTE(review): this excerpt elides many original source lines
 * (conditional bodies, #else branches and closing braces are missing),
 * so the comments below describe only the visible statements.
 */
2638 afs_FindVCache(struct VenusFid *afid, afs_int32 * retry, afs_int32 flag)
2641 register struct vcache *tvc;
2644 AFS_STATCNT(afs_FindVCache);
/* Walk the hash chain for this fid; the hash index 'i' is computed on
 * an elided line (presumably VCHash(afid) — confirm in full source). */
2647 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2648 if (FidMatches(afid, tvc)) {
2650 /* Grab this vnode, possibly reactivating from the free list */
2653 vg = vget(AFSTOV(tvc));
2657 #endif /* AFS_OSF_ENV */
2662 /* should I have a read lock on the vnode here? */
2666 #if !defined(AFS_OSF_ENV)
2667 osi_vnhold(tvc, retry); /* already held, above */
/* On SGI, osi_vnhold may ask the caller to drop locks and retry. */
2668 if (retry && *retry)
2672 * only move to front of vlru if we have proper vcache locking)
/* LRU maintenance: sanity-check queue linkage before and after moving
 * the entry to the head of the VLRU. */
2674 if (flag & DO_VLRU) {
2675 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2676 refpanic("FindVC VLRU inconsistent1");
2678 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2679 refpanic("FindVC VLRU inconsistent1");
2681 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2682 refpanic("FindVC VLRU inconsistent2");
/* Queue surgery needs the write lock; drop back to shared afterwards
 * so the caller's locking expectations still hold. */
2684 UpgradeSToWLock(&afs_xvcache, 26);
2685 QRemove(&tvc->vlruq);
2686 QAdd(&VLRU, &tvc->vlruq);
2687 ConvertWToSLock(&afs_xvcache);
2688 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2689 refpanic("FindVC VLRU inconsistent1");
2691 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2692 refpanic("FindVC VLRU inconsistent2");
2694 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2695 refpanic("FindVC VLRU inconsistent3");
/* Hit/miss and local/remote cell statistics, only when the caller is
 * not doing internal bookkeeping. */
2701 if (flag & DO_STATS) {
2703 afs_stats_cmperf.vcacheHits++;
2705 afs_stats_cmperf.vcacheMisses++;
2706 if (afs_IsPrimaryCellNum(afid->Cell))
2707 afs_stats_cmperf.vlocalAccesses++;
2709 afs_stats_cmperf.vremoteAccesses++;
2711 #ifdef AFS_LINUX22_ENV
2712 if (tvc && (tvc->states & CStatd))
2713 vcache2inode(tvc); /* mainly to reset i_nlink */
2715 #ifdef AFS_DARWIN_ENV
2717 osi_VM_Setup(tvc, 0);
2720 } /*afs_FindVCache */
2726 * Find a vcache entry given a fid. Does a wildcard match on what we
2727 * have for the fid. If more than one entry, don't return anything.
2730 * avcp : Fill in pointer if we found one and only one.
2731 * afid : Pointer to the fid whose cache entry we desire.
2732 * retry: (SGI-specific) tell the caller to drop the lock on xvcache,
2733 * unlock the vnode, and try again.
2734 * flags: bit 1 to specify whether to compute hit statistics. Not
2735 * set if FindVCache is called as part of internal bookkeeping.
2738 * Must be called with the afs_xvcache lock at least held at
2739 * the read level. In order to do the VLRU adjustment, the xvcache lock
2740 * must be shared-- we upgrade it here.
2743 * number of matches found.
2746 int afs_duplicate_nfs_fids = 0;
/*
 * afs_NFSFindVCache
 *
 * Wildcard fid lookup used by the NFS translator: matches only the fid
 * bits actually available (low 16 of Vnode, low 24 of Unique, plus
 * Volume and Cell).  If more than one entry matches, all references are
 * dropped, afs_duplicate_nfs_fids is bumped, and nothing is returned.
 * On success *avcp holds the (held) match; returns 1 if found, 0 if not.
 *
 * Takes afs_xvcache shared itself (unlike afs_FindVCache).
 *
 * NOTE(review): this excerpt elides many original source lines
 * (loop/branch bodies and closing braces are missing); comments below
 * cover only the visible statements.
 */
2749 afs_NFSFindVCache(struct vcache **avcp, struct VenusFid *afid)
2751 register struct vcache *tvc;
2753 afs_int32 count = 0;
2754 struct vcache *found_tvc = NULL;
2756 AFS_STATCNT(afs_FindVCache);
2758 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2762 ObtainSharedLock(&afs_xvcache, 331);
/* Hash index 'i' is computed on an elided line — confirm in full source. */
2765 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2766 /* Match only on what we have.... */
2767 if (((tvc->fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
2768 && (tvc->fid.Fid.Volume == afid->Fid.Volume)
2769 && ((tvc->fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
2770 && (tvc->fid.Cell == afid->Cell)) {
2772 /* Grab this vnode, possibly reactivating from the free list */
2775 vg = vget(AFSTOV(tvc));
2778 /* This vnode no longer exists. */
2781 #endif /* AFS_OSF_ENV */
/* Ambiguous partial fid: give up rather than return the wrong vcache. */
2786 /* Drop our reference counts. */
2788 vrele(AFSTOV(found_tvc));
2790 afs_duplicate_nfs_fids++;
2791 ReleaseSharedLock(&afs_xvcache);
2799 /* should I have a read lock on the vnode here? */
2801 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2802 afs_int32 retry = 0;
2803 osi_vnhold(tvc, &retry);
/* SGI: hold failed, release everything and let the caller retry. */
2806 found_tvc = (struct vcache *)0;
2807 ReleaseSharedLock(&afs_xvcache);
2808 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2812 #if !defined(AFS_OSF_ENV)
2813 osi_vnhold(tvc, (int *)0); /* already held, above */
2817 * We obtained the xvcache lock above.
/* LRU maintenance, same pattern as afs_FindVCache: verify queue
 * linkage, move to head under the write lock, verify again. */
2819 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2820 refpanic("FindVC VLRU inconsistent1");
2822 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2823 refpanic("FindVC VLRU inconsistent1");
2825 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2826 refpanic("FindVC VLRU inconsistent2");
2828 UpgradeSToWLock(&afs_xvcache, 568);
2829 QRemove(&tvc->vlruq);
2830 QAdd(&VLRU, &tvc->vlruq);
2831 ConvertWToSLock(&afs_xvcache);
2832 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2833 refpanic("FindVC VLRU inconsistent1");
2835 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2836 refpanic("FindVC VLRU inconsistent2");
2838 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2839 refpanic("FindVC VLRU inconsistent3");
2845 afs_stats_cmperf.vcacheHits++;
2847 afs_stats_cmperf.vcacheMisses++;
2848 if (afs_IsPrimaryCellNum(afid->Cell))
2849 afs_stats_cmperf.vlocalAccesses++;
2851 afs_stats_cmperf.vremoteAccesses++;
2853 *avcp = tvc; /* May be null */
2855 ReleaseSharedLock(&afs_xvcache);
2856 return (tvc ? 1 : 0);
2858 } /*afs_NFSFindVCache */
2866 * Initialize vcache related variables
/*
 * afs_vcacheInit
 *
 * One-time initialization of the vcache package: initializes the
 * afs_xvcache and afs_xvcb locks, and (on non-OSF platforms) allocates
 * astatSize zeroed struct vcache entries and threads them onto the free
 * list (freeVCList / Initial_freeVCList).  On OSF, instead caps
 * afs_maxvcount at half the system vnode limit.  On SGI, per-vnode
 * semaphores/locks are also initialized.
 *
 * NOTE(review): this excerpt elides several original source lines
 * (some closing braces, #else branches and the end of the function are
 * missing); comments below cover only the visible statements.
 */
2869 afs_vcacheInit(int astatSize)
2871 register struct vcache *tvp;
2873 #if defined(AFS_OSF_ENV)
/* OSF: no private free pool; bound the vcache count by system vnodes. */
2874 if (!afs_maxvcount) {
2875 #if defined(AFS_OSF30_ENV)
2876 afs_maxvcount = max_vnodes / 2; /* limit ourselves to half the total */
2878 afs_maxvcount = nvnode / 2; /* limit ourselves to half the total */
2880 if (astatSize < afs_maxvcount) {
2881 afs_maxvcount = astatSize;
2884 #else /* AFS_OSF_ENV */
2888 RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
2889 LOCK_INIT(&afs_xvcb, "afs_xvcb");
2891 #if !defined(AFS_OSF_ENV)
2892 /* Allocate and thread the struct vcache entries */
2893 tvp = (struct vcache *)afs_osi_Alloc(astatSize * sizeof(struct vcache));
2894 memset((char *)tvp, 0, sizeof(struct vcache) * astatSize);
/* Keep the base pointer for shutdown_vcache's eventual free. */
2896 Initial_freeVCList = tvp;
2897 freeVCList = &(tvp[0]);
/* Chain entries through nextfree; last entry terminates the list. */
2898 for (i = 0; i < astatSize - 1; i++) {
2899 tvp[i].nextfree = &(tvp[i + 1]);
2901 tvp[astatSize - 1].nextfree = NULL;
2902 #ifdef KERNEL_HAVE_PIN
2903 pin((char *)tvp, astatSize * sizeof(struct vcache)); /* XXX */
/* SGI: give each vcache a vnode number and its own rwlock/sync/lock
 * semaphores (names built with makesname for lock metering). */
2908 #if defined(AFS_SGI_ENV)
2909 for (i = 0; i < astatSize; i++) {
2910 char name[METER_NAMSZ];
2911 struct vcache *tvc = &tvp[i];
2913 tvc->v.v_number = ++afsvnumbers;
2914 tvc->vc_rwlockid = OSI_NO_LOCKID;
2915 initnsema(&tvc->vc_rwlock, 1,
2916 makesname(name, "vrw", tvc->v.v_number));
2917 #ifndef AFS_SGI53_ENV
2918 initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
2920 #ifndef AFS_SGI62_ENV
2921 initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
2922 #endif /* AFS_SGI62_ENV */
2936 shutdown_vcache(void)
2939 struct afs_cbr *tsp, *nsp;
2941 * XXX We may potentially miss some of the vcaches because if when there're no
2942 * free vcache entries and all the vcache entries are active ones then we allocate
2943 * an additional one - admittedly we almost never had that occur.
2947 register struct afs_q *tq, *uq;
2948 register struct vcache *tvc;
2949 for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
2953 osi_FreeSmallSpace(tvc->mvid);
2954 tvc->mvid = (struct VenusFid *)0;
2957 aix_gnode_rele(AFSTOV(tvc));
2959 if (tvc->linkData) {
2960 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
2965 * Also free the remaining ones in the Cache
2967 for (i = 0; i < VCSIZE; i++) {
2968 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2970 osi_FreeSmallSpace(tvc->mvid);
2971 tvc->mvid = (struct VenusFid *)0;
2975 afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
2976 #ifdef AFS_AIX32_ENV
2979 vms_delete(tvc->segid);
2981 tvc->segid = tvc->vmh = NULL;
2983 osi_Panic("flushVcache: vm race");
2991 #if defined(AFS_SUN5_ENV)
2997 if (tvc->linkData) {
2998 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
3002 afs_FreeAllAxs(&(tvc->Access));
3008 * Free any leftover callback queue
3010 for (tsp = afs_cbrSpace; tsp; tsp = nsp) {
3012 afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
3016 #if !defined(AFS_OSF_ENV)
3017 afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3019 #ifdef KERNEL_HAVE_PIN
3020 unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3022 #if !defined(AFS_OSF_ENV)
3023 freeVCList = Initial_freeVCList = 0;
3025 RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
3026 LOCK_INIT(&afs_xvcb, "afs_xvcb");