2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 * afs_FlushActiveVcaches
38 #include <afsconfig.h>
39 #include "afs/param.h"
44 #include "afs/sysincludes.h" /*Standard vendor system headers */
45 #include "afsincludes.h" /*AFS-based standard headers */
46 #include "afs/afs_stats.h"
47 #include "afs/afs_cbqueue.h"
48 #include "afs/afs_osidnlc.h"
51 afs_int32 afs_maxvcount = 0; /* max number of vcache entries */
52 afs_int32 afs_vcount = 0; /* number of vcache in use now */
53 #endif /* AFS_OSF_ENV */
61 #endif /* AFS_SGI64_ENV */
63 /* Exported variables */
64 afs_rwlock_t afs_xvcache; /*Lock: alloc new stat cache entries */
65 afs_lock_t afs_xvcb; /*Lock: fids on which there are callbacks */
66 struct vcache *freeVCList; /*Free list for stat cache entries */
67 struct vcache *Initial_freeVCList; /*Initial list for above */
68 struct afs_q VLRU; /*vcache LRU */
69 afs_int32 vcachegen = 0;
70 unsigned int afs_paniconwarn = 0;
71 struct vcache *afs_vhashT[VCSIZE];
72 afs_int32 afs_bulkStatsLost;
73 int afs_norefpanic = 0;
75 /* Forward declarations */
76 static afs_int32 afs_QueueVCB(struct vcache *avc);
83 * Flush the given vcache entry.
86 * avc : Pointer to vcache entry to flush.
87 * slept : Pointer to int to set 1 if we sleep/drop locks, 0 if we don't.
90 * afs_xvcache lock must be held for writing upon entry to
91 * prevent people from changing the vrefCount field, and to
92 * protect the lruq and hnext fields.
93 * LOCK: afs_FlushVCache afs_xvcache W
94 * REFCNT: vcache ref count must be zero on entry except for osf1
95 * RACE: lock is dropped and reobtained, permitting race in caller
/*
 * afs_FlushVCache -- destroy one vcache entry: flush its VM pages, unthread
 * it from the VLRU and the fid hash chain, free its mount-point fid (mvid),
 * link data, and access lists, dequeue any pending callback state, and put
 * the entry back on the free list (non-OSF) or release the vnode (OSF).
 *
 * avc   : entry to flush; caller holds afs_xvcache write-locked and (per the
 *         header comment above) refcount must be zero on entry except on osf1.
 * slept : set by osi_VM_FlushVCache if we slept / dropped locks.
 *
 * NOTE(review): this excerpt is non-contiguous -- intervening source lines
 * (error returns, some #else/#endif arms, closing braces) are elided here.
 */
99 afs_FlushVCache(struct vcache *avc, int *slept)
100 { /*afs_FlushVCache */
102 register afs_int32 i, code;
103 register struct vcache **uvc, *wvc;
106 AFS_STATCNT(afs_FlushVCache);
107 afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
108 ICL_TYPE_INT32, avc->states);
111 VN_LOCK(AFSTOV(avc));
/* give the VM layer first crack; it may veto the flush (code checked in
 * elided lines) and reports via *slept whether locks were dropped */
115 code = osi_VM_FlushVCache(avc, slept);
/* already flushed by someone else -- elided path bails out */
119 if (avc->states & CVFlushed) {
123 if (avc->nextfree || !avc->vlruq.prev || !avc->vlruq.next) { /* qv afs.h */
124 refpanic("LRU vs. Free inconsistency");
126 avc->states |= CVFlushed;
127 /* pull the entry out of the lruq and put it on the free list */
128 QRemove(&avc->vlruq);
129 avc->vlruq.prev = avc->vlruq.next = (struct afs_q *)0;
131 /* keep track of # of files that we bulk stat'd, but never used
132 * before they got recycled.
134 if (avc->states & CBulkStat)
137 /* remove entry from the hash chain */
138 i = VCHash(&avc->fid);
139 uvc = &afs_vhashT[i];
/* walk the chain keeping a pointer-to-pointer so the match can be
 * unthreaded in place (unlink body is in elided lines) */
140 for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
143 avc->hnext = (struct vcache *)NULL;
148 osi_Panic("flushvcache"); /* not in correct hash bucket */
/* release per-entry allocations: mount-point fid and symlink contents */
150 osi_FreeSmallSpace(avc->mvid);
151 avc->mvid = (struct VenusFid *)0;
153 afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
154 avc->linkData = NULL;
156 #if defined(AFS_XBSD_ENV)
157 /* OK, there are no internal vrefCounts, so there shouldn't
158 * be any more refs here. */
160 avc->v->v_data = NULL; /* remove from vnode */
161 avc->v = NULL; /* also drop the ptr to vnode */
164 afs_FreeAllAxs(&(avc->Access));
166 /* we can't really give back callbacks on RO files, since the
167 * server only tracks them on a per-volume basis, and we don't
168 * know whether we still have some other files from the same
170 if ((avc->states & CRO) == 0 && avc->callback) {
173 ObtainWriteLock(&afs_xcbhash, 460);
174 afs_DequeueCallback(avc); /* remove it from queued callbacks list */
175 avc->states &= ~(CStatd | CUnique);
176 ReleaseWriteLock(&afs_xcbhash);
177 afs_symhint_inval(avc);
178 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
179 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
181 osi_dnlc_purgevp(avc);
184 * Next, keep track of which vnodes we've deleted for create's
185 * optimistic synchronization algorithm
188 if (avc->fid.Fid.Vnode & 1)
193 #if !defined(AFS_OSF_ENV)
194 /* put the entry in the free list */
195 avc->nextfree = freeVCList;
197 if (avc->vlruq.prev || avc->vlruq.next) {
198 refpanic("LRU vs. Free inconsistency");
201 /* This should put it back on the vnode free list since usecount is 1 */
204 if (VREFCOUNT(avc) > 0) {
205 VN_UNLOCK(AFSTOV(avc));
206 AFS_RELE(AFSTOV(avc));
/* refcount already below 1: either warn-and-vgone (afs_norefpanic)
 * or panic outright */
208 if (afs_norefpanic) {
209 printf("flush vc refcnt < 1");
211 (void)vgone(avc, VX_NOSLEEP, NULL);
213 VN_UNLOCK(AFSTOV(avc));
215 osi_Panic("flush vc refcnt < 1");
217 #endif /* AFS_OSF_ENV */
218 avc->states |= CVFlushed;
223 VN_UNLOCK(AFSTOV(avc));
227 } /*afs_FlushVCache */
233 * The core of the inactive vnode op for all but IRIX.
/*
 * afs_InactiveVCache -- last-reference cleanup: abandon any dirty data,
 * clear mapping/dirty state, and handle deferred unlink.  If afs_xvcache
 * or afs_xdcache is already held we cannot call afs_remunlink here, so the
 * unlink is deferred via CUnlinkedDel instead.
 * (Excerpt: closing brace and some lines are elided from this view.)
 */
236 afs_InactiveVCache(struct vcache *avc, struct AFS_UCRED *acred)
238 AFS_STATCNT(afs_inactive);
239 if (avc->states & CDirty) {
240 /* we can't keep trying to push back dirty data forever. Give up. */
241 afs_InvalidateAllSegments(avc); /* turns off dirty bit */
243 avc->states &= ~CMAPPED; /* mainly used by SunOS 4.0.x */
244 avc->states &= ~CDirty; /* Turn it off */
245 if (avc->states & CUnlinked) {
246 if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
/* locks held -- defer the unlink to a safer context */
247 avc->states |= CUnlinkedDel;
250 afs_remunlink(avc, 1); /* ignore any return code */
259 * Description: allocate a callback return structure from the
260 * free list and return it.
262 * Env: The alloc and free routines are both called with the afs_xvcb lock
263 * held, so we don't have to worry about blocking in osi_Alloc.
/* Head of the free list of callback-return (afs_cbr) structures,
 * protected by afs_xvcb per the comment above. */
265 static struct afs_cbr *afs_cbrSpace = 0;
/* afs_AllocCBR body (signature line elided in this excerpt): loop until the
 * free list is non-empty.  If too many batches are already allocated, flush
 * queued callbacks to recycle entries; otherwise carve a new batch of
 * AFS_NCBRS entries and thread them onto the free list.  Returns the first
 * free entry (return statement elided). */
269 register struct afs_cbr *tsp;
272 while (!afs_cbrSpace) {
273 if (afs_stats_cmperf.CallBackAlloced >= 2) {
274 /* don't allocate more than 2 * AFS_NCBRS for now */
276 afs_stats_cmperf.CallBackFlushes++;
280 (struct afs_cbr *)afs_osi_Alloc(AFS_NCBRS *
281 sizeof(struct afs_cbr));
/* link the fresh batch into a singly-linked free list */
282 for (i = 0; i < AFS_NCBRS - 1; i++) {
283 tsp[i].next = &tsp[i + 1];
285 tsp[AFS_NCBRS - 1].next = 0;
287 afs_stats_cmperf.CallBackAlloced++;
/* pop the head of the free list */
291 afs_cbrSpace = tsp->next;
298 * Description: free a callback return structure.
301 * asp -- the address of the structure to free.
303 * Environment: the xvcb lock is held over these calls.
/* afs_FreeCBR -- push asp back onto the afs_cbrSpace free list
 * (list-head update is in an elided line). */
306 afs_FreeCBR(register struct afs_cbr *asp)
308 asp->next = afs_cbrSpace;
316 * Description: flush all queued callbacks to all servers.
320 * Environment: holds xvcb lock over RPC to guard against race conditions
321 * when a new callback is granted for the same file later on.
/*
 * afs_FlushVCBs -- walk every server's queued callback-return (cbr) list,
 * batch up to AFS_MAXCBRSCALL fids at a time, and return them to the file
 * server via RXAFS_GiveUpCallBacks.  RPC failures are deliberately ignored
 * (see comment below).  safety1/2/3 counters guard against corrupted lists.
 *
 * lockit : presumably controls whether afs_xvcb is taken here -- the
 *          conditional use is in elided lines; TODO confirm against full source.
 *
 * NOTE(review): excerpt is non-contiguous; loop bodies, error paths and
 * closing braces are partially elided.
 */
324 afs_FlushVCBs(afs_int32 lockit)
326 struct AFSFid *tfids;
327 struct AFSCallBack callBacks[1];
328 struct AFSCBFids fidArray;
329 struct AFSCBs cbArray;
331 struct afs_cbr *tcbrp;
335 struct vrequest treq;
337 int safety1, safety2, safety3;
338 XSTATS_DECLS if ((code = afs_InitReq(&treq, afs_osi_credp)))
340 treq.flags |= O_NONBLOCK;
/* scratch array for one RPC's worth of fids; freed at the bottom */
341 tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
344 MObtainWriteLock(&afs_xvcb, 273);
345 ObtainReadLock(&afs_xserver);
346 for (i = 0; i < NSERVERS; i++) {
347 for (safety1 = 0, tsp = afs_servers[i];
348 tsp && safety1 < afs_totalServers + 10;
349 tsp = tsp->next, safety1++) {
/* nothing queued for this server */
351 if (tsp->cbrs == (struct afs_cbr *)0)
354 /* otherwise, grab a block of AFS_MAXCBRSCALL from the list
355 * and make an RPC, over and over again.
357 tcount = 0; /* number found so far */
358 for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
359 if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
360 /* if buffer is full, or we've queued all we're going
361 * to from this server, we should flush out the
364 fidArray.AFSCBFids_len = tcount;
365 fidArray.AFSCBFids_val = (struct AFSFid *)tfids;
366 cbArray.AFSCBs_len = 1;
367 cbArray.AFSCBs_val = callBacks;
368 callBacks[0].CallBackType = CB_EXCLUSIVE;
369 for (safety3 = 0; safety3 < MAXHOSTS * 2; safety3++) {
370 tc = afs_ConnByHost(tsp, tsp->cell->fsport,
371 tsp->cell->cellNum, &treq, 0,
375 (AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
378 RXAFS_GiveUpCallBacks(tc->id, &fidArray,
386 AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS, SHARED_LOCK,
391 /* ignore return code, since callbacks may have
392 * been returned anyway, we shouldn't leave them
393 * around to be returned again.
395 * Next, see if we are done with this server, and if so,
396 * break to deal with the next one.
402 /* if to flush full buffer */
403 /* if we make it here, we have an entry at the head of cbrs,
404 * which we should copy to the file ID array and then free.
407 tfids[tcount++] = tcbrp->fid;
408 tsp->cbrs = tcbrp->next;
410 } /* while loop for this one server */
411 if (safety2 > afs_cacheStats) {
412 afs_warn("possible internal error afs_flushVCBs (%d)\n",
415 } /* for loop for this hash chain */
416 } /* loop through all hash chains */
417 if (safety1 > afs_totalServers + 2) {
419 ("AFS internal error (afs_flushVCBs) (%d > %d), continuing...\n",
420 safety1, afs_totalServers + 2);
422 osi_Panic("afs_flushVCBS safety1");
425 ReleaseReadLock(&afs_xserver);
427 MReleaseWriteLock(&afs_xvcb);
428 afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
436 * Queue a callback on the given fid.
442 * Locks the xvcb lock.
443 * Called when the xvcache lock is already held.
/*
 * afs_QueueVCB -- queue a callback-return record for avc's fid on its
 * server's cbrs list, to be flushed to the server later by afs_FlushVCBs.
 * (Excerpt: the tsp->cbrs = tcbp assignment and return are elided.)
 */
447 afs_QueueVCB(struct vcache *avc)
449 register struct server *tsp;
450 register struct afs_cbr *tcbp;
452 AFS_STATCNT(afs_QueueVCB);
453 /* The callback is really just a struct server ptr. */
454 tsp = (struct server *)(avc->callback);
456 /* we now have a pointer to the server, so we just allocate
457 * a queue entry and queue it.
459 MObtainWriteLock(&afs_xvcb, 274);
/* afs_AllocCBR requires afs_xvcb held, which we just took */
460 tcbp = afs_AllocCBR();
461 tcbp->fid = avc->fid.Fid;
462 tcbp->next = tsp->cbrs;
465 /* now release locks and return */
466 MReleaseWriteLock(&afs_xvcb);
475 * Remove a queued callback by looking through all the servers
476 * to see if any have this callback queued.
479 * afid: The fid we want cleansed of queued callbacks.
482 * Locks xvcb and xserver locks.
483 * Typically called with xdcache, xvcache and/or individual vcache
/*
 * afs_RemoveVCB -- scan every server's queued callback-return list and
 * unthread any entry matching afid (Volume/Vnode/Unique compare), so a
 * stale return is not sent for a fid that is being reused.
 * (Excerpt: the free of the unthreaded entry is elided.)
 */
488 afs_RemoveVCB(struct VenusFid *afid)
491 register struct server *tsp;
492 register struct afs_cbr *tcbrp;
493 struct afs_cbr **lcbrpp;
495 AFS_STATCNT(afs_RemoveVCB);
496 MObtainWriteLock(&afs_xvcb, 275);
497 ObtainReadLock(&afs_xserver);
498 for (i = 0; i < NSERVERS; i++) {
499 for (tsp = afs_servers[i]; tsp; tsp = tsp->next) {
500 /* if cell is known, and is wrong, then skip this server */
501 if (tsp->cell && tsp->cell->cellNum != afid->Cell)
505 * Otherwise, iterate through file IDs we're sending to the
508 lcbrpp = &tsp->cbrs; /* first queued return callback */
/* pointer-to-pointer walk so a match can be unlinked in place */
509 for (tcbrp = *lcbrpp; tcbrp;
510 lcbrpp = &tcbrp->next, tcbrp = *lcbrpp) {
511 if (afid->Fid.Volume == tcbrp->fid.Volume
512 && afid->Fid.Unique == tcbrp->fid.Unique
513 && afid->Fid.Vnode == tcbrp->fid.Vnode) {
514 *lcbrpp = tcbrp->next; /* unthread from list */
522 ReleaseReadLock(&afs_xserver);
523 MReleaseWriteLock(&afs_xvcb);
527 #ifdef AFS_LINUX22_ENV
/*
 * __shrink_dcache_parent -- Linux-only helper: depth-first walk of parent's
 * dentry subtree (modeled on the kernel's select_parent/prune_dcache),
 * moving unreferenced dentries onto a local afs_dentry_unused list, then
 * unhashing (d_drop) each collected dentry.
 * NOTE(review): excerpt is non-contiguous; loop structure and the prune
 * phase's dput/cleanup lines are partially elided.
 */
530 __shrink_dcache_parent(struct dentry *parent)
532 struct dentry *this_parent = parent;
533 struct list_head *next;
535 LIST_HEAD(afs_dentry_unused);
538 next = this_parent->d_subdirs.next;
540 while (next != &this_parent->d_subdirs) {
541 struct list_head *tmp = next;
542 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
/* refcount zero: collect onto our private unused list */
544 if (!DCOUNT(dentry)) {
545 list_del(&dentry->d_lru);
546 list_add(&dentry->d_lru, afs_dentry_unused.prev);
550 * Descend a level if the d_subdirs list is non-empty.
552 if (!list_empty(&dentry->d_subdirs)) {
553 this_parent = dentry;
558 * All done at this level ... ascend and resume the search.
560 if (this_parent != parent) {
561 next = this_parent->d_child.next;
562 this_parent = this_parent->d_parent;
/* prune phase: pop entries off the collected list until empty */
567 struct dentry *dentry;
568 struct list_head *tmp;
570 tmp = afs_dentry_unused.prev;
572 if (tmp == &afs_dentry_unused)
574 #ifdef AFS_LINUX24_ENV
579 #endif /* AFS_LINUX24_ENV */
580 dentry = list_entry(tmp, struct dentry, d_lru);
582 #ifdef AFS_LINUX24_ENV
583 /* Unused dentry with a count? */
588 #ifdef AFS_LINUX24_ENV
589 list_del_init(&dentry->d_hash); /* d_drop */
591 list_del(&dentry->d_hash);
592 INIT_LIST_HEAD(&dentry->d_hash);
593 #endif /* AFS_LINUX24_ENV */
602 /* afs_TryFlushDcacheChildren -- Shakes loose vcache references held by
603 * children of the dentry
605 * LOCKS -- Called with afs_xvcache write locked. Drops and reacquires
606 * AFS_GLOCK, so it can call dput, which may call iput, but
607 * keeps afs_xvcache exclusively.
609 * Tree traversal algorithm from fs/dcache.c: select_parent()
/* Walks every dentry aliased to tvc's inode (ip->i_dentry list); for hashed
 * dentries with children it shrinks the subtree via __shrink_dcache_parent,
 * and drops (d_drop) dentries whose refcount is zero.  The second, similar
 * loop below belongs to the alternate (old_vcache_scheme) build path.
 * NOTE(review): excerpt is non-contiguous; dget/dput pairs and closing
 * braces are partially elided. */
612 afs_TryFlushDcacheChildren(struct vcache *tvc)
614 struct inode *ip = AFSTOI(tvc);
615 struct dentry *this_parent;
616 struct list_head *next;
617 struct list_head *cur;
618 struct list_head *head = &ip->i_dentry;
619 struct dentry *dentry;
623 #ifndef old_vcache_scheme
626 while ((cur = cur->next) != head) {
627 dentry = list_entry(cur, struct dentry, d_alias);
629 afs_Trace3(afs_iclSetp, CM_TRACE_TRYFLUSHDCACHECHILDREN,
630 ICL_TYPE_POINTER, ip, ICL_TYPE_STRING,
631 dentry->d_parent->d_name.name, ICL_TYPE_STRING,
632 dentry->d_name.name);
/* only shrink subtrees of dentries that are still hashed */
634 if (!list_empty(&dentry->d_hash) && !list_empty(&dentry->d_subdirs))
635 __shrink_dcache_parent(dentry);
637 if (!DCOUNT(dentry)) {
639 #ifdef AFS_LINUX24_ENV
640 list_del_init(&dentry->d_hash); /* d_drop */
642 list_del(&dentry->d_hash);
643 INIT_LIST_HEAD(&dentry->d_hash);
644 #endif /* AFS_LINUX24_ENV */
/* old_vcache_scheme variant of the same alias walk */
656 while ((cur = cur->next) != head) {
657 dentry = list_entry(cur, struct dentry, d_alias);
659 afs_Trace3(afs_iclSetp, CM_TRACE_TRYFLUSHDCACHECHILDREN,
660 ICL_TYPE_POINTER, ip, ICL_TYPE_STRING,
661 dentry->d_parent->d_name.name, ICL_TYPE_STRING,
662 dentry->d_name.name);
664 if (!DCOUNT(dentry)) {
677 #endif /* AFS_LINUX22_ENV */
683 * This routine is responsible for allocating a new cache entry
684 * from the free list. It formats the cache entry and inserts it
685 * into the appropriate hash tables. It must be called with
686 * afs_xvcache write-locked so as to prevent several processes from
687 * trying to create a new cache entry simultaneously.
690 * afid : The file id of the file whose cache entry is being
693 /* LOCK: afs_NewVCache afs_xvcache W */
/*
 * afs_NewVCache -- obtain and initialize a fresh vcache entry for afid:
 * recycle an entry off the VLRU (flushing it first) or, failing that,
 * allocate a new one; then run a long ladder of per-platform vnode/inode
 * initialization (OSF, Mach, SGI, Sun, Darwin, xBSD, Linux), and finally
 * thread the entry onto the fid hash chain and the VLRU.
 *
 * serverp : stored as the entry's callback server (see line 993 below).
 *
 * NOTE(review): this excerpt is heavily non-contiguous -- many statements,
 * #else/#endif arms, closing braces and the return are elided; comments
 * below describe only what is visible.
 */
695 afs_NewVCache(struct VenusFid *afid, struct server *serverp)
699 afs_int32 anumber = VCACHE_FREE;
701 struct gnode *gnodepnt;
704 struct vm_info *vm_info_ptr;
705 #endif /* AFS_MACH_ENV */
708 #endif /* AFS_OSF_ENV */
709 struct afs_q *tq, *uq;
712 AFS_STATCNT(afs_NewVCache);
/* --- OSF path: enforce the vcache ceiling by recycling from the VLRU --- */
715 if (afs_vcount >= afs_maxvcount) {
718 * If we are using > 33 % of the total system vnodes for AFS vcache
719 * entries or we are using the maximum number of vcache entries,
720 * then free some. (if our usage is > 33% we should free some, if
721 * our usage is > afs_maxvcount, set elsewhere to 0.5*nvnode,
722 * we _must_ free some -- no choice).
724 if (((3 * afs_vcount) > nvnode) || (afs_vcount >= afs_maxvcount)) {
726 struct afs_q *tq, *uq;
/* scan VLRU from least-recently-used end, sanity-checking as we go */
731 for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
734 if (tvc->states & CVFlushed)
735 refpanic("CVFlushed on VLRU");
736 else if (i++ > afs_maxvcount)
737 refpanic("Exceeded pool of AFS vnodes(VLRU cycle?)");
738 else if (QNext(uq) != tq)
739 refpanic("VLRU inconsistent");
740 else if (VREFCOUNT(tvc) < 1)
741 refpanic("refcnt 0 on VLRU");
743 if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
744 && (tvc->states & CUnlinkedDel) == 0) {
745 code = afs_FlushVCache(tvc, &fv_slept);
752 continue; /* start over - may have raced. */
758 if (anumber == VCACHE_FREE) {
759 printf("NewVCache: warning none freed, using %d of %d\n",
760 afs_vcount, afs_maxvcount);
761 if (afs_vcount >= afs_maxvcount) {
762 osi_Panic("NewVCache - none freed");
763 /* XXX instead of panicing, should do afs_maxvcount++
764 * and magic up another one */
770 if (getnewvnode(MOUNT_AFS, &Afs_vnodeops, &nvc)) {
771 /* What should we do ???? */
772 osi_Panic("afs_NewVCache: no more vnodes");
777 tvc->nextfree = NULL;
779 #else /* AFS_OSF_ENV */
780 /* pull out a free cache entry */
/* --- non-OSF path: if the free list is empty, recycle off the VLRU --- */
783 for (tq = VLRU.prev; (anumber > 0) && (tq != &VLRU); tq = uq) {
787 if (tvc->states & CVFlushed) {
788 refpanic("CVFlushed on VLRU");
789 } else if (i++ > 2 * afs_cacheStats) { /* even allowing for a few xallocs... */
790 refpanic("Increase -stat parameter of afsd(VLRU cycle?)");
791 } else if (QNext(uq) != tq) {
792 refpanic("VLRU inconsistent");
794 #ifdef AFS_DARWIN_ENV
795 if (tvc->opens == 0 && ((tvc->states & CUnlinkedDel) == 0)
796 && VREFCOUNT(tvc) == 1 && UBCINFOEXISTS(&tvc->v)) {
797 osi_VM_TryReclaim(tvc, &fv_slept);
801 continue; /* start over - may have raced. */
804 #elif defined(AFS_LINUX22_ENV)
/* Linux: try to shake dcache references so the refcount can drop */
805 if (tvc != afs_globalVp && VREFCOUNT(tvc) && tvc->opens == 0)
806 afs_TryFlushDcacheChildren(tvc);
809 if (VREFCOUNT(tvc) == 0 && tvc->opens == 0
810 && (tvc->states & CUnlinkedDel) == 0) {
811 #if defined(AFS_XBSD_ENV)
813 * vgone() reclaims the vnode, which calls afs_FlushVCache(),
814 * then it puts the vnode on the free list.
815 * If we don't do this we end up with a cleaned vnode that's
816 * not on the free list.
817 * XXX assume FreeBSD is the same for now.
822 code = afs_FlushVCache(tvc, &fv_slept);
830 continue; /* start over - may have raced. */
838 /* none free, making one is better than a panic */
839 afs_stats_cmperf.vcacheXAllocs++; /* count in case we have a leak */
840 tvc = (struct vcache *)afs_osi_Alloc(sizeof(struct vcache));
841 #ifdef KERNEL_HAVE_PIN
842 pin((char *)tvc, sizeof(struct vcache)); /* XXX */
845 /* In case it still comes here we need to fill this */
846 tvc->v.v_vm_info = VM_INFO_NULL;
847 vm_info_init(tvc->v.v_vm_info);
848 /* perhaps we should also do close_flush on non-NeXT mach systems;
849 * who knows; we don't currently have the sources.
851 #endif /* AFS_MACH_ENV */
852 #if defined(AFS_SGI_ENV)
/* SGI: zero the entry and set up its semaphores/locks by name */
854 char name[METER_NAMSZ];
855 memset(tvc, 0, sizeof(struct vcache));
856 tvc->v.v_number = ++afsvnumbers;
857 tvc->vc_rwlockid = OSI_NO_LOCKID;
858 initnsema(&tvc->vc_rwlock, 1,
859 makesname(name, "vrw", tvc->v.v_number));
860 #ifndef AFS_SGI53_ENV
861 initnsema(&tvc->v.v_sync, 0,
862 makesname(name, "vsy", tvc->v.v_number));
864 #ifndef AFS_SGI62_ENV
865 initnlock(&tvc->v.v_lock,
866 makesname(name, "vlk", tvc->v.v_number));
869 #endif /* AFS_SGI_ENV */
871 tvc = freeVCList; /* take from free list */
872 freeVCList = tvc->nextfree;
873 tvc->nextfree = NULL;
875 #endif /* AFS_OSF_ENV */
/* Mach: preserve the vm_info pointer across the memset below */
878 vm_info_ptr = tvc->v.v_vm_info;
879 #endif /* AFS_MACH_ENV */
881 #if defined(AFS_XBSD_ENV)
883 panic("afs_NewVCache(): free vcache with vnode attached");
886 #if !defined(AFS_SGI_ENV) && !defined(AFS_OSF_ENV)
887 memset((char *)tvc, 0, sizeof(struct vcache));
/* ---- common (re)initialization of the chosen entry begins here ---- */
892 RWLOCK_INIT(&tvc->lock, "vcache lock");
893 #if defined(AFS_SUN5_ENV)
894 RWLOCK_INIT(&tvc->vlock, "vcache vlock");
895 #endif /* defined(AFS_SUN5_ENV) */
898 tvc->v.v_vm_info = vm_info_ptr;
899 tvc->v.v_vm_info->pager = MEMORY_OBJECT_NULL;
900 #endif /* AFS_MACH_ENV */
903 afs_nbsd_getnewvnode(tvc); /* includes one refcount */
905 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
912 #ifdef AFS_FBSD50_ENV
913 if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
915 if (getnewvnode(VT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
917 panic("afs getnewvnode"); /* can't happen */
919 if (tvc->v != NULL) {
920 /* I'd like to know if this ever happens...
921 We don't drop global for the rest of this function,
922 so if we do lose the race, the other thread should
923 have found the same vnode and finished initializing
924 the vcache entry. Is it conceivable that this vcache
925 entry could be recycled during this interval? If so,
926 then there probably needs to be some sort of additional
927 mutual exclusion (an Embryonic flag would suffice).
929 printf("afs_NewVCache: lost the race\n");
933 tvc->v->v_data = tvc;
934 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
/* reset bookkeeping fields for the recycled/new entry */
937 tvc->parentVnode = 0;
939 tvc->linkData = NULL;
942 tvc->execsOrWriters = 0;
946 tvc->last_looker = 0;
948 tvc->asynchrony = -1;
950 afs_symhint_inval(tvc);
952 tvc->flushDV.low = tvc->flushDV.high = AFS_MAXDV;
955 tvc->truncPos = AFS_NOTRUNC; /* don't truncate until we need to */
956 hzero(tvc->m.DataVersion); /* in case we copy it into flushDV */
958 /* Hold it for the LRU (should make count 2) */
959 VN_HOLD(AFSTOV(tvc));
960 #else /* AFS_OSF_ENV */
961 #if !defined(AFS_XBSD_ENV)
962 VREFCOUNT_SET(tvc, 1); /* us */
963 #endif /* AFS_XBSD_ENV */
964 #endif /* AFS_OSF_ENV */
966 LOCK_INIT(&tvc->pvmlock, "vcache pvmlock");
967 tvc->vmh = tvc->segid = NULL;
970 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV) || defined(AFS_SUN5_ENV)
971 #if defined(AFS_SUN5_ENV)
972 rw_init(&tvc->rwlock, "vcache rwlock", RW_DEFAULT, NULL);
974 #if defined(AFS_SUN55_ENV)
975 /* This is required if the kaio (kernel aynchronous io)
976 ** module is installed. Inside the kernel, the function
977 ** check_vp( common/os/aio.c) checks to see if the kernel has
978 ** to provide asynchronous io for this vnode. This
979 ** function extracts the device number by following the
980 ** v_data field of the vnode. If we do not set this field
981 ** then the system panics. The value of the v_data field
982 ** is not really important for AFS vnodes because the kernel
983 ** does not do asynchronous io for regular files. Hence,
984 ** for the time being, we fill up the v_data field with the
985 ** vnode pointer itself. */
986 tvc->v.v_data = (char *)tvc;
987 #endif /* AFS_SUN55_ENV */
989 afs_BozonInit(&tvc->pvnLock, tvc);
993 tvc->callback = serverp; /* to minimize chance that clear
995 /* initialize vnode data, note vrefCount is v.v_count */
997 /* Don't forget to free the gnode space */
998 tvc->v.v_gnode = gnodepnt =
999 (struct gnode *)osi_AllocSmallSpace(sizeof(struct gnode));
1000 memset((char *)gnodepnt, 0, sizeof(struct gnode));
1002 #ifdef AFS_SGI64_ENV
1003 memset((void *)&(tvc->vc_bhv_desc), 0, sizeof(tvc->vc_bhv_desc));
1004 bhv_desc_init(&(tvc->vc_bhv_desc), tvc, tvc, &Afs_vnodeops);
1005 #ifdef AFS_SGI65_ENV
1006 vn_bhv_head_init(&(tvc->v.v_bh), "afsvp");
1007 vn_bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
1009 bhv_head_init(&(tvc->v.v_bh));
1010 bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
1012 #ifdef AFS_SGI65_ENV
1013 tvc->v.v_mreg = tvc->v.v_mregb = (struct pregion *)tvc;
1014 #ifdef VNODE_TRACING
1015 tvc->v.v_trace = ktrace_alloc(VNODE_TRACE_SIZE, 0);
1017 init_bitlock(&tvc->v.v_pcacheflag, VNODE_PCACHE_LOCKBIT, "afs_pcache",
1019 init_mutex(&tvc->v.v_filocksem, MUTEX_DEFAULT, "afsvfl", (long)tvc);
1020 init_mutex(&tvc->v.v_buf_lock, MUTEX_DEFAULT, "afsvnbuf", (long)tvc);
1022 vnode_pcache_init(&tvc->v);
1023 #if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
1024 /* Above define is never true except in SGI test kernels. */
1025 init_bitlock(&(tvc->v.v_flag, VLOCK, "vnode", tvc->v.v_number);
1027 #ifdef INTR_KTHREADS
1028 AFS_VN_INIT_BUF_LOCK(&(tvc->v));
1031 SetAfsVnode(AFSTOV(tvc));
1032 #endif /* AFS_SGI64_ENV */
1033 #ifdef AFS_DARWIN_ENV
1034 tvc->v.v_ubcinfo = UBC_INFO_NULL;
1035 lockinit(&tvc->rwlock, PINOD, "vcache rwlock", 0, 0);
1036 cache_purge(AFSTOV(tvc));
1037 tvc->v.v_data = tvc;
1038 tvc->v.v_tag = VT_AFS;
1039 /* VLISTNONE(&tvc->v); */
1040 tvc->v.v_freelist.tqe_next = 0;
1041 tvc->v.v_freelist.tqe_prev = (struct vnode **)0xdeadb;
1042 /*tvc->vrefCount++; */
1045 * The proper value for mvstat (for root fids) is setup by the caller.
/* Vnode==1 && Unique==1 is the conventional root fid shape */
1048 if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
1050 if (afs_globalVFS == 0)
1051 osi_Panic("afs globalvfs");
1052 vSetVfsp(tvc, afs_globalVFS);
1053 vSetType(tvc, VREG);
1055 tvc->v.v_vfsnext = afs_globalVFS->vfs_vnodes; /* link off vfs */
1056 tvc->v.v_vfsprev = NULL;
1057 afs_globalVFS->vfs_vnodes = &tvc->v;
1058 if (tvc->v.v_vfsnext != NULL)
1059 tvc->v.v_vfsnext->v_vfsprev = &tvc->v;
1060 tvc->v.v_next = gnodepnt->gn_vnode; /*Single vnode per gnode for us! */
1061 gnodepnt->gn_vnode = &tvc->v;
1064 tvc->v.g_dev = ((struct mount *)afs_globalVFS->vfs_data)->m_dev;
1066 #if defined(AFS_DUX40_ENV)
1067 insmntque(tvc, afs_globalVFS, &afs_ubcops);
1070 /* Is this needed??? */
1071 insmntque(tvc, afs_globalVFS);
1072 #endif /* AFS_OSF_ENV */
1073 #endif /* AFS_DUX40_ENV */
1074 #if defined(AFS_SGI_ENV)
/* SGI: assert the recycled vnode is truly quiescent before reuse */
1075 VN_SET_DPAGES(&(tvc->v), (struct pfdat *)NULL);
1076 osi_Assert((tvc->v.v_flag & VINACT) == 0);
1078 osi_Assert(VN_GET_PGCNT(&(tvc->v)) == 0);
1079 osi_Assert(tvc->mapcnt == 0 && tvc->vc_locktrips == 0);
1080 osi_Assert(tvc->vc_rwlockid == OSI_NO_LOCKID);
1081 osi_Assert(tvc->v.v_filocks == NULL);
1082 #if !defined(AFS_SGI65_ENV)
1083 osi_Assert(tvc->v.v_filocksem == NULL);
1085 osi_Assert(tvc->cred == NULL);
1086 #ifdef AFS_SGI64_ENV
1087 vnode_pcache_reinit(&tvc->v);
1088 tvc->v.v_rdev = NODEV;
1090 vn_initlist((struct vnlist *)&tvc->v);
1092 #endif /* AFS_SGI_ENV */
1093 #if defined(AFS_LINUX22_ENV)
/* Linux: hand-initialize the embedded inode's semaphores, lists and
 * address_space, since this inode did not come from the kernel's
 * inode allocator */
1095 struct inode *ip = AFSTOI(tvc);
1096 sema_init(&ip->i_sem, 1);
1097 #if defined(AFS_LINUX24_ENV)
1098 sema_init(&ip->i_zombie, 1);
1099 init_waitqueue_head(&ip->i_wait);
1100 spin_lock_init(&ip->i_data.i_shared_lock);
1101 #ifdef STRUCT_ADDRESS_SPACE_HAS_PAGE_LOCK
1102 spin_lock_init(&ip->i_data.page_lock);
1104 INIT_LIST_HEAD(&ip->i_data.clean_pages);
1105 INIT_LIST_HEAD(&ip->i_data.dirty_pages);
1106 INIT_LIST_HEAD(&ip->i_data.locked_pages);
1107 INIT_LIST_HEAD(&ip->i_dirty_buffers);
1108 #ifdef STRUCT_INODE_HAS_I_DIRTY_DATA_BUFFERS
1109 INIT_LIST_HEAD(&ip->i_dirty_data_buffers);
1111 #ifdef STRUCT_INODE_HAS_I_DEVICES
1112 INIT_LIST_HEAD(&ip->i_devices);
1114 ip->i_data.host = (void *)ip;
1115 #ifdef STRUCT_ADDRESS_SPACE_HAS_GFP_MASK
1116 ip->i_data.gfp_mask = GFP_HIGHUSER;
1118 ip->i_mapping = &ip->i_data;
1119 #ifdef STRUCT_INODE_HAS_I_TRUNCATE_SEM
1120 init_rwsem(&ip->i_truncate_sem);
1122 #ifdef STRUCT_INODE_HAS_I_ALLOC_SEM
1123 init_rwsem(&ip->i_alloc_sem);
1126 sema_init(&ip->i_atomic_write, 1);
1127 init_waitqueue(&ip->i_wait);
1129 INIT_LIST_HEAD(&ip->i_hash);
1130 INIT_LIST_HEAD(&ip->i_dentry);
1131 if (afs_globalVFS) {
1132 ip->i_dev = afs_globalVFS->s_dev;
1133 ip->i_sb = afs_globalVFS;
1138 osi_dnlc_purgedp(tvc); /* this may be overkill */
1139 memset((char *)&(tvc->quick), 0, sizeof(struct vtodc));
1140 memset((char *)&(tvc->callsort), 0, sizeof(struct afs_q));
/* finally: thread onto the fid hash chain and the VLRU, with paranoid
 * consistency checks on the VLRU links */
1144 tvc->hnext = afs_vhashT[i];
1145 afs_vhashT[i] = tvc;
1146 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
1147 refpanic("NewVCache VLRU inconsistent");
1149 QAdd(&VLRU, &tvc->vlruq); /* put in lruq */
1150 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
1151 refpanic("NewVCache VLRU inconsistent2");
1153 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
1154 refpanic("NewVCache VLRU inconsistent3");
1156 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
1157 refpanic("NewVCache VLRU inconsistent4");
1163 } /*afs_NewVCache */
1167 * afs_FlushActiveVcaches
1173 * doflocks : Do we handle flocks?
1175 /* LOCK: afs_FlushActiveVcaches afs_xvcache N */
/*
 * afs_FlushActiveVcaches -- periodic sweep over every hash chain of the
 * vcache table: (1) if doflocks, send RXAFS_ExtendLock keep-alives for
 * entries holding file locks; (2) for entries with CCore set, perform the
 * deferred store-on-last-reference using the cred stashed in linkData;
 * (3) for CUnlinkedDel entries, finish the deferred unlink.
 * afs_xvcache is dropped and reacquired around the per-entry work.
 *
 * NOTE(review): excerpt is non-contiguous; several lock/ref manipulations
 * and closing braces are elided.
 */
1177 afs_FlushActiveVcaches(register afs_int32 doflocks)
1179 register struct vcache *tvc;
1181 register struct conn *tc;
1182 register afs_int32 code;
1183 register struct AFS_UCRED *cred = NULL;
1184 struct vrequest treq, ureq;
1185 struct AFSVolSync tsync;
1187 XSTATS_DECLS AFS_STATCNT(afs_FlushActiveVcaches);
1188 ObtainReadLock(&afs_xvcache);
1189 for (i = 0; i < VCSIZE; i++) {
1190 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
1191 if (doflocks && tvc->flockCount != 0) {
1192 /* if this entry has an flock, send a keep-alive call out */
1194 ReleaseReadLock(&afs_xvcache);
1195 ObtainWriteLock(&tvc->lock, 51);
1197 afs_InitReq(&treq, afs_osi_credp);
1198 treq.flags |= O_NONBLOCK;
1200 tc = afs_Conn(&tvc->fid, &treq, SHARED_LOCK);
1202 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
1205 RXAFS_ExtendLock(tc->id,
1206 (struct AFSFid *)&tvc->fid.Fid,
/* afs_Analyze retries on transient errors / other servers */
1212 } while (afs_Analyze
1213 (tc, code, &tvc->fid, &treq,
1214 AFS_STATS_FS_RPCIDX_EXTENDLOCK, SHARED_LOCK, NULL));
1216 ReleaseWriteLock(&tvc->lock);
1217 ObtainReadLock(&afs_xvcache);
1221 if ((tvc->states & CCore) || (tvc->states & CUnlinkedDel)) {
1223 * Don't let it evaporate in case someone else is in
1224 * this code. Also, drop the afs_xvcache lock while
1225 * getting vcache locks.
1228 ReleaseReadLock(&afs_xvcache);
1229 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
1230 afs_BozonLock(&tvc->pvnLock, tvc);
1232 #if defined(AFS_SGI_ENV)
1234 * That's because if we come in via the CUnlinkedDel bit state path we'll be have 0 refcnt
1236 osi_Assert(VREFCOUNT(tvc) > 0);
1237 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1239 ObtainWriteLock(&tvc->lock, 52);
1240 if (tvc->states & CCore) {
1241 tvc->states &= ~CCore;
1242 /* XXXX Find better place-holder for cred XXXX */
1243 cred = (struct AFS_UCRED *)tvc->linkData;
1244 tvc->linkData = NULL; /* XXX */
1245 afs_InitReq(&ureq, cred);
1246 afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
1247 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
1248 tvc->execsOrWriters);
1249 code = afs_StoreOnLastReference(tvc, &ureq);
1250 ReleaseWriteLock(&tvc->lock);
1251 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
1252 afs_BozonUnlock(&tvc->pvnLock, tvc);
1254 hzero(tvc->flushDV);
1257 if (code && code != VNOVNODE) {
1258 afs_StoreWarn(code, tvc->fid.Fid.Volume,
1259 /* /dev/console */ 1);
1261 } else if (tvc->states & CUnlinkedDel) {
1265 ReleaseWriteLock(&tvc->lock);
1266 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
1267 afs_BozonUnlock(&tvc->pvnLock, tvc);
1269 #if defined(AFS_SGI_ENV)
1270 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
/* complete the unlink that was deferred in afs_InactiveVCache */
1272 afs_remunlink(tvc, 0);
1273 #if defined(AFS_SGI_ENV)
1274 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1277 /* lost (or won, perhaps) the race condition */
1278 ReleaseWriteLock(&tvc->lock);
1279 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
1280 afs_BozonUnlock(&tvc->pvnLock, tvc);
1283 #if defined(AFS_SGI_ENV)
1284 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1286 ObtainReadLock(&afs_xvcache);
1292 AFS_RELE(AFSTOV(tvc));
1294 /* Matches write code setting CCore flag */
1298 #ifdef AFS_DARWIN_ENV
1299 if (VREFCOUNT(tvc) == 1 && UBCINFOEXISTS(&tvc->v)) {
1301 panic("flushactive open, hasubc, but refcnt 1");
1302 osi_VM_TryReclaim(tvc, 0);
1307 ReleaseReadLock(&afs_xvcache);
1315 * Make sure a cache entry is up-to-date status-wise.
1317 * NOTE: everywhere that calls this can potentially be sped up
1318 * by checking CStatd first, and avoiding doing the InitReq
1319 * if this is up-to-date.
1321 * Anymore, the only places that call this KNOW already that the
1322 * vcache is not up-to-date, so we don't screw around.
1325 * avc : Ptr to vcache entry to verify.
/*
 * afs_VerifyVCache2 -- force a status refetch for avc: discard its callback
 * and CStatd/CUnique bits, purge possibly-stale dnlc entries, and call
 * afs_GetVCache to refetch status from the server.  Skips the work if the
 * entry regained CStatd while we waited for the write lock.
 * (Excerpt: non-contiguous; return statements are elided.)
 */
1330 afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
1332 register struct vcache *tvc;
1334 AFS_STATCNT(afs_VerifyVCache);
1336 #if defined(AFS_OSF_ENV)
1337 ObtainReadLock(&avc->lock);
/* OSF: a wired entry is left alone (early-exit path elided) */
1338 if (afs_IsWired(avc)) {
1339 ReleaseReadLock(&avc->lock);
1342 ReleaseReadLock(&avc->lock);
1343 #endif /* AFS_OSF_ENV */
1344 /* otherwise we must fetch the status info */
1346 ObtainWriteLock(&avc->lock, 53);
/* someone else revalidated while we waited for the lock */
1347 if (avc->states & CStatd) {
1348 ReleaseWriteLock(&avc->lock);
1351 ObtainWriteLock(&afs_xcbhash, 461);
1352 avc->states &= ~(CStatd | CUnique);
1353 avc->callback = NULL;
1354 afs_DequeueCallback(avc);
1355 ReleaseWriteLock(&afs_xcbhash);
1356 ReleaseWriteLock(&avc->lock);
1358 /* since we've been called back, or the callback has expired,
1359 * it's possible that the contents of this directory, or this
1360 * file's name have changed, thus invalidating the dnlc contents.
1362 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
1363 osi_dnlc_purgedp(avc);
1365 osi_dnlc_purgevp(avc);
1367 /* fetch the status info */
1368 tvc = afs_GetVCache(&avc->fid, areq, NULL, avc);
1371 /* Put it back; caller has already incremented vrefCount */
1375 } /*afs_VerifyVCache */
1382 * Simple copy of stat info into cache.
1385 * avc : Ptr to vcache entry involved.
1386 * astat : Ptr to stat info to copy.
1389 * Nothing interesting.
1391 * Callers: as of 1992-04-29, only called by WriteVCache
/*
 * afs_SimpleVStat — copy fetched status (length, dates, owner, group,
 * mode, type bits) into the vcache. The length is only overwritten when
 * the file is not being executed/written and has no dirty pages (and,
 * on some platforms, is not mapped). For CForeign entries the access
 * cache is also updated from CallerAccess/AnonymousAccess.
 * Per the header comment above: as of 1992-04-29 only WriteVCache calls
 * this. Caller is expected to hold the vcache write lock (the SGI
 * osi_Assert below checks WriteLocked(&avc->lock)).
 * NOTE(review): elided extract — line-number prefixes embedded, several
 * lines (braces, #endif arms) missing from this view.
 */
1394 afs_SimpleVStat(register struct vcache *avc,
1395 register struct AFSFetchStatus *astat, struct vrequest *areq)
1398 AFS_STATCNT(afs_SimpleVStat);
/* Only adopt the server's length when no local writer/mapper could race us. */
1401 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1402 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1404 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
1406 #ifdef AFS_64BIT_CLIENT
1407 FillInt64(length, astat->Length_hi, astat->Length);
1408 #else /* AFS_64BIT_CLIENT */
1409 length = astat->Length;
1410 #endif /* AFS_64BIT_CLIENT */
1411 #if defined(AFS_SGI_ENV)
1412 osi_Assert((valusema(&avc->vc_rwlock) <= 0)
1413 && (OSI_GET_LOCKID() == avc->vc_rwlockid));
/* SGI: file shrank — toss now-stale pages, dropping/retaking our lock around it. */
1414 if (length < avc->m.Length) {
1415 vnode_t *vp = (vnode_t *) avc;
1417 osi_Assert(WriteLocked(&avc->lock));
1418 ReleaseWriteLock(&avc->lock);
1420 PTOSSVP(vp, (off_t) length, (off_t) MAXLONG);
1422 ObtainWriteLock(&avc->lock, 67);
1425 /* if writing the file, don't fetch over this value */
1426 afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT, ICL_TYPE_POINTER, avc,
1427 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
1428 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1429 avc->m.Length = length;
1430 avc->m.Date = astat->ClientModTime;
1432 avc->m.Owner = astat->Owner;
1433 avc->m.Group = astat->Group;
1434 avc->m.Mode = astat->UnixModeBits;
/* Fold the AFS file type into the UNIX mode bits. */
1435 if (vType(avc) == VREG) {
1436 avc->m.Mode |= S_IFREG;
1437 } else if (vType(avc) == VDIR) {
1438 avc->m.Mode |= S_IFDIR;
1439 } else if (vType(avc) == VLNK) {
1440 avc->m.Mode |= S_IFLNK;
1441 if ((avc->m.Mode & 0111) == 0)
1444 if (avc->states & CForeign) {
1445 struct axscache *ac;
1446 avc->anyAccess = astat->AnonymousAccess;
1448 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1450 * Caller has at least one bit not covered by anonymous, and
1451 * thus may have interesting rights.
1453 * HOWEVER, this is a really bad idea, because any access query
1454 * for bits which aren't covered by anonymous, on behalf of a user
1455 * who doesn't have any special rights, will result in an answer of
1456 * the form "I don't know, lets make a FetchStatus RPC and find out!"
1457 * It's an especially bad idea under Ultrix, since (due to the lack of
1458 * a proper access() call) it must perform several afs_access() calls
1459 * in order to create magic mode bits that vary according to who makes
1460 * the call. In other words, _every_ stat() generates a test for
1463 #endif /* badidea */
1464 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1465 ac->axess = astat->CallerAccess;
1466 else /* not found, add a new one if possible */
1467 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1471 } /*afs_SimpleVStat */
1478 * Store the status info *only* back to the server for a
1482 * avc : Ptr to the vcache entry.
1483 * astatus : Ptr to the status info to store.
1484 * areq : Ptr to the associated vrequest.
1487 * Must be called with a shared lock held on the vnode.
/*
 * afs_WriteVCache — store status info back to the file server via the
 * RXAFS_StoreStatus RPC (retried through the afs_Analyze loop). On
 * success the returned status is folded into the vcache locally via
 * afs_SimpleVStat plus an explicit m.Date update; on failure CStatd and
 * CUnique are cleared so the next access revalidates with the server.
 * Per the header comment above: caller holds a shared lock on the
 * vnode; it is upgraded to write here and converted back before return.
 * NOTE(review): elided extract — success/failure branch structure is
 * partially missing from this view (line numbers jump).
 */
1491 afs_WriteVCache(register struct vcache *avc,
1492 register struct AFSStoreStatus *astatus,
1493 struct vrequest *areq)
1497 struct AFSFetchStatus OutStatus;
1498 struct AFSVolSync tsync;
1499 XSTATS_DECLS AFS_STATCNT(afs_WriteVCache);
1500 afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
1501 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
/* Standard AFS RPC retry loop: get a connection, issue the RPC, let
 * afs_Analyze decide whether to retry against another server. */
1504 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
1506 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
1509 RXAFS_StoreStatus(tc->id, (struct AFSFid *)&avc->fid.Fid,
1510 astatus, &OutStatus, &tsync);
1515 } while (afs_Analyze
1516 (tc, code, &avc->fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
1517 SHARED_LOCK, NULL));
1519 UpgradeSToWLock(&avc->lock, 20);
1521 /* success, do the changes locally */
1522 afs_SimpleVStat(avc, &OutStatus, areq);
1524 * Update the date, too. SimpleVStat didn't do this, since
1525 * it thought we were doing this after fetching new status
1526 * over a file being written.
1528 avc->m.Date = OutStatus.ClientModTime;
1530 /* failure, set up to check with server next time */
1531 ObtainWriteLock(&afs_xcbhash, 462);
1532 afs_DequeueCallback(avc);
1533 avc->states &= ~(CStatd | CUnique); /* turn off stat valid flag */
1534 ReleaseWriteLock(&afs_xcbhash);
1535 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
1536 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
1538 ConvertWToSLock(&avc->lock);
1541 } /*afs_WriteVCache */
1547 * Copy astat block into vcache info
1550 * avc : Ptr to vcache entry.
1551 * astat : Ptr to stat block to copy in.
1552 * areq : Ptr to associated request.
1555 * Must be called under a write lock
1557 * Note: this code may get dataversion and length out of sync if the file has
1558 * been modified. This is less than ideal. I haven't thought about
1559 * it sufficiently to be certain that it is adequate.
/*
 * afs_ProcessFS — copy a full AFSFetchStatus block into the vcache:
 * length (guarded against local writers/mappers, see WARNING below),
 * dates, DataVersion, owner/group/mode/link count, vnode type (with
 * fakestat translation of mode-000 symlinks to directories), anonymous
 * access rights, and the per-uid access cache. Platform hooks at the
 * end push the new attributes into the Linux inode / Darwin VM layer.
 * Per the header comment above: must be called under a write lock.
 * NOTE(review): elided extract — several closing braces / #else arms
 * are missing from this view; do not read the block as complete.
 */
1562 afs_ProcessFS(register struct vcache *avc,
1563 register struct AFSFetchStatus *astat, struct vrequest *areq)
1566 AFS_STATCNT(afs_ProcessFS);
1568 #ifdef AFS_64BIT_CLIENT
1569 FillInt64(length, astat->Length_hi, astat->Length);
1570 #else /* AFS_64BIT_CLIENT */
1571 length = astat->Length;
1572 #endif /* AFS_64BIT_CLIENT */
1573 /* WARNING: afs_DoBulkStat uses the Length field to store a sequence
1574 * number for each bulk status request. Under no circumstances
1575 * should afs_DoBulkStat store a sequence number if the new
1576 * length will be ignored when afs_ProcessFS is called with
1577 * new stats. If you change the following conditional then you
1578 * also need to change the conditional in afs_DoBulkStat. */
1580 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1581 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1583 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
1585 /* if we're writing or mapping this file, don't fetch over these
1588 afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
1589 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
1590 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1591 avc->m.Length = length;
1592 avc->m.Date = astat->ClientModTime;
1594 hset64(avc->m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
1595 avc->m.Owner = astat->Owner;
1596 avc->m.Mode = astat->UnixModeBits;
1597 avc->m.Group = astat->Group;
1598 avc->m.LinkCount = astat->LinkCount;
/* Map the server's FileType onto a local vnode type + S_IF* mode bit. */
1599 if (astat->FileType == File) {
1600 vSetType(avc, VREG);
1601 avc->m.Mode |= S_IFREG;
1602 } else if (astat->FileType == Directory) {
1603 vSetType(avc, VDIR);
1604 avc->m.Mode |= S_IFDIR;
1605 } else if (astat->FileType == SymbolicLink) {
/* fakestat: a mode-000 symlink is presented as a directory (mount point). */
1606 if (afs_fakestat_enable && (avc->m.Mode & 0111) == 0) {
1607 vSetType(avc, VDIR);
1608 avc->m.Mode |= S_IFDIR;
1610 vSetType(avc, VLNK);
1611 avc->m.Mode |= S_IFLNK;
1613 if ((avc->m.Mode & 0111) == 0) {
1617 avc->anyAccess = astat->AnonymousAccess;
1619 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1621 * Caller has at least one bit not covered by anonymous, and
1622 * thus may have interesting rights.
1624 * HOWEVER, this is a really bad idea, because any access query
1625 * for bits which aren't covered by anonymous, on behalf of a user
1626 * who doesn't have any special rights, will result in an answer of
1627 * the form "I don't know, lets make a FetchStatus RPC and find out!"
1628 * It's an especially bad idea under Ultrix, since (due to the lack of
1629 * a proper access() call) it must perform several afs_access() calls
1630 * in order to create magic mode bits that vary according to who makes
1631 * the call. In other words, _every_ stat() generates a test for
1634 #endif /* badidea */
1636 struct axscache *ac;
1637 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1638 ac->axess = astat->CallerAccess;
1639 else /* not found, add a new one if possible */
1640 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1642 #ifdef AFS_LINUX22_ENV
1643 vcache2inode(avc); /* Set the inode attr cache */
1645 #ifdef AFS_DARWIN_ENV
1646 osi_VM_Setup(avc, 1);
1649 } /*afs_ProcessFS */
/*
 * afs_RemoteLookup — perform an RXAFS_Lookup RPC for `name` in the
 * directory identified by `afid`, filling in the new fid (nfid), its
 * fetch status, callback info, the server used (*serverp), and volume
 * sync data. A NULL name is replaced with "" (marked XXX in-source).
 * Retries through the standard afs_Analyze loop.
 * NOTE(review): elided extract — the tail of this function (loop body
 * braces, return of the final code) is not visible in this view.
 */
1653 afs_RemoteLookup(register struct VenusFid *afid, struct vrequest *areq,
1654 char *name, struct VenusFid *nfid,
1655 struct AFSFetchStatus *OutStatusp,
1656 struct AFSCallBack *CallBackp, struct server **serverp,
1657 struct AFSVolSync *tsyncp)
1661 register struct conn *tc;
1662 struct AFSFetchStatus OutDirStatus;
1663 XSTATS_DECLS if (!name)
1664 name = ""; /* XXX */
1666 tc = afs_Conn(afid, areq, SHARED_LOCK);
/* Report which server answered, so the caller can attach callback state to it. */
1669 *serverp = tc->srvr->server;
1671 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
1674 RXAFS_Lookup(tc->id, (struct AFSFid *)&afid->Fid, name,
1675 (struct AFSFid *)&nfid->Fid, OutStatusp,
1676 &OutDirStatus, CallBackp, tsyncp);
1681 } while (afs_Analyze
1682 (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
1693 * Given a file id and a vrequest structure, fetch the status
1694 * information associated with the file.
1698 * areq : Ptr to associated vrequest structure, specifying the
1699 * user whose authentication tokens will be used.
1700 * avc : caller may already have a vcache for this file, which is
1704 * The cache entry is returned with an increased vrefCount field.
1705 * The entry must be discarded by calling afs_PutVCache when you
1706 * are through using the pointer to the cache entry.
1708 * You should not hold any locks when calling this function, except
1709 * locks on other vcache entries. If you lock more than one vcache
1710 * entry simultaneously, you should lock them in this order:
1712 * 1. Lock all files first, then directories.
1713 * 2. Within a particular type, lock entries in Fid.Vnode order.
1715 * This locking hierarchy is convenient because it allows locking
1716 * of a parent dir cache entry, given a file (to check its access
1717 * control list). It also allows renames to be handled easily by
1718 * locking directories in a constant order.
1719 * NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
1721 /* might have a vcache structure already, which must
1722 * already be held by the caller */
/*
 * afs_GetVCache — look up (or create via afs_NewVCache) the vcache
 * entry for `afid` and make sure its status is valid. Fast path: entry
 * found with CStatd set → count a hit and return it. Slow path: create
 * or revalidate the entry — per-platform page/buffer invalidation
 * (OSF wired check, OpenBSD uvm_vnp_uncache, FreeBSD vinvalbuf with the
 * "iheldthelock" dodge described inline), clear CUnique and the
 * callback, refresh per-volume flags (CForeign/CRO/CBackup and the
 * ".." mvid for mount points), then fetch status via dynroot or
 * afs_FetchStatus. Returns the entry with an extra reference; caller
 * releases it with afs_PutVCache (see header comment above).
 * NOTE(review): elided extract — many braces, #else/#endif arms and
 * early returns are missing from this view; the visible lines show the
 * skeleton only.
 */
1725 afs_GetVCache(register struct VenusFid *afid, struct vrequest *areq,
1726 afs_int32 * cached, struct vcache *avc)
1729 afs_int32 code, newvcache = 0;
1730 register struct vcache *tvc;
1734 AFS_STATCNT(afs_GetVCache);
1737 *cached = 0; /* Init just in case */
1739 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1743 ObtainSharedLock(&afs_xvcache, 5);
1745 tvc = afs_FindVCache(afid, &retry, DO_STATS | DO_VLRU);
1747 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1748 ReleaseSharedLock(&afs_xvcache);
1749 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
/* Fast path: found and still stat'd — nothing to refresh. */
1757 if (tvc->states & CStatd) {
1758 ReleaseSharedLock(&afs_xvcache);
1762 UpgradeSToWLock(&afs_xvcache, 21);
1764 /* no cache entry, better grab one */
1765 tvc = afs_NewVCache(afid, NULL);
1768 ConvertWToSLock(&afs_xvcache);
1769 afs_stats_cmperf.vcacheMisses++;
1772 ReleaseSharedLock(&afs_xvcache);
1774 ObtainWriteLock(&tvc->lock, 54);
1776 if (tvc->states & CStatd) {
1777 #ifdef AFS_LINUX22_ENV
1780 ReleaseWriteLock(&tvc->lock);
1781 #ifdef AFS_DARWIN_ENV
1782 osi_VM_Setup(tvc, 0);
1786 #if defined(AFS_OSF_ENV)
1787 if (afs_IsWired(tvc)) {
1788 ReleaseWriteLock(&tvc->lock);
1791 #endif /* AFS_OSF_ENV */
1793 VOP_LOCK(AFSTOV(tvc), LK_EXCLUSIVE | LK_RETRY, curproc);
1794 uvm_vnp_uncache(AFSTOV(tvc));
1795 VOP_UNLOCK(AFSTOV(tvc), 0, curproc);
1799 * XXX - I really don't like this. Should try to understand better.
1800 * It seems that sometimes, when we get called, we already hold the
1801 * lock on the vnode (e.g., from afs_getattr via afs_VerifyVCache).
1802 * We can't drop the vnode lock, because that could result in a race.
1803 * Sometimes, though, we get here and don't hold the vnode lock.
1804 * I hate code paths that sometimes hold locks and sometimes don't.
1805 * In any event, the dodge we use here is to check whether the vnode
1806 * is locked, and if it isn't, then we gain and drop it around the call
1807 * to vinvalbuf; otherwise, we leave it alone.
1814 #ifdef AFS_FBSD50_ENV
1815 iheldthelock = VOP_ISLOCKED(vp, curthread);
1817 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
1818 vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
1820 VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
1822 iheldthelock = VOP_ISLOCKED(vp, curproc);
1824 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
1825 vinvalbuf(vp, V_SAVE, osi_curcred(), curproc, PINOD, 0);
1827 VOP_UNLOCK(vp, LK_EXCLUSIVE, curproc);
/* Invalidate cached status/callback state before refetching. */
1832 ObtainWriteLock(&afs_xcbhash, 464);
1833 tvc->states &= ~CUnique;
1835 afs_DequeueCallback(tvc);
1836 ReleaseWriteLock(&afs_xcbhash);
1838 /* It is always appropriate to throw away all the access rights? */
1839 afs_FreeAllAxs(&(tvc->Access));
1840 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-volume info */
1842 if ((tvp->states & VForeign)) {
1844 tvc->states |= CForeign;
1845 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
1846 && (tvp->rootUnique == afid->Fid.Unique)) {
1850 if (tvp->states & VRO)
1852 if (tvp->states & VBackup)
1853 tvc->states |= CBackup;
1854 /* now copy ".." entry back out of volume structure, if necessary */
1855 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
1857 tvc->mvid = (struct VenusFid *)
1858 osi_AllocSmallSpace(sizeof(struct VenusFid));
1859 *tvc->mvid = tvp->dotdot;
1861 afs_PutVolume(tvp, READ_LOCK);
1865 afs_RemoveVCB(afid);
1867 struct AFSFetchStatus OutStatus;
/* Dynroot vnodes are synthesized locally; everything else hits the server. */
1869 if (afs_DynrootNewVnode(tvc, &OutStatus)) {
1870 afs_ProcessFS(tvc, &OutStatus, areq);
1871 tvc->states |= CStatd | CUnique;
1874 code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
1879 ReleaseWriteLock(&tvc->lock);
1881 ObtainReadLock(&afs_xvcache);
1883 ReleaseReadLock(&afs_xvcache);
1887 ReleaseWriteLock(&tvc->lock);
1890 } /*afs_GetVCache */
/*
 * afs_LookupVCache — resolve `aname` inside directory `adp` to a vcache
 * entry, combining the lookup RPC with status fetch. First probes the
 * cache by the directory fid; then does afs_RemoteLookup to obtain the
 * new fid + status + callback, re-probes/creates the vcache for that
 * fid, copies per-volume flags and ".." info, and installs the callback
 * only if no callback-breaking activity happened during the RPC
 * (origCBs == afs_allCBs); otherwise the entry is left non-CStatd so it
 * gets revalidated. Finally afs_ProcessFS stuffs the fetched status.
 * NOTE(review): elided extract — error paths, braces and some #endif
 * arms are missing from this view; treat the visible lines as a
 * skeleton of the real function.
 */
1895 afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
1896 afs_int32 * cached, struct vcache *adp, char *aname)
1898 afs_int32 code, now, newvcache = 0;
1899 struct VenusFid nfid;
1900 register struct vcache *tvc;
1902 struct AFSFetchStatus OutStatus;
1903 struct AFSCallBack CallBack;
1904 struct AFSVolSync tsync;
1905 struct server *serverp = 0;
1909 AFS_STATCNT(afs_GetVCache);
1911 *cached = 0; /* Init just in case */
1913 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1917 ObtainReadLock(&afs_xvcache);
1918 tvc = afs_FindVCache(afid, &retry, DO_STATS /* no vlru */ );
1921 ReleaseReadLock(&afs_xvcache);
1923 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1924 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1928 ObtainReadLock(&tvc->lock);
1930 if (tvc->states & CStatd) {
1934 ReleaseReadLock(&tvc->lock);
1937 tvc->states &= ~CUnique;
1939 ReleaseReadLock(&tvc->lock);
1940 ObtainReadLock(&afs_xvcache);
1944 ReleaseReadLock(&afs_xvcache);
1946 /* lookup the file */
/* Snapshot the global callback counter so we can detect callback breaks
 * that race with the RPC below. */
1949 origCBs = afs_allCBs; /* if anything changes, we don't have a cb */
1951 afs_RemoteLookup(&adp->fid, areq, aname, &nfid, &OutStatus, &CallBack,
1954 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1958 ObtainSharedLock(&afs_xvcache, 6);
1959 tvc = afs_FindVCache(&nfid, &retry, DO_VLRU /* no xstats now */ );
1961 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1962 ReleaseSharedLock(&afs_xvcache);
1963 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1969 /* no cache entry, better grab one */
1970 UpgradeSToWLock(&afs_xvcache, 22);
1971 tvc = afs_NewVCache(&nfid, serverp);
1973 ConvertWToSLock(&afs_xvcache);
1976 ReleaseSharedLock(&afs_xvcache);
1977 ObtainWriteLock(&tvc->lock, 55);
1979 /* It is always appropriate to throw away all the access rights? */
1980 afs_FreeAllAxs(&(tvc->Access));
1981 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-vol info */
1983 if ((tvp->states & VForeign)) {
1985 tvc->states |= CForeign;
1986 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
1987 && (tvp->rootUnique == afid->Fid.Unique))
1990 if (tvp->states & VRO)
1992 if (tvp->states & VBackup)
1993 tvc->states |= CBackup;
1994 /* now copy ".." entry back out of volume structure, if necessary */
1995 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
1997 tvc->mvid = (struct VenusFid *)
1998 osi_AllocSmallSpace(sizeof(struct VenusFid));
1999 *tvc->mvid = tvp->dotdot;
2004 ObtainWriteLock(&afs_xcbhash, 465);
2005 afs_DequeueCallback(tvc);
2006 tvc->states &= ~(CStatd | CUnique);
2007 ReleaseWriteLock(&afs_xcbhash);
2008 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2009 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2011 afs_PutVolume(tvp, READ_LOCK);
2012 ReleaseWriteLock(&tvc->lock);
2013 ObtainReadLock(&afs_xvcache);
2015 ReleaseReadLock(&afs_xvcache);
/* Install the callback only if nothing broke callbacks while we were
 * off doing the lookup RPC. */
2019 ObtainWriteLock(&afs_xcbhash, 466);
2020 if (origCBs == afs_allCBs) {
2021 if (CallBack.ExpirationTime) {
2022 tvc->callback = serverp;
2023 tvc->cbExpires = CallBack.ExpirationTime + now;
2024 tvc->states |= CStatd | CUnique;
2025 tvc->states &= ~CBulkFetching;
2026 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
2027 } else if (tvc->states & CRO) {
2028 /* adapt gives us an hour. */
2029 tvc->cbExpires = 3600 + osi_Time();
2030 /*XXX*/ tvc->states |= CStatd | CUnique;
2031 tvc->states &= ~CBulkFetching;
2032 afs_QueueCallback(tvc, CBHash(3600), tvp);
2034 tvc->callback = NULL;
2035 afs_DequeueCallback(tvc);
2036 tvc->states &= ~(CStatd | CUnique);
2037 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2038 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2041 afs_DequeueCallback(tvc);
2042 tvc->states &= ~CStatd;
2043 tvc->states &= ~CUnique;
2044 tvc->callback = NULL;
2045 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2046 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2048 ReleaseWriteLock(&afs_xcbhash);
2050 afs_PutVolume(tvp, READ_LOCK);
2051 afs_ProcessFS(tvc, &OutStatus, areq);
2053 ReleaseWriteLock(&tvc->lock);
/*
 * afs_GetRootVCache — obtain the vcache entry for a volume's root
 * vnode. If the volume structure does not yet record a root fid (or a
 * refetch was requested), a lookup RPC with Fid.Vnode == 0 asks the
 * server for it, and the result is written back into tvolp under its
 * write lock. The fid is then searched in the hash chain (with OSF
 * vget / non-OSF VLRU-requeue variants visible below), a new entry is
 * created on miss, per-volume flags and ".." info are copied, and the
 * callback is installed only when origCBs == afs_allCBs (no callback
 * break raced the RPC). afs_ProcessFS stuffs the fetched status last.
 * NOTE(review): elided extract — loop/branch braces and the goto-retry
 * structure around getNewFid are largely missing from this view.
 */
2059 afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
2060 afs_int32 * cached, struct volume *tvolp)
2062 afs_int32 code = 0, i, newvcache = 0, haveStatus = 0;
2063 afs_int32 getNewFid = 0;
2065 struct VenusFid nfid;
2066 register struct vcache *tvc;
2067 struct server *serverp = 0;
2068 struct AFSFetchStatus OutStatus;
2069 struct AFSCallBack CallBack;
2070 struct AFSVolSync tsync;
/* Root fid unknown (or refetch forced): ask the server for it. */
2076 if (!tvolp->rootVnode || getNewFid) {
2077 struct VenusFid tfid;
2080 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2081 origCBs = afs_allCBs; /* ignore InitCallBackState */
2083 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2088 /* ReleaseReadLock(&tvolp->lock); */
2089 ObtainWriteLock(&tvolp->lock, 56);
2090 tvolp->rootVnode = afid->Fid.Vnode = nfid.Fid.Vnode;
2091 tvolp->rootUnique = afid->Fid.Unique = nfid.Fid.Unique;
2092 ReleaseWriteLock(&tvolp->lock);
2093 /* ObtainReadLock(&tvolp->lock);*/
2096 afid->Fid.Vnode = tvolp->rootVnode;
2097 afid->Fid.Unique = tvolp->rootUnique;
2100 ObtainSharedLock(&afs_xvcache, 7);
2102 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2103 if (!FidCmp(&(tvc->fid), afid)) {
2105 /* Grab this vnode, possibly reactivating from the free list */
2106 /* for the present (95.05.25) everything on the hash table is
2107 * definitively NOT in the free list -- at least until afs_reclaim
2108 * can be safely implemented */
2111 vg = vget(AFSTOV(tvc)); /* this bumps ref count */
2115 #endif /* AFS_OSF_ENV */
2120 if (!haveStatus && (!tvc || !(tvc->states & CStatd))) {
2121 /* Mount point no longer stat'd or unknown. FID may have changed. */
2124 AFS_RELE(AFSTOV(tvc));
2128 ReleaseSharedLock(&afs_xvcache);
2133 UpgradeSToWLock(&afs_xvcache, 23);
2134 /* no cache entry, better grab one */
2135 tvc = afs_NewVCache(afid, NULL);
2137 afs_stats_cmperf.vcacheMisses++;
2141 afs_stats_cmperf.vcacheHits++;
2143 /* we already bumped the ref count in the for loop above */
2144 #else /* AFS_OSF_ENV */
/* Non-OSF: move the entry to the head of the VLRU, sanity-checking the
 * queue links before and after (refpanic on corruption). */
2147 UpgradeSToWLock(&afs_xvcache, 24);
2148 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2149 refpanic("GRVC VLRU inconsistent0");
2151 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2152 refpanic("GRVC VLRU inconsistent1");
2154 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2155 refpanic("GRVC VLRU inconsistent2");
2157 QRemove(&tvc->vlruq); /* move to lruq head */
2158 QAdd(&VLRU, &tvc->vlruq);
2159 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2160 refpanic("GRVC VLRU inconsistent3");
2162 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2163 refpanic("GRVC VLRU inconsistent4");
2165 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2166 refpanic("GRVC VLRU inconsistent5");
2171 ReleaseWriteLock(&afs_xvcache);
2173 if (tvc->states & CStatd) {
2177 ObtainReadLock(&tvc->lock);
2178 tvc->states &= ~CUnique;
2179 tvc->callback = NULL; /* redundant, perhaps */
2180 ReleaseReadLock(&tvc->lock);
2183 ObtainWriteLock(&tvc->lock, 57);
2185 /* It is always appropriate to throw away all the access rights? */
2186 afs_FreeAllAxs(&(tvc->Access));
2189 tvc->states |= CForeign;
2190 if (tvolp->states & VRO)
2192 if (tvolp->states & VBackup)
2193 tvc->states |= CBackup;
2194 /* now copy ".." entry back out of volume structure, if necessary */
2195 if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
2196 && (tvolp->rootUnique == afid->Fid.Unique)) {
2199 if (tvc->mvstat == 2 && tvolp->dotdot.Fid.Volume != 0) {
2201 tvc->mvid = (struct VenusFid *)
2202 osi_AllocSmallSpace(sizeof(struct VenusFid));
2203 *tvc->mvid = tvolp->dotdot;
2207 afs_RemoveVCB(afid);
2210 struct VenusFid tfid;
2213 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2214 origCBs = afs_allCBs; /* ignore InitCallBackState */
2216 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
/* On RPC failure (elided here), tear down callback state so the entry
 * is revalidated on next use. */
2221 ObtainWriteLock(&afs_xcbhash, 467);
2222 afs_DequeueCallback(tvc);
2223 tvc->callback = NULL;
2224 tvc->states &= ~(CStatd | CUnique);
2225 ReleaseWriteLock(&afs_xcbhash);
2226 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2227 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2228 ReleaseWriteLock(&tvc->lock);
2229 ObtainReadLock(&afs_xvcache);
2231 ReleaseReadLock(&afs_xvcache);
2235 ObtainWriteLock(&afs_xcbhash, 468);
2236 if (origCBs == afs_allCBs) {
2237 tvc->states |= CTruth;
2238 tvc->callback = serverp;
2239 if (CallBack.ExpirationTime != 0) {
2240 tvc->cbExpires = CallBack.ExpirationTime + start;
2241 tvc->states |= CStatd;
2242 tvc->states &= ~CBulkFetching;
2243 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
2244 } else if (tvc->states & CRO) {
2245 /* adapt gives us an hour. */
2246 tvc->cbExpires = 3600 + osi_Time();
2247 /*XXX*/ tvc->states |= CStatd;
2248 tvc->states &= ~CBulkFetching;
2249 afs_QueueCallback(tvc, CBHash(3600), tvolp);
2252 afs_DequeueCallback(tvc);
2253 tvc->callback = NULL;
2254 tvc->states &= ~(CStatd | CUnique);
2255 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2256 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2258 ReleaseWriteLock(&afs_xcbhash);
2259 afs_ProcessFS(tvc, &OutStatus, areq);
2261 ReleaseWriteLock(&tvc->lock);
2268 * must be called with avc write-locked
2269 * don't absolutely have to invalidate the hint unless the dv has
2270 * changed, but be sure to get it right else there will be consistency bugs.
/*
 * afs_FetchStatus — issue the RXAFS_FetchStatus RPC for `afid` and fold
 * the result into `avc` (via afs_ProcessFS), recording the callback or
 * tearing callback state down on the race/error paths. Invalidates the
 * dcache hint fields (quick.stamp / h1.dchint) up front, per the header
 * comment above about keeping the hint consistent with dataversion.
 * On EPERM/EACCES the per-uid access cache is updated to remember the
 * denial rather than undoing the callback (see inline rationale).
 * Per the header comment above: must be called with avc write-locked.
 * NOTE(review): elided extract — the success-test around afs_ProcessFS
 * and several braces are missing from this view.
 */
2273 afs_FetchStatus(struct vcache * avc, struct VenusFid * afid,
2274 struct vrequest * areq, struct AFSFetchStatus * Outsp)
2277 afs_uint32 start = 0;
2278 register struct conn *tc;
2279 struct AFSCallBack CallBack;
2280 struct AFSVolSync tsync;
2281 struct volume *volp;
2284 tc = afs_Conn(afid, areq, SHARED_LOCK);
2285 avc->quick.stamp = 0;
2286 avc->h1.dchint = NULL; /* invalidate hints */
2288 avc->callback = tc->srvr->server;
2290 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
2293 RXAFS_FetchStatus(tc->id, (struct AFSFid *)&afid->Fid, Outsp,
2301 } while (afs_Analyze
2302 (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
2303 SHARED_LOCK, NULL));
2306 afs_ProcessFS(avc, Outsp, areq);
2307 volp = afs_GetVolume(afid, areq, READ_LOCK);
2308 ObtainWriteLock(&afs_xcbhash, 469);
2309 avc->states |= CTruth;
/* avc->callback still set means no callback break raced the RPC. */
2310 if (avc->callback /* check for race */ ) {
2311 if (CallBack.ExpirationTime != 0) {
2312 avc->cbExpires = CallBack.ExpirationTime + start;
2313 avc->states |= CStatd;
2314 avc->states &= ~CBulkFetching;
2315 afs_QueueCallback(avc, CBHash(CallBack.ExpirationTime), volp);
2316 } else if (avc->states & CRO) { /* ordinary callback on a read-only volume -- AFS 3.2 style */
2317 avc->cbExpires = 3600 + start;
2318 avc->states |= CStatd;
2319 avc->states &= ~CBulkFetching;
2320 afs_QueueCallback(avc, CBHash(3600), volp);
2322 afs_DequeueCallback(avc);
2323 avc->callback = NULL;
2324 avc->states &= ~(CStatd | CUnique);
2325 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
2326 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2329 afs_DequeueCallback(avc);
2330 avc->callback = NULL;
2331 avc->states &= ~(CStatd | CUnique);
2332 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
2333 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2335 ReleaseWriteLock(&afs_xcbhash);
2337 afs_PutVolume(volp, READ_LOCK);
2339 /* used to undo the local callback, but that's too extreme.
2340 * There are plenty of good reasons that fetchstatus might return
2341 * an error, such as EPERM. If we have the vnode cached, statd,
2342 * with callback, might as well keep track of the fact that we
2343 * don't have access...
2345 if (code == EPERM || code == EACCES) {
2346 struct axscache *ac;
2347 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
2349 else /* not found, add a new one if possible */
2350 afs_AddAxs(avc->Access, areq->uid, 0);
2361 * Stuff some information into the vcache for the given file.
2364 * afid : File in question.
2365 * OutStatus : Fetch status on the file.
2366 * CallBack : Callback info.
2367 * tc : RPC connection involved.
2368 * areq : vrequest involved.
2371 * Nothing interesting.
/*
 * afs_StuffVcache — stuff already-fetched status + callback info into
 * the vcache for `afid` (creating the entry via afs_NewVCache on miss).
 * Clears CStatd and purges the DNLC first, refreshes per-volume flags
 * and the ".." mvid, records the server from `tc` as the callback
 * server, queues the callback using osi_Time()-1 as the base (see the
 * long inline comment on expiration padding), and updates the per-pag
 * access cache from OutStatus->CallerAccess.
 * NOTE(review): elided extract — miss/hit branch braces and some
 * #endif arms are missing from this view.
 */
2374 afs_StuffVcache(register struct VenusFid *afid,
2375 struct AFSFetchStatus *OutStatus,
2376 struct AFSCallBack *CallBack, register struct conn *tc,
2377 struct vrequest *areq)
2379 register afs_int32 code, i, newvcache = 0;
2380 register struct vcache *tvc;
2381 struct AFSVolSync tsync;
2383 struct axscache *ac;
2386 AFS_STATCNT(afs_StuffVcache);
2387 #ifdef IFS_VCACHECOUNT
2392 ObtainSharedLock(&afs_xvcache, 8);
2394 tvc = afs_FindVCache(afid, &retry, DO_VLRU /* no stats */ );
2396 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2397 ReleaseSharedLock(&afs_xvcache);
2398 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2404 /* no cache entry, better grab one */
2405 UpgradeSToWLock(&afs_xvcache, 25);
2406 tvc = afs_NewVCache(afid, NULL);
2408 ConvertWToSLock(&afs_xvcache);
2411 ReleaseSharedLock(&afs_xvcache);
2412 ObtainWriteLock(&tvc->lock, 58);
2414 tvc->states &= ~CStatd;
2415 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2416 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2418 /* Is it always appropriate to throw away all the access rights? */
2419 afs_FreeAllAxs(&(tvc->Access));
2421 /*Copy useful per-volume info */
2422 tvp = afs_GetVolume(afid, areq, READ_LOCK);
2424 if (newvcache && (tvp->states & VForeign))
2425 tvc->states |= CForeign;
2426 if (tvp->states & VRO)
2428 if (tvp->states & VBackup)
2429 tvc->states |= CBackup;
2431 * Now, copy ".." entry back out of volume structure, if
2434 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2436 tvc->mvid = (struct VenusFid *)
2437 osi_AllocSmallSpace(sizeof(struct VenusFid));
2438 *tvc->mvid = tvp->dotdot;
2441 /* store the stat on the file */
2442 afs_RemoveVCB(afid);
2443 afs_ProcessFS(tvc, OutStatus, areq);
2444 tvc->callback = tc->srvr->server;
2446 /* we use osi_Time twice below. Ideally, we would use the time at which
2447 * the FetchStatus call began, instead, but we don't have it here. So we
2448 * make do with "now". In the CRO case, it doesn't really matter. In
2449 * the other case, we hope that the difference between "now" and when the
2450 * call actually began execution on the server won't be larger than the
2451 * padding which the server keeps. Subtract 1 second anyway, to be on
2452 * the safe side. Can't subtract more because we don't know how big
2453 * ExpirationTime is. Possible consistency problems may arise if the call
2454 * timeout period becomes longer than the server's expiration padding. */
2455 ObtainWriteLock(&afs_xcbhash, 470);
2456 if (CallBack->ExpirationTime != 0) {
2457 tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
2458 tvc->states |= CStatd;
2459 tvc->states &= ~CBulkFetching;
2460 afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
2461 } else if (tvc->states & CRO) {
2462 /* old-fashioned AFS 3.2 style */
2463 tvc->cbExpires = 3600 + osi_Time();
2464 /*XXX*/ tvc->states |= CStatd;
2465 tvc->states &= ~CBulkFetching;
2466 afs_QueueCallback(tvc, CBHash(3600), tvp);
2468 afs_DequeueCallback(tvc);
2469 tvc->callback = NULL;
2470 tvc->states &= ~(CStatd | CUnique);
2471 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2472 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2474 ReleaseWriteLock(&afs_xcbhash);
2476 afs_PutVolume(tvp, READ_LOCK);
2478 /* look in per-pag cache */
2479 if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
2480 ac->axess = OutStatus->CallerAccess; /* substitute pags */
2481 else /* not found, add a new one if possible */
2482 afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);
2484 ReleaseWriteLock(&tvc->lock);
2485 afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
2486 ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32,
2487 tvc->cbExpires, ICL_TYPE_INT32, tvc->cbExpires - osi_Time());
2489 * Release ref count... hope this guy stays around...
2492 } /*afs_StuffVcache */
2499 * Decrements the reference count on a cache entry.
2502 * avc : Pointer to the cache entry to decrement.
2505 * Nothing interesting.
/*
 * afs_PutVCache — drop a reference on a vcache entry, taking afs_xvcache
 * as a read lock around the (elided) refcount decrement. The in-source
 * "Can we use a read lock here?" question is from the original author.
 * NOTE(review): elided extract — the actual decrement line between the
 * lock/unlock pair is not visible in this view.
 */
2508 afs_PutVCache(register struct vcache *avc)
2510 AFS_STATCNT(afs_PutVCache);
2512 * Can we use a read lock here?
2514 ObtainReadLock(&afs_xvcache);
2516 ReleaseReadLock(&afs_xvcache);
2517 } /*afs_PutVCache */
2523 * Find a vcache entry given a fid.
2526 * afid : Pointer to the fid whose cache entry we desire.
2527 * retry: (SGI-specific) tell the caller to drop the lock on xvcache,
2528 * unlock the vnode, and try again.
2529 * flags: bit 1 to specify whether to compute hit statistics. Not
2530 * set if FindVCache is called as part of internal bookkeeping.
2533 * Must be called with the afs_xvcache lock at least held at
2534 * the read level. In order to do the VLRU adjustment, the xvcache lock
2535 * must be shared-- we upgrade it here.
/*
 * afs_FindVCache — search the vcache hash chain for an exact fid match
 * (FidMatches). On a hit: take a hold on the vnode (vget on OSF,
 * osi_vnhold elsewhere with the SGI retry protocol), optionally move
 * the entry to the VLRU head (DO_VLRU, with refpanic consistency checks
 * before and after), and optionally bump hit/miss + local/remote
 * counters (DO_STATS). Linux refreshes the inode attr cache for CStatd
 * entries; Darwin calls osi_VM_Setup.
 * Per the header comment above: caller holds afs_xvcache at least at
 * read level; DO_VLRU upgrades it to write and converts back to shared.
 * NOTE(review): elided extract — the hash-index computation and several
 * braces are missing from this view.
 */
2539 afs_FindVCache(struct VenusFid *afid, afs_int32 * retry, afs_int32 flag)
2542 register struct vcache *tvc;
2545 AFS_STATCNT(afs_FindVCache);
2548 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2549 if (FidMatches(afid, tvc)) {
2551 /* Grab this vnode, possibly reactivating from the free list */
2554 vg = vget(AFSTOV(tvc));
2558 #endif /* AFS_OSF_ENV */
2563 /* should I have a read lock on the vnode here? */
2567 #if !defined(AFS_OSF_ENV)
2568 osi_vnhold(tvc, retry); /* already held, above */
2569 if (retry && *retry)
2573 * only move to front of vlru if we have proper vcache locking)
2575 if (flag & DO_VLRU) {
2576 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2577 refpanic("FindVC VLRU inconsistent1");
2579 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2580 refpanic("FindVC VLRU inconsistent1");
2582 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2583 refpanic("FindVC VLRU inconsistent2");
/* Requeue at the VLRU head under a temporary write upgrade. */
2585 UpgradeSToWLock(&afs_xvcache, 26);
2586 QRemove(&tvc->vlruq);
2587 QAdd(&VLRU, &tvc->vlruq);
2588 ConvertWToSLock(&afs_xvcache);
2589 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2590 refpanic("FindVC VLRU inconsistent1");
2592 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2593 refpanic("FindVC VLRU inconsistent2");
2595 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2596 refpanic("FindVC VLRU inconsistent3");
2602 if (flag & DO_STATS) {
2604 afs_stats_cmperf.vcacheHits++;
2606 afs_stats_cmperf.vcacheMisses++;
2607 if (afs_IsPrimaryCellNum(afid->Cell))
2608 afs_stats_cmperf.vlocalAccesses++;
2610 afs_stats_cmperf.vremoteAccesses++;
2612 #ifdef AFS_LINUX22_ENV
2613 if (tvc && (tvc->states & CStatd))
2614 vcache2inode(tvc); /* mainly to reset i_nlink */
2616 #ifdef AFS_DARWIN_ENV
2618 osi_VM_Setup(tvc, 0);
2621 } /*afs_FindVCache */
2627 * Find a vcache entry given a fid. Does a wildcard match on what we
2628 * have for the fid. If more than one entry, don't return anything.
2631 * avcp : Fill in pointer if we found one and only one.
2632 * afid : Pointer to the fid whose cache entry we desire.
2633 * (Note: unlike afs_FindVCache, this routine takes no retry or
2634 * flags arguments; statistics are always updated, and the VLRU
2635 * adjustment is always attempted on a unique match.)
2636 *
2639 * Must be called with the afs_xvcache lock at least held at
2640 * the read level. In order to do the VLRU adjustment, the xvcache lock
2641 * must be shared-- we upgrade it here.
2644 * number of matches found.
/* Count of wildcarded NFS-fid lookups that matched more than one vcache
 * entry; bumped by afs_NFSFindVCache when it abandons an ambiguous
 * lookup. */
2647 int afs_duplicate_nfs_fids = 0;
/*
 * afs_NFSFindVCache
 *
 * Wildcard fid lookup used on behalf of NFS: the incoming fid carries
 * only a truncated Vnode (compared with the low 16 bits) and Unique
 * (compared with the low 24 bits), so each hash-chain entry is matched
 * with those masks applied, plus exact Volume and Cell.
 *
 * If exactly one entry matches, *avcp is set to it with a reference
 * held and 1 is returned; if a second distinct entry matches, both
 * references are dropped, afs_duplicate_nfs_fids is incremented, and
 * the lookup is abandoned.  Holds afs_xvcache shared for the scan,
 * upgrading to write only for the VLRU splice.
 *
 * NOTE(review): parts of this function are elided from this view
 * (hash index computation for 'i', the OSF vget error path, the
 * duplicate-match early return) — consult the full source before
 * restructuring.
 */
2650 afs_NFSFindVCache(struct vcache **avcp, struct VenusFid *afid)
2652 register struct vcache *tvc;
2654 afs_int32 count = 0;
2655 struct vcache *found_tvc = NULL;
2657 AFS_STATCNT(afs_FindVCache);
2659 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2663 ObtainSharedLock(&afs_xvcache, 331);
2666 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2667 /* Match only on what we have.... */
2668 if (((tvc->fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
2669 && (tvc->fid.Fid.Volume == afid->Fid.Volume)
2670 && ((tvc->fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
2671 && (tvc->fid.Cell == afid->Cell)) {
2673 /* Grab this vnode, possibly reactivating from the free list */
2676 vg = vget(AFSTOV(tvc));
2679 /* This vnode no longer exists. */
2682 #endif /* AFS_OSF_ENV */
/* Ambiguous match: a second entry satisfied the masked comparison.
 * Release both references, record the collision, and give up. */
2687 /* Drop our reference counts. */
2689 vrele(AFSTOV(found_tvc));
2691 afs_duplicate_nfs_fids++;
2692 ReleaseSharedLock(&afs_xvcache);
2700 /* should I have a read lock on the vnode here? */
/* SGI (pre-5.3): osi_vnhold may need to drop locks; on retry, unwind
 * and release the per-vnode spinlock/semaphore before bailing out. */
2702 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2703 afs_int32 retry = 0;
2704 osi_vnhold(tvc, &retry);
2707 found_tvc = (struct vcache *)0;
2708 ReleaseSharedLock(&afs_xvcache);
2709 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2713 #if !defined(AFS_OSF_ENV)
2714 osi_vnhold(tvc, (int *)0); /* already held, above */
/*
 * VLRU maintenance for the unique match, as in afs_FindVCache:
 * sanity-check the queue, splice the entry to the head under the
 * write lock, then convert back to shared.
 */
2718 * We obtained the xvcache lock above.
2720 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2721 refpanic("FindVC VLRU inconsistent1");
2723 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2724 refpanic("FindVC VLRU inconsistent1");
2726 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2727 refpanic("FindVC VLRU inconsistent2");
2729 UpgradeSToWLock(&afs_xvcache, 568);
2730 QRemove(&tvc->vlruq);
2731 QAdd(&VLRU, &tvc->vlruq);
2732 ConvertWToSLock(&afs_xvcache);
2733 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2734 refpanic("FindVC VLRU inconsistent1");
2736 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2737 refpanic("FindVC VLRU inconsistent2");
2739 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2740 refpanic("FindVC VLRU inconsistent3");
/* Statistics are unconditional here (no DO_STATS flag on this path). */
2746 afs_stats_cmperf.vcacheHits++;
2748 afs_stats_cmperf.vcacheMisses++;
2749 if (afs_IsPrimaryCellNum(afid->Cell))
2750 afs_stats_cmperf.vlocalAccesses++;
2752 afs_stats_cmperf.vremoteAccesses++;
2754 *avcp = tvc; /* May be null */
2756 ReleaseSharedLock(&afs_xvcache);
2757 return (tvc ? 1 : 0);
2759 } /*afs_NFSFindVCache */
2767 * Initialize vcache related variables
/*
 * afs_vcacheInit
 *
 * One-time initialization of the vcache (stat cache) subsystem.
 *  - OSF: size afs_maxvcount to half the system vnode limit
 *    (max_vnodes on OSF 3.0+, nvnode otherwise), capped at astatSize.
 *  - Initialize the afs_xvcache rwlock and the afs_xvcb lock.
 *  - Non-OSF: allocate astatSize vcache entries in one zeroed chunk,
 *    thread them onto freeVCList via the nextfree links, and pin the
 *    chunk in memory where KERNEL_HAVE_PIN is available.
 *  - SGI: assign each entry a vnode number and initialize its
 *    per-vnode rwlock semaphore (plus v_sync / v_lock on IRIX levels
 *    older than 5.3 / 6.2 respectively).
 *
 * NOTE(review): the function's trailing lines (including its closing
 * brace) are elided from this view of the file.
 */
2770 afs_vcacheInit(int astatSize)
2772 register struct vcache *tvp;
2774 #if defined(AFS_OSF_ENV)
2775 if (!afs_maxvcount) {
2776 #if defined(AFS_OSF30_ENV)
2777 afs_maxvcount = max_vnodes / 2; /* limit ourselves to half the total */
2779 afs_maxvcount = nvnode / 2; /* limit ourselves to half the total */
2781 if (astatSize < afs_maxvcount) {
2782 afs_maxvcount = astatSize;
2785 #else /* AFS_OSF_ENV */
2789 RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
2790 LOCK_INIT(&afs_xvcb, "afs_xvcb");
2792 #if !defined(AFS_OSF_ENV)
2793 /* Allocate and thread the struct vcache entries */
2794 tvp = (struct vcache *)afs_osi_Alloc(astatSize * sizeof(struct vcache));
2795 memset((char *)tvp, 0, sizeof(struct vcache) * astatSize)
2797 Initial_freeVCList = tvp;
2798 freeVCList = &(tvp[0]);
/* Link entries 0..astatSize-2 to their successors; the last entry
 * terminates the free list. */
2799 for (i = 0; i < astatSize - 1; i++) {
2800 tvp[i].nextfree = &(tvp[i + 1]);
2802 tvp[astatSize - 1].nextfree = NULL;
2803 #ifdef KERNEL_HAVE_PIN
2804 pin((char *)tvp, astatSize * sizeof(struct vcache)); /* XXX */
2809 #if defined(AFS_SGI_ENV)
2810 for (i = 0; i < astatSize; i++) {
2811 char name[METER_NAMSZ];
2812 struct vcache *tvc = &tvp[i];
2814 tvc->v.v_number = ++afsvnumbers;
2815 tvc->vc_rwlockid = OSI_NO_LOCKID;
2816 initnsema(&tvc->vc_rwlock, 1,
2817 makesname(name, "vrw", tvc->v.v_number));
2818 #ifndef AFS_SGI53_ENV
2819 initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
2821 #ifndef AFS_SGI62_ENV
2822 initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
2823 #endif /* AFS_SGI62_ENV */
2837 shutdown_vcache(void)
2840 struct afs_cbr *tsp, *nsp;
2842 * XXX We may potentially miss some of the vcaches because, when there are
2843 * no free vcache entries and all the vcache entries are in use, we allocate
2844 * an additional one - admittedly we have almost never seen that occur.
2846 #if !defined(AFS_OSF_ENV)
2847 afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
2849 #ifdef KERNEL_HAVE_PIN
2850 unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
2854 register struct afs_q *tq, *uq;
2855 register struct vcache *tvc;
2856 for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
2860 osi_FreeSmallSpace(tvc->mvid);
2861 tvc->mvid = (struct VenusFid *)0;
2864 aix_gnode_rele(AFSTOV(tvc));
2866 if (tvc->linkData) {
2867 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
2872 * Also free the remaining ones in the Cache
2874 for (i = 0; i < VCSIZE; i++) {
2875 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2877 osi_FreeSmallSpace(tvc->mvid);
2878 tvc->mvid = (struct VenusFid *)0;
2882 afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
2883 #ifdef AFS_AIX32_ENV
2886 vms_delete(tvc->segid);
2888 tvc->segid = tvc->vmh = NULL;
2890 osi_Panic("flushVcache: vm race");
2898 #if defined(AFS_SUN5_ENV)
2904 if (tvc->linkData) {
2905 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
2909 afs_FreeAllAxs(&(tvc->Access));
2915 * Free any leftover callback queue
2917 for (tsp = afs_cbrSpace; tsp; tsp = nsp) {
2919 afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
2923 #if !defined(AFS_OSF_ENV)
2924 freeVCList = Initial_freeVCList = 0;
2926 RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
2927 LOCK_INIT(&afs_xvcb, "afs_xvcb");