2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 * afs_FlushActiveVcaches
38 #include <afsconfig.h>
39 #include "afs/param.h"
44 #include "afs/sysincludes.h" /*Standard vendor system headers */
45 #include "afsincludes.h" /*AFS-based standard headers */
46 #include "afs/afs_stats.h"
47 #include "afs/afs_cbqueue.h"
48 #include "afs/afs_osidnlc.h"
/* Pool limits for the OSF-style vnode-backed vcache pool.  The guarding
 * #ifdef AFS_OSF_ENV line is not visible in this listing; only the
 * matching #endif below is. */
51 afs_int32 afs_maxvcount = 0; /* max number of vcache entries */
52 afs_int32 afs_vcount = 0; /* number of vcache in use now */
53 #endif /* AFS_OSF_ENV */
61 #endif /* AFS_SGI64_ENV */
63 /* Exported variables */
64 afs_rwlock_t afs_xvcache; /*Lock: alloc new stat cache entries */
65 afs_lock_t afs_xvcb; /*Lock: fids on which there are callbacks */
66 struct vcache *freeVCList; /*Free list for stat cache entries */
67 struct vcache *Initial_freeVCList; /*Initial list for above */
68 struct afs_q VLRU; /*vcache LRU */
/* Generation counter for the vcache tables; not referenced in the lines
 * visible here -- presumably bumped on structural changes so scans can
 * detect races.  TODO(review): confirm against the users elsewhere. */
69 afs_int32 vcachegen = 0;
/* NOTE(review): not used in the visible lines; name suggests warnings are
 * escalated to panics when nonzero -- confirm before relying on it. */
70 unsigned int afs_paniconwarn = 0;
/* Fid hash table of all vcache entries, chained via vcache->hnext
 * (see afs_FlushVCache / afs_NewVCache below). */
71 struct vcache *afs_vhashT[VCSIZE];
/* Fid hash table of queued callback-return records, chained via
 * afs_cbr->hash_next / hash_pprev (see afs_InsertHashCBR). */
72 static struct afs_cbr *afs_cbrHashT[CBRSIZE];
73 afs_int32 afs_bulkStatsLost; /* see CBulkStat accounting in afs_FlushVCache */
/* Nonzero => refcount inconsistencies are logged instead of panicking
 * (see the VREFCOUNT check in afs_FlushVCache). */
74 int afs_norefpanic = 0;
76 /* Forward declarations */
77 static afs_int32 afs_QueueVCB(struct vcache *avc);
82 * Generate an index into the hash table for a given Fid.
/* Map a fid onto a slot of afs_cbrHashT by summing its three components
 * modulo CBRSIZE.  (The return-type line and the closing brace are among
 * the source lines missing from this listing.) */
85 afs_HashCBRFid(struct AFSFid *fid) {
86 return (fid->Volume + fid->Vnode + fid->Unique) % CBRSIZE;
92 * Insert a CBR entry into the hash table.
93 * Must be called with afs_xvcb held.
/* Push cbr onto the front of its afs_cbrHashT chain.  The chain is doubly
 * linked via hash_next and the hash_pprev back-pointer (address of the
 * pointer that points at this node), which lets afs_FreeCBR unlink in O(1). */
96 afs_InsertHashCBR(struct afs_cbr *cbr) {
97 int slot = afs_HashCBRFid(&cbr->fid);
99 cbr->hash_next = afs_cbrHashT[slot];
100 if (afs_cbrHashT[slot])
/* Old head's back-pointer now refers to our next field. */
101 afs_cbrHashT[slot]->hash_pprev = &cbr->hash_next;
103 cbr->hash_pprev = &afs_cbrHashT[slot];
104 afs_cbrHashT[slot] = cbr;
111 * Flush the given vcache entry.
114 * avc : Pointer to vcache entry to flush.
115 * slept : Pointer to int to set 1 if we sleep/drop locks, 0 if we don't.
118 * afs_xvcache lock must be held for writing upon entry to
119 * prevent people from changing the vrefCount field, and to
120 * protect the lruq and hnext fields.
121 * LOCK: afs_FlushVCache afs_xvcache W
122 * REFCNT: vcache ref count must be zero on entry except for osf1
123 * RACE: lock is dropped and reobtained, permitting race in caller
/* Flush one vcache entry: detach it from the VLRU and the fid hash chain,
 * release its per-entry resources (mvid, linkData, access list, queued
 * callback), and put it back on the free list (or hand it to the vnode
 * layer on OSF).  Several source lines are missing from this listing, so
 * some branches appear truncated. */
127 afs_FlushVCache(struct vcache *avc, int *slept)
128 { /*afs_FlushVCache */
130 register afs_int32 i, code;
131 register struct vcache **uvc, *wvc;
134 AFS_STATCNT(afs_FlushVCache);
135 afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
136 ICL_TYPE_INT32, avc->states);
139 VN_LOCK(AFSTOV(avc));
/* Let the VM layer flush first; it reports via *slept if it dropped locks. */
143 code = osi_VM_FlushVCache(avc, slept);
147 if (avc->states & CVFlushed) {
/* A free-list entry must not still be on the LRU (and vice versa). */
151 if (avc->nextfree || !avc->vlruq.prev || !avc->vlruq.next) { /* qv afs.h */
152 refpanic("LRU vs. Free inconsistency");
154 avc->states |= CVFlushed;
155 /* pull the entry out of the lruq and put it on the free list */
156 QRemove(&avc->vlruq);
157 avc->vlruq.prev = avc->vlruq.next = (struct afs_q *)0;
159 /* keep track of # of files that we bulk stat'd, but never used
160 * before they got recycled.
162 if (avc->states & CBulkStat)
165 /* remove entry from the hash chain */
166 i = VCHash(&avc->fid);
167 uvc = &afs_vhashT[i];
168 for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
171 avc->hnext = (struct vcache *)NULL;
/* Reached only if the entry was not found on its hash chain. */
176 osi_Panic("flushvcache"); /* not in correct hash bucket */
178 osi_FreeSmallSpace(avc->mvid);
179 avc->mvid = (struct VenusFid *)0;
/* linkData is a NUL-terminated symlink target; free its exact size. */
181 afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
182 avc->linkData = NULL;
184 #if defined(AFS_XBSD_ENV)
185 /* OK, there are no internal vrefCounts, so there shouldn't
186 * be any more refs here. */
188 avc->v->v_data = NULL; /* remove from vnode */
189 avc->v = NULL; /* also drop the ptr to vnode */
192 afs_FreeAllAxs(&(avc->Access));
194 /* we can't really give back callbacks on RO files, since the
195 * server only tracks them on a per-volume basis, and we don't
196 * know whether we still have some other files from the same
198 if ((avc->states & CRO) == 0 && avc->callback) {
201 ObtainWriteLock(&afs_xcbhash, 460);
202 afs_DequeueCallback(avc); /* remove it from queued callbacks list */
203 avc->states &= ~(CStatd | CUnique);
204 ReleaseWriteLock(&afs_xcbhash);
205 afs_symhint_inval(avc);
/* Odd vnode numbers (and CForeign entries) can be directories. */
206 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
207 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
209 osi_dnlc_purgevp(avc);
212 * Next, keep track of which vnodes we've deleted for create's
213 * optimistic synchronization algorithm
216 if (avc->fid.Fid.Vnode & 1)
221 #if !defined(AFS_OSF_ENV)
222 /* put the entry in the free list */
223 avc->nextfree = freeVCList;
225 if (avc->vlruq.prev || avc->vlruq.next) {
226 refpanic("LRU vs. Free inconsistency");
229 /* This should put it back on the vnode free list since usecount is 1 */
232 if (VREFCOUNT(avc) > 0) {
233 VN_UNLOCK(AFSTOV(avc));
234 AFS_RELE(AFSTOV(avc));
236 if (afs_norefpanic) {
/* NOTE(review): message lacks a trailing '\n'; kernel printf output
 * may run together with the next message. */
237 printf("flush vc refcnt < 1");
239 (void)vgone(avc, VX_NOSLEEP, NULL);
241 VN_UNLOCK(AFSTOV(avc));
243 osi_Panic("flush vc refcnt < 1");
245 #endif /* AFS_OSF_ENV */
246 avc->states |= CVFlushed;
251 VN_UNLOCK(AFSTOV(avc));
255 } /*afs_FlushVCache */
261 * The core of the inactive vnode op for all but IRIX.
/* Inactive-vnode handling for all platforms but IRIX: discard any dirty
 * data we have given up pushing back, and finish (or defer) a pending
 * "unlink after last close".  Some body lines are missing from this
 * listing (opening brace, epilogue). */
264 afs_InactiveVCache(struct vcache *avc, struct AFS_UCRED *acred)
266 AFS_STATCNT(afs_inactive);
267 if (avc->states & CDirty) {
268 /* we can't keep trying to push back dirty data forever. Give up. */
269 afs_InvalidateAllSegments(avc); /* turns off dirty bit */
271 avc->states &= ~CMAPPED; /* mainly used by SunOS 4.0.x */
272 avc->states &= ~CDirty; /* Turn it off */
273 if (avc->states & CUnlinked) {
/* If either cache lock is already held we cannot run the unlink now;
 * mark it for deletion on a later pass instead. */
274 if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
275 avc->states |= CUnlinkedDel;
278 afs_remunlink(avc, 1); /* ignore any return code */
287 * Description: allocate a callback return structure from the
288 * free list and return it.
290 * Env: The alloc and free routines are both called with the afs_xvcb lock
291 * held, so we don't have to worry about blocking in osi_Alloc.
/* Free list of callback-return records, refilled in slabs below. */
293 static struct afs_cbr *afs_cbrSpace = 0;
/* afs_AllocCBR: pop a record off afs_cbrSpace, allocating a slab of
 * AFS_NCBRS records when the list is empty.  Called with afs_xvcb held
 * (per the comment above), so allocation here cannot race.  The
 * function's opening lines are missing from this listing. */
297 register struct afs_cbr *tsp;
300 while (!afs_cbrSpace) {
301 if (afs_stats_cmperf.CallBackAlloced >= 2) {
302 /* don't allocate more than 2 * AFS_NCBRS for now */
/* Over budget: flush queued callbacks to recycle records instead. */
304 afs_stats_cmperf.CallBackFlushes++;
308 (struct afs_cbr *)afs_osi_Alloc(AFS_NCBRS *
309 sizeof(struct afs_cbr));
/* Thread the fresh slab into a NULL-terminated singly linked list. */
310 for (i = 0; i < AFS_NCBRS - 1; i++) {
311 tsp[i].next = &tsp[i + 1];
313 tsp[AFS_NCBRS - 1].next = 0;
315 afs_stats_cmperf.CallBackAlloced++;
/* Detach the head record and return it (return statement not visible). */
319 afs_cbrSpace = tsp->next;
326 * Description: free a callback return structure, removing it from all lists.
329 * asp -- the address of the structure to free.
331 * Environment: the xvcb lock is held over these calls.
/* Return a callback-return record to the free list after unlinking it
 * from both lists it may be on.  Caller holds afs_xvcb. */
334 afs_FreeCBR(register struct afs_cbr *asp)
/* Unlink from the per-server cbrs list via the pprev back-pointer... */
336 *(asp->pprev) = asp->next;
338 asp->next->pprev = asp->pprev;
/* ...and from the fid hash chain (see afs_InsertHashCBR)... */
340 *(asp->hash_pprev) = asp->hash_next;
342 asp->hash_next->hash_pprev = asp->hash_pprev;
/* ...then push it onto the free list. */
344 asp->next = afs_cbrSpace;
352 * Description: flush all queued callbacks to all servers.
356 * Environment: holds xvcb lock over RPC to guard against race conditions
357 * when a new callback is granted for the same file later on.
/* Flush all queued callback returns: for every known server, batch up to
 * AFS_MAXCBRSCALL fids at a time and hand them back with
 * RXAFS_GiveUpCallBacks.  Holds afs_xvcb across the RPCs (see header
 * comment) to avoid racing a new callback grant for the same file.
 * The safety1/2/3 counters bound each loop against corrupted lists.
 * Numerous lines are missing from this listing. */
360 afs_FlushVCBs(afs_int32 lockit)
362 struct AFSFid *tfids;
363 struct AFSCallBack callBacks[1];
364 struct AFSCBFids fidArray;
365 struct AFSCBs cbArray;
367 struct afs_cbr *tcbrp;
371 struct vrequest treq;
373 int safety1, safety2, safety3;
374 XSTATS_DECLS if ((code = afs_InitReq(&treq, afs_osi_credp)))
376 treq.flags |= O_NONBLOCK;
/* Scratch buffer for one RPC's worth of fids; freed at the bottom. */
377 tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
380 MObtainWriteLock(&afs_xvcb, 273);
381 ObtainReadLock(&afs_xserver);
382 for (i = 0; i < NSERVERS; i++) {
383 for (safety1 = 0, tsp = afs_servers[i];
384 tsp && safety1 < afs_totalServers + 10;
385 tsp = tsp->next, safety1++) {
/* Nothing queued for this server. */
387 if (tsp->cbrs == (struct afs_cbr *)0)
390 /* otherwise, grab a block of AFS_MAXCBRSCALL from the list
391 * and make an RPC, over and over again.
393 tcount = 0; /* number found so far */
394 for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
395 if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
396 /* if buffer is full, or we've queued all we're going
397 * to from this server, we should flush out the
400 fidArray.AFSCBFids_len = tcount;
401 fidArray.AFSCBFids_val = (struct AFSFid *)tfids;
402 cbArray.AFSCBs_len = 1;
403 cbArray.AFSCBs_val = callBacks;
404 memset(&callBacks[0], 0, sizeof(callBacks[0]));
405 callBacks[0].CallBackType = CB_EXCLUSIVE;
406 for (safety3 = 0; safety3 < MAXHOSTS * 2; safety3++) {
407 tc = afs_ConnByHost(tsp, tsp->cell->fsport,
408 tsp->cell->cellNum, &treq, 0,
412 (AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
415 RXAFS_GiveUpCallBacks(tc->id, &fidArray,
423 AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS, SHARED_LOCK,
428 /* ignore return code, since callbacks may have
429 * been returned anyway, we shouldn't leave them
430 * around to be returned again.
432 * Next, see if we are done with this server, and if so,
433 * break to deal with the next one.
439 /* if to flush full buffer */
440 /* if we make it here, we have an entry at the head of cbrs,
441 * which we should copy to the file ID array and then free.
444 tfids[tcount++] = tcbrp->fid;
446 /* Freeing the CBR will unlink it from the server's CBR list */
448 } /* while loop for this one server */
449 if (safety2 > afs_cacheStats) {
450 afs_warn("possible internal error afs_flushVCBs (%d)\n",
453 } /* for loop for this hash chain */
454 } /* loop through all hash chains */
455 if (safety1 > afs_totalServers + 2) {
457 ("AFS internal error (afs_flushVCBs) (%d > %d), continuing...\n",
458 safety1, afs_totalServers + 2);
460 osi_Panic("afs_flushVCBS safety1");
463 ReleaseReadLock(&afs_xserver);
465 MReleaseWriteLock(&afs_xvcb);
466 afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
474 * Queue a callback on the given fid.
480 * Locks the xvcb lock.
481 * Called when the xvcache lock is already held.
/* Queue a callback return for avc's fid: allocate a CBR record, link it
 * onto the callback server's doubly linked cbrs list, and insert it into
 * the fid hash so afs_RemoveVCB can find it.  Takes afs_xvcb itself;
 * caller already holds afs_xvcache (per the header comment above). */
485 afs_QueueVCB(struct vcache *avc)
488 struct afs_cbr *tcbp;
490 AFS_STATCNT(afs_QueueVCB);
491 /* The callback is really just a struct server ptr. */
492 tsp = (struct server *)(avc->callback);
494 /* we now have a pointer to the server, so we just allocate
495 * a queue entry and queue it.
497 MObtainWriteLock(&afs_xvcb, 274);
498 tcbp = afs_AllocCBR();
499 tcbp->fid = avc->fid.Fid;
/* Head-insert into the server's list, maintaining pprev back-pointers. */
501 tcbp->next = tsp->cbrs;
503 tsp->cbrs->pprev = &tcbp->next;
506 tcbp->pprev = &tsp->cbrs;
508 afs_InsertHashCBR(tcbp);
510 /* now release locks and return */
511 MReleaseWriteLock(&afs_xvcb);
520 * Remove a queued callback for a given Fid.
523 * afid: The fid we want cleansed of queued callbacks.
526 * Locks xvcb and xserver locks.
527 * Typically called with xdcache, xvcache and/or individual vcache
/* Walk the CBR fid-hash chain for afid and drop any queued callback
 * returns that match it exactly (Volume, Vnode, Unique).  ncbr caches the
 * successor so the current record can be freed mid-iteration. */
532 afs_RemoveVCB(struct VenusFid *afid)
535 struct afs_cbr *cbr, *ncbr;
537 AFS_STATCNT(afs_RemoveVCB);
538 MObtainWriteLock(&afs_xvcb, 275);
540 slot = afs_HashCBRFid(&afid->Fid);
541 ncbr = afs_cbrHashT[slot];
/* Save the successor before the current entry may be freed. */
545 ncbr = cbr->hash_next;
547 if (afid->Fid.Volume == cbr->fid.Volume &&
548 afid->Fid.Vnode == cbr->fid.Vnode &&
549 afid->Fid.Unique == cbr->fid.Unique) {
554 MReleaseWriteLock(&afs_xvcb);
557 #if defined(AFS_LINUX22_ENV) && !defined(AFS_LINUX26_ENV)
/* Linux 2.2/2.4 only: depth-first walk of parent's dentry subtree (same
 * traversal as fs/dcache.c select_parent), collecting unreferenced
 * dentries on a private afs_dentry_unused list and then unhashing them.
 * Several lines are missing from this listing (loop heads, braces). */
560 __shrink_dcache_parent(struct dentry *parent)
562 struct dentry *this_parent = parent;
563 struct list_head *next;
565 LIST_HEAD(afs_dentry_unused);
568 next = this_parent->d_subdirs.next;
570 while (next != &this_parent->d_subdirs) {
571 struct list_head *tmp = next;
572 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
/* Unreferenced dentry: move it to our private reclaim list. */
574 if (!DCOUNT(dentry)) {
575 list_del(&dentry->d_lru);
576 list_add(&dentry->d_lru, afs_dentry_unused.prev);
580 * Descend a level if the d_subdirs list is non-empty.
582 if (!list_empty(&dentry->d_subdirs)) {
583 this_parent = dentry;
588 * All done at this level ... ascend and resume the search.
590 if (this_parent != parent) {
591 next = this_parent->d_child.next;
592 this_parent = this_parent->d_parent;
/* Second phase: drain the collected list, unhashing each dentry. */
597 struct dentry *dentry;
598 struct list_head *tmp;
600 tmp = afs_dentry_unused.prev;
602 if (tmp == &afs_dentry_unused)
604 #ifdef AFS_LINUX24_ENV
609 #endif /* AFS_LINUX24_ENV */
610 dentry = list_entry(tmp, struct dentry, d_lru);
612 #ifdef AFS_LINUX24_ENV
613 /* Unused dentry with a count? */
618 #ifdef AFS_LINUX24_ENV
619 list_del_init(&dentry->d_hash); /* d_drop */
621 list_del(&dentry->d_hash);
622 INIT_LIST_HEAD(&dentry->d_hash);
623 #endif /* AFS_LINUX24_ENV */
632 /* afs_TryFlushDcacheChildren -- Shakes loose vcache references held by
633 * children of the dentry
635 * LOCKS -- Called with afs_xvcache write locked. Drops and reaquires
636 * AFS_GLOCK, so it can call dput, which may call iput, but
637 * keeps afs_xvcache exclusively.
639 * Tree traversal algorithm from fs/dcache.c: select_parent()
/* Linux 2.2/2.4 only: shake loose vcache references held by dentries
 * aliasing tvc's inode -- shrink each alias's subtree, then unhash
 * unreferenced aliases.  Per the header comment above, called with
 * afs_xvcache write-locked; may drop/reacquire AFS_GLOCK via dput.
 * Many lines are missing from this listing. */
642 afs_TryFlushDcacheChildren(struct vcache *tvc)
644 struct inode *ip = AFSTOI(tvc);
645 struct dentry *this_parent;
646 struct list_head *next;
647 struct list_head *cur;
648 struct list_head *head = &ip->i_dentry;
649 struct dentry *dentry;
653 #ifndef old_vcache_scheme
/* Walk every dentry aliasing this inode (i_dentry / d_alias list). */
656 while ((cur = cur->next) != head) {
657 dentry = list_entry(cur, struct dentry, d_alias);
/* Prune the alias's subtree first so child refs don't pin it. */
659 if (!list_empty(&dentry->d_hash) && !list_empty(&dentry->d_subdirs))
660 __shrink_dcache_parent(dentry);
662 if (!DCOUNT(dentry)) {
664 #ifdef AFS_LINUX24_ENV
665 list_del_init(&dentry->d_hash); /* d_drop */
667 list_del(&dentry->d_hash);
668 INIT_LIST_HEAD(&dentry->d_hash);
669 #endif /* AFS_LINUX24_ENV */
/* old_vcache_scheme variant of the same alias walk, with tracing. */
681 while ((cur = cur->next) != head) {
682 dentry = list_entry(cur, struct dentry, d_alias);
684 afs_Trace3(afs_iclSetp, CM_TRACE_TRYFLUSHDCACHECHILDREN,
685 ICL_TYPE_POINTER, ip, ICL_TYPE_STRING,
686 dentry->d_parent->d_name.name, ICL_TYPE_STRING,
687 dentry->d_name.name);
689 if (!DCOUNT(dentry)) {
702 #endif /* AFS_LINUX22_ENV && !AFS_LINUX26_ENV */
708 * This routine is responsible for allocating a new cache entry
709 * from the free list. It formats the cache entry and inserts it
710 * into the appropriate hash tables. It must be called with
711 * afs_xvcache write-locked so as to prevent several processes from
712 * trying to create a new cache entry simultaneously.
715 * afid : The file id of the file whose cache entry is being
718 /* LOCK: afs_NewVCache afs_xvcache W */
/* Allocate and initialize a new vcache entry for afid, recycling an old
 * entry off the VLRU when the pool is exhausted, then insert the result
 * into the fid hash and the VLRU.  Must be called with afs_xvcache
 * write-locked (see header comment above).  This is heavily #ifdef'd
 * per-platform code, and a large number of lines are missing from this
 * listing, so many branches appear truncated. */
720 afs_NewVCache(struct VenusFid *afid, struct server *serverp)
724 afs_int32 anumber = VCACHE_FREE;
726 struct gnode *gnodepnt;
729 struct vm_info *vm_info_ptr;
730 #endif /* AFS_MACH_ENV */
733 #endif /* AFS_OSF_ENV */
734 struct afs_q *tq, *uq;
737 AFS_STATCNT(afs_NewVCache);
/* --- OSF path: vnode-backed pool with a hard cap --- */
740 if (afs_vcount >= afs_maxvcount) {
743 * If we are using > 33 % of the total system vnodes for AFS vcache
744 * entries or we are using the maximum number of vcache entries,
745 * then free some. (if our usage is > 33% we should free some, if
746 * our usage is > afs_maxvcount, set elsewhere to 0.5*nvnode,
747 * we _must_ free some -- no choice).
749 if (((3 * afs_vcount) > nvnode) || (afs_vcount >= afs_maxvcount)) {
751 struct afs_q *tq, *uq;
/* Scan the VLRU from the cold end, flushing idle entries. */
756 for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
759 if (tvc->states & CVFlushed)
760 refpanic("CVFlushed on VLRU");
761 else if (i++ > afs_maxvcount)
762 refpanic("Exceeded pool of AFS vnodes(VLRU cycle?)");
763 else if (QNext(uq) != tq)
764 refpanic("VLRU inconsistent");
765 else if (VREFCOUNT(tvc) < 1)
766 refpanic("refcnt 0 on VLRU");
768 if (VREFCOUNT(tvc) == 1 && tvc->opens == 0
769 && (tvc->states & CUnlinkedDel) == 0) {
770 code = afs_FlushVCache(tvc, &fv_slept);
/* Flush dropped locks; restart the scan from the cold end. */
777 continue; /* start over - may have raced. */
783 if (anumber == VCACHE_FREE) {
784 printf("NewVCache: warning none freed, using %d of %d\n",
785 afs_vcount, afs_maxvcount);
786 if (afs_vcount >= afs_maxvcount) {
787 osi_Panic("NewVCache - none freed");
788 /* XXX instead of panicing, should do afs_maxvcount++
789 * and magic up another one */
795 if (getnewvnode(MOUNT_AFS, &Afs_vnodeops, &nvc)) {
796 /* What should we do ???? */
797 osi_Panic("afs_NewVCache: no more vnodes");
802 tvc->nextfree = NULL;
804 #else /* AFS_OSF_ENV */
805 /* pull out a free cache entry */
/* Non-OSF path: same LRU scan, sized by afs_cacheStats. */
808 for (tq = VLRU.prev; (anumber > 0) && (tq != &VLRU); tq = uq) {
812 if (tvc->states & CVFlushed) {
813 refpanic("CVFlushed on VLRU");
814 } else if (i++ > 2 * afs_cacheStats) { /* even allowing for a few xallocs... */
815 refpanic("Increase -stat parameter of afsd(VLRU cycle?)");
816 } else if (QNext(uq) != tq) {
817 refpanic("VLRU inconsistent");
819 #ifdef AFS_DARWIN_ENV
/* Darwin keeps DARWIN_REFBASE extra base refs, plus one if UBC info
 * exists; anything below that indicates a refcount bug. */
820 if ((VREFCOUNT(tvc) < DARWIN_REFBASE) ||
821 (VREFCOUNT(tvc) < 1+DARWIN_REFBASE &&
822 UBCINFOEXISTS(&tvc->v))) {
824 DARWIN_REFBASE + (UBCINFOEXISTS(&tvc->v) ? 1 : 0));
826 if (tvc->opens == 0 && ((tvc->states & CUnlinkedDel) == 0)
827 && VREFCOUNT(tvc) == DARWIN_REFBASE+1
828 && UBCINFOEXISTS(&tvc->v)) {
829 osi_VM_TryReclaim(tvc, &fv_slept);
833 continue; /* start over - may have raced. */
836 #elif defined(AFS_LINUX22_ENV)
/* Linux: drop dentry references pinning this vcache before reuse. */
837 if (tvc != afs_globalVp && VREFCOUNT(tvc) && tvc->opens == 0) {
838 #if defined(AFS_LINUX26_ENV)
840 d_prune_aliases(AFSTOI(tvc));
843 afs_TryFlushDcacheChildren(tvc);
848 if (VREFCOUNT(tvc) ==
849 #ifdef AFS_DARWIN_ENV
855 && (tvc->states & CUnlinkedDel) == 0) {
856 #if defined(AFS_XBSD_ENV)
858 * vgone() reclaims the vnode, which calls afs_FlushVCache(),
859 * then it puts the vnode on the free list.
860 * If we don't do this we end up with a cleaned vnode that's
861 * not on the free list.
862 * XXX assume FreeBSD is the same for now.
867 code = afs_FlushVCache(tvc, &fv_slept);
875 continue; /* start over - may have raced. */
883 /* none free, making one is better than a panic */
884 afs_stats_cmperf.vcacheXAllocs++; /* count in case we have a leak */
885 tvc = (struct vcache *)afs_osi_Alloc(sizeof(struct vcache));
886 #ifdef KERNEL_HAVE_PIN
887 pin((char *)tvc, sizeof(struct vcache)); /* XXX */
890 /* In case it still comes here we need to fill this */
891 tvc->v.v_vm_info = VM_INFO_NULL;
892 vm_info_init(tvc->v.v_vm_info);
893 /* perhaps we should also do close_flush on non-NeXT mach systems;
894 * who knows; we don't currently have the sources.
896 #endif /* AFS_MACH_ENV */
897 #if defined(AFS_SGI_ENV)
/* SGI: give the freshly xalloc'd entry its semaphores and vnode number. */
899 char name[METER_NAMSZ];
900 memset(tvc, 0, sizeof(struct vcache));
901 tvc->v.v_number = ++afsvnumbers;
902 tvc->vc_rwlockid = OSI_NO_LOCKID;
903 initnsema(&tvc->vc_rwlock, 1,
904 makesname(name, "vrw", tvc->v.v_number));
905 #ifndef AFS_SGI53_ENV
906 initnsema(&tvc->v.v_sync, 0,
907 makesname(name, "vsy", tvc->v.v_number));
909 #ifndef AFS_SGI62_ENV
910 initnlock(&tvc->v.v_lock,
911 makesname(name, "vlk", tvc->v.v_number));
914 #endif /* AFS_SGI_ENV */
916 tvc = freeVCList; /* take from free list */
917 freeVCList = tvc->nextfree;
918 tvc->nextfree = NULL;
920 #endif /* AFS_OSF_ENV */
/* Mach: preserve the vm_info pointer across the memset below. */
923 vm_info_ptr = tvc->v.v_vm_info;
924 #endif /* AFS_MACH_ENV */
926 #if defined(AFS_XBSD_ENV)
928 panic("afs_NewVCache(): free vcache with vnode attached");
931 #if !defined(AFS_SGI_ENV) && !defined(AFS_OSF_ENV)
932 memset((char *)tvc, 0, sizeof(struct vcache));
937 RWLOCK_INIT(&tvc->lock, "vcache lock");
938 #if defined(AFS_SUN5_ENV)
939 RWLOCK_INIT(&tvc->vlock, "vcache vlock");
940 #endif /* defined(AFS_SUN5_ENV) */
943 tvc->v.v_vm_info = vm_info_ptr;
944 tvc->v.v_vm_info->pager = MEMORY_OBJECT_NULL;
945 #endif /* AFS_MACH_ENV */
948 afs_nbsd_getnewvnode(tvc); /* includes one refcount */
950 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
957 #ifdef AFS_FBSD50_ENV
958 if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
960 if (getnewvnode(VT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
962 panic("afs getnewvnode"); /* can't happen */
964 if (tvc->v != NULL) {
965 /* I'd like to know if this ever happens...
966 We don't drop global for the rest of this function,
967 so if we do lose the race, the other thread should
968 have found the same vnode and finished initializing
969 the vcache entry. Is it conceivable that this vcache
970 entry could be recycled during this interval? If so,
971 then there probably needs to be some sort of additional
972 mutual exclusion (an Embryonic flag would suffice).
974 printf("afs_NewVCache: lost the race\n");
978 tvc->v->v_data = tvc;
979 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
/* --- common field initialization --- */
982 tvc->parentVnode = 0;
984 tvc->linkData = NULL;
987 tvc->execsOrWriters = 0;
991 tvc->last_looker = 0;
993 tvc->asynchrony = -1;
995 afs_symhint_inval(tvc);
997 tvc->flushDV.low = tvc->flushDV.high = AFS_MAXDV;
1000 tvc->truncPos = AFS_NOTRUNC; /* don't truncate until we need to */
1001 hzero(tvc->m.DataVersion); /* in case we copy it into flushDV */
1002 #if defined(AFS_LINUX22_ENV)
/* Linux: hand-initialize the embedded inode and address_space, since
 * this inode was not produced by the kernel's inode allocator. */
1004 struct inode *ip = AFSTOI(tvc);
1005 struct address_space *mapping = &ip->i_data;
1007 #if defined(AFS_LINUX26_ENV)
1008 inode_init_once(ip);
1010 sema_init(&ip->i_sem, 1);
1011 INIT_LIST_HEAD(&ip->i_hash);
1012 INIT_LIST_HEAD(&ip->i_dentry);
1013 #if defined(AFS_LINUX24_ENV)
1014 sema_init(&ip->i_zombie, 1);
1015 init_waitqueue_head(&ip->i_wait);
1016 spin_lock_init(&ip->i_data.i_shared_lock);
1017 #ifdef STRUCT_ADDRESS_SPACE_HAS_PAGE_LOCK
1018 spin_lock_init(&ip->i_data.page_lock);
1020 INIT_LIST_HEAD(&ip->i_data.clean_pages);
1021 INIT_LIST_HEAD(&ip->i_data.dirty_pages);
1022 INIT_LIST_HEAD(&ip->i_data.locked_pages);
1023 INIT_LIST_HEAD(&ip->i_dirty_buffers);
1024 #ifdef STRUCT_INODE_HAS_I_DIRTY_DATA_BUFFERS
1025 INIT_LIST_HEAD(&ip->i_dirty_data_buffers);
1027 #ifdef STRUCT_INODE_HAS_I_DEVICES
1028 INIT_LIST_HEAD(&ip->i_devices);
1030 #ifdef STRUCT_INODE_HAS_I_TRUNCATE_SEM
1031 init_rwsem(&ip->i_truncate_sem);
1033 #ifdef STRUCT_INODE_HAS_I_ALLOC_SEM
1034 init_rwsem(&ip->i_alloc_sem);
1037 #else /* AFS_LINUX22_ENV */
1038 sema_init(&ip->i_atomic_write, 1);
1039 init_waitqueue(&ip->i_wait);
1043 #if defined(AFS_LINUX24_ENV)
1045 ip->i_mapping = mapping;
1046 #ifdef STRUCT_ADDRESS_SPACE_HAS_GFP_MASK
1047 ip->i_data.gfp_mask = GFP_HIGHUSER;
1049 #if defined(AFS_LINUX26_ENV)
1050 mapping_set_gfp_mask(mapping, GFP_HIGHUSER);
1052 extern struct backing_dev_info afs_backing_dev_info;
1054 mapping->backing_dev_info = &afs_backing_dev_info;
1059 #if !defined(AFS_LINUX26_ENV)
1061 ip->i_dev = afs_globalVFS->s_dev;
1063 #ifdef STRUCT_INODE_HAS_I_SECURITY
1064 ip->i_security = NULL;
1065 if (security_inode_alloc(ip))
1066 panic("Cannot allocate inode security");
1069 ip->i_sb = afs_globalVFS;
1070 put_inode_on_dummy_list(ip);
1075 /* Hold it for the LRU (should make count 2) */
1076 VN_HOLD(AFSTOV(tvc));
1077 #else /* AFS_OSF_ENV */
1078 #if !defined(AFS_XBSD_ENV)
1079 VREFCOUNT_SET(tvc, 1); /* us */
1080 #endif /* AFS_XBSD_ENV */
1081 #endif /* AFS_OSF_ENV */
1082 #ifdef AFS_AIX32_ENV
1083 LOCK_INIT(&tvc->pvmlock, "vcache pvmlock");
1084 tvc->vmh = tvc->segid = NULL;
1087 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV) || defined(AFS_SUN5_ENV)
1088 #if defined(AFS_SUN5_ENV)
1089 rw_init(&tvc->rwlock, "vcache rwlock", RW_DEFAULT, NULL);
1091 #if defined(AFS_SUN55_ENV)
1092 /* This is required if the kaio (kernel aynchronous io)
1093 ** module is installed. Inside the kernel, the function
1094 ** check_vp( common/os/aio.c) checks to see if the kernel has
1095 ** to provide asynchronous io for this vnode. This
1096 ** function extracts the device number by following the
1097 ** v_data field of the vnode. If we do not set this field
1098 ** then the system panics. The value of the v_data field
1099 ** is not really important for AFS vnodes because the kernel
1100 ** does not do asynchronous io for regular files. Hence,
1101 ** for the time being, we fill up the v_data field with the
1102 ** vnode pointer itself. */
1103 tvc->v.v_data = (char *)tvc;
1104 #endif /* AFS_SUN55_ENV */
1106 afs_BozonInit(&tvc->pvnLock, tvc);
1110 tvc->callback = serverp; /* to minimize chance that clear
1111 * request is lost */
1112 /* initialize vnode data, note vrefCount is v.v_count */
1114 /* Don't forget to free the gnode space */
1115 tvc->v.v_gnode = gnodepnt =
1116 (struct gnode *)osi_AllocSmallSpace(sizeof(struct gnode));
1117 memset((char *)gnodepnt, 0, sizeof(struct gnode));
1119 #ifdef AFS_SGI64_ENV
1120 memset((void *)&(tvc->vc_bhv_desc), 0, sizeof(tvc->vc_bhv_desc));
1121 bhv_desc_init(&(tvc->vc_bhv_desc), tvc, tvc, &Afs_vnodeops);
1122 #ifdef AFS_SGI65_ENV
1123 vn_bhv_head_init(&(tvc->v.v_bh), "afsvp");
1124 vn_bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
1126 bhv_head_init(&(tvc->v.v_bh));
1127 bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
1129 #ifdef AFS_SGI65_ENV
1130 tvc->v.v_mreg = tvc->v.v_mregb = (struct pregion *)tvc;
1131 #ifdef VNODE_TRACING
1132 tvc->v.v_trace = ktrace_alloc(VNODE_TRACE_SIZE, 0);
1134 init_bitlock(&tvc->v.v_pcacheflag, VNODE_PCACHE_LOCKBIT, "afs_pcache",
1136 init_mutex(&tvc->v.v_filocksem, MUTEX_DEFAULT, "afsvfl", (long)tvc);
1137 init_mutex(&tvc->v.v_buf_lock, MUTEX_DEFAULT, "afsvnbuf", (long)tvc);
1139 vnode_pcache_init(&tvc->v);
1140 #if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
1141 /* Above define is never true except in SGI test kernels. */
/* NOTE(review): mismatched parentheses below -- the `(` after `&` is
 * never closed, so this line cannot compile if the (normally-false)
 * guard above is ever enabled.  Should presumably read
 * init_bitlock(&(tvc->v.v_flag), VLOCK, "vnode", tvc->v.v_number); */
1142 init_bitlock(&(tvc->v.v_flag, VLOCK, "vnode", tvc->v.v_number);
1144 #ifdef INTR_KTHREADS
1145 AFS_VN_INIT_BUF_LOCK(&(tvc->v));
1148 SetAfsVnode(AFSTOV(tvc));
1149 #endif /* AFS_SGI64_ENV */
1150 #ifdef AFS_DARWIN_ENV
1151 tvc->v.v_ubcinfo = UBC_INFO_NULL;
1152 lockinit(&tvc->rwlock, PINOD, "vcache rwlock", 0, 0);
1153 cache_purge(AFSTOV(tvc));
1154 tvc->v.v_data = tvc;
1155 tvc->v.v_tag = VT_AFS;
1156 /* VLISTNONE(&tvc->v); */
1157 tvc->v.v_freelist.tqe_next = 0;
1158 tvc->v.v_freelist.tqe_prev = (struct vnode **)0xdeadb;
1159 tvc->vrefCount+=DARWIN_REFBASE;
1162 * The proper value for mvstat (for root fids) is setup by the caller.
/* Fid 1.1 is a volume root. */
1165 if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
1167 if (afs_globalVFS == 0)
1168 osi_Panic("afs globalvfs");
1169 vSetVfsp(tvc, afs_globalVFS);
1170 vSetType(tvc, VREG);
1172 tvc->v.v_vfsnext = afs_globalVFS->vfs_vnodes; /* link off vfs */
1173 tvc->v.v_vfsprev = NULL;
1174 afs_globalVFS->vfs_vnodes = &tvc->v;
1175 if (tvc->v.v_vfsnext != NULL)
1176 tvc->v.v_vfsnext->v_vfsprev = &tvc->v;
1177 tvc->v.v_next = gnodepnt->gn_vnode; /*Single vnode per gnode for us! */
1178 gnodepnt->gn_vnode = &tvc->v;
1181 tvc->v.g_dev = ((struct mount *)afs_globalVFS->vfs_data)->m_dev;
1183 #if defined(AFS_DUX40_ENV)
1184 insmntque(tvc, afs_globalVFS, &afs_ubcops);
1187 /* Is this needed??? */
1188 insmntque(tvc, afs_globalVFS);
1189 #endif /* AFS_OSF_ENV */
1190 #endif /* AFS_DUX40_ENV */
1191 #if defined(AFS_SGI_ENV)
/* SGI: sanity-check that the recycled vnode is fully quiescent. */
1192 VN_SET_DPAGES(&(tvc->v), (struct pfdat *)NULL);
1193 osi_Assert((tvc->v.v_flag & VINACT) == 0);
1195 osi_Assert(VN_GET_PGCNT(&(tvc->v)) == 0);
1196 osi_Assert(tvc->mapcnt == 0 && tvc->vc_locktrips == 0);
1197 osi_Assert(tvc->vc_rwlockid == OSI_NO_LOCKID);
1198 osi_Assert(tvc->v.v_filocks == NULL);
1199 #if !defined(AFS_SGI65_ENV)
1200 osi_Assert(tvc->v.v_filocksem == NULL);
1202 osi_Assert(tvc->cred == NULL);
1203 #ifdef AFS_SGI64_ENV
1204 vnode_pcache_reinit(&tvc->v);
1205 tvc->v.v_rdev = NODEV;
1207 vn_initlist((struct vnlist *)&tvc->v);
1209 #endif /* AFS_SGI_ENV */
1211 osi_dnlc_purgedp(tvc); /* this may be overkill */
1212 memset((char *)&(tvc->quick), 0, sizeof(struct vtodc));
1213 memset((char *)&(tvc->callsort), 0, sizeof(struct afs_q));
/* Finally: insert into the fid hash and the warm end of the VLRU,
 * cross-checking list integrity before and after. */
1217 tvc->hnext = afs_vhashT[i];
1218 afs_vhashT[i] = tvc;
1219 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
1220 refpanic("NewVCache VLRU inconsistent");
1222 QAdd(&VLRU, &tvc->vlruq); /* put in lruq */
1223 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
1224 refpanic("NewVCache VLRU inconsistent2");
1226 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
1227 refpanic("NewVCache VLRU inconsistent3");
1229 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
1230 refpanic("NewVCache VLRU inconsistent4");
1236 } /*afs_NewVCache */
1240 * afs_FlushActiveVcaches
1246 * doflocks : Do we handle flocks?
1248 /* LOCK: afs_FlushActiveVcaches afs_xvcache N */
/* Periodic sweep over every hashed vcache entry: (1) if doflocks, extend
 * server locks for entries holding flocks via RXAFS_ExtendLock; (2) finish
 * deferred work flagged by CCore (store-on-last-reference) or
 * CUnlinkedDel (deferred unlink).  Holds afs_xvcache only as a read lock,
 * dropping it around per-entry work.  Lines are missing from this
 * listing, so some branches appear truncated. */
1250 afs_FlushActiveVcaches(register afs_int32 doflocks)
1252 register struct vcache *tvc;
1254 register struct conn *tc;
1255 register afs_int32 code;
1256 register struct AFS_UCRED *cred = NULL;
1257 struct vrequest treq, ureq;
1258 struct AFSVolSync tsync;
1260 XSTATS_DECLS AFS_STATCNT(afs_FlushActiveVcaches);
1261 ObtainReadLock(&afs_xvcache);
1262 for (i = 0; i < VCSIZE; i++) {
1263 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
1264 if (doflocks && tvc->flockCount != 0) {
1265 /* if this entry has an flock, send a keep-alive call out */
/* Drop the hash lock around the RPC; reacquired below. */
1267 ReleaseReadLock(&afs_xvcache);
1268 ObtainWriteLock(&tvc->lock, 51);
1270 afs_InitReq(&treq, afs_osi_credp);
1271 treq.flags |= O_NONBLOCK;
1273 tc = afs_Conn(&tvc->fid, &treq, SHARED_LOCK);
1275 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
1278 RXAFS_ExtendLock(tc->id,
1279 (struct AFSFid *)&tvc->fid.Fid,
/* afs_Analyze retries with another connection on failure. */
1285 } while (afs_Analyze
1286 (tc, code, &tvc->fid, &treq,
1287 AFS_STATS_FS_RPCIDX_EXTENDLOCK, SHARED_LOCK, NULL));
1289 ReleaseWriteLock(&tvc->lock);
1290 ObtainReadLock(&afs_xvcache);
1294 if ((tvc->states & CCore) || (tvc->states & CUnlinkedDel)) {
1296 * Don't let it evaporate in case someone else is in
1297 * this code. Also, drop the afs_xvcache lock while
1298 * getting vcache locks.
1301 ReleaseReadLock(&afs_xvcache);
1302 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
1303 afs_BozonLock(&tvc->pvnLock, tvc);
1305 #if defined(AFS_SGI_ENV)
1307 * That's because if we come in via the CUnlinkedDel bit state path we'll be have 0 refcnt
1309 osi_Assert(VREFCOUNT(tvc) > 0);
1310 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1312 ObtainWriteLock(&tvc->lock, 52);
1313 if (tvc->states & CCore) {
1314 tvc->states &= ~CCore;
1315 /* XXXX Find better place-holder for cred XXXX */
/* linkData is borrowed here to stash the credential saved when
 * CCore was set (see the "Matches write code" comment below). */
1316 cred = (struct AFS_UCRED *)tvc->linkData;
1317 tvc->linkData = NULL; /* XXX */
1318 afs_InitReq(&ureq, cred);
1319 afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
1320 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
1321 tvc->execsOrWriters);
1322 code = afs_StoreOnLastReference(tvc, &ureq);
1323 ReleaseWriteLock(&tvc->lock);
1324 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
1325 afs_BozonUnlock(&tvc->pvnLock, tvc);
1327 hzero(tvc->flushDV);
/* VNOVNODE just means the file vanished; no warning needed. */
1330 if (code && code != VNOVNODE) {
1331 afs_StoreWarn(code, tvc->fid.Fid.Volume,
1332 /* /dev/console */ 1);
1334 } else if (tvc->states & CUnlinkedDel) {
1338 ReleaseWriteLock(&tvc->lock);
1339 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
1340 afs_BozonUnlock(&tvc->pvnLock, tvc);
1342 #if defined(AFS_SGI_ENV)
1343 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1345 afs_remunlink(tvc, 0);
1346 #if defined(AFS_SGI_ENV)
1347 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1350 /* lost (or won, perhaps) the race condition */
1351 ReleaseWriteLock(&tvc->lock);
1352 #if defined(AFS_SUN_ENV) || defined(AFS_ALPHA_ENV)
1353 afs_BozonUnlock(&tvc->pvnLock, tvc);
1356 #if defined(AFS_SGI_ENV)
1357 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1359 ObtainReadLock(&afs_xvcache);
1365 AFS_RELE(AFSTOV(tvc));
1367 /* Matches write code setting CCore flag */
1371 #ifdef AFS_DARWIN_ENV
1372 if (VREFCOUNT(tvc) == 1+DARWIN_REFBASE
1373 && UBCINFOEXISTS(&tvc->v)) {
1375 panic("flushactive open, hasubc, but refcnt 1");
1376 osi_VM_TryReclaim(tvc, 0);
1381 ReleaseReadLock(&afs_xvcache);
1389 * Make sure a cache entry is up-to-date status-wise.
1391 * NOTE: everywhere that calls this can potentially be sped up
1392 * by checking CStatd first, and avoiding doing the InitReq
1393 * if this is up-to-date.
1395 * Anymore, the only places that call this KNOW already that the
1396 * vcache is not up-to-date, so we don't screw around.
1399 * avc : Ptr to vcache entry to verify.
/* Bring avc's status up to date: if CStatd is already set, nothing to do;
 * otherwise clear the stale status/callback state, purge possibly-invalid
 * name-cache entries, and refetch via afs_GetVCache.  Per the header
 * comment above, callers already know the entry is stale.  Some lines
 * are missing from this listing. */
1404 afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
1406 register struct vcache *tvc;
1408 AFS_STATCNT(afs_VerifyVCache);
1410 #if defined(AFS_OSF_ENV)
/* OSF: a wired (mmap'd) vnode cannot be re-stat'd safely; bail early. */
1411 ObtainReadLock(&avc->lock);
1412 if (afs_IsWired(avc)) {
1413 ReleaseReadLock(&avc->lock);
1416 ReleaseReadLock(&avc->lock);
1417 #endif /* AFS_OSF_ENV */
1418 /* otherwise we must fetch the status info */
1420 ObtainWriteLock(&avc->lock, 53);
/* Re-check under the write lock: someone may have refreshed it. */
1421 if (avc->states & CStatd) {
1422 ReleaseWriteLock(&avc->lock);
1425 ObtainWriteLock(&afs_xcbhash, 461);
1426 avc->states &= ~(CStatd | CUnique);
1427 avc->callback = NULL;
1428 afs_DequeueCallback(avc);
1429 ReleaseWriteLock(&afs_xcbhash);
1430 ReleaseWriteLock(&avc->lock);
1432 /* since we've been called back, or the callback has expired,
1433 * it's possible that the contents of this directory, or this
1434 * file's name have changed, thus invalidating the dnlc contents.
1436 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
1437 osi_dnlc_purgedp(avc);
1439 osi_dnlc_purgevp(avc);
1441 /* fetch the status info */
1442 tvc = afs_GetVCache(&avc->fid, areq, NULL, avc);
1445 /* Put it back; caller has already incremented vrefCount */
1449 } /*afs_VerifyVCache */
1456 * Simple copy of stat info into cache.
1459 * avc : Ptr to vcache entry involved.
1460 * astat : Ptr to stat info to copy.
1463 * Nothing interesting.
1465 * Callers: as of 1992-04-29, only called by WriteVCache
/*
 * afs_SimpleVStat
 *
 * Copy a server AFSFetchStatus block into the vcache's cached metadata
 * (length, dates, owner/group/mode, per-uid access rights).  Unlike
 * afs_ProcessFS it does not touch the data version.  Per the elided
 * header comment, as of 1992 only afs_WriteVCache calls this.
 *
 * NOTE(review): listing is elided (non-contiguous line numbers); some
 * statements and braces are missing from this view.
 */
1468 afs_SimpleVStat(register struct vcache *avc,
1469 register struct AFSFetchStatus *astat, struct vrequest *areq)
1472 AFS_STATCNT(afs_SimpleVStat);
/* Only overwrite the cached length when no one is writing/executing the
 * file and no dirty pages exist (two platform variants of the check). */
1475 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1476 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1478 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
1480 #ifdef AFS_64BIT_CLIENT
1481 FillInt64(length, astat->Length_hi, astat->Length);
1482 #else /* AFS_64BIT_CLIENT */
1483 length = astat->Length;
1484 #endif /* AFS_64BIT_CLIENT */
1485 #if defined(AFS_SGI_ENV)
/* SGI: when the file shrank, toss now-invalid pages past the new EOF;
 * must drop avc->lock around PTOSSVP to respect lock ordering. */
1486 osi_Assert((valusema(&avc->vc_rwlock) <= 0)
1487 && (OSI_GET_LOCKID() == avc->vc_rwlockid));
1488 if (length < avc->m.Length) {
1489 vnode_t *vp = (vnode_t *) avc;
1491 osi_Assert(WriteLocked(&avc->lock));
1492 ReleaseWriteLock(&avc->lock);
1494 PTOSSVP(vp, (off_t) length, (off_t) MAXLONG);
1496 ObtainWriteLock(&avc->lock, 67);
1499 /* if writing the file, don't fetch over this value */
1500 afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT, ICL_TYPE_POINTER, avc,
1501 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
1502 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1503 avc->m.Length = length;
1504 avc->m.Date = astat->ClientModTime;
1506 avc->m.Owner = astat->Owner;
1507 avc->m.Group = astat->Group;
1508 avc->m.Mode = astat->UnixModeBits;
/* AFS stores only the permission bits; OR in the Unix file-type bits
 * derived from the vnode type. */
1509 if (vType(avc) == VREG) {
1510 avc->m.Mode |= S_IFREG;
1511 } else if (vType(avc) == VDIR) {
1512 avc->m.Mode |= S_IFDIR;
1513 } else if (vType(avc) == VLNK) {
1514 avc->m.Mode |= S_IFLNK;
1515 if ((avc->m.Mode & 0111) == 0)
1518 if (avc->states & CForeign) {
1519 struct axscache *ac;
1520 avc->anyAccess = astat->AnonymousAccess;
1522 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1524 * Caller has at least one bit not covered by anonymous, and
1525 * thus may have interesting rights.
1527 * HOWEVER, this is a really bad idea, because any access query
1528 * for bits which aren't covered by anonymous, on behalf of a user
1529 * who doesn't have any special rights, will result in an answer of
1530 * the form "I don't know, lets make a FetchStatus RPC and find out!"
1531 * It's an especially bad idea under Ultrix, since (due to the lack of
1532 * a proper access() call) it must perform several afs_access() calls
1533 * in order to create magic mode bits that vary according to who makes
1534 * the call. In other words, _every_ stat() generates a test for
1537 #endif /* badidea */
/* Cache the caller's access rights keyed by uid (pag). */
1538 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1539 ac->axess = astat->CallerAccess;
1540 else /* not found, add a new one if possible */
1541 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1545 } /*afs_SimpleVStat */
1552 * Store the status info *only* back to the server for a
1556 * avc : Ptr to the vcache entry.
1557 * astatus : Ptr to the status info to store.
1558 * areq : Ptr to the associated vrequest.
1561 * Must be called with a shared lock held on the vnode.
/*
 * afs_WriteVCache
 *
 * Push status-only changes (astatus) back to the file server via
 * RXAFS_StoreStatus, retrying through afs_Analyze's connection loop.
 * On success the returned OutStatus is merged into the local vcache
 * (afs_SimpleVStat) and the modification date updated; on failure the
 * entry is marked not-stat'd so the next reference revalidates it.
 * Per the elided header comment: called with a shared lock on the vnode,
 * which is upgraded here and converted back before return.
 *
 * NOTE(review): listing is elided (non-contiguous line numbers).
 */
1565 afs_WriteVCache(register struct vcache *avc,
1566 register struct AFSStoreStatus *astatus,
1567 struct vrequest *areq)
1571 struct AFSFetchStatus OutStatus;
1572 struct AFSVolSync tsync;
1573 XSTATS_DECLS AFS_STATCNT(afs_WriteVCache);
1574 afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
1575 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
/* Standard cache-manager RPC retry loop: get a connection, make the
 * call, let afs_Analyze decide whether to retry on another server. */
1578 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
1580 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
1583 RXAFS_StoreStatus(tc->id, (struct AFSFid *)&avc->fid.Fid,
1584 astatus, &OutStatus, &tsync);
1589 } while (afs_Analyze
1590 (tc, code, &avc->fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
1591 SHARED_LOCK, NULL));
1593 UpgradeSToWLock(&avc->lock, 20);
1595 /* success, do the changes locally */
1596 afs_SimpleVStat(avc, &OutStatus, areq);
1598 * Update the date, too. SimpleVStat didn't do this, since
1599 * it thought we were doing this after fetching new status
1600 * over a file being written.
1602 avc->m.Date = OutStatus.ClientModTime;
1604 /* failure, set up to check with server next time */
1605 ObtainWriteLock(&afs_xcbhash, 462);
1606 afs_DequeueCallback(avc);
1607 avc->states &= ~(CStatd | CUnique); /* turn off stat valid flag */
1608 ReleaseWriteLock(&afs_xcbhash);
1609 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
1610 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
/* Restore the lock level the caller expects (shared). */
1612 ConvertWToSLock(&avc->lock);
1615 } /*afs_WriteVCache */
1621 * Copy astat block into vcache info
1624 * avc : Ptr to vcache entry.
1625 * astat : Ptr to stat block to copy in.
1626 * areq : Ptr to associated request.
1629 * Must be called under a write lock
1631 * Note: this code may get dataversion and length out of sync if the file has
1632 * been modified. This is less than ideal. I haven't thought about
1633 * it sufficiently to be certain that it is adequate.
/*
 * afs_ProcessFS
 *
 * Merge a full AFSFetchStatus block from the server into the vcache:
 * length, dates, data version, owner/group/mode/link count, vnode type,
 * anonymous/caller access rights, plus platform-specific attribute-cache
 * refresh.  Must be called under a write lock (per the elided header
 * comment).  Counterpart of afs_SimpleVStat that DOES update DataVersion.
 *
 * NOTE(review): listing is elided (non-contiguous line numbers).
 */
1636 afs_ProcessFS(register struct vcache *avc,
1637 register struct AFSFetchStatus *astat, struct vrequest *areq)
1640 AFS_STATCNT(afs_ProcessFS);
1642 #ifdef AFS_64BIT_CLIENT
1643 FillInt64(length, astat->Length_hi, astat->Length);
1644 #else /* AFS_64BIT_CLIENT */
1645 length = astat->Length;
1646 #endif /* AFS_64BIT_CLIENT */
1647 /* WARNING: afs_DoBulkStat uses the Length field to store a sequence
1648 * number for each bulk status request. Under no circumstances
1649 * should afs_DoBulkStat store a sequence number if the new
1650 * length will be ignored when afs_ProcessFS is called with
1651 * new stats. If you change the following conditional then you
1652 * also need to change the conditional in afs_DoBulkStat. */
/* Same guard as afs_SimpleVStat: don't clobber length while the file is
 * open for write/exec or has dirty/mapped pages (platform variants). */
1654 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1655 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1657 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
1659 /* if we're writing or mapping this file, don't fetch over these
1662 afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
1663 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
1664 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1665 avc->m.Length = length;
1666 avc->m.Date = astat->ClientModTime;
1668 hset64(avc->m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
1669 avc->m.Owner = astat->Owner;
1670 avc->m.Mode = astat->UnixModeBits;
1671 avc->m.Group = astat->Group;
1672 avc->m.LinkCount = astat->LinkCount;
/* Map the AFS FileType onto the vnode type and Unix S_IF* bits. */
1673 if (astat->FileType == File) {
1674 vSetType(avc, VREG);
1675 avc->m.Mode |= S_IFREG;
1676 } else if (astat->FileType == Directory) {
1677 vSetType(avc, VDIR);
1678 avc->m.Mode |= S_IFDIR;
1679 } else if (astat->FileType == SymbolicLink) {
/* fakestat: a mode-000-permission symlink is a mount point; present it
 * as a directory when afs_fakestat_enable is on. */
1680 if (afs_fakestat_enable && (avc->m.Mode & 0111) == 0) {
1681 vSetType(avc, VDIR);
1682 avc->m.Mode |= S_IFDIR;
1684 vSetType(avc, VLNK);
1685 avc->m.Mode |= S_IFLNK;
1687 if ((avc->m.Mode & 0111) == 0) {
1691 avc->anyAccess = astat->AnonymousAccess;
1693 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1695 * Caller has at least one bit not covered by anonymous, and
1696 * thus may have interesting rights.
1698 * HOWEVER, this is a really bad idea, because any access query
1699 * for bits which aren't covered by anonymous, on behalf of a user
1700 * who doesn't have any special rights, will result in an answer of
1701 * the form "I don't know, lets make a FetchStatus RPC and find out!"
1702 * It's an especially bad idea under Ultrix, since (due to the lack of
1703 * a proper access() call) it must perform several afs_access() calls
1704 * in order to create magic mode bits that vary according to who makes
1705 * the call. In other words, _every_ stat() generates a test for
1708 #endif /* badidea */
/* Cache the caller's rights keyed by uid (pag). */
1710 struct axscache *ac;
1711 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1712 ac->axess = astat->CallerAccess;
1713 else /* not found, add a new one if possible */
1714 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1716 #ifdef AFS_LINUX22_ENV
1717 vcache2inode(avc); /* Set the inode attr cache */
1719 #ifdef AFS_DARWIN_ENV
1720 osi_VM_Setup(avc, 1);
1723 } /*afs_ProcessFS */
/*
 * afs_RemoteLookup
 *
 * Perform an RXAFS_Lookup RPC for `name` in directory `afid`, returning
 * the new fid, its fetch status, callback info, volume-sync info, and
 * (via *serverp) the server that answered.  Retries via the standard
 * afs_Conn / afs_Analyze loop.  A NULL name is mapped to "".
 *
 * NOTE(review): listing is elided — the function's tail (return of the
 * final code) is not visible in this view.
 */
1727 afs_RemoteLookup(register struct VenusFid *afid, struct vrequest *areq,
1728 char *name, struct VenusFid *nfid,
1729 struct AFSFetchStatus *OutStatusp,
1730 struct AFSCallBack *CallBackp, struct server **serverp,
1731 struct AFSVolSync *tsyncp)
1735 register struct conn *tc;
1736 struct AFSFetchStatus OutDirStatus;
1737 XSTATS_DECLS if (!name)
1738 name = ""; /* XXX */
1740 tc = afs_Conn(afid, areq, SHARED_LOCK);
/* Record which server we talked to, for callers that track callbacks. */
1743 *serverp = tc->srvr->server;
1745 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
1748 RXAFS_Lookup(tc->id, (struct AFSFid *)&afid->Fid, name,
1749 (struct AFSFid *)&nfid->Fid, OutStatusp,
1750 &OutDirStatus, CallBackp, tsyncp);
1755 } while (afs_Analyze
1756 (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
1767 * Given a file id and a vrequest structure, fetch the status
1768 * information associated with the file.
1772 * areq : Ptr to associated vrequest structure, specifying the
1773 * user whose authentication tokens will be used.
1774 * avc : caller may already have a vcache for this file, which is
1778 * The cache entry is returned with an increased vrefCount field.
1779 * The entry must be discarded by calling afs_PutVCache when you
1780 * are through using the pointer to the cache entry.
1782 * You should not hold any locks when calling this function, except
1783 * locks on other vcache entries. If you lock more than one vcache
1784 * entry simultaneously, you should lock them in this order:
1786 * 1. Lock all files first, then directories.
1787 * 2. Within a particular type, lock entries in Fid.Vnode order.
1789 * This locking hierarchy is convenient because it allows locking
1790 * of a parent dir cache entry, given a file (to check its access
1791 * control list). It also allows renames to be handled easily by
1792 * locking directories in a constant order.
1793 * NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
1795 /* might have a vcache structure already, which must
1796 * already be held by the caller */
/*
 * afs_GetVCache
 *
 * Look up (or create) the vcache entry for `afid` and make sure it is
 * stat'd.  Returns the entry with an extra reference which the caller
 * must release with afs_PutVCache (per the elided header comment).  If
 * the entry is found and already CStatd it is returned at once; otherwise
 * a new entry may be allocated (afs_NewVCache), per-volume state copied
 * in, and status fetched either from dynroot or via afs_FetchStatus.
 *
 * NOTE(review): listing is elided (non-contiguous line numbers); loop/
 * retry control flow and several returns are not visible here.
 */
1799 afs_GetVCache(register struct VenusFid *afid, struct vrequest *areq,
1800 afs_int32 * cached, struct vcache *avc)
1803 afs_int32 code, newvcache = 0;
1804 register struct vcache *tvc;
1808 AFS_STATCNT(afs_GetVCache);
1811 *cached = 0; /* Init just in case */
1813 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
/* Shared lock is enough to search; upgraded below only if we must
 * allocate a fresh entry. */
1817 ObtainSharedLock(&afs_xvcache, 5);
1819 tvc = afs_FindVCache(afid, &retry, DO_STATS | DO_VLRU);
1821 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1822 ReleaseSharedLock(&afs_xvcache);
1823 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
/* Fast path: entry exists and is already stat'd. */
1831 if (tvc->states & CStatd) {
1832 ReleaseSharedLock(&afs_xvcache);
1836 UpgradeSToWLock(&afs_xvcache, 21);
1838 /* no cache entry, better grab one */
1839 tvc = afs_NewVCache(afid, NULL);
1842 ConvertWToSLock(&afs_xvcache);
1843 afs_stats_cmperf.vcacheMisses++;
1846 ReleaseSharedLock(&afs_xvcache);
1848 ObtainWriteLock(&tvc->lock, 54);
/* Re-check under the entry lock: may have been stat'd meanwhile. */
1850 if (tvc->states & CStatd) {
1851 #ifdef AFS_LINUX22_ENV
1854 ReleaseWriteLock(&tvc->lock);
1855 #ifdef AFS_DARWIN_ENV
1856 osi_VM_Setup(tvc, 0);
1860 #if defined(AFS_OSF_ENV)
1861 if (afs_IsWired(tvc)) {
1862 ReleaseWriteLock(&tvc->lock);
1865 #endif /* AFS_OSF_ENV */
/* BSD-family platforms: flush/invalidate kernel VM pages for the vnode
 * before refetching status. */
1867 VOP_LOCK(AFSTOV(tvc), LK_EXCLUSIVE | LK_RETRY, curproc);
1868 uvm_vnp_uncache(AFSTOV(tvc));
1869 VOP_UNLOCK(AFSTOV(tvc), 0, curproc);
1873 * XXX - I really don't like this. Should try to understand better.
1874 * It seems that sometimes, when we get called, we already hold the
1875 * lock on the vnode (e.g., from afs_getattr via afs_VerifyVCache).
1876 * We can't drop the vnode lock, because that could result in a race.
1877 * Sometimes, though, we get here and don't hold the vnode lock.
1878 * I hate code paths that sometimes hold locks and sometimes don't.
1879 * In any event, the dodge we use here is to check whether the vnode
1880 * is locked, and if it isn't, then we gain and drop it around the call
1881 * to vinvalbuf; otherwise, we leave it alone.
1888 #ifdef AFS_FBSD50_ENV
1889 iheldthelock = VOP_ISLOCKED(vp, curthread);
1891 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
1892 vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
1894 VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
1896 iheldthelock = VOP_ISLOCKED(vp, curproc);
1898 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
1899 vinvalbuf(vp, V_SAVE, osi_curcred(), curproc, PINOD, 0);
1901 VOP_UNLOCK(vp, LK_EXCLUSIVE, curproc);
/* Invalidate cached status/callback before refetching. */
1906 ObtainWriteLock(&afs_xcbhash, 464);
1907 tvc->states &= ~CUnique;
1909 afs_DequeueCallback(tvc);
1910 ReleaseWriteLock(&afs_xcbhash);
1912 /* It is always appropriate to throw away all the access rights? */
1913 afs_FreeAllAxs(&(tvc->Access));
/* Copy per-volume flags (RO/backup/foreign) and the ".." mount-point
 * parent fid out of the volume structure. */
1914 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-volume info */
1916 if ((tvp->states & VForeign)) {
1918 tvc->states |= CForeign;
1919 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
1920 && (tvp->rootUnique == afid->Fid.Unique)) {
1924 if (tvp->states & VRO)
1926 if (tvp->states & VBackup)
1927 tvc->states |= CBackup;
1928 /* now copy ".." entry back out of volume structure, if necessary */
1929 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
1931 tvc->mvid = (struct VenusFid *)
1932 osi_AllocSmallSpace(sizeof(struct VenusFid));
1933 *tvc->mvid = tvp->dotdot;
1935 afs_PutVolume(tvp, READ_LOCK);
1939 afs_RemoveVCB(afid);
/* Dynroot entries are synthesized locally; otherwise go to the server. */
1941 struct AFSFetchStatus OutStatus;
1943 if (afs_DynrootNewVnode(tvc, &OutStatus)) {
1944 afs_ProcessFS(tvc, &OutStatus, areq);
1945 tvc->states |= CStatd | CUnique;
1948 code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
1953 ReleaseWriteLock(&tvc->lock);
1955 ObtainReadLock(&afs_xvcache);
1957 ReleaseReadLock(&afs_xvcache);
1961 ReleaseWriteLock(&tvc->lock);
1964 } /*afs_GetVCache */
/*
 * afs_LookupVCache
 *
 * Like afs_GetVCache, but obtains the target's fid and status via a
 * single RXAFS_Lookup of `aname` in directory `adp` (afs_RemoteLookup),
 * then finds or creates the vcache for the resulting fid and installs
 * the returned status and callback.  origCBs/afs_allCBs detect whether
 * a callback break raced with the RPC; if so the entry is left
 * not-stat'd.
 *
 * NOTE(review): listing is elided (non-contiguous line numbers); error
 * paths and the function tail are not fully visible.
 */
1969 afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
1970 afs_int32 * cached, struct vcache *adp, char *aname)
1972 afs_int32 code, now, newvcache = 0;
1973 struct VenusFid nfid;
1974 register struct vcache *tvc;
1976 struct AFSFetchStatus OutStatus;
1977 struct AFSCallBack CallBack;
1978 struct AFSVolSync tsync;
1979 struct server *serverp = 0;
1983 AFS_STATCNT(afs_GetVCache);
1985 *cached = 0; /* Init just in case */
1987 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
/* First see whether the directory's own entry is already cached and
 * stat'd — if not, knock its CUnique off so it gets revalidated. */
1991 ObtainReadLock(&afs_xvcache);
1992 tvc = afs_FindVCache(afid, &retry, DO_STATS /* no vlru */ );
1995 ReleaseReadLock(&afs_xvcache);
1997 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1998 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2002 ObtainReadLock(&tvc->lock);
2004 if (tvc->states & CStatd) {
2008 ReleaseReadLock(&tvc->lock);
2011 tvc->states &= ~CUnique;
2013 ReleaseReadLock(&tvc->lock);
2014 ObtainReadLock(&afs_xvcache);
2018 ReleaseReadLock(&afs_xvcache);
2020 /* lookup the file */
/* Snapshot the global callback counter so we can detect a callback
 * break racing with the RPC below. */
2023 origCBs = afs_allCBs; /* if anything changes, we don't have a cb */
2025 afs_RemoteLookup(&adp->fid, areq, aname, &nfid, &OutStatus, &CallBack,
2028 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
/* Now find-or-create the vcache for the fid the lookup returned. */
2032 ObtainSharedLock(&afs_xvcache, 6);
2033 tvc = afs_FindVCache(&nfid, &retry, DO_VLRU /* no xstats now */ );
2035 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2036 ReleaseSharedLock(&afs_xvcache);
2037 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2043 /* no cache entry, better grab one */
2044 UpgradeSToWLock(&afs_xvcache, 22);
2045 tvc = afs_NewVCache(&nfid, serverp);
2047 ConvertWToSLock(&afs_xvcache);
2050 ReleaseSharedLock(&afs_xvcache);
2051 ObtainWriteLock(&tvc->lock, 55);
2053 /* It is always appropriate to throw away all the access rights? */
2054 afs_FreeAllAxs(&(tvc->Access));
/* Copy per-volume flags and the ".." mount-point parent, as in
 * afs_GetVCache. */
2055 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-vol info */
2057 if ((tvp->states & VForeign)) {
2059 tvc->states |= CForeign;
2060 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
2061 && (tvp->rootUnique == afid->Fid.Unique))
2064 if (tvp->states & VRO)
2066 if (tvp->states & VBackup)
2067 tvc->states |= CBackup;
2068 /* now copy ".." entry back out of volume structure, if necessary */
2069 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2071 tvc->mvid = (struct VenusFid *)
2072 osi_AllocSmallSpace(sizeof(struct VenusFid));
2073 *tvc->mvid = tvp->dotdot;
/* RPC failed: invalidate the entry and bail (error path). */
2078 ObtainWriteLock(&afs_xcbhash, 465);
2079 afs_DequeueCallback(tvc);
2080 tvc->states &= ~(CStatd | CUnique);
2081 ReleaseWriteLock(&afs_xcbhash);
2082 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2083 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2085 afs_PutVolume(tvp, READ_LOCK);
2086 ReleaseWriteLock(&tvc->lock);
2087 ObtainReadLock(&afs_xvcache);
2089 ReleaseReadLock(&afs_xvcache);
/* Install the callback only if no callback break raced with the RPC
 * (origCBs still matches afs_allCBs). */
2093 ObtainWriteLock(&afs_xcbhash, 466);
2094 if (origCBs == afs_allCBs) {
2095 if (CallBack.ExpirationTime) {
2096 tvc->callback = serverp;
2097 tvc->cbExpires = CallBack.ExpirationTime + now;
2098 tvc->states |= CStatd | CUnique;
2099 tvc->states &= ~CBulkFetching;
2100 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
2101 } else if (tvc->states & CRO) {
2102 /* adapt gives us an hour. */
2103 tvc->cbExpires = 3600 + osi_Time();
2104 /*XXX*/ tvc->states |= CStatd | CUnique;
2105 tvc->states &= ~CBulkFetching;
2106 afs_QueueCallback(tvc, CBHash(3600), tvp);
2108 tvc->callback = NULL;
2109 afs_DequeueCallback(tvc);
2110 tvc->states &= ~(CStatd | CUnique);
2111 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2112 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2115 afs_DequeueCallback(tvc);
2116 tvc->states &= ~CStatd;
2117 tvc->states &= ~CUnique;
2118 tvc->callback = NULL;
2119 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2120 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2122 ReleaseWriteLock(&afs_xcbhash);
2124 afs_PutVolume(tvp, READ_LOCK);
2125 afs_ProcessFS(tvc, &OutStatus, areq);
2127 ReleaseWriteLock(&tvc->lock);
/*
 * afs_GetRootVCache
 *
 * Obtain the vcache for a volume's root vnode.  If the volume structure
 * doesn't yet know its root fid (or getNewFid forces it), the root fid
 * is discovered with an afs_RemoteLookup using Fid.Vnode == 0, and
 * stored back into tvolp.  The entry is then located by walking the
 * fid hash chain directly (not afs_FindVCache), created if absent,
 * volume flags copied in, and the callback installed if no break raced
 * the RPC (origCBs == afs_allCBs).
 *
 * NOTE(review): listing is elided (non-contiguous line numbers); retry
 * loops, error returns, and several braces are not visible here.
 */
2133 afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
2134 afs_int32 * cached, struct volume *tvolp)
2136 afs_int32 code = 0, i, newvcache = 0, haveStatus = 0;
2137 afs_int32 getNewFid = 0;
2139 struct VenusFid nfid;
2140 register struct vcache *tvc;
2141 struct server *serverp = 0;
2142 struct AFSFetchStatus OutStatus;
2143 struct AFSCallBack CallBack;
2144 struct AFSVolSync tsync;
/* Discover the root fid if the volume doesn't have it yet. */
2150 if (!tvolp->rootVnode || getNewFid) {
2151 struct VenusFid tfid;
2154 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2155 origCBs = afs_allCBs; /* ignore InitCallBackState */
2157 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2162 /* ReleaseReadLock(&tvolp->lock); */
2163 ObtainWriteLock(&tvolp->lock, 56);
2164 tvolp->rootVnode = afid->Fid.Vnode = nfid.Fid.Vnode;
2165 tvolp->rootUnique = afid->Fid.Unique = nfid.Fid.Unique;
2166 ReleaseWriteLock(&tvolp->lock);
2167 /* ObtainReadLock(&tvolp->lock);*/
2170 afid->Fid.Vnode = tvolp->rootVnode;
2171 afid->Fid.Unique = tvolp->rootUnique;
/* Search the fid hash chain by hand for an existing entry. */
2174 ObtainSharedLock(&afs_xvcache, 7);
2176 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2177 if (!FidCmp(&(tvc->fid), afid)) {
2179 /* Grab this vnode, possibly reactivating from the free list */
2180 /* for the present (95.05.25) everything on the hash table is
2181 * definitively NOT in the free list -- at least until afs_reclaim
2182 * can be safely implemented */
2185 vg = vget(AFSTOV(tvc)); /* this bumps ref count */
2189 #endif /* AFS_OSF_ENV */
2190 #ifdef AFS_DARWIN14_ENV
2191 /* It'd really suck if we allowed duplicate vcaches for the
2192 same fid to happen. Wonder if this will work? */
2193 struct vnode *vp = AFSTOV(tvc);
/* Darwin: vnode being reclaimed — wait for termination rather than
 * creating a duplicate vcache for the same fid. */
2194 if (vp->v_flag & (VXLOCK|VORECLAIM|VTERMINATE)) {
2195 printf("precluded FindVCache on %x (%d:%d:%d)\n",
2196 vp, tvc->fid.Fid.Volume, tvc->fid.Fid.Vnode,
2197 tvc->fid.Fid.Unique);
2198 simple_lock(&vp->v_interlock);
2199 SET(vp->v_flag, VTERMWANT);
2200 simple_unlock(&vp->v_interlock);
2201 (void)tsleep((caddr_t)&vp->v_ubcinfo, PINOD, "vget1", 0);
2202 printf("VTERMWANT ended on %x\n", vp);
/* If the mount point lost its status, the root fid may have changed:
 * restart with getNewFid (control flow partially elided). */
2210 if (!haveStatus && (!tvc || !(tvc->states & CStatd))) {
2211 /* Mount point no longer stat'd or unknown. FID may have changed. */
2214 AFS_RELE(AFSTOV(tvc));
2218 ReleaseSharedLock(&afs_xvcache);
2223 UpgradeSToWLock(&afs_xvcache, 23);
2224 /* no cache entry, better grab one */
2225 tvc = afs_NewVCache(afid, NULL);
2227 afs_stats_cmperf.vcacheMisses++;
2231 afs_stats_cmperf.vcacheHits++;
2233 /* we already bumped the ref count in the for loop above */
2234 #else /* AFS_OSF_ENV */
/* Non-OSF: move the entry to the head of the VLRU, with heavy paranoia
 * checks (refpanic) on queue consistency. */
2237 UpgradeSToWLock(&afs_xvcache, 24);
2238 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2239 refpanic("GRVC VLRU inconsistent0");
2241 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2242 refpanic("GRVC VLRU inconsistent1");
2244 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2245 refpanic("GRVC VLRU inconsistent2");
2247 QRemove(&tvc->vlruq); /* move to lruq head */
2248 QAdd(&VLRU, &tvc->vlruq);
2249 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2250 refpanic("GRVC VLRU inconsistent3");
2252 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2253 refpanic("GRVC VLRU inconsistent4");
2255 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2256 refpanic("GRVC VLRU inconsistent5");
2261 ReleaseWriteLock(&afs_xvcache);
2263 if (tvc->states & CStatd) {
/* Entry exists but isn't stat'd: clear CUnique/callback and refetch. */
2267 ObtainReadLock(&tvc->lock);
2268 tvc->states &= ~CUnique;
2269 tvc->callback = NULL; /* redundant, perhaps */
2270 ReleaseReadLock(&tvc->lock);
2273 ObtainWriteLock(&tvc->lock, 57);
2275 /* It is always appropriate to throw away all the access rights? */
2276 afs_FreeAllAxs(&(tvc->Access));
2279 tvc->states |= CForeign;
2280 if (tvolp->states & VRO)
2282 if (tvolp->states & VBackup)
2283 tvc->states |= CBackup;
2284 /* now copy ".." entry back out of volume structure, if necessary */
2285 if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
2286 && (tvolp->rootUnique == afid->Fid.Unique)) {
2289 if (tvc->mvstat == 2 && tvolp->dotdot.Fid.Volume != 0) {
2291 tvc->mvid = (struct VenusFid *)
2292 osi_AllocSmallSpace(sizeof(struct VenusFid));
2293 *tvc->mvid = tvolp->dotdot;
2297 afs_RemoveVCB(afid);
/* Fetch fresh status for the root (Fid.Vnode == 0 lookup again). */
2300 struct VenusFid tfid;
2303 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2304 origCBs = afs_allCBs; /* ignore InitCallBackState */
2306 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
/* RPC failure path: invalidate and release. */
2311 ObtainWriteLock(&afs_xcbhash, 467);
2312 afs_DequeueCallback(tvc);
2313 tvc->callback = NULL;
2314 tvc->states &= ~(CStatd | CUnique);
2315 ReleaseWriteLock(&afs_xcbhash);
2316 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2317 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2318 ReleaseWriteLock(&tvc->lock);
2319 ObtainReadLock(&afs_xvcache);
2321 ReleaseReadLock(&afs_xvcache);
/* Install the callback only if no break raced with the RPC. */
2325 ObtainWriteLock(&afs_xcbhash, 468);
2326 if (origCBs == afs_allCBs) {
2327 tvc->states |= CTruth;
2328 tvc->callback = serverp;
2329 if (CallBack.ExpirationTime != 0) {
2330 tvc->cbExpires = CallBack.ExpirationTime + start;
2331 tvc->states |= CStatd;
2332 tvc->states &= ~CBulkFetching;
2333 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
2334 } else if (tvc->states & CRO) {
2335 /* adapt gives us an hour. */
2336 tvc->cbExpires = 3600 + osi_Time();
2337 /*XXX*/ tvc->states |= CStatd;
2338 tvc->states &= ~CBulkFetching;
2339 afs_QueueCallback(tvc, CBHash(3600), tvolp);
2342 afs_DequeueCallback(tvc);
2343 tvc->callback = NULL;
2344 tvc->states &= ~(CStatd | CUnique);
2345 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2346 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2348 ReleaseWriteLock(&afs_xcbhash);
2349 afs_ProcessFS(tvc, &OutStatus, areq);
2351 ReleaseWriteLock(&tvc->lock);
2358 * must be called with avc write-locked
2359 * don't absolutely have to invalidate the hint unless the dv has
2360 * changed, but be sure to get it right else there will be consistency bugs.
/*
 * afs_FetchStatus
 *
 * Fetch fresh status for `afid` from the file server (RXAFS_FetchStatus)
 * and merge it into `avc` via afs_ProcessFS, installing the returned
 * callback.  Must be called with avc write-locked; invalidates the
 * dcache hint since the data version may have changed (per the elided
 * header comment).  On EPERM/EACCES, records zero rights for this uid
 * rather than discarding the entry.
 *
 * NOTE(review): listing is elided (non-contiguous line numbers).
 */
2363 afs_FetchStatus(struct vcache * avc, struct VenusFid * afid,
2364 struct vrequest * areq, struct AFSFetchStatus * Outsp)
2367 afs_uint32 start = 0;
2368 register struct conn *tc;
2369 struct AFSCallBack CallBack;
2370 struct AFSVolSync tsync;
2371 struct volume *volp;
2374 tc = afs_Conn(afid, areq, SHARED_LOCK);
/* Invalidate the dcache hint before the dv can change under it. */
2375 avc->quick.stamp = 0;
2376 avc->h1.dchint = NULL; /* invalidate hints */
2378 avc->callback = tc->srvr->server;
2380 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
2383 RXAFS_FetchStatus(tc->id, (struct AFSFid *)&afid->Fid, Outsp,
2391 } while (afs_Analyze
2392 (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
2393 SHARED_LOCK, NULL));
/* Success: merge status, then install/expire the callback under
 * afs_xcbhash.  cbExpires is based on `start` (time the call began). */
2396 afs_ProcessFS(avc, Outsp, areq);
2397 volp = afs_GetVolume(afid, areq, READ_LOCK);
2398 ObtainWriteLock(&afs_xcbhash, 469);
2399 avc->states |= CTruth;
2400 if (avc->callback /* check for race */ ) {
2401 if (CallBack.ExpirationTime != 0) {
2402 avc->cbExpires = CallBack.ExpirationTime + start;
2403 avc->states |= CStatd;
2404 avc->states &= ~CBulkFetching;
2405 afs_QueueCallback(avc, CBHash(CallBack.ExpirationTime), volp);
2406 } else if (avc->states & CRO) { /* ordinary callback on a read-only volume -- AFS 3.2 style */
2407 avc->cbExpires = 3600 + start;
2408 avc->states |= CStatd;
2409 avc->states &= ~CBulkFetching;
2410 afs_QueueCallback(avc, CBHash(3600), volp);
2412 afs_DequeueCallback(avc);
2413 avc->callback = NULL;
2414 avc->states &= ~(CStatd | CUnique);
2415 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
2416 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2419 afs_DequeueCallback(avc);
2420 avc->callback = NULL;
2421 avc->states &= ~(CStatd | CUnique);
2422 if ((avc->states & CForeign) || (avc->fid.Fid.Vnode & 1))
2423 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2425 ReleaseWriteLock(&afs_xcbhash);
2427 afs_PutVolume(volp, READ_LOCK);
2429 /* used to undo the local callback, but that's too extreme.
2430 * There are plenty of good reasons that fetchstatus might return
2431 * an error, such as EPERM. If we have the vnode cached, statd,
2432 * with callback, might as well keep track of the fact that we
2433 * don't have access...
2435 if (code == EPERM || code == EACCES) {
2436 struct axscache *ac;
2437 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
2439 else /* not found, add a new one if possible */
2440 afs_AddAxs(avc->Access, areq->uid, 0);
2451 * Stuff some information into the vcache for the given file.
2454 * afid : File in question.
2455 * OutStatus : Fetch status on the file.
2456 * CallBack : Callback info.
2457 * tc : RPC connection involved.
2458 * areq : vrequest involved.
2461 * Nothing interesting.
/*
 * afs_StuffVcache
 *
 * Install already-fetched status (OutStatus) and callback info
 * (CallBack, from connection tc) into the vcache for `afid`, creating
 * the entry if needed.  Used when the caller has the RPC results in
 * hand and so no FetchStatus round trip is made here; callback expiry
 * is therefore approximated with osi_Time() - 1 (see the long comment
 * below).
 *
 * NOTE(review): listing is elided (non-contiguous line numbers).
 */
2464 afs_StuffVcache(register struct VenusFid *afid,
2465 struct AFSFetchStatus *OutStatus,
2466 struct AFSCallBack *CallBack, register struct conn *tc,
2467 struct vrequest *areq)
2469 register afs_int32 code, i, newvcache = 0;
2470 register struct vcache *tvc;
2471 struct AFSVolSync tsync;
2473 struct axscache *ac;
2476 AFS_STATCNT(afs_StuffVcache);
2477 #ifdef IFS_VCACHECOUNT
/* Find or create the cache entry for this fid. */
2482 ObtainSharedLock(&afs_xvcache, 8);
2484 tvc = afs_FindVCache(afid, &retry, DO_VLRU /* no stats */ );
2486 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2487 ReleaseSharedLock(&afs_xvcache);
2488 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2494 /* no cache entry, better grab one */
2495 UpgradeSToWLock(&afs_xvcache, 25);
2496 tvc = afs_NewVCache(afid, NULL);
2498 ConvertWToSLock(&afs_xvcache);
2501 ReleaseSharedLock(&afs_xvcache);
2502 ObtainWriteLock(&tvc->lock, 58);
/* Invalidate old status before stuffing the new values in. */
2504 tvc->states &= ~CStatd;
2505 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2506 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2508 /* Is it always appropriate to throw away all the access rights? */
2509 afs_FreeAllAxs(&(tvc->Access));
2511 /*Copy useful per-volume info */
2512 tvp = afs_GetVolume(afid, areq, READ_LOCK);
2514 if (newvcache && (tvp->states & VForeign))
2515 tvc->states |= CForeign;
2516 if (tvp->states & VRO)
2518 if (tvp->states & VBackup)
2519 tvc->states |= CBackup;
2521 * Now, copy ".." entry back out of volume structure, if
2524 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2526 tvc->mvid = (struct VenusFid *)
2527 osi_AllocSmallSpace(sizeof(struct VenusFid));
2528 *tvc->mvid = tvp->dotdot;
2531 /* store the stat on the file */
2532 afs_RemoveVCB(afid);
2533 afs_ProcessFS(tvc, OutStatus, areq);
2534 tvc->callback = tc->srvr->server;
2536 /* we use osi_Time twice below. Ideally, we would use the time at which
2537 * the FetchStatus call began, instead, but we don't have it here. So we
2538 * make do with "now". In the CRO case, it doesn't really matter. In
2539 * the other case, we hope that the difference between "now" and when the
2540 * call actually began execution on the server won't be larger than the
2541 * padding which the server keeps. Subtract 1 second anyway, to be on
2542 * the safe side. Can't subtract more because we don't know how big
2543 * ExpirationTime is. Possible consistency problems may arise if the call
2544 * timeout period becomes longer than the server's expiration padding. */
2545 ObtainWriteLock(&afs_xcbhash, 470);
2546 if (CallBack->ExpirationTime != 0) {
2547 tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
2548 tvc->states |= CStatd;
2549 tvc->states &= ~CBulkFetching;
2550 afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
2551 } else if (tvc->states & CRO) {
2552 /* old-fashioned AFS 3.2 style */
2553 tvc->cbExpires = 3600 + osi_Time();
2554 /*XXX*/ tvc->states |= CStatd;
2555 tvc->states &= ~CBulkFetching;
2556 afs_QueueCallback(tvc, CBHash(3600), tvp);
2558 afs_DequeueCallback(tvc);
2559 tvc->callback = NULL;
2560 tvc->states &= ~(CStatd | CUnique);
2561 if ((tvc->states & CForeign) || (tvc->fid.Fid.Vnode & 1))
2562 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2564 ReleaseWriteLock(&afs_xcbhash);
2566 afs_PutVolume(tvp, READ_LOCK);
2568 /* look in per-pag cache */
2569 if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
2570 ac->axess = OutStatus->CallerAccess; /* substitute pags */
2571 else /* not found, add a new one if possible */
2572 afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);
2574 ReleaseWriteLock(&tvc->lock);
2575 afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
2576 ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32,
2577 tvc->cbExpires, ICL_TYPE_INT32, tvc->cbExpires - osi_Time());
2579 * Release ref count... hope this guy stays around...
2582 } /*afs_StuffVcache */
2589 * Decrements the reference count on a cache entry.
2592 * avc : Pointer to the cache entry to decrement.
2595 * Nothing interesting.
/*
 * afs_PutVCache
 *
 * Drop one reference on a vcache entry, under a read lock on
 * afs_xvcache.  (The actual refcount decrement is on a line elided
 * from this listing — presumably between the lock/unlock pair.)
 */
2598 afs_PutVCache(register struct vcache *avc)
2600 AFS_STATCNT(afs_PutVCache);
2602 * Can we use a read lock here?
2604 ObtainReadLock(&afs_xvcache);
2606 ReleaseReadLock(&afs_xvcache);
2607 } /*afs_PutVCache */
2613 * Find a vcache entry given a fid.
2616 * afid : Pointer to the fid whose cache entry we desire.
2617 * retry: (SGI-specific) tell the caller to drop the lock on xvcache,
2618 * unlock the vnode, and try again.
2619 * flags: bit 1 to specify whether to compute hit statistics. Not
2620 * set if FindVCache is called as part of internal bookkeeping.
2623 * Must be called with the afs_xvcache lock at least held at
2624 * the read level. In order to do the VLRU adjustment, the xvcache lock
2625 * must be shared-- we upgrade it here.
/*
 * afs_FindVCache
 *
 * Look up a vcache entry by exact VenusFid match in the afs_vhashT
 * hash table.  On a hit the vnode is held (platform-specific vget /
 * osi_vnhold), optionally moved to the front of the VLRU, and hit/miss
 * statistics are updated.
 *
 * Locking contract (from the file's own commentary): caller holds
 * afs_xvcache at least shared; the VLRU adjustment upgrades it to a
 * write lock here and downgrades it back to shared.
 *
 * NOTE(review): this listing elides interior lines (hash-index
 * computation for `i`, the OSF vget-failure path, several closing
 * braces) -- verify control flow against the full source.
 */
2629 afs_FindVCache(struct VenusFid *afid, afs_int32 * retry, afs_int32 flag)
2632 register struct vcache *tvc;
2635 AFS_STATCNT(afs_FindVCache);
     /* Walk the hash chain for this fid's bucket (index `i` computed on
      * an elided line, presumably via VCHash or similar -- confirm). */
2638 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2639 if (FidMatches(afid, tvc)) {
2641 /* Grab this vnode, possibly reactivating from the free list */
2644 vg = vget(AFSTOV(tvc));
2648 #endif /* AFS_OSF_ENV */
2653 /* should I have a read lock on the vnode here? */
2657 #if !defined(AFS_OSF_ENV)
2658 osi_vnhold(tvc, retry); /* already held, above */
     /* SGI path: osi_vnhold may ask the caller to drop locks and
      * retry; bail out if so (early-return body elided). */
2659 if (retry && *retry)
2663 * only move to front of vlru if we have proper vcache locking)
2665 if (flag & DO_VLRU) {
     /* Sanity-check VLRU list linkage before touching it; refpanic
      * only aborts when afs_norefpanic is unset (defined elsewhere). */
2666 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2667 refpanic("FindVC VLRU inconsistent1");
2669 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
     /* NOTE(review): message says "inconsistent1" again -- looks like a
      * copy/paste label; upstream carries the same text. */
2670 refpanic("FindVC VLRU inconsistent1");
2672 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2673 refpanic("FindVC VLRU inconsistent2");
     /* Move this entry to the MRU end of the VLRU under a write lock,
      * then drop back to shared so callers' expectations hold. */
2675 UpgradeSToWLock(&afs_xvcache, 26);
2676 QRemove(&tvc->vlruq);
2677 QAdd(&VLRU, &tvc->vlruq);
2678 ConvertWToSLock(&afs_xvcache);
     /* Re-verify linkage after the move. */
2679 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2680 refpanic("FindVC VLRU inconsistent1");
2682 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2683 refpanic("FindVC VLRU inconsistent2");
2685 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2686 refpanic("FindVC VLRU inconsistent3");
     /* Hit/miss accounting, skipped for internal bookkeeping calls. */
2692 if (flag & DO_STATS) {
2694 afs_stats_cmperf.vcacheHits++;
2696 afs_stats_cmperf.vcacheMisses++;
2697 if (afs_IsPrimaryCellNum(afid->Cell))
2698 afs_stats_cmperf.vlocalAccesses++;
2700 afs_stats_cmperf.vremoteAccesses++;
2702 #ifdef AFS_LINUX22_ENV
     /* Linux: refresh the wrapped inode from the vcache (i_nlink etc.)
      * when the entry still has valid status. */
2703 if (tvc && (tvc->states & CStatd))
2704 vcache2inode(tvc); /* mainly to reset i_nlink */
2706 #ifdef AFS_DARWIN_ENV
2708 osi_VM_Setup(tvc, 0);
2711 } /*afs_FindVCache */
2717 * Find a vcache entry given a fid. Does a wildcard match on what we
2718 * have for the fid. If more than one entry, don't return anything.
2721 * avcp : Fill in pointer if we found one and only one.
2722 * afid : Pointer to the fid whose cache entry we desire.
2723 * retry: (SGI-specific) tell the caller to drop the lock on xvcache,
2724 * unlock the vnode, and try again.
2725 * flags: bit 1 to specify whether to compute hit statistics. Not
2726 * set if FindVCache is called as part of internal bookkeeping.
2729 * Must be called with the afs_xvcache lock at least held at
2730 * the read level. In order to do the VLRU adjustment, the xvcache lock
2731 * must be shared-- we upgrade it here.
2734 * number of matches found.
2737 int afs_duplicate_nfs_fids = 0;
/*
 * afs_NFSFindVCache
 *
 * Wildcard vcache lookup used by the NFS translator: the incoming fid
 * has been truncated (Vnode masked to 16 bits, Unique to 24 bits), so
 * match only on the bits we have.  If more than one vcache matches the
 * truncated fid, count it as a duplicate, return nothing, and bump
 * afs_duplicate_nfs_fids.
 *
 * On success *avcp is set to the single match (held) and 1 is
 * returned; otherwise *avcp may be NULL and 0 is returned.
 *
 * NOTE(review): interior lines are elided in this listing (hash-index
 * computation, OSF vget-failure handling, loop-exit braces) -- confirm
 * control flow against the full source.
 */
2740 afs_NFSFindVCache(struct vcache **avcp, struct VenusFid *afid)
2742 register struct vcache *tvc;
2744 afs_int32 count = 0;
2745 struct vcache *found_tvc = NULL;
2747 AFS_STATCNT(afs_FindVCache);
2749 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
     /* Shared lock: upgraded later only if a VLRU move is needed. */
2753 ObtainSharedLock(&afs_xvcache, 331);
2756 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2757 /* Match only on what we have.... */
2758 if (((tvc->fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
2759 && (tvc->fid.Fid.Volume == afid->Fid.Volume)
2760 && ((tvc->fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
2761 && (tvc->fid.Cell == afid->Cell)) {
2763 /* Grab this vnode, possibly reactivating from the free list */
2766 vg = vget(AFSTOV(tvc));
2769 /* This vnode no longer exists. */
2772 #endif /* AFS_OSF_ENV */
     /* Second distinct match for the truncated fid: ambiguous.
      * Drop references and give up rather than return a guess. */
2777 /* Drop our reference counts. */
2779 vrele(AFSTOV(found_tvc));
2781 afs_duplicate_nfs_fids++;
2782 ReleaseSharedLock(&afs_xvcache);
2790 /* should I have a read lock on the vnode here? */
2792 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
     /* SGI: the hold may require dropping all locks and retrying;
      * reset found_tvc so the retry starts clean. */
2793 afs_int32 retry = 0;
2794 osi_vnhold(tvc, &retry);
2797 found_tvc = (struct vcache *)0;
2798 ReleaseSharedLock(&afs_xvcache);
2799 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2803 #if !defined(AFS_OSF_ENV)
2804 osi_vnhold(tvc, (int *)0); /* already held, above */
2808 * We obtained the xvcache lock above.
     /* Same VLRU sanity-check / move-to-MRU dance as afs_FindVCache,
      * under a temporary write-lock upgrade. */
2810 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2811 refpanic("FindVC VLRU inconsistent1");
2813 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2814 refpanic("FindVC VLRU inconsistent1");
2816 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2817 refpanic("FindVC VLRU inconsistent2");
2819 UpgradeSToWLock(&afs_xvcache, 568);
2820 QRemove(&tvc->vlruq);
2821 QAdd(&VLRU, &tvc->vlruq);
2822 ConvertWToSLock(&afs_xvcache);
2823 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2824 refpanic("FindVC VLRU inconsistent1");
2826 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2827 refpanic("FindVC VLRU inconsistent2");
2829 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2830 refpanic("FindVC VLRU inconsistent3");
     /* Hit/miss accounting (the DO_STATS guard visible in
      * afs_FindVCache is on an elided line here, if present). */
2836 afs_stats_cmperf.vcacheHits++;
2838 afs_stats_cmperf.vcacheMisses++;
2839 if (afs_IsPrimaryCellNum(afid->Cell))
2840 afs_stats_cmperf.vlocalAccesses++;
2842 afs_stats_cmperf.vremoteAccesses++;
2844 *avcp = tvc; /* May be null */
2846 ReleaseSharedLock(&afs_xvcache);
2847 return (tvc ? 1 : 0);
2849 } /*afs_NFSFindVCache */
2857 * Initialize vcache related variables
/*
 * afs_vcacheInit
 *
 * One-time initialization of the vcache subsystem: size limits on OSF,
 * the afs_xvcache/afs_xvcb locks, the pre-allocated free list of
 * astatSize vcache entries (non-OSF), and per-vnode semaphores on SGI.
 *
 * NOTE(review): this listing ends at line 2913 but the function
 * continues past it (next visible definition starts at 2927) -- the
 * tail, including the closing brace, is elided here.
 */
2860 afs_vcacheInit(int astatSize)
2862 register struct vcache *tvp;
2864 #if defined(AFS_OSF_ENV)
     /* OSF: cap the vcache count at half the system vnode limit, or at
      * astatSize if that is smaller. */
2865 if (!afs_maxvcount) {
2866 #if defined(AFS_OSF30_ENV)
2867 afs_maxvcount = max_vnodes / 2; /* limit ourselves to half the total */
2869 afs_maxvcount = nvnode / 2; /* limit ourselves to half the total */
2871 if (astatSize < afs_maxvcount) {
2872 afs_maxvcount = astatSize;
2875 #else /* AFS_OSF_ENV */
2879 RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
2880 LOCK_INIT(&afs_xvcb, "afs_xvcb");
2882 #if !defined(AFS_OSF_ENV)
2883 /* Allocate and thread the struct vcache entries */
     /* NOTE(review): allocation result is not NULL-checked on the
      * visible lines before memset -- may be intentional for this
      * kernel allocator; confirm afs_osi_Alloc's failure semantics. */
2884 tvp = (struct vcache *)afs_osi_Alloc(astatSize * sizeof(struct vcache));
2885 memset((char *)tvp, 0, sizeof(struct vcache) * astatSize);
     /* Chain the entries into a singly linked free list via nextfree;
      * Initial_freeVCList keeps the base pointer for shutdown's free. */
2887 Initial_freeVCList = tvp;
2888 freeVCList = &(tvp[0]);
2889 for (i = 0; i < astatSize - 1; i++) {
2890 tvp[i].nextfree = &(tvp[i + 1]);
2892 tvp[astatSize - 1].nextfree = NULL;
2893 #ifdef KERNEL_HAVE_PIN
     /* Pin the array in physical memory on platforms that require it. */
2894 pin((char *)tvp, astatSize * sizeof(struct vcache)); /* XXX */
2899 #if defined(AFS_SGI_ENV)
     /* SGI: give each vcache a vnode number and named rwlock/sync/lock
      * semaphores (names built by makesname, e.g. "vrw<N>"). */
2900 for (i = 0; i < astatSize; i++) {
2901 char name[METER_NAMSZ];
2902 struct vcache *tvc = &tvp[i];
2904 tvc->v.v_number = ++afsvnumbers;
2905 tvc->vc_rwlockid = OSI_NO_LOCKID;
2906 initnsema(&tvc->vc_rwlock, 1,
2907 makesname(name, "vrw", tvc->v.v_number));
2908 #ifndef AFS_SGI53_ENV
2909 initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
2911 #ifndef AFS_SGI62_ENV
2912 initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
2913 #endif /* AFS_SGI62_ENV */
2927 shutdown_vcache(void)
2930 struct afs_cbr *tsp, *nsp;
2932 * XXX We may potentially miss some of the vcaches because if when there're no
2933 * free vcache entries and all the vcache entries are active ones then we allocate
2934 * an additional one - admittedly we almost never had that occur.
2938 register struct afs_q *tq, *uq;
2939 register struct vcache *tvc;
2940 for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
2944 osi_FreeSmallSpace(tvc->mvid);
2945 tvc->mvid = (struct VenusFid *)0;
2948 aix_gnode_rele(AFSTOV(tvc));
2950 if (tvc->linkData) {
2951 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
2956 * Also free the remaining ones in the Cache
2958 for (i = 0; i < VCSIZE; i++) {
2959 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2961 osi_FreeSmallSpace(tvc->mvid);
2962 tvc->mvid = (struct VenusFid *)0;
2966 afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
2967 #ifdef AFS_AIX32_ENV
2970 vms_delete(tvc->segid);
2972 tvc->segid = tvc->vmh = NULL;
2974 osi_Panic("flushVcache: vm race");
2982 #if defined(AFS_SUN5_ENV)
2988 if (tvc->linkData) {
2989 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
2993 afs_FreeAllAxs(&(tvc->Access));
2999 * Free any leftover callback queue
3001 for (tsp = afs_cbrSpace; tsp; tsp = nsp) {
3003 afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
3007 #if !defined(AFS_OSF_ENV)
3008 afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3010 #ifdef KERNEL_HAVE_PIN
3011 unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3013 #if !defined(AFS_OSF_ENV)
3014 freeVCList = Initial_freeVCList = 0;
3016 RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
3017 LOCK_INIT(&afs_xvcb, "afs_xvcb");