2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
19 * afs_FlushActiveVcaches
22 * afs_WriteVCacheDiscon
40 #include <afsconfig.h>
41 #include "afs/param.h"
46 #include "afs/sysincludes.h" /*Standard vendor system headers */
47 #include "afsincludes.h" /*AFS-based standard headers */
48 #include "afs/afs_stats.h"
49 #include "afs/afs_cbqueue.h"
50 #include "afs/afs_osidnlc.h"
52 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
53 afs_int32 afs_maxvcount = 0; /* max number of vcache entries */
54 afs_int32 afs_vcount = 0; /* number of vcache in use now */
55 #endif /* AFS_OSF_ENV */
63 #endif /* AFS_SGI64_ENV */
65 /* Exported variables */
67 afs_rwlock_t afs_xvcdirty; /*Lock: discon vcache dirty list mgmt */
69 afs_rwlock_t afs_xvcache; /*Lock: alloc new stat cache entries */
70 afs_rwlock_t afs_xvreclaim; /*Lock: entries reclaimed, not on free list */
71 afs_lock_t afs_xvcb; /*Lock: fids on which there are callbacks */
72 #if !defined(AFS_LINUX22_ENV)
73 static struct vcache *freeVCList; /*Free list for stat cache entries */
74 struct vcache *ReclaimedVCList; /*Reclaimed list for stat entries */
75 static struct vcache *Initial_freeVCList; /*Initial list for above */
77 struct afs_q VLRU; /*vcache LRU */
78 afs_int32 vcachegen = 0;
79 unsigned int afs_paniconwarn = 0;
80 struct vcache *afs_vhashT[VCSIZE];
81 struct afs_q afs_vhashTV[VCSIZE];
82 static struct afs_cbr *afs_cbrHashT[CBRSIZE];
83 afs_int32 afs_bulkStatsLost;
84 int afs_norefpanic = 0;
87 /* Disk backed vcache definitions
88 * Both protected by xvcache */
89 static int afs_nextVcacheSlot = 0;
90 static struct afs_slotlist *afs_freeSlotList = NULL;
92 /* Forward declarations */
93 static afs_int32 afs_QueueVCB(struct vcache *avc);
96 * Generate an index into the hash table for a given Fid.
98 * \return The hash value.
101 afs_HashCBRFid(struct AFSFid *fid)
103 return (fid->Volume + fid->Vnode + fid->Unique) % CBRSIZE;
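/* A minimal sketch of how the hash is used (the fid values are hypothetical;
 * CBRSIZE comes from afs.h):
 *
 *     struct AFSFid f;
 *     int slot;
 *
 *     f.Volume = 536870915;
 *     f.Vnode = 2;
 *     f.Unique = 7;
 *     slot = afs_HashCBRFid(&f);   yields (536870915 + 2 + 7) % CBRSIZE
 */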
107 * Insert a CBR entry into the hash table.
108 * Must be called with afs_xvcb held.
113 afs_InsertHashCBR(struct afs_cbr *cbr)
115 int slot = afs_HashCBRFid(&cbr->fid);
117 cbr->hash_next = afs_cbrHashT[slot];
118 if (afs_cbrHashT[slot])
119 afs_cbrHashT[slot]->hash_pprev = &cbr->hash_next;
121 cbr->hash_pprev = &afs_cbrHashT[slot];
122 afs_cbrHashT[slot] = cbr;
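/* Note: hash_pprev always points at whatever pointer currently references
 * this entry -- either the bucket head in afs_cbrHashT[] or the previous
 * entry's hash_next.  That is what lets afs_FreeCBR() below unlink an entry
 * in O(1) without walking the bucket, roughly:
 *
 *     *(cbr->hash_pprev) = cbr->hash_next;
 *     if (cbr->hash_next)
 *         cbr->hash_next->hash_pprev = cbr->hash_pprev;
 */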
127 * Flush the given vcache entry.
130 * afs_xvcache lock must be held for writing upon entry to
131 * prevent people from changing the vrefCount field, and to
132 * protect the lruq and hnext fields.
133 * LOCK: afs_FlushVCache afs_xvcache W
134 * REFCNT: vcache ref count must be zero on entry except for osf1
135 * RACE: lock is dropped and reobtained, permitting race in caller
137 * \param avc Pointer to vcache entry to flush.
138 * \param slept Pointer to int to set 1 if we sleep/drop locks, 0 if we don't.
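 *
 * A minimal sketch of a typical call, modelled on the callers in this file
 * (tvc and the lock id are hypothetical): afs_xvcache is held for writing,
 * the entry's refcount is expected to be zero, and *slept reports whether
 * locks were dropped along the way:
 *
 *     int slept = 0, code;
 *
 *     ObtainWriteLock(&afs_xvcache, 999);
 *     code = afs_FlushVCache(tvc, &slept);
 *     if (code)
 *         put tvc back on whatever list it came from; it was not flushed
 *     ReleaseWriteLock(&afs_xvcache);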
142 afs_FlushVCache(struct vcache *avc, int *slept)
143 { /*afs_FlushVCache */
146 struct vcache **uvc, *wvc;
149 AFS_STATCNT(afs_FlushVCache);
150 afs_Trace2(afs_iclSetp, CM_TRACE_FLUSHV, ICL_TYPE_POINTER, avc,
151 ICL_TYPE_INT32, avc->f.states);
154 VN_LOCK(AFSTOV(avc));
158 code = osi_VM_FlushVCache(avc, slept);
162 if (avc->f.states & CVFlushed) {
166 #if !defined(AFS_LINUX22_ENV)
167 if (avc->nextfree || !avc->vlruq.prev || !avc->vlruq.next) { /* qv afs.h */
168 refpanic("LRU vs. Free inconsistency");
171 avc->f.states |= CVFlushed;
172 /* pull the entry out of the lruq and put it on the free list */
173 QRemove(&avc->vlruq);
175 /* keep track of # of files that we bulk stat'd, but never used
176 * before they got recycled.
178 if (avc->f.states & CBulkStat)
181 /* remove entry from the hash chain */
182 i = VCHash(&avc->f.fid);
183 uvc = &afs_vhashT[i];
184 for (wvc = *uvc; wvc; uvc = &wvc->hnext, wvc = *uvc) {
187 avc->hnext = (struct vcache *)NULL;
192 /* remove entry from the volume hash table */
193 QRemove(&avc->vhashq);
196 osi_FreeSmallSpace(avc->mvid);
197 avc->mvid = (struct VenusFid *)0;
199 afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
200 avc->linkData = NULL;
202 #if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
203 /* OK, there are no internal vrefCounts, so there shouldn't
204 * be any more refs here. */
206 #ifdef AFS_DARWIN80_ENV
207 vnode_clearfsnode(AFSTOV(avc));
208 vnode_removefsref(AFSTOV(avc));
210 avc->v->v_data = NULL; /* remove from vnode */
212 AFSTOV(avc) = NULL; /* also drop the ptr to vnode */
215 #ifdef AFS_SUN510_ENV
216 /* As we use private vnodes, cleanup is up to us */
217 vn_reinit(AFSTOV(avc));
219 afs_FreeAllAxs(&(avc->Access));
221 /* we can't really give back callbacks on RO files, since the
222 * server only tracks them on a per-volume basis, and we don't
223 * know whether we still have some other files from the same
225 if ((avc->f.states & CRO) == 0 && avc->callback) {
228 ObtainWriteLock(&afs_xcbhash, 460);
229 afs_DequeueCallback(avc); /* remove it from queued callbacks list */
230 avc->f.states &= ~(CStatd | CUnique);
231 ReleaseWriteLock(&afs_xcbhash);
232 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
233 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
235 osi_dnlc_purgevp(avc);
238 * Next, keep track of which vnodes we've deleted for create's
239 * optimistic synchronization algorithm
242 if (avc->f.fid.Fid.Vnode & 1)
247 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
248 /* put the entry in the free list */
249 avc->nextfree = freeVCList;
251 if (avc->vlruq.prev || avc->vlruq.next) {
252 refpanic("LRU vs. Free inconsistency");
254 avc->f.states |= CVFlushed;
256 /* This should put it back on the vnode free list since usecount is 1 */
259 if (VREFCOUNT_GT(avc,0)) {
260 #if defined(AFS_OSF_ENV)
261 VN_UNLOCK(AFSTOV(avc));
263 AFS_RELE(AFSTOV(avc));
265 if (afs_norefpanic) {
266 printf("flush vc refcnt < 1");
268 #if defined(AFS_OSF_ENV)
269 (void)vgone(avc, VX_NOSLEEP, NULL);
271 VN_UNLOCK(AFSTOV(avc));
274 osi_Panic("flush vc refcnt < 1");
276 #endif /* AFS_OSF_ENV */
281 VN_UNLOCK(AFSTOV(avc));
285 } /*afs_FlushVCache */
289 * The core of the inactive vnode op for all but IRIX.
295 afs_InactiveVCache(struct vcache *avc, struct AFS_UCRED *acred)
297 AFS_STATCNT(afs_inactive);
298 if (avc->f.states & CDirty) {
299 /* we can't keep trying to push back dirty data forever. Give up. */
300 afs_InvalidateAllSegments(avc); /* turns off dirty bit */
302 avc->f.states &= ~CMAPPED; /* mainly used by SunOS 4.0.x */
303 avc->f.states &= ~CDirty; /* Turn it off */
304 if (avc->f.states & CUnlinked) {
305 if (CheckLock(&afs_xvcache) || CheckLock(&afs_xdcache)) {
306 avc->f.states |= CUnlinkedDel;
309 afs_remunlink(avc, 1); /* ignore any return code */
316 * Allocate a callback return structure from the
317 * free list and return it.
319 * Environment: The alloc and free routines are both called with the afs_xvcb lock
320 * held, so we don't have to worry about blocking in osi_Alloc.
322 * \return The allocated afs_cbr.
324 static struct afs_cbr *afs_cbrSpace = 0;
328 register struct afs_cbr *tsp;
331 while (!afs_cbrSpace) {
332 if (afs_stats_cmperf.CallBackAlloced >= 2) {
333 /* don't allocate more than 2 * AFS_NCBRS for now */
335 afs_stats_cmperf.CallBackFlushes++;
339 (struct afs_cbr *)afs_osi_Alloc(AFS_NCBRS *
340 sizeof(struct afs_cbr));
341 for (i = 0; i < AFS_NCBRS - 1; i++) {
342 tsp[i].next = &tsp[i + 1];
344 tsp[AFS_NCBRS - 1].next = 0;
346 afs_stats_cmperf.CallBackAlloced++;
350 afs_cbrSpace = tsp->next;
355 * Free a callback return structure, removing it from all lists.
357 * Environment: the xvcb lock is held over these calls.
359 * \param asp The address of the structure to free.
364 afs_FreeCBR(register struct afs_cbr *asp)
366 *(asp->pprev) = asp->next;
368 asp->next->pprev = asp->pprev;
370 *(asp->hash_pprev) = asp->hash_next;
372 asp->hash_next->hash_pprev = asp->hash_pprev;
374 asp->next = afs_cbrSpace;
380 * Flush all queued callbacks to all servers.
382 * Environment: holds xvcb lock over RPC to guard against race conditions
383 * when a new callback is granted for the same file later on.
385 * \return 0 for success.
388 afs_FlushVCBs(afs_int32 lockit)
390 struct AFSFid *tfids;
391 struct AFSCallBack callBacks[1];
392 struct AFSCBFids fidArray;
393 struct AFSCBs cbArray;
395 struct afs_cbr *tcbrp;
399 struct vrequest treq;
401 int safety1, safety2, safety3;
403 if ((code = afs_InitReq(&treq, afs_osi_credp)))
405 treq.flags |= O_NONBLOCK;
406 tfids = afs_osi_Alloc(sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
409 MObtainWriteLock(&afs_xvcb, 273);
410 ObtainReadLock(&afs_xserver);
411 for (i = 0; i < NSERVERS; i++) {
412 for (safety1 = 0, tsp = afs_servers[i];
413 tsp && safety1 < afs_totalServers + 10;
414 tsp = tsp->next, safety1++) {
416 if (tsp->cbrs == (struct afs_cbr *)0)
419 /* otherwise, grab a block of AFS_MAXCBRSCALL from the list
420 * and make an RPC, over and over again.
422 tcount = 0; /* number found so far */
423 for (safety2 = 0; safety2 < afs_cacheStats; safety2++) {
424 if (tcount >= AFS_MAXCBRSCALL || !tsp->cbrs) {
425 /* if buffer is full, or we've queued all we're going
426 * to from this server, we should flush out the
429 fidArray.AFSCBFids_len = tcount;
430 fidArray.AFSCBFids_val = (struct AFSFid *)tfids;
431 cbArray.AFSCBs_len = 1;
432 cbArray.AFSCBs_val = callBacks;
433 memset(&callBacks[0], 0, sizeof(callBacks[0]));
434 callBacks[0].CallBackType = CB_EXCLUSIVE;
435 for (safety3 = 0; safety3 < MAXHOSTS * 2; safety3++) {
436 tc = afs_ConnByHost(tsp, tsp->cell->fsport,
437 tsp->cell->cellNum, &treq, 0,
441 (AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS);
444 RXAFS_GiveUpCallBacks(tc->id, &fidArray,
452 AFS_STATS_FS_RPCIDX_GIVEUPCALLBACKS, SHARED_LOCK,
457 /* ignore return code, since callbacks may have
458 * been returned anyway, we shouldn't leave them
459 * around to be returned again.
461 * Next, see if we are done with this server, and if so,
462 * break to deal with the next one.
468 /* if to flush full buffer */
469 /* if we make it here, we have an entry at the head of cbrs,
470 * which we should copy to the file ID array and then free.
473 tfids[tcount++] = tcbrp->fid;
475 /* Freeing the CBR will unlink it from the server's CBR list */
477 } /* while loop for this one server */
478 if (safety2 > afs_cacheStats) {
479 afs_warn("possible internal error afs_flushVCBs (%d)\n",
482 } /* for loop for this hash chain */
483 } /* loop through all hash chains */
484 if (safety1 > afs_totalServers + 2) {
486 ("AFS internal error (afs_flushVCBs) (%d > %d), continuing...\n",
487 safety1, afs_totalServers + 2);
489 osi_Panic("afs_flushVCBS safety1");
492 ReleaseReadLock(&afs_xserver);
494 MReleaseWriteLock(&afs_xvcb);
495 afs_osi_Free(tfids, sizeof(struct AFSFid) * AFS_MAXCBRSCALL);
500 * Queue a callback on the given fid.
503 * Locks the xvcb lock.
504 * Called when the xvcache lock is already held.
506 * \param avc vcache entry
507 * \return 0 for success, < 0 otherwise.
511 afs_QueueVCB(struct vcache *avc)
514 struct afs_cbr *tcbp;
516 AFS_STATCNT(afs_QueueVCB);
517 /* The callback is really just a struct server ptr. */
518 tsp = (struct server *)(avc->callback);
520 /* we now have a pointer to the server, so we just allocate
521 * a queue entry and queue it.
523 MObtainWriteLock(&afs_xvcb, 274);
524 tcbp = afs_AllocCBR();
525 tcbp->fid = avc->f.fid.Fid;
527 tcbp->next = tsp->cbrs;
529 tsp->cbrs->pprev = &tcbp->next;
532 tcbp->pprev = &tsp->cbrs;
534 afs_InsertHashCBR(tcbp);
536 /* now release locks and return */
537 MReleaseWriteLock(&afs_xvcb);
543 * Remove a queued callback for a given Fid.
546 * Locks xvcb and xserver locks.
547 * Typically called with xdcache, xvcache and/or individual vcache
550 * \param afid The fid we want cleansed of queued callbacks.
555 afs_RemoveVCB(struct VenusFid *afid)
558 struct afs_cbr *cbr, *ncbr;
560 AFS_STATCNT(afs_RemoveVCB);
561 MObtainWriteLock(&afs_xvcb, 275);
563 slot = afs_HashCBRFid(&afid->Fid);
564 ncbr = afs_cbrHashT[slot];
568 ncbr = cbr->hash_next;
570 if (afid->Fid.Volume == cbr->fid.Volume &&
571 afid->Fid.Vnode == cbr->fid.Vnode &&
572 afid->Fid.Unique == cbr->fid.Unique) {
577 MReleaseWriteLock(&afs_xvcb);
581 afs_FlushReclaimedVcaches(void)
583 #if !defined(AFS_LINUX22_ENV)
586 struct vcache *tmpReclaimedVCList = NULL;
588 ObtainWriteLock(&afs_xvreclaim, 76);
589 while (ReclaimedVCList) {
590 tvc = ReclaimedVCList; /* take from free list */
591 ReclaimedVCList = tvc->nextfree;
592 tvc->nextfree = NULL;
593 code = afs_FlushVCache(tvc, &fv_slept);
595 /* If the flush failed (code != 0) we must not simply drop the entry,
596 * or we would leak it.  Park it on a temporary list and put everything
597 * back on ReclaimedVCList once we reach the end of the list. */
598 /* This is a stopgap; we probably need a smarter way to recover from
599 * a failed flush here. */
600 tvc->nextfree = tmpReclaimedVCList;
601 tmpReclaimedVCList = tvc;
602 printf("Reclaim list flush %lx failed: %d\n", (unsigned long) tvc, code);
604 if (tvc->f.states & (CVInit
605 #ifdef AFS_DARWIN80_ENV
609 tvc->f.states &= ~(CVInit
610 #ifdef AFS_DARWIN80_ENV
614 afs_osi_Wakeup(&tvc->f.states);
617 if (tmpReclaimedVCList)
618 ReclaimedVCList = tmpReclaimedVCList;
620 ReleaseWriteLock(&afs_xvreclaim);
625 * This routine is responsible for allocating a new cache entry
626 * from the free list. It formats the cache entry and inserts it
627 * into the appropriate hash tables. It must be called with
628 * afs_xvcache write-locked so as to prevent several processes from
629 * trying to create a new cache entry simultaneously.
631 * LOCK: afs_NewVCache afs_xvcache W
633 * \param afid The file id of the file whose cache entry is being created.
635 * \return The new vcache struct.
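 *
 * A minimal sketch of how this is used later in this file (see afs_GetVCache
 * and afs_LookupVCache below); nothing here is new API:
 *
 *     UpgradeSToWLock(&afs_xvcache, 21);
 *     tvc = afs_NewVCache(afid, NULL);
 *     if (tvc) {
 *         newvcache = 1;
 *         ConvertWToSLock(&afs_xvcache);
 *     } else {
 *         ReleaseSharedLock(&afs_xvcache);
 *         return NULL;
 *     }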
638 afs_NewVCache(struct VenusFid *afid, struct server *serverp)
642 afs_int32 anumber = VCACHE_FREE;
644 struct gnode *gnodepnt;
648 #endif /* AFS_OSF_ENV */
649 struct afs_q *tq, *uq;
652 AFS_STATCNT(afs_NewVCache);
654 afs_FlushReclaimedVcaches();
656 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
657 # if defined(AFS_OSF30_ENV) || defined(AFS_LINUX22_ENV)
658 if (afs_vcount >= afs_maxvcount)
661 * If we are using > 33% of the total system vnodes for AFS vcache
662 * entries or we are using the maximum number of vcache entries,
663 * then free some. (if our usage is > 33% we should free some, if
664 * our usage is > afs_maxvcount, set elsewhere to 0.5*nvnode,
665 * we _must_ free some -- no choice).
667 if (((3 * afs_vcount) > nvnode) || (afs_vcount >= afs_maxvcount))
674 for (tq = VLRU.prev; tq != &VLRU && anumber > 0; tq = uq) {
677 if (tvc->f.states & CVFlushed) {
678 refpanic("CVFlushed on VLRU");
679 } else if (i++ > afs_maxvcount) {
680 refpanic("Exceeded pool of AFS vnodes(VLRU cycle?)");
681 } else if (QNext(uq) != tq) {
682 refpanic("VLRU inconsistent");
683 } else if (!VREFCOUNT_GT(tvc,0)) {
684 refpanic("refcnt 0 on VLRU");
687 # if defined(AFS_LINUX22_ENV)
688 if (tvc != afs_globalVp && VREFCOUNT(tvc) > 1 && tvc->opens == 0) {
689 struct dentry *dentry;
690 struct list_head *cur, *head;
692 # if defined(AFS_LINUX24_ENV)
693 spin_lock(&dcache_lock);
694 # endif /* AFS_LINUX24_ENV */
695 head = &(AFSTOV(tvc))->i_dentry;
699 while ((cur = cur->next) != head) {
700 dentry = list_entry(cur, struct dentry, d_alias);
702 if (d_unhashed(dentry))
707 # if defined(AFS_LINUX24_ENV)
708 spin_unlock(&dcache_lock);
709 # endif /* AFS_LINUX24_ENV */
710 if (d_invalidate(dentry) == -EBUSY) {
712 /* perhaps lock and try to continue? (use cur as head?) */
716 # if defined(AFS_LINUX24_ENV)
717 spin_lock(&dcache_lock);
718 # endif /* AFS_LINUX24_ENV */
721 # if defined(AFS_LINUX24_ENV)
722 spin_unlock(&dcache_lock);
723 # endif /* AFS_LINUX24_ENV */
727 # endif /* AFS_LINUX22_ENV */
729 if (VREFCOUNT_GT(tvc,0) && !VREFCOUNT_GT(tvc,1) &&
731 && (tvc->f.states & CUnlinkedDel) == 0) {
732 code = afs_FlushVCache(tvc, &fv_slept);
739 continue; /* start over - may have raced. */
745 if (anumber == VCACHE_FREE) {
746 printf("afs_NewVCache: warning none freed, using %d of %d\n",
747 afs_vcount, afs_maxvcount);
748 if (afs_vcount >= afs_maxvcount) {
749 printf("afs_NewVCache - none freed\n");
753 } /* finished freeing up space */
755 /* Alloc new vnode. */
756 #if defined(AFS_LINUX22_ENV)
761 ip = new_inode(afs_globalVFS);
763 osi_Panic("afs_NewVCache: no more inodes");
765 # if defined(STRUCT_SUPER_HAS_ALLOC_INODE)
768 tvc = afs_osi_Alloc(sizeof(struct vcache));
769 ip->u.generic_ip = tvc;
775 if (getnewvnode(MOUNT_AFS, &Afs_vnodeops, &nvc)) {
776 /* What should we do ???? */
777 osi_Panic("afs_NewVCache: no more vnodes");
782 tvc->nextfree = NULL;
786 /* If we create a new inode, we either give it a new slot number,
787 * or if one's available, use a slot number from the slot free list
789 if (afs_freeSlotList != NULL) {
790 struct afs_slotlist *tmp;
792 tvc->diskSlot = afs_freeSlotList->slot;
793 tmp = afs_freeSlotList;
794 afs_freeSlotList = tmp->next;
795 afs_osi_Free(tmp, sizeof(struct afs_slotlist));
797 tvc->diskSlot = afs_nextVcacheSlot++;
800 #else /* AFS_OSF_ENV || AFS_LINUX22_ENV */
801 /* pull out a free cache entry */
805 for (tq = VLRU.prev; (anumber > 0) && (tq != &VLRU); tq = uq) {
809 if (tvc->f.states & CVFlushed) {
810 refpanic("CVFlushed on VLRU");
811 } else if (i++ > 2 * afs_cacheStats) { /* even allowing for a few xallocs... */
812 refpanic("Increase -stat parameter of afsd(VLRU cycle?)");
813 } else if (QNext(uq) != tq) {
814 refpanic("VLRU inconsistent");
815 } else if (tvc->f.states & CVInit) {
819 if (!VREFCOUNT_GT(tvc,0)
820 # if defined(AFS_DARWIN_ENV) && !defined(UKERNEL) && !defined(AFS_DARWIN80_ENV)
821 || ((VREFCOUNT(tvc) == 1) &&
822 (UBCINFOEXISTS(AFSTOV(tvc))))
824 && tvc->opens == 0 && (tvc->f.states & CUnlinkedDel) == 0) {
825 # if defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV)
826 # ifdef AFS_DARWIN80_ENV
827 vnode_t tvp = AFSTOV(tvc);
828 /* VREFCOUNT_GT only sees usecounts, not iocounts */
829 /* so this may fail to actually recycle the vnode now */
830 /* must call vnode_get to avoid races. */
832 if (vnode_get(tvp) == 0) {
834 /* must release lock, since vnode_put will immediately
835 reclaim if there are no other users */
836 ReleaseWriteLock(&afs_xvcache);
841 ObtainWriteLock(&afs_xvcache, 336);
843 /* we can't use the vnode_recycle return value to figure
844 * this out, since the iocount we have to hold makes it
846 if (AFSTOV(tvc) == tvp) {
847 if (anumber > 0 && fv_slept) {
848 QRemove(&tvc->vlruq);
849 QAdd(&VLRU, &tvc->vlruq);
854 # else /* AFS_DARWIN80_ENV */
856 * vgone() reclaims the vnode, which calls afs_FlushVCache(),
857 * then it puts the vnode on the free list.
858 * If we don't do this we end up with a cleaned vnode that's
859 * not on the free list.
860 * XXX assume FreeBSD is the same for now.
868 # else /* AFS_DARWIN80_ENV || AFS_XBSD_ENV */
869 code = afs_FlushVCache(tvc, &fv_slept);
870 # endif /* AFS_DARWIN80_ENV || AFS_XBSD_ENV */
879 continue; /* start over - may have raced. */
885 } /* end of if (!freeVCList) */
888 /* none free, making one is better than a panic */
889 afs_stats_cmperf.vcacheXAllocs++; /* count in case we have a leak */
890 tvc = (struct vcache *)afs_osi_Alloc(sizeof(struct vcache));
891 # if defined(AFS_DARWIN_ENV) && !defined(UKERNEL)
892 tvc->v = NULL; /* important to clean this, or use memset 0 */
894 # ifdef KERNEL_HAVE_PIN
895 pin((char *)tvc, sizeof(struct vcache)); /* XXX */
898 #ifdef AFS_DISCON_ENV
899 /* If we create a new inode, we either give it a new slot number,
900 * or if one's available, use a slot number from the slot free list
902 if (afs_freeSlotList != NULL) {
903 struct afs_slotlist *tmp;
905 tvc->diskSlot = afs_freeSlotList->slot;
906 tmp = afs_freeSlotList;
907 afs_freeSlotList = tmp->next;
908 afs_osi_Free(tmp, sizeof(struct afs_slotlist));
910 tvc->diskSlot = afs_nextVcacheSlot++;
913 # if defined(AFS_SGI_ENV)
915 char name[METER_NAMSZ];
916 memset(tvc, 0, sizeof(struct vcache));
917 tvc->v.v_number = ++afsvnumbers;
918 tvc->vc_rwlockid = OSI_NO_LOCKID;
919 initnsema(&tvc->vc_rwlock, 1,
920 makesname(name, "vrw", tvc->v.v_number));
921 #ifndef AFS_SGI53_ENV
922 initnsema(&tvc->v.v_sync, 0,
923 makesname(name, "vsy", tvc->v.v_number));
925 #ifndef AFS_SGI62_ENV
926 initnlock(&tvc->v.v_lock,
927 makesname(name, "vlk", tvc->v.v_number));
930 #endif /* AFS_SGI_ENV */
932 tvc = freeVCList; /* take from free list */
933 freeVCList = tvc->nextfree;
934 tvc->nextfree = NULL;
935 } /* end of if (!freeVCList) */
937 #endif /* AFS_OSF_ENV */
939 #if defined(AFS_XBSD_ENV) || defined(AFS_DARWIN_ENV)
941 panic("afs_NewVCache(): free vcache with vnode attached");
944 #if !defined(AFS_SGI_ENV) && !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
947 /* We need to preserve the slot that we're being stored into on
951 slot = tvc->diskSlot;
952 memset((char *)tvc, 0, sizeof(struct vcache));
953 tvc->diskSlot = slot;
956 memset((char *)tvc, 0, sizeof(struct vcache));
961 memset(&(tvc->f), 0, sizeof(struct fvcache));
964 AFS_RWLOCK_INIT(&tvc->lock, "vcache lock");
965 #if defined(AFS_SUN5_ENV)
966 AFS_RWLOCK_INIT(&tvc->vlock, "vcache vlock");
967 #endif /* defined(AFS_SUN5_ENV) */
970 tvc->linkData = NULL;
973 tvc->execsOrWriters = 0;
975 tvc->f.states = CVInit;
976 tvc->last_looker = 0;
978 tvc->asynchrony = -1;
981 tvc->flushDV.low = tvc->flushDV.high = AFS_MAXDV;
984 tvc->f.truncPos = AFS_NOTRUNC; /* don't truncate until we need to */
985 hzero(tvc->f.m.DataVersion); /* in case we copy it into flushDV */
987 tvc->callback = serverp; /* to minimize chance that clear
989 #if defined(AFS_DISCON_ENV)
990 QZero(&tvc->metadirty);
996 tvc->hnext = afs_vhashT[i];
998 QAdd(&afs_vhashTV[j], &tvc->vhashq);
1000 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
1001 refpanic("NewVCache VLRU inconsistent");
1003 QAdd(&VLRU, &tvc->vlruq); /* put in lruq */
1004 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
1005 refpanic("NewVCache VLRU inconsistent2");
1007 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
1008 refpanic("NewVCache VLRU inconsistent3");
1010 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
1011 refpanic("NewVCache VLRU inconsistent4");
1014 /* it should now be safe to drop the xvcache lock */
1016 ReleaseWriteLock(&afs_xvcache);
1018 afs_nbsd_getnewvnode(tvc); /* includes one refcount */
1020 ObtainWriteLock(&afs_xvcache,337);
1021 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
1023 #ifdef AFS_DARWIN_ENV
1024 ReleaseWriteLock(&afs_xvcache);
1026 afs_darwin_getnewvnode(tvc); /* includes one refcount */
1028 ObtainWriteLock(&afs_xvcache,338);
1029 #ifdef AFS_DARWIN80_ENV
1030 LOCKINIT(tvc->rwlock);
1032 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
1039 ReleaseWriteLock(&afs_xvcache);
1041 #if defined(AFS_FBSD60_ENV)
1042 if (getnewvnode(MOUNT_AFS, afs_globalVFS, &afs_vnodeops, &vp))
1043 #elif defined(AFS_FBSD50_ENV)
1044 if (getnewvnode(MOUNT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
1046 if (getnewvnode(VT_AFS, afs_globalVFS, afs_vnodeop_p, &vp))
1048 panic("afs getnewvnode"); /* can't happen */
1050 ObtainWriteLock(&afs_xvcache,339);
1051 if (tvc->v != NULL) {
1052 /* I'd like to know if this ever happens...
1053 * We don't drop global for the rest of this function,
1054 * so if we do lose the race, the other thread should
1055 * have found the same vnode and finished initializing
1056 * the vcache entry. Is it conceivable that this vcache
1057 * entry could be recycled during this interval? If so,
1058 * then there probably needs to be some sort of additional
1059 * mutual exclusion (an Embryonic flag would suffice).
1061 printf("afs_NewVCache: lost the race\n");
1065 tvc->v->v_data = tvc;
1066 lockinit(&tvc->rwlock, PINOD, "vcache", 0, 0);
1070 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
1071 /* Hold it for the LRU (should make count 2) */
1072 VN_HOLD(AFSTOV(tvc));
1073 #else /* AFS_OSF_ENV */
1074 #if !(defined (AFS_DARWIN_ENV) || defined(AFS_XBSD_ENV))
1075 VREFCOUNT_SET(tvc, 1); /* us */
1076 #endif /* AFS_XBSD_ENV */
1077 #endif /* AFS_OSF_ENV */
1078 #ifdef AFS_AIX32_ENV
1079 LOCK_INIT(&tvc->pvmlock, "vcache pvmlock");
1080 tvc->vmh = tvc->segid = NULL;
1084 #if defined(AFS_CACHE_BYPASS)
1085 tvc->cachingStates = 0;
1086 tvc->cachingTransitions = 0;
1089 #ifdef AFS_BOZONLOCK_ENV
1090 #if defined(AFS_SUN5_ENV)
1091 rw_init(&tvc->rwlock, "vcache rwlock", RW_DEFAULT, NULL);
1093 #if defined(AFS_SUN55_ENV)
1094 /* This is required if the kaio (kernel asynchronous io)
1095 ** module is installed. Inside the kernel, the function
1096 ** check_vp( common/os/aio.c) checks to see if the kernel has
1097 ** to provide asynchronous io for this vnode. This
1098 ** function extracts the device number by following the
1099 ** v_data field of the vnode. If we do not set this field
1100 ** then the system panics. The value of the v_data field
1101 ** is not really important for AFS vnodes because the kernel
1102 ** does not do asynchronous io for regular files. Hence,
1103 ** for the time being, we fill up the v_data field with the
1104 ** vnode pointer itself. */
1105 tvc->v.v_data = (char *)tvc;
1106 #endif /* AFS_SUN55_ENV */
1108 afs_BozonInit(&tvc->pvnLock, tvc);
1111 /* initialize vnode data, note vrefCount is v.v_count */
1113 /* Don't forget to free the gnode space */
1114 tvc->v.v_gnode = gnodepnt =
1115 (struct gnode *)osi_AllocSmallSpace(sizeof(struct gnode));
1116 memset((char *)gnodepnt, 0, sizeof(struct gnode));
1118 #ifdef AFS_SGI64_ENV
1119 memset((void *)&(tvc->vc_bhv_desc), 0, sizeof(tvc->vc_bhv_desc));
1120 bhv_desc_init(&(tvc->vc_bhv_desc), tvc, tvc, &Afs_vnodeops);
1121 #ifdef AFS_SGI65_ENV
1122 vn_bhv_head_init(&(tvc->v.v_bh), "afsvp");
1123 vn_bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
1125 bhv_head_init(&(tvc->v.v_bh));
1126 bhv_insert_initial(&(tvc->v.v_bh), &(tvc->vc_bhv_desc));
1128 #ifdef AFS_SGI65_ENV
1129 tvc->v.v_mreg = tvc->v.v_mregb = (struct pregion *)tvc;
1130 #ifdef VNODE_TRACING
1131 tvc->v.v_trace = ktrace_alloc(VNODE_TRACE_SIZE, 0);
1133 init_bitlock(&tvc->v.v_pcacheflag, VNODE_PCACHE_LOCKBIT, "afs_pcache",
1135 init_mutex(&tvc->v.v_filocksem, MUTEX_DEFAULT, "afsvfl", (long)tvc);
1136 init_mutex(&tvc->v.v_buf_lock, MUTEX_DEFAULT, "afsvnbuf", (long)tvc);
1138 vnode_pcache_init(&tvc->v);
1139 #if defined(DEBUG) && defined(VNODE_INIT_BITLOCK)
1140 /* Above define is never true except in SGI test kernels. */
1141 init_bitlock(&(tvc->v.v_flag), VLOCK, "vnode", tvc->v.v_number);
1143 #ifdef INTR_KTHREADS
1144 AFS_VN_INIT_BUF_LOCK(&(tvc->v));
1147 SetAfsVnode(AFSTOV(tvc));
1148 #endif /* AFS_SGI64_ENV */
1150 * The proper value for mvstat (for root fids) is set up by the caller.
1153 if (afid->Fid.Vnode == 1 && afid->Fid.Unique == 1)
1155 if (afs_globalVFS == 0)
1156 osi_Panic("afs globalvfs");
1157 #if !defined(AFS_LINUX22_ENV)
1158 vSetVfsp(tvc, afs_globalVFS);
1160 vSetType(tvc, VREG);
1162 tvc->v.v_vfsnext = afs_globalVFS->vfs_vnodes; /* link off vfs */
1163 tvc->v.v_vfsprev = NULL;
1164 afs_globalVFS->vfs_vnodes = &tvc->v;
1165 if (tvc->v.v_vfsnext != NULL)
1166 tvc->v.v_vfsnext->v_vfsprev = &tvc->v;
1167 tvc->v.v_next = gnodepnt->gn_vnode; /*Single vnode per gnode for us! */
1168 gnodepnt->gn_vnode = &tvc->v;
1170 #if defined(AFS_DUX40_ENV)
1171 insmntque(tvc, afs_globalVFS, &afs_ubcops);
1174 /* Is this needed??? */
1175 insmntque(tvc, afs_globalVFS);
1176 #endif /* AFS_OSF_ENV */
1177 #endif /* AFS_DUX40_ENV */
1178 #ifdef AFS_FBSD70_ENV
1179 #ifndef AFS_FBSD80_ENV /* yup. they put it back. */
1180 insmntque(AFSTOV(tvc), afs_globalVFS);
1183 #if defined(AFS_SGI_ENV)
1184 VN_SET_DPAGES(&(tvc->v), (struct pfdat *)NULL);
1185 osi_Assert((tvc->v.v_flag & VINACT) == 0);
1187 osi_Assert(VN_GET_PGCNT(&(tvc->v)) == 0);
1188 osi_Assert(tvc->mapcnt == 0 && tvc->vc_locktrips == 0);
1189 osi_Assert(tvc->vc_rwlockid == OSI_NO_LOCKID);
1190 osi_Assert(tvc->v.v_filocks == NULL);
1191 #if !defined(AFS_SGI65_ENV)
1192 osi_Assert(tvc->v.v_filocksem == NULL);
1194 osi_Assert(tvc->cred == NULL);
1195 #ifdef AFS_SGI64_ENV
1196 vnode_pcache_reinit(&tvc->v);
1197 tvc->v.v_rdev = NODEV;
1199 vn_initlist((struct vnlist *)&tvc->v);
1201 #endif /* AFS_SGI_ENV */
1203 osi_dnlc_purgedp(tvc); /* this may be overkill */
1204 memset((char *)&(tvc->callsort), 0, sizeof(struct afs_q));
1206 tvc->f.states &=~ CVInit;
1207 afs_osi_Wakeup(&tvc->f.states);
1211 } /*afs_NewVCache */
1217 * LOCK: afs_FlushActiveVcaches afs_xvcache N
1219 * \param doflocks : Do we handle flocks?
1222 afs_FlushActiveVcaches(register afs_int32 doflocks)
1224 register struct vcache *tvc;
1226 register struct afs_conn *tc;
1227 register afs_int32 code;
1228 register struct AFS_UCRED *cred = NULL;
1229 struct vrequest treq, ureq;
1230 struct AFSVolSync tsync;
1233 AFS_STATCNT(afs_FlushActiveVcaches);
1234 ObtainReadLock(&afs_xvcache);
1235 for (i = 0; i < VCSIZE; i++) {
1236 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
1237 if (tvc->f.states & CVInit) continue;
1238 #ifdef AFS_DARWIN80_ENV
1239 if (tvc->f.states & CDeadVnode &&
1240 (tvc->f.states & (CCore|CUnlinkedDel) ||
1241 tvc->flockCount)) panic("Dead vnode has core/unlinkedel/flock");
1243 if (doflocks && tvc->flockCount != 0) {
1244 /* if this entry has an flock, send a keep-alive call out */
1246 ReleaseReadLock(&afs_xvcache);
1247 ObtainWriteLock(&tvc->lock, 51);
1249 afs_InitReq(&treq, afs_osi_credp);
1250 treq.flags |= O_NONBLOCK;
1252 tc = afs_Conn(&tvc->f.fid, &treq, SHARED_LOCK);
1254 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_EXTENDLOCK);
1257 RXAFS_ExtendLock(tc->id,
1258 (struct AFSFid *)&tvc->f.fid.Fid,
1264 } while (afs_Analyze
1265 (tc, code, &tvc->f.fid, &treq,
1266 AFS_STATS_FS_RPCIDX_EXTENDLOCK, SHARED_LOCK, NULL));
1268 ReleaseWriteLock(&tvc->lock);
1269 #ifdef AFS_DARWIN80_ENV
1271 ObtainReadLock(&afs_xvcache);
1273 ObtainReadLock(&afs_xvcache);
1278 if ((tvc->f.states & CCore) || (tvc->f.states & CUnlinkedDel)) {
1280 * Don't let it evaporate in case someone else is in
1281 * this code. Also, drop the afs_xvcache lock while
1282 * getting vcache locks.
1285 ReleaseReadLock(&afs_xvcache);
1286 #ifdef AFS_BOZONLOCK_ENV
1287 afs_BozonLock(&tvc->pvnLock, tvc);
1289 #if defined(AFS_SGI_ENV)
1291 * That's because if we come in via the CUnlinkedDel bit state path we'll have a 0 refcnt
1293 osi_Assert(VREFCOUNT_GT(tvc,0));
1294 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1296 ObtainWriteLock(&tvc->lock, 52);
1297 if (tvc->f.states & CCore) {
1298 tvc->f.states &= ~CCore;
1299 /* XXXX Find better place-holder for cred XXXX */
1300 cred = (struct AFS_UCRED *)tvc->linkData;
1301 tvc->linkData = NULL; /* XXX */
1302 afs_InitReq(&ureq, cred);
1303 afs_Trace2(afs_iclSetp, CM_TRACE_ACTCCORE,
1304 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
1305 tvc->execsOrWriters);
1306 code = afs_StoreOnLastReference(tvc, &ureq);
1307 ReleaseWriteLock(&tvc->lock);
1308 #ifdef AFS_BOZONLOCK_ENV
1309 afs_BozonUnlock(&tvc->pvnLock, tvc);
1311 hzero(tvc->flushDV);
1314 if (code && code != VNOVNODE) {
1315 afs_StoreWarn(code, tvc->f.fid.Fid.Volume,
1316 /* /dev/console */ 1);
1318 } else if (tvc->f.states & CUnlinkedDel) {
1322 ReleaseWriteLock(&tvc->lock);
1323 #ifdef AFS_BOZONLOCK_ENV
1324 afs_BozonUnlock(&tvc->pvnLock, tvc);
1326 #if defined(AFS_SGI_ENV)
1327 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1329 afs_remunlink(tvc, 0);
1330 #if defined(AFS_SGI_ENV)
1331 AFS_RWLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1334 /* lost (or won, perhaps) the race condition */
1335 ReleaseWriteLock(&tvc->lock);
1336 #ifdef AFS_BOZONLOCK_ENV
1337 afs_BozonUnlock(&tvc->pvnLock, tvc);
1340 #if defined(AFS_SGI_ENV)
1341 AFS_RWUNLOCK((vnode_t *) tvc, VRWLOCK_WRITE);
1343 #ifdef AFS_DARWIN80_ENV
1346 AFS_RELE(AFSTOV(tvc));
1347 /* Matches write code setting CCore flag */
1350 ObtainReadLock(&afs_xvcache);
1352 ObtainReadLock(&afs_xvcache);
1355 AFS_RELE(AFSTOV(tvc));
1356 /* Matches write code setting CCore flag */
1363 ReleaseReadLock(&afs_xvcache);
1384 * Make sure a cache entry is up-to-date status-wise.
1386 * NOTE: everywhere that calls this can potentially be sped up
1387 * by checking CStatd first, and avoiding doing the InitReq
1388 * if this is up-to-date.
1390 * These days, the only places that call this already know that the
1391 * vcache is not up-to-date, so we do not repeat that check here.
1393 * \param avc Pointer to vcache entry to verify.
1396 * \return 0 for success or other error codes.
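 *
 * A minimal sketch of the speed-up suggested in the NOTE above (treq and
 * acred are hypothetical caller-side names):
 *
 *     if (!(avc->f.states & CStatd)) {
 *         code = afs_InitReq(&treq, acred);
 *         if (!code)
 *             code = afs_VerifyVCache2(avc, &treq);
 *     }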
1399 afs_VerifyVCache2(struct vcache *avc, struct vrequest *areq)
1401 register struct vcache *tvc;
1403 AFS_STATCNT(afs_VerifyVCache);
1405 #if defined(AFS_OSF_ENV)
1406 ObtainReadLock(&avc->lock);
1407 if (afs_IsWired(avc)) {
1408 ReleaseReadLock(&avc->lock);
1411 ReleaseReadLock(&avc->lock);
1412 #endif /* AFS_OSF_ENV */
1413 /* otherwise we must fetch the status info */
1415 ObtainWriteLock(&avc->lock, 53);
1416 if (avc->f.states & CStatd) {
1417 ReleaseWriteLock(&avc->lock);
1420 ObtainWriteLock(&afs_xcbhash, 461);
1421 avc->f.states &= ~(CStatd | CUnique);
1422 avc->callback = NULL;
1423 afs_DequeueCallback(avc);
1424 ReleaseWriteLock(&afs_xcbhash);
1425 ReleaseWriteLock(&avc->lock);
1427 /* since we've been called back, or the callback has expired,
1428 * it's possible that the contents of this directory, or this
1429 * file's name have changed, thus invalidating the dnlc contents.
1431 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
1432 osi_dnlc_purgedp(avc);
1434 osi_dnlc_purgevp(avc);
1436 /* fetch the status info */
1437 tvc = afs_GetVCache(&avc->f.fid, areq, NULL, avc);
1440 /* Put it back; caller has already incremented vrefCount */
1444 } /*afs_VerifyVCache */
1448 * Simple copy of stat info into cache.
1450 * Callers: as of 1992-04-29, only called by WriteVCache
1452 * \param avc Ptr to vcache entry involved.
1453 * \param astat Ptr to stat info to copy.
1457 afs_SimpleVStat(register struct vcache *avc,
1458 register struct AFSFetchStatus *astat, struct vrequest *areq)
1461 AFS_STATCNT(afs_SimpleVStat);
1464 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1465 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1467 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
1469 #ifdef AFS_64BIT_CLIENT
1470 FillInt64(length, astat->Length_hi, astat->Length);
1471 #else /* AFS_64BIT_CLIENT */
1472 length = astat->Length;
1473 #endif /* AFS_64BIT_CLIENT */
1474 #if defined(AFS_SGI_ENV)
1475 osi_Assert((valusema(&avc->vc_rwlock) <= 0)
1476 && (OSI_GET_LOCKID() == avc->vc_rwlockid));
1477 if (length < avc->f.m.Length) {
1478 vnode_t *vp = (vnode_t *) avc;
1480 osi_Assert(WriteLocked(&avc->lock));
1481 ReleaseWriteLock(&avc->lock);
1483 PTOSSVP(vp, (off_t) length, (off_t) MAXLONG);
1485 ObtainWriteLock(&avc->lock, 67);
1488 /* if writing the file, don't fetch over this value */
1489 afs_Trace3(afs_iclSetp, CM_TRACE_SIMPLEVSTAT, ICL_TYPE_POINTER, avc,
1490 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
1491 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1492 avc->f.m.Length = length;
1493 avc->f.m.Date = astat->ClientModTime;
1495 avc->f.m.Owner = astat->Owner;
1496 avc->f.m.Group = astat->Group;
1497 avc->f.m.Mode = astat->UnixModeBits;
1498 if (vType(avc) == VREG) {
1499 avc->f.m.Mode |= S_IFREG;
1500 } else if (vType(avc) == VDIR) {
1501 avc->f.m.Mode |= S_IFDIR;
1502 } else if (vType(avc) == VLNK) {
1503 avc->f.m.Mode |= S_IFLNK;
1504 if ((avc->f.m.Mode & 0111) == 0)
1507 if (avc->f.states & CForeign) {
1508 struct axscache *ac;
1509 avc->f.anyAccess = astat->AnonymousAccess;
1511 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1513 * Caller has at least one bit not covered by anonymous, and
1514 * thus may have interesting rights.
1516 * HOWEVER, this is a really bad idea, because any access query
1517 * for bits which aren't covered by anonymous, on behalf of a user
1518 * who doesn't have any special rights, will result in an answer of
1519 * the form "I don't know, let's make a FetchStatus RPC and find out!"
1520 * It's an especially bad idea under Ultrix, since (due to the lack of
1521 * a proper access() call) it must perform several afs_access() calls
1522 * in order to create magic mode bits that vary according to who makes
1523 * the call. In other words, _every_ stat() generates a test for
1526 #endif /* badidea */
1527 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1528 ac->axess = astat->CallerAccess;
1529 else /* not found, add a new one if possible */
1530 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1533 } /*afs_SimpleVStat */
1537 * Store the status info *only* back to the server for a
1540 * Environment: Must be called with a shared lock held on the vnode.
1542 * \param avc Ptr to the vcache entry.
1543 * \param astatus Ptr to the status info to store.
1544 * \param areq Ptr to the associated vrequest.
1546 * \return Operation status.
1550 afs_WriteVCache(register struct vcache *avc,
1551 register struct AFSStoreStatus *astatus,
1552 struct vrequest *areq)
1555 struct afs_conn *tc;
1556 struct AFSFetchStatus OutStatus;
1557 struct AFSVolSync tsync;
1559 AFS_STATCNT(afs_WriteVCache);
1560 afs_Trace2(afs_iclSetp, CM_TRACE_WVCACHE, ICL_TYPE_POINTER, avc,
1561 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
1563 tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK);
1565 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STORESTATUS);
1568 RXAFS_StoreStatus(tc->id, (struct AFSFid *)&avc->f.fid.Fid,
1569 astatus, &OutStatus, &tsync);
1574 } while (afs_Analyze
1575 (tc, code, &avc->f.fid, areq, AFS_STATS_FS_RPCIDX_STORESTATUS,
1576 SHARED_LOCK, NULL));
1578 UpgradeSToWLock(&avc->lock, 20);
1580 /* success, do the changes locally */
1581 afs_SimpleVStat(avc, &OutStatus, areq);
1583 * Update the date, too. SimpleVStat didn't do this, since
1584 * it thought we were doing this after fetching new status
1585 * over a file being written.
1587 avc->f.m.Date = OutStatus.ClientModTime;
1589 /* failure, set up to check with server next time */
1590 ObtainWriteLock(&afs_xcbhash, 462);
1591 afs_DequeueCallback(avc);
1592 avc->f.states &= ~(CStatd | CUnique); /* turn off stat valid flag */
1593 ReleaseWriteLock(&afs_xcbhash);
1594 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
1595 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
1597 ConvertWToSLock(&avc->lock);
1600 } /*afs_WriteVCache */
1601 #if defined(AFS_DISCON_ENV)
1604 * Store status info only locally, set the proper disconnection flags
1605 * and add to dirty list.
1607 * \param avc The vcache to be written locally.
1608 * \param astatus Get attr fields from local store.
1609 * \param attrs Only the va_size field of this is used.
1611 * \note Must be called with a shared lock on the vnode
1613 int afs_WriteVCacheDiscon(register struct vcache *avc,
1614 register struct AFSStoreStatus *astatus,
1615 struct vattr *attrs)
1618 afs_int32 flags = 0;
1620 UpgradeSToWLock(&avc->lock, 700);
1622 if (!astatus->Mask) {
1628 /* Set attributes. */
1629 if (astatus->Mask & AFS_SETMODTIME) {
1630 avc->f.m.Date = astatus->ClientModTime;
1631 flags |= VDisconSetTime;
1634 if (astatus->Mask & AFS_SETOWNER) {
1635 printf("Not allowed yet. \n");
1636 //avc->f.m.Owner = astatus->Owner;
1639 if (astatus->Mask & AFS_SETGROUP) {
1640 printf("Not allowed yet. \n");
1641 //avc->f.m.Group = astatus->Group;
1644 if (astatus->Mask & AFS_SETMODE) {
1645 avc->f.m.Mode = astatus->UnixModeBits;
1647 #if 0 /* XXX: Leaving this out, so it doesn't mess up the file type flag.*/
1649 if (vType(avc) == VREG) {
1650 avc->f.m.Mode |= S_IFREG;
1651 } else if (vType(avc) == VDIR) {
1652 avc->f.m.Mode |= S_IFDIR;
1653 } else if (vType(avc) == VLNK) {
1654 avc->f.m.Mode |= S_IFLNK;
1655 if ((avc->f.m.Mode & 0111) == 0)
1659 flags |= VDisconSetMode;
1660 } /* if(astatus.Mask & AFS_SETMODE) */
1662 } /* if (!astatus->Mask) */
1664 if (attrs->va_size > 0) {
1665 /* XXX: Do I need more checks? */
1666 /* Truncation operation. */
1667 flags |= VDisconTrunc;
1671 afs_DisconAddDirty(avc, flags, 1);
1673 /* XXX: How about the rest of the fields? */
1675 ConvertWToSLock(&avc->lock);
1683 * Copy astat block into vcache info
1685 * \note This code may get dataversion and length out of sync if the file has
1686 * been modified. This is less than ideal. I haven't thought about it sufficiently
1687 * to be certain that it is adequate.
1689 * \note Environment: Must be called under a write lock
1691 * \param avc Ptr to vcache entry.
1692 * \param astat Ptr to stat block to copy in.
1693 * \param areq Ptr to associated request.
1696 afs_ProcessFS(register struct vcache *avc,
1697 register struct AFSFetchStatus *astat, struct vrequest *areq)
1700 AFS_STATCNT(afs_ProcessFS);
1702 #ifdef AFS_64BIT_CLIENT
1703 FillInt64(length, astat->Length_hi, astat->Length);
1704 #else /* AFS_64BIT_CLIENT */
1705 length = astat->Length;
1706 #endif /* AFS_64BIT_CLIENT */
1707 /* WARNING: afs_DoBulkStat uses the Length field to store a sequence
1708 * number for each bulk status request. Under no circumstances
1709 * should afs_DoBulkStat store a sequence number if the new
1710 * length will be ignored when afs_ProcessFS is called with
1711 * new stats. If you change the following conditional then you
1712 * also need to change the conditional in afs_DoBulkStat. */
1714 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)
1715 && !AFS_VN_MAPPED((vnode_t *) avc)) {
1717 if ((avc->execsOrWriters <= 0) && !afs_DirtyPages(avc)) {
1719 /* if we're writing or mapping this file, don't fetch over these
1722 afs_Trace3(afs_iclSetp, CM_TRACE_PROCESSFS, ICL_TYPE_POINTER, avc,
1723 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
1724 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(length));
1725 avc->f.m.Length = length;
1726 avc->f.m.Date = astat->ClientModTime;
1728 hset64(avc->f.m.DataVersion, astat->dataVersionHigh, astat->DataVersion);
1729 avc->f.m.Owner = astat->Owner;
1730 avc->f.m.Mode = astat->UnixModeBits;
1731 avc->f.m.Group = astat->Group;
1732 avc->f.m.LinkCount = astat->LinkCount;
1733 if (astat->FileType == File) {
1734 vSetType(avc, VREG);
1735 avc->f.m.Mode |= S_IFREG;
1736 } else if (astat->FileType == Directory) {
1737 vSetType(avc, VDIR);
1738 avc->f.m.Mode |= S_IFDIR;
1739 } else if (astat->FileType == SymbolicLink) {
1740 if (afs_fakestat_enable && (avc->f.m.Mode & 0111) == 0) {
1741 vSetType(avc, VDIR);
1742 avc->f.m.Mode |= S_IFDIR;
1744 vSetType(avc, VLNK);
1745 avc->f.m.Mode |= S_IFLNK;
1747 if ((avc->f.m.Mode & 0111) == 0) {
1751 avc->f.anyAccess = astat->AnonymousAccess;
1753 if ((astat->CallerAccess & ~astat->AnonymousAccess))
1755 * Caller has at least one bit not covered by anonymous, and
1756 * thus may have interesting rights.
1758 * HOWEVER, this is a really bad idea, because any access query
1759 * for bits which aren't covered by anonymous, on behalf of a user
1760 * who doesn't have any special rights, will result in an answer of
1761 * the form "I don't know, let's make a FetchStatus RPC and find out!"
1762 * It's an especially bad idea under Ultrix, since (due to the lack of
1763 * a proper access() call) it must perform several afs_access() calls
1764 * in order to create magic mode bits that vary according to who makes
1765 * the call. In other words, _every_ stat() generates a test for
1768 #endif /* badidea */
1770 struct axscache *ac;
1771 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
1772 ac->axess = astat->CallerAccess;
1773 else /* not found, add a new one if possible */
1774 afs_AddAxs(avc->Access, areq->uid, astat->CallerAccess);
1776 } /*afs_ProcessFS */
1780 * Get fid from server.
1783 * \param areq Request to be passed on.
1784 * \param name Name of the directory entry to look up.
1785 * \param OutStatus Fetch status.
1790 * \return Success status of operation.
1793 afs_RemoteLookup(register struct VenusFid *afid, struct vrequest *areq,
1794 char *name, struct VenusFid *nfid,
1795 struct AFSFetchStatus *OutStatusp,
1796 struct AFSCallBack *CallBackp, struct server **serverp,
1797 struct AFSVolSync *tsyncp)
1801 register struct afs_conn *tc;
1802 struct AFSFetchStatus OutDirStatus;
1805 name = ""; /* XXX */
1807 tc = afs_Conn(afid, areq, SHARED_LOCK);
1810 *serverp = tc->srvr->server;
1812 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_XLOOKUP);
1815 RXAFS_Lookup(tc->id, (struct AFSFid *)&afid->Fid, name,
1816 (struct AFSFid *)&nfid->Fid, OutStatusp,
1817 &OutDirStatus, CallBackp, tsyncp);
1822 } while (afs_Analyze
1823 (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_XLOOKUP, SHARED_LOCK,
1833 * Given a file id and a vrequest structure, fetch the status
1834 * information associated with the file.
1836 * \param afid File ID.
1837 * \param areq Ptr to associated vrequest structure, specifying the
1838 * user whose authentication tokens will be used.
1839 * \param avc Caller may already have a vcache for this file, which is
1842 * \note Environment:
1843 * The cache entry is returned with an increased vrefCount field.
1844 * The entry must be discarded by calling afs_PutVCache when you
1845 * are through using the pointer to the cache entry.
1847 * You should not hold any locks when calling this function, except
1848 * locks on other vcache entries. If you lock more than one vcache
1849 * entry simultaneously, you should lock them in this order:
1851 * 1. Lock all files first, then directories.
1852 * 2. Within a particular type, lock entries in Fid.Vnode order.
1854 * This locking hierarchy is convenient because it allows locking
1855 * of a parent dir cache entry, given a file (to check its access
1856 * control list). It also allows renames to be handled easily by
1857 * locking directories in a constant order.
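 *
 * A minimal sketch of the rules above (the two vcaches and the lock ids are
 * hypothetical; odd Fid.Vnode values denote directories, as elsewhere in
 * this file):
 *
 *     ObtainWriteLock(&file_vc->lock, 900);    files before directories
 *     ObtainWriteLock(&dir_vc->lock, 901);
 *     ... work on both entries ...
 *     ReleaseWriteLock(&dir_vc->lock);
 *     ReleaseWriteLock(&file_vc->lock);
 *
 * The reference returned by afs_GetVCache must eventually be dropped with
 * afs_PutVCache:
 *
 *     tvc = afs_GetVCache(&fid, &treq, NULL, NULL);
 *     if (tvc) {
 *         ... use tvc ...
 *         afs_PutVCache(tvc);
 *     }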
1859 * \note NB. NewVCache -> FlushVCache presently (4/10/95) drops the xvcache lock.
1861 * \note Might have a vcache structure already, which must
1862 * already be held by the caller
1865 afs_GetVCache(register struct VenusFid *afid, struct vrequest *areq,
1866 afs_int32 * cached, struct vcache *avc)
1869 afs_int32 code, newvcache = 0;
1870 register struct vcache *tvc;
1874 AFS_STATCNT(afs_GetVCache);
1877 *cached = 0; /* Init just in case */
1879 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1883 ObtainSharedLock(&afs_xvcache, 5);
1885 tvc = afs_FindVCache(afid, &retry, DO_STATS | DO_VLRU | IS_SLOCK);
1887 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
1888 ReleaseSharedLock(&afs_xvcache);
1889 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
1897 osi_Assert((tvc->f.states & CVInit) == 0);
1898 /* If we are in readdir, return the vnode even if not statd */
1899 if ((tvc->f.states & CStatd) || afs_InReadDir(tvc)) {
1900 ReleaseSharedLock(&afs_xvcache);
1904 UpgradeSToWLock(&afs_xvcache, 21);
1906 /* no cache entry, better grab one */
1907 tvc = afs_NewVCache(afid, NULL);
1910 ConvertWToSLock(&afs_xvcache);
1913 ReleaseSharedLock(&afs_xvcache);
1917 afs_stats_cmperf.vcacheMisses++;
1920 ReleaseSharedLock(&afs_xvcache);
1922 ObtainWriteLock(&tvc->lock, 54);
1924 if (tvc->f.states & CStatd) {
1925 ReleaseWriteLock(&tvc->lock);
1928 #if defined(AFS_OSF_ENV)
1929 if (afs_IsWired(tvc)) {
1930 ReleaseWriteLock(&tvc->lock);
1933 #endif /* AFS_OSF_ENV */
1934 #ifdef AFS_DARWIN80_ENV
1935 /* Darwin 8.0 only has bufs in nfs, so we shouldn't have to worry about them.
1938 #if defined(AFS_DARWIN_ENV) || defined(AFS_FBSD_ENV)
1940 * XXX - I really don't like this. Should try to understand better.
1941 * It seems that sometimes, when we get called, we already hold the
1942 * lock on the vnode (e.g., from afs_getattr via afs_VerifyVCache).
1943 * We can't drop the vnode lock, because that could result in a race.
1944 * Sometimes, though, we get here and don't hold the vnode lock.
1945 * I hate code paths that sometimes hold locks and sometimes don't.
1946 * In any event, the dodge we use here is to check whether the vnode
1947 * is locked, and if it isn't, then we gain and drop it around the call
1948 * to vinvalbuf; otherwise, we leave it alone.
1951 struct vnode *vp = AFSTOV(tvc);
1954 #if defined(AFS_DARWIN_ENV)
1955 iheldthelock = VOP_ISLOCKED(vp);
1957 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, current_proc());
1958 /* this is messy. we can call fsync which will try to reobtain this */
1959 if (VTOAFS(vp) == tvc)
1960 ReleaseWriteLock(&tvc->lock);
1961 if (UBCINFOEXISTS(vp)) {
1962 vinvalbuf(vp, V_SAVE, &afs_osi_cred, current_proc(), PINOD, 0);
1964 if (VTOAFS(vp) == tvc)
1965 ObtainWriteLock(&tvc->lock, 954);
1967 VOP_UNLOCK(vp, LK_EXCLUSIVE, current_proc());
1968 #elif defined(AFS_FBSD80_ENV)
1969 iheldthelock = VOP_ISLOCKED(vp);
1970 if (!iheldthelock) {
1971 /* nosleep/sleep lock order reversal */
1972 int glocked = ISAFS_GLOCK();
1975 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1979 vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
1982 #elif defined(AFS_FBSD60_ENV)
1983 iheldthelock = VOP_ISLOCKED(vp, curthread);
1985 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
1986 vinvalbuf(vp, V_SAVE, curthread, PINOD, 0);
1988 VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
1989 #elif defined(AFS_FBSD50_ENV)
1990 iheldthelock = VOP_ISLOCKED(vp, curthread);
1992 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
1993 vinvalbuf(vp, V_SAVE, osi_curcred(), curthread, PINOD, 0);
1995 VOP_UNLOCK(vp, LK_EXCLUSIVE, curthread);
1996 #elif defined(AFS_FBSD40_ENV)
1997 iheldthelock = VOP_ISLOCKED(vp, curproc);
1999 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
2000 vinvalbuf(vp, V_SAVE, osi_curcred(), curproc, PINOD, 0);
2002 VOP_UNLOCK(vp, LK_EXCLUSIVE, curproc);
2003 #elif defined(AFS_OBSD_ENV)
2004 iheldthelock = VOP_ISLOCKED(vp, curproc);
2006 VOP_LOCK(vp, LK_EXCLUSIVE | LK_RETRY, curproc);
2007 uvm_vnp_uncache(vp);
2009 VOP_UNLOCK(vp, 0, curproc);
2015 ObtainWriteLock(&afs_xcbhash, 464);
2016 tvc->f.states &= ~CUnique;
2018 afs_DequeueCallback(tvc);
2019 ReleaseWriteLock(&afs_xcbhash);
2021 /* Is it always appropriate to throw away all the access rights? */
2022 afs_FreeAllAxs(&(tvc->Access));
2023 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-volume info */
2025 if ((tvp->states & VForeign)) {
2027 tvc->f.states |= CForeign;
2028 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
2029 && (tvp->rootUnique == afid->Fid.Unique)) {
2033 if (tvp->states & VRO)
2034 tvc->f.states |= CRO;
2035 if (tvp->states & VBackup)
2036 tvc->f.states |= CBackup;
2037 /* now copy ".." entry back out of volume structure, if necessary */
2038 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2040 tvc->mvid = (struct VenusFid *)
2041 osi_AllocSmallSpace(sizeof(struct VenusFid));
2042 *tvc->mvid = tvp->dotdot;
2044 afs_PutVolume(tvp, READ_LOCK);
2048 afs_RemoveVCB(afid);
2050 struct AFSFetchStatus OutStatus;
2052 if (afs_DynrootNewVnode(tvc, &OutStatus)) {
2053 afs_ProcessFS(tvc, &OutStatus, areq);
2054 tvc->f.states |= CStatd | CUnique;
2055 tvc->f.parent.vnode = OutStatus.ParentVnode;
2056 tvc->f.parent.unique = OutStatus.ParentUnique;
2060 if (AFS_IS_DISCONNECTED) {
2061 /* Nothing to do otherwise...*/
2063 printf("Network is down in afs_GetCache");
2065 code = afs_FetchStatus(tvc, afid, areq, &OutStatus);
2067 /* For the NFS translator's benefit, make sure
2068 * non-directory vnodes always have their parent FID set
2069 * correctly, even when created as a result of decoding an
2070 * NFS filehandle. It would be nice to also do this for
2071 * directories, but we can't because the fileserver fills
2072 * in the FID of the directory itself instead of that of
2075 if (!code && OutStatus.FileType != Directory &&
2076 !tvc->f.parent.vnode) {
2077 tvc->f.parent.vnode = OutStatus.ParentVnode;
2078 tvc->f.parent.unique = OutStatus.ParentUnique;
2079 /* XXX - SXW - It's conceivable we should mark ourselves
2080 * as dirty again here, in case we've been raced
2081 * out of the FetchStatus call.
2088 ReleaseWriteLock(&tvc->lock);
2094 ReleaseWriteLock(&tvc->lock);
2097 } /*afs_GetVCache */
2102 * Look up a vcache by fid. Look inside the cache first; if it is not
2103 * there, look up the file on the server, and then get its fresh
2108 * \param cached Is element cached? If NULL, don't answer.
2112 * \return The found element or NULL.
2115 afs_LookupVCache(struct VenusFid *afid, struct vrequest *areq,
2116 afs_int32 * cached, struct vcache *adp, char *aname)
2118 afs_int32 code, now, newvcache = 0;
2119 struct VenusFid nfid;
2120 register struct vcache *tvc;
2122 struct AFSFetchStatus OutStatus;
2123 struct AFSCallBack CallBack;
2124 struct AFSVolSync tsync;
2125 struct server *serverp = 0;
2129 AFS_STATCNT(afs_GetVCache);
2131 *cached = 0; /* Init just in case */
2133 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2137 ObtainReadLock(&afs_xvcache);
2138 tvc = afs_FindVCache(afid, &retry, DO_STATS /* no vlru */ );
2141 ReleaseReadLock(&afs_xvcache);
2143 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2144 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2148 ObtainReadLock(&tvc->lock);
2150 if (tvc->f.states & CStatd) {
2154 ReleaseReadLock(&tvc->lock);
2157 tvc->f.states &= ~CUnique;
2159 ReleaseReadLock(&tvc->lock);
2161 ObtainReadLock(&afs_xvcache);
2164 ReleaseReadLock(&afs_xvcache);
2166 /* lookup the file */
2169 origCBs = afs_allCBs; /* if anything changes, we don't have a cb */
2171 if (AFS_IS_DISCONNECTED) {
2172 printf("Network is down in afs_LookupVcache\n");
2176 afs_RemoteLookup(&adp->f.fid, areq, aname, &nfid, &OutStatus,
2177 &CallBack, &serverp, &tsync);
2179 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2183 ObtainSharedLock(&afs_xvcache, 6);
2184 tvc = afs_FindVCache(&nfid, &retry, DO_VLRU | IS_SLOCK/* no xstats now */ );
2186 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2187 ReleaseSharedLock(&afs_xvcache);
2188 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2194 /* no cache entry, better grab one */
2195 UpgradeSToWLock(&afs_xvcache, 22);
2196 tvc = afs_NewVCache(&nfid, serverp);
2198 ConvertWToSLock(&afs_xvcache);
2201 ReleaseSharedLock(&afs_xvcache);
2206 ReleaseSharedLock(&afs_xvcache);
2207 ObtainWriteLock(&tvc->lock, 55);
2209 /* Is it always appropriate to throw away all the access rights? */
2210 afs_FreeAllAxs(&(tvc->Access));
2211 tvp = afs_GetVolume(afid, areq, READ_LOCK); /* copy useful per-vol info */
2213 if ((tvp->states & VForeign)) {
2215 tvc->f.states |= CForeign;
2216 if (newvcache && (tvp->rootVnode == afid->Fid.Vnode)
2217 && (tvp->rootUnique == afid->Fid.Unique))
2220 if (tvp->states & VRO)
2221 tvc->f.states |= CRO;
2222 if (tvp->states & VBackup)
2223 tvc->f.states |= CBackup;
2224 /* now copy ".." entry back out of volume structure, if necessary */
2225 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2227 tvc->mvid = (struct VenusFid *)
2228 osi_AllocSmallSpace(sizeof(struct VenusFid));
2229 *tvc->mvid = tvp->dotdot;
2234 ObtainWriteLock(&afs_xcbhash, 465);
2235 afs_DequeueCallback(tvc);
2236 tvc->f.states &= ~(CStatd | CUnique);
2237 ReleaseWriteLock(&afs_xcbhash);
2238 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2239 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2241 afs_PutVolume(tvp, READ_LOCK);
2242 ReleaseWriteLock(&tvc->lock);
2247 ObtainWriteLock(&afs_xcbhash, 466);
2248 if (origCBs == afs_allCBs) {
2249 if (CallBack.ExpirationTime) {
2250 tvc->callback = serverp;
2251 tvc->cbExpires = CallBack.ExpirationTime + now;
2252 tvc->f.states |= CStatd | CUnique;
2253 tvc->f.states &= ~CBulkFetching;
2254 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvp);
2255 } else if (tvc->f.states & CRO) {
2256 /* adapt gives us an hour. */
2257 tvc->cbExpires = 3600 + osi_Time();
2258 /*XXX*/ tvc->f.states |= CStatd | CUnique;
2259 tvc->f.states &= ~CBulkFetching;
2260 afs_QueueCallback(tvc, CBHash(3600), tvp);
2262 tvc->callback = NULL;
2263 afs_DequeueCallback(tvc);
2264 tvc->f.states &= ~(CStatd | CUnique);
2265 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2266 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2269 afs_DequeueCallback(tvc);
2270 tvc->f.states &= ~CStatd;
2271 tvc->f.states &= ~CUnique;
2272 tvc->callback = NULL;
2273 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2274 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2276 ReleaseWriteLock(&afs_xcbhash);
2278 afs_PutVolume(tvp, READ_LOCK);
2279 afs_ProcessFS(tvc, &OutStatus, areq);
2281 ReleaseWriteLock(&tvc->lock);
2287 afs_GetRootVCache(struct VenusFid *afid, struct vrequest *areq,
2288 afs_int32 * cached, struct volume *tvolp)
2290 afs_int32 code = 0, i, newvcache = 0, haveStatus = 0;
2291 afs_int32 getNewFid = 0;
2293 struct VenusFid nfid;
2294 register struct vcache *tvc;
2295 struct server *serverp = 0;
2296 struct AFSFetchStatus OutStatus;
2297 struct AFSCallBack CallBack;
2298 struct AFSVolSync tsync;
2303 #ifdef AFS_DARWIN80_ENV
2310 if (!tvolp->rootVnode || getNewFid) {
2311 struct VenusFid tfid;
2314 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2315 origCBs = afs_allCBs; /* ignore InitCallBackState */
2317 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2322 /* ReleaseReadLock(&tvolp->lock); */
2323 ObtainWriteLock(&tvolp->lock, 56);
2324 tvolp->rootVnode = afid->Fid.Vnode = nfid.Fid.Vnode;
2325 tvolp->rootUnique = afid->Fid.Unique = nfid.Fid.Unique;
2326 ReleaseWriteLock(&tvolp->lock);
2327 /* ObtainReadLock(&tvolp->lock);*/
2330 afid->Fid.Vnode = tvolp->rootVnode;
2331 afid->Fid.Unique = tvolp->rootUnique;
2335 ObtainSharedLock(&afs_xvcache, 7);
2337 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2338 if (!FidCmp(&(tvc->f.fid), afid)) {
2339 if (tvc->f.states & CVInit) {
2340 ReleaseSharedLock(&afs_xvcache);
2341 afs_osi_Sleep(&tvc->f.states);
2345 /* Grab this vnode, possibly reactivating from the free list */
2346 /* for the present (95.05.25) everything on the hash table is
2347 * definitively NOT in the free list -- at least until afs_reclaim
2348 * can be safely implemented */
2350 vg = vget(AFSTOV(tvc)); /* this bumps ref count */
2354 #endif /* AFS_OSF_ENV */
2355 #ifdef AFS_DARWIN80_ENV
2356 if (tvc->f.states & CDeadVnode) {
2357 ReleaseSharedLock(&afs_xvcache);
2358 afs_osi_Sleep(&tvc->f.states);
2362 if (vnode_get(tvp)) /* this bumps ref count */
2364 if (vnode_ref(tvp)) {
2366 /* AFSTOV(tvc) may be NULL */
2376 if (!haveStatus && (!tvc || !(tvc->f.states & CStatd))) {
2377 /* Mount point no longer stat'd or unknown. FID may have changed. */
2380 AFS_RELE(AFSTOV(tvc));
2383 ReleaseSharedLock(&afs_xvcache);
2384 #ifdef AFS_DARWIN80_ENV
2387 vnode_put(AFSTOV(tvc));
2388 vnode_rele(AFSTOV(tvc));
2397 UpgradeSToWLock(&afs_xvcache, 23);
2398 /* no cache entry, better grab one */
2399 tvc = afs_NewVCache(afid, NULL);
2402 ReleaseWriteLock(&afs_xvcache);
2406 afs_stats_cmperf.vcacheMisses++;
2410 afs_stats_cmperf.vcacheHits++;
2411 #if defined(AFS_OSF_ENV) || defined(AFS_DARWIN80_ENV)
2412 /* we already bumped the ref count in the for loop above */
2413 #else /* AFS_OSF_ENV */
2416 UpgradeSToWLock(&afs_xvcache, 24);
2417 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2418 refpanic("GRVC VLRU inconsistent0");
2420 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2421 refpanic("GRVC VLRU inconsistent1");
2423 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2424 refpanic("GRVC VLRU inconsistent2");
2426 QRemove(&tvc->vlruq); /* move to lruq head */
2427 QAdd(&VLRU, &tvc->vlruq);
2428 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2429 refpanic("GRVC VLRU inconsistent3");
2431 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2432 refpanic("GRVC VLRU inconsistent4");
2434 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2435 refpanic("GRVC VLRU inconsistent5");
2440 ReleaseWriteLock(&afs_xvcache);
2442 if (tvc->f.states & CStatd) {
2446 ObtainReadLock(&tvc->lock);
2447 tvc->f.states &= ~CUnique;
2448 tvc->callback = NULL; /* redundant, perhaps */
2449 ReleaseReadLock(&tvc->lock);
2452 ObtainWriteLock(&tvc->lock, 57);
2454 /* Is it always appropriate to throw away all the access rights? */
2455 afs_FreeAllAxs(&(tvc->Access));
2458 tvc->f.states |= CForeign;
2459 if (tvolp->states & VRO)
2460 tvc->f.states |= CRO;
2461 if (tvolp->states & VBackup)
2462 tvc->f.states |= CBackup;
2463 /* now copy ".." entry back out of volume structure, if necessary */
2464 if (newvcache && (tvolp->rootVnode == afid->Fid.Vnode)
2465 && (tvolp->rootUnique == afid->Fid.Unique)) {
2468 if (tvc->mvstat == 2 && tvolp->dotdot.Fid.Volume != 0) {
2470 tvc->mvid = (struct VenusFid *)
2471 osi_AllocSmallSpace(sizeof(struct VenusFid));
2472 *tvc->mvid = tvolp->dotdot;
2476 afs_RemoveVCB(afid);
2479 struct VenusFid tfid;
2482 tfid.Fid.Vnode = 0; /* Means get rootfid of volume */
2483 origCBs = afs_allCBs; /* ignore InitCallBackState */
2485 afs_RemoteLookup(&tfid, areq, NULL, &nfid, &OutStatus, &CallBack,
2490 ObtainWriteLock(&afs_xcbhash, 467);
2491 afs_DequeueCallback(tvc);
2492 tvc->callback = NULL;
2493 tvc->f.states &= ~(CStatd | CUnique);
2494 ReleaseWriteLock(&afs_xcbhash);
2495 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2496 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2497 ReleaseWriteLock(&tvc->lock);
2502 ObtainWriteLock(&afs_xcbhash, 468);
2503 if (origCBs == afs_allCBs) {
2504 tvc->f.states |= CTruth;
2505 tvc->callback = serverp;
2506 if (CallBack.ExpirationTime != 0) {
2507 tvc->cbExpires = CallBack.ExpirationTime + start;
2508 tvc->f.states |= CStatd;
2509 tvc->f.states &= ~CBulkFetching;
2510 afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), tvolp);
2511 } else if (tvc->f.states & CRO) {
2512 /* No callback from the server; read-only volumes get a default one-hour expiry. */
2513 tvc->cbExpires = 3600 + osi_Time();
2514 /*XXX*/ tvc->f.states |= CStatd;
2515 tvc->f.states &= ~CBulkFetching;
2516 afs_QueueCallback(tvc, CBHash(3600), tvolp);
2519 afs_DequeueCallback(tvc);
2520 tvc->callback = NULL;
2521 tvc->f.states &= ~(CStatd | CUnique);
2522 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2523 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2525 ReleaseWriteLock(&afs_xcbhash);
2526 afs_ProcessFS(tvc, &OutStatus, areq);
2528 ReleaseWriteLock(&tvc->lock);
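/*
 * Illustrative note (not part of the original source): both places above
 * that need a volume's root fid use the same convention: a fid whose
 * Fid.Vnode is 0 asks afs_RemoteLookup() for the root vnode of the volume,
 * and the answer is then cached in tvolp->rootVnode / tvolp->rootUnique.
 */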
2534 * Update callback status and (sometimes) attributes of a vnode.
2535 * Called after doing a fetch status RPC. Whilst disconnected, attributes
2536 * shouldn't be written to the vcache here.
2541 * \param Outsp Server status after rpc call.
2542 * \param acb Callback for this vnode.
2544 * \note The vcache must be write locked.
2547 afs_UpdateStatus(struct vcache *avc,
2548 struct VenusFid *afid,
2549 struct vrequest *areq,
2550 struct AFSFetchStatus *Outsp,
2551 struct AFSCallBack *acb,
2554 struct volume *volp;
2557 /* Don't write status into the vcache if we are resyncing after a disconnection. */
2558 afs_ProcessFS(avc, Outsp, areq);
2560 volp = afs_GetVolume(afid, areq, READ_LOCK);
2561 ObtainWriteLock(&afs_xcbhash, 469);
2562 avc->f.states |= CTruth;
2563 if (avc->callback /* check for race */ ) {
2564 if (acb->ExpirationTime != 0) {
2565 avc->cbExpires = acb->ExpirationTime + start;
2566 avc->f.states |= CStatd;
2567 avc->f.states &= ~CBulkFetching;
2568 afs_QueueCallback(avc, CBHash(acb->ExpirationTime), volp);
2569 } else if (avc->f.states & CRO) {
2570 /* ordinary callback on a read-only volume -- AFS 3.2 style */
2571 avc->cbExpires = 3600 + start;
2572 avc->f.states |= CStatd;
2573 avc->f.states &= ~CBulkFetching;
2574 afs_QueueCallback(avc, CBHash(3600), volp);
2576 afs_DequeueCallback(avc);
2577 avc->callback = NULL;
2578 avc->f.states &= ~(CStatd | CUnique);
2579 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
2580 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2583 afs_DequeueCallback(avc);
2584 avc->callback = NULL;
2585 avc->f.states &= ~(CStatd | CUnique);
2586 if ((avc->f.states & CForeign) || (avc->f.fid.Fid.Vnode & 1))
2587 osi_dnlc_purgedp(avc); /* if it (could be) a directory */
2589 ReleaseWriteLock(&afs_xcbhash);
2591 afs_PutVolume(volp, READ_LOCK);
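/*
 * Illustrative sketch (not part of the original source): a typical caller
 * records the time it started the RPC and hands that, together with the
 * returned status and callback, to afs_UpdateStatus() while holding the
 * vcache write lock; afs_FetchStatus() below is the in-tree example:
 *
 *     start = osi_Time();                       // before issuing the RPC
 *     code = RXAFS_FetchStatus(tc->id, (struct AFSFid *)&afid->Fid,
 *                              &OutStatus, &CallBack, &tsync);
 *     if (!code)
 *         afs_UpdateStatus(avc, afid, areq, &OutStatus, &CallBack, start);
 */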
2595 * Must be called with avc write-locked.
2596 * We don't absolutely have to invalidate the dcache hint unless the data
2597 * version (dv) has changed, but get it right or consistency bugs will follow.
2600 afs_FetchStatus(struct vcache * avc, struct VenusFid * afid,
2601 struct vrequest * areq, struct AFSFetchStatus * Outsp)
2604 afs_uint32 start = 0;
2605 register struct afs_conn *tc;
2606 struct AFSCallBack CallBack;
2607 struct AFSVolSync tsync;
2610 tc = afs_Conn(afid, areq, SHARED_LOCK);
2611 avc->dchint = NULL; /* invalidate hints */
2613 avc->callback = tc->srvr->server;
2615 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHSTATUS);
2618 RXAFS_FetchStatus(tc->id, (struct AFSFid *)&afid->Fid, Outsp,
2626 } while (afs_Analyze
2627 (tc, code, afid, areq, AFS_STATS_FS_RPCIDX_FETCHSTATUS,
2628 SHARED_LOCK, NULL));
2631 afs_UpdateStatus(avc, afid, areq, Outsp, &CallBack, start);
2633 /* We used to undo the local callback here, but that's too extreme.
2634 * There are plenty of good reasons why fetchstatus might return
2635 * an error, such as EPERM. If we have the vnode cached, statd,
2636 * and with a callback, we might as well record the fact that we
2637 * don't have access...
2639 if (code == EPERM || code == EACCES) {
2640 struct axscache *ac;
2641 if (avc->Access && (ac = afs_FindAxs(avc->Access, areq->uid)))
2643 else /* not found, add a new one if possible */
2644 afs_AddAxs(avc->Access, areq->uid, 0);
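/*
 * Illustrative sketch (not part of the original source): the afs_Conn /
 * afs_Analyze loop above is the cache manager's standard RPC retry idiom;
 * afs_Analyze() looks at the error code and decides whether another server
 * (or the same one, after waiting) should be tried:
 *
 *     do {
 *         tc = afs_Conn(afid, areq, SHARED_LOCK);
 *         if (tc)
 *             code = RXAFS_SomeRPC(tc->id, ...);   // hypothetical RPC name
 *         else
 *             code = -1;
 *     } while (afs_Analyze(tc, code, afid, areq,
 *                          opcode /* e.g. AFS_STATS_FS_RPCIDX_FETCHSTATUS */,
 *                          SHARED_LOCK, NULL));
 */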
2655 * Stuff some information into the vcache for the given file.
2658 * afid : File in question.
2659 * OutStatus : Fetch status on the file.
2660 * CallBack : Callback info.
2661 * tc : RPC connection involved.
2662 * areq : vrequest involved.
2665 * Nothing interesting.
2668 afs_StuffVcache(register struct VenusFid *afid,
2669 struct AFSFetchStatus *OutStatus,
2670 struct AFSCallBack *CallBack, register struct afs_conn *tc,
2671 struct vrequest *areq)
2673 register afs_int32 code, i, newvcache = 0;
2674 register struct vcache *tvc;
2675 struct AFSVolSync tsync;
2677 struct axscache *ac;
2680 AFS_STATCNT(afs_StuffVcache);
2681 #ifdef IFS_VCACHECOUNT
2686 ObtainSharedLock(&afs_xvcache, 8);
2688 tvc = afs_FindVCache(afid, &retry, DO_VLRU | IS_SLOCK /* no stats */);
2690 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
2691 ReleaseSharedLock(&afs_xvcache);
2692 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
2698 /* no cache entry, better grab one */
2699 UpgradeSToWLock(&afs_xvcache, 25);
2700 tvc = afs_NewVCache(afid, NULL);
2702 ConvertWToSLock(&afs_xvcache);
2705 ReleaseSharedLock(&afs_xvcache);
2710 ReleaseSharedLock(&afs_xvcache);
2711 ObtainWriteLock(&tvc->lock, 58);
2713 tvc->f.states &= ~CStatd;
2714 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2715 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2717 /* Is it always appropriate to throw away all the access rights? */
2718 afs_FreeAllAxs(&(tvc->Access));
2720 /*Copy useful per-volume info */
2721 tvp = afs_GetVolume(afid, areq, READ_LOCK);
2723 if (newvcache && (tvp->states & VForeign))
2724 tvc->f.states |= CForeign;
2725 if (tvp->states & VRO)
2726 tvc->f.states |= CRO;
2727 if (tvp->states & VBackup)
2728 tvc->f.states |= CBackup;
2730 * Now, copy ".." entry back out of volume structure, if
2733 if (tvc->mvstat == 2 && tvp->dotdot.Fid.Volume != 0) {
2735 tvc->mvid = (struct VenusFid *)
2736 osi_AllocSmallSpace(sizeof(struct VenusFid));
2737 *tvc->mvid = tvp->dotdot;
2740 /* store the stat on the file */
2741 afs_RemoveVCB(afid);
2742 afs_ProcessFS(tvc, OutStatus, areq);
2743 tvc->callback = tc->srvr->server;
2745 /* we use osi_Time twice below. Ideally, we would use the time at which
2746 * the FetchStatus call began, instead, but we don't have it here. So we
2747 * make do with "now". In the CRO case, it doesn't really matter. In
2748 * the other case, we hope that the difference between "now" and when the
2749 * call actually began execution on the server won't be larger than the
2750 * padding which the server keeps. Subtract 1 second anyway, to be on
2751 * the safe side. Can't subtract more because we don't know how big
2752 * ExpirationTime is. Possible consistency problems may arise if the call
2753 * timeout period becomes longer than the server's expiration padding. */
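	/*
	 * Worked example (illustrative, not part of the original source): if the
	 * server grants a 300-second callback and osi_Time() returns T when we
	 * get here, cbExpires is set to T + 300 - 1, i.e. we treat the callback
	 * as if it were issued "now" and shave one second to stay on the safe side.
	 */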
2754 ObtainWriteLock(&afs_xcbhash, 470);
2755 if (CallBack->ExpirationTime != 0) {
2756 tvc->cbExpires = CallBack->ExpirationTime + osi_Time() - 1;
2757 tvc->f.states |= CStatd;
2758 tvc->f.states &= ~CBulkFetching;
2759 afs_QueueCallback(tvc, CBHash(CallBack->ExpirationTime), tvp);
2760 } else if (tvc->f.states & CRO) {
2761 /* old-fashioned AFS 3.2 style */
2762 tvc->cbExpires = 3600 + osi_Time();
2763 /*XXX*/ tvc->f.states |= CStatd;
2764 tvc->f.states &= ~CBulkFetching;
2765 afs_QueueCallback(tvc, CBHash(3600), tvp);
2767 afs_DequeueCallback(tvc);
2768 tvc->callback = NULL;
2769 tvc->f.states &= ~(CStatd | CUnique);
2770 if ((tvc->f.states & CForeign) || (tvc->f.fid.Fid.Vnode & 1))
2771 osi_dnlc_purgedp(tvc); /* if it (could be) a directory */
2773 ReleaseWriteLock(&afs_xcbhash);
2775 afs_PutVolume(tvp, READ_LOCK);
2777 /* look in per-pag cache */
2778 if (tvc->Access && (ac = afs_FindAxs(tvc->Access, areq->uid)))
2779 ac->axess = OutStatus->CallerAccess; /* substitute pags */
2780 else /* not found, add a new one if possible */
2781 afs_AddAxs(tvc->Access, areq->uid, OutStatus->CallerAccess);
2783 ReleaseWriteLock(&tvc->lock);
2784 afs_Trace4(afs_iclSetp, CM_TRACE_STUFFVCACHE, ICL_TYPE_POINTER, tvc,
2785 ICL_TYPE_POINTER, tvc->callback, ICL_TYPE_INT32,
2786 tvc->cbExpires, ICL_TYPE_INT32, tvc->cbExpires - osi_Time());
2788 * Release ref count... hope this guy stays around...
2791 } /*afs_StuffVcache */
2795 * Decrements the reference count on a cache entry.
2797 * \param avc Pointer to the cache entry to decrement.
2799 * \note Environment: Nothing interesting.
2802 afs_PutVCache(register struct vcache *avc)
2804 AFS_STATCNT(afs_PutVCache);
2805 #ifdef AFS_DARWIN80_ENV
2806 vnode_put(AFSTOV(avc));
2810 * Can we use a read lock here?
2812 ObtainReadLock(&afs_xvcache);
2814 ReleaseReadLock(&afs_xvcache);
2816 } /*afs_PutVCache */
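/*
 * Illustrative sketch (not part of the original source): callers that get a
 * referenced vcache from one of the lookup routines drop that reference with
 * afs_PutVCache() when they are done, e.g.
 *
 *     ObtainReadLock(&afs_xvcache);
 *     tvc = afs_FindVCache(&tfid, NULL, DO_STATS);   // tfid: some VenusFid
 *     ReleaseReadLock(&afs_xvcache);
 *     if (tvc) {
 *         ... use tvc ...
 *         afs_PutVCache(tvc);
 *     }
 */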
2820 * Reset a vcache entry, so local contents are ignored, and the
2821 * server will be reconsulted next time the vcache is used
2823 * \param avc Pointer to the cache entry to reset
2826 * \note avc must be write locked on entry
2829 afs_ResetVCache(struct vcache *avc, struct AFS_UCRED *acred) {
2830 ObtainWriteLock(&afs_xcbhash, 456);
2831 afs_DequeueCallback(avc);
2832 avc->f.states &= ~(CStatd | CDirty); /* next reference will re-stat */
2833 ReleaseWriteLock(&afs_xcbhash);
2834 /* now find the disk cache entries */
2835 afs_TryToSmush(avc, acred, 1);
2836 osi_dnlc_purgedp(avc);
2837 if (avc->linkData && !(avc->f.states & CCore)) {
2838 afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
2839 avc->linkData = NULL;
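/*
 * Illustrative sketch (not part of the original source): afs_ResetVCache()
 * expects the entry to be write-locked by the caller, e.g.
 *
 *     ObtainWriteLock(&avc->lock, NNN);   // NNN: a unique lock id
 *     afs_ResetVCache(avc, acred);
 *     ReleaseWriteLock(&avc->lock);
 *
 * after which the next reference to the file re-stats it from the server.
 */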
2844 * Sleep while searching for a vcache. Releases all the pending locks,
2845 * sleeps, then reobtains the previously released locks.
2847 * \param avc The vcache entry to wait on.
2848 * \param flag Determines which locks to release and reacquire.
2852 static void findvc_sleep(struct vcache *avc, int flag) {
2853 if (flag & IS_SLOCK) {
2854 ReleaseSharedLock(&afs_xvcache);
2856 if (flag & IS_WLOCK) {
2857 ReleaseWriteLock(&afs_xvcache);
2859 ReleaseReadLock(&afs_xvcache);
2862 afs_osi_Sleep(&avc->f.states);
2863 if (flag & IS_SLOCK) {
2864 ObtainSharedLock(&afs_xvcache, 341);
2866 if (flag & IS_WLOCK) {
2867 ObtainWriteLock(&afs_xvcache, 343);
2869 ObtainReadLock(&afs_xvcache);
2874 * Find a vcache entry given a fid.
2876 * \param afid Pointer to the fid whose cache entry we desire.
2877 * \param retry (SGI-specific) tell the caller to drop the lock on xvcache,
2878 * unlock the vnode, and try again.
2879 * \param flag Bit 1 to specify whether to compute hit statistics. Not
2880 * set if FindVCache is called as part of internal bookkeeping.
2882 * \note Environment: Must be called with the afs_xvcache lock at least held at
2883 * the read level. In order to do the VLRU adjustment, the xvcache lock
2884 * must be shared-- we upgrade it here.
2888 afs_FindVCache(struct VenusFid *afid, afs_int32 * retry, afs_int32 flag)
2891 register struct vcache *tvc;
2893 #if defined( AFS_OSF_ENV)
2896 #ifdef AFS_DARWIN80_ENV
2900 AFS_STATCNT(afs_FindVCache);
2904 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
2905 if (FidMatches(afid, tvc)) {
2906 if (tvc->f.states & CVInit) {
2907 findvc_sleep(tvc, flag);
2911 /* Grab this vnode, possibly reactivating from the free list */
2913 vg = vget(AFSTOV(tvc));
2917 #endif /* AFS_OSF_ENV */
2918 #ifdef AFS_DARWIN80_ENV
2919 if (tvc->f.states & CDeadVnode) {
2920 findvc_sleep(tvc, flag);
2926 if (vnode_ref(tvp)) {
2928 /* AFSTOV(tvc) may be NULL */
2938 /* should I have a read lock on the vnode here? */
2942 #if !defined(AFS_OSF_ENV) && !defined(AFS_DARWIN80_ENV)
2943 osi_vnhold(tvc, retry); /* already held, above */
2944 if (retry && *retry)
2947 #if defined(AFS_DARWIN_ENV) && !defined(AFS_DARWIN80_ENV)
2948 tvc->f.states |= CUBCinit;
2950 if (UBCINFOMISSING(AFSTOV(tvc)) ||
2951 UBCINFORECLAIMED(AFSTOV(tvc))) {
2952 ubc_info_init(AFSTOV(tvc));
2955 tvc->f.states &= ~CUBCinit;
2958 * only move to front of vlru if we have proper vcache locking
2960 if (flag & DO_VLRU) {
2961 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2962 refpanic("FindVC VLRU inconsistent1");
2964 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2965 refpanic("FindVC VLRU inconsistent1");
2967 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2968 refpanic("FindVC VLRU inconsistent2");
2970 UpgradeSToWLock(&afs_xvcache, 26);
2971 QRemove(&tvc->vlruq);
2972 QAdd(&VLRU, &tvc->vlruq);
2973 ConvertWToSLock(&afs_xvcache);
2974 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
2975 refpanic("FindVC VLRU inconsistent1");
2977 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
2978 refpanic("FindVC VLRU inconsistent2");
2980 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
2981 refpanic("FindVC VLRU inconsistent3");
2987 if (flag & DO_STATS) {
2989 afs_stats_cmperf.vcacheHits++;
2991 afs_stats_cmperf.vcacheMisses++;
2992 if (afs_IsPrimaryCellNum(afid->Cell))
2993 afs_stats_cmperf.vlocalAccesses++;
2995 afs_stats_cmperf.vremoteAccesses++;
2998 } /*afs_FindVCache */
3001 * Find a vcache entry given a fid. Does a wildcard match on what we
3002 * have for the fid. If more than one entry matches, don't return anything.
3004 * \param avcp Fill in pointer if we found one and only one.
3005 * \param afid Pointer to the fid whose cache entry we desire.
3006 * \note Unlike afs_FindVCache, this variant takes no retry or flags
3007 * arguments.
3011 * \note Environment: Must be called with the afs_xvcache lock at least held at
3012 * the read level. In order to do the VLRU adjustment, the xvcache lock
3013 * must be shared-- we upgrade it here.
3015 * \return Number of matches found.
3018 int afs_duplicate_nfs_fids = 0;
3021 afs_NFSFindVCache(struct vcache **avcp, struct VenusFid *afid)
3023 register struct vcache *tvc;
3025 afs_int32 count = 0;
3026 struct vcache *found_tvc = NULL;
3030 #ifdef AFS_DARWIN80_ENV
3034 AFS_STATCNT(afs_FindVCache);
3038 ObtainSharedLock(&afs_xvcache, 331);
3041 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3042 /* Match only on what we have.... */
3043 if (((tvc->f.fid.Fid.Vnode & 0xffff) == afid->Fid.Vnode)
3044 && (tvc->f.fid.Fid.Volume == afid->Fid.Volume)
3045 && ((tvc->f.fid.Fid.Unique & 0xffffff) == afid->Fid.Unique)
3046 && (tvc->f.fid.Cell == afid->Cell)) {
3047 if (tvc->f.states & CVInit) {
3048 ReleaseSharedLock(&afs_xvcache);
3049 afs_osi_Sleep(&tvc->f.states);
3053 /* Grab this vnode, possibly reactivating from the free list */
3055 vg = vget(AFSTOV(tvc));
3058 /* This vnode no longer exists. */
3061 #endif /* AFS_OSF_ENV */
3062 #ifdef AFS_DARWIN80_ENV
3063 if (tvc->f.states & CDeadVnode) {
3064 ReleaseSharedLock(&afs_xvcache);
3065 afs_osi_Sleep(&tvc->f.states);
3069 if (vnode_get(tvp)) {
3070 /* This vnode no longer exists. */
3073 if (vnode_ref(tvp)) {
3074 /* This vnode no longer exists. */
3076 /* AFSTOV(tvc) may be NULL */
3081 #endif /* AFS_DARWIN80_ENV */
3086 /* Drop our reference counts. */
3088 vrele(AFSTOV(found_tvc));
3090 afs_duplicate_nfs_fids++;
3091 ReleaseSharedLock(&afs_xvcache);
3092 #ifdef AFS_DARWIN80_ENV
3093 /* Drop our reference counts. */
3094 vnode_put(AFSTOV(tvc));
3095 vnode_put(AFSTOV(found_tvc));
3104 /* should I have a read lock on the vnode here? */
3106 #if defined(AFS_SGI_ENV) && !defined(AFS_SGI53_ENV)
3107 afs_int32 retry = 0;
3108 osi_vnhold(tvc, &retry);
3111 found_tvc = (struct vcache *)0;
3112 ReleaseSharedLock(&afs_xvcache);
3113 spunlock_psema(tvc->v.v_lock, retry, &tvc->v.v_sync, PINOD);
3117 #if !defined(AFS_OSF_ENV)
3118 osi_vnhold(tvc, (int *)0); /* already held, above */
3122 * We obtained the xvcache lock above.
3124 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
3125 refpanic("FindVC VLRU inconsistent1");
3127 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
3128 refpanic("FindVC VLRU inconsistent1");
3130 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
3131 refpanic("FindVC VLRU inconsistent2");
3133 UpgradeSToWLock(&afs_xvcache, 568);
3134 QRemove(&tvc->vlruq);
3135 QAdd(&VLRU, &tvc->vlruq);
3136 ConvertWToSLock(&afs_xvcache);
3137 if ((VLRU.next->prev != &VLRU) || (VLRU.prev->next != &VLRU)) {
3138 refpanic("FindVC VLRU inconsistent1");
3140 if (tvc->vlruq.next->prev != &(tvc->vlruq)) {
3141 refpanic("FindVC VLRU inconsistent2");
3143 if (tvc->vlruq.prev->next != &(tvc->vlruq)) {
3144 refpanic("FindVC VLRU inconsistent3");
3150 afs_stats_cmperf.vcacheHits++;
3152 afs_stats_cmperf.vcacheMisses++;
3153 if (afs_IsPrimaryCellNum(afid->Cell))
3154 afs_stats_cmperf.vlocalAccesses++;
3156 afs_stats_cmperf.vremoteAccesses++;
3158 *avcp = tvc; /* May be null */
3160 ReleaseSharedLock(&afs_xvcache);
3161 return (tvc ? 1 : 0);
3163 } /*afs_NFSFindVCache */
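/*
 * Illustrative sketch (not part of the original source): a caller that only
 * has a partial fid (e.g. one reconstructed from an NFS-style file handle)
 * accepts the result only when exactly one vcache matches; a reference is
 * held on the returned entry:
 *
 *     struct vcache *tvc;
 *     if (afs_NFSFindVCache(&tvc, &partialFid) && tvc) {   // partialFid: hypothetical
 *         ... use tvc ...
 *         afs_PutVCache(tvc);
 *     }
 */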
3169 * Initialize vcache-related variables
3174 afs_vcacheInit(int astatSize)
3176 register struct vcache *tvp;
3178 #if defined(AFS_OSF_ENV) || defined(AFS_LINUX22_ENV)
3179 if (!afs_maxvcount) {
3180 #if defined(AFS_LINUX22_ENV)
3181 afs_maxvcount = astatSize; /* no particular limit on linux? */
3182 #elif defined(AFS_OSF30_ENV)
3183 afs_maxvcount = max_vnodes / 2; /* limit ourselves to half the total */
3185 afs_maxvcount = nvnode / 2; /* limit ourselves to half the total */
3187 if (astatSize < afs_maxvcount) {
3188 afs_maxvcount = astatSize;
3191 #else /* AFS_OSF_ENV */
3195 AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
3196 LOCK_INIT(&afs_xvcb, "afs_xvcb");
3198 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
3199 /* Allocate and thread the struct vcache entries */
3200 tvp = (struct vcache *)afs_osi_Alloc(astatSize * sizeof(struct vcache));
3201 memset((char *)tvp, 0, sizeof(struct vcache) * astatSize);
3203 Initial_freeVCList = tvp;
3204 freeVCList = &(tvp[0]);
3205 for (i = 0; i < astatSize - 1; i++) {
3206 tvp[i].nextfree = &(tvp[i + 1]);
3208 tvp[astatSize - 1].nextfree = NULL;
3209 #ifdef KERNEL_HAVE_PIN
3210 pin((char *)tvp, astatSize * sizeof(struct vcache)); /* XXX */
3214 #if defined(AFS_SGI_ENV)
3215 for (i = 0; i < astatSize; i++) {
3216 char name[METER_NAMSZ];
3217 struct vcache *tvc = &tvp[i];
3219 tvc->v.v_number = ++afsvnumbers;
3220 tvc->vc_rwlockid = OSI_NO_LOCKID;
3221 initnsema(&tvc->vc_rwlock, 1,
3222 makesname(name, "vrw", tvc->v.v_number));
3223 #ifndef AFS_SGI53_ENV
3224 initnsema(&tvc->v.v_sync, 0, makesname(name, "vsy", tvc->v.v_number));
3226 #ifndef AFS_SGI62_ENV
3227 initnlock(&tvc->v.v_lock, makesname(name, "vlk", tvc->v.v_number));
3228 #endif /* AFS_SGI62_ENV */
3232 for(i = 0; i < VCSIZE; ++i)
3233 QInit(&afs_vhashTV[i]);
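/*
 * Illustrative note (not part of the original source): astatSize is the
 * number of stat cache entries requested at startup (afsd's -stat option).
 * On platforms that use the private free list, the entries come from one
 * contiguous array and are chained through nextfree, so freeVCList initially
 * walks tvp[0] -> tvp[1] -> ... -> tvp[astatSize - 1] -> NULL.
 */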
3240 shutdown_vcache(void)
3243 struct afs_cbr *tsp, *nsp;
3245 * XXX We may miss some of the vcaches: when there are no free vcache
3246 * entries and all existing entries are active, we allocate an additional
3247 * one; admittedly that almost never occurs.
3251 register struct afs_q *tq, *uq;
3252 register struct vcache *tvc;
3253 for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
3257 osi_FreeSmallSpace(tvc->mvid);
3258 tvc->mvid = (struct VenusFid *)0;
3261 aix_gnode_rele(AFSTOV(tvc));
3263 if (tvc->linkData) {
3264 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
3269 * Also free the remaining ones in the Cache
3271 for (i = 0; i < VCSIZE; i++) {
3272 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3274 osi_FreeSmallSpace(tvc->mvid);
3275 tvc->mvid = (struct VenusFid *)0;
3279 afs_osi_Free(tvc->v.v_gnode, sizeof(struct gnode));
3280 #ifdef AFS_AIX32_ENV
3283 vms_delete(tvc->segid);
3285 tvc->segid = tvc->vmh = NULL;
3286 if (VREFCOUNT_GT(tvc,0))
3287 osi_Panic("flushVcache: vm race");
3295 #if defined(AFS_SUN5_ENV)
3301 if (tvc->linkData) {
3302 afs_osi_Free(tvc->linkData, strlen(tvc->linkData) + 1);
3306 afs_FreeAllAxs(&(tvc->Access));
3312 * Free any leftover callback queue
3314 for (tsp = afs_cbrSpace; tsp; tsp = nsp) {
3316 afs_osi_Free((char *)tsp, AFS_NCBRS * sizeof(struct afs_cbr));
3320 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
3321 afs_osi_Free(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3323 #ifdef KERNEL_HAVE_PIN
3324 unpin(Initial_freeVCList, afs_cacheStats * sizeof(struct vcache));
3327 #if !defined(AFS_OSF_ENV) && !defined(AFS_LINUX22_ENV)
3328 freeVCList = Initial_freeVCList = 0;
3330 AFS_RWLOCK_INIT(&afs_xvcache, "afs_xvcache");
3331 LOCK_INIT(&afs_xvcb, "afs_xvcb");
3333 for(i = 0; i < VCSIZE; ++i)
3334 QInit(&afs_vhashTV[i]);
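/*
 * Illustrative note (not part of the original source): the locks and hash
 * queues are re-initialized at the end of shutdown, presumably so that a
 * subsequent warm start of the cache manager begins from a clean state.
 */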
3337 void afs_DisconGiveUpCallbacks(void) {
3342 ObtainWriteLock(&afs_xvcache, 1002); /* XXX - should be a unique number */
3344 /* Somehow, walk the set of vcaches, with each one coming out as tvc */
3345 for (i = 0; i < VCSIZE; i++) {
3346 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3347 if ((tvc->f.states & CRO) == 0 && tvc->callback) {
3349 tvc->callback = NULL;
3354 /*printf("%d callbacks to be discarded. queued ... ", nq);*/
3357 ReleaseWriteLock(&afs_xvcache);
3358 /*printf("gone\n");*/
3363 * Clear the Statd flag from all vcaches
3365 * This function removes the Statd flag from all vcaches. It's used by
3366 * disconnected mode to tidy up during reconnection.
3369 void afs_ClearAllStatdFlag(void) {
3373 ObtainWriteLock(&afs_xvcache, 715);
3375 for (i = 0; i < VCSIZE; i++) {
3376 for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
3377 tvc->f.states &= ~(CStatd|CUnique);
3380 ReleaseWriteLock(&afs_xvcache);
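/*
 * Illustrative note (not part of the original source): with CStatd and
 * CUnique cleared everywhere, the next access to each file goes back to the
 * fileserver for fresh status, which is what the reconnection path wants
 * after a period of disconnected operation.
 */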