2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
13 #include <afsconfig.h>
14 #include "../afs/param.h"
18 #include "../afs/sysincludes.h" /*Standard vendor system headers*/
19 #include "../afs/afsincludes.h" /*AFS-based standard headers*/
20 #include "../afs/afs_stats.h" /* statistics */
21 #include "../afs/afs_cbqueue.h"
22 #include "../afs/afs_osidnlc.h"
/* NOTE(review): the numeric prefix on each line below is an artifact of this
 * extracted copy (original file line numbers); it is not part of the code. */
24 /* Forward declarations. */
25 static void afs_GetDownD(int anumber, int *aneedSpace);
26 static void afs_FreeDiscardedDCache(void);
27 static void afs_DiscardDCache(struct dcache *);
/* State owned by other cache-manager modules (vcache, callback hashing,
 * mariner tracing) that this file reads; definitions live elsewhere. */
29 /* Imported variables */
30 extern afs_rwlock_t afs_xvcache;
31 extern afs_rwlock_t afs_xcbhash;
32 extern afs_int32 afs_mariner;
33 extern afs_int32 cacheInfoModTime; /*Last time cache info modified*/
37 * --------------------- Exported definitions ---------------------
/* afs_xdcache serializes allocation/recycling of disk cache entries; most
 * functions in this file require it write-held (see CheckLock panics below). */
39 afs_lock_t afs_xdcache; /*Lock: alloc new disk cache entries*/
/* Free and discarded dcache index lists are threaded through afs_dvnextTbl
 * (see afs_FreeDCache / afs_DiscardDCache below); these hold the list heads
 * and element counts. */
40 afs_int32 afs_freeDCList; /*Free list for disk cache entries*/
41 afs_int32 afs_freeDCCount; /*Count of elts in freeDCList*/
42 afs_int32 afs_discardDCList; /*Discarded disk cache entries*/
43 afs_int32 afs_discardDCCount; /*Count of elts in discardDCList*/
44 struct dcache *afs_freeDSList; /*Free list for disk slots */
45 struct dcache *afs_Initial_freeDSList; /*Initial list for above*/
46 ino_t cacheInode; /*Inode for CacheItems file*/
47 struct osi_file *afs_cacheInodep = 0; /* file for CacheItems inode */
48 struct afs_q afs_DLRU; /*dcache LRU*/
49 afs_int32 afs_dhashsize = 1024;
/* Two hash tables index dcache slots: by (fid, chunk) via DCHash and by fid
 * alone via DVHash; the *nextTbl arrays are the per-slot chain links. */
50 afs_int32 *afs_dvhashTbl; /*Data cache hash table*/
51 afs_int32 *afs_dchashTbl; /*Data cache hash table*/
52 afs_int32 *afs_dvnextTbl; /*Dcache hash table links */
53 afs_int32 *afs_dcnextTbl; /*Dcache hash table links */
54 struct dcache **afs_indexTable; /*Pointers to dcache entries*/
55 afs_hyper_t *afs_indexTimes; /*Dcache entry Access times*/
56 afs_int32 *afs_indexUnique; /*dcache entry Fid.Unique */
57 unsigned char *afs_indexFlags; /*(only one) Is there data there?*/
/* afs_indexCounter is a monotonically increasing pseudo-clock; entries stamp
 * afs_indexTimes[] from it so afs_GetDownD can pick LRU victims. */
58 afs_hyper_t afs_indexCounter; /*Fake time for marking index
60 afs_int32 afs_cacheFiles =0; /*Size of afs_indexTable*/
61 afs_int32 afs_cacheBlocks; /*1K blocks in cache*/
62 afs_int32 afs_cacheStats; /*Stat entries in cache*/
63 afs_int32 afs_blocksUsed; /*Number of blocks in use*/
64 afs_int32 afs_blocksDiscarded; /*Blocks freed but not truncated */
/* Stored as (fragment size - 1) so callers can round up with add+mask
 * instead of subtract; see afs_AdjustSize / afs_DiscardDCache. */
65 afs_int32 afs_fsfragsize = 1023; /*Underlying Filesystem minimum unit
66 *of disk allocation usually 1K
67 *this value is (truefrag -1 ) to
68 *save a bunch of subtracts... */
69 #ifdef AFS_64BIT_CLIENT
70 #ifdef AFS_VM_RDWR_ENV
71 afs_size_t afs_vmMappingEnd; /* for large files (>= 2GB) the VM
72 * mapping an 32bit addressing machines
73 * can only be used below the 2 GB
74 * line. From this point upwards we
75 * must do direct I/O into the cache
76 * files. The value should be on a
78 #endif /* AFS_VM_RDWR_ENV */
79 #endif /* AFS_64BIT_CLIENT */
81 /* The following is used to ensure that new dcache's aren't obtained when
82 * the cache is nearly full.
84 int afs_WaitForCacheDrain = 0;
85 int afs_TruncateDaemonRunning = 0;
86 int afs_CacheTooFull = 0;
88 afs_int32 afs_dcentries; /* In-memory dcache entries */
91 int dcacheDisabled = 0;
/* Backing-store dispatch: one afs_cacheOps vtable for a UFS (on-disk) cache
 * and one for a memory cache; afs_cacheType points at whichever is active.
 * NOTE(review): several initializer fields are elided in this extract
 * (embedded numbering jumps), so the full member order is not visible here. */
93 extern struct dcache *afs_UFSGetDSlot();
94 extern struct volume *afs_UFSGetVolSlot();
95 extern int osi_UFSTruncate(), afs_osi_Read(), afs_osi_Write(), osi_UFSClose();
96 extern int afs_UFSRead(), afs_UFSWrite();
97 static int afs_UFSCacheFetchProc(), afs_UFSCacheStoreProc();
98 extern int afs_UFSHandleLink();
99 struct afs_cacheOps afs_UfsCacheOps = {
107 afs_UFSCacheFetchProc,
108 afs_UFSCacheStoreProc,
114 extern void *afs_MemCacheOpen();
115 extern struct dcache *afs_MemGetDSlot();
116 extern struct volume *afs_MemGetVolSlot();
117 extern int afs_MemCacheTruncate(), afs_MemReadBlk(), afs_MemWriteBlk(), afs_MemCacheClose();
118 extern int afs_MemRead(), afs_MemWrite(), afs_MemCacheFetchProc(), afs_MemCacheStoreProc();
119 extern int afs_MemHandleLink();
120 struct afs_cacheOps afs_MemCacheOps = {
122 afs_MemCacheTruncate,
128 afs_MemCacheFetchProc,
129 afs_MemCacheStoreProc,
135 int cacheDiskType; /*Type of backing disk for cache*/
136 struct afs_cacheOps *afs_cacheType;
145 * Warn about failing to store a file.
148 * acode : Associated error code.
149 * avolume : Volume involved.
150 * aflags : How to handle the output:
151 * aflags & 1: Print out on console
152 * aflags & 2: Print out on controlling tty
155 * Call this from close call when vnodeops is RCS unlocked.
/* Maps a store-failure errno to a human-readable message and emits it via
 * afs_warn (console) and/or afs_warnuser (user tty) per the aflags bits.
 * NOTE(review): the guarding conditions around each warn call (the tests on
 * acode and aflags) are elided in this extract — confirm against the full
 * source before reasoning about which branch fires. */
159 afs_StoreWarn(acode, avolume, aflags)
160 register afs_int32 acode;
162 register afs_int32 aflags;
166 static char problem_fmt[] =
167 "afs: failed to store file in volume %d (%s)\n";
168 static char problem_fmt_w_error[] =
169 "afs: failed to store file in volume %d (error %d)\n";
170 static char netproblems[] = "network problems";
171 static char partfull[] = "partition full";
172 static char overquota[] = "over quota";
173 static char unknownerr[] = "unknown error";
175 AFS_STATCNT(afs_StoreWarn);
/* Network-trouble case. */
181 afs_warn(problem_fmt, avolume, netproblems);
183 afs_warnuser(problem_fmt, avolume, netproblems);
/* Server partition out of space. */
186 if (acode == ENOSPC) {
191 afs_warn(problem_fmt, avolume, partfull);
193 afs_warnuser(problem_fmt, avolume, partfull);
197 /* EDQUOT doesn't exist on solaris and won't be sent by the server.
198 * Instead ENOSPC will be sent...
200 if (acode == EDQUOT) {
205 afs_warn(problem_fmt, avolume, overquota);
207 afs_warnuser(problem_fmt, avolume, overquota);
/* Fallback: report the raw error code. */
215 afs_warn(problem_fmt_w_error, avolume, acode);
217 afs_warnuser(problem_fmt_w_error, avolume, acode);
/* Wake the cache truncate daemon if the cache has just crossed the
 * "too full" threshold, or if enough discarded-but-untruncated chunks
 * have piled up (afs_blocksDiscarded > CM_MAXDISCARDEDCHUNKS) while the
 * daemon is asleep.  Cheap enough to call opportunistically (e.g. from
 * afs_AdjustSize when a chunk grows). */
221 void afs_MaybeWakeupTruncateDaemon() {
222 if (!afs_CacheTooFull && afs_CacheIsTooFull()) {
/* Latch the flag first so concurrent callers don't re-wake. */
223 afs_CacheTooFull = 1;
224 if (!afs_TruncateDaemonRunning)
225 afs_osi_Wakeup((char *)afs_CacheTruncateDaemon);
226 } else if (!afs_TruncateDaemonRunning &&
227 afs_blocksDiscarded > CM_MAXDISCARDEDCHUNKS) {
228 afs_osi_Wakeup((char *)afs_CacheTruncateDaemon);
232 /* Keep statistics on run time for afs_CacheTruncateDaemon. This is a
233 * struct so we need only export one symbol for AIX.
/* NOTE(review): the "struct { ... } CTD_stats" wrapper lines are elided in
 * this extract; the fields below are referenced elsewhere as
 * CTD_stats.CTD_afterSleep etc. */
236 osi_timeval_t CTD_beforeSleep;
237 osi_timeval_t CTD_afterSleep;
238 osi_timeval_t CTD_sleepTime;
239 osi_timeval_t CTD_runTime;
/* Minimum cache size in 1K blocks; computed in afs_CacheTruncateDaemon. */
243 u_int afs_min_cache = 0;
/* Background daemon that keeps the disk cache within bounds.  Loops
 * forever (until afs_termState requests shutdown): when the cache is too
 * full it calls afs_GetDownD to move LRU entries to the free/discard
 * lists, then truncates discarded cache files via afs_FreeDiscardedDCache,
 * then sleeps until afs_MaybeWakeupTruncateDaemon wakes it.  Run/sleep
 * durations are accumulated in CTD_stats. */
244 void afs_CacheTruncateDaemon() {
245 osi_timeval_t CTD_tmpTime;
/* High-water mark for dcache slots and the block-count low-water target,
 * both derived from tunable percentages of the configured cache size. */
248 u_int dc_hiwat = (100-CM_DCACHECOUNTFREEPCT+CM_DCACHEEXTRAPCT)*afs_cacheFiles/100;
249 afs_min_cache = (((10 * AFS_CHUNKSIZE(0)) + afs_fsfragsize) & ~afs_fsfragsize)>>10;
251 osi_GetuTime(&CTD_stats.CTD_afterSleep);
252 afs_TruncateDaemonRunning = 1;
254 cb_lowat = ((CM_DCACHESPACEFREEPCT-CM_DCACHEEXTRAPCT)
255 * afs_cacheBlocks) / 100;
256 MObtainWriteLock(&afs_xdcache,266);
257 if (afs_CacheTooFull) {
258 int space_needed, slots_needed;
259 /* if we get woken up, we should try to clean something out */
/* Bounded retry: at most 10 GetDownD passes per wakeup so we never spin
 * forever holding afs_xdcache when space can't be reclaimed. */
260 for (counter = 0; counter < 10; counter++) {
261 space_needed = afs_blocksUsed - afs_blocksDiscarded - cb_lowat;
262 slots_needed = dc_hiwat - afs_freeDCCount - afs_discardDCCount;
263 afs_GetDownD(slots_needed, &space_needed);
264 if ((space_needed <= 0) && (slots_needed <= 0)) {
267 if (afs_termState == AFSOP_STOP_TRUNCDAEMON)
270 if (!afs_CacheIsTooFull())
271 afs_CacheTooFull = 0;
273 MReleaseWriteLock(&afs_xdcache);
276 * This is a defensive check to try to avoid starving threads
277 * that may need the global lock so thay can help free some
278 * cache space. If this thread won't be sleeping or truncating
279 * any cache files then give up the global lock so other
280 * threads get a chance to run.
282 if ((afs_termState!=AFSOP_STOP_TRUNCDAEMON) && afs_CacheTooFull &&
283 (!afs_blocksDiscarded || afs_WaitForCacheDrain)) {
284 afs_osi_Wait(100, 0, 0); /* 100 milliseconds */
288 * This is where we free the discarded cache elements.
/* Each iteration truncates one discarded chunk's backing file;
 * afs_FreeDiscardedDCache takes/releases afs_xdcache itself. */
290 while(afs_blocksDiscarded && !afs_WaitForCacheDrain &&
291 (afs_termState!=AFSOP_STOP_TRUNCDAEMON))
293 afs_FreeDiscardedDCache();
296 /* See if we need to continue to run. Someone may have
297 * signalled us while we were executing.
299 if (!afs_WaitForCacheDrain && !afs_CacheTooFull &&
300 (afs_termState!=AFSOP_STOP_TRUNCDAEMON))
302 /* Collect statistics on truncate daemon. */
303 CTD_stats.CTD_nSleeps++;
304 osi_GetuTime(&CTD_stats.CTD_beforeSleep);
305 afs_stats_GetDiff(CTD_tmpTime, CTD_stats.CTD_afterSleep,
306 CTD_stats.CTD_beforeSleep);
307 afs_stats_AddTo(CTD_stats.CTD_runTime, CTD_tmpTime);
/* Clear the running flag across the sleep so wakers know a wakeup is
 * actually needed (see afs_MaybeWakeupTruncateDaemon). */
309 afs_TruncateDaemonRunning = 0;
310 afs_osi_Sleep((char *)afs_CacheTruncateDaemon);
311 afs_TruncateDaemonRunning = 1;
313 osi_GetuTime(&CTD_stats.CTD_afterSleep);
314 afs_stats_GetDiff(CTD_tmpTime, CTD_stats.CTD_beforeSleep,
315 CTD_stats.CTD_afterSleep);
316 afs_stats_AddTo(CTD_stats.CTD_sleepTime, CTD_tmpTime);
/* Shutdown handshake: advance afs_termState and wake the waiter. */
318 if (afs_termState == AFSOP_STOP_TRUNCDAEMON) {
320 afs_termState = AFSOP_STOP_AFSDB;
322 afs_termState = AFSOP_STOP_RXEVENT;
324 afs_osi_Wakeup(&afs_termState);
335 * Make adjustment for the new size in the disk cache entry
337 * Major Assumptions Here:
338 * Assumes that frag size is an integral power of two, less one,
339 * and that this is a two's complement machine. I don't
340 * know of any filesystems which violate this assumption...
343 * adc : Ptr to dcache entry.
344 * anewsize : New size desired.
/* Records a chunk's new byte size in its dcache entry, updates the global
 * 1K-block accounting (afs_blocksUsed), and pokes the truncate daemon when
 * the chunk grew.  Marks the entry DFEntryMod so it gets written back. */
348 afs_AdjustSize(adc, newSize)
349 register struct dcache *adc;
350 register afs_int32 newSize;
354 register afs_int32 oldSize;
356 AFS_STATCNT(afs_AdjustSize);
358 adc->flags |= DFEntryMod;
/* ((x + frag) ^ frag) >> 10: since afs_fsfragsize is 2^n - 1, the XOR only
 * flips bits that the >>10 discards, so this equals (x + frag) >> 10 —
 * i.e. the size rounded up to whole 1K blocks. */
359 oldSize = ((adc->f.chunkBytes + afs_fsfragsize)^afs_fsfragsize)>>10;/* round up */
360 adc->f.chunkBytes = newSize;
361 newSize = ((newSize + afs_fsfragsize)^afs_fsfragsize)>>10;/* round up */
362 if (newSize > oldSize) {
363 /* We're growing the file, wakeup the daemon */
364 afs_MaybeWakeupTruncateDaemon();
366 afs_blocksUsed += (newSize - oldSize);
367 afs_stats_cmperf.cacheBlocksInUse = afs_blocksUsed; /* XXX */
379 * This routine is responsible for moving at least one entry (but up
380 * to some number of them) from the LRU queue to the free queue.
383 * anumber : Number of entries that should ideally be moved.
384 * aneedSpace : How much space we need (1K blocks);
387 * The anumber parameter is just a hint; at least one entry MUST be
388 * moved, or we'll panic. We must be called with afs_xdcache
389 * write-locked. We should try to satisfy both anumber and aneedspace,
390 * whichever is more demanding - need to do several things:
391 * 1. only grab up to anumber victims if aneedSpace <= 0, not
392 * the whole set of MAXATONCE.
393 * 2. dynamically choose MAXATONCE to reflect severity of
394 * demand: something like (*aneedSpace >> (logChunk - 9))
395 * N.B. if we're called with aneedSpace <= 0 and anumber > 0, that
396 * indicates that the cache is not properly configured/tuned or
397 * something. We should be able to automatically correct that problem.
400 #define MAXATONCE 16 /* max we can obtain at once */
/* Victim selection + reclamation.  Each outer-loop pass scans the whole
 * index for the MAXATONCE oldest eligible entries (by afs_indexTimes),
 * then tries to discard each one, dropping/reacquiring afs_xdcache around
 * operations that need other locks (xvcache, vlock, VM page flushes) and
 * re-validating the entry after every reacquisition. */
401 static void afs_GetDownD(int anumber, int *aneedSpace)
405 struct VenusFid *afid;
409 register struct vcache *tvc;
410 afs_uint32 victims[MAXATONCE];
411 struct dcache *victimDCs[MAXATONCE];
412 afs_hyper_t victimTimes[MAXATONCE];/* youngest (largest LRU time) first */
413 afs_uint32 victimPtr; /* next free item in victim arrays */
414 afs_hyper_t maxVictimTime; /* youngest (largest LRU time) victim */
415 afs_uint32 maxVictimPtr; /* where it is */
418 AFS_STATCNT(afs_GetDownD);
/* Caller contract: afs_xdcache must be write-held. */
419 if (CheckLock(&afs_xdcache) != -1)
420 osi_Panic("getdownd nolock");
421 /* decrement anumber first for all dudes in free list */
422 /* SHOULD always decrement anumber first, even if aneedSpace >0,
423 * because we should try to free space even if anumber <=0 */
424 if (!aneedSpace || *aneedSpace <= 0) {
425 anumber -= afs_freeDCCount;
426 if (anumber <= 0) return; /* enough already free */
428 /* bounds check parameter */
429 if (anumber > MAXATONCE)
430 anumber = MAXATONCE; /* all we can do */
433 * The phase variable manages reclaims. Set to 0, the first pass,
434 * we don't reclaim active entries. Set to 1, we reclaim even active
/* IFFlag marks entries already considered this invocation, so repeated
 * scans don't re-select entries we decided to skip. */
438 for (i = 0; i < afs_cacheFiles; i++)
439 /* turn off all flags */
440 afs_indexFlags[i] &= ~IFFlag;
442 while (anumber > 0 || (aneedSpace && *aneedSpace >0)) {
443 /* find oldest entries for reclamation */
444 maxVictimPtr = victimPtr = 0;
445 hzero(maxVictimTime);
446 /* select victims from access time array */
447 for (i = 0; i < afs_cacheFiles; i++) {
448 if (afs_indexFlags[i] & (IFDataMod | IFFree | IFDiscarded)) {
449 /* skip if dirty or already free */
452 tdc = afs_indexTable[i];
453 if (tdc && (tdc->refCount != 0)) {
454 /* Referenced; can't use it! */
457 hset(vtime, afs_indexTimes[i]);
459 /* if we've already looked at this one, skip it */
460 if (afs_indexFlags[i] & IFFlag) continue;
462 if (victimPtr < MAXATONCE) {
463 /* if there's at least one free victim slot left */
464 victims[victimPtr] = i;
465 hset(victimTimes[victimPtr], vtime);
466 if (hcmp(vtime, maxVictimTime) > 0) {
467 hset(maxVictimTime, vtime);
468 maxVictimPtr = victimPtr;
/* Victim array full: replace the youngest current victim if this
 * entry is older than it. */
472 else if (hcmp(vtime, maxVictimTime) < 0) {
474 * We're older than youngest victim, so we replace at
477 /* find youngest (largest LRU) victim */
479 if (j == victimPtr) osi_Panic("getdownd local");
481 hset(victimTimes[j], vtime);
482 /* recompute maxVictimTime */
483 hset(maxVictimTime, vtime);
484 for(j = 0; j < victimPtr; j++)
485 if (hcmp(maxVictimTime, victimTimes[j]) < 0) {
486 hset(maxVictimTime, victimTimes[j]);
492 /* now really reclaim the victims */
493 j = 0; /* flag to track if we actually got any of the victims */
494 /* first, hold all the victims, since we're going to release the lock
495 * during the truncate operation.
497 for(i=0; i < victimPtr; i++)
498 victimDCs[i] = afs_GetDSlot(victims[i], 0);
499 for(i = 0; i < victimPtr; i++) {
500 /* q is first elt in dcache entry */
502 /* now, since we're dropping the afs_xdcache lock below, we
503 * have to verify, before proceeding, that there are no other
504 * references to this dcache entry, even now. Note that we
505 * compare with 1, since we bumped it above when we called
506 * afs_GetDSlot to preserve the entry's identity.
508 if (tdc->refCount == 1) {
509 unsigned char chunkFlags;
510 afs_size_t tchunkoffset;
512 /* xdcache is lower than the xvcache lock */
513 MReleaseWriteLock(&afs_xdcache);
514 MObtainReadLock(&afs_xvcache);
515 tvc = afs_FindVCache(afid, 0,0, 0, 0 /* no stats, no vlru */ );
516 MReleaseReadLock(&afs_xvcache);
517 MObtainWriteLock(&afs_xdcache, 527);
/* Re-check after lock cycling: someone may have referenced it. */
519 if (tdc->refCount > 1) skip = 1;
521 tchunkoffset = AFS_CHUNKTOBASE(tdc->f.chunk);
522 chunkFlags = afs_indexFlags[tdc->index];
523 if (phase == 0 && osi_Active(tvc)) skip = 1;
524 if (phase > 0 && osi_Active(tvc) && (tvc->states & CDCLock)
525 && (chunkFlags & IFAnyPages)) skip = 1;
526 if (chunkFlags & IFDataMod) skip = 1;
527 afs_Trace4(afs_iclSetp, CM_TRACE_GETDOWND,
528 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32, skip,
529 ICL_TYPE_INT32, tdc->index,
531 ICL_HANDLE_OFFSET(tchunkoffset));
533 #if defined(AFS_SUN5_ENV)
535 * Now we try to invalidate pages. We do this only for
536 * Solaris. For other platforms, it's OK to recycle a
537 * dcache entry out from under a page, because the strategy
538 * function can call afs_GetDCache().
540 if (!skip && (chunkFlags & IFAnyPages)) {
543 MReleaseWriteLock(&afs_xdcache);
544 MObtainWriteLock(&tvc->vlock, 543);
545 if (tvc->multiPage) {
549 /* block locking pages */
550 tvc->vstates |= VPageCleaning;
551 /* block getting new pages */
553 MReleaseWriteLock(&tvc->vlock);
554 /* One last recheck */
555 MObtainWriteLock(&afs_xdcache, 333);
556 chunkFlags = afs_indexFlags[tdc->index];
557 if (tdc->refCount > 1
558 || (chunkFlags & IFDataMod)
559 || (osi_Active(tvc) && (tvc->states & CDCLock)
560 && (chunkFlags & IFAnyPages))) {
562 MReleaseWriteLock(&afs_xdcache);
565 MReleaseWriteLock(&afs_xdcache);
/* Ask the VM layer to throw away this chunk's pages. */
567 code = osi_VM_GetDownD(tvc, tdc);
569 MObtainWriteLock(&afs_xdcache,269);
570 /* we actually removed all pages, clean and dirty */
572 afs_indexFlags[tdc->index] &= ~(IFDirtyPages| IFAnyPages);
575 MReleaseWriteLock(&afs_xdcache);
577 MObtainWriteLock(&tvc->vlock, 544);
578 if (--tvc->activeV == 0 && (tvc->vstates & VRevokeWait)) {
579 tvc->vstates &= ~VRevokeWait;
580 afs_osi_Wakeup((char *)&tvc->vstates);
583 if (tvc->vstates & VPageCleaning) {
584 tvc->vstates &= ~VPageCleaning;
585 afs_osi_Wakeup((char *)&tvc->vstates);
588 MReleaseWriteLock(&tvc->vlock);
590 #endif /* AFS_SUN5_ENV */
592 MReleaseWriteLock(&afs_xdcache);
596 MObtainWriteLock(&afs_xdcache, 528);
597 if (afs_indexFlags[tdc->index] &
598 (IFDataMod | IFDirtyPages | IFAnyPages)) skip = 1;
599 if (tdc->refCount > 1) skip = 1;
601 #if defined(AFS_SUN5_ENV)
603 /* no vnode, so IFDirtyPages is spurious (we don't
604 * sweep dcaches on vnode recycling, so we can have
605 * DIRTYPAGES set even when all pages are gone). Just
607 * Hold vcache lock to prevent vnode from being
608 * created while we're clearing IFDirtyPages.
610 afs_indexFlags[tdc->index] &= ~(IFDirtyPages | IFAnyPages);
614 /* skip this guy and mark him as recently used */
615 afs_indexFlags[tdc->index] |= IFFlag;
616 afs_Trace4(afs_iclSetp, CM_TRACE_GETDOWND,
617 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32, 2,
618 ICL_TYPE_INT32, tdc->index,
620 ICL_HANDLE_OFFSET(tchunkoffset));
623 /* flush this dude from the data cache and reclaim;
624 * first, make sure no one will care that we damage
625 * it, by removing it from all hash tables. Then,
626 * melt it down for parts. Note that any concurrent
627 * (new possibility!) calls to GetDownD won't touch
628 * this guy because his reference count is > 0. */
629 afs_Trace4(afs_iclSetp, CM_TRACE_GETDOWND,
630 ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32, 3,
631 ICL_TYPE_INT32, tdc->index,
633 ICL_HANDLE_OFFSET(tchunkoffset));
635 AFS_STATCNT(afs_gget);
637 afs_HashOutDCache(tdc);
638 if (tdc->f.chunkBytes != 0) {
/* Credit the reclaimed space (in 1K blocks) toward the request. */
641 *aneedSpace -= (tdc->f.chunkBytes + afs_fsfragsize) >> 10;
646 afs_DiscardDCache(tdc);
651 j = 1; /* we reclaimed at least one victim */
657 tdc->refCount--; /* put it back */
662 /* Phase is 0 and no one was found, so try phase 1 (ignore
663 * osi_Active flag) */
666 for (i = 0; i < afs_cacheFiles; i++)
667 /* turn off all flags */
668 afs_indexFlags[i] &= ~IFFlag;
672 /* found no one in phase 1, we're hosed */
673 if (victimPtr == 0) break;
675 } /* big while loop */
682 * Description: remove adc from any hash tables that would allow it to be located
683 * again by afs_FindDCache or afs_GetDCache.
685 * Parameters: adc -- pointer to dcache entry to remove from hash tables.
687 * Locks: Must have the afs_xdcache lock write-locked to call this function.
/* Unlinks the entry's index from both hash chains (DCHash on fid+chunk and
 * DVHash on fid), zeroes the fid's Volume so a post-crash scan of stable
 * entries won't resurrect it, and marks the entry for write-back. */
689 afs_HashOutDCache(adc)
692 { /*afs_HashOutDCache*/
697 AFS_STATCNT(afs_glink);
699 /* we know this guy's in the LRUQ. We'll move dude into DCQ below */
701 /* if this guy is in the hash table, pull him out */
702 if (adc->f.fid.Fid.Volume != 0) {
703 /* remove entry from first hash chains */
704 i = DCHash(&adc->f.fid, adc->f.chunk);
705 us = afs_dchashTbl[i];
706 if (us == adc->index) {
707 /* first dude in the list */
708 afs_dchashTbl[i] = afs_dcnextTbl[adc->index];
711 /* somewhere on the chain */
/* Singly-linked chain: walk until we find the predecessor. */
712 while (us != NULLIDX) {
713 if (afs_dcnextTbl[us] == adc->index) {
714 /* found item pointing at the one to delete */
715 afs_dcnextTbl[us] = afs_dcnextTbl[adc->index];
718 us = afs_dcnextTbl[us];
/* Entry was supposed to be on this chain; inconsistency is fatal. */
720 if (us == NULLIDX) osi_Panic("dcache hc");
722 /* remove entry from *other* hash chain */
723 i = DVHash(&adc->f.fid);
724 us = afs_dvhashTbl[i];
725 if (us == adc->index) {
726 /* first dude in the list */
727 afs_dvhashTbl[i] = afs_dvnextTbl[adc->index];
730 /* somewhere on the chain */
731 while (us != NULLIDX) {
732 if (afs_dvnextTbl[us] == adc->index) {
733 /* found item pointing at the one to delete */
734 afs_dvnextTbl[us] = afs_dvnextTbl[adc->index];
737 us = afs_dvnextTbl[us];
739 if (us == NULLIDX) osi_Panic("dcache hv");
743 /* prevent entry from being found on a reboot (it is already out of
744 * the hash table, but after a crash, we just look at fid fields of
745 * stable (old) entries).
747 adc->f.fid.Fid.Volume = 0; /* invalid */
749 /* mark entry as modified */
750 adc->flags |= DFEntryMod;
754 } /*afs_HashOutDCache */
761 * Flush the given dcache entry, pulling it from hash chains
762 * and truncating the associated cache file.
765 * adc: Ptr to dcache entry to flush.
768 * This routine must be called with the afs_xdcache lock held
/* NOTE(review): the function-name line of this definition is elided in this
 * extract; only the K&R parameter declaration is visible below. */
774 register struct dcache *adc;
775 { /*afs_FlushDCache*/
777 AFS_STATCNT(afs_FlushDCache);
779 * Bump the number of cache files flushed.
781 afs_stats_cmperf.cacheFlushes++;
783 /* remove from all hash tables */
784 afs_HashOutDCache(adc);
786 /* Free its space; special case null operation, since truncate operation
787 * in UFS is slow even in this case, and this allows us to pre-truncate
788 * these files at more convenient times with fewer locks set
789 * (see afs_GetDownD).
791 if (adc->f.chunkBytes != 0) {
/* Non-empty chunk: defer the (slow) truncate by discarding, and poke the
 * daemon that does the actual file truncation. */
792 afs_DiscardDCache(adc);
793 afs_MaybeWakeupTruncateDaemon();
/* If writers are blocked waiting for cache space, wake them once usage
 * drops below the drain threshold. */
798 if (afs_WaitForCacheDrain) {
799 if (afs_blocksUsed <=
800 (CM_CACHESIZEDRAINEDPCT*afs_cacheBlocks)/100) {
801 afs_WaitForCacheDrain = 0;
802 afs_osi_Wakeup(&afs_WaitForCacheDrain);
805 } /*afs_FlushDCache*/
811 * Description: put a dcache entry on the free dcache entry list.
813 * Parameters: adc -- dcache entry to free
815 * Environment: called with afs_xdcache lock write-locked.
/* NOTE(review): the function-name line is elided in this extract. */
818 register struct dcache *adc; {
819 /* Thread on free list, update free list count and mark entry as
820 * freed in its indexFlags element. Also, ensure DCache entry gets
821 * written out (set DFEntryMod).
/* The free list reuses afs_dvnextTbl as its link field. */
824 afs_dvnextTbl[adc->index] = afs_freeDCList;
825 afs_freeDCList = adc->index;
827 afs_indexFlags[adc->index] |= IFFree;
828 adc->flags |= DFEntryMod;
/* Wake threads waiting for the cache to drain once net usage (used minus
 * discarded-but-untruncated) falls below the drain threshold. */
830 if (afs_WaitForCacheDrain) {
831 if ((afs_blocksUsed - afs_blocksDiscarded) <=
832 (CM_CACHESIZEDRAINEDPCT*afs_cacheBlocks)/100) {
833 afs_WaitForCacheDrain = 0;
834 afs_osi_Wakeup(&afs_WaitForCacheDrain);
844 * Discard the cache element by moving it to the discardDCList.
845 * This puts the cache element into a quasi-freed state, where
846 * the space may be reused, but the file has not been truncated.
848 * Major Assumptions Here:
849 * Assumes that frag size is an integral power of two, less one,
850 * and that this is a two's complement machine. I don't
851 * know of any filesystems which violate this assumption...
854 * adc : Ptr to dcache entry.
858 afs_DiscardDCache(adc)
859 register struct dcache *adc;
861 { /*afs_DiscardDCache*/
863 register afs_int32 size;
865 AFS_STATCNT(afs_DiscardDCache);
/* Size in 1K blocks, rounded up (XOR trick; see afs_AdjustSize). */
866 size = ((adc->f.chunkBytes + afs_fsfragsize)^afs_fsfragsize)>>10;/* round up */
867 afs_blocksDiscarded += size;
868 afs_stats_cmperf.cacheBlocksDiscarded = afs_blocksDiscarded;
/* Thread onto the discard list (links via afs_dvnextTbl, like the free
 * list) — the truncate daemon will reclaim the file space later. */
870 afs_dvnextTbl[adc->index] = afs_discardDCList;
871 afs_discardDCList = adc->index;
872 afs_discardDCCount++;
/* Volume==0 marks the slot invalid for post-crash recovery scans. */
874 adc->f.fid.Fid.Volume = 0;
875 adc->flags |= DFEntryMod;
876 afs_indexFlags[adc->index] |= IFDiscarded;
878 if (afs_WaitForCacheDrain) {
879 if ((afs_blocksUsed - afs_blocksDiscarded) <=
880 (CM_CACHESIZEDRAINEDPCT*afs_cacheBlocks)/100) {
881 afs_WaitForCacheDrain = 0;
882 afs_osi_Wakeup(&afs_WaitForCacheDrain);
886 } /*afs_DiscardDCache*/
889 * afs_FreeDiscardedDCache
892 * Free the next element on the list of discarded cache elements.
/* Pops one entry from the discard list, truncates its backing cache file
 * to zero length (reclaiming the disk space), adjusts block accounting,
 * and returns the slot to the free list.  Acquires afs_xdcache itself;
 * drops it around the file truncation since that can block. */
895 afs_FreeDiscardedDCache()
897 register struct dcache *tdc;
898 register struct osi_file *tfile;
899 register afs_int32 size;
901 AFS_STATCNT(afs_FreeDiscardedDCache);
903 MObtainWriteLock(&afs_xdcache,510);
904 if (!afs_blocksDiscarded) {
905 MReleaseWriteLock(&afs_xdcache);
910 * Get an entry from the list of discarded cache elements
912 tdc = afs_GetDSlot(afs_discardDCList, 0);
913 afs_discardDCList = afs_dvnextTbl[tdc->index];
914 afs_dvnextTbl[tdc->index] = NULLIDX;
915 afs_discardDCCount--;
916 size = ((tdc->f.chunkBytes + afs_fsfragsize)^afs_fsfragsize)>>10;/* round up */
917 afs_blocksDiscarded -= size;
918 afs_stats_cmperf.cacheBlocksDiscarded = afs_blocksDiscarded;
/* Lock dropped here: truncation does real I/O and must not hold xdcache. */
919 MReleaseWriteLock(&afs_xdcache);
922 * Truncate the element to reclaim its space
924 tfile = afs_CFileOpen(tdc->f.inode);
925 afs_CFileTruncate(tfile, 0);
926 afs_CFileClose(tfile);
927 afs_AdjustSize(tdc, 0);
930 * Free the element we just truncated
932 MObtainWriteLock(&afs_xdcache,511);
933 afs_indexFlags[tdc->index] &= ~IFDiscarded;
936 MReleaseWriteLock(&afs_xdcache);
940 * afs_MaybeFreeDiscardedDCache
943 * Free as many entries from the list of discarded cache elements
944 * as we need to get the free space down below CM_WAITFORDRAINPCT (98%).
/* Synchronous helper for callers that can't wait for the truncate daemon:
 * truncates discarded chunks until usage is under the drain threshold. */
949 afs_MaybeFreeDiscardedDCache()
952 AFS_STATCNT(afs_MaybeFreeDiscardedDCache);
954 while (afs_blocksDiscarded &&
955 (afs_blocksUsed > (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100)) {
956 afs_FreeDiscardedDCache();
965 * Try to free up a certain number of disk slots.
968 * anumber : Targeted number of disk slots to free up.
970 #if defined(AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
971 extern SV_TYPE afs_sgibksync;
972 extern SV_TYPE afs_sgibkwait;
973 extern lock_t afs_sgibklock;
974 extern struct dcache *afs_sgibklist;
/* Recycles in-memory dcache slot structures (not cache file space): walks
 * the DLRU from the tail, and for each unreferenced entry writes it back
 * if modified, detaches it from the index, and pushes it on afs_freeDSList.
 * Requires afs_xdcache write-held; panics for a memory-type cache. */
978 afs_GetDownDSlot(anumber)
981 { /*afs_GetDownDSlot*/
983 struct afs_q *tq, *nq;
989 AFS_STATCNT(afs_GetDownDSlot);
990 if (cacheDiskType == AFS_FCACHE_TYPE_MEM)
991 osi_Panic("diskless getdowndslot");
993 if (CheckLock(&afs_xdcache) != -1)
994 osi_Panic("getdowndslot nolock");
996 /* decrement anumber first for all dudes in free list */
997 for(tdc = afs_freeDSList; tdc; tdc = (struct dcache *)tdc->lruq.next)
1000 return; /* enough already free */
/* Scan from LRU tail (oldest) toward the head. */
1002 for(cnt=0, tq = afs_DLRU.prev; tq != &afs_DLRU && anumber > 0;
1004 tdc = (struct dcache *) tq; /* q is first elt in dcache entry */
1005 nq = QPrev(tq); /* in case we remove it */
1006 if (tdc->refCount == 0) {
1007 if ((ix=tdc->index) == NULLIDX) osi_Panic("getdowndslot");
1008 /* pull the entry out of the lruq and put it on the free list */
1009 QRemove(&tdc->lruq);
1011 /* write-through if modified */
1012 if (tdc->flags & DFEntryMod) {
1013 #if defined(AFS_SGI_ENV) && defined(AFS_SGI_SHORTSTACK)
1015 * ask proxy to do this for us - we don't have the stack space
/* SGI short-stack path: hand the entry to the background-sync daemon
 * and wait until it clears DFEntryMod. */
1017 while (tdc->flags & DFEntryMod) {
1020 s = SPLOCK(afs_sgibklock);
1021 if (afs_sgibklist == NULL) {
1022 /* if slot is free, grab it. */
1023 afs_sgibklist = tdc;
1024 SV_SIGNAL(&afs_sgibksync);
1026 /* wait for daemon to (start, then) finish. */
1027 SP_WAIT(afs_sgibklock, s, &afs_sgibkwait, PINOD);
1031 tdc->flags &= ~DFEntryMod;
1032 afs_WriteDCache(tdc, 1);
1039 struct osi_file * f = (struct osi_file *)tdc->ihint;
1047 /* finally put the entry in the free list */
1048 afs_indexTable[ix] = (struct dcache *) 0;
1049 afs_indexFlags[ix] &= ~IFEverUsed;
1050 tdc->index = NULLIDX;
/* afs_freeDSList is linked through the lruq.next field. */
1051 tdc->lruq.next = (struct afs_q *) afs_freeDSList;
1052 afs_freeDSList = tdc;
1056 } /*afs_GetDownDSlot*/
1064 * Decrement the reference count on a disk cache entry.
1067 * ad : Ptr to the dcache entry to decrement.
1070 * Nothing interesting.
/* NOTE(review): the function-name line (afs_PutDCache per the AFS_STATCNT
 * below) is elided in this extract. */
1073 register struct dcache *ad;
1076 AFS_STATCNT(afs_PutDCache);
1077 #ifndef AFS_SUN5_ENVX
1078 MObtainWriteLock(&afs_xdcache,276);
/* Dropping a reference that isn't held is a fatal accounting bug. */
1080 if (ad->refCount <= 0)
1081 osi_Panic("putdcache");
1083 #ifdef AFS_SUN5_ENVX
1084 MReleaseWriteLock(&ad->lock);
1086 MReleaseWriteLock(&afs_xdcache);
1097 * Try to discard all data associated with this file from the
1101 * avc : Pointer to the cache info for the file.
1104 * Both pvnLock and lock are write held.
/* Flushes every clean, unreferenced dcache chunk belonging to avc's fid:
 * first evicts VM pages (osi_VM_TryToSmush), then walks the DVHash chain
 * and afs_FlushDCache-es each matching entry. */
1107 afs_TryToSmush(avc, acred, sync)
1108 register struct vcache *avc;
1109 struct AFS_UCRED *acred;
1111 { /*afs_TryToSmush*/
1113 register struct dcache *tdc;
1116 AFS_STATCNT(afs_TryToSmush);
1117 afs_Trace2(afs_iclSetp, CM_TRACE_TRYTOSMUSH, ICL_TYPE_POINTER, avc,
1118 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length));
1119 sync = 1; /* XX Temp testing XX*/
1121 #if defined(AFS_SUN5_ENV)
/* Solaris: block new getpage activity on this vnode for the duration. */
1122 ObtainWriteLock(&avc->vlock, 573);
1123 avc->activeV++; /* block new getpages */
1124 ReleaseWriteLock(&avc->vlock);
1127 /* Flush VM pages */
1128 osi_VM_TryToSmush(avc, acred, sync);
1131 * Get the hash chain containing all dce's for this fid
1133 i = DVHash(&avc->fid);
1134 MObtainWriteLock(&afs_xdcache,277);
/* Note: the next index is saved before any flush, since flushing unlinks
 * the current entry from the chain. */
1135 for(index = afs_dvhashTbl[i]; index != NULLIDX; index=i) {
1136 i = afs_dvnextTbl[index]; /* next pointer this hash table */
1137 if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
1138 tdc = afs_GetDSlot(index, (struct dcache *)0);
1139 if (!FidCmp(&tdc->f.fid, &avc->fid)) {
/* Only flush entries that are clean and held solely by us. */
1141 if ((afs_indexFlags[index] & IFDataMod) == 0 &&
1142 tdc->refCount == 1) {
1143 afs_FlushDCache(tdc);
1146 afs_indexTable[index] = 0;
1148 lockedPutDCache(tdc);
1151 #if defined(AFS_SUN5_ENV)
1152 ObtainWriteLock(&avc->vlock, 545);
1153 if (--avc->activeV == 0 && (avc->vstates & VRevokeWait)) {
1154 avc->vstates &= ~VRevokeWait;
1155 afs_osi_Wakeup((char *)&avc->vstates);
1157 ReleaseWriteLock(&avc->vlock);
1159 MReleaseWriteLock(&afs_xdcache);
1161 * It's treated like a callback so that when we do lookups we'll invalidate the unique bit if any
1162 * trytoSmush occured during the lookup call
1165 } /*afs_TryToSmush*/
1171 * Given the cached info for a file and a byte offset into the
1172 * file, make sure the dcache entry for that file and containing
1173 * the given byte is available, returning it to our caller.
1176 * avc : Pointer to the (held) vcache entry to look in.
1177 * abyte : Which byte we want to get to.
1180 * Pointer to the dcache entry covering the file & desired byte,
1181 * or NULL if not found.
1184 * The vcache entry is held upon entry.
/* Pure lookup (no fetch): hashes (fid, chunk) into afs_dchashTbl and walks
 * the chain.  On a hit the entry is returned with its refCount still
 * raised by afs_GetDSlot — the caller owns that reference. */
1187 struct dcache *afs_FindDCache(avc, abyte)
1188 register struct vcache *avc;
1191 { /*afs_FindDCache*/
1194 register afs_int32 i, index;
1195 register struct dcache *tdc;
1197 AFS_STATCNT(afs_FindDCache);
1198 chunk = AFS_CHUNK(abyte);
1201 * Hash on the [fid, chunk] and get the corresponding dcache index
1202 * after write-locking the dcache.
1204 i = DCHash(&avc->fid, chunk);
1205 MObtainWriteLock(&afs_xdcache,278);
1206 for(index = afs_dchashTbl[i]; index != NULLIDX;) {
/* Cheap pre-filter on the fid's Unique field before paging in the slot. */
1207 if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
1208 tdc = afs_GetDSlot(index, (struct dcache *)0);
1209 if (!FidCmp(&tdc->f.fid, &avc->fid) && chunk == tdc->f.chunk) {
1210 break; /* leaving refCount high for caller */
1212 lockedPutDCache(tdc);
1214 index = afs_dcnextTbl[index];
1216 MReleaseWriteLock(&afs_xdcache);
1217 if (index != NULLIDX) {
/* Touch the LRU timestamp so this entry isn't an eviction victim. */
1218 hset(afs_indexTimes[tdc->index], afs_indexCounter);
1219 hadd32(afs_indexCounter, 1);
1223 return(struct dcache *) 0;
1225 } /*afs_FindDCache*/
1229 * afs_UFSCacheStoreProc
1232 * Called upon store.
1235 * acall : Ptr to the Rx call structure involved.
1236 * afile : Ptr to the related file descriptor.
1237 * alen : Size of the file in bytes.
1238 * avc : Ptr to the vcache entry.
1239 * shouldWake : is it "safe" to return early from close() ?
1240 * abytesToXferP : Set to the number of bytes to xfer.
1241 * NOTE: This parameter is only used if AFS_NOSTATS
1243 * abytesXferredP : Set to the number of bytes actually xferred.
1244 * NOTE: This parameter is only used if AFS_NOSTATS
1248 * Nothing interesting.
/* Pumps a cache file to the file server over an Rx call: reads the local
 * cache file in AFS_LRALLOCSIZ slices and rx_Write()s each slice.  Once
 * the server reports the file locked (rx_GetRemoteStatus & 1), clears
 * *shouldWake so a waiting close() can return early. */
1250 static int afs_UFSCacheStoreProc(acall, afile, alen, avc, shouldWake,
1251 abytesToXferP, abytesXferredP)
1252 register struct rx_call *acall;
1253 struct osi_file *afile;
1254 register afs_int32 alen;
1255 afs_size_t *abytesToXferP;
1256 afs_size_t *abytesXferredP;
1259 { /* afs_UFSCacheStoreProc*/
1261 afs_int32 code, got;
1262 register char *tbuffer;
1265 AFS_STATCNT(UFS_CacheStoreProc);
1269 * In this case, alen is *always* the amount of data we'll be trying
1272 (*abytesToXferP) = alen;
1273 (*abytesXferredP) = 0;
1274 #endif /* AFS_NOSTATS */
1276 afs_Trace4(afs_iclSetp, CM_TRACE_STOREPROC, ICL_TYPE_POINTER, avc,
1277 ICL_TYPE_FID, &(avc->fid),
1278 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
1279 ICL_TYPE_INT32, alen);
/* Scratch transfer buffer; freed on every exit path below. */
1280 tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
1282 tlen = (alen > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : alen);
1283 got = afs_osi_Read(afile, -1, tbuffer, tlen);
1285 #if !defined(AFS_SUN5_ENV) && !defined(AFS_OSF_ENV) && !defined(AFS_SGI64_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_FBSD_ENV)
/* On platforms with per-process u.u_error, a short read plus a pending
 * error also counts as failure. */
1286 || (got != tlen && getuerror())
1289 osi_FreeLargeSpace(tbuffer);
1292 afs_Trace1(afs_iclSetp, CM_TRACE_STOREPROC2, ICL_TYPE_INT32, got);
1293 if (got == 0) printf("StoreProc: got == 0\n");
1294 #ifdef RX_ENABLE_LOCKS
1296 #endif /* RX_ENABLE_LOCKS */
1297 code = rx_Write(acall, tbuffer, got); /* writing 0 bytes will
1298 * push a short packet. Is that really what we want, just because the
1299 * data didn't come back from the disk yet? Let's try it and see. */
1300 #ifdef RX_ENABLE_LOCKS
1302 #endif /* RX_ENABLE_LOCKS */
1304 (*abytesXferredP) += code;
1305 #endif /* AFS_NOSTATS */
1307 osi_FreeLargeSpace(tbuffer);
1312 * If file has been locked on server, we can allow the store
1315 if (shouldWake && *shouldWake && (rx_GetRemoteStatus(acall) & 1)) {
1316 *shouldWake = 0; /* only do this once */
1320 afs_Trace4(afs_iclSetp, CM_TRACE_STOREPROC, ICL_TYPE_POINTER, avc,
1321 ICL_TYPE_FID, &(avc->fid),
1322 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->m.Length),
1323 ICL_TYPE_INT32, alen);
1324 osi_FreeLargeSpace(tbuffer);
1327 } /* afs_UFSCacheStoreProc*/
1331 * afs_UFSCacheFetchProc
1334 * Routine called on fetch; also tells people waiting for data
1335 * that more has arrived.
1338 * acall : Ptr to the Rx call structure.
1339 * afile : File descriptor for the cache file.
1340 * abase : Base offset to fetch.
1341 * adc : Ptr to the dcache entry for the file.
1342 * avc : Ptr to the vcache entry for the file.
1343 * abytesToXferP : Set to the number of bytes to xfer.
1344 * NOTE: This parameter is only used if AFS_NOSTATS
1346 * abytesXferredP : Set to the number of bytes actually xferred.
1347 * NOTE: This parameter is only used if AFS_NOSTATS
1351 * Nothing interesting.
1354 static int afs_UFSCacheFetchProc(acall, afile, abase, adc, avc,
1355 abytesToXferP, abytesXferredP, lengthFound)
1356 register struct rx_call *acall;
1358 afs_size_t *abytesToXferP;
1359 afs_size_t *abytesXferredP;
1362 struct osi_file *afile;
1363 afs_int32 lengthFound;
1364 { /*UFS_CacheFetchProc*/
1366 register afs_int32 code;
1367 register char *tbuffer;
1371 AFS_STATCNT(UFS_CacheFetchProc);
1372 afile->offset = 0; /* Each time start from the beginning */
1373 length = lengthFound;
1375 (*abytesToXferP) = 0;
1376 (*abytesXferredP) = 0;
1377 #endif /* AFS_NOSTATS */
1378 tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
1381 #ifdef RX_ENABLE_LOCKS
1383 #endif /* RX_ENABLE_LOCKS */
1384 code = rx_Read(acall, (char *)&length, sizeof(afs_int32));
1385 length = ntohl(length);
1386 #ifdef RX_ENABLE_LOCKS
1388 #endif /* RX_ENABLE_LOCKS */
1389 if (code != sizeof(afs_int32)) {
1390 osi_FreeLargeSpace(tbuffer);
1391 code = rx_Error(acall);
1392 return (code?code:-1); /* try to return code, not -1 */
1396 * The fetch protocol is extended for the AFS/DFS translator
1397 * to allow multiple blocks of data, each with its own length,
1398 * to be returned. As long as the top bit is set, there are more
1401 * We do not do this for AFS file servers because they sometimes
1402 * return large negative numbers as the transfer size.
1404 if (avc->states & CForeign) {
1405 moredata = length & 0x80000000;
1406 length &= ~0x80000000;
1411 (*abytesToXferP) += length;
1412 #endif /* AFS_NOSTATS */
1413 while (length > 0) {
1414 tlen = (length > AFS_LRALLOCSIZ ? AFS_LRALLOCSIZ : length);
1415 #ifdef RX_ENABLE_LOCKS
1417 #endif /* RX_ENABLE_LOCKS */
1418 code = rx_Read(acall, tbuffer, tlen);
1419 #ifdef RX_ENABLE_LOCKS
1421 #endif /* RX_ENABLE_LOCKS */
1423 (*abytesXferredP) += code;
1424 #endif /* AFS_NOSTATS */
1426 osi_FreeLargeSpace(tbuffer);
1427 afs_Trace3(afs_iclSetp, CM_TRACE_FETCH64READ,
1428 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code,
1429 ICL_TYPE_INT32, length);
1432 code = afs_osi_Write(afile, -1, tbuffer, tlen);
1434 osi_FreeLargeSpace(tbuffer);
1439 adc->validPos = abase;
1440 if (adc->flags & DFWaiting) {
1441 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
1442 ICL_TYPE_STRING, __FILE__,
1443 ICL_TYPE_INT32, __LINE__,
1444 ICL_TYPE_POINTER, adc,
1445 ICL_TYPE_INT32, adc->flags);
1446 adc->flags &= ~DFWaiting;
1447 afs_osi_Wakeup(&adc->validPos);
1451 osi_FreeLargeSpace(tbuffer);
1454 } /* afs_UFSCacheFetchProc*/
1460 * This function is called to obtain a reference to data stored in
1461 * the disk cache, locating a chunk of data containing the desired
1462 * byte and returning a reference to the disk cache entry, with its
1463 * reference count incremented.
1467 * avc : Ptr to a vcache entry (unlocked)
1468 * abyte : Byte position in the file desired
1469 * areq : Request structure identifying the requesting user.
1470 * aflags : Settings as follows:
1472 * 2 : Return after creating entry.
1473 * 4 : called from afs_vnop_write.c
1474 * *alen contains length of data to be written.
1476 * aoffset : Set to the offset within the chunk where the resident
1478 * alen : Set to the number of bytes of data after the desired
1479 * byte (including the byte itself) which can be read
1483 * The vcache entry pointed to by avc is unlocked upon entry.
1487 struct AFSVolSync tsync;
1488 struct AFSFetchStatus OutStatus;
1489 struct AFSCallBack CallBack;
1492 /* these fields are protected by the lock on the vcache and luck
1494 void updateV2DC(int l, struct vcache *v, struct dcache *d, int src) {
1495 if (!l || 0 == NBObtainWriteLock(&(v->lock),src)) {
1496 if (hsame(v->m.DataVersion, d->f.versionNo) && v->callback) {
1498 v->quick.stamp = d->stamp = MakeStamp();
1499 v->quick.minLoc = AFS_CHUNKTOBASE(d->f.chunk);
1500 /* Don't think I need these next two lines forever */
1501 v->quick.len = d->f.chunkBytes;
1504 if(l) ReleaseWriteLock(&((v)->lock));
1508 struct dcache *afs_GetDCache(avc, abyte, areq, aoffset, alen, aflags)
1509 register struct vcache *avc; /*Held*/
1511 afs_size_t *aoffset, *alen;
1513 register struct vrequest *areq;
1517 register afs_int32 i, code, code1, shortcut , adjustsize=0;
1522 afs_size_t maxGoodLength; /* amount of good data at server */
1523 struct rx_call *tcall;
1524 afs_size_t Position = 0;
1525 #ifdef AFS_64BIT_CLIENT
1527 #endif /* AFS_64BIT_CLIENT */
1528 afs_int32 size, tlen; /* size of segment to transfer */
1529 afs_size_t lengthFound; /* as returned from server */
1530 struct tlocal1 *tsmall;
1531 register struct dcache *tdc;
1532 register struct osi_file *file;
1533 register struct conn *tc;
1535 int doAdjustSize = 0;
1536 int doReallyAdjustSize = 0;
1537 int overWriteWholeChunk = 0;
1540 struct afs_stats_xferData *xferP; /* Ptr to this op's xfer struct */
1541 osi_timeval_t xferStartTime, /*FS xfer start time*/
1542 xferStopTime; /*FS xfer stop time*/
1543 afs_size_t bytesToXfer; /* # bytes to xfer*/
1544 afs_size_t bytesXferred; /* # bytes actually xferred*/
1545 struct afs_stats_AccessInfo *accP; /*Ptr to access record in stats*/
1546 int fromReplica; /*Are we reading from a replica?*/
1547 int numFetchLoops; /*# times around the fetch/analyze loop*/
1548 #endif /* AFS_NOSTATS */
1550 AFS_STATCNT(afs_GetDCache);
1556 * Determine the chunk number and offset within the chunk corresponding
1557 * to the desired byte.
1559 if (avc->fid.Fid.Vnode & 1) { /* if (vType(avc) == VDIR) */
1563 chunk = AFS_CHUNK(abyte);
1566 setLocks = aflags & 1;
1568 /* come back to here if we waited for the cache to drain. */
1572 /* check hints first! (might could use bcmp or some such...) */
1574 if (tdc = avc->h1.dchint) {
1575 MObtainReadLock(&afs_xdcache);
1576 if ( (tdc->index != NULLIDX) && !FidCmp(&tdc->f.fid, &avc->fid) &&
1577 chunk == tdc->f.chunk &&
1578 !(afs_indexFlags[tdc->index] & (IFFree|IFDiscarded))) {
1579 /* got the right one. It might not be the right version, and it
1580 * might be fetching, but it's the right dcache entry.
1582 /* All this code should be integrated better with what follows:
1583 * I can save a good bit more time under a write lock if I do..
1585 /* does avc need to be locked? */
1586 /* Note that the race labeled LOCKXXX is inconsequential: the xdcache
1587 * lock protects both the dcache slots AND the DLRU list. While
1588 * the slots and hash table and DLRU list all may change in the race,
1589 * THIS particular dcache structure cannot be recycled and its LRU
1590 * pointers must still be valid once we get the lock again. Still
1591 * we should either create another lock or invent a new method of
1592 * managing dcache structs -- CLOCK or something. */
1594 #ifdef AFS_SUN5_ENVX
1595 MObtainWriteLock(&tdc->lock,279);
1598 if (hsame(tdc->f.versionNo, avc->m.DataVersion)
1599 && !(tdc->flags & DFFetching)) {
1600 afs_stats_cmperf.dcacheHits++;
1601 MReleaseReadLock(&afs_xdcache);
1603 MObtainWriteLock(&afs_xdcache, 559); /* LOCKXXX */
1604 QRemove(&tdc->lruq);
1605 QAdd(&afs_DLRU, &tdc->lruq);
1606 MReleaseWriteLock(&afs_xdcache);
1609 #ifdef AFS_SUN5_ENVX
1610 MReleaseWriteLock(&tdc->lock);
1613 MReleaseReadLock(&afs_xdcache);
1619 * Hash on the [fid, chunk] and get the corresponding dcache index
1620 * after write-locking the dcache.
1623 i = DCHash(&avc->fid, chunk);
1624 afs_MaybeWakeupTruncateDaemon(); /* check to make sure our space is fine */
1625 MObtainWriteLock(&afs_xdcache,280);
1627 for(index = afs_dchashTbl[i]; index != NULLIDX;) {
1628 if (afs_indexUnique[index] == avc->fid.Fid.Unique) {
1629 tdc = afs_GetDSlot(index, (struct dcache *)0);
1630 if (!FidCmp(&tdc->f.fid, &avc->fid) && chunk == tdc->f.chunk) {
1631 /* Move it up in the beginning of the list */
1632 if (afs_dchashTbl[i] != index) {
1633 afs_dcnextTbl[us] = afs_dcnextTbl[index];
1634 afs_dcnextTbl[index] = afs_dchashTbl[i];
1635 afs_dchashTbl[i] = index;
1637 MReleaseWriteLock(&afs_xdcache);
1638 break; /* leaving refCount high for caller */
1640 tdc->refCount--; /* was incremented by afs_GetDSlot */
1644 index = afs_dcnextTbl[index];
1647 * If we didn't find the entry, we'll create one.
1649 if (index == NULLIDX) {
1650 afs_Trace2(afs_iclSetp, CM_TRACE_GETDCACHE1, ICL_TYPE_POINTER, avc,
1651 ICL_TYPE_INT32, chunk);
1653 if (afs_discardDCList == NULLIDX && afs_freeDCList == NULLIDX) {
1655 if (!setLocks) avc->states |= CDCLock;
1656 afs_GetDownD(5, (int*)0); /* just need slots */
1657 if (!setLocks) avc->states &= (~CDCLock);
1658 if (afs_discardDCList != NULLIDX || afs_freeDCList != NULLIDX)
1660 /* If we can't get space for 5 mins we give up and panic */
1661 if (++downDCount > 300)
1662 osi_Panic("getdcache");
1663 MReleaseWriteLock(&afs_xdcache);
1664 afs_osi_Wait(1000, 0, 0);
1668 if (afs_discardDCList == NULLIDX ||
1669 ((aflags & 2) && afs_freeDCList != NULLIDX)) {
1670 afs_indexFlags[afs_freeDCList] &= ~IFFree;
1671 tdc = afs_GetDSlot(afs_freeDCList, 0);
1672 afs_freeDCList = afs_dvnextTbl[tdc->index];
1675 afs_indexFlags[afs_discardDCList] &= ~IFDiscarded;
1676 tdc = afs_GetDSlot(afs_discardDCList, 0);
1677 afs_discardDCList = afs_dvnextTbl[tdc->index];
1678 afs_discardDCCount--;
1679 size = ((tdc->f.chunkBytes + afs_fsfragsize)^afs_fsfragsize)>>10;
1680 afs_blocksDiscarded -= size;
1681 afs_stats_cmperf.cacheBlocksDiscarded = afs_blocksDiscarded;
1683 /* Truncate the chunk so zeroes get filled properly */
1684 file = afs_CFileOpen(tdc->f.inode);
1685 afs_CFileTruncate(file, 0);
1686 afs_CFileClose(file);
1687 afs_AdjustSize(tdc, 0);
1692 * Fill in the newly-allocated dcache record.
1694 afs_indexFlags[tdc->index] &= ~(IFDirtyPages | IFAnyPages);
1695 tdc->f.fid = avc->fid;
1696 afs_indexUnique[tdc->index] = tdc->f.fid.Fid.Unique;
1697 hones(tdc->f.versionNo); /* invalid value */
1698 tdc->f.chunk = chunk;
1699 tdc->validPos = AFS_CHUNKTOBASE(chunk);
1701 if (tdc->lruq.prev == &tdc->lruq) osi_Panic("lruq 1");
1703 * Now add to the two hash chains - note that i is still set
1704 * from the above DCHash call.
1706 afs_dcnextTbl[tdc->index] = afs_dchashTbl[i];
1707 afs_dchashTbl[i] = tdc->index;
1708 i = DVHash(&avc->fid);
1709 afs_dvnextTbl[tdc->index] = afs_dvhashTbl[i];
1710 afs_dvhashTbl[i] = tdc->index;
1711 tdc->flags = DFEntryMod;
1713 afs_MaybeWakeupTruncateDaemon();
1714 MReleaseWriteLock(&afs_xdcache);
1716 } /* else hint failed... */
1718 afs_Trace4(afs_iclSetp, CM_TRACE_GETDCACHE2, ICL_TYPE_POINTER, avc,
1719 ICL_TYPE_POINTER, tdc,
1720 ICL_TYPE_INT32, hgetlo(tdc->f.versionNo),
1721 ICL_TYPE_INT32, hgetlo(avc->m.DataVersion));
1723 * Here we have the unlocked entry in tdc, with its refCount
1724 * incremented. Note: we don't use the S-lock; it costs concurrency
1725 * when storing a file back to the server.
1727 if (setLocks) ObtainReadLock(&avc->lock);
1730 * Not a newly created file so we need to check the file's length and
1731 * compare data versions since someone could have changed the data or we're
1732 * reading a file written elsewhere. We only want to bypass doing no-op
1733 * read rpcs on newly created files (dv of 0) since only then we guarantee
1734 * that this chunk's data hasn't been filled by another client.
1736 size = AFS_CHUNKSIZE(abyte);
1737 if (aflags & 4) /* called from write */
1739 else /* called from read */
1740 tlen = tdc->validPos - abyte;
1741 Position = AFS_CHUNKTOBASE(chunk);
1742 afs_Trace4(afs_iclSetp, CM_TRACE_GETDCACHE3,
1743 ICL_TYPE_INT32, tlen,
1744 ICL_TYPE_INT32, aflags,
1745 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(abyte),
1746 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(Position));
1747 if ((aflags & 4) && (hiszero(avc->m.DataVersion)))
1749 if ((aflags & 4) && (abyte == Position) && (tlen >= size))
1750 overWriteWholeChunk = 1;
1751 if (doAdjustSize || overWriteWholeChunk) {
1752 #if defined(AFS_AIX32_ENV) || defined(AFS_SGI_ENV)
1754 #ifdef AFS_SGI64_ENV
1755 if (doAdjustSize) adjustsize = NBPP;
1756 #else /* AFS_SGI64_ENV */
1757 if (doAdjustSize) adjustsize = 8192;
1758 #endif /* AFS_SGI64_ENV */
1759 #else /* AFS_SGI_ENV */
1760 if (doAdjustSize) adjustsize = 4096;
1761 #endif /* AFS_SGI_ENV */
1762 if (AFS_CHUNKTOBASE(chunk)+adjustsize >= avc->m.Length &&
1763 #else /* defined(AFS_AIX32_ENV) || defined(AFS_SGI_ENV) */
1764 #if defined(AFS_SUN_ENV) || defined(AFS_OSF_ENV)
1765 if ((doAdjustSize || (AFS_CHUNKTOBASE(chunk) >= avc->m.Length)) &&
1767 if (AFS_CHUNKTOBASE(chunk) >= avc->m.Length &&
1769 #endif /* defined(AFS_AIX32_ENV) || defined(AFS_SGI_ENV) */
1770 !hsame(avc->m.DataVersion, tdc->f.versionNo))
1771 doReallyAdjustSize = 1;
1772 if (doReallyAdjustSize || overWriteWholeChunk) {
1773 doReallyAdjustSize = 0;
1774 /* no data in file to read at this position */
1776 ReleaseReadLock(&avc->lock);
1777 ObtainWriteLock(&avc->lock,64);
1779 /* check again, now that we have a write lock */
1780 #if defined(AFS_AIX32_ENV) || defined(AFS_SGI_ENV)
1781 if (AFS_CHUNKTOBASE(chunk)+adjustsize >= avc->m.Length &&
1783 #if defined(AFS_SUN_ENV) || defined(AFS_OSF_ENV)
1784 if ((doAdjustSize || (AFS_CHUNKTOBASE(chunk) >= avc->m.Length)) &&
1786 if (AFS_CHUNKTOBASE(chunk) >= avc->m.Length &&
1789 !hsame(avc->m.DataVersion, tdc->f.versionNo))
1790 doReallyAdjustSize = 1;
1791 if (doReallyAdjustSize || overWriteWholeChunk) {
1792 file = afs_CFileOpen(tdc->f.inode);
1793 afs_CFileTruncate(file, 0);
1794 afs_CFileClose(file);
1795 afs_AdjustSize(tdc, 0);
1796 hset(tdc->f.versionNo, avc->m.DataVersion);
1797 tdc->flags |= DFEntryMod;
1800 ReleaseWriteLock(&avc->lock);
1801 ObtainReadLock(&avc->lock);
1805 if (setLocks) ReleaseReadLock(&avc->lock);
1808 * We must read in the whole chunk if the version number doesn't
1812 /* don't need data, just a unique dcache entry */
1813 hset(afs_indexTimes[tdc->index], afs_indexCounter);
1814 hadd32(afs_indexCounter, 1);
1815 updateV2DC(setLocks,avc,tdc,567);
1816 if (vType(avc) == VDIR)
1819 *aoffset = AFS_CHUNKOFFSET(abyte);
1820 if (tdc->validPos < abyte)
1821 *alen = (afs_size_t) 0;
1823 *alen = tdc->validPos - abyte;
1824 return tdc; /* check if we're done */
1826 osi_Assert(setLocks || WriteLocked(&avc->lock));
1828 if (setLocks) ObtainReadLock(&avc->lock);
1829 if (!hsame(avc->m.DataVersion, tdc->f.versionNo) && !overWriteWholeChunk) {
1831 * Version number mismatch.
1834 ReleaseReadLock(&avc->lock);
1835 ObtainWriteLock(&avc->lock,65);
1839 * If data ever existed for this vnode, and this is a text object,
1840 * do some clearing. Now, you'd think you need only do the flush
1841 * when VTEXT is on, but VTEXT is turned off when the text object
1842 * is freed, while pages are left lying around in memory marked
1843 * with this vnode. If we would reactivate (create a new text
1844 * object from) this vnode, we could easily stumble upon some of
1845 * these old pages in pagein. So, we always flush these guys.
1846 * Sun has a wonderful lack of useful invariants in this system.
1848 * avc->flushDV is the data version # of the file at the last text
1849 * flush. Clearly, at least, we don't have to flush the file more
1850 * often than it changes
1852 if (hcmp(avc->flushDV, avc->m.DataVersion) < 0) {
1854 * By here, the cache entry is always write-locked. We can
1855 * deadlock if we call osi_Flush with the cache entry locked...
1857 ReleaseWriteLock(&avc->lock);
1860 * Call osi_FlushPages in open, read/write, and map, since it
1861 * is too hard here to figure out if we should lock the
1864 ObtainWriteLock(&avc->lock,66);
1867 /* Watch for standard race condition */
1868 if (hsame(avc->m.DataVersion, tdc->f.versionNo)) {
1869 updateV2DC(0,avc,tdc,569); /* set hint */
1870 if (setLocks) ReleaseWriteLock(&avc->lock);
1871 afs_stats_cmperf.dcacheHits++;
1875 /* Sleep here when cache needs to be drained. */
1877 (afs_blocksUsed > (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100)) {
1878 /* Make sure truncate daemon is running */
1879 afs_MaybeWakeupTruncateDaemon();
1880 tdc->refCount--; /* we'll re-obtain the dcache when we re-try. */
1881 ReleaseWriteLock(&avc->lock);
1882 while ((afs_blocksUsed-afs_blocksDiscarded) >
1883 (CM_WAITFORDRAINPCT*afs_cacheBlocks)/100) {
1884 afs_WaitForCacheDrain = 1;
1885 afs_osi_Sleep(&afs_WaitForCacheDrain);
1887 afs_MaybeFreeDiscardedDCache();
1888 /* need to check if someone else got the chunk first. */
1889 goto RetryGetDCache;
1892 /* Do not fetch data beyond truncPos. */
1893 maxGoodLength = avc->m.Length;
1894 if (avc->truncPos < maxGoodLength) maxGoodLength = avc->truncPos;
1895 Position = AFS_CHUNKBASE(abyte);
1896 if (vType(avc) == VDIR) {
1897 size = avc->m.Length;
1898 if (size > tdc->f.chunkBytes) {
1899 /* pre-reserve space for file */
1900 afs_AdjustSize(tdc, size);
1902 size = 999999999; /* max size for transfer */
1905 size = AFS_CHUNKSIZE(abyte); /* expected max size */
1906 /* don't read past end of good data on server */
1907 if (Position + size > maxGoodLength)
1908 size = maxGoodLength - Position;
1909 if (size < 0) size = 0; /* Handle random races */
1910 if (size > tdc->f.chunkBytes) {
1911 /* pre-reserve space for file */
1912 afs_AdjustSize(tdc, size); /* changes chunkBytes */
1913 /* max size for transfer still in size */
1916 if (afs_mariner && !tdc->f.chunk)
1917 afs_MarinerLog("fetch$Fetching", avc); /* , Position, size, afs_indexCounter );*/
1919 * Right now, we only have one tool, and it's a hammer. So, we
1920 * fetch the whole file.
1922 DZap(&tdc->f.inode); /* pages in cache may be old */
1924 if (file = tdc->ihint) {
1925 if (tdc->f.inode == file->inum )
1932 file = osi_UFSOpen(tdc->f.inode);
1937 file = afs_CFileOpen(tdc->f.inode);
1938 afs_RemoveVCB(&avc->fid);
1939 tdc->f.states |= DWriting;
1940 tdc->flags |= DFFetching;
1941 tdc->validPos = Position; /* which is AFS_CHUNKBASE(abyte) */
1942 if (tdc->flags & DFFetchReq) {
1943 tdc->flags &= ~DFFetchReq;
1944 afs_osi_Wakeup(&tdc->validPos);
1946 tsmall = (struct tlocal1 *) osi_AllocLargeSpace(sizeof(struct tlocal1));
1949 * Remember if we are doing the reading from a replicated volume,
1950 * and how many times we've zipped around the fetch/analyze loop.
1952 fromReplica = (avc->states & CRO) ? 1 : 0;
1954 accP = &(afs_stats_cmfullperf.accessinf);
1956 (accP->replicatedRefs)++;
1958 (accP->unreplicatedRefs)++;
1959 #endif /* AFS_NOSTATS */
1960 /* this is a cache miss */
1961 afs_Trace4(afs_iclSetp, CM_TRACE_FETCHPROC, ICL_TYPE_POINTER, avc,
1962 ICL_TYPE_FID, &(avc->fid),
1963 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(Position),
1964 ICL_TYPE_INT32, size);
1966 if (size) afs_stats_cmperf.dcacheMisses++;
1969 * Dynamic root support: fetch data from local memory.
1971 if (afs_IsDynroot(avc)) {
1975 afs_GetDynroot(&dynrootDir, &dynrootLen, &tsmall->OutStatus);
1977 dynrootDir += Position;
1978 dynrootLen -= Position;
1979 if (size > dynrootLen)
1981 if (size < 0) size = 0;
1982 code = afs_osi_Write(file, -1, dynrootDir, size);
1990 tdc->validPos = Position + size;
1991 afs_CFileTruncate(file, size); /* prune it */
1994 * Not a dynamic vnode: do the real fetch.
1997 tc = afs_Conn(&avc->fid, areq, SHARED_LOCK);
1999 afs_int32 length_hi, length, bytes;
2003 (accP->numReplicasAccessed)++;
2005 #endif /* AFS_NOSTATS */
2006 avc->callback = tc->srvr->server;
2007 ConvertWToSLock(&avc->lock);
2009 #ifdef RX_ENABLE_LOCKS
2011 #endif /* RX_ENABLE_LOCKS */
2012 tcall = rx_NewCall(tc->id);
2013 #ifdef RX_ENABLE_LOCKS
2015 #endif /* RX_ENABLE_LOCKS */
2018 XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_FETCHDATA);
2019 #ifdef AFS_64BIT_CLIENT
2020 length_hi = code = 0;
2021 if (!afs_serverHasNo64Bit(tc)) {
2023 #ifdef RX_ENABLE_LOCKS
2025 #endif /* RX_ENABLE_LOCKS */
2026 code = StartRXAFS_FetchData64(tcall,
2027 (struct AFSFid *) &avc->fid.Fid,
2030 #ifdef RX_ENABLE_LOCKS
2032 #endif /* RX_ENABLE_LOCKS */
2034 bytes = rx_Read(tcall, (char *)&length_hi, sizeof(afs_int32));
2035 #ifdef RX_ENABLE_LOCKS
2037 #endif /* RX_ENABLE_LOCKS */
2038 if (bytes == sizeof(afs_int32)) {
2039 length_hi = ntohl(length_hi);
2042 code = rx_Error(tcall);
2043 #ifdef RX_ENABLE_LOCKS
2045 #endif /* RX_ENABLE_LOCKS */
2046 code1 = rx_EndCall(tcall, code);
2047 #ifdef RX_ENABLE_LOCKS
2049 #endif /* RX_ENABLE_LOCKS */
2050 afs_Trace2(afs_iclSetp, CM_TRACE_FETCH64CODE,
2051 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code);
2052 tcall = (struct rx_call *) 0;
2056 if (code == RXGEN_OPCODE || afs_serverHasNo64Bit(tc)) {
2057 if (Position > 0xFFFFFFFF) {
2062 #ifdef RX_ENABLE_LOCKS
2064 #endif /* RX_ENABLE_LOCKS */
2066 tcall = rx_NewCall(tc->id);
2067 code = StartRXAFS_FetchData(tcall,
2068 (struct AFSFid *) &avc->fid.Fid, pos, size);
2069 #ifdef RX_ENABLE_LOCKS
2071 #endif /* RX_ENABLE_LOCKS */
2073 afs_serverSetNo64Bit(tc);
2076 #ifdef RX_ENABLE_LOCKS
2078 #endif /* RX_ENABLE_LOCKS */
2079 bytes = rx_Read(tcall, (char *)&length, sizeof(afs_int32));
2080 #ifdef RX_ENABLE_LOCKS
2082 #endif /* RX_ENABLE_LOCKS */
2083 if (bytes == sizeof(afs_int32)) {
2084 length = ntohl(length);
2086 code = rx_Error(tcall);
2089 FillInt64(lengthFound, length_hi, length);
2090 afs_Trace3(afs_iclSetp, CM_TRACE_FETCH64LENG,
2091 ICL_TYPE_POINTER, avc, ICL_TYPE_INT32, code,
2092 ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(lengthFound));
2093 #else /* AFS_64BIT_CLIENT */
2094 #ifdef RX_ENABLE_LOCKS
2096 #endif /* RX_ENABLE_LOCKS */
2097 code = StartRXAFS_FetchData(tcall,
2098 (struct AFSFid *) &avc->fid.Fid,
2100 #ifdef RX_ENABLE_LOCKS
2102 #endif /* RX_ENABLE_LOCKS */
2104 #ifdef RX_ENABLE_LOCKS
2106 #endif /* RX_ENABLE_LOCKS */
2107 bytes = rx_Read(tcall, (char *)&length, sizeof(afs_int32));
2108 #ifdef RX_ENABLE_LOCKS
2110 #endif /* RX_ENABLE_LOCKS */
2111 if (bytes == sizeof(afs_int32)) {
2112 length = ntohl(length);
2114 code = rx_Error(tcall);
2117 #endif /* AFS_64BIT_CLIENT */
2121 xferP = &(afs_stats_cmfullperf.rpc.fsXferTimes[AFS_STATS_FS_XFERIDX_FETCHDATA]);
2122 osi_GetuTime(&xferStartTime);
2124 code = afs_CacheFetchProc(tcall, file,
2125 (afs_size_t) Position, tdc, avc,
2126 &bytesToXfer, &bytesXferred, length);
2128 osi_GetuTime(&xferStopTime);
2129 (xferP->numXfers)++;
2131 (xferP->numSuccesses)++;
2132 afs_stats_XferSumBytes[AFS_STATS_FS_XFERIDX_FETCHDATA] += bytesXferred;
2133 (xferP->sumBytes) += (afs_stats_XferSumBytes[AFS_STATS_FS_XFERIDX_FETCHDATA] >> 10);
2134 afs_stats_XferSumBytes[AFS_STATS_FS_XFERIDX_FETCHDATA] &= 0x3FF;
2135 if (bytesXferred < xferP->minBytes)
2136 xferP->minBytes = bytesXferred;
2137 if (bytesXferred > xferP->maxBytes)
2138 xferP->maxBytes = bytesXferred;
2141 * Tally the size of the object. Note: we tally the actual size,
2142 * NOT the number of bytes that made it out over the wire.
2144 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET0)
2145 (xferP->count[0])++;
2147 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET1)
2148 (xferP->count[1])++;
2150 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET2)
2151 (xferP->count[2])++;
2153 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET3)
2154 (xferP->count[3])++;
2156 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET4)
2157 (xferP->count[4])++;
2159 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET5)
2160 (xferP->count[5])++;
2162 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET6)
2163 (xferP->count[6])++;
2165 if (bytesToXfer <= AFS_STATS_MAXBYTES_BUCKET7)
2166 (xferP->count[7])++;
2168 (xferP->count[8])++;
2170 afs_stats_GetDiff(elapsedTime, xferStartTime, xferStopTime);
2171 afs_stats_AddTo((xferP->sumTime), elapsedTime);
2172 afs_stats_SquareAddTo((xferP->sqrTime), elapsedTime);
2173 if (afs_stats_TimeLessThan(elapsedTime, (xferP->minTime))) {
2174 afs_stats_TimeAssign((xferP->minTime), elapsedTime);
2176 if (afs_stats_TimeGreaterThan(elapsedTime, (xferP->maxTime))) {
2177 afs_stats_TimeAssign((xferP->maxTime), elapsedTime);
2181 code = afs_CacheFetchProc(tcall, file, Position, tdc, avc, 0, 0, length);
2182 #endif /* AFS_NOSTATS */
2185 #ifdef RX_ENABLE_LOCKS
2187 #endif /* RX_ENABLE_LOCKS */
2188 code = EndRXAFS_FetchData(tcall,
2192 #ifdef RX_ENABLE_LOCKS
2194 #endif /* RX_ENABLE_LOCKS */
2197 #ifdef RX_ENABLE_LOCKS
2199 #endif /* RX_ENABLE_LOCKS */
2201 code1 = rx_EndCall(tcall, code);
2202 #ifdef RX_ENABLE_LOCKS
2204 #endif /* RX_ENABLE_LOCKS */
2205 UpgradeSToWLock(&avc->lock,27);
2210 if ( !code && code1 )
2214 /* callback could have been broken (or expired) in a race here,
2215 * but we return the data anyway. It's as good as we knew about
2216 * when we started. */
2218 * validPos is updated by CacheFetchProc, and can only be
2219 * modifed under an S or W lock, which we've blocked out
2221 size = tdc->validPos - Position; /* actual segment size */
2222 if (size < 0) size = 0;
2223 afs_CFileTruncate(file, size); /* prune it */
2226 ObtainWriteLock(&afs_xcbhash, 453);
2227 afs_DequeueCallback(avc);
2228 avc->states &= ~(CStatd | CUnique);
2229 avc->callback = (struct server *)0;
2230 ReleaseWriteLock(&afs_xcbhash);
2231 if (avc->fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
2232 osi_dnlc_purgedp(avc);
2236 (afs_Analyze(tc, code, &avc->fid, areq,
2237 AFS_STATS_FS_RPCIDX_FETCHDATA,
2238 SHARED_LOCK, (struct cell *)0));
2242 * In the case of replicated access, jot down info on the number of
2243 * attempts it took before we got through or gave up.
2246 if (numFetchLoops <= 1)
2247 (accP->refFirstReplicaOK)++;
2248 if (numFetchLoops > accP->maxReplicasPerRef)
2249 accP->maxReplicasPerRef = numFetchLoops;
2251 #endif /* AFS_NOSTATS */
2253 tdc->flags &= ~DFFetching;
2254 if (tdc->flags & DFWaiting) {
2255 afs_Trace4(afs_iclSetp, CM_TRACE_DCACHEWAIT,
2256 ICL_TYPE_STRING, __FILE__,
2257 ICL_TYPE_INT32, __LINE__,
2258 ICL_TYPE_POINTER, tdc,
2259 ICL_TYPE_INT32, tdc->flags);
2260 tdc->flags &= ~DFWaiting;
2261 afs_osi_Wakeup(&tdc->validPos);
2263 if (avc->execsOrWriters == 0) tdc->f.states &= ~DWriting;
2265 /* now, if code != 0, we have an error and should punt */
2267 afs_CFileTruncate(file, 0);
2268 afs_AdjustSize(tdc, 0);
2269 afs_CFileClose(file);
2270 ZapDCE(tdc); /* sets DFEntryMod */
2271 if (vType(avc) == VDIR) {
2272 DZap(&tdc->f.inode);
2274 #ifdef AFS_SUN5_ENVX
2279 ObtainWriteLock(&afs_xcbhash, 454);
2280 afs_DequeueCallback(avc);
2281 avc->states &= ~( CStatd | CUnique );
2282 ReleaseWriteLock(&afs_xcbhash);
2283 if (avc->fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
2284 osi_dnlc_purgedp(avc);
2285 if (setLocks) ReleaseWriteLock(&avc->lock);
2286 osi_FreeLargeSpace(tsmall);
2287 tdc = (struct dcache *) 0;
2291 /* otherwise we copy in the just-fetched info */
2292 afs_CFileClose(file);
2293 afs_AdjustSize(tdc, size); /* new size */
2295 * Copy appropriate fields into vcache
2297 afs_ProcessFS(avc, &tsmall->OutStatus, areq);
2298 hset64(tdc->f.versionNo, tsmall->OutStatus.dataVersionHigh, tsmall->OutStatus.DataVersion);
2299 tdc->flags |= DFEntryMod;
2300 afs_indexFlags[tdc->index] |= IFEverUsed;
2301 if (setLocks) ReleaseWriteLock(&avc->lock);
2302 osi_FreeLargeSpace(tsmall);
2303 } /*Data version numbers don't match*/
2306 * Data version numbers match. Release locks if we locked
2307 * them, and remember we've had a cache hit.
2310 ReleaseReadLock(&avc->lock);
2311 afs_stats_cmperf.dcacheHits++;
2312 } /*Data version numbers match*/
2314 updateV2DC(setLocks,avc,tdc,332); /* set hint */
2317 * See if this was a reference to a file in the local cell.
2319 if (avc->fid.Cell == LOCALCELL)
2320 afs_stats_cmperf.dlocalAccesses++;
2322 afs_stats_cmperf.dremoteAccesses++;
2324 /* Fix up LRU info */
2327 hset(afs_indexTimes[tdc->index], afs_indexCounter);
2328 hadd32(afs_indexCounter, 1);
2330 /* return the data */
2331 if (vType(avc) == VDIR)
2334 *aoffset = AFS_CHUNKOFFSET(abyte);
2335 *alen = *aoffset + tdc->f.chunkBytes - abyte;
2344 * afs_WriteThroughDSlots
2347 * Sweep through the dcache slots and write out any modified
2348 * in-memory data back on to our caching store.
2354 * The afs_xdcache is write-locked through this whole affair.
2357 afs_WriteThroughDSlots()
2359 { /*afs_WriteThroughDSlots*/
2361 register struct dcache *tdc;
2362 register afs_int32 i, touchedit=0;
2364 AFS_STATCNT(afs_WriteThroughDSlots);
2365 MObtainWriteLock(&afs_xdcache,283);
2366 for(i = 0; i < afs_cacheFiles; i++) {
2367 tdc = afs_indexTable[i];
2368 if (tdc && (tdc->flags & DFEntryMod)) {
2369 tdc->flags &= ~DFEntryMod;
2370 afs_WriteDCache(tdc, 1);
2374 if (!touchedit && (cacheDiskType != AFS_FCACHE_TYPE_MEM)) {
2375 /* Touch the file to make sure that the mtime on the file is kept up-to-date
2376 * to avoid losing cached files on cold starts because their mtime seems old...
2378 struct afs_fheader theader;
2380 theader.magic = AFS_FHMAGIC;
2381 theader.firstCSize = AFS_FIRSTCSIZE;
2382 theader.otherCSize = AFS_OTHERCSIZE;
2383 theader.version = AFS_CI_VERSION;
2384 afs_osi_Write(afs_cacheInodep, 0, &theader, sizeof(theader));
2386 MReleaseWriteLock(&afs_xdcache);
2388 } /*afs_WriteThroughDSlots*/
2394 * Return a pointer to an freshly initialized dcache entry using
2395 * a memory-based cache.
2398 * aslot : Dcache slot to look at.
2399 * tmpdc : Ptr to dcache entry.
2402 * Nothing interesting.
/*
 * afs_MemGetDSlot
 *
 * Return the dcache entry for slot 'aslot' of a memory-based cache,
 * moving it to the head of the global DLRU queue.  When 'tmpdc' is
 * NULL, a dcache structure is taken from afs_freeDSList (replenished
 * via afs_GetDownDSlot, or allocated outright as a last resort) and
 * recorded in afs_indexTable[aslot]; when the caller supplies 'tmpdc',
 * the index table is left untouched.
 *
 * Environment: afs_xdcache must be held (the CheckLock panic below
 * enforces this), and 'aslot' must be a valid slot index.
 *
 * NOTE(review): this extract is missing source lines (the embedded
 * original line numbering jumps), so some branches, returns and
 * #endif markers are not visible here — confirm against upstream
 * before relying on the exact control flow.
 */
2405 struct dcache *afs_MemGetDSlot(aslot, tmpdc)
2406 register afs_int32 aslot;
2407 register struct dcache *tmpdc;
2409 { /*afs_MemGetDSlot*/
2411 register afs_int32 code;
2412 register struct dcache *tdc;
2413 register char *tfile;
2415 AFS_STATCNT(afs_MemGetDSlot);
/* Sanity checks: caller must hold afs_xdcache and pass a valid slot. */
2416 if (CheckLock(&afs_xdcache) != -1) osi_Panic("getdslot nolock");
2417 if (aslot < 0 || aslot >= afs_cacheFiles) osi_Panic("getdslot slot");
2418 tdc = afs_indexTable[aslot];
2420 QRemove(&tdc->lruq); /* move to queue head */
2421 QAdd(&afs_DLRU, &tdc->lruq);
/* No caller-supplied region: obtain a dcache struct ourselves. */
2425 if (tmpdc == (struct dcache *)0) {
2426 if (!afs_freeDSList) afs_GetDownDSlot(4);
2427 if (!afs_freeDSList) {
2428 /* none free, making one is better than a panic */
2429 afs_stats_cmperf.dcacheXAllocs++; /* count in case we have a leak */
2430 tdc = (struct dcache *) afs_osi_Alloc(sizeof (struct dcache));
2431 #ifdef AFS_AIX32_ENV
2432 pin((char *)tdc, sizeof(struct dcache)); /* XXX */
/* presumably the matching #endif/else (take head of free list) was
 * lost from this extract — TODO confirm against upstream */
2435 tdc = afs_freeDSList;
2436 afs_freeDSList = (struct dcache *) tdc->lruq.next;
2438 tdc->flags = 0; /* up-to-date, not in free q */
2439 QAdd(&afs_DLRU, &tdc->lruq);
2440 if (tdc->lruq.prev == &tdc->lruq) osi_Panic("lruq 3");
2447 /* initialize entry */
2448 tdc->f.fid.Cell = 0;
2449 tdc->f.fid.Fid.Volume = 0;
2451 hones(tdc->f.versionNo);
/* For a memory cache the "inode" field is simply the slot index. */
2452 tdc->f.inode = aslot;
2453 tdc->flags |= DFEntryMod;
2456 afs_indexUnique[aslot] = tdc->f.fid.Fid.Unique;
/* Publish the entry in the slot table only if we own the storage. */
2458 if (tmpdc == (struct dcache *)0)
2459 afs_indexTable[aslot] = tdc;
2462 } /*afs_MemGetDSlot*/
/* Most recent CacheItems read failure: last_error records the errno-style
 * code (getuerror) and lasterrtime the osi_Time() of that failure; both
 * are set in afs_UFSGetDSlot when the slot read comes up short. */
2464 unsigned int last_error = 0, lasterrtime = 0;
2470 * Return a pointer to a freshly initialized dcache entry using
2471 * a UFS-based disk cache.
2474 * aslot : Dcache slot to look at.
2475 * tmpdc : Ptr to dcache entry.
2478 * afs_xdcache lock write-locked.
/*
 * afs_UFSGetDSlot
 *
 * Return the dcache entry for slot 'aslot' of a UFS (disk) based
 * cache, moving it to the head of the DLRU.  If the entry is not
 * already in memory, a dcache struct is obtained (from 'tmpdc' when
 * supplied, otherwise from the free list or a fresh allocation) and
 * the slot's fcache record is read in from the CacheItems file at
 * offset sizeof(afs_fheader) + aslot*sizeof(struct fcache).  On a
 * short read the entry is reinitialized as empty-but-modified and the
 * error is recorded in last_error/lasterrtime.
 *
 * Environment: afs_xdcache write-locked (per the header comment above
 * and the CheckLock panic below).
 *
 * NOTE(review): this extract is missing source lines (the embedded
 * numbering jumps), so some #endif markers, else branches and the
 * return statement are not visible here.
 */
2480 struct dcache *afs_UFSGetDSlot(aslot, tmpdc)
2481 register afs_int32 aslot;
2482 register struct dcache *tmpdc;
2484 { /*afs_UFSGetDSlot*/
2486 register afs_int32 code;
2487 register struct dcache *tdc;
2489 AFS_STATCNT(afs_UFSGetDSlot);
/* Sanity checks: caller must hold afs_xdcache and pass a valid slot. */
2490 if (CheckLock(&afs_xdcache) != -1) osi_Panic("getdslot nolock");
2491 if (aslot < 0 || aslot >= afs_cacheFiles) osi_Panic("getdslot slot");
2492 tdc = afs_indexTable[aslot];
2494 #ifdef AFS_SUN5_ENVX
2495 mutex_enter(&tdc->lock);
2497 QRemove(&tdc->lruq); /* move to queue head */
2498 QAdd(&afs_DLRU, &tdc->lruq);
2502 /* otherwise we should read it in from the cache file */
2504 * If we weren't passed an in-memory region to place the file info,
2505 * we have to allocate one.
2507 if (tmpdc == (struct dcache *)0) {
2508 if (!afs_freeDSList) afs_GetDownDSlot(4);
2509 if (!afs_freeDSList) {
2510 /* none free, making one is better than a panic */
2511 afs_stats_cmperf.dcacheXAllocs++; /* count in case we have a leak */
2512 tdc = (struct dcache *) afs_osi_Alloc(sizeof (struct dcache));
2513 #ifdef AFS_AIX32_ENV
2514 pin((char *)tdc, sizeof(struct dcache)); /* XXX */
/* presumably the matching #endif/else (take head of free list) was
 * lost from this extract — TODO confirm against upstream */
2517 tdc = afs_freeDSList;
2518 afs_freeDSList = (struct dcache *) tdc->lruq.next;
2520 tdc->flags = 0; /* up-to-date, not in free q */
2521 QAdd(&afs_DLRU, &tdc->lruq);
2522 if (tdc->lruq.prev == &tdc->lruq) osi_Panic("lruq 3");
2530 #ifdef AFS_SUN5_ENVX
2531 mutex_enter(&tdc->lock);
2534 * Seek to the aslot'th entry and read it in.
2536 code = afs_osi_Read(afs_cacheInodep, sizeof(struct fcache) * aslot + sizeof(struct afs_fheader),
2537 (char *)(&tdc->f), sizeof(struct fcache));
/* Short read => treat the slot as empty: clear the fid, set the
 * version to all-ones, and mark the entry modified so it gets
 * rewritten to disk later.  Remember the error for diagnostics. */
2538 if (code != sizeof(struct fcache)) {
2539 tdc->f.fid.Cell = 0;
2540 tdc->f.fid.Fid.Volume = 0;
2542 hones(tdc->f.versionNo);
2543 tdc->flags |= DFEntryMod;
2544 #if !defined(AFS_SUN5_ENV) && !defined(AFS_OSF_ENV) && !defined(AFS_SGI64_ENV) && !defined(AFS_LINUX20_ENV) && !defined(AFS_DARWIN_ENV) && !defined(AFS_FBSD_ENV)
2545 last_error = getuerror();
2547 lasterrtime = osi_Time();
2548 afs_indexUnique[aslot] = tdc->f.fid.Fid.Unique;
2554 * If we didn't read into a temporary dcache region, update the
2555 * slot pointer table.
2557 if (tmpdc == (struct dcache *)0)
2558 afs_indexTable[aslot] = tdc;
2561 } /*afs_UFSGetDSlot*/
2569 * write a particular dcache entry back to its home in the
2573 * adc : Pointer to the dcache entry to write.
2574 * atime : If true, set the modtime on the file to the current time.
2577 * Must be called with the afs_xdcache lock at least read-locked.
2578 * The reference count is not changed.
/*
 * afs_WriteDCache
 *
 * Write one dcache entry's fcache record back to its slot in the
 * CacheItems file.  A no-op (returns 0) for memory-based caches.
 * Returns EIO on a short write.
 *
 * NOTE(review): the 'atime' parameter's declaration — and presumably
 * an 'if (atime)' guard around the modTime update — appear to have
 * been lost from this extract; confirm against upstream.
 */
2581 afs_WriteDCache(adc, atime)
2583 register struct dcache *adc;
2585 { /*afs_WriteDCache*/
2587 register struct osi_file *tfile;
2588 register afs_int32 code;
/* Memory caches have no backing CacheItems file; nothing to do. */
2590 if (cacheDiskType == AFS_FCACHE_TYPE_MEM) return 0;
2591 AFS_STATCNT(afs_WriteDCache);
2593 adc->f.modTime = osi_Time();
2595 * Seek to the right dcache slot and write the in-memory image out to disk.
2597 code = afs_osi_Write(afs_cacheInodep, sizeof(struct fcache) * adc->index + sizeof(struct afs_fheader),
2598 (char *)(&adc->f), sizeof(struct fcache));
2599 if (code != sizeof(struct fcache)) return EIO;
2602 } /*afs_WriteDCache*/
2610 * Wake up users of a particular file waiting for stores to take
2614 * avc : Ptr to related vcache entry.
2617 * Nothing interesting.
/*
 * afs_wakeup (fragment)
 *
 * Scan the brequest table for valid requests on 'avc' and, unless the
 * vcache has CSafeStore set (in which case BStore does the wakeup when
 * the whole store completes), mark the request BUVALID and clear
 * BUWAIT for any waiter.
 *
 * NOTE(review): the function header line, the brequest-table
 * initialization of 'tb', and the closing brace are missing from this
 * extract — the lines below are a fragment; confirm against upstream.
 */
2621 register struct vcache *avc;
2626 register struct brequest *tb;
2628 AFS_STATCNT(afs_wakeup);
2629 for (i = 0; i < NBRS; i++, tb++) {
2630 /* if request is valid and for this file, we've found it */
2631 if (tb->refCount > 0 && avc == tb->vnode) {
2634 * If CSafeStore is on, then we don't awaken the guy
2635 * waiting for the store until the whole store has finished.
2636 * Otherwise, we do it now. Note that if CSafeStore is on,
2637 * the BStore routine actually wakes up the user, instead
2639 * I think this is redundant now because this sort of thing
2640 * is already being handled by the higher-level code.
2642 if ((avc->states & CSafeStore) == 0) {
2644 tb->flags |= BUVALID;
2645 if (tb->flags & BUWAIT) {
2646 tb->flags &= ~BUWAIT;
2662 * Given a file name and inode, set up that file to be an
2663 * active member in the AFS cache. This also involves checking
2664 * the usability of its data.
2667 * afile : Name of the cache file to initialize.
2668 * ainode : Inode of the file.
2671 * This function is called only during initialization.
/*
 * afs_InitCacheFile
 *
 * Register one on-disk cache file as an active member of the AFS
 * cache during initialization: resolve its inode (from the name
 * 'afile' via gop_lookupname, or directly from 'ainode'), validate
 * the recorded cache-info against the file's actual state, and either
 * (a) truncate the file and put its slot on the free list if it looks
 * bad, or (b) hash the entry into the dcache hash tables and charge
 * its size against the cache.  Returns EINVAL when all slots are
 * already in use; called only at startup.
 *
 * NOTE(review): lines are missing from this extract (error-return
 * paths, #else/#endif markers, the if/else around fileIsBad), so the
 * exact control flow here is incomplete — confirm against upstream.
 */
2674 int afs_InitCacheFile(afile, ainode)
2678 { /*afs_InitCacheFile*/
2680 register afs_int32 code;
2681 #if defined(AFS_LINUX22_ENV)
2682 struct dentry *filevp;
2684 struct vnode *filevp;
2688 struct osi_file *tfile;
2689 struct osi_stat tstat;
2690 register struct dcache *tdc;
2692 AFS_STATCNT(afs_InitCacheFile);
/* Each call consumes the next free slot; bail out when full. */
2693 index = afs_stats_cmperf.cacheNumEntries;
2694 if (index >= afs_cacheFiles) return EINVAL;
2696 MObtainWriteLock(&afs_xdcache,282);
2697 tdc = afs_GetDSlot(index, (struct dcache *)0);
2698 MReleaseWriteLock(&afs_xdcache);
/* Resolve the cache file's inode from its pathname. */
2700 code = gop_lookupname(afile,
2703 (struct vnode **) 0,
2710 * We have a VN_HOLD on filevp. Get the useful info out and
2711 * return. We make use of the fact that the cache is in the
2712 * UFS file system, and just record the inode number.
2714 #ifdef AFS_LINUX22_ENV
2715 tdc->f.inode = VTOI(filevp->d_inode)->i_number;
2718 tdc->f.inode = afs_vnodeToInumber(filevp);
2722 AFS_RELE((struct vnode *)filevp);
2724 #endif /* AFS_LINUX22_ENV */
/* No pathname given: the caller supplied the inode directly. */
2727 tdc->f.inode = ainode;
/* Heuristics to detect a stale/corrupt cache chunk. */
2730 if ((tdc->f.states & DWriting) ||
2731 tdc->f.fid.Fid.Volume == 0) fileIsBad = 1;
2732 tfile = osi_UFSOpen(tdc->f.inode);
2733 code = afs_osi_Stat(tfile, &tstat);
2734 if (code) osi_Panic("initcachefile stat");
2737 * If file size doesn't match the cache info file, it's probably bad.
2739 if (tdc->f.chunkBytes != tstat.size) fileIsBad = 1;
2740 tdc->f.chunkBytes = 0;
2743 * If file changed within T (120?) seconds of cache info file, it's
2744 * probably bad. In addition, if slot changed within last T seconds,
2745 * the cache info file may be incorrectly identified, and so slot
2748 if (cacheInfoModTime < tstat.mtime + 120) fileIsBad = 1;
2749 if (cacheInfoModTime < tdc->f.modTime + 120) fileIsBad = 1;
2750 /* In case write through is behind, make sure cache items entry is
2751 * at least as new as the chunk.
2753 if (tdc->f.modTime < tstat.mtime) fileIsBad = 1;
/* Bad file: discard its contents and return the slot to the free list. */
2755 tdc->f.fid.Fid.Volume = 0; /* not in the hash table */
2756 if (tstat.size != 0)
2757 osi_UFSTruncate(tfile, 0);
2758 /* put entry in free cache slot list */
2759 afs_dvnextTbl[tdc->index] = afs_freeDCList;
2760 afs_freeDCList = index;
2762 afs_indexFlags[index] |= IFFree;
2763 afs_indexUnique[index] = 0;
/* Good file: enter it into the chunk and volume hash tables and
 * account for its size. */
2767 * We must put this entry in the appropriate hash tables.
2768 * Note that i is still set from the above DCHash call
2770 code = DCHash(&tdc->f.fid, tdc->f.chunk);
2771 afs_dcnextTbl[tdc->index] = afs_dchashTbl[code];
2772 afs_dchashTbl[code] = tdc->index;
2773 code = DVHash(&tdc->f.fid);
2774 afs_dvnextTbl[tdc->index] = afs_dvhashTbl[code];
2775 afs_dvhashTbl[code] = tdc->index;
2776 afs_AdjustSize(tdc, tstat.size); /* adjust to new size */
2778 /* has nontrivial amt of data */
2779 afs_indexFlags[index] |= IFEverUsed;
2780 afs_stats_cmperf.cacheFilesReused++;
2782 * Initialize index times to file's mod times; init indexCounter
2785 hset32(afs_indexTimes[index], tstat.atime);
2786 if (hgetlo(afs_indexCounter) < tstat.atime) {
2787 hset32(afs_indexCounter, tstat.atime);
2789 afs_indexUnique[index] = tdc->f.fid.Fid.Unique;
2790 } /*File is not bad*/
2792 osi_UFSClose(tfile);
2793 tdc->f.states &= ~DWriting;
2794 tdc->flags &= ~DFEntryMod;
2795 /* don't set f.modTime; we're just cleaning up */
2796 afs_WriteDCache(tdc, 0);
2798 afs_stats_cmperf.cacheNumEntries++;
2801 } /*afs_InitCacheFile*/
2804 /*Max # of struct dcache's resident at any time*/
2806 * If 'dchint' is enabled then in-memory dcache min is increased because of
2815 * Initialize dcache related variables.
/*
 * afs_dcacheInit
 *
 * One-time initialization of all dcache state: free/discard lists,
 * the afs_xdcache lock, the chunk size, the cache type (memory vs.
 * UFS), the volume/chunk hash tables and link arrays, the per-slot
 * index arrays, and the initial free list of struct dcache entries.
 *
 * Parameters (as used below):
 *   afiles    - number of cache files/slots
 *   ablocks   - cache size in 1K blocks
 *   aDentries - number of in-memory dcache entries to allocate
 *   achunk    - log2 chunk size (defaulted to 13 when out of [0,30])
 *   aflags    - AFSCALL_INIT_MEMCACHE selects a memory cache
 *
 * NOTE(review): some lines are missing from this extract (#else/#endif
 * markers, parts of the memcache error path and the function tail).
 */
2817 void afs_dcacheInit(int afiles, int ablocks, int aDentries, int achunk,
2820 register struct dcache *tdp;
/* Reset global free/discard bookkeeping before building anything. */
2824 afs_freeDCList = NULLIDX;
2825 afs_discardDCList = NULLIDX;
2826 afs_freeDCCount = 0;
2827 afs_freeDSList = (struct dcache *)0;
2828 hzero(afs_indexCounter);
2830 LOCK_INIT(&afs_xdcache, "afs_xdcache");
/* Clamp the chunk-size exponent to a sane value. */
2836 if (achunk < 0 || achunk > 30)
2837 achunk = 13; /* Use default */
2838 AFS_SETCHUNKSIZE(achunk);
2844 if(aflags & AFSCALL_INIT_MEMCACHE) {
2846 * Use a memory cache instead of a disk cache
2848 cacheDiskType = AFS_FCACHE_TYPE_MEM;
2849 afs_cacheType = &afs_MemCacheOps;
2850 afiles = (afiles < aDentries) ? afiles : aDentries; /* min */
2851 ablocks = afiles * (AFS_FIRSTCSIZE/1024);
2852 /* ablocks is reported in 1K blocks */
2853 code = afs_InitMemCache(afiles * AFS_FIRSTCSIZE, AFS_FIRSTCSIZE, aflags);
/* Memory-cache setup failed: disable file access entirely. */
2855 printf("afsd: memory cache too large for available memory.\n");
2856 printf("afsd: AFS files cannot be accessed.\n\n");
2858 afiles = ablocks = 0;
2861 printf("Memory cache: Allocating %d dcache entries...", aDentries);
/* Otherwise use the UFS (disk) cache implementation. */
2863 cacheDiskType = AFS_FCACHE_TYPE_UFS;
2864 afs_cacheType = &afs_UfsCacheOps;
/* Grow the hash tables for larger caches (default afs_dhashsize=1024). */
2867 if (aDentries > 512)
2868 afs_dhashsize = 2048;
2869 /* initialize hash tables */
2870 afs_dvhashTbl = (afs_int32 *) afs_osi_Alloc(afs_dhashsize * sizeof(afs_int32));
2871 afs_dchashTbl = (afs_int32 *) afs_osi_Alloc(afs_dhashsize * sizeof(afs_int32));
2872 for(i=0;i< afs_dhashsize;i++) {
2873 afs_dvhashTbl[i] = NULLIDX;
2874 afs_dchashTbl[i] = NULLIDX;
2876 afs_dvnextTbl = (afs_int32 *) afs_osi_Alloc(afiles * sizeof(afs_int32));
2877 afs_dcnextTbl = (afs_int32 *) afs_osi_Alloc(afiles * sizeof(afs_int32));
2878 for(i=0;i< afiles;i++) {
2879 afs_dvnextTbl[i] = NULLIDX;
2880 afs_dcnextTbl[i] = NULLIDX;
2883 /* Allocate and zero the pointer array to the dcache entries */
2884 afs_indexTable = (struct dcache **)
2885 afs_osi_Alloc(sizeof(struct dcache *) * afiles);
2886 memset((char *)afs_indexTable, 0, sizeof(struct dcache *) * afiles);
2887 afs_indexTimes = (afs_hyper_t *) afs_osi_Alloc(afiles * sizeof(afs_hyper_t));
2888 memset((char *)afs_indexTimes, 0, afiles * sizeof(afs_hyper_t));
2889 afs_indexUnique = (afs_int32 *) afs_osi_Alloc(afiles * sizeof(afs_uint32));
2890 memset((char *)afs_indexUnique, 0, afiles * sizeof(afs_uint32));
2891 afs_indexFlags = (u_char *) afs_osi_Alloc(afiles * sizeof(u_char));
2892 memset((char *)afs_indexFlags, 0, afiles * sizeof(char));
2894 /* Allocate and thread the struct dcache entries themselves */
2895 tdp = afs_Initial_freeDSList =
2896 (struct dcache *) afs_osi_Alloc(aDentries * sizeof(struct dcache));
2897 memset((char *)tdp, 0, aDentries * sizeof(struct dcache));
2898 #ifdef AFS_AIX32_ENV
/* AIX: pin all dcache tables in physical memory (cannot page fault
 * in the cache manager paths). */
2899 pin((char *)afs_indexTable, sizeof(struct dcache *) * afiles);/* XXX */
2900 pin((char *)afs_indexTimes, sizeof(afs_hyper_t) * afiles); /* XXX */
2901 pin((char *)afs_indexFlags, sizeof(char) * afiles); /* XXX */
2902 pin((char *)afs_indexUnique, sizeof(afs_int32) * afiles); /* XXX */
2903 pin((char *)tdp, aDentries * sizeof(struct dcache)); /* XXX */
2904 pin((char *)afs_dvhashTbl, sizeof(afs_int32) * afs_dhashsize); /* XXX */
2905 pin((char *)afs_dchashTbl, sizeof(afs_int32) * afs_dhashsize); /* XXX */
2906 pin((char *)afs_dcnextTbl, sizeof(afs_int32) * afiles); /* XXX */
2907 pin((char *)afs_dvnextTbl, sizeof(afs_int32) * afiles); /* XXX */
/* Thread the entries into a NULL-terminated singly-linked free list
 * via their lruq.next pointers. */
2910 afs_freeDSList = &tdp[0];
2911 for(i=0; i < aDentries-1; i++) {
2912 tdp[i].lruq.next = (struct afs_q *) (&tdp[i+1]);
2914 tdp[aDentries-1].lruq.next = (struct afs_q *) 0;
2916 afs_stats_cmperf.cacheBlocksOrig = afs_stats_cmperf.cacheBlocksTotal = afs_cacheBlocks = ablocks;
2917 afs_ComputeCacheParms(); /* compute parms based on cache size */
2919 afs_dcentries = aDentries;
/*
 * shutdown_dcache
 *
 * Tear down the dcache state built by afs_dcacheInit: free (and on
 * AIX unpin) the per-slot arrays and the initial dcache entry block,
 * reset the hash tables and all free/discard list heads and counters,
 * and re-initialize the afs_xdcache lock so a subsequent init starts
 * from a clean slate.
 *
 * NOTE(review): a few lines are missing from this extract (e.g. the
 * #endif for the AIX block and any frees of the hash tables
 * themselves) — confirm against upstream.
 */
2928 void shutdown_dcache(void)
/* Release the arrays sized by afs_cacheFiles / afs_dcentries,
 * mirroring the allocations in afs_dcacheInit. */
2932 afs_osi_Free(afs_dvnextTbl, afs_cacheFiles * sizeof(afs_int32));
2933 afs_osi_Free(afs_dcnextTbl, afs_cacheFiles * sizeof(afs_int32));
2934 afs_osi_Free(afs_indexTable, afs_cacheFiles * sizeof(struct dcache *));
2935 afs_osi_Free(afs_indexTimes, afs_cacheFiles * sizeof(afs_hyper_t));
2936 afs_osi_Free(afs_indexUnique, afs_cacheFiles * sizeof(afs_uint32));
2937 afs_osi_Free(afs_indexFlags, afs_cacheFiles * sizeof(u_char));
2938 afs_osi_Free(afs_Initial_freeDSList, afs_dcentries * sizeof(struct dcache));
2939 #ifdef AFS_AIX32_ENV
/* AIX: undo the pin() calls made at init time. */
2940 unpin((char *)afs_dcnextTbl, afs_cacheFiles * sizeof(afs_int32));
2941 unpin((char *)afs_dvnextTbl, afs_cacheFiles * sizeof(afs_int32));
2942 unpin((char *)afs_indexTable, afs_cacheFiles * sizeof(struct dcache *));
2943 unpin((char *)afs_indexTimes, afs_cacheFiles * sizeof(afs_hyper_t));
2944 unpin((char *)afs_indexUnique, afs_cacheFiles * sizeof(afs_uint32));
2945 unpin((u_char *)afs_indexFlags, afs_cacheFiles * sizeof(u_char));
2946 unpin(afs_Initial_freeDSList, afs_dcentries * sizeof(struct dcache));
/* Empty the hash buckets. */
2950 for(i=0;i< afs_dhashsize;i++) {
2951 afs_dvhashTbl[i] = NULLIDX;
2952 afs_dchashTbl[i] = NULLIDX;
/* Reset counters and list heads to their pre-init values. */
2956 afs_blocksUsed = afs_dcentries = 0;
2957 hzero(afs_indexCounter);
2959 afs_freeDCCount = 0;
2960 afs_freeDCList = NULLIDX;
2961 afs_discardDCList = NULLIDX;
2962 afs_freeDSList = afs_Initial_freeDSList = 0;
2964 LOCK_INIT(&afs_xdcache, "afs_xdcache");