2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
14 #include "afs/sysincludes.h"
15 #include "afsincludes.h"
17 #if !defined(AFS_LINUX26_ENV)
22 #if defined(AFS_AIX31_ENV)
25 #if !defined(AFS_AIX_ENV) && !defined(AFS_SUN5_ENV) && !defined(AFS_SGI_ENV) && !defined(AFS_LINUX20_ENV)
26 #include "h/kernel.h" /* Isn't needed, so it should go */
28 #endif /* !defined(UKERNEL) */
30 #include "afs/afs_osi.h"
34 #if !defined(UKERNEL) && !defined(AFS_LINUX20_ENV)
36 #endif /* !defined(UKERNEL) */
39 #include "afs/volerrors.h"
40 #include "afs/exporter.h"
41 #include "afs/prs_fs.h"
42 #include "afs/afs_chunkops.h"
45 #include "afs/afs_stats.h"
49 #define BUF_TIME_MAX 0x7fffffff
51 #define NPB 8 /* must be a power of 2 */
52 static int afs_max_buffers; /* should be an integral multiple of NPB */
55 #define AFS_BUFFER_PAGESIZE 2048
58 /* If you change any of this PH stuff, make sure you don't break DZap() */
59 /* use last two bits for page */
61 /* use next five bits for fid */
63 /* page hash table size - this is pretty intertwined with pHash */
64 #define PHSIZE (PHPAGEMASK + PHFIDMASK + 1)
66 #define pHash(fid,page) ((((afs_int32)(fid)) & PHFIDMASK) \
67 | (page & PHPAGEMASK))
70 #undef dirty /* XXX */
73 static struct buffer *Buffers = 0;
74 static char *BufferData;
77 extern struct buf *geteblk();
80 #define timecounter afs_timecounter
83 /* A note on locking in 'struct buffer'
85 * afs_bufferLock protects the hash chain, and the 'lockers' field where that
86 * has a zero value. It must be held whenever lockers is incremented from zero.
88 * The individual buffer lock protects the contents of the structure, including
91 * For safety: afs_bufferLock and the individual buffer lock must be held
92 * when obtaining a reference on a structure. Only the individual buffer lock
93 * need be held when releasing a reference.
95 * The locking hierarchy is afs_bufferLock-> buffer.lock
99 static afs_lock_t afs_bufferLock;
100 static struct buffer *phTable[PHSIZE]; /* page hash table */
102 static afs_int32 timecounter;
104 /* Prototypes for static routines */
105 static struct buffer *afs_newslot(struct dcache *adc, afs_int32 apage,
108 static int dinit_flag = 0;
112 /* Initialize the venus buffer system. */
/* NOTE(review): the function signature is not visible in this fragment;
 * from the comment above and the body this is the buffer-package init
 * routine (presumably DInit(int abuffers)) -- confirm against the full file.
 * It sizes the pool, allocates buffer headers and page memory in NPB-sized
 * chunks, and marks every buffer empty. */
120 /* round up to next multiple of NPB, since we allocate multiple pages per chunk */
121 abuffers = ((abuffers - 1) | (NPB - 1)) + 1;
/* Header array is allocated for the maximum (4x) size up front so the pool
 * can later grow in afs_newslot without reallocating Buffers. */
122 afs_max_buffers = abuffers << 2; /* possibly grow up to 4 times as big */
123 LOCK_INIT(&afs_bufferLock, "afs_bufferLock");
124 Buffers = afs_osi_Alloc(afs_max_buffers * sizeof(struct buffer));
125 osi_Assert(Buffers != NULL);
127 afs_stats_cmperf.bufAlloced = nbuffers = abuffers;
/* Clear the page hash table, then initialize each buffer header. */
128 for (i = 0; i < PHSIZE; i++)
130 for (i = 0; i < abuffers; i++) {
131 if ((i & (NPB - 1)) == 0) {
132 /* time to allocate a fresh buffer */
/* One page-data allocation serves the next NPB buffer headers. */
133 BufferData = afs_osi_Alloc(AFS_BUFFER_PAGESIZE * NPB);
134 osi_Assert(BufferData != NULL);
136 /* Fill in each buffer with an empty indication. */
/* afs_reset_inode marks the buffer as backed by no cache file. */
139 afs_reset_inode(&tb->inode);
142 tb->data = &BufferData[AFS_BUFFER_PAGESIZE * (i & (NPB - 1))];
145 AFS_RWLOCK_INIT(&tb->lock, "buffer lock");
/*
 * DRead -- return, via entry->data, the in-memory copy of one
 * AFS_BUFFER_PAGESIZE page of the directory blob backing dcache entry
 * 'adc', reading it from the cache file on a miss.
 *
 * On a hash-chain hit the buffer's accesstime is refreshed and (for
 * non-head hits) the buffer is moved to the front of its chain.  On a
 * miss a victim slot comes from afs_newslot() and the page is read with
 * afs_CFileRead().  Visible error cases: an empty directory blob, and a
 * page offset past chunkBytes (ENOENT).
 * NOTE(review): several lines of this body are not visible in this
 * fragment (return statements, brace structure) -- confirm against the
 * full file before relying on the exact control flow described here.
 */
151 DRead(struct dcache *adc, int page, struct DirBuffer *entry)
153 /* Read a page from the disk. */
154 struct buffer *tb, *tb2;
155 struct osi_file *tfile;
160 memset(entry, 0, sizeof(struct DirBuffer));
162 ObtainWriteLock(&afs_bufferLock, 256);
/* bufmatch(tb): true when tb already caches this (fid, page) pair. */
164 #define bufmatch(tb) (tb->page == page && tb->fid == adc->index)
/* buf_Front(head,parent,p): move-to-front on a hash chain -- unlink p
 * (which follows parent) and push it onto the head of chain *head. */
165 #define buf_Front(head,parent,p) {(parent)->hashNext = (p)->hashNext; (p)->hashNext= *(head);*(head)=(p);}
167 /* this apparently-complicated-looking code is simply an example of
168 * a little bit of loop unrolling, and is a standard linked-list
169 * traversal trick. It saves a few assignments at the expense
170 * of larger code size. This could be simplified by better use of
/* Case 1: hit at the head of the chain -- already frontmost, no move. */
173 if ((tb = phTable[pHash(adc->index, page)])) {
175 ObtainWriteLock(&tb->lock, 257);
177 ReleaseWriteLock(&afs_bufferLock);
178 tb->accesstime = timecounter++;
179 AFS_STATS(afs_stats_cmperf.bufHits++);
180 ReleaseWriteLock(&tb->lock);
182 entry->data = tb->data;
/* Walk the rest of the chain two links at a time (the unrolling noted
 * above): tb2 is the link after tb; hits are promoted to the front. */
185 struct buffer **bufhead;
186 bufhead = &(phTable[pHash(adc->index, page)]);
187 while ((tb2 = tb->hashNext)) {
189 buf_Front(bufhead, tb, tb2);
190 ObtainWriteLock(&tb2->lock, 258);
192 ReleaseWriteLock(&afs_bufferLock);
193 tb2->accesstime = timecounter++;
194 AFS_STATS(afs_stats_cmperf.bufHits++);
195 ReleaseWriteLock(&tb2->lock);
197 entry->data = tb2->data;
200 if ((tb = tb2->hashNext)) {
202 buf_Front(bufhead, tb2, tb);
203 ObtainWriteLock(&tb->lock, 259);
205 ReleaseWriteLock(&afs_bufferLock);
206 tb->accesstime = timecounter++;
207 AFS_STATS(afs_stats_cmperf.bufHits++);
208 ReleaseWriteLock(&tb->lock);
210 entry->data = tb->data;
/* Cache miss: recycle (or grow) a buffer slot, then read from disk. */
220 AFS_STATS(afs_stats_cmperf.bufMisses++);
222 /* The last thing we looked at was either tb or tb2 (or nothing). That
223 * is at least the oldest buffer on one particular hash chain, so it's
224 * a pretty good place to start looking for the truly oldest buffer.
226 tb = afs_newslot(adc, page, (tb ? tb : tb2));
228 ReleaseWriteLock(&afs_bufferLock);
231 ObtainWriteLock(&tb->lock, 260);
233 ReleaseWriteLock(&afs_bufferLock);
/* Sanity-check the request against the directory blob's recorded size. */
235 if (adc->f.chunk == 0 && adc->f.chunkBytes == 0) {
236 /* The directory blob is empty, apparently. This is not a valid dir
237 * blob, so throw an error. */
240 } else if (page * AFS_BUFFER_PAGESIZE >= adc->f.chunkBytes) {
241 code = ENOENT; /* past the end */
245 tfile = afs_CFileOpen(&adc->f.inode);
251 afs_CFileRead(tfile, tb->page * AFS_BUFFER_PAGESIZE, tb->data,
252 AFS_BUFFER_PAGESIZE);
253 afs_CFileClose(tfile);
/* A short read means the page is not fully backed by the cache file --
 * treated as an error (handling lines not fully visible here). */
254 if (code < AFS_BUFFER_PAGESIZE) {
258 /* Note that findslot sets the page field in the buffer equal to
259 * what it is searching for. */
260 ReleaseWriteLock(&tb->lock);
262 entry->data = tb->data;
/* Error path: reset the buffer's backing inode so the recycled slot no
 * longer claims to hold this file's page. */
267 afs_reset_inode(&tb->inode);
269 ReleaseWriteLock(&tb->lock);
/*
 * FixupBucket -- re-hash buffer 'ap' after its fid/page identity changed
 * (afs_newslot calls this after recycling a slot): remove it from its old
 * phTable chain, then insert it at the head of the chain for the new
 * pHash(fid, page) bucket.
 * NOTE(review): presumably called with afs_bufferLock held (it mutates
 * the shared hash chains) -- confirm against callers in the full file.
 */
274 FixupBucket(struct buffer *ap)
276 struct buffer **lp, *tp;
278 /* first try to get it out of its current hash bucket, in which it
280 AFS_STATCNT(FixupBucket);
/* Unlink pass over the old chain (unlink lines not visible here). */
283 for (tp = *lp; tp; tp = tp->hashNext) {
290 /* now figure the new hash bucket */
291 i = pHash(ap->fid, ap->page);
292 ap->hashIndex = i; /* remember where we are for deletion */
293 ap->hashNext = phTable[i]; /* add us to the list */
294 phTable[i] = ap; /* at the front, since it's LRU */
/*
 * afs_newslot -- find (or create) a reusable buffer slot for page 'apage'
 * of dcache entry 'adc'.
 *
 * Preference order visible here: the caller-supplied hint 'lp' if it is
 * unlocked; otherwise the unlocked buffer with the smallest accesstime;
 * otherwise grow the pool by one NPB-sized increment (bounded by
 * afs_max_buffers).  If the chosen victim is dirty its page is written
 * back before reuse.  Returns the initialized slot, or NULL when no slot
 * can be obtained or the write-back open fails (caller reports EIO).
 * Caller holds afs_bufferLock (per the locking note at the top of file).
 */
297 /* lp is pointer to a fairly-old buffer */
298 static struct buffer *
299 afs_newslot(struct dcache *adc, afs_int32 apage, struct buffer *lp)
301 /* Find a usable buffer slot */
305 struct osi_file *tfile;
307 AFS_STATCNT(afs_newslot);
308 /* we take a pointer here to a buffer which was at the end of an
309 * LRU hash chain. Odds are, it's one of the older buffers, not
310 * one of the newer. Having an older buffer to start with may
311 * permit us to avoid a few of the assignments in the "typical
312 * case" for loop below.
314 if (lp && (lp->lockers == 0)) {
320 /* timecounter might have wrapped, if machine is very very busy
321 * and stays up for a long time. Timecounter mustn't wrap twice
322 * (positive->negative->positive) before calling newslot, but that
323 * would require 2 billion consecutive cache hits... Anyway, the
324 * penalty is only that the cache replacement policy will be
325 * almost MRU for the next ~2 billion DReads... newslot doesn't
326 * get called nearly as often as DRead, so in order to avoid the
327 * performance penalty of using the hypers, it's worth doing the
328 * extra check here every time. It's probably cheaper than doing
329 * hcmp, anyway. There is a little performance hit resulting from
330 * resetting all the access times to 0, but it only happens once
331 * every month or so, and the access times will rapidly sort
332 * themselves back out after just a few more DReads.
/* Wrap recovery: sweep all buffers (resetting access times per the
 * comment above) and grab any unlocked one as the candidate. */
334 if (timecounter < 0) {
337 for (i = 0; i < nbuffers; i++, tp++) {
339 if (!lp && !tp->lockers) /* one is as good as the rest, I guess */
343 /* this is the typical case */
/* Linear scan for the least-recently-used unlocked buffer. */
345 for (i = 0; i < nbuffers; i++, tp++) {
346 if (tp->lockers == 0) {
347 if (!lp || tp->accesstime < lt) {
356 /* No unlocked buffers. If still possible, allocate a new increment */
357 if (nbuffers + NPB > afs_max_buffers) {
358 /* There are no unlocked buffers -- this used to panic, but that
359 * seems extreme. To the best of my knowledge, all the callers
360 * of DRead are prepared to handle a zero return. Some of them
361 * just panic directly, but not all of them. */
362 afs_warn("afs: all buffers locked\n");
/* Grow the pool: one fresh NPB-page data chunk plus NPB new headers
 * (Buffers itself was sized for afs_max_buffers in DInit). */
366 BufferData = afs_osi_Alloc(AFS_BUFFER_PAGESIZE * NPB);
367 osi_Assert(BufferData != NULL);
368 for (i = 0; i< NPB; i++) {
369 /* Fill in each buffer with an empty indication. */
370 tp = &Buffers[i + nbuffers];
372 afs_reset_inode(&tp->inode);
375 tp->data = &BufferData[AFS_BUFFER_PAGESIZE * i];
378 AFS_RWLOCK_INIT(&tp->lock, "buffer lock");
/* First of the new headers becomes the victim slot. */
380 lp = &Buffers[nbuffers];
/* Write back the victim's old page if dirty (the dirty check itself is
 * not visible in this fragment -- presumably guards this write-back). */
385 /* see DFlush for rationale for not getting and locking the dcache */
386 tfile = afs_CFileOpen(&lp->inode);
388 return NULL; /* Callers will flag as EIO */
390 afs_CFileWrite(tfile, lp->page * AFS_BUFFER_PAGESIZE, lp->data,
391 AFS_BUFFER_PAGESIZE);
393 afs_CFileClose(tfile);
394 AFS_STATS(afs_stats_cmperf.bufFlushDirty++);
397 /* Zero out the data so we don't leak something we shouldn't. */
398 memset(lp->data, 0, AFS_BUFFER_PAGESIZE);
399 /* Now fill in the header. */
400 lp->fid = adc->index;
401 afs_copy_inode(&lp->inode, &adc->f.inode);
403 lp->accesstime = timecounter++;
404 FixupBucket(lp); /* move to the right hash bucket */
/*
 * DRelease -- drop a reference to the buffer underlying 'entry'
 * (obtained from DRead/DNew).  The 'flag' parameter presumably marks the
 * buffer dirty when nonzero -- TODO confirm; the lines that decrement
 * 'lockers' and set 'dirty' are not visible in this fragment.
 */
410 DRelease(struct DirBuffer *entry, int flag)
414 AFS_STATCNT(DRelease);
421 ObtainWriteLock(&tp->lock, 261);
425 ReleaseWriteLock(&tp->lock);
/*
 * DVOffset -- return the byte offset, within the directory blob, that
 * entry->data points at: the buffer's page base plus the pointer's
 * displacement into that page's data area.
 */
429 DVOffset(struct DirBuffer *entry)
433 AFS_STATCNT(DVOffset);
436 return AFS_BUFFER_PAGESIZE * bp->page
437 + (char *)entry->data - (char *)bp->data;
441 * Zap one dcache entry: destroy one FID's buffers.
443 * 1/1/91 - I've modified the hash function to take the page as well
444 * as the *fid, so that lookup will be a bit faster. That presents some
445 * difficulties for Zap, which now has to have some knowledge of the nature
446 * of the hash function. Oh well. This should use the list traversal
449 * \param adc The dcache entry to be zapped.
452 DZap(struct dcache *adc)
455 /* Destroy all buffers pertaining to a particular fid. */
459 ObtainReadLock(&afs_bufferLock);
/* Because pHash mixes the page into the bucket index, one fid's buffers
 * can live in any of the PHPAGEMASK+1 page variants of its hash -- scan
 * each such chain and invalidate every matching buffer. */
461 for (i = 0; i <= PHPAGEMASK; i++)
462 for (tb = phTable[pHash(adc->index, i)]; tb; tb = tb->hashNext)
463 if (tb->fid == adc->index) {
464 ObtainWriteLock(&tb->lock, 262);
/* Detach the buffer from its backing cache file; data is discarded,
 * not written back. */
466 afs_reset_inode(&tb->inode);
468 ReleaseWriteLock(&tb->lock);
470 ReleaseReadLock(&afs_bufferLock);
/*
 * DFlushBuffer -- write one buffer's page back to its backing cache file
 * and clear its dirty flag.
 * NOTE(review): presumably called with ab->lock held (DFlushDCache/DFlush
 * take the buffer lock before flushing) -- confirm in the full file; the
 * error handling for a failed afs_CFileOpen is not visible here.
 */
474 DFlushBuffer(struct buffer *ab)
476 struct osi_file *tfile;
478 tfile = afs_CFileOpen(&ab->inode);
480 afs_CFileWrite(tfile, ab->page * AFS_BUFFER_PAGESIZE,
481 ab->data, AFS_BUFFER_PAGESIZE);
482 ab->dirty = 0; /* Clear the dirty flag */
483 afs_CFileClose(tfile);
/*
 * DFlushDCache -- flush every buffer belonging to dcache entry 'adc'
 * back to the cache file.  Scans all PHPAGEMASK+1 possible hash chains
 * for the fid (see DZap for why), locking each matching buffer.
 * afs_bufferLock is dropped around the per-buffer I/O and re-acquired
 * afterwards -- the hash chain may therefore change between iterations
 * (restart/continuation logic not visible in this fragment).
 */
487 DFlushDCache(struct dcache *adc)
492 ObtainReadLock(&afs_bufferLock);
494 for (i = 0; i <= PHPAGEMASK; i++)
495 for (tb = phTable[pHash(adc->index, i)]; tb; tb = tb->hashNext)
496 if (tb->fid == adc->index) {
497 ObtainWriteLock(&tb->lock, 701);
/* Drop the hash lock across the (slow) disk write; tb->lock still
 * protects the buffer contents. */
499 ReleaseReadLock(&afs_bufferLock);
504 ReleaseWriteLock(&tb->lock);
505 ObtainReadLock(&afs_bufferLock);
508 ReleaseReadLock(&afs_bufferLock);
514 /* Flush all the modified buffers. */
/* NOTE(review): the function signature is not visible in this fragment;
 * from the comment above this is DFlush -- confirm against the full file.
 * It walks the whole buffer array (not the hash table) and writes out
 * each dirty buffer, dropping afs_bufferLock around the I/O. */
520 ObtainReadLock(&afs_bufferLock);
521 for (i = 0; i < nbuffers; i++, tb++) {
523 ObtainWriteLock(&tb->lock, 263);
525 ReleaseReadLock(&afs_bufferLock);
527 /* it seems safe to do this I/O without having the dcache
528 * locked, since the only things that will update the data in
529 * a directory are the buffer package, which holds the relevant
530 * tb->lock while doing the write, or afs_GetDCache, which
531 * DZap's the directory while holding the dcache lock.
532 * It is not possible to lock the dcache or even call
533 * afs_GetDSlot to map the index to the dcache since the dir
534 * package's caller has some dcache object locked already (so
535 * we cannot lock afs_xdcache). In addition, we cannot obtain
536 * a dcache lock while holding the tb->lock of the same file
537 * since that can deadlock with DRead/DNew */
541 ReleaseWriteLock(&tb->lock);
542 ObtainReadLock(&afs_bufferLock);
545 ReleaseReadLock(&afs_bufferLock);
/*
 * DNew -- like DRead, but for a page assumed not to exist yet: obtain a
 * fresh (zeroed) buffer for page 'page' of 'adc' without reading from
 * disk, extending the chunk's recorded size if the new page lies past
 * the current end.  Returns the buffer data via entry->data.
 */
551 DNew(struct dcache *adc, int page, struct DirBuffer *entry)
553 /* Same as read, only do *not* even try to read the page, since it
554 * probably doesn't exist. */
558 ObtainWriteLock(&afs_bufferLock, 264);
/* No hint buffer: let afs_newslot scan for the LRU victim itself. */
559 if ((tb = afs_newslot(adc, page, NULL)) == 0) {
560 ReleaseWriteLock(&afs_bufferLock);
563 /* extend the chunk, if needed */
564 /* Do it now, not in DFlush or afs_newslot when the data is written out,
565 * since now our caller has adc->lock writelocked, and we can't acquire
566 * that lock (or even map from a fid to a dcache) in afs_newslot or
567 * DFlush due to lock hierarchy issues */
568 if ((page + 1) * AFS_BUFFER_PAGESIZE > adc->f.chunkBytes) {
569 afs_AdjustSize(adc, (page + 1) * AFS_BUFFER_PAGESIZE);
/* Persist the updated dcache metadata immediately; failure here is
 * fatal by design (osi_Assert). */
570 osi_Assert(afs_WriteDCache(adc, 1) == 0);
572 ObtainWriteLock(&tb->lock, 265);
574 ReleaseWriteLock(&afs_bufferLock);
575 ReleaseWriteLock(&tb->lock);
577 entry->data = tb->data;
/*
 * shutdown_bufferpackage -- tear down the buffer package: free every
 * NPB-sized page-data chunk, free the header array, clear the page hash
 * table, and (on a cold shutdown) zero the package lock so a later DInit
 * starts from a clean state.
 */
583 shutdown_bufferpackage(void)
588 AFS_STATCNT(shutdown_bufferpackage);
589 /* Free all allocated Buffers and associated buffer pages */
/* Data was allocated one chunk per NPB buffers (see DInit/afs_newslot),
 * so free it at the same stride using the first header of each chunk. */
594 for (i = 0; i < nbuffers; i += NPB, tp += NPB) {
595 afs_osi_Free(tp->data, NPB * AFS_BUFFER_PAGESIZE);
597 afs_osi_Free(Buffers, nbuffers * sizeof(struct buffer));
601 for (i = 0; i < PHSIZE; i++)
604 if (afs_cold_shutdown) {
605 memset(&afs_bufferLock, 0, sizeof(afs_lock_t));