2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
14 #include "afs/sysincludes.h"
15 #include "afsincludes.h"
20 #if defined(AFS_AIX31_ENV)
23 #if !defined(AFS_AIX_ENV) && !defined(AFS_SUN5_ENV) && !defined(AFS_SGI_ENV) && !defined(AFS_LINUX20_ENV)
24 #include "h/kernel.h" /* Isn't needed, so it should go */
26 #endif /* !defined(UKERNEL) */
28 #include "afs/afs_osi.h"
32 #if !defined(UKERNEL) && !defined(AFS_LINUX20_ENV)
34 #endif /* !defined(UKERNEL) */
37 #include "afs/volerrors.h"
38 #include "afs/exporter.h"
39 #include "afs/prs_fs.h"
40 #include "afs/afs_chunkops.h"
43 #include "afs/afs_stats.h"
47 #define BUF_TIME_MAX 0x7fffffff
49 /* number of pages per Unix buffer, when we're using Unix buffer pool */
52 #define AFS_BUFFER_PAGESIZE 2048
55 /* If you change any of this PH stuff, make sure you don't break DZap() */
56 /* use last two bits for page */
58 /* use next five bits for fid */
60 /* page hash table size - this is pretty intertwined with pHash */
61 #define PHSIZE (PHPAGEMASK + PHFIDMASK + 1)
63 #define pHash(fid,page) ((((afs_int32)(fid)) & PHFIDMASK) \
64 | (page & PHPAGEMASK))
67 #undef dirty /* XXX */
70 static struct buffer *Buffers = 0;
71 static char *BufferData;
74 extern struct buf *geteblk();
77 #define timecounter afs_timecounter
80 /* A note on locking in 'struct buffer'
82 * afs_bufferLock protects the hash chain, and the 'lockers' field where that
83 * has a zero value. It must be held whenever lockers is incremented from zero.
85 * The individual buffer lock protects the contents of the structure, including
88 * For safety: afs_bufferLock and the individual buffer lock must be held
89 * when obtaining a reference on a structure. Only the individual buffer lock
90 * need be held when releasing a reference.
92 * The locking hierarchy is afs_bufferLock-> buffer.lock
96 static afs_lock_t afs_bufferLock;
97 static struct buffer *phTable[PHSIZE]; /* page hash table */
99 static afs_int32 timecounter;
101 /* Prototypes for static routines */
102 static struct buffer *afs_newslot(struct dcache *adc, afs_int32 apage,
103 register struct buffer *lp);
105 static int dinit_flag = 0;
109 /* Initialize the venus buffer system. */
/* NOTE(review): the function header line is not visible in this excerpt;
 * presumably this is DInit(int abuffers) — confirm against the full file.
 * Allocates 'abuffers' buffer headers (Buffers[]) plus their data pages,
 * clears the page hash table, and initializes afs_bufferLock and each
 * buffer's rwlock.  With AFS_USEBUFFERS the data pages come from the Unix
 * buffer pool via geteblk() in chunks of NPB pages; otherwise one
 * contiguous arena (BufferData) is carved into AFS_BUFFER_PAGESIZE
 * slices, one per buffer. */
111 register struct buffer *tb;
112 #if defined(AFS_USEBUFFERS)
113 struct buf *tub; /* unix buffer for allocation */
120 #if defined(AFS_USEBUFFERS)
121 /* round up to next multiple of NPB, since we allocate multiple pages per chunk */
122 abuffers = ((abuffers - 1) | (NPB - 1)) + 1;
124 LOCK_INIT(&afs_bufferLock, "afs_bufferLock");
/* One allocation for all buffer headers; freed in shutdown_bufferpackage. */
126 (struct buffer *)afs_osi_Alloc(abuffers * sizeof(struct buffer));
127 #if !defined(AFS_USEBUFFERS)
/* Single contiguous data arena when not using the OS buffer pool. */
128 BufferData = (char *)afs_osi_Alloc(abuffers * AFS_BUFFER_PAGESIZE);
131 afs_stats_cmperf.bufAlloced = nbuffers = abuffers;
132 for (i = 0; i < PHSIZE; i++)
134 for (i = 0; i < abuffers; i++) {
135 #if defined(AFS_USEBUFFERS)
136 if ((i & (NPB - 1)) == 0) {
137 /* time to allocate a fresh buffer */
138 tub = geteblk(AFS_BUFFER_PAGESIZE * NPB);
139 BufferData = (char *)tub->b_un.b_addr;
142 /* Fill in each buffer with an empty indication. */
145 afs_reset_inode(&tb->inode);
148 #if defined(AFS_USEBUFFERS)
149 if ((i & (NPB - 1)) == 0)
/* Data pointer is an offset within the current NPB-page chunk... */
153 tb->data = &BufferData[AFS_BUFFER_PAGESIZE * (i & (NPB - 1))];
/* ...or within the single arena when AFS_USEBUFFERS is off. */
155 tb->data = &BufferData[AFS_BUFFER_PAGESIZE * i];
159 AFS_RWLOCK_INIT(&tb->lock, "buffer lock");
/* DRead: return the buffer holding 'page' of the directory chunk 'adc',
 * reading it from the cache file on a miss.  Looks the page up in the
 * phTable hash chain (with a move-to-front heuristic on a hit deeper in
 * the chain); on a miss it steals the LRU slot via afs_newslot() and
 * fills it with afs_CFileRead.  NOTE(review): callers apparently can get
 * a zero/NULL return (see the "all buffers locked" path in afs_newslot)
 * — the elided return statements should be confirmed in the full file. */
165 DRead(register struct dcache *adc, register int page)
167 /* Read a page from the disk. */
168 register struct buffer *tb, *tb2;
169 struct osi_file *tfile;
173 ObtainWriteLock(&afs_bufferLock, 256);
175 #define bufmatch(tb) (tb->page == page && tb->fid == adc->index)
176 #define buf_Front(head,parent,p) {(parent)->hashNext = (p)->hashNext; (p)->hashNext= *(head);*(head)=(p);}
178 /* this apparently-complicated-looking code is simply an example of
179 * a little bit of loop unrolling, and is a standard linked-list
180 * traversal trick. It saves a few assignments at the expense
181 * of larger code size. This could be simplified by better use of
/* Fast path: the page is at the head of its hash chain. */
184 if ((tb = phTable[pHash(adc->index, page)])) {
186 ObtainWriteLock(&tb->lock, 257);
/* Drop the global lock as soon as the individual buffer is locked,
 * per the locking hierarchy documented above. */
188 ReleaseWriteLock(&afs_bufferLock);
189 tb->accesstime = timecounter++;
190 AFS_STATS(afs_stats_cmperf.bufHits++);
191 ReleaseWriteLock(&tb->lock);
194 register struct buffer **bufhead;
195 bufhead = &(phTable[pHash(adc->index, page)]);
/* Unrolled two-at-a-time walk of the chain; hits found deeper in the
 * chain are moved to the front with buf_Front. */
196 while ((tb2 = tb->hashNext)) {
198 buf_Front(bufhead, tb, tb2);
199 ObtainWriteLock(&tb2->lock, 258);
201 ReleaseWriteLock(&afs_bufferLock);
202 tb2->accesstime = timecounter++;
203 AFS_STATS(afs_stats_cmperf.bufHits++);
204 ReleaseWriteLock(&tb2->lock);
207 if ((tb = tb2->hashNext)) {
209 buf_Front(bufhead, tb2, tb);
210 ObtainWriteLock(&tb->lock, 259);
212 ReleaseWriteLock(&afs_bufferLock);
213 tb->accesstime = timecounter++;
214 AFS_STATS(afs_stats_cmperf.bufHits++);
215 ReleaseWriteLock(&tb->lock);
/* Miss path: claim a replacement slot. */
225 AFS_STATS(afs_stats_cmperf.bufMisses++);
227 /* The last thing we looked at was either tb or tb2 (or nothing). That
228 * is at least the oldest buffer on one particular hash chain, so it's
229 * a pretty good place to start looking for the truly oldest buffer.
231 tb = afs_newslot(adc, page, (tb ? tb : tb2));
233 ReleaseWriteLock(&afs_bufferLock);
236 ObtainWriteLock(&tb->lock, 260);
238 ReleaseWriteLock(&afs_bufferLock);
/* Reading past the chunk's current size: invalidate the slot rather
 * than read garbage. */
239 if (page * AFS_BUFFER_PAGESIZE >= adc->f.chunkBytes) {
241 afs_reset_inode(&tb->inode);
243 ReleaseWriteLock(&tb->lock);
246 tfile = afs_CFileOpen(&adc->f.inode);
248 afs_CFileRead(tfile, tb->page * AFS_BUFFER_PAGESIZE, tb->data,
249 AFS_BUFFER_PAGESIZE);
250 afs_CFileClose(tfile);
/* Short read => treat as failure and invalidate the buffer. */
251 if (code < AFS_BUFFER_PAGESIZE) {
253 afs_reset_inode(&tb->inode);
255 ReleaseWriteLock(&tb->lock);
258 /* Note that findslot sets the page field in the buffer equal to
259 * what it is searching for. */
260 ReleaseWriteLock(&tb->lock);
/* FixupBucket: after a buffer's (fid, page) identity changes, unlink it
 * from its old hash chain and push it onto the front of the chain for
 * its new pHash(fid, page) bucket.  Caller must hold afs_bufferLock
 * (write) — this mutates the hash chains. */
265 FixupBucket(register struct buffer *ap)
267 register struct buffer **lp, *tp;
269 /* first try to get it out of its current hash bucket, in which it
271 AFS_STATCNT(FixupBucket);
274 for (tp = *lp; tp; tp = tp->hashNext) {
281 /* now figure the new hash bucket */
282 i = pHash(ap->fid, ap->page);
283 ap->hashIndex = i; /* remember where we are for deletion */
284 ap->hashNext = phTable[i]; /* add us to the list */
285 phTable[i] = ap; /* at the front, since it's LRU */
288 /* lp is pointer to a fairly-old buffer */
/* afs_newslot: pick a replacement buffer for (adc, apage).  Prefers the
 * unlocked buffer with the smallest accesstime (approximate LRU), using
 * the caller-supplied 'lp' as a starting candidate; flushes the victim
 * if dirty, then rewrites its header (fid/inode/accesstime) and rehashes
 * it via FixupBucket.  Caller must hold afs_bufferLock (write).
 * NOTE(review): appears to return NULL when every buffer is locked (the
 * afs_warn path) — confirm the elided return in the full file. */
289 static struct buffer *
290 afs_newslot(struct dcache *adc, afs_int32 apage, register struct buffer *lp)
292 /* Find a usable buffer slot */
293 register afs_int32 i;
295 register struct buffer *tp;
296 struct osi_file *tfile;
298 AFS_STATCNT(afs_newslot);
299 /* we take a pointer here to a buffer which was at the end of an
300 * LRU hash chain. Odds are, it's one of the older buffers, not
301 * one of the newer. Having an older buffer to start with may
302 * permit us to avoid a few of the assignments in the "typical
303 * case" for loop below.
/* Only usable as a seed if nobody holds a reference to it. */
305 if (lp && (lp->lockers == 0)) {
311 /* timecounter might have wrapped, if machine is very very busy
312 * and stays up for a long time. Timecounter mustn't wrap twice
313 * (positive->negative->positive) before calling newslot, but that
314 * would require 2 billion consecutive cache hits... Anyway, the
315 * penalty is only that the cache replacement policy will be
316 * almost MRU for the next ~2 billion DReads... newslot doesn't
317 * get called nearly as often as DRead, so in order to avoid the
318 * performance penalty of using the hypers, it's worth doing the
319 * extra check here every time. It's probably cheaper than doing
320 * hcmp, anyway. There is a little performance hit resulting from
321 * resetting all the access times to 0, but it only happens once
322 * every month or so, and the access times will rapidly sort
323 * themselves back out after just a few more DReads.
/* Wrap recovery: reset all access times (see comment above). */
325 if (timecounter < 0) {
328 for (i = 0; i < nbuffers; i++, tp++) {
330 if (!lp && !tp->lockers) /* one is as good as the rest, I guess */
334 /* this is the typical case */
/* Linear scan for the unlocked buffer with the oldest accesstime. */
336 for (i = 0; i < nbuffers; i++, tp++) {
337 if (tp->lockers == 0) {
338 if (!lp || tp->accesstime < lt) {
347 /* There are no unlocked buffers -- this used to panic, but that
348 * seems extreme. To the best of my knowledge, all the callers
349 * of DRead are prepared to handle a zero return. Some of them
350 * just panic directly, but not all of them. */
351 afs_warn("afs: all buffers locked\n");
356 /* see DFlush for rationale for not getting and locking the dcache */
/* Victim was dirty: write it back to its old cache file first. */
357 tfile = afs_CFileOpen(&lp->inode);
358 afs_CFileWrite(tfile, lp->page * AFS_BUFFER_PAGESIZE, lp->data,
359 AFS_BUFFER_PAGESIZE);
361 afs_CFileClose(tfile);
362 AFS_STATS(afs_stats_cmperf.bufFlushDirty++);
365 /* Now fill in the header. */
366 lp->fid = adc->index;
367 afs_copy_inode(&lp->inode, &adc->f.inode);
369 lp->accesstime = timecounter++;
370 FixupBucket(lp); /* move to the right hash bucket */
/* DRelease: release a reference on the buffer that owns the data page
 * 'loc'; 'flag' non-zero means the caller modified the page (the elided
 * lines presumably set bp->dirty and decrement bp->lockers — confirm in
 * the full file).  The buffer header is recovered from the raw data
 * address: by arena arithmetic normally, or by scanning the NPB-sized
 * Unix-buffer chunks under AFS_USEBUFFERS. */
376 DRelease(void *loc, int flag)
378 /* Release a buffer, specifying whether or not the buffer has been
379 * modified by the locker. */
380 register struct buffer *bp = (struct buffer *)loc;
382 #if defined(AFS_USEBUFFERS)
383 register struct buffer *tp;
386 AFS_STATCNT(DRelease);
389 #if defined(AFS_USEBUFFERS)
390 /* look for buffer by scanning Unix buffers for appropriate address */
392 for (index = 0; index < nbuffers; index += NPB, tp += NPB) {
393 if ((afs_int32) bp >= (afs_int32) tp->data
395 (afs_int32) tp->data + AFS_BUFFER_PAGESIZE * NPB) {
396 /* we found the right range */
397 index += ((afs_int32) bp - (afs_int32) tp->data) >> LOGPS;
/* Non-AFS_USEBUFFERS: page index falls straight out of the arena offset. */
402 index = (((char *)bp) - ((char *)BufferData)) >> LOGPS;
404 bp = &(Buffers[index]);
405 ObtainWriteLock(&bp->lock, 261);
409 ReleaseWriteLock(&bp->lock);
/* DVOffset: map a pointer into a buffer's data page back to the byte
 * offset within the cached file (page * AFS_BUFFER_PAGESIZE plus the
 * offset of 'ap' inside the page).  Uses the same address-to-header
 * recovery as DRelease.  NOTE(review): the out-of-range branch's return
 * value is elided here — presumably an error sentinel; confirm. */
413 DVOffset(register void *ap)
415 /* Return the byte within a file represented by a buffer pointer. */
416 register struct buffer *bp;
418 #if defined(AFS_USEBUFFERS)
419 register struct buffer *tp;
421 AFS_STATCNT(DVOffset);
423 #if defined(AFS_USEBUFFERS)
424 /* look for buffer by scanning Unix buffers for appropriate address */
426 for (index = 0; index < nbuffers; index += NPB, tp += NPB) {
427 if ((afs_int32) bp >= (afs_int32) tp->data
429 (afs_int32) tp->data + AFS_BUFFER_PAGESIZE * NPB) {
430 /* we found the right range */
431 index += ((afs_int32) bp - (afs_int32) tp->data) >> LOGPS;
436 index = (((char *)bp) - ((char *)BufferData)) >> LOGPS;
/* Guard against an address that maps outside the buffer array. */
438 if (index < 0 || index >= nbuffers)
440 bp = &(Buffers[index]);
441 return AFS_BUFFER_PAGESIZE * bp->page + (int)(((char *)ap) - bp->data);
445 * Zap one dcache entry: destroy one FID's buffers.
447 * 1/1/91 - I've modified the hash function to take the page as well
448 * as the *fid, so that lookup will be a bit faster. That presents some
449 * difficulties for Zap, which now has to have some knowledge of the nature
450 * of the hash function. Oh well. This should use the list traversal
453 * \param adc The dcache entry to be zapped.
456 DZap(struct dcache *adc)
459 /* Destroy all buffers pertaining to a particular fid. */
460 register struct buffer *tb;
463 ObtainReadLock(&afs_bufferLock);
/* Because pHash() mixes the page into the bucket index, a fid's buffers
 * can live in any of the PHPAGEMASK+1 page-slots of its hash group, so
 * every one of those chains must be walked. */
465 for (i = 0; i <= PHPAGEMASK; i++)
466 for (tb = phTable[pHash(adc->index, i)]; tb; tb = tb->hashNext)
467 if (tb->fid == adc->index) {
468 ObtainWriteLock(&tb->lock, 262);
/* Invalidate the buffer in place; it stays on its hash chain. */
470 afs_reset_inode(&tb->inode);
472 ReleaseWriteLock(&tb->lock);
474 ReleaseReadLock(&afs_bufferLock);
/* DFlushBuffer: write one buffer's page back to its cache file and clear
 * its dirty flag.  Caller is expected to hold ab->lock (write) — this is
 * the shared write-back helper for the flush paths. */
478 DFlushBuffer(struct buffer *ab) {
479 struct osi_file *tfile;
481 tfile = afs_CFileOpen(&ab->inode);
482 afs_CFileWrite(tfile, ab->page * AFS_BUFFER_PAGESIZE,
483 ab->data, AFS_BUFFER_PAGESIZE);
484 ab->dirty = 0; /* Clear the dirty flag */
485 afs_CFileClose(tfile);
/* DFlushDCache: write back every dirty buffer belonging to one dcache
 * entry.  Walks the same per-page hash chains as DZap; drops the global
 * read lock while doing the (possibly slow) write of each buffer, then
 * reacquires it before continuing the scan — so the chain walk restarts
 * from a consistent lock state each iteration. */
489 DFlushDCache(struct dcache *adc)
494 ObtainReadLock(&afs_bufferLock);
496 for (i = 0; i <= PHPAGEMASK; i++)
497 for (tb = phTable[pHash(adc->index, i)]; tb; tb = tb->hashNext)
498 if (tb->fid == adc->index) {
499 ObtainWriteLock(&tb->lock, 701);
/* Release the global lock around the I/O; only tb->lock is needed. */
501 ReleaseReadLock(&afs_bufferLock);
506 ReleaseWriteLock(&tb->lock);
507 ObtainReadLock(&afs_bufferLock);
510 ReleaseReadLock(&afs_bufferLock);
516 /* Flush all the modified buffers. */
/* NOTE(review): the function header line is elided in this excerpt —
 * presumably DFlush(void); confirm against the full file.  Scans the
 * whole buffer array and writes back every dirty, unreferenced buffer,
 * dropping afs_bufferLock around each write as in DFlushDCache. */
518 register struct buffer *tb;
522 ObtainReadLock(&afs_bufferLock);
523 for (i = 0; i < nbuffers; i++, tb++) {
525 ObtainWriteLock(&tb->lock, 263);
527 ReleaseReadLock(&afs_bufferLock);
529 /* it seems safe to do this I/O without having the dcache
530 * locked, since the only things that will update the data in
531 * a directory are the buffer package, which holds the relevant
532 * tb->lock while doing the write, or afs_GetDCache, which
533 * DZap's the directory while holding the dcache lock.
534 * It is not possible to lock the dcache or even call
535 * afs_GetDSlot to map the index to the dcache since the dir
536 * package's caller has some dcache object locked already (so
537 * we cannot lock afs_xdcache). In addition, we cannot obtain
538 * a dcache lock while holding the tb->lock of the same file
539 * since that can deadlock with DRead/DNew */
543 ReleaseWriteLock(&tb->lock);
544 ObtainReadLock(&afs_bufferLock);
547 ReleaseReadLock(&afs_bufferLock);
/* DNew: return a fresh buffer for (adc, page) WITHOUT reading the page
 * from disk — used when the caller is about to create the page's
 * contents.  Grows the chunk (afs_AdjustSize + afs_WriteDCache) here
 * rather than at flush time, because the caller already holds adc->lock
 * and the flush paths cannot take it (see comment below).  Returns the
 * locked... NOTE(review): whether the buffer is returned held is not
 * visible in this excerpt — confirm the elided lines. */
551 DNew(register struct dcache *adc, register int page)
553 /* Same as read, only do *not* even try to read the page, since it probably doesn't exist. */
554 register struct buffer *tb;
556 ObtainWriteLock(&afs_bufferLock, 264);
/* afs_newslot can fail (all buffers locked) — bail out cleanly. */
557 if ((tb = afs_newslot(adc, page, NULL)) == 0) {
558 ReleaseWriteLock(&afs_bufferLock);
561 /* extend the chunk, if needed */
562 /* Do it now, not in DFlush or afs_newslot when the data is written out,
563 * since now our caller has adc->lock writelocked, and we can't acquire
564 * that lock (or even map from a fid to a dcache) in afs_newslot or
565 * DFlush due to lock hierarchy issues */
566 if ((page + 1) * AFS_BUFFER_PAGESIZE > adc->f.chunkBytes) {
567 afs_AdjustSize(adc, (page + 1) * AFS_BUFFER_PAGESIZE);
568 afs_WriteDCache(adc, 1);
570 ObtainWriteLock(&tb->lock, 265);
572 ReleaseWriteLock(&afs_bufferLock);
573 ReleaseWriteLock(&tb->lock);
/* shutdown_bufferpackage: tear down the buffer package.  On a cold
 * shutdown (afs_cold_shutdown), frees the data arena (or returns the
 * Unix buffers under AFS_USEBUFFERS), frees the Buffers[] header array,
 * clears the page hash table, and zeroes afs_bufferLock so a later
 * DInit starts from a clean state. */
578 shutdown_bufferpackage(void)
580 #if defined(AFS_USEBUFFERS)
581 register struct buffer *tp;
585 AFS_STATCNT(shutdown_bufferpackage);
586 /* Free all allocated Buffers and associated buffer pages */
588 if (afs_cold_shutdown) {
590 #if !defined(AFS_USEBUFFERS)
591 afs_osi_Free(BufferData, nbuffers * AFS_BUFFER_PAGESIZE);
/* AFS_USEBUFFERS: release one Unix buffer per NPB-page chunk. */
594 for (i = 0; i < nbuffers; i += NPB, tp += NPB) {
595 /* The following check shouldn't be necessary and it will be removed soon */
598 ("afs: shutdown_bufferpackage: bufp == 0!! Shouldn't happen\n");
605 afs_osi_Free(Buffers, nbuffers * sizeof(struct buffer));
608 for (i = 0; i < PHSIZE; i++)
/* Zero the lock so the package can be re-initialized after restart. */
610 memset(&afs_bufferLock, 0, sizeof(afs_lock_t));