2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 #include <afsconfig.h>
11 #include "afs/param.h"
16 #include "afs/sysincludes.h"
17 #include "afsincludes.h"
22 #if defined(AFS_AIX31_ENV)
25 #if !defined(AFS_AIX_ENV) && !defined(AFS_SUN5_ENV) && !defined(AFS_SGI_ENV) && !defined(AFS_LINUX20_ENV)
26 #include "h/kernel.h" /* Isn't needed, so it should go */
28 #endif /* !defined(UKERNEL) */
30 #include "afs/afs_osi.h"
34 #if !defined(UKERNEL) && !defined(AFS_LINUX20_ENV)
36 #endif /* !defined(UKERNEL) */
39 #include "afs/volerrors.h"
40 #include "afs/exporter.h"
41 #include "afs/prs_fs.h"
42 #include "afs/afs_chunkops.h"
45 #include "afs/afs_stats.h"
46 #include "afs/longc_procs.h"
50 #define BUF_TIME_MAX 0x7fffffff
52 /* number of pages per Unix buffer, when we're using Unix buffer pool */
55 #define AFS_BUFFER_PAGESIZE 2048
58 /* If you change any of this PH stuff, make sure you don't break DZap() */
59 /* use last two bits for page */
61 /* use next five bits for fid */
63 /* page hash table size - this is pretty intertwined with pHash */
64 #define PHSIZE (PHPAGEMASK + PHFIDMASK + 1)
66 #define pHash(fid,page) ((((afs_int32)(fid)) & PHFIDMASK) \
67 | (page & PHPAGEMASK))
70 #undef dirty /* XXX */
73 static struct buffer *Buffers = 0;
74 static char *BufferData;
77 extern struct buf *geteblk();
80 #define timecounter afs_timecounter
82 /* The locks for individual buffer entries are now sometimes obtained while holding the
83 * afs_bufferLock. Thus we now have a locking hierarchy: afs_bufferLock -> Buffers[].lock.
85 static afs_lock_t afs_bufferLock;
86 static struct buffer *phTable[PHSIZE]; /* page hash table */
88 static afs_int32 timecounter;
90 /* Prototypes for static routines */
91 static struct buffer *afs_newslot(struct dcache *adc, afs_int32 apage,
92 register struct buffer *lp);
94 static int dinit_flag = 0;
98 /* Initialize the venus buffer system. */
/* NOTE(review): this is a partial excerpt — the embedded original line
 * numbers jump (98 -> 100, 102 -> 109, ...), so the function signature
 * (presumably DInit(int abuffers)) and several body lines are not visible
 * here. Comments below describe only the visible lines. */
100 register struct buffer *tb;
101 #if defined(AFS_USEBUFFERS)
102 struct buf *tub; /* unix buffer for allocation */
109 #if defined(AFS_USEBUFFERS)
110 /* round up to next multiple of NPB, since we allocate multiple pages per chunk */
111 abuffers = ((abuffers - 1) | (NPB - 1)) + 1;
113 LOCK_INIT(&afs_bufferLock, "afs_bufferLock");
/* Allocate the buffer headers in one contiguous array (Buffers). */
115 (struct buffer *)afs_osi_Alloc(abuffers * sizeof(struct buffer));
116 #if !defined(AFS_USEBUFFERS)
/* When not using the Unix buffer pool, allocate one flat data arena of
 * abuffers pages, AFS_BUFFER_PAGESIZE bytes each. */
117 BufferData = (char *)afs_osi_Alloc(abuffers * AFS_BUFFER_PAGESIZE);
120 afs_stats_cmperf.bufAlloced = nbuffers = abuffers;
/* Clear the page hash table (body of this loop not visible in excerpt). */
121 for (i = 0; i < PHSIZE; i++)
123 for (i = 0; i < abuffers; i++) {
124 #if defined(AFS_USEBUFFERS)
125 if ((i & (NPB - 1)) == 0) {
126 /* time to allocate a fresh buffer */
127 tub = geteblk(AFS_BUFFER_PAGESIZE * NPB);
128 BufferData = (char *)tub->b_un.b_addr;
131 /* Fill in each buffer with an empty indication. */
137 #if defined(AFS_USEBUFFERS)
138 if ((i & (NPB - 1)) == 0)
/* Each header points at its page within the current NPB-page chunk
 * (Unix-buffer case) or within the flat BufferData arena otherwise. */
142 tb->data = &BufferData[AFS_BUFFER_PAGESIZE * (i & (NPB - 1))];
144 tb->data = &BufferData[AFS_BUFFER_PAGESIZE * i];
148 RWLOCK_INIT(&tb->lock, "buffer lock");
/* DRead: return a (locked) buffer holding the given page of the dcache
 * entry adc, reading it from the cache file on a miss.
 * NOTE(review): partial excerpt — embedded original line numbers show
 * missing lines (return statements, the assignment of `code`, closing
 * braces). Comments describe only what is visible. */
154 DRead(register struct dcache *adc, register int page)
156 /* Read a page from the disk. */
157 register struct buffer *tb, *tb2;
158 struct osi_file *tfile;
/* Global buffer lock first; per-buffer locks are taken underneath it
 * (hierarchy documented near afs_bufferLock's declaration). */
162 MObtainWriteLock(&afs_bufferLock, 256);
164 #define bufmatch(tb) (tb->page == page && tb->fid == adc->index)
165 #define buf_Front(head,parent,p) {(parent)->hashNext = (p)->hashNext; (p)->hashNext= *(head);*(head)=(p);}
167 /* this apparently-complicated-looking code is simply an example of
168 * a little bit of loop unrolling, and is a standard linked-list
169 * traversal trick. It saves a few assignments at the expense
170 * of larger code size. This could be simplified by better use of
/* Case 1: head of the hash chain matches — no move-to-front needed. */
173 if ((tb = phTable[pHash(adc->index, page)])) {
175 MObtainWriteLock(&tb->lock, 257);
176 ReleaseWriteLock(&afs_bufferLock);
178 tb->accesstime = timecounter++;
179 AFS_STATS(afs_stats_cmperf.bufHits++);
180 MReleaseWriteLock(&tb->lock);
183 register struct buffer **bufhead;
184 bufhead = &(phTable[pHash(adc->index, page)]);
/* Cases 2 & 3: unrolled chain walk, two nodes per iteration; a hit is
 * moved to the chain head (buf_Front) so the chain stays roughly LRU. */
185 while ((tb2 = tb->hashNext)) {
187 buf_Front(bufhead, tb, tb2);
188 MObtainWriteLock(&tb2->lock, 258);
189 ReleaseWriteLock(&afs_bufferLock);
191 tb2->accesstime = timecounter++;
192 AFS_STATS(afs_stats_cmperf.bufHits++);
193 MReleaseWriteLock(&tb2->lock);
196 if ((tb = tb2->hashNext)) {
198 buf_Front(bufhead, tb2, tb);
199 MObtainWriteLock(&tb->lock, 259);
200 ReleaseWriteLock(&afs_bufferLock);
202 tb->accesstime = timecounter++;
203 AFS_STATS(afs_stats_cmperf.bufHits++);
204 MReleaseWriteLock(&tb->lock);
/* Miss path: recycle a slot and fill it from the cache file. */
214 AFS_STATS(afs_stats_cmperf.bufMisses++);
216 /* The last thing we looked at was either tb or tb2 (or nothing). That
217 * is at least the oldest buffer on one particular hash chain, so it's
218 * a pretty good place to start looking for the truly oldest buffer.
220 tb = afs_newslot(adc, page, (tb ? tb : tb2));
222 MReleaseWriteLock(&afs_bufferLock);
225 MObtainWriteLock(&tb->lock, 260);
226 MReleaseWriteLock(&afs_bufferLock);
/* Page lies beyond the chunk's current size — nothing to read. */
228 if (page * AFS_BUFFER_PAGESIZE >= adc->f.chunkBytes) {
232 MReleaseWriteLock(&tb->lock);
235 tfile = afs_CFileOpen(adc->f.inode);
/* NOTE(review): `code` is tested below but its assignment (presumably
 * code = afs_CFileRead(...)) is on a line missing from this excerpt. */
237 afs_CFileRead(tfile, tb->page * AFS_BUFFER_PAGESIZE, tb->data,
238 AFS_BUFFER_PAGESIZE);
239 afs_CFileClose(tfile);
240 if (code < AFS_BUFFER_PAGESIZE) {
244 MReleaseWriteLock(&tb->lock);
247 /* Note that findslot sets the page field in the buffer equal to
248 * what it is searching for. */
/* (review: "findslot" refers to afs_newslot, called above) */
249 MReleaseWriteLock(&tb->lock);
/* FixupBucket: move buffer ap from its old page-hash chain to the chain
 * for its (current) fid/page, inserting at the front (LRU order).
 * NOTE(review): partial excerpt — the unlink loop body and the
 * initialization of `lp` are on lines missing from this view. */
254 FixupBucket(register struct buffer *ap)
256 register struct buffer **lp, *tp;
258 /* first try to get it out of its current hash bucket, in which it
260 AFS_STATCNT(FixupBucket);
263 for (tp = *lp; tp; tp = tp->hashNext) {
270 /* now figure the new hash bucket */
271 i = pHash(ap->fid, ap->page);
272 ap->hashIndex = i; /* remember where we are for deletion */
273 ap->hashNext = phTable[i]; /* add us to the list */
274 phTable[i] = ap; /* at the front, since it's LRU */
277 /* lp is pointer to a fairly-old buffer */
/* afs_newslot: pick the least-recently-used unlocked buffer, flush it if
 * dirty, and re-label it for (adc, apage). Caller holds afs_bufferLock
 * (per the hierarchy comment near its declaration).
 * NOTE(review): partial excerpt — the dirty test before the flush, the
 * initialization of `lt`, and the return statements are on lines missing
 * from this view. */
278 static struct buffer *
279 afs_newslot(struct dcache *adc, afs_int32 apage, register struct buffer *lp)
281 /* Find a usable buffer slot */
282 register afs_int32 i;
284 register struct buffer *tp;
285 struct osi_file *tfile;
287 AFS_STATCNT(afs_newslot);
288 /* we take a pointer here to a buffer which was at the end of an
289 * LRU hash chain. Odds are, it's one of the older buffers, not
290 * one of the newer. Having an older buffer to start with may
291 * permit us to avoid a few of the assignments in the "typical
292 * case" for loop below.
/* Only an unlocked hint is usable as a starting candidate. */
294 if (lp && (lp->lockers == 0)) {
301 /* timecounter might have wrapped, if machine is very very busy
302 * and stays up for a long time. Timecounter mustn't wrap twice
303 * (positive->negative->positive) before calling newslot, but that
304 * would require 2 billion consecutive cache hits... Anyway, the
305 * penalty is only that the cache replacement policy will be
306 * almost MRU for the next ~2 billion DReads... newslot doesn't
307 * get called nearly as often as DRead, so in order to avoid the
308 * performance penalty of using the hypers, it's worth doing the
309 * extra check here every time. It's probably cheaper than doing
310 * hcmp, anyway. There is a little performance hit resulting from
311 * resetting all the access times to 0, but it only happens once
312 * every month or so, and the access times will rapidly sort
313 * themselves back out after just a few more DReads.
/* Wrap recovery: reset all access times (loop body partly elided). */
315 if (timecounter < 0) {
318 for (i = 0; i < nbuffers; i++, tp++) {
320 if (!lp && !tp->lockers) /* one is as good as the rest, I guess */
324 /* this is the typical case */
/* Linear scan for the unlocked buffer with the smallest accesstime. */
326 for (i = 0; i < nbuffers; i++, tp++) {
327 if (tp->lockers == 0) {
328 if (tp->accesstime < lt) {
337 /* There are no unlocked buffers -- this used to panic, but that
338 * seems extreme. To the best of my knowledge, all the callers
339 * of DRead are prepared to handle a zero return. Some of them
340 * just panic directly, but not all of them. */
341 afs_warn("all buffers locked");
346 /* see DFlush for rationale for not getting and locking the dcache */
/* Write back the victim's old page before recycling it. NOTE(review):
 * the dirty-flag test guarding this write is presumably on a missing
 * line — confirm against the full source. */
347 tfile = afs_CFileOpen(lp->inode);
348 afs_CFileWrite(tfile, lp->page * AFS_BUFFER_PAGESIZE, lp->data,
349 AFS_BUFFER_PAGESIZE);
351 afs_CFileClose(tfile);
352 AFS_STATS(afs_stats_cmperf.bufFlushDirty++);
355 /* Now fill in the header. */
356 lp->fid = adc->index;
357 lp->inode = adc->f.inode;
359 lp->accesstime = timecounter++;
360 FixupBucket(lp); /* move to the right hash bucket */
/* DRelease: release a buffer previously handed out by DRead/DNew, given a
 * pointer into its data page; `flag` marks whether the caller dirtied it.
 * NOTE(review): partial excerpt — the NULL check, the dirty-flag update
 * using `flag`, and the return are on lines missing from this view. */
366 DRelease(register struct buffer *bp, int flag)
368 /* Release a buffer, specifying whether or not the buffer has been
369 * modified by the locker. */
371 #if defined(AFS_USEBUFFERS)
372 register struct buffer *tp;
375 AFS_STATCNT(DRelease);
378 #if defined(AFS_USEBUFFERS)
379 /* look for buffer by scanning Unix buffers for appropriate address */
381 for (index = 0; index < nbuffers; index += NPB, tp += NPB) {
382 if ((afs_int32) bp >= (afs_int32) tp->data
384 (afs_int32) tp->data + AFS_BUFFER_PAGESIZE * NPB) {
385 /* we found the right range */
386 index += ((afs_int32) bp - (afs_int32) tp->data) >> LOGPS;
/* Non-Unix-buffer case: data pages are contiguous in BufferData, so the
 * header index is just the byte offset shifted by the page-size log. */
391 index = (((char *)bp) - ((char *)BufferData)) >> LOGPS;
/* Map the data pointer back to its buffer header before locking it. */
393 bp = &(Buffers[index]);
394 MObtainWriteLock(&bp->lock, 261);
398 MReleaseWriteLock(&bp->lock);
/* DVOffset: translate a pointer into a buffer's data page back to the
 * byte offset within the cached file that it represents.
 * NOTE(review): partial excerpt — the error return for the out-of-range
 * index (line after the check below) is missing from this view. */
402 DVOffset(register void *ap)
404 /* Return the byte within a file represented by a buffer pointer. */
405 register struct buffer *bp;
407 #if defined(AFS_USEBUFFERS)
408 register struct buffer *tp;
410 AFS_STATCNT(DVOffset);
412 #if defined(AFS_USEBUFFERS)
413 /* look for buffer by scanning Unix buffers for appropriate address */
415 for (index = 0; index < nbuffers; index += NPB, tp += NPB) {
416 if ((afs_int32) bp >= (afs_int32) tp->data
418 (afs_int32) tp->data + AFS_BUFFER_PAGESIZE * NPB) {
419 /* we found the right range */
420 index += ((afs_int32) bp - (afs_int32) tp->data) >> LOGPS;
/* Same pointer->header-index mapping as DRelease. */
425 index = (((char *)bp) - ((char *)BufferData)) >> LOGPS;
/* Reject pointers outside the buffer arena. */
427 if (index < 0 || index >= nbuffers)
429 bp = &(Buffers[index]);
/* File offset = page base + offset of ap within the page. */
430 return AFS_BUFFER_PAGESIZE * bp->page + (int)(((char *)ap) - bp->data);
433 /* 1/1/91 - I've modified the hash function to take the page as well
434 * as the *fid, so that lookup will be a bit faster. That presents some
435 * difficulties for Zap, which now has to have some knowledge of the nature
436 * of the hash function. Oh well. This should use the list traversal
/* DZap: invalidate every cached buffer belonging to dcache entry adc.
 * Because pHash mixes the page into the bucket index, only the buckets
 * reachable for pages 0..PHPAGEMASK need scanning (pHash folds higher
 * pages into the same PHPAGEMASK range).
 * NOTE(review): partial excerpt — the statements that actually mark each
 * matching buffer invalid (between the lock/unlock below) are missing. */
440 DZap(struct dcache *adc)
443 /* Destroy all buffers pertaining to a particular fid. */
444 register struct buffer *tb;
447 MObtainReadLock(&afs_bufferLock);
449 for (i = 0; i <= PHPAGEMASK; i++)
450 for (tb = phTable[pHash(adc->index, i)]; tb; tb = tb->hashNext)
451 if (tb->fid == adc->index) {
452 MObtainWriteLock(&tb->lock, 262);
456 MReleaseWriteLock(&tb->lock);
458 MReleaseReadLock(&afs_bufferLock);
/* DFlush: write every dirty buffer back to its cache file.
 * NOTE(review): partial excerpt — the function signature (presumably
 * DFlush(void)) and the dirty test guarding the write are on lines
 * missing from this view. */
464 /* Flush all the modified buffers. */
466 register struct buffer *tb;
467 struct osi_file *tfile;
471 MObtainReadLock(&afs_bufferLock);
472 for (i = 0; i < nbuffers; i++, tb++) {
474 MObtainWriteLock(&tb->lock, 263);
/* Drop the global lock for the duration of the I/O; tb->lock alone
 * protects this buffer's data (see rationale below). */
476 MReleaseReadLock(&afs_bufferLock);
478 /* it seems safe to do this I/O without having the dcache
479 * locked, since the only things that will update the data in
480 * a directory are the buffer package, which holds the relevant
481 * tb->lock while doing the write, or afs_GetDCache, which
482 * DZap's the directory while holding the dcache lock.
483 * It is not possible to lock the dcache or even call
484 * afs_GetDSlot to map the index to the dcache since the dir
485 * package's caller has some dcache object locked already (so
486 * we cannot lock afs_xdcache). In addition, we cannot obtain
487 * a dcache lock while holding the tb->lock of the same file
488 * since that can deadlock with DRead/DNew */
489 tfile = afs_CFileOpen(tb->inode);
490 afs_CFileWrite(tfile, tb->page * AFS_BUFFER_PAGESIZE,
491 tb->data, AFS_BUFFER_PAGESIZE);
492 tb->dirty = 0; /* Clear the dirty flag */
493 afs_CFileClose(tfile);
/* Reacquire the global lock before advancing to the next buffer. */
496 MReleaseWriteLock(&tb->lock);
497 MObtainReadLock(&afs_bufferLock);
500 MReleaseReadLock(&afs_bufferLock);
/* DNew: allocate a (locked) buffer for a brand-new page of adc without
 * reading any existing data — the page is assumed not to exist on disk yet.
 * NOTE(review): partial excerpt — the return statements and the
 * initialization of the new buffer between the lock calls below are on
 * lines missing from this view. */
504 DNew(register struct dcache *adc, register int page)
506 /* Same as read, only do *not* even try to read the page, since it probably doesn't exist. */
507 register struct buffer *tb;
509 MObtainWriteLock(&afs_bufferLock, 264);
/* afs_newslot can return 0 when every buffer is locked (see its body). */
510 if ((tb = afs_newslot(adc, page, NULL)) == 0) {
511 MReleaseWriteLock(&afs_bufferLock);
514 /* extend the chunk, if needed */
515 /* Do it now, not in DFlush or afs_newslot when the data is written out,
516 * since now our caller has adc->lock writelocked, and we can't acquire
517 * that lock (or even map from a fid to a dcache) in afs_newslot or
518 * DFlush due to lock hierarchy issues */
519 if ((page + 1) * AFS_BUFFER_PAGESIZE > adc->f.chunkBytes) {
520 afs_AdjustSize(adc, (page + 1) * AFS_BUFFER_PAGESIZE);
521 afs_WriteDCache(adc, 1);
523 MObtainWriteLock(&tb->lock, 265);
524 MReleaseWriteLock(&afs_bufferLock);
526 MReleaseWriteLock(&tb->lock);
/* shutdown_bufferpackage: on a cold shutdown, free the buffer data arena
 * (or release the Unix buffers), free the header array, clear the page
 * hash table, and wipe the global lock so the package can be re-DInit'ed.
 * NOTE(review): partial excerpt — counter resets, the Unix-buffer release
 * calls, and several loop bodies are on lines missing from this view. */
531 shutdown_bufferpackage(void)
533 #if defined(AFS_USEBUFFERS)
534 register struct buffer *tp;
537 extern int afs_cold_shutdown;
539 AFS_STATCNT(shutdown_bufferpackage);
540 /* Free all allocated Buffers and associated buffer pages */
542 if (afs_cold_shutdown) {
544 #if !defined(AFS_USEBUFFERS)
545 afs_osi_Free(BufferData, nbuffers * AFS_BUFFER_PAGESIZE);
548 for (i = 0; i < nbuffers; i += NPB, tp += NPB) {
549 /* The following check shouldn't be necessary and it will be removed soon */
552 ("shutdown_bufferpackage: bufp == 0!! Shouldn't happen\n");
559 afs_osi_Free(Buffers, nbuffers * sizeof(struct buffer));
/* Empty every page-hash chain (loop body not visible in excerpt). */
562 for (i = 0; i < PHSIZE; i++)
/* Zero the lock structure itself; a later DInit re-runs LOCK_INIT. */
564 memset((char *)&afs_bufferLock, 0, sizeof(afs_lock_t));