/*
 * Copyright 2000, International Business Machines Corporation and others.
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
/* Copyright (C) 1994 Cazamar Systems, Inc. */

#include <afs/param.h>
#include "cm_memmap.h"

#define TRACE_BUFFER 1

extern void afsi_log(char *pattern, ...);

/* This module implements the buffer package used by the local transaction
 * system (cm).  It is initialized by calling cm_Init, which calls buf_Init;
 * it must be initialized before any of its main routines are called.
 *
 * Each buffer is hashed into a hash table by file ID and offset, and if its
 * reference count is zero, it is also in a free list.
 *
 * There are two locks involved in buffer processing.  The global lock
 * buf_globalLock protects all of the global variables defined in this module,
 * the reference counts and hash pointers in the actual cm_buf_t structures,
 * and the LRU queue pointers in the buffer structures.
 *
 * The mutexes in the buffer structures protect the remaining fields in the
 * buffers, as well as the data itself.
 *
 * The locking hierarchy here is this:
 *
 * - resv multiple simul. buffers reservation
 * - lock buffer I/O flags
 * - lock buffer's mutex
 * - lock buf_globalLock
 */
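/*
 * Illustrative sketch (not part of the original source): taking locks in
 * hierarchy order.  The BUF_EXAMPLES guard and the function itself are
 * invented for illustration; buf_ReserveBuffers, lock_ObtainMutex and
 * buf_globalLock are the real primitives used throughout this module.
 * The I/O-flags level of the hierarchy is represented in the code by the
 * CM_BUF_READING/CM_BUF_WRITING flags rather than by a lock object.
 */
#ifdef BUF_EXAMPLES
static void buf_ExampleLockOrder(cm_buf_t *bp)
{
    buf_ReserveBuffers(1);                  /* 1: buffer reservation */
    lock_ObtainMutex(&bp->mx);              /* 2: the buffer's own mutex */
    lock_ObtainRead(&buf_globalLock);       /* 3: buf_globalLock is taken last */
    /* ... examine refCount, hash pointers, LRU queue pointers ... */
    lock_ReleaseRead(&buf_globalLock);
    lock_ReleaseMutex(&bp->mx);
    buf_UnreserveBuffers(1);
}
#endif /* BUF_EXAMPLES */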
/* global debugging log */
osi_log_t *buf_logp = NULL;

/* Global lock protecting hash tables and free lists */
osi_rwlock_t buf_globalLock;

/* ptr to head of the free list (most recently used) and the
 * tail (the guy to remove first).  We use osi_Q* functions
 * to put stuff in buf_freeListp, and maintain the end
 * pointer manually.
 */

/* a pointer to a list of all buffers, just so that we can find them
 * easily for debugging, and for the incr syncer.  Locked under
 * the buf_globalLock.
 */

/* defaults setup; these variables may be assigned manually
 * before calling cm_Init, as a way of changing these defaults.
 */

/* callouts for reading and writing data, etc */
cm_buf_ops_t *cm_buf_opsp;

#ifdef DISKCACHE95
/* for experimental disk caching support in Win95 client */
cm_buf_t *buf_diskFreeListp;
cm_buf_t *buf_diskFreeListEndp;
cm_buf_t *buf_diskAllp;
extern int cm_diskCacheEnabled;
#endif /* DISKCACHE95 */

/* set this to 1 when we are terminating to prevent access attempts */
static int buf_ShutdownFlag = 0;

#ifdef DEBUG_REFCOUNT
void buf_HoldLockedDbg(cm_buf_t *bp, char *file, long line)
#else
void buf_HoldLocked(cm_buf_t *bp)
#endif
    osi_assertx(bp->magic == CM_BUF_MAGIC, "incorrect cm_buf_t magic");
    refCount = InterlockedIncrement(&bp->refCount);
#ifdef DEBUG_REFCOUNT
    osi_Log2(afsd_logp, "buf_HoldLocked bp 0x%p ref %d", bp, refCount);
    afsi_log("%s:%d buf_HoldLocked bp 0x%p, ref %d", file, line, bp, refCount);
#endif

/* hold a reference to an already held buffer */
#ifdef DEBUG_REFCOUNT
void buf_HoldDbg(cm_buf_t *bp, char *file, long line)
#else
void buf_Hold(cm_buf_t *bp)
#endif
    lock_ObtainRead(&buf_globalLock);
    osi_assertx(bp->magic == CM_BUF_MAGIC, "incorrect cm_buf_t magic");
    refCount = InterlockedIncrement(&bp->refCount);
#ifdef DEBUG_REFCOUNT
    osi_Log2(afsd_logp, "buf_Hold bp 0x%p ref %d", bp, refCount);
    afsi_log("%s:%d buf_Hold bp 0x%p, ref %d", file, line, bp, refCount);
#endif
    lock_ReleaseRead(&buf_globalLock);
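/* Usage sketch (hypothetical caller; not in the original source): the
 * hold/release discipline.  buf_Hold adds a reference to an already
 * referenced buffer, which keeps it off the free list; the buffer's
 * mutex is still needed before its contents may be touched:
 *
 *     buf_Hold(bp);                   -- bp already referenced, e.g. by buf_Get
 *     lock_ObtainMutex(&bp->mx);
 *     ... read or modify bp->datap ...
 *     lock_ReleaseMutex(&bp->mx);
 *     buf_Release(bp);                -- may put bp back on the LRU queue
 */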
/* code to drop reference count while holding buf_globalLock */
#ifdef DEBUG_REFCOUNT
void buf_ReleaseLockedDbg(cm_buf_t *bp, afs_uint32 writeLocked, char *file, long line)
#else
void buf_ReleaseLocked(cm_buf_t *bp, afs_uint32 writeLocked)
#endif
    if (writeLocked)
        lock_AssertWrite(&buf_globalLock);
    else
        lock_AssertRead(&buf_globalLock);

    /* ensure that we're in the LRU queue if our ref count is 0 */
    osi_assertx(bp->magic == CM_BUF_MAGIC, "incorrect cm_buf_t magic");

    refCount = InterlockedDecrement(&bp->refCount);
#ifdef DEBUG_REFCOUNT
    osi_Log3(afsd_logp, "buf_ReleaseLocked %s bp 0x%p ref %d", writeLocked ? "write" : "read", bp, refCount);
    afsi_log("%s:%d buf_ReleaseLocked %s bp 0x%p, ref %d", file, line, writeLocked ? "write" : "read", bp, refCount);
#endif
    if (refCount < 0)
        osi_panic("buf refcount 0", __FILE__, __LINE__);

    osi_assertx(refCount >= 0, "cm_buf_t refCount < 0");

        /*
         * If we are read locked there could be a race condition
         * with buf_Find() so we must obtain a write lock and
         * double check that the refCount is actually zero
         * before we remove the buffer from the LRU queue.
         */
        if (!writeLocked)
            lock_ConvertRToW(&buf_globalLock);

        if (bp->refCount == 0 &&
            !(bp->flags & CM_BUF_INLRU)) {
            osi_QAdd((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);

            /* watch for transition from empty to one element */
            if (!cm_data.buf_freeListEndp)
                cm_data.buf_freeListEndp = cm_data.buf_freeListp;
            bp->flags |= CM_BUF_INLRU;
        }

        if (!writeLocked)
            lock_ConvertWToR(&buf_globalLock);

/* release a buffer.  Buffer must be referenced, but unlocked. */
#ifdef DEBUG_REFCOUNT
void buf_ReleaseDbg(cm_buf_t *bp, char *file, long line)
#else
void buf_Release(cm_buf_t *bp)
#endif
    /* ensure that we're in the LRU queue if our ref count is 0 */
    osi_assertx(bp->magic == CM_BUF_MAGIC, "incorrect cm_buf_t magic");

    refCount = InterlockedDecrement(&bp->refCount);
#ifdef DEBUG_REFCOUNT
    osi_Log2(afsd_logp, "buf_Release bp 0x%p ref %d", bp, refCount);
    afsi_log("%s:%d buf_Release bp 0x%p, ref %d", file, line, bp, refCount);
#endif
    if (refCount < 0)
        osi_panic("buf refcount 0", __FILE__, __LINE__);

    osi_assertx(refCount >= 0, "cm_buf_t refCount < 0");

        lock_ObtainWrite(&buf_globalLock);
        if (bp->refCount == 0 &&
            !(bp->flags & CM_BUF_INLRU)) {
            osi_QAdd((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);

            /* watch for transition from empty to one element */
            if (!cm_data.buf_freeListEndp)
                cm_data.buf_freeListEndp = cm_data.buf_freeListp;
            bp->flags |= CM_BUF_INLRU;
        }
        lock_ReleaseWrite(&buf_globalLock);

/* incremental sync daemon.  Writes all dirty buffers every 5000 ms */
void buf_IncrSyncer(long parm)
    cm_buf_t **bpp, *bp, *prevbp;
    long i;                             /* counter */

    while (buf_ShutdownFlag == 0) {
        i = SleepEx(5000, 1);
        if (i != 0)
            continue;

        /* go through all of the dirty buffers */
        lock_ObtainRead(&buf_globalLock);
        for (bpp = &cm_data.buf_dirtyListp, prevbp = NULL; (bp = *bpp) != NULL; ) {
            lock_ReleaseRead(&buf_globalLock);
            /* all dirty buffers are held when they are added to the
             * dirty list.  No need for an additional hold.
             */
            lock_ObtainMutex(&bp->mx);

            if (bp->flags & CM_BUF_DIRTY) {
                /* start cleaning the buffer; don't touch log pages since
                 * the log code counts on knowing exactly who is writing
                 * a log page at any given instant.
                 */
                req.flags |= CM_REQ_NORETRY;
                wasDirty |= buf_CleanAsyncLocked(bp, &req);

            /* the buffer may or may not have been dirty
             * and if dirty may or may not have been cleaned
             * successfully.  check the dirty flag again.
             */
            if (!(bp->flags & CM_BUF_DIRTY)) {
                /* remove the buffer from the dirty list */
                lock_ObtainWrite(&buf_globalLock);
#ifdef DEBUG_REFCOUNT
                if (bp->dirtyp == NULL && bp != cm_data.buf_dirtyListEndp) {
                    osi_Log1(afsd_logp, "buf_IncrSyncer bp 0x%p list corruption", bp);
                    afsi_log("buf_IncrSyncer bp 0x%p list corruption", bp);
                }
#endif
                bp->flags &= ~CM_BUF_INDL;
                if (cm_data.buf_dirtyListp == NULL)
                    cm_data.buf_dirtyListEndp = NULL;
                else if (cm_data.buf_dirtyListEndp == bp)
                    cm_data.buf_dirtyListEndp = prevbp;
                buf_ReleaseLocked(bp, TRUE);
                lock_ConvertWToR(&buf_globalLock);

                /* advance the pointer so we don't loop forever */
                lock_ObtainRead(&buf_globalLock);

            lock_ReleaseMutex(&bp->mx);
        }       /* for loop over a bunch of buffers */
        lock_ReleaseRead(&buf_globalLock);
    }           /* whole daemon's while loop */
buf_ValidateBuffers(void)
    cm_buf_t *bp, *bpf, *bpa, *bpb;
    afs_uint64 countb = 0, countf = 0, counta = 0;

    if ((cm_data.buf_freeListp == NULL && cm_data.buf_freeListEndp != NULL) ||
        (cm_data.buf_freeListp != NULL && cm_data.buf_freeListEndp == NULL)) {
        afsi_log("cm_ValidateBuffers failure: inconsistent free list pointers");
        fprintf(stderr, "cm_ValidateBuffers failure: inconsistent free list pointers\n");
    }

    for (bp = cm_data.buf_freeListEndp; bp; bp = (cm_buf_t *) osi_QPrev(&bp->q)) {
        if (bp->magic != CM_BUF_MAGIC) {
            afsi_log("cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
            fprintf(stderr, "cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
        }
        countb++;
        bpb = bp;
    }

    if (countb > cm_data.buf_nbuffers) {
        afsi_log("cm_ValidateBuffers failure: countb > cm_data.buf_nbuffers");
        fprintf(stderr, "cm_ValidateBuffers failure: countb > cm_data.buf_nbuffers\n");
    }

    for (bp = cm_data.buf_freeListp; bp; bp = (cm_buf_t *) osi_QNext(&bp->q)) {
        if (bp->magic != CM_BUF_MAGIC) {
            afsi_log("cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
            fprintf(stderr, "cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
        }
        countf++;
        bpf = bp;
    }

    if (countf > cm_data.buf_nbuffers) {
        afsi_log("cm_ValidateBuffers failure: countf > cm_data.buf_nbuffers");
        fprintf(stderr, "cm_ValidateBuffers failure: countf > cm_data.buf_nbuffers\n");
    }

    for (bp = cm_data.buf_allp; bp; bp = bp->allp) {
        if (bp->magic != CM_BUF_MAGIC) {
            afsi_log("cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
            fprintf(stderr, "cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
        }
        counta++;
        bpa = bp;
    }

    if (counta > cm_data.buf_nbuffers) {
        afsi_log("cm_ValidateBuffers failure: counta > cm_data.buf_nbuffers");
        fprintf(stderr, "cm_ValidateBuffers failure: counta > cm_data.buf_nbuffers\n");
    }

    if (countb != countf) {
        afsi_log("cm_ValidateBuffers failure: countb != countf");
        fprintf(stderr, "cm_ValidateBuffers failure: countb != countf\n");
    }

    if (counta != cm_data.buf_nbuffers) {
        afsi_log("cm_ValidateBuffers failure: counta != cm_data.buf_nbuffers");
        fprintf(stderr, "cm_ValidateBuffers failure: counta != cm_data.buf_nbuffers\n");
    }

void buf_Shutdown(void)
    buf_ShutdownFlag = 1;

/* initialize the buffer package; called with no locks
 * held during the initialization phase.
 */
long buf_Init(int newFile, cm_buf_ops_t *opsp, afs_uint64 nbuffers)
    static osi_once_t once;

    cm_data.buf_nbuffers = nbuffers;

    /* Have to be able to reserve a whole chunk */
    if (((cm_data.buf_nbuffers - 3) * cm_data.buf_blockSize) < cm_chunkSize)
        return CM_ERROR_TOOFEWBUFS;
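    /* Worked example (illustrative values, not defaults mandated here):
     * with a 4 KB buf_blockSize and a 1 MB cm_chunkSize,
     * (nbuffers - 3) * 4096 must reach 1048576, so at least
     * 256 + 3 = 259 buffers are required.
     */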
    /* recall for callouts */

    if (osi_Once(&once)) {
        /* initialize global locks */
        lock_InitializeRWLock(&buf_globalLock, "Global buffer lock");

        /* remember this for those who want to reset it */
        cm_data.buf_nOrigBuffers = cm_data.buf_nbuffers;

        /* lower hash size to a prime number */
        cm_data.buf_hashSize = osi_PrimeLessThan((afs_uint32)(cm_data.buf_nbuffers / 7 + 1));

        /* create hash table */
        memset((void *)cm_data.buf_scacheHashTablepp, 0, cm_data.buf_hashSize * sizeof(cm_buf_t *));

        /* another hash table */
        memset((void *)cm_data.buf_fileHashTablepp, 0, cm_data.buf_hashSize * sizeof(cm_buf_t *));

        /* create buffer headers and put in free list */
        bp = cm_data.bufHeaderBaseAddress;
        data = cm_data.bufDataBaseAddress;
        cm_data.buf_allp = NULL;

        for (i = 0; i < cm_data.buf_nbuffers; i++) {
            osi_assertx(bp >= cm_data.bufHeaderBaseAddress && bp < (cm_buf_t *)cm_data.bufDataBaseAddress,
                        "invalid cm_buf_t address");
            osi_assertx(data >= cm_data.bufDataBaseAddress && data < cm_data.bufEndOfData,
                        "invalid cm_buf_t data address");

            /* allocate and zero some storage */
            memset(bp, 0, sizeof(cm_buf_t));
            bp->magic = CM_BUF_MAGIC;
            /* thread on list of all buffers */
            bp->allp = cm_data.buf_allp;
            cm_data.buf_allp = bp;

            osi_QAdd((osi_queue_t **)&cm_data.buf_freeListp, &bp->q);
            bp->flags |= CM_BUF_INLRU;
            lock_InitializeMutex(&bp->mx, "Buffer mutex");

            /* grab appropriate number of bytes from aligned zone */

            /* setup last buffer pointer */
                cm_data.buf_freeListEndp = bp;

            data += cm_data.buf_blockSize;
        }

        /* none reserved at first */
        cm_data.buf_reservedBufs = 0;

        /* just for safety's sake */
        cm_data.buf_maxReservedBufs = cm_data.buf_nbuffers - 3;

        bp = cm_data.bufHeaderBaseAddress;
        data = cm_data.bufDataBaseAddress;

        for (i = 0; i < cm_data.buf_nbuffers; i++) {
            lock_InitializeMutex(&bp->mx, "Buffer mutex");
            bp->waitRequests = 0;
            bp->flags &= ~CM_BUF_WAITING;
        }

#ifdef TESTING
        buf_ValidateBufQueues();
#endif /* TESTING */

    /* init the buffer trace log */
    buf_logp = osi_LogCreate("buffer", 1000);
    osi_LogEnable(buf_logp);

    /* and create the incr-syncer */
    phandle = thrd_Create(0, 0,
                          (ThreadFunc) buf_IncrSyncer, 0, 0, &pid,
                          "buf_IncrSyncer");

    osi_assertx(phandle != NULL, "buf: can't create incremental sync proc");
    CloseHandle(phandle);

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */
    return 0;
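    /* Illustrative call (hypothetical values): a caller such as cm_Init,
     * mentioned in the header comment, would bring the package up with
     * something like
     *
     *     code = buf_Init(newFile, &cm_bufOps, nbuffers);
     *
     * where cm_bufOps is an invented name for the cm_buf_ops_t carrying
     * the Readp/Writep/Stabilizep/Unstabilizep callouts used in this
     * module, and nbuffers satisfies the chunk-reservation check above.
     */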
/* add nbuffers to the buffer pool, if possible.
 * Called with no locks held.
 */
long buf_AddBuffers(afs_uint64 nbuffers)
    /* The size of a virtual cache cannot be changed after it has
     * been created.  Subsequent calls to MapViewOfFile() with
     * an existing mapping object name would not allow the
     * object to be resized.  Return failure immediately.
     *
     * A similar problem now occurs with the persistent cache
     * given that the memory mapped file now contains a complex
     * data structure.
     */
    afsi_log("request to add %I64d buffers to the existing cache of size %I64d denied",
             nbuffers, cm_data.buf_nbuffers);

    return CM_ERROR_INVAL;

/* interface to set the number of buffers to an exact figure.
 * Called with no locks held.
 */
long buf_SetNBuffers(afs_uint64 nbuffers)
        return CM_ERROR_INVAL;
    if (nbuffers == cm_data.buf_nbuffers)
        return 0;
    else if (nbuffers > cm_data.buf_nbuffers)
        return buf_AddBuffers(nbuffers - cm_data.buf_nbuffers);
    else
        return CM_ERROR_INVAL;

/* wait for reading or writing to clear; called with write-locked
 * buffer and unlocked scp and returns with locked buffer.
 */
void buf_WaitIO(cm_scache_t *scp, cm_buf_t *bp)
    osi_assertx(scp->magic == CM_SCACHE_MAGIC, "invalid cm_scache_t magic");
    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");

        /* if no IO is happening, we're done */
        if (!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)))

        /* otherwise I/O is happening, but some other thread is waiting for
         * the I/O already.  Wait for that guy to figure out what happened,
         * and then check again.
         */
        if (bp->flags & CM_BUF_WAITING) {
            osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING already set for 0x%p", bp);

            osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING set for 0x%p", bp);
            bp->flags |= CM_BUF_WAITING;
            bp->waitCount = bp->waitRequests = 1;

        osi_SleepM((LONG_PTR)bp, &bp->mx);

        smb_UpdateServerPriority();

        lock_ObtainMutex(&bp->mx);
        osi_Log1(buf_logp, "buf_WaitIO conflict wait done for 0x%p", bp);

        if (bp->waitCount == 0) {
            osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING reset for 0x%p", bp);
            bp->flags &= ~CM_BUF_WAITING;
            bp->waitRequests = 0;
        }

        if ((scp = cm_FindSCache(&bp->fid)) != NULL) {
            lock_ObtainRead(&scp->rw);
            if (scp->flags & CM_SCACHEFLAG_WAITING) {
                osi_Log1(buf_logp, "buf_WaitIO waking scp 0x%p", scp);
                osi_Wakeup((LONG_PTR)&scp->flags);
            }
            lock_ReleaseRead(&scp->rw);
        }

    /* if we get here, the IO is done, but we may have to wakeup people waiting for
     * the I/O to complete.  Do so.
     */
    if (bp->flags & CM_BUF_WAITING) {
        osi_Log1(buf_logp, "buf_WaitIO Waking bp 0x%p", bp);
        osi_Wakeup((LONG_PTR) bp);
    }
    osi_Log1(buf_logp, "WaitIO finished wait for bp 0x%p", bp);

    cm_ReleaseSCache(scp);
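/* Sketch of the waker's half of the CM_BUF_WAITING handshake that
 * buf_WaitIO relies on (hypothetical completion path; the pattern
 * mirrors what buf_CleanAsyncLocked and buf_Get actually do below):
 *
 *     lock_ObtainMutex(&bp->mx);
 *     bp->flags &= ~(CM_BUF_READING | CM_BUF_WRITING);
 *     if (bp->flags & CM_BUF_WAITING)
 *         osi_Wakeup((LONG_PTR) bp);      -- rouses threads asleep in buf_WaitIO
 *     lock_ReleaseMutex(&bp->mx);
 */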
/* find a buffer, if any, for a particular file ID and offset.  Assumes
 * that buf_globalLock is at least read locked when called.
 */
cm_buf_t *buf_FindLocked(struct cm_scache *scp, osi_hyper_t *offsetp)
    i = BUF_HASH(&scp->fid, offsetp);
    for (bp = cm_data.buf_scacheHashTablepp[i]; bp; bp = bp->hashp) {
        if (cm_FidCmp(&scp->fid, &bp->fid) == 0
            && offsetp->LowPart == bp->offset.LowPart
            && offsetp->HighPart == bp->offset.HighPart) {
            break;
        }
    }

    /* return whatever we found, if anything */
    return bp;

/* find a buffer with offset *offsetp for vnode *scp.  Called
 * with no locks held.
 */
cm_buf_t *buf_Find(struct cm_scache *scp, osi_hyper_t *offsetp)
    lock_ObtainRead(&buf_globalLock);
    bp = buf_FindLocked(scp, offsetp);
    lock_ReleaseRead(&buf_globalLock);

    return bp;

/* start cleaning I/O on this buffer.  Buffer must be write locked, and is
 * returned locked.
 *
 * Makes sure that there's only one person writing this block
 * at any given time, and also ensures that the log is forced sufficiently far,
 * if this buffer contains logged data.
 *
 * Returns non-zero if the buffer was dirty.
 */
long buf_CleanAsyncLocked(cm_buf_t *bp, cm_req_t *reqp)
    cm_scache_t *scp = NULL;

    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");

    while ((bp->flags & CM_BUF_DIRTY) == CM_BUF_DIRTY) {
        lock_ReleaseMutex(&bp->mx);

        scp = cm_FindSCache(&bp->fid);
            osi_Log2(buf_logp, "buf_CleanAsyncLocked starts I/O on scp 0x%p buf 0x%p", scp, bp);

            offset = LargeIntegerAdd(offset, ConvertLongToLargeInteger(bp->dirty_offset));
            code = (*cm_buf_opsp->Writep)(scp, &offset,
            /* we might as well try to write all of the contiguous
             * dirty buffers in one RPC
             */

            osi_Log3(buf_logp, "buf_CleanAsyncLocked I/O on scp 0x%p buf 0x%p, done=%d", scp, bp, code);

            cm_ReleaseSCache(scp);

            osi_Log1(buf_logp, "buf_CleanAsyncLocked unable to start I/O - scp not found buf 0x%p", bp);
            code = CM_ERROR_NOSUCHFILE;

        lock_ObtainMutex(&bp->mx);
        /* if the Write routine returns No Such File, clear the dirty flag
         * because we aren't going to be able to write this data to the file
         * server.
         */
        if (code == CM_ERROR_NOSUCHFILE || code == CM_ERROR_BADFD) {
            bp->flags &= ~CM_BUF_DIRTY;
            bp->flags |= CM_BUF_ERROR;
            bp->dirty_offset = 0;
            bp->dirty_length = 0;
            bp->dataVersion = CM_BUF_VERSION_BAD;       /* bad */
#ifdef DISKCACHE95
        /* Disk cache support */
        /* write buffer to disk cache (synchronous for now) */
        diskcache_Update(bp->dcp, bp->datap, cm_data.buf_blockSize, bp->dataVersion);
#endif /* DISKCACHE95 */

        /* if we get here and retries are not permitted
         * then we need to exit this loop regardless of
         * whether or not we were able to clear the dirty bit
         */
        if (reqp->flags & CM_REQ_NORETRY)
            break;

    if (!(bp->flags & CM_BUF_DIRTY)) {
        /* remove buffer from dirty buffer queue */

    /* do logging after call to GetLastError, or else the log call
     * may overwrite the error code.
     */

    /* if someone was waiting for the I/O that just completed or failed,
     * wake them up.
     */
    if (bp->flags & CM_BUF_WAITING) {
        /* turn off flags and wakeup users */
        osi_Log1(buf_logp, "buf_WaitIO Waking bp 0x%p", bp);
        osi_Wakeup((LONG_PTR) bp);
    }

/* Called with a zero-ref count buffer and with the buf_globalLock write locked.
 * recycles the buffer, and leaves it ready for reuse with a ref count of 1.
 * The buffer must already be clean, and no I/O should be happening to it.
 */
void buf_Recycle(cm_buf_t *bp)
    cm_buf_t *prevBp, *nextBp;

    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");

    /* if we get here, we know that the buffer still has a 0 ref count,
     * and that it is clean and has no currently pending I/O.  This is
     * the dude to return.
     * Remember that as long as the ref count is 0, we know that we won't
     * have any lock conflicts, so we can grab the buffer lock out of
     * order in the locking hierarchy.
     */
    osi_Log3(buf_logp, "buf_Recycle recycles 0x%p, off 0x%x:%08x",
             bp, bp->offset.HighPart, bp->offset.LowPart);

    osi_assertx(bp->refCount == 0, "cm_buf_t refcount != 0");
    osi_assertx(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)),
                "incorrect cm_buf_t flags");
    lock_AssertWrite(&buf_globalLock);

    if (bp->flags & CM_BUF_INHASH) {
        /* Remove from hash */

        i = BUF_HASH(&bp->fid, &bp->offset);
        lbpp = &(cm_data.buf_scacheHashTablepp[i]);
        for (tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) {
            if (tbp == bp)
                break;
        }

        /* we better find it */
        osi_assertx(tbp != NULL, "buf_Recycle: hash table screwup");

        *lbpp = bp->hashp;      /* hash out */

        /* Remove from file hash */

        i = BUF_FILEHASH(&bp->fid);
        prevBp = bp->fileHashBackp;
        bp->fileHashBackp = NULL;
        nextBp = bp->fileHashp;
        bp->fileHashp = NULL;
        if (prevBp)
            prevBp->fileHashp = nextBp;
        else
            cm_data.buf_fileHashTablepp[i] = nextBp;
        if (nextBp)
            nextBp->fileHashBackp = prevBp;

        bp->flags &= ~CM_BUF_INHASH;
    }

    /* make the fid unrecognizable */
    memset(&bp->fid, 0, sizeof(cm_fid_t));
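    /* The hash unlink above uses the pointer-to-pointer idiom, which
     * needs no special case for the chain head.  A minimal standalone
     * sketch (generic node type invented for illustration):
     *
     *     struct node { struct node *next; };
     *
     *     void list_unlink(struct node **headp, struct node *victim)
     *     {
     *         struct node **lpp;
     *         for (lpp = headp; *lpp; lpp = &(*lpp)->next) {
     *             if (*lpp == victim) {
     *                 *lpp = victim->next;   -- works for head or interior
     *                 break;
     *             }
     *         }
     *     }
     */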
/* recycle a buffer, removing it from the free list, hashing in its new identity
 * and returning it write-locked so that no one can use it.  Called without
 * any locks held, and can return an error if it loses the race
 * and finds that someone else created the desired buffer.
 *
 * If success is returned, the buffer is returned write-locked.
 *
 * May be called with null scp and offsetp, if we're just trying to reclaim some
 * space from the buffer pool.  In that case, the buffer will be returned
 * without being hashed into the hash table.
 */
long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
    cm_buf_t *bp;               /* buffer we're dealing with */
    cm_buf_t *nextBp;           /* next buffer in file hash chain */
    afs_uint32 i;               /* temp */

    cm_InitReq(&req);           /* just in case */

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */

        lock_ObtainRead(&scp->bufCreateLock);
        lock_ObtainWrite(&buf_globalLock);
        /* check to see if we lost the race */
        if ((bp = buf_FindLocked(scp, offsetp)) != NULL) {
            /* Do not call buf_ReleaseLocked() because we
             * do not want to allow the buffer to be added
             * to the free list.
             */
            afs_int32 refCount = InterlockedDecrement(&bp->refCount);
#ifdef DEBUG_REFCOUNT
            osi_Log2(afsd_logp, "buf_GetNewLocked bp 0x%p ref %d", bp, refCount);
            afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, refCount);
#endif
            lock_ReleaseWrite(&buf_globalLock);
            lock_ReleaseRead(&scp->bufCreateLock);
            return CM_BUF_EXISTS;
        }

        /* does this fix the problem below?  it's a simple solution. */
        if (!cm_data.buf_freeListEndp) {
            lock_ReleaseWrite(&buf_globalLock);
            lock_ReleaseRead(&scp->bufCreateLock);
            osi_Log0(afsd_logp, "buf_GetNewLocked: Free Buffer List is empty - sleeping 200ms");
            Sleep(200);
            continue;
        }

        /* for debugging, assert free list isn't empty, although we
         * really should try waiting for a running transaction to finish
         * instead of this; or better, we should have a transaction
         * throttler prevent us from entering this situation.
         */
        osi_assertx(cm_data.buf_freeListEndp != NULL, "buf_GetNewLocked: no free buffers");

        /* look at all buffers in free list, some of which may temporarily
         * have high refcounts and which then should be skipped,
         * starting cleaning I/O for those which are dirty.  If we find
         * a clean buffer, we rehash it, lock it and return it.
         */
        for (bp = cm_data.buf_freeListEndp; bp; bp = (cm_buf_t *) osi_QPrev(&bp->q)) {
            /* check to see if it really has zero ref count.  This
             * code can bump refcounts, at least, so it may not be
             * zero.
             */
            if (bp->refCount > 0)
                continue;

            /* we don't have to lock buffer itself, since the ref
             * count is 0 and we know it will stay zero as long as
             * we hold the global lock.
             */

            /* don't recycle someone in our own chunk */
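            /* The (-cm_chunkSize) mask below rounds an offset down to its
             * chunk base: for a power-of-two chunk size, -cm_chunkSize in
             * two's complement equals ~(cm_chunkSize - 1).  E.g. with a
             * 1 MB chunk (0x100000, illustrative), 0x123456 & 0xFFF00000
             * = 0x100000, so two offsets compare equal exactly when they
             * fall within the same chunk.
             */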
            if (!cm_FidCmp(&bp->fid, &scp->fid)
                && (bp->offset.LowPart & (-cm_chunkSize))
                   == (offsetp->LowPart & (-cm_chunkSize)))
                continue;

            /* if this page is being filled (!) or cleaned, see if
             * the I/O has completed.  If not, skip it, otherwise
             * do the final processing for the I/O.
             */
            if (bp->flags & (CM_BUF_READING | CM_BUF_WRITING)) {
                /* probably shouldn't do this much work while
                 * holding the big lock?  Watch for contention
                 * here.
                 */

            if (bp->flags & CM_BUF_DIRTY) {
                /* if the buffer is dirty, start cleaning it and
                 * move on to the next buffer.  We do this with
                 * just the lock required to minimize contention.
                 */

                lock_ReleaseWrite(&buf_globalLock);
                lock_ReleaseRead(&scp->bufCreateLock);

                /* grab required lock and clean; this only
                 * starts the I/O.  By the time we're back,
                 * it'll still be marked dirty, but it will also
                 * have the WRITING flag set, so we won't get
                 * confused.
                 */
                buf_CleanAsync(bp, &req);

                /* now put it back and go around again */

            /* if we get here, we know that the buffer still has a 0
             * ref count, and that it is clean and has no currently
             * pending I/O.  This is the dude to return.
             * Remember that as long as the ref count is 0, we know
             * that we won't have any lock conflicts, so we can grab
             * the buffer lock out of order in the locking hierarchy.
             */

            /* clean up junk flags */
            bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
            bp->dataVersion = CM_BUF_VERSION_BAD;       /* unknown so far */

            /* now hash in as our new buffer, and give it the
             * appropriate label, if requested.
             */
            bp->flags |= CM_BUF_INHASH;
            bp->offset = *offsetp;
            i = BUF_HASH(&scp->fid, offsetp);
            bp->hashp = cm_data.buf_scacheHashTablepp[i];
            cm_data.buf_scacheHashTablepp[i] = bp;
            i = BUF_FILEHASH(&scp->fid);
            nextBp = cm_data.buf_fileHashTablepp[i];
            bp->fileHashp = nextBp;
            bp->fileHashBackp = NULL;
            if (nextBp)
                nextBp->fileHashBackp = bp;
            cm_data.buf_fileHashTablepp[i] = bp;

            /* we should remove it from the lru queue.  It better still be there,
             * since we've held the global (big) lock since we found it there.
             */
            osi_assertx(bp->flags & CM_BUF_INLRU,
                        "buf_GetNewLocked: LRU screwup");

            if (cm_data.buf_freeListEndp == bp) {
                /* we're the last guy in this queue, so maintain it */
                cm_data.buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
            }
            osi_QRemove((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
            bp->flags &= ~CM_BUF_INLRU;

            /* grab the mutex so that people don't use it
             * before the caller fills it with data.  Again, no one
             * should have been able to get to this dude to lock it.
             */
            if (!lock_TryMutex(&bp->mx)) {
                osi_Log2(afsd_logp, "buf_GetNewLocked bp 0x%p cannot be mutex locked. refCount %d should be 0",
                         bp, bp->refCount);
                osi_panic("buf_GetNewLocked: TryMutex failed", __FILE__, __LINE__);
            }

            /* prepare to return it.  Give it a refcount */
            bp->refCount = 1;
#ifdef DEBUG_REFCOUNT
            osi_Log2(afsd_logp, "buf_GetNewLocked bp 0x%p ref %d", bp, 1);
            afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, 1);
#endif
            lock_ReleaseWrite(&buf_globalLock);
            lock_ReleaseRead(&scp->bufCreateLock);

            *bufpp = bp;

#ifdef TESTING
            buf_ValidateBufQueues();
#endif /* TESTING */
            return 0;
        }       /* for all buffers in lru queue */
        lock_ReleaseWrite(&buf_globalLock);
        lock_ReleaseRead(&scp->bufCreateLock);
        osi_Log0(afsd_logp, "buf_GetNewLocked: Free Buffer List has no buffers with a zero refcount - sleeping 100ms");
        Sleep(100);             /* give some time for a buffer to be freed */
    }   /* while loop over everything */

/* get a page, returning it held but unlocked.  Doesn't fill in the page
 * with I/O, since we're going to write the whole thing new.
 */
long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
    osi_hyper_t pageOffset;

    pageOffset.HighPart = offsetp->HighPart;
    pageOffset.LowPart = offsetp->LowPart & ~(cm_data.buf_blockSize - 1);
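    /* ~(buf_blockSize - 1) masks the offset down to a page boundary: with
     * a 4 KB block size (illustrative; the code only assumes a power of
     * two), an offset of 0x12345 becomes pageOffset.LowPart = 0x12000.
     */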
        bp = buf_Find(scp, &pageOffset);
            /* lock it and break out */
            lock_ObtainMutex(&bp->mx);

        /* otherwise, we have to create a page */
        code = buf_GetNewLocked(scp, &pageOffset, &bp);

        /* check if the buffer was created in a race condition branch.
         * If so, go around so we can hold a reference to it.
         */
        if (code == CM_BUF_EXISTS)
            continue;

        /* something else went wrong */

        /* otherwise, we have a locked buffer that we just created */
    }   /* big while loop */

    /* wait for reads */
    if (bp->flags & CM_BUF_READING)
        buf_WaitIO(scp, bp);

    /* once it has been read once, we can unlock it and return it, still
     * with its refcount held.
     */
    lock_ReleaseMutex(&bp->mx);

    osi_Log4(buf_logp, "buf_GetNew returning bp 0x%p for scp 0x%p, offset 0x%x:%08x",
             bp, scp, offsetp->HighPart, offsetp->LowPart);

/* get a page, returning it held but unlocked.  Make sure it is complete */
/* The scp must be unlocked when passed to this function */
long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
    osi_hyper_t pageOffset;
    unsigned long tcount;
#ifdef DISKCACHE95
    cm_diskcache_t *dcp;
#endif /* DISKCACHE95 */

    pageOffset.HighPart = offsetp->HighPart;
    pageOffset.LowPart = offsetp->LowPart & ~(cm_data.buf_blockSize - 1);

#ifdef TESTING
        buf_ValidateBufQueues();
#endif /* TESTING */

        bp = buf_Find(scp, &pageOffset);
            /* lock it and break out */
            lock_ObtainMutex(&bp->mx);

#ifdef DISKCACHE95
            /* touch disk chunk to update LRU info */
            diskcache_Touch(bp->dcp);
#endif /* DISKCACHE95 */

        /* otherwise, we have to create a page */
        code = buf_GetNewLocked(scp, &pageOffset, &bp);
        /* bp->mx is now held */

        /* check if the buffer was created in a race condition branch.
         * If so, go around so we can hold a reference to it.
         */
        if (code == CM_BUF_EXISTS)
            continue;

        /* something else went wrong */
#ifdef TESTING
            buf_ValidateBufQueues();
#endif /* TESTING */

        /* otherwise, we have a locked buffer that we just created */
    }   /* big while loop */

    /* if we get here, we have a locked buffer that may have just been
     * created, in which case it needs to be filled with data.
     */

        /* load the page; freshly created pages should be idle */
        osi_assertx(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)), "incorrect cm_buf_t flags");

        /* start the I/O; may drop lock */
        bp->flags |= CM_BUF_READING;
        code = (*cm_buf_opsp->Readp)(bp, cm_data.buf_blockSize, &tcount, NULL);

#ifdef DISKCACHE95
        code = diskcache_Get(&bp->fid, &bp->offset, bp->datap, cm_data.buf_blockSize, &bp->dataVersion, &tcount, &dcp);
        bp->dcp = dcp;          /* pointer to disk cache struct. */
#endif /* DISKCACHE95 */

            /* failure or queued */
            if (code != ERROR_IO_PENDING) {
                bp->flags |= CM_BUF_ERROR;
                bp->flags &= ~CM_BUF_READING;
                if (bp->flags & CM_BUF_WAITING) {
                    osi_Log1(buf_logp, "buf_Get Waking bp 0x%p", bp);
                    osi_Wakeup((LONG_PTR) bp);
                }
                lock_ReleaseMutex(&bp->mx);
#ifdef TESTING
                buf_ValidateBufQueues();
#endif /* TESTING */

            /* otherwise, I/O completed instantly and we're done, except
             * for padding the xfr out with 0s and checking for EOF
             */
            if (tcount < (unsigned long) cm_data.buf_blockSize) {
                memset(bp->datap + tcount, 0, cm_data.buf_blockSize - tcount);
                    bp->flags |= CM_BUF_EOF;
            }
            bp->flags &= ~CM_BUF_READING;
            if (bp->flags & CM_BUF_WAITING) {
                osi_Log1(buf_logp, "buf_Get Waking bp 0x%p", bp);
                osi_Wakeup((LONG_PTR) bp);
            }

    /* wait for reads, either that which we started above, or that someone
     * else started.  We don't care if we return a buffer being cleaned.
     */
    if (bp->flags & CM_BUF_READING)
        buf_WaitIO(scp, bp);

    /* once it has been read once, we can unlock it and return it, still
     * with its refcount held.
     */
    lock_ReleaseMutex(&bp->mx);

    /* now remove from queue; will be put in at the head (farthest from
     * being recycled) when we're done in buf_Release.
     */
    lock_ObtainWrite(&buf_globalLock);
    if (bp->flags & CM_BUF_INLRU) {
        if (cm_data.buf_freeListEndp == bp)
            cm_data.buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
        osi_QRemove((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
        bp->flags &= ~CM_BUF_INLRU;
    }
    lock_ReleaseWrite(&buf_globalLock);

    osi_Log4(buf_logp, "buf_Get returning bp 0x%p for scp 0x%p, offset 0x%x:%08x",
             bp, scp, offsetp->HighPart, offsetp->LowPart);
#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */

/* count # of elements in the free list;
 * we don't bother doing the proper locking for accessing dataVersion or flags
 * since it is a pain, and this is really just an advisory call.  If you need
 * to do better at some point, rewrite this function.
 */
long buf_CountFreeList(void)
    lock_ObtainRead(&buf_globalLock);
    for (bufp = cm_data.buf_freeListp; bufp; bufp = (cm_buf_t *) osi_QNext(&bufp->q)) {
        /* if the buffer doesn't have an identity, or if the buffer
         * has been invalidated (by having its DV stomped upon), then
         * count it as free, since it isn't really being utilized.
         */
        if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion == CM_BUF_VERSION_BAD)
            count++;
    }
    lock_ReleaseRead(&buf_globalLock);
    return count;

/* clean a buffer synchronously */
long buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp)
    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");

    lock_ObtainMutex(&bp->mx);
    code = buf_CleanAsyncLocked(bp, reqp);
    lock_ReleaseMutex(&bp->mx);

    return code;

/* wait for a buffer's cleaning to finish */
void buf_CleanWait(cm_scache_t *scp, cm_buf_t *bp, afs_uint32 locked)
    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");

    if (!locked)
        lock_ObtainMutex(&bp->mx);
    if (bp->flags & CM_BUF_WRITING) {
        buf_WaitIO(scp, bp);
    }
    if (!locked)
        lock_ReleaseMutex(&bp->mx);

/* set the dirty flag on a buffer, and set associated write-ahead log,
 * if there is one.  Allow one to be added to a buffer, but not changed.
 *
 * The buffer must be locked before calling this routine.
 */
void buf_SetDirty(cm_buf_t *bp, afs_uint32 offset, afs_uint32 length)
    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
    osi_assertx(bp->refCount > 0, "cm_buf_t refcount 0");

    if (bp->flags & CM_BUF_DIRTY) {

        osi_Log1(buf_logp, "buf_SetDirty 0x%p already dirty", bp);

        if (bp->dirty_offset <= offset) {
            if (bp->dirty_offset + bp->dirty_length >= offset + length) {
                /* dirty_length remains the same */
            } else {
                bp->dirty_length = offset + length - bp->dirty_offset;
            }
        } else /* bp->dirty_offset > offset */ {
            if (bp->dirty_offset + bp->dirty_length >= offset + length) {
                bp->dirty_length = bp->dirty_offset + bp->dirty_length - offset;
            } else {
                bp->dirty_length = length;
            }
            bp->dirty_offset = offset;
        }
    } else {
        osi_Log1(buf_logp, "buf_SetDirty 0x%p", bp);

        bp->flags |= CM_BUF_DIRTY;

        /* and turn off EOF flag, since it has associated data now */
        bp->flags &= ~CM_BUF_EOF;

        bp->dirty_offset = offset;
        bp->dirty_length = length;
    }
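    /* Worked example of the merge above (illustrative numbers): a buffer
     * already dirty over [100, 150) has dirty_offset 100, dirty_length 50.
     * A new write of length 100 at offset 120 covers [120, 220); since it
     * is not contained, dirty_length grows to 120 + 100 - 100 = 120 and
     * the merged dirty range becomes [100, 220).  The range only ever
     * grows here; it is reset when the buffer is cleaned.
     */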
    /* and add to the dirty list.
     * we obtain a hold on the buffer for as long as it remains
     * in the list.  buffers are only removed from the list by
     * the buf_IncrSyncer function regardless of when else the
     * dirty flag might be cleared.
     *
     * This should never happen but just in case there is a bug
     * elsewhere, never add to the dirty list if the buffer is
     * already there.
     */
    lock_ObtainWrite(&buf_globalLock);
    if (!(bp->flags & CM_BUF_INDL)) {
        if (!cm_data.buf_dirtyListp) {
            cm_data.buf_dirtyListp = cm_data.buf_dirtyListEndp = bp;
        } else {
            cm_data.buf_dirtyListEndp->dirtyp = bp;
            cm_data.buf_dirtyListEndp = bp;
        }
        bp->flags |= CM_BUF_INDL;
    }
    lock_ReleaseWrite(&buf_globalLock);

/* clean all buffers, reset log pointers and invalidate all buffers.
 * Called with no locks held, and returns with same.
 *
 * This function is guaranteed to clean and remove the log ptr of all the
 * buffers that were dirty or had non-zero log ptrs before the call was
 * made.  That's sufficient to clean up any garbage left around by recovery,
 * which is all we're counting on this for; there may be newly created buffers
 * added while we're running, but that should be OK.
 *
 * In an environment where there are no transactions (artificially imposed, for
 * example, when switching the database to raw mode), this function is used to
 * make sure that all updates have been written to the disk.  In that case, we don't
 * really require that we forget the log association between pages and logs, but
 * it also doesn't hurt.  Since raw mode I/O goes through this buffer package, we don't
 * have to worry about invalidating data in the buffers.
 *
 * This function is used at the end of recovery as paranoia to get the recovered
 * database out to disk.  It removes all references to the recovery log and cleans
 * all buffers.
 */
long buf_CleanAndReset(void)
    lock_ObtainRead(&buf_globalLock);
    for (i = 0; i < cm_data.buf_hashSize; i++) {
        for (bp = cm_data.buf_scacheHashTablepp[i]; bp; bp = bp->hashp) {
            if ((bp->flags & CM_BUF_DIRTY) == CM_BUF_DIRTY) {
                buf_HoldLocked(bp);
                lock_ReleaseRead(&buf_globalLock);

                /* now no locks are held; clean buffer and go on */
                cm_InitReq(&req);
                req.flags |= CM_REQ_NORETRY;

                buf_CleanAsync(bp, &req);
                buf_CleanWait(NULL, bp, FALSE);

                /* relock and release buffer */
                lock_ObtainRead(&buf_globalLock);
                buf_ReleaseLocked(bp, FALSE);
            }
        }       /* over one bucket */
    }   /* for loop over all hash buckets */

    lock_ReleaseRead(&buf_globalLock);

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */

    /* and we're done */
    return 0;

/* called without global lock being held, reserves buffers for callers
 * that need more than one held (not locked) at once.
 */
void buf_ReserveBuffers(afs_uint64 nbuffers)
    lock_ObtainWrite(&buf_globalLock);
    while (1) {
        if (cm_data.buf_reservedBufs + nbuffers > cm_data.buf_maxReservedBufs) {
            cm_data.buf_reserveWaiting = 1;
            osi_Log1(buf_logp, "buf_ReserveBuffers waiting for %d bufs", nbuffers);
            osi_SleepW((LONG_PTR) &cm_data.buf_reservedBufs, &buf_globalLock);
            lock_ObtainWrite(&buf_globalLock);
        } else {
            cm_data.buf_reservedBufs += nbuffers;
            break;
        }
    }
    lock_ReleaseWrite(&buf_globalLock);

int buf_TryReserveBuffers(afs_uint64 nbuffers)
    int code;

    lock_ObtainWrite(&buf_globalLock);
    if (cm_data.buf_reservedBufs + nbuffers > cm_data.buf_maxReservedBufs) {
        code = 0;
    } else {
        cm_data.buf_reservedBufs += nbuffers;
        code = 1;
    }
    lock_ReleaseWrite(&buf_globalLock);
    return code;

/* called without global lock held, releases reservation held by
 * buf_ReserveBuffers.
 */
void buf_UnreserveBuffers(afs_uint64 nbuffers)
    lock_ObtainWrite(&buf_globalLock);
    cm_data.buf_reservedBufs -= nbuffers;
    if (cm_data.buf_reserveWaiting) {
        cm_data.buf_reserveWaiting = 0;
        osi_Wakeup((LONG_PTR) &cm_data.buf_reservedBufs);
    }
    lock_ReleaseWrite(&buf_globalLock);
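/* Usage sketch (hypothetical caller, arbitrary count): reserving buffers
 * ahead of a multi-buffer operation so the pool cannot be exhausted
 * midway:
 *
 *     buf_ReserveBuffers(4);          -- may sleep until 4 are available
 *     ... buf_Get() up to four buffers and work on them ...
 *     buf_UnreserveBuffers(4);        -- wakes anyone waiting on the pool
 */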
/* truncate the buffers past sizep, zeroing out the page, if we don't
 * end on a page boundary.
 *
 * Requires scp->bufCreateLock to be write locked.
 */
long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
                  osi_hyper_t *sizep)
    cm_buf_t *nbufp;    /* next buffer, if didRelease */

    /* assert that scp->bufCreateLock is held in write mode */
    lock_AssertWrite(&scp->bufCreateLock);

    i = BUF_FILEHASH(&scp->fid);

    lock_ObtainRead(&buf_globalLock);
    bufp = cm_data.buf_fileHashTablepp[i];
    if (bufp == NULL) {
        lock_ReleaseRead(&buf_globalLock);
        return 0;
    }

    buf_HoldLocked(bufp);
    lock_ReleaseRead(&buf_globalLock);

    while (bufp) {
        lock_ObtainMutex(&bufp->mx);

        bufEnd.HighPart = 0;
        bufEnd.LowPart = cm_data.buf_blockSize;
        bufEnd = LargeIntegerAdd(bufEnd, bufp->offset);

        if (cm_FidCmp(&bufp->fid, &scp->fid) == 0 &&
            LargeIntegerLessThan(*sizep, bufEnd)) {
            buf_WaitIO(scp, bufp);
        }
        lock_ObtainWrite(&scp->rw);

        /* make sure we have a callback (so we have the right value for
         * the length), and wait for it to be safe to do a truncate.
         */
        code = cm_SyncOp(scp, bufp, userp, reqp, 0,
                         CM_SCACHESYNC_NEEDCALLBACK
                         | CM_SCACHESYNC_GETSTATUS
                         | CM_SCACHESYNC_SETSIZE
                         | CM_SCACHESYNC_BUFLOCKED);

        /* if we succeeded in our locking, and this applies to the right
         * file, and the truncate request overlaps the buffer either
         * totally or partially, then do something.
         */
        if (code == 0 && cm_FidCmp(&bufp->fid, &scp->fid) == 0
            && LargeIntegerLessThan(*sizep, bufEnd)) {

            /* destroy the buffer, turning off its dirty bit, if
             * we're truncating the whole buffer.  Otherwise, set
             * the dirty bit, and clear out the tail of the buffer
             * if we just overlap some.
             */
            if (LargeIntegerLessThanOrEqualTo(*sizep, bufp->offset)) {
                /* truncating the entire page */
                bufp->flags &= ~CM_BUF_DIRTY;
                bufp->dirty_offset = 0;
                bufp->dirty_length = 0;
                bufp->dataVersion = CM_BUF_VERSION_BAD; /* known bad */
                bufp->dirtyCounter++;
            } else {
                /* don't set dirty, since dirty implies
                 * currently up-to-date.  Don't need to do this,
                 * since we'll update the length anyway.
                 *
                 * Zero out remainder of the page, in case we
                 * seek and write past EOF, and make this data
                 * visible again.
                 */
                bufferPos = sizep->LowPart & (cm_data.buf_blockSize - 1);
                osi_assertx(bufferPos != 0, "non-zero bufferPos");
                memset(bufp->datap + bufferPos, 0,
                       cm_data.buf_blockSize - bufferPos);
            }
        }
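        /* Example (illustrative sizes): truncating to 0x1234 with a 4 KB
         * block gives bufferPos = 0x1234 & 0xFFF = 0x234, so bytes
         * [0x234, 0x1000) of this page are zeroed; a later seek-and-write
         * past EOF then exposes zeroes rather than stale data.
         */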
        cm_SyncOpDone(scp, bufp,
                      CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS
                      | CM_SCACHESYNC_SETSIZE | CM_SCACHESYNC_BUFLOCKED);

        lock_ReleaseWrite(&scp->rw);
        lock_ReleaseMutex(&bufp->mx);

        nbufp = bufp->fileHashp;

        /* This forces the loop to end and the error code
         * to be returned. */

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */

long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
    cm_buf_t *bp;       /* buffer we're hacking on */

    i = BUF_FILEHASH(&scp->fid);

    lock_ObtainRead(&buf_globalLock);
    bp = cm_data.buf_fileHashTablepp[i];
    if (bp)
        buf_HoldLocked(bp);
    lock_ReleaseRead(&buf_globalLock);

    for (; bp; bp = nbp) {
        didRelease = 0;         /* haven't released this buffer yet */

        /* clean buffer synchronously */
        if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
            lock_ObtainMutex(&bp->mx);

            /* start cleaning the buffer, and wait for it to finish */
            buf_CleanAsyncLocked(bp, reqp);
            buf_WaitIO(scp, bp);
            lock_ReleaseMutex(&bp->mx);

            /*
             * if the error for the previous buffer was BADFD
             * then all buffers for the FID are bad.  Do not
             * attempt to stabilize.
             */
            if (code != CM_ERROR_BADFD) {
                code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
                if (code && code != CM_ERROR_BADFD)
                    break;
            }
            if (code == CM_ERROR_BADFD) {
                /* if the scp's FID is bad it's because we received VNOVNODE
                 * when attempting to FetchStatus before the write.  This
                 * page therefore contains data that can no longer be stored.
                 */
                lock_ObtainMutex(&bp->mx);
                bp->flags &= ~CM_BUF_DIRTY;
                bp->flags |= CM_BUF_ERROR;
                bp->error = CM_ERROR_BADFD;
                bp->dirty_offset = 0;
                bp->dirty_length = 0;
                bp->dataVersion = CM_BUF_VERSION_BAD;   /* known bad */
                lock_ReleaseMutex(&bp->mx);
            }

            /* actually, we only know that the buffer is clean if its ref
             * count is 1, since we don't have the buffer itself locked.
             */
            if (!(bp->flags & CM_BUF_DIRTY)) {
                lock_ObtainWrite(&buf_globalLock);
                if (bp->refCount == 1) {        /* bp is held above */
                    nbp = bp->fileHashp;
                    if (nbp)
                        buf_HoldLocked(nbp);
                    buf_ReleaseLocked(bp, TRUE);
                    didRelease = 1;
                }
                lock_ReleaseWrite(&buf_globalLock);
            }

            (*cm_buf_opsp->Unstabilizep)(scp, userp);
        }

        if (!didRelease) {
            lock_ObtainRead(&buf_globalLock);
            nbp = bp->fileHashp;
            if (nbp)
                buf_HoldLocked(nbp);
            buf_ReleaseLocked(bp, FALSE);
            lock_ReleaseRead(&buf_globalLock);
        }
    }   /* for loop over a bunch of buffers */

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */

/* Must be called with scp->rw held */
long buf_ForceDataVersion(cm_scache_t *scp, afs_uint64 fromVersion, afs_uint64 toVersion)
    lock_AssertAny(&scp->rw);

    i = BUF_FILEHASH(&scp->fid);

    lock_ObtainRead(&buf_globalLock);

    for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp = bp->fileHashp) {
        if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
            if (bp->dataVersion == fromVersion) {
                bp->dataVersion = toVersion;
            }
        }
    }

    lock_ReleaseRead(&buf_globalLock);

long buf_CleanVnode(struct cm_scache *scp, cm_user_t *userp, cm_req_t *reqp)
    cm_buf_t *bp;       /* buffer we're hacking on */
    cm_buf_t *nbp;      /* next one */

    i = BUF_FILEHASH(&scp->fid);

    lock_ObtainRead(&buf_globalLock);
    bp = cm_data.buf_fileHashTablepp[i];
    if (bp)
        buf_HoldLocked(bp);
    lock_ReleaseRead(&buf_globalLock);
    for (; bp; bp = nbp) {
        /* clean buffer synchronously */
        if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
            lock_ObtainMutex(&bp->mx);
            if (bp->flags & CM_BUF_DIRTY) {
                cm_ReleaseUser(bp->userp);

                wasDirty = buf_CleanAsyncLocked(bp, reqp);
                buf_CleanWait(scp, bp, TRUE);
                if (bp->flags & CM_BUF_ERROR) {
                    code = bp->error;
                }
            }
            lock_ReleaseMutex(&bp->mx);
        }

        lock_ObtainRead(&buf_globalLock);
        nbp = bp->fileHashp;
        if (nbp)
            buf_HoldLocked(nbp);
        buf_ReleaseLocked(bp, FALSE);
        lock_ReleaseRead(&buf_globalLock);
    }   /* for loop over a bunch of buffers */

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */

#ifdef TESTING
buf_ValidateBufQueues(void)
    cm_buf_t *bp, *bpb, *bpf, *bpa;
    afs_uint32 countf = 0, countb = 0, counta = 0;

    lock_ObtainRead(&buf_globalLock);
    for (bp = cm_data.buf_freeListEndp; bp; bp = (cm_buf_t *) osi_QPrev(&bp->q)) {
        if (bp->magic != CM_BUF_MAGIC)
            osi_panic("buf magic error", __FILE__, __LINE__);
        countb++;
        bpb = bp;
    }

    for (bp = cm_data.buf_freeListp; bp; bp = (cm_buf_t *) osi_QNext(&bp->q)) {
        if (bp->magic != CM_BUF_MAGIC)
            osi_panic("buf magic error", __FILE__, __LINE__);
        countf++;
        bpf = bp;
    }

    for (bp = cm_data.buf_allp; bp; bp = bp->allp) {
        if (bp->magic != CM_BUF_MAGIC)
            osi_panic("buf magic error", __FILE__, __LINE__);
        counta++;
        bpa = bp;
    }

    lock_ReleaseRead(&buf_globalLock);

    if (countb != countf)
        osi_panic("buf free list count mismatch (countb != countf)", __FILE__, __LINE__);

    if (counta != cm_data.buf_nbuffers)
        osi_panic("buf total count mismatch (counta != cm_data.buf_nbuffers)", __FILE__, __LINE__);
#endif /* TESTING */

/* dump the contents of the buf_scacheHashTablepp. */
int cm_DumpBufHashTable(FILE *outputFile, char *cookie, int lock)
    if (cm_data.buf_scacheHashTablepp == NULL)
        return -1;

    if (lock)
        lock_ObtainRead(&buf_globalLock);

    StringCbPrintfA(output, sizeof(output), "%s - dumping buf_HashTable - buf_hashSize=%d\r\n",
                    cookie, cm_data.buf_hashSize);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);

    for (i = 0; i < cm_data.buf_hashSize; i++)
    {
        for (bp = cm_data.buf_scacheHashTablepp[i]; bp; bp = bp->hashp)
        {
            StringCbPrintfA(output, sizeof(output),
                            "%s bp=0x%08X, hash=%d, fid (cell=%d, volume=%d, "
                            "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
                            "flags=0x%x, cmFlags=0x%x, refCount=%d\r\n",
                            cookie, (void *)bp, i, bp->fid.cell, bp->fid.volume,
                            bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
                            bp->offset.LowPart, bp->dataVersion, bp->flags,
                            bp->cmFlags, bp->refCount);
            WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
        }
    }

    StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_HashTable.\r\n", cookie);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);

    StringCbPrintfA(output, sizeof(output), "%s - dumping buf_freeListEndp\r\n", cookie);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
    for (bp = cm_data.buf_freeListEndp; bp; bp = (cm_buf_t *) osi_QPrev(&bp->q)) {
        StringCbPrintfA(output, sizeof(output),
                        "%s bp=0x%08X, fid (cell=%d, volume=%d, "
                        "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
                        "flags=0x%x, cmFlags=0x%x, refCount=%d\r\n",
                        cookie, (void *)bp, bp->fid.cell, bp->fid.volume,
                        bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
                        bp->offset.LowPart, bp->dataVersion, bp->flags,
                        bp->cmFlags, bp->refCount);
        WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
    }
    StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_FreeListEndp.\r\n", cookie);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);

    StringCbPrintfA(output, sizeof(output), "%s - dumping buf_dirtyListp\r\n", cookie);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
    for (bp = cm_data.buf_dirtyListp; bp; bp = bp->dirtyp) {
        StringCbPrintfA(output, sizeof(output),
                        "%s bp=0x%08X, fid (cell=%d, volume=%d, "
                        "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
                        "flags=0x%x, cmFlags=0x%x, refCount=%d\r\n",
                        cookie, (void *)bp, bp->fid.cell, bp->fid.volume,
                        bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
                        bp->offset.LowPart, bp->dataVersion, bp->flags,
                        bp->cmFlags, bp->refCount);
        WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
    }
    StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_dirtyListp.\r\n", cookie);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);

    if (lock)
        lock_ReleaseRead(&buf_globalLock);
    return 0;

void buf_ForceTrace(BOOL flush)
    len = GetTempPath(sizeof(buf)-10, buf);
    StringCbCopyA(&buf[len], sizeof(buf)-len, "/afs-buffer.log");
    handle = CreateFile(buf, GENERIC_WRITE, FILE_SHARE_READ,
                        NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
    if (handle == INVALID_HANDLE_VALUE) {
        osi_panic("Cannot create log file", __FILE__, __LINE__);
    }
    osi_LogPrint(buf_logp, handle);
    if (flush)
        FlushFileBuffers(handle);
    CloseHandle(handle);

long buf_DirtyBuffersExist(cm_fid_t *fidp)
    afs_uint32 bcount = 0;

    i = BUF_FILEHASH(fidp);

    for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp = bp->fileHashp, bcount++) {
        if (!cm_FidCmp(fidp, &bp->fid) && (bp->flags & CM_BUF_DIRTY))
            return 1;
    }
    return 0;

long buf_CleanDirtyBuffers(cm_scache_t *scp)
    afs_uint32 bcount = 0;
    cm_fid_t *fidp = &scp->fid;

    for (bp = cm_data.buf_allp; bp; bp = bp->allp, bcount++) {
        if (!cm_FidCmp(fidp, &bp->fid) && (bp->flags & CM_BUF_DIRTY)) {
            lock_ObtainMutex(&bp->mx);
            bp->cmFlags &= ~CM_BUF_CMSTORING;
            bp->flags &= ~CM_BUF_DIRTY;
            bp->dirty_offset = 0;
            bp->dirty_length = 0;
            bp->flags |= CM_BUF_ERROR;
            bp->error = VNOVNODE;
            bp->dataVersion = CM_BUF_VERSION_BAD;       /* bad */

            if (bp->flags & CM_BUF_WAITING) {
                osi_Log2(buf_logp, "BUF CleanDirtyBuffers Waking [scp 0x%p] bp 0x%p", scp, bp);
                osi_Wakeup((LONG_PTR) bp);
            }
            lock_ReleaseMutex(&bp->mx);