/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/* Copyright (C) 1994 Cazamar Systems, Inc. */

#include <afs/param.h>

#include "cm_memmap.h"

#define TRACE_BUFFER 1

extern void afsi_log(char *pattern, ...);
/* This module implements the buffer package used by the local transaction
 * system (cm).  It is initialized by calling cm_Init, which calls buf_Init;
 * it must be initialized before any of its main routines are called.
 *
 * Each buffer is hashed into a hash table by file ID and offset, and if its
 * reference count is zero, it is also in a free list.
 *
 * There are two locks involved in buffer processing.  The global lock
 * buf_globalLock protects all of the global variables defined in this module,
 * the reference counts and hash pointers in the actual cm_buf_t structures,
 * and the LRU queue pointers in the buffer structures.
 *
 * The mutexes in the buffer structures protect the remaining fields in the
 * buffers, as well as the data itself.
 *
 * The locking hierarchy here is this:
 *
 * - resv multiple simul. buffers reservation
 * - lock buffer I/O flags
 * - lock buffer's mutex
 * - lock buf_globalLock
 */
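/* Illustrative sketch (not part of the original source): per the hierarchy
 * above, a thread may acquire buf_globalLock while already holding a
 * buffer's mutex, but never the other way around.  The helper below is
 * hypothetical and exists only to show the ordering.
 */
#ifdef BUF_LOCK_ORDER_EXAMPLE
static void buf_ExampleLockOrder(cm_buf_t *bp)
{
    lock_ObtainMutex(&bp->mx);          /* buffer mutex: higher in hierarchy */
    lock_ObtainWrite(&buf_globalLock);  /* global lock: acquired last */
    /* ... touch refCount, hash pointers, or LRU queue pointers here ... */
    lock_ReleaseWrite(&buf_globalLock);
    lock_ReleaseMutex(&bp->mx);
}
#endif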
/* global debugging log */
osi_log_t *buf_logp = NULL;

/* Global lock protecting hash tables and free lists */
osi_rwlock_t buf_globalLock;

/* ptr to head of the free list (most recently used) and the
 * tail (the guy to remove first).  We use osi_Q* functions
 * to put stuff in buf_freeListp, and maintain the end
 * pointer manually
 */

/* a pointer to a list of all buffers, just so that we can find them
 * easily for debugging, and for the incr syncer.  Locked under
 * the buf_globalLock.
 */

/* defaults setup; these variables may be manually assigned into
 * before calling cm_Init, as a way of changing these defaults.
 */

/* callouts for reading and writing data, etc */
cm_buf_ops_t *cm_buf_opsp;

#ifdef DISKCACHE95
/* for experimental disk caching support in Win95 client */
cm_buf_t *buf_diskFreeListp;
cm_buf_t *buf_diskFreeListEndp;
cm_buf_t *buf_diskAllp;
extern int cm_diskCacheEnabled;
#endif /* DISKCACHE95 */

/* set this to 1 when we are terminating to prevent access attempts */
static int buf_ShutdownFlag = 0;
void buf_HoldLocked(cm_buf_t *bp)
{
    osi_assertx(bp->magic == CM_BUF_MAGIC, "incorrect cm_buf_t magic");
    InterlockedIncrement(&bp->refCount);
}

/* hold a reference to an already held buffer */
void buf_Hold(cm_buf_t *bp)
{
    lock_ObtainRead(&buf_globalLock);
    buf_HoldLocked(bp);
    lock_ReleaseRead(&buf_globalLock);
}
/* code to drop reference count while holding buf_globalLock */
void buf_ReleaseLocked(cm_buf_t *bp, afs_uint32 writeLocked)
{
    afs_int32 refCount;

    if (writeLocked)
        lock_AssertWrite(&buf_globalLock);
    else
        lock_AssertRead(&buf_globalLock);

    /* ensure that we're in the LRU queue if our ref count is 0 */
    osi_assertx(bp->magic == CM_BUF_MAGIC, "incorrect cm_buf_t magic");

    refCount = InterlockedDecrement(&bp->refCount);
    if (refCount < 0)
        osi_panic("buf refcount 0", __FILE__, __LINE__);
    osi_assertx(refCount >= 0, "cm_buf_t refCount underflow");

    if (refCount == 0) {
        /*
         * If we are read locked there could be a race condition
         * with buf_Find() so we must obtain a write lock and
         * double check that the refCount is actually zero
         * before we remove the buffer from the LRU queue.
         */
        if (!writeLocked) {
            lock_ReleaseRead(&buf_globalLock);
            lock_ObtainWrite(&buf_globalLock);
        }

        if (bp->refCount == 0 &&
            !(bp->flags & CM_BUF_INLRU)) {
            osi_QAdd((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);

            /* watch for transition from empty to one element */
            if (!cm_data.buf_freeListEndp)
                cm_data.buf_freeListEndp = cm_data.buf_freeListp;
            bp->flags |= CM_BUF_INLRU;
        }

        if (!writeLocked) {
            lock_ReleaseWrite(&buf_globalLock);
            lock_ObtainRead(&buf_globalLock);
        }
    }
}
/* release a buffer.  Buffer must be referenced, but unlocked. */
void buf_Release(cm_buf_t *bp)
{
    afs_int32 refCount;

    /* ensure that we're in the LRU queue if our ref count is 0 */
    osi_assertx(bp->magic == CM_BUF_MAGIC, "incorrect cm_buf_t magic");

    refCount = InterlockedDecrement(&bp->refCount);
    if (refCount < 0)
        osi_panic("buf refcount 0", __FILE__, __LINE__);
    osi_assertx(refCount >= 0, "cm_buf_t refCount underflow");

    if (refCount == 0) {
        lock_ObtainWrite(&buf_globalLock);
        if (bp->refCount == 0 &&
            !(bp->flags & CM_BUF_INLRU)) {
            osi_QAdd((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);

            /* watch for transition from empty to one element */
            if (!cm_data.buf_freeListEndp)
                cm_data.buf_freeListEndp = cm_data.buf_freeListp;
            bp->flags |= CM_BUF_INLRU;
        }
        lock_ReleaseWrite(&buf_globalLock);
    }
}
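/* Illustrative usage sketch (not part of the original source): callers pair
 * buf_Hold with buf_Release around any window where the buffer must not be
 * recycled.  The helper below is hypothetical.
 */
#ifdef BUF_REFCOUNT_EXAMPLE
static void buf_ExampleHoldRelease(cm_buf_t *bp)
{
    buf_Hold(bp);       /* refCount > 0: the buffer cannot be recycled */
    /* ... examine bp->datap, taking bp->mx if the contents must be stable ... */
    buf_Release(bp);    /* refCount may reach 0: buffer returns to the LRU queue */
}
#endif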
/* incremental sync daemon.  Writes all dirty buffers every 5000 ms */
void buf_IncrSyncer(long parm)
{
    cm_buf_t **bpp, *bp;
    long i;                             /* counter */
    long wasDirty = 0;
    cm_req_t req;

    while (buf_ShutdownFlag == 0) {
        i = SleepEx(5000, 1);
        if (i != 0)
            continue;

        /* now go through our percentage of the buffers */
        for (bpp = &cm_data.buf_dirtyListp; (bp = *bpp) != NULL; ) {
            /* all dirty buffers are held when they are added to the
             * dirty list.  No need for an additional hold.
             */
            if (bp->flags & CM_BUF_DIRTY) {
                /* start cleaning the buffer; don't touch log pages since
                 * the log code counts on knowing exactly who is writing
                 * a log page at any given instant.
                 */
                cm_InitReq(&req);
                req.flags |= CM_REQ_NORETRY;
                wasDirty |= buf_CleanAsync(bp, &req);
            }

            /* the buffer may or may not have been dirty
             * and if dirty may or may not have been cleaned
             * successfully.  check the dirty flag again.
             */
            if (!(bp->flags & CM_BUF_DIRTY)) {
                lock_ObtainMutex(&bp->mx);
                if (!(bp->flags & CM_BUF_DIRTY)) {
                    /* remove the buffer from the dirty list */
                    lock_ObtainWrite(&buf_globalLock);
                    *bpp = bp->dirtyp;
                    bp->dirtyp = NULL;
                    if (cm_data.buf_dirtyListp == NULL)
                        cm_data.buf_dirtyListEndp = NULL;
                    buf_ReleaseLocked(bp, TRUE);
                    lock_ReleaseWrite(&buf_globalLock);
                } else {
                    /* advance the pointer so we don't loop forever */
                    bpp = &bp->dirtyp;
                }
                lock_ReleaseMutex(&bp->mx);
            } else {
                /* advance the pointer so we don't loop forever */
                bpp = &bp->dirtyp;
            }
        } /* for loop over a bunch of buffers */
    } /* whole daemon's while loop */
}
buf_ValidateBuffers(void)
{
    cm_buf_t * bp, *bpf, *bpa, *bpb;
    afs_uint64 countb = 0, countf = 0, counta = 0;

    if ((cm_data.buf_freeListp == NULL && cm_data.buf_freeListEndp != NULL) ||
        (cm_data.buf_freeListp != NULL && cm_data.buf_freeListEndp == NULL)) {
        afsi_log("cm_ValidateBuffers failure: inconsistent free list pointers");
        fprintf(stderr, "cm_ValidateBuffers failure: inconsistent free list pointers\n");
    }

    for (bp = cm_data.buf_freeListEndp; bp; bp = (cm_buf_t *) osi_QPrev(&bp->q)) {
        if (bp->magic != CM_BUF_MAGIC) {
            afsi_log("cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
            fprintf(stderr, "cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
        }
        countb++;
        bpb = bp;

        if (countb > cm_data.buf_nbuffers) {
            afsi_log("cm_ValidateBuffers failure: countb > cm_data.buf_nbuffers");
            fprintf(stderr, "cm_ValidateBuffers failure: countb > cm_data.buf_nbuffers\n");
        }
    }

    for (bp = cm_data.buf_freeListp; bp; bp = (cm_buf_t *) osi_QNext(&bp->q)) {
        if (bp->magic != CM_BUF_MAGIC) {
            afsi_log("cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
            fprintf(stderr, "cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
        }
        countf++;
        bpf = bp;

        if (countf > cm_data.buf_nbuffers) {
            afsi_log("cm_ValidateBuffers failure: countf > cm_data.buf_nbuffers");
            fprintf(stderr, "cm_ValidateBuffers failure: countf > cm_data.buf_nbuffers\n");
        }
    }

    for (bp = cm_data.buf_allp; bp; bp = bp->allp) {
        if (bp->magic != CM_BUF_MAGIC) {
            afsi_log("cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
            fprintf(stderr, "cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
        }
        counta++;
        bpa = bp;

        if (counta > cm_data.buf_nbuffers) {
            afsi_log("cm_ValidateBuffers failure: counta > cm_data.buf_nbuffers");
            fprintf(stderr, "cm_ValidateBuffers failure: counta > cm_data.buf_nbuffers\n");
        }
    }

    if (countb != countf) {
        afsi_log("cm_ValidateBuffers failure: countb != countf");
        fprintf(stderr, "cm_ValidateBuffers failure: countb != countf\n");
    }

    if (counta != cm_data.buf_nbuffers) {
        afsi_log("cm_ValidateBuffers failure: counta != cm_data.buf_nbuffers");
        fprintf(stderr, "cm_ValidateBuffers failure: counta != cm_data.buf_nbuffers\n");
    }

    return 0;
}
void buf_Shutdown(void)
{
    buf_ShutdownFlag = 1;
}
/* initialize the buffer package; called with no locks
 * held during the initialization phase.
 */
long buf_Init(int newFile, cm_buf_ops_t *opsp, afs_uint64 nbuffers)
{
    static osi_once_t once;
    cm_buf_t *bp;
    thread_t phandle;
    unsigned long pid;
    char *data;
    afs_uint64 i;

    cm_data.buf_nbuffers = nbuffers;

    /* Have to be able to reserve a whole chunk */
    if (((cm_data.buf_nbuffers - 3) * cm_data.buf_blockSize) < cm_chunkSize)
        return CM_ERROR_TOOFEWBUFS;
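    /* Worked example (illustrative, hypothetical sizes): with a 4 KB
     * buf_blockSize and a 128 KB cm_chunkSize, the check above requires
     * (nbuffers - 3) * 4096 >= 131072, i.e. at least 35 buffers, so that
     * a whole chunk can always be reserved.
     */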
    /* recall for callouts */
    cm_buf_opsp = opsp;

    if (osi_Once(&once)) {
        /* initialize global locks */
        lock_InitializeRWLock(&buf_globalLock, "Global buffer lock");

        if (newFile) {
            /* remember this for those who want to reset it */
            cm_data.buf_nOrigBuffers = cm_data.buf_nbuffers;

            /* lower hash size to a prime number */
            cm_data.buf_hashSize = osi_PrimeLessThan((afs_uint32)(cm_data.buf_nbuffers / 7 + 1));
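            /* Worked example (illustrative): with a hypothetical 70,000
             * buffers, the table gets osi_PrimeLessThan(70000/7 + 1) buckets,
             * keeping the average hash chain around seven buffers long.
             */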
            /* create hash table */
            memset((void *)cm_data.buf_scacheHashTablepp, 0, cm_data.buf_hashSize * sizeof(cm_buf_t *));

            /* another hash table */
            memset((void *)cm_data.buf_fileHashTablepp, 0, cm_data.buf_hashSize * sizeof(cm_buf_t *));

            /* create buffer headers and put in free list */
            bp = cm_data.bufHeaderBaseAddress;
            data = cm_data.bufDataBaseAddress;
            cm_data.buf_allp = NULL;

            for (i = 0; i < cm_data.buf_nbuffers; i++) {
                osi_assertx(bp >= cm_data.bufHeaderBaseAddress && bp < (cm_buf_t *)cm_data.bufDataBaseAddress,
                            "invalid cm_buf_t address");
                osi_assertx(data >= cm_data.bufDataBaseAddress && data < cm_data.bufEndOfData,
                            "invalid cm_buf_t data address");

                /* allocate and zero some storage */
                memset(bp, 0, sizeof(cm_buf_t));
                bp->magic = CM_BUF_MAGIC;
                /* thread on list of all buffers */
                bp->allp = cm_data.buf_allp;
                cm_data.buf_allp = bp;

                osi_QAdd((osi_queue_t **)&cm_data.buf_freeListp, &bp->q);
                bp->flags |= CM_BUF_INLRU;
                lock_InitializeMutex(&bp->mx, "Buffer mutex");

                /* grab appropriate number of bytes from aligned zone */
                bp->datap = data;

                /* setup last buffer pointer */
                if (i == 0)
                    cm_data.buf_freeListEndp = bp;

                bp++;
                data += cm_data.buf_blockSize;
            }

            /* none reserved at first */
            cm_data.buf_reservedBufs = 0;

            /* just for safety's sake */
            cm_data.buf_maxReservedBufs = cm_data.buf_nbuffers - 3;
        } else {
            /* recovered an existing cache; reinitialize the per-buffer
             * mutexes and wait state */
            bp = cm_data.bufHeaderBaseAddress;
            data = cm_data.bufDataBaseAddress;

            for (i = 0; i < cm_data.buf_nbuffers; i++) {
                lock_InitializeMutex(&bp->mx, "Buffer mutex");
                bp->waitCount = 0;
                bp->waitRequests = 0;
                bp->flags &= ~CM_BUF_WAITING;
                bp++;
            }
        }

#ifdef TESTING
        buf_ValidateBufQueues();
#endif /* TESTING */

        /* init the buffer trace log */
        buf_logp = osi_LogCreate("buffer", 1000);
        osi_LogEnable(buf_logp);

        osi_EndOnce(&once);

        /* and create the incr-syncer */
        phandle = thrd_Create(0, 0,
                              (ThreadFunc) buf_IncrSyncer, 0, 0, &pid,
                              "buf_IncrSyncer");

        osi_assertx(phandle != NULL, "buf: can't create incremental sync proc");
        CloseHandle(phandle);
    }

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */
    return 0;
}
/* add nbuffers to the buffer pool, if possible.
 * Called with no locks held.
 */
long buf_AddBuffers(afs_uint64 nbuffers)
{
    /* The size of a virtual cache cannot be changed after it has
     * been created.  Subsequent calls to MapViewOfFile() with
     * an existing mapping object name would not allow the
     * object to be resized.  Return failure immediately.
     *
     * A similar problem now occurs with the persistent cache
     * given that the memory mapped file now contains a complex
     * data structure.
     */
    afsi_log("request to add %d buffers to the existing cache of size %d denied",
             nbuffers, cm_data.buf_nbuffers);

    return CM_ERROR_INVAL;
}
/* interface to set the number of buffers to an exact figure.
 * Called with no locks held.
 */
long buf_SetNBuffers(afs_uint64 nbuffers)
{
    if (nbuffers < 10)
        return CM_ERROR_INVAL;
    if (nbuffers == cm_data.buf_nbuffers)
        return 0;
    else if (nbuffers > cm_data.buf_nbuffers)
        return buf_AddBuffers(nbuffers - cm_data.buf_nbuffers);
    else
        return CM_ERROR_INVAL;
}
/* wait for reading or writing to clear; called with write-locked
 * buffer and unlocked scp and returns with locked buffer.
 */
void buf_WaitIO(cm_scache_t * scp, cm_buf_t *bp)
{
    int release = 0;

    if (scp)
        osi_assertx(scp->magic == CM_SCACHE_MAGIC, "invalid cm_scache_t magic");
    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");

    while (1) {
        /* if no IO is happening, we're done */
        if (!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)))
            break;

        /* otherwise I/O is happening, but some other thread is waiting for
         * the I/O already.  Wait for that guy to figure out what happened,
         * and then check again.
         */
        if (bp->flags & CM_BUF_WAITING) {
            bp->waitCount++;
            bp->waitRequests++;
            osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING already set for 0x%p", bp);
        } else {
            osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING set for 0x%p", bp);
            bp->flags |= CM_BUF_WAITING;
            bp->waitCount = bp->waitRequests = 1;
        }
        osi_SleepM((LONG_PTR)bp, &bp->mx);

        smb_UpdateServerPriority();

        lock_ObtainMutex(&bp->mx);
        osi_Log1(buf_logp, "buf_WaitIO conflict wait done for 0x%p", bp);
        bp->waitCount--;
        if (bp->waitCount == 0) {
            osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING reset for 0x%p", bp);
            bp->flags &= ~CM_BUF_WAITING;
            bp->waitRequests = 0;
        }

        if (scp == NULL) {
            if ((scp = cm_FindSCache(&bp->fid)) != NULL)
                release = 1;
        }
        if (scp) {
            lock_ObtainMutex(&scp->mx);
            if (scp->flags & CM_SCACHEFLAG_WAITING) {
                osi_Log1(buf_logp, "buf_WaitIO waking scp 0x%p", scp);
                osi_Wakeup((LONG_PTR)&scp->flags);
            }
            lock_ReleaseMutex(&scp->mx);
        }
    }

    /* if we get here, the IO is done, but we may have to wakeup people waiting for
     * the I/O to complete.  Do so.
     */
    if (bp->flags & CM_BUF_WAITING) {
        osi_Log1(buf_logp, "buf_WaitIO Waking bp 0x%p", bp);
        osi_Wakeup((LONG_PTR) bp);
    }
    osi_Log1(buf_logp, "WaitIO finished wait for bp 0x%p", bp);

    if (release)
        cm_ReleaseSCache(scp);
}
/* find a buffer, if any, for a particular file ID and offset.  Assumes
 * that buf_globalLock is held when called.
 */
cm_buf_t *buf_FindLocked(struct cm_scache *scp, osi_hyper_t *offsetp)
{
    afs_uint32 i;
    cm_buf_t *bp;

    i = BUF_HASH(&scp->fid, offsetp);
    for (bp = cm_data.buf_scacheHashTablepp[i]; bp; bp = bp->hashp) {
        if (cm_FidCmp(&scp->fid, &bp->fid) == 0
            && offsetp->LowPart == bp->offset.LowPart
            && offsetp->HighPart == bp->offset.HighPart) {
            buf_HoldLocked(bp);
            break;
        }
    }

    /* return whatever we found, if anything */
    return bp;
}

/* find a buffer with offset *offsetp for vnode *scp.  Called
 * with no locks held.
 */
cm_buf_t *buf_Find(struct cm_scache *scp, osi_hyper_t *offsetp)
{
    cm_buf_t *bp;

    lock_ObtainRead(&buf_globalLock);
    bp = buf_FindLocked(scp, offsetp);
    lock_ReleaseRead(&buf_globalLock);

    return bp;
}
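/* Illustrative usage sketch (not part of the original source): buf_Find
 * returns the buffer held (its reference count already incremented) or NULL,
 * so the caller owns exactly one release.  The helper below is hypothetical.
 */
#ifdef BUF_FIND_EXAMPLE
static void buf_ExampleLookup(cm_scache_t *scp, osi_hyper_t *offsetp)
{
    cm_buf_t *bp = buf_Find(scp, offsetp);
    if (bp) {
        /* ... use the buffer, locking bp->mx to touch its data ... */
        buf_Release(bp);
    }
}
#endif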
/* start cleaning I/O on this buffer.  Buffer must be write locked, and is returned
 * locked again.
 *
 * Makes sure that there's only one person writing this block
 * at any given time, and also ensures that the log is forced sufficiently far,
 * if this buffer contains logged data.
 *
 * Returns non-zero if the buffer was dirty.
 */
long buf_CleanAsyncLocked(cm_buf_t *bp, cm_req_t *reqp)
{
    long code = 0;
    long isdirty = 0;
    cm_scache_t * scp = NULL;
    osi_hyper_t offset;

    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");

    while ((bp->flags & CM_BUF_DIRTY) == CM_BUF_DIRTY) {
        isdirty = 1;
        lock_ReleaseMutex(&bp->mx);

        scp = cm_FindSCache(&bp->fid);
        if (scp) {
            osi_Log2(buf_logp, "buf_CleanAsyncLocked starts I/O on scp 0x%p buf 0x%p", scp, bp);

            offset = bp->offset;
            offset = LargeIntegerAdd(offset, ConvertLongToLargeInteger(bp->dirty_offset));
            code = (*cm_buf_opsp->Writep)(scp, &offset, bp->dirty_length, 0, bp->userp, reqp);
            osi_Log3(buf_logp, "buf_CleanAsyncLocked I/O on scp 0x%p buf 0x%p, done=%d", scp, bp, code);

            cm_ReleaseSCache(scp);
            scp = NULL;
        } else {
            osi_Log1(buf_logp, "buf_CleanAsyncLocked unable to start I/O - scp not found buf 0x%p", bp);
            code = CM_ERROR_NOSUCHFILE;
        }

        lock_ObtainMutex(&bp->mx);
        /* if the Write routine returns No Such File, clear the dirty flag
         * because we aren't going to be able to write this data to the file
         * server.
         */
        if (code == CM_ERROR_NOSUCHFILE || code == CM_ERROR_BADFD) {
            bp->flags &= ~CM_BUF_DIRTY;
            bp->flags |= CM_BUF_ERROR;
            bp->dirty_offset = 0;
            bp->dirty_length = 0;
            bp->error = code;
            bp->dataVersion = -1; /* bad */
        }

#ifdef DISKCACHE95
        /* Disk cache support */
        /* write buffer to disk cache (synchronous for now) */
        diskcache_Update(bp->dcp, bp->datap, cm_data.buf_blockSize, bp->dataVersion);
#endif /* DISKCACHE95 */

        /* if we get here and retries are not permitted
         * then we need to exit this loop regardless of
         * whether or not we were able to clear the dirty bit
         */
        if (reqp->flags & CM_REQ_NORETRY)
            break;
    }

    if (!(bp->flags & CM_BUF_DIRTY)) {
        /* remove buffer from dirty buffer queue */
    }

    /* do logging after call to GetLastError, or else */

    /* if someone was waiting for the I/O that just completed or failed,
     * wake them up.
     */
    if (bp->flags & CM_BUF_WAITING) {
        /* turn off flags and wakeup users */
        osi_Log1(buf_logp, "buf_WaitIO Waking bp 0x%p", bp);
        osi_Wakeup((LONG_PTR) bp);
    }
    return isdirty;
}
/* Called with a zero-ref count buffer and with the buf_globalLock write locked.
 * recycles the buffer, and leaves it ready for reuse with a ref count of 1.
 * The buffer must already be clean, and no I/O should be happening to it.
 */
void buf_Recycle(cm_buf_t *bp)
{
    afs_uint32 i;
    cm_buf_t **lbpp;
    cm_buf_t *tbp;
    cm_buf_t *prevBp, *nextBp;

    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");

    /* if we get here, we know that the buffer still has a 0 ref count,
     * and that it is clean and has no currently pending I/O.  This is
     * the dude to return.
     * Remember that as long as the ref count is 0, we know that we won't
     * have any lock conflicts, so we can grab the buffer lock out of
     * order in the locking hierarchy.
     */
    osi_Log3( buf_logp, "buf_Recycle recycles 0x%p, off 0x%x:%08x",
              bp, bp->offset.HighPart, bp->offset.LowPart);

    osi_assertx(bp->refCount == 0, "cm_buf_t refcount != 0");
    osi_assertx(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)),
                "incorrect cm_buf_t flags");
    lock_AssertWrite(&buf_globalLock);

    if (bp->flags & CM_BUF_INHASH) {
        /* Remove from hash */

        i = BUF_HASH(&bp->fid, &bp->offset);
        lbpp = &(cm_data.buf_scacheHashTablepp[i]);
        for (tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) {
            if (tbp == bp)
                break;
        }

        /* we better find it */
        osi_assertx(tbp != NULL, "buf_Recycle: hash table screwup");

        *lbpp = bp->hashp;      /* hash out */
        bp->hashp = NULL;

        /* Remove from file hash */

        i = BUF_FILEHASH(&bp->fid);
        prevBp = bp->fileHashBackp;
        bp->fileHashBackp = NULL;
        nextBp = bp->fileHashp;
        bp->fileHashp = NULL;
        if (prevBp)
            prevBp->fileHashp = nextBp;
        else
            cm_data.buf_fileHashTablepp[i] = nextBp;
        if (nextBp)
            nextBp->fileHashBackp = prevBp;

        bp->flags &= ~CM_BUF_INHASH;
    }

    /* bump the soft reference counter now, to invalidate softRefs; no
     * wakeup is required since people don't sleep waiting for this
     * counter to change.
     */

    /* make the fid unrecognizable */
    memset(&bp->fid, 0, sizeof(cm_fid_t));
}
/* recycle a buffer, removing it from the free list, hashing in its new identity
 * and returning it write-locked so that no one can use it.  Called without
 * any locks held, and can return an error if it loses the race condition and
 * finds that someone else created the desired buffer.
 *
 * If success is returned, the buffer is returned write-locked.
 *
 * May be called with null scp and offsetp, if we're just trying to reclaim some
 * space from the buffer pool.  In that case, the buffer will be returned
 * without being hashed into the hash table.
 */
long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
    cm_buf_t *bp;               /* buffer we're dealing with */
    cm_buf_t *nextBp;           /* next buffer in file hash chain */
    afs_uint32 i;               /* temp */
    cm_req_t req;

    cm_InitReq(&req);           /* just in case */

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */

    while (1) {
      retry:
        lock_ObtainRead(&scp->bufCreateLock);
        lock_ObtainWrite(&buf_globalLock);
        /* check to see if we lost the race */
        if (scp) {
            if ((bp = buf_FindLocked(scp, offsetp)) != NULL) {
                /* Do not call buf_ReleaseLocked() because we
                 * do not want to allow the buffer to be added
                 * to the free list.
                 */
                bp->refCount--;
                lock_ReleaseWrite(&buf_globalLock);
                lock_ReleaseRead(&scp->bufCreateLock);
                return CM_BUF_EXISTS;
            }
        }

        /* does this fix the problem below?  it's a simple solution. */
        if (!cm_data.buf_freeListEndp) {
            lock_ReleaseWrite(&buf_globalLock);
            lock_ReleaseRead(&scp->bufCreateLock);
            osi_Log0(afsd_logp, "buf_GetNewLocked: Free Buffer List is empty - sleeping 200ms");
            Sleep(200);
            goto retry;
        }

        /* for debugging, assert free list isn't empty, although we
         * really should try waiting for a running transaction to finish
         * instead of this; or better, we should have a transaction
         * throttler prevent us from entering this situation.
         */
        osi_assertx(cm_data.buf_freeListEndp != NULL, "buf_GetNewLocked: no free buffers");

        /* look at all buffers in free list, some of which may temp.
         * have high refcounts and which then should be skipped,
         * starting cleaning I/O for those which are dirty.  If we find
         * a clean buffer, we rehash it, lock it and return it.
         */
        for (bp = cm_data.buf_freeListEndp; bp; bp = (cm_buf_t *) osi_QPrev(&bp->q)) {
            /* check to see if it really has zero ref count.  This
             * code can bump refcounts, at least, so it may not be
             * zero.
             */
            if (bp->refCount > 0)
                continue;

            /* we don't have to lock buffer itself, since the ref
             * count is 0 and we know it will stay zero as long as
             * we hold the global lock.
             */

            /* don't recycle someone in our own chunk */
            if (!cm_FidCmp(&bp->fid, &scp->fid)
                && (bp->offset.LowPart & (-cm_chunkSize))
                   == (offsetp->LowPart & (-cm_chunkSize)))
                continue;

            /* if this page is being filled (!) or cleaned, see if
             * the I/O has completed.  If not, skip it, otherwise
             * do the final processing for the I/O.
             */
            if (bp->flags & (CM_BUF_READING | CM_BUF_WRITING)) {
                /* probably shouldn't do this much work while
                 * holding the big lock?  Watch for contention
                 * here.
                 */
                continue;
            }

            if (bp->flags & CM_BUF_DIRTY) {
                /* if the buffer is dirty, start cleaning it and
                 * move on to the next buffer.  We do this with
                 * just the lock required to minimize contention
                 * on the big lock.
                 */
                buf_HoldLocked(bp);
                lock_ReleaseWrite(&buf_globalLock);
                lock_ReleaseRead(&scp->bufCreateLock);

                /* grab required lock and clean; this only
                 * starts the I/O.  By the time we're back,
                 * it'll still be marked dirty, but it will also
                 * have the WRITING flag set, so we won't get
                 * back here.
                 */
                buf_CleanAsync(bp, &req);

                /* now put it back and go around again */
                buf_Release(bp);
                goto retry;
            }

            /* if we get here, we know that the buffer still has a 0
             * ref count, and that it is clean and has no currently
             * pending I/O.  This is the dude to return.
             * Remember that as long as the ref count is 0, we know
             * that we won't have any lock conflicts, so we can grab
             * the buffer lock out of order in the locking hierarchy.
             */
            buf_Recycle(bp);

            /* clean up junk flags */
            bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
            bp->dataVersion = -1;       /* unknown so far */

            /* now hash in as our new buffer, and give it the
             * appropriate label, if requested.
             */
            if (scp) {
                bp->flags |= CM_BUF_INHASH;
                bp->fid = scp->fid;
                bp->offset = *offsetp;
                i = BUF_HASH(&scp->fid, offsetp);
                bp->hashp = cm_data.buf_scacheHashTablepp[i];
                cm_data.buf_scacheHashTablepp[i] = bp;
                i = BUF_FILEHASH(&scp->fid);
                nextBp = cm_data.buf_fileHashTablepp[i];
                bp->fileHashp = nextBp;
                bp->fileHashBackp = NULL;
                if (nextBp)
                    nextBp->fileHashBackp = bp;
                cm_data.buf_fileHashTablepp[i] = bp;
            }

            /* we should move it from the lru queue.  It better still be there,
             * since we've held the global (big) lock since we found it there.
             */
            osi_assertx(bp->flags & CM_BUF_INLRU,
                        "buf_GetNewLocked: LRU screwup");

            if (cm_data.buf_freeListEndp == bp) {
                /* we're the last guy in this queue, so maintain it */
                cm_data.buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
            }
            osi_QRemove((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
            bp->flags &= ~CM_BUF_INLRU;

            /* grab the mutex so that people don't use it
             * before the caller fills it with data.  Again, no one
             * should have been able to get to this dude to lock it.
             */
            if (!lock_TryMutex(&bp->mx)) {
                osi_Log2(afsd_logp, "buf_GetNewLocked bp 0x%p cannot be mutex locked. refCount %d should be 0",
                         bp, bp->refCount);
                osi_panic("buf_GetNewLocked: TryMutex failed", __FILE__, __LINE__);
            }

            /* prepare to return it.  Give it a refcount */
            bp->refCount = 1;

            *bufpp = bp;
            lock_ReleaseWrite(&buf_globalLock);
            lock_ReleaseRead(&scp->bufCreateLock);

#ifdef TESTING
            buf_ValidateBufQueues();
#endif /* TESTING */
            return 0;
        } /* for all buffers in lru queue */

        lock_ReleaseWrite(&buf_globalLock);
        lock_ReleaseRead(&scp->bufCreateLock);
        osi_Log0(afsd_logp, "buf_GetNewLocked: Free Buffer List has no buffers with a zero refcount - sleeping 100ms");
        Sleep(100);             /* give some time for a buffer to be freed */
    } /* while loop over everything */
}
/* get a page, returning it held but unlocked.  Doesn't fill in the page
 * with I/O, since we're going to write the whole thing new.
 */
long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
    cm_buf_t *bp;
    long code;
    osi_hyper_t pageOffset;
    int created;

    created = 0;
    pageOffset.HighPart = offsetp->HighPart;
    pageOffset.LowPart = offsetp->LowPart & ~(cm_data.buf_blockSize-1);
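    /* Worked example (illustrative): with a hypothetical 4 KB block size,
     * an offset with LowPart 0x12345 is masked with ~0xFFF, giving
     * pageOffset.LowPart = 0x12000, the start of the enclosing page.
     */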
    while (1) {
        bp = buf_Find(scp, &pageOffset);
        if (bp) {
            /* lock it and break out */
            lock_ObtainMutex(&bp->mx);
            break;
        }

        /* otherwise, we have to create a page */
        code = buf_GetNewLocked(scp, &pageOffset, &bp);

        /* check if the buffer was created in a race condition branch.
         * If so, go around so we can hold a reference to it.
         */
        if (code == CM_BUF_EXISTS)
            continue;

        /* something else went wrong */
        if (code != 0)
            return code;

        /* otherwise, we have a locked buffer that we just created */
        created = 1;
        break;
    } /* big while loop */

    /* wait for reads */
    if (bp->flags & CM_BUF_READING)
        buf_WaitIO(scp, bp);

    /* once it has been read once, we can unlock it and return it, still
     * with its refcount held.
     */
    lock_ReleaseMutex(&bp->mx);
    *bufpp = bp;
    osi_Log4(buf_logp, "buf_GetNew returning bp 0x%p for scp 0x%p, offset 0x%x:%08x",
             bp, scp, offsetp->HighPart, offsetp->LowPart);
    return 0;
}
/* get a page, returning it held but unlocked.  Make sure it is complete */
/* The scp must be unlocked when passed to this function */
long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
{
    cm_buf_t *bp;
    long code;
    osi_hyper_t pageOffset;
    unsigned long tcount;
    int created;
#ifdef DISKCACHE95
    cm_diskcache_t *dcp;
#endif /* DISKCACHE95 */

    created = 0;
    pageOffset.HighPart = offsetp->HighPart;
    pageOffset.LowPart = offsetp->LowPart & ~(cm_data.buf_blockSize-1);
    while (1) {
#ifdef TESTING
        buf_ValidateBufQueues();
#endif /* TESTING */

        bp = buf_Find(scp, &pageOffset);
        if (bp) {
            /* lock it and break out */
            lock_ObtainMutex(&bp->mx);

#ifdef DISKCACHE95
            /* touch disk chunk to update LRU info */
            diskcache_Touch(bp->dcp);
#endif /* DISKCACHE95 */
            break;
        }

        /* otherwise, we have to create a page */
        code = buf_GetNewLocked(scp, &pageOffset, &bp);
        /* bp->mx is now held */

        /* check if the buffer was created in a race condition branch.
         * If so, go around so we can hold a reference to it.
         */
        if (code == CM_BUF_EXISTS)
            continue;

        /* something else went wrong */
        if (code != 0) {
#ifdef TESTING
            buf_ValidateBufQueues();
#endif /* TESTING */
            return code;
        }

        /* otherwise, we have a locked buffer that we just created */
        created = 1;
        break;
    } /* big while loop */

    /* if we get here, we have a locked buffer that may have just been
     * created, in which case it needs to be filled with data.
     */
    if (created) {
        /* load the page; freshly created pages should be idle */
        osi_assertx(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)), "incorrect cm_buf_t flags");

        /* start the I/O; may drop lock */
        bp->flags |= CM_BUF_READING;
        code = (*cm_buf_opsp->Readp)(bp, cm_data.buf_blockSize, &tcount, NULL);

#ifdef DISKCACHE95
        code = diskcache_Get(&bp->fid, &bp->offset, bp->datap, cm_data.buf_blockSize, &bp->dataVersion, &tcount, &dcp);
        bp->dcp = dcp;          /* pointer to disk cache struct. */
#endif /* DISKCACHE95 */

        if (code != 0) {
            /* failure or queued */
            if (code != ERROR_IO_PENDING) {
                bp->error = code;
                bp->flags |= CM_BUF_ERROR;
                bp->flags &= ~CM_BUF_READING;
                if (bp->flags & CM_BUF_WAITING) {
                    osi_Log1(buf_logp, "buf_Get Waking bp 0x%p", bp);
                    osi_Wakeup((LONG_PTR) bp);
                }
                lock_ReleaseMutex(&bp->mx);
                buf_Release(bp);
#ifdef TESTING
                buf_ValidateBufQueues();
#endif /* TESTING */
                return code;
            }
        } else {
            /* otherwise, I/O completed instantly and we're done, except
             * for padding the xfr out with 0s and checking for EOF
             */
            if (tcount < (unsigned long) cm_data.buf_blockSize) {
                memset(bp->datap+tcount, 0, cm_data.buf_blockSize - tcount);
                if (tcount == 0)
                    bp->flags |= CM_BUF_EOF;
            }
            bp->flags &= ~CM_BUF_READING;
            if (bp->flags & CM_BUF_WAITING) {
                osi_Log1(buf_logp, "buf_Get Waking bp 0x%p", bp);
                osi_Wakeup((LONG_PTR) bp);
            }
        }
    } /* if created */

    /* wait for reads, either that which we started above, or that someone
     * else started.  We don't care if we return a buffer being cleaned.
     */
    if (bp->flags & CM_BUF_READING)
        buf_WaitIO(scp, bp);

    /* once it has been read once, we can unlock it and return it, still
     * with its refcount held.
     */
    lock_ReleaseMutex(&bp->mx);
    *bufpp = bp;

    /* now remove from queue; will be put in at the head (farthest from
     * being recycled) when we're done in buf_Release.
     */
    lock_ObtainWrite(&buf_globalLock);
    if (bp->flags & CM_BUF_INLRU) {
        if (cm_data.buf_freeListEndp == bp)
            cm_data.buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
        osi_QRemove((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
        bp->flags &= ~CM_BUF_INLRU;
    }
    lock_ReleaseWrite(&buf_globalLock);

    osi_Log4(buf_logp, "buf_Get returning bp 0x%p for scp 0x%p, offset 0x%x:%08x",
             bp, scp, offsetp->HighPart, offsetp->LowPart);
#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */
    return 0;
}
/* count # of elements in the free list;
 * we don't bother doing the proper locking for accessing dataVersion or flags
 * since it is a pain, and this is really just an advisory call.  If you need
 * to do better at some point, rewrite this function.
 */
long buf_CountFreeList(void)
{
    long count;
    cm_buf_t *bufp;

    count = 0;
    lock_ObtainRead(&buf_globalLock);
    for (bufp = cm_data.buf_freeListp; bufp; bufp = (cm_buf_t *) osi_QNext(&bufp->q)) {
        /* if the buffer doesn't have an identity, or if the buffer
         * has been invalidated (by having its DV stomped upon), then
         * count it as free, since it isn't really being utilized.
         */
        if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion <= 0)
            count++;
    }
    lock_ReleaseRead(&buf_globalLock);
    return count;
}
/* clean a buffer synchronously; called with no locks held */
long buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp)
{
    long code;

    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");

    lock_ObtainMutex(&bp->mx);
    code = buf_CleanAsyncLocked(bp, reqp);
    lock_ReleaseMutex(&bp->mx);

    return code;
}
/* wait for a buffer's cleaning to finish */
void buf_CleanWait(cm_scache_t * scp, cm_buf_t *bp)
{
    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");

    lock_ObtainMutex(&bp->mx);
    if (bp->flags & CM_BUF_WRITING) {
        buf_WaitIO(scp, bp);
    }
    lock_ReleaseMutex(&bp->mx);
}
/* set the dirty flag on a buffer, and set associated write-ahead log,
 * if there is one.  Allow one to be added to a buffer, but not changed.
 *
 * The buffer must be locked before calling this routine.
 */
void buf_SetDirty(cm_buf_t *bp, afs_uint32 offset, afs_uint32 length)
{
    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
    osi_assertx(bp->refCount > 0, "cm_buf_t refcount 0");

    if (bp->flags & CM_BUF_DIRTY) {

        osi_Log1(buf_logp, "buf_SetDirty 0x%p already dirty", bp);

        if (bp->dirty_offset <= offset) {
            if (bp->dirty_offset + bp->dirty_length >= offset + length) {
                /* dirty_length remains the same */
            } else {
                bp->dirty_length = offset + length - bp->dirty_offset;
            }
        } else /* bp->dirty_offset > offset */ {
            if (bp->dirty_offset + bp->dirty_length >= offset + length) {
                bp->dirty_length = bp->dirty_offset + bp->dirty_length - offset;
            } else {
                bp->dirty_length = length;
            }
            bp->dirty_offset = offset;
        }
    } else {
        osi_Log1(buf_logp, "buf_SetDirty 0x%p", bp);

        /* set dirty bit */
        bp->flags |= CM_BUF_DIRTY;

        /* and turn off EOF flag, since it has associated data now */
        bp->flags &= ~CM_BUF_EOF;

        /* and set the dirty region */
        bp->dirty_offset = offset;
        bp->dirty_length = length;
    }

    /* and add to the dirty list.
     * we obtain a hold on the buffer for as long as it remains
     * in the list.  buffers are only removed from the list by
     * the buf_IncrSyncer function regardless of when else the
     * dirty flag might be cleared.
     *
     * This should never happen but just in case there is a bug
     * elsewhere, never add to the dirty list if the buffer is
     * already there.
     */
    lock_ObtainWrite(&buf_globalLock);
    if (bp->dirtyp == NULL && cm_data.buf_dirtyListEndp != bp) {
        buf_HoldLocked(bp);
        if (!cm_data.buf_dirtyListp) {
            cm_data.buf_dirtyListp = cm_data.buf_dirtyListEndp = bp;
        } else {
            cm_data.buf_dirtyListEndp->dirtyp = bp;
            cm_data.buf_dirtyListEndp = bp;
        }
    }
    lock_ReleaseWrite(&buf_globalLock);
}
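/* Worked example (illustrative) of the dirty-range merge in buf_SetDirty:
 * with an existing range dirty_offset=100, dirty_length=50 (bytes 100..149)
 * and a new write at offset=120, length=100 (bytes 120..219), the union is
 * bytes 100..219, so dirty_offset stays 100 and dirty_length becomes
 * 120 + 100 - 100 = 120.
 */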
/* clean all buffers, reset log pointers and invalidate all buffers.
 * Called with no locks held, and returns with same.
 *
 * This function is guaranteed to clean and remove the log ptr of all the
 * buffers that were dirty or had non-zero log ptrs before the call was
 * made.  That's sufficient to clean up any garbage left around by recovery,
 * which is all we're counting on this for; there may be newly created buffers
 * added while we're running, but that should be OK.
 *
 * In an environment where there are no transactions (artificially imposed, for
 * example, when switching the database to raw mode), this function is used to
 * make sure that all updates have been written to the disk.  In that case, we don't
 * really require that we forget the log association between pages and logs, but
 * it also doesn't hurt.  Since raw mode I/O goes through this buffer package, we don't
 * have to worry about invalidating data in the buffers.
 *
 * This function is used at the end of recovery as paranoia to get the recovered
 * database out to disk.  It removes all references to the recovery log and cleans
 * all buffers.
 */
long buf_CleanAndReset(void)
{
    afs_uint32 i;
    cm_buf_t *bp;
    cm_req_t req;

    lock_ObtainRead(&buf_globalLock);
    for (i = 0; i < cm_data.buf_hashSize; i++) {
        for (bp = cm_data.buf_scacheHashTablepp[i]; bp; bp = bp->hashp) {
            if ((bp->flags & CM_BUF_DIRTY) == CM_BUF_DIRTY) {
                buf_HoldLocked(bp);
                lock_ReleaseRead(&buf_globalLock);

                /* now no locks are held; clean buffer and go on */
                cm_InitReq(&req);
                req.flags |= CM_REQ_NORETRY;

                buf_CleanAsync(bp, &req);
                buf_CleanWait(NULL, bp);

                /* relock and release buffer */
                lock_ObtainRead(&buf_globalLock);
                buf_ReleaseLocked(bp, FALSE);
            }
        } /* over one bucket */
    } /* for loop over all hash buckets */

    lock_ReleaseRead(&buf_globalLock);

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */

    /* and we're done */
    return 0;
}
/* called without global lock being held, reserves buffers for callers
 * that need more than one held (not locked) at once.
 */
void buf_ReserveBuffers(afs_uint64 nbuffers)
{
    lock_ObtainWrite(&buf_globalLock);
    while (1) {
        if (cm_data.buf_reservedBufs + nbuffers > cm_data.buf_maxReservedBufs) {
            cm_data.buf_reserveWaiting = 1;
            osi_Log1(buf_logp, "buf_ReserveBuffers waiting for %d bufs", nbuffers);
            osi_SleepW((LONG_PTR) &cm_data.buf_reservedBufs, &buf_globalLock);
            lock_ObtainWrite(&buf_globalLock);
        } else {
            cm_data.buf_reservedBufs += nbuffers;
            break;
        }
    }
    lock_ReleaseWrite(&buf_globalLock);
}
int buf_TryReserveBuffers(afs_uint64 nbuffers)
{
    int code;

    lock_ObtainWrite(&buf_globalLock);
    if (cm_data.buf_reservedBufs + nbuffers > cm_data.buf_maxReservedBufs) {
        code = 0;
    } else {
        cm_data.buf_reservedBufs += nbuffers;
        code = 1;
    }
    lock_ReleaseWrite(&buf_globalLock);
    return code;
}
/* called without global lock held, releases reservation held by
 * buf_ReserveBuffers.
 */
void buf_UnreserveBuffers(afs_uint64 nbuffers)
{
    lock_ObtainWrite(&buf_globalLock);
    cm_data.buf_reservedBufs -= nbuffers;
    if (cm_data.buf_reserveWaiting) {
        cm_data.buf_reserveWaiting = 0;
        osi_Wakeup((LONG_PTR) &cm_data.buf_reservedBufs);
    }
    lock_ReleaseWrite(&buf_globalLock);
}
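/* Illustrative usage sketch (not part of the original source): a caller that
 * needs several buffers held at once reserves them first so concurrent
 * callers cannot exhaust the pool, then returns the reservation.  The helper
 * below is hypothetical.
 */
#ifdef BUF_RESERVE_EXAMPLE
static void buf_ExampleReservation(void)
{
    buf_ReserveBuffers(2);      /* may sleep until 2 buffers can be guaranteed */
    /* ... obtain up to 2 buffers with buf_Get or buf_GetNew here ... */
    buf_UnreserveBuffers(2);    /* wakes any thread waiting in buf_ReserveBuffers */
}
#endif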
/* truncate the buffers past sizep, zeroing out the page, if we don't
 * end on a page boundary.
 *
 * Requires cm_bufCreateLock to be write locked.
 */
long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
                  osi_hyper_t *sizep)
{
    cm_buf_t *bufp;
    cm_buf_t *nbufp;                    /* next buffer, if didRelease */
    osi_hyper_t bufEnd;
    long code;
    long bufferPos;
    afs_uint32 i;

    /* assert that cm_bufCreateLock is held in write mode */
    lock_AssertWrite(&scp->bufCreateLock);

    i = BUF_FILEHASH(&scp->fid);

    lock_ObtainRead(&buf_globalLock);
    bufp = cm_data.buf_fileHashTablepp[i];
    if (bufp == NULL) {
        lock_ReleaseRead(&buf_globalLock);
        return 0;
    }

    buf_HoldLocked(bufp);
    lock_ReleaseRead(&buf_globalLock);
    while (bufp) {
        lock_ObtainMutex(&bufp->mx);

        bufEnd.HighPart = 0;
        bufEnd.LowPart = cm_data.buf_blockSize;
        bufEnd = LargeIntegerAdd(bufEnd, bufp->offset);

        if (cm_FidCmp(&bufp->fid, &scp->fid) == 0 &&
            LargeIntegerLessThan(*sizep, bufEnd)) {
            buf_WaitIO(scp, bufp);
        }
        lock_ObtainMutex(&scp->mx);

        /* make sure we have a callback (so we have the right value for
         * the length), and wait for it to be safe to do a truncate.
         */
        code = cm_SyncOp(scp, bufp, userp, reqp, 0,
                         CM_SCACHESYNC_NEEDCALLBACK
                         | CM_SCACHESYNC_GETSTATUS
                         | CM_SCACHESYNC_SETSIZE
                         | CM_SCACHESYNC_BUFLOCKED);

        /* if we succeeded in our locking, and this applies to the right
         * file, and the truncate request overlaps the buffer either
         * totally or partially, then do something.
         */
        if (code == 0 && cm_FidCmp(&bufp->fid, &scp->fid) == 0
            && LargeIntegerLessThan(*sizep, bufEnd)) {

            /* destroy the buffer, turning off its dirty bit, if
             * we're truncating the whole buffer.  Otherwise, set
             * the dirty bit, and clear out the tail of the buffer
             * if we just overlap some.
             */
            if (LargeIntegerLessThanOrEqualTo(*sizep, bufp->offset)) {
                /* truncating the entire page */
                bufp->flags &= ~CM_BUF_DIRTY;
                bufp->dirty_offset = 0;
                bufp->dirty_length = 0;
                bufp->dataVersion = -1; /* known bad */
                bufp->dirtyCounter++;
            } else {
                /* don't set dirty, since dirty implies
                 * currently up-to-date.  Don't need to do this,
                 * since we'll update the length anyway.
                 *
                 * Zero out remainder of the page, in case we
                 * seek and write past EOF, and make this data
                 * visible again.
                 */
                bufferPos = sizep->LowPart & (cm_data.buf_blockSize - 1);
                osi_assertx(bufferPos != 0, "non-zero bufferPos");
                memset(bufp->datap + bufferPos, 0,
                       cm_data.buf_blockSize - bufferPos);
            }
        }

        cm_SyncOpDone( scp, bufp,
                       CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS
                       | CM_SCACHESYNC_SETSIZE | CM_SCACHESYNC_BUFLOCKED);

        lock_ReleaseMutex(&scp->mx);
        lock_ReleaseMutex(&bufp->mx);

        if (!code) {
            nbufp = bufp->fileHashp;
            if (nbufp)
                buf_Hold(nbufp);
        } else {
            /* This forces the loop to end and the error code
             * to be returned. */
            nbufp = NULL;
        }
        buf_Release(bufp);
        bufp = nbufp;
    }

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */

    /* done */
    return code;
}
long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
{
    long code;
    cm_buf_t *bp;               /* buffer we're hacking on */
    cm_buf_t *nbp;
    int didRelease;
    afs_uint32 i;

    i = BUF_FILEHASH(&scp->fid);

    code = 0;
    lock_ObtainRead(&buf_globalLock);
    bp = cm_data.buf_fileHashTablepp[i];
    if (bp)
        buf_HoldLocked(bp);
    lock_ReleaseRead(&buf_globalLock);

    for (; bp; bp = nbp) {
        didRelease = 0;         /* haven't released this buffer yet */

        /* clean buffer synchronously */
        if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
            lock_ObtainMutex(&bp->mx);

            /* start cleaning the buffer, and wait for it to finish */
            buf_CleanAsyncLocked(bp, reqp);
            buf_WaitIO(scp, bp);
            lock_ReleaseMutex(&bp->mx);

            code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
            if (code && code != CM_ERROR_BADFD)
                break;

            if (code == CM_ERROR_BADFD) {
                /* if the scp's FID is bad it's because we received VNOVNODE
                 * when attempting to FetchStatus before the write.  This
                 * page therefore contains data that can no longer be stored.
                 */
                lock_ObtainMutex(&bp->mx);
                bp->flags &= ~CM_BUF_DIRTY;
                bp->flags |= CM_BUF_ERROR;
                bp->error = CM_ERROR_BADFD;
                bp->dirty_offset = 0;
                bp->dirty_length = 0;
                bp->dataVersion = -1;   /* known bad */
                lock_ReleaseMutex(&bp->mx);
            }

            /* actually, we only know that buffer is clean if ref
             * count is 1, since we don't have buffer itself locked.
             */
            if (!(bp->flags & CM_BUF_DIRTY)) {
                lock_ObtainWrite(&buf_globalLock);
                if (bp->refCount == 1) {        /* bp is held above */
                    nbp = bp->fileHashp;
                    if (nbp)
                        buf_HoldLocked(nbp);
                    buf_ReleaseLocked(bp, TRUE);
                    didRelease = 1;
                    buf_Recycle(bp);
                }
                lock_ReleaseWrite(&buf_globalLock);
            }

            if (code != CM_ERROR_BADFD)
                (*cm_buf_opsp->Unstabilizep)(scp, userp);
        }

        if (!didRelease) {
            lock_ObtainRead(&buf_globalLock);
            nbp = bp->fileHashp;
            if (nbp)
                buf_HoldLocked(nbp);
            buf_ReleaseLocked(bp, FALSE);
            lock_ReleaseRead(&buf_globalLock);
        }
    } /* for loop over a bunch of buffers */

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */

    /* done */
    return code;
}
/* Must be called with scp->mx held */
long buf_ForceDataVersion(cm_scache_t * scp, afs_uint64 fromVersion, afs_uint64 toVersion)
{
    cm_buf_t * bp;
    afs_uint32 i;
    int found = 0;

    lock_AssertMutex(&scp->mx);

    i = BUF_FILEHASH(&scp->fid);

    lock_ObtainRead(&buf_globalLock);

    for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp = bp->fileHashp) {
        if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
            if (bp->dataVersion == fromVersion) {
                bp->dataVersion = toVersion;
                found = 1;
            }
        }
    }
    lock_ReleaseRead(&buf_globalLock);

    if (found)
        return 0;
    else
        return ENOENT;
}
long buf_CleanVnode(struct cm_scache *scp, cm_user_t *userp, cm_req_t *reqp)
{
    long code = 0;
    long wasDirty = 0;
    cm_buf_t *bp;               /* buffer we're hacking on */
    cm_buf_t *nbp;              /* next one */
    afs_uint32 i;

    i = BUF_FILEHASH(&scp->fid);

    lock_ObtainRead(&buf_globalLock);
    bp = cm_data.buf_fileHashTablepp[i];
    if (bp)
        buf_HoldLocked(bp);
    lock_ReleaseRead(&buf_globalLock);
    for (; bp; bp = nbp) {
        /* clean buffer synchronously */
        if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
            if (userp) {
                cm_HoldUser(userp);
                lock_ObtainMutex(&bp->mx);
                if (bp->userp)
                    cm_ReleaseUser(bp->userp);
                bp->userp = userp;
                lock_ReleaseMutex(&bp->mx);
            }
            wasDirty = buf_CleanAsync(bp, reqp);
            buf_CleanWait(scp, bp);
            lock_ObtainMutex(&bp->mx);
            if (bp->flags & CM_BUF_ERROR) {
                code = bp->error;
                if (code == 0)
                    code = -1;
            }
            lock_ReleaseMutex(&bp->mx);
        }

        lock_ObtainRead(&buf_globalLock);
        nbp = bp->fileHashp;
        if (nbp)
            buf_HoldLocked(nbp);
        buf_ReleaseLocked(bp, FALSE);
        lock_ReleaseRead(&buf_globalLock);
    } /* for loop over a bunch of buffers */

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */

    /* done */
    return code;
}
#ifdef TESTING
buf_ValidateBufQueues(void)
{
    cm_buf_t * bp, *bpb, *bpf, *bpa;
    afs_uint32 countf = 0, countb = 0, counta = 0;

    lock_ObtainRead(&buf_globalLock);
    for (bp = cm_data.buf_freeListEndp; bp; bp = (cm_buf_t *) osi_QPrev(&bp->q)) {
        if (bp->magic != CM_BUF_MAGIC)
            osi_panic("buf magic error", __FILE__, __LINE__);
        countb++;
        bpb = bp;
    }

    for (bp = cm_data.buf_freeListp; bp; bp = (cm_buf_t *) osi_QNext(&bp->q)) {
        if (bp->magic != CM_BUF_MAGIC)
            osi_panic("buf magic error", __FILE__, __LINE__);
        countf++;
        bpf = bp;
    }

    for (bp = cm_data.buf_allp; bp; bp = bp->allp) {
        if (bp->magic != CM_BUF_MAGIC)
            osi_panic("buf magic error", __FILE__, __LINE__);
        counta++;
        bpa = bp;
    }
    lock_ReleaseRead(&buf_globalLock);

    if (countb != countf)
        osi_panic("buf free list count mismatch", __FILE__, __LINE__);

    if (counta != cm_data.buf_nbuffers)
        osi_panic("buf total count mismatch", __FILE__, __LINE__);
}
#endif /* TESTING */
/* dump the contents of the buf_scacheHashTablepp. */
int cm_DumpBufHashTable(FILE *outputFile, char *cookie, int lock)
{
    int zilch;
    cm_buf_t *bp;
    char output[1024];
    afs_uint32 i;

    if (cm_data.buf_scacheHashTablepp == NULL)
        return -1;

    if (lock)
        lock_ObtainRead(&buf_globalLock);

    StringCbPrintfA(output, sizeof(output), "%s - dumping buf_HashTable - buf_hashSize=%d\r\n",
                    cookie, cm_data.buf_hashSize);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);

    for (i = 0; i < cm_data.buf_hashSize; i++)
    {
        for (bp = cm_data.buf_scacheHashTablepp[i]; bp; bp = bp->hashp)
        {
            StringCbPrintfA(output, sizeof(output),
                            "%s bp=0x%08X, hash=%d, fid (cell=%d, volume=%d, "
                            "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
                            "flags=0x%x, cmFlags=0x%x, refCount=%d\r\n",
                            cookie, (void *)bp, i, bp->fid.cell, bp->fid.volume,
                            bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
                            bp->offset.LowPart, bp->dataVersion, bp->flags,
                            bp->cmFlags, bp->refCount);
            WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
        }
    }

    StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_HashTable.\r\n", cookie);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);

    StringCbPrintfA(output, sizeof(output), "%s - dumping buf_freeListEndp\r\n", cookie);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
    for (bp = cm_data.buf_freeListEndp; bp; bp = (cm_buf_t *) osi_QPrev(&bp->q)) {
        StringCbPrintfA(output, sizeof(output),
                        "%s bp=0x%08X, fid (cell=%d, volume=%d, "
                        "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
                        "flags=0x%x, cmFlags=0x%x, refCount=%d\r\n",
                        cookie, (void *)bp, bp->fid.cell, bp->fid.volume,
                        bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
                        bp->offset.LowPart, bp->dataVersion, bp->flags,
                        bp->cmFlags, bp->refCount);
        WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
    }
    StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_FreeListEndp.\r\n", cookie);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);

    /* the dirty list is singly linked through bp->dirtyp, so it must be
     * walked from the head; buf_dirtyListEndp cannot be walked backwards
     * with osi_QPrev.
     */
    StringCbPrintfA(output, sizeof(output), "%s - dumping buf_dirtyListp\r\n", cookie);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
    for (bp = cm_data.buf_dirtyListp; bp; bp = bp->dirtyp) {
        StringCbPrintfA(output, sizeof(output),
                        "%s bp=0x%08X, fid (cell=%d, volume=%d, "
                        "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
                        "flags=0x%x, cmFlags=0x%x, refCount=%d\r\n",
                        cookie, (void *)bp, bp->fid.cell, bp->fid.volume,
                        bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
                        bp->offset.LowPart, bp->dataVersion, bp->flags,
                        bp->cmFlags, bp->refCount);
        WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
    }
    StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_dirtyListp.\r\n", cookie);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);

    if (lock)
        lock_ReleaseRead(&buf_globalLock);
    return 0;
}
void buf_ForceTrace(BOOL flush)
{
    HANDLE handle;
    int len;
    char buf[256];

    if (!buf_logp)
        return;

    len = GetTempPath(sizeof(buf)-10, buf);
    StringCbCopyA(&buf[len], sizeof(buf)-len, "/afs-buffer.log");
    handle = CreateFile(buf, GENERIC_WRITE, FILE_SHARE_READ,
                        NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
    if (handle == INVALID_HANDLE_VALUE) {
        osi_panic("Cannot create log file", __FILE__, __LINE__);
    }
    osi_LogPrint(buf_logp, handle);
    if (flush)
        FlushFileBuffers(handle);
    CloseHandle(handle);
}
long buf_DirtyBuffersExist(cm_fid_t *fidp)
{
    cm_buf_t *bp;
    afs_uint32 bcount = 0;
    afs_uint32 i;

    i = BUF_FILEHASH(fidp);

    /* walk the file hash chain via fileHashp, not allp; allp links every
     * buffer in the cache, not just those hashed to this file.
     */
    for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp = bp->fileHashp, bcount++) {
        if (!cm_FidCmp(fidp, &bp->fid) && (bp->flags & CM_BUF_DIRTY))
            return 1;
    }
    return 0;
}
long buf_CleanDirtyBuffers(cm_scache_t *scp)
{
    cm_buf_t *bp;
    afs_uint32 bcount = 0;
    cm_fid_t * fidp = &scp->fid;

    for (bp = cm_data.buf_allp; bp; bp = bp->allp, bcount++) {
        if (!cm_FidCmp(fidp, &bp->fid) && (bp->flags & CM_BUF_DIRTY)) {
            buf_Hold(bp);
            lock_ObtainMutex(&bp->mx);
            bp->cmFlags &= ~CM_BUF_CMSTORING;
            bp->flags &= ~CM_BUF_DIRTY;
            bp->dirty_offset = 0;
            bp->dirty_length = 0;
            bp->flags |= CM_BUF_ERROR;
            bp->error = VNOVNODE;
            bp->dataVersion = -1;       /* bad */

            if (bp->flags & CM_BUF_WAITING) {
                osi_Log2(buf_logp, "BUF CleanDirtyBuffers Waking [scp 0x%p] bp 0x%p", scp, bp);
                osi_Wakeup((LONG_PTR) bp);
            }
            lock_ReleaseMutex(&bp->mx);
            buf_Release(bp);
        }
    }
    return 0;
}