2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 /* Copyright (C) 1994 Cazamar Systems, Inc. */
12 #include <afsconfig.h>
13 #include <afs/param.h>
26 #include "cm_memmap.h"
29 #define TRACE_BUFFER 1
32 extern void afsi_log(char *pattern, ...);
34 /* This module implements the buffer package used by the local transaction
35 * system (cm). It is initialized by calling cm_Init, which calls buf_Init;
36 * it must be initialized before any of its main routines are called.
38 * Each buffer is hashed into a hash table by file ID and offset, and if its
39 * reference count is zero, it is also in a free list.
41 * There are two locks involved in buffer processing. The global lock
42 * buf_globalLock protects all of the global variables defined in this module,
43 * the reference counts and hash pointers in the actual cm_buf_t structures,
44 * and the LRU queue pointers in the buffer structures.
46 * The mutexes in the buffer structures protect the remaining fields in the
47 * buffers, as well as the data itself.
49 * The locking hierarchy here is this:
51 * - resv multiple simul. buffers reservation
52 * - lock buffer I/O flags
53 * - lock buffer's mutex
54 * - lock buf_globalLock
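/* Illustrative sketch only (not code from this module): a caller needing
 * several of these locks takes them strictly in the order above and
 * releases them in reverse, e.g.:
 *
 *     buf_ReserveBuffers(1);                  // resv (taken first)
 *     lock_ObtainMutex(&bp->mx);              // buffer's mutex
 *     lock_ObtainWrite(&buf_globalLock);      // buf_globalLock (taken last)
 *     ...
 *     lock_ReleaseWrite(&buf_globalLock);
 *     lock_ReleaseMutex(&bp->mx);
 *     buf_UnreserveBuffers(1);
 */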
58 /* global debugging log */
59 osi_log_t *buf_logp = NULL;
61 /* Global lock protecting hash tables and free lists */
62 osi_rwlock_t buf_globalLock;
64 /* ptr to head of the free list (most recently used) and the
65 * tail (the guy to remove first). We use osi_Q* functions
66 * to put stuff in buf_freeListp, and maintain the end pointer manually.
70 /* a pointer to a list of all buffers, just so that we can find them
71 * easily for debugging, and for the incr syncer. Locked under the buf_globalLock.
75 /* defaults setup; these variables may be manually assigned
76 * before calling cm_Init, as a way of changing these defaults.
79 /* callouts for reading and writing data, etc */
80 cm_buf_ops_t *cm_buf_opsp;
83 /* for experimental disk caching support in Win95 client */
84 cm_buf_t *buf_diskFreeListp;
85 cm_buf_t *buf_diskFreeListEndp;
86 cm_buf_t *buf_diskAllp;
87 extern int cm_diskCacheEnabled;
88 #endif /* DISKCACHE95 */
90 /* set this to 1 when we are terminating to prevent access attempts */
91 static int buf_ShutdownFlag = 0;
94 void buf_HoldLockedDbg(cm_buf_t *bp, char *file, long line)
96 void buf_HoldLocked(cm_buf_t *bp)
101 osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
102 refCount = InterlockedIncrement(&bp->refCount);
103 #ifdef DEBUG_REFCOUNT
104 osi_Log2(afsd_logp,"buf_HoldLocked bp 0x%p ref %d",bp, refCount);
105 afsi_log("%s:%d buf_HoldLocked bp 0x%p, ref %d", file, line, bp, refCount);
109 /* hold a reference to an already held buffer */
110 #ifdef DEBUG_REFCOUNT
111 void buf_HoldDbg(cm_buf_t *bp, char *file, long line)
113 void buf_Hold(cm_buf_t *bp)
118 lock_ObtainRead(&buf_globalLock);
119 osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
120 refCount = InterlockedIncrement(&bp->refCount);
121 #ifdef DEBUG_REFCOUNT
122 osi_Log2(afsd_logp,"buf_Hold bp 0x%p ref %d",bp, refCount);
123 afsi_log("%s:%d buf_Hold bp 0x%p, ref %d", file, line, bp, refCount);
125 lock_ReleaseRead(&buf_globalLock);
128 /* code to drop reference count while holding buf_globalLock */
129 #ifdef DEBUG_REFCOUNT
130 void buf_ReleaseLockedDbg(cm_buf_t *bp, afs_uint32 writeLocked, char *file, long line)
132 void buf_ReleaseLocked(cm_buf_t *bp, afs_uint32 writeLocked)
138 lock_AssertWrite(&buf_globalLock);
140 lock_AssertRead(&buf_globalLock);
142 /* ensure that we're in the LRU queue if our ref count is 0 */
143 osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
145 refCount = InterlockedDecrement(&bp->refCount);
146 #ifdef DEBUG_REFCOUNT
147 osi_Log3(afsd_logp,"buf_ReleaseLocked %s bp 0x%p ref %d",writeLocked?"write":"read", bp, refCount);
148 afsi_log("%s:%d buf_ReleaseLocked %s bp 0x%p, ref %d", file, line, writeLocked?"write":"read", bp, refCount);
152 osi_panic("buf refcount 0",__FILE__,__LINE__);
154 osi_assertx(refCount >= 0, "cm_buf_t refCount < 0");
158 * If we are read locked there could be a race condition
159 * with buf_Find() so we must obtain a write lock and
160 * double check that the refCount is actually zero
161 * before we remove the buffer from the LRU queue.
164 lock_ConvertRToW(&buf_globalLock);
166 if (bp->refCount == 0 &&
167 !(bp->qFlags & CM_BUF_QINLRU)) {
168 osi_QAdd((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
170 /* watch for transition from empty to one element */
171 if (!cm_data.buf_freeListEndp)
172 cm_data.buf_freeListEndp = cm_data.buf_freeListp;
173 bp->qFlags |= CM_BUF_QINLRU;
177 lock_ConvertWToR(&buf_globalLock);
181 /* release a buffer. Buffer must be referenced, but unlocked. */
182 #ifdef DEBUG_REFCOUNT
183 void buf_ReleaseDbg(cm_buf_t *bp, char *file, long line)
185 void buf_Release(cm_buf_t *bp)
190 /* ensure that we're in the LRU queue if our ref count is 0 */
191 osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
193 refCount = InterlockedDecrement(&bp->refCount);
194 #ifdef DEBUG_REFCOUNT
195 osi_Log2(afsd_logp,"buf_Release bp 0x%p ref %d", bp, refCount);
196 afsi_log("%s:%d buf_Release bp 0x%p, ref %d", file, line, bp, refCount);
200 osi_panic("buf refcount 0",__FILE__,__LINE__);
202 osi_assertx(refCount >= 0, "cm_buf_t refCount < 0");
205 lock_ObtainWrite(&buf_globalLock);
206 if (bp->refCount == 0 &&
207 !(bp->qFlags & CM_BUF_QINLRU)) {
208 osi_QAdd((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
210 /* watch for transition from empty to one element */
211 if (!cm_data.buf_freeListEndp)
212 cm_data.buf_freeListEndp = cm_data.buf_freeListp;
213 bp->qFlags |= CM_BUF_QINLRU;
215 lock_ReleaseWrite(&buf_globalLock);
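/* Usage sketch: any caller that keeps a buffer pointer across code that
 * does not hold buf_globalLock brackets the use with a reference, which
 * keeps the buffer off the free list and safe from recycling:
 *
 *     buf_Hold(bp);               // refCount > 0: bp cannot be recycled
 *     ... use bp->datap under bp->mx ...
 *     buf_Release(bp);            // at refCount 0, bp goes back on the LRU
 */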
220 buf_Sync(int quitOnShutdown)
222 cm_buf_t **bpp, *bp, *prevbp;
223 afs_uint32 wasDirty = 0;
226 /* go through all of the dirty buffers */
227 lock_ObtainRead(&buf_globalLock);
228 for (bpp = &cm_data.buf_dirtyListp, prevbp = NULL; bp = *bpp; ) {
229 if (quitOnShutdown && buf_ShutdownFlag)
232 lock_ReleaseRead(&buf_globalLock);
233 /* all dirty buffers are held when they are added to the
234 * dirty list. No need for an additional hold.
236 lock_ObtainMutex(&bp->mx);
238 if (bp->flags & CM_BUF_DIRTY && !(bp->qFlags & CM_BUF_QREDIR)) {
239 /* start cleaning the buffer; don't touch log pages since
240 * the log code counts on knowing exactly who is writing
241 * a log page at any given instant.
243 * only attempt to write the buffer if the volume might
249 volp = cm_GetVolumeByFID(&bp->fid);
250 switch (cm_GetVolumeStatus(volp, bp->fid.volume)) {
254 req.flags |= CM_REQ_NORETRY;
255 buf_CleanAsyncLocked(NULL, bp, &req, 0, &dirty);
261 /* the buffer may or may not have been dirty
262 * and if dirty may or may not have been cleaned
263 * successfully. check the dirty flag again.
265 if (!(bp->flags & CM_BUF_DIRTY)) {
266 /* remove the buffer from the dirty list */
267 lock_ObtainWrite(&buf_globalLock);
268 #ifdef DEBUG_REFCOUNT
269 if (bp->dirtyp == NULL && bp != cm_data.buf_dirtyListEndp) {
270 osi_Log1(afsd_logp,"buf_Sync bp 0x%p list corruption",bp);
271 afsi_log("buf_Sync bp 0x%p list corruption", bp);
276 bp->qFlags &= ~CM_BUF_QINDL;
277 if (cm_data.buf_dirtyListp == NULL)
278 cm_data.buf_dirtyListEndp = NULL;
279 else if (cm_data.buf_dirtyListEndp == bp)
280 cm_data.buf_dirtyListEndp = prevbp;
281 buf_ReleaseLocked(bp, TRUE);
282 lock_ConvertWToR(&buf_globalLock);
284 if (buf_ShutdownFlag) {
287 char volstr[VL_MAXNAMELEN+12]="";
290 volp = cm_GetVolumeByFID(&bp->fid);
293 if (bp->fid.volume == volp->vol[RWVOL].ID)
295 else if (bp->fid.volume == volp->vol[ROVOL].ID)
297 else if (bp->fid.volume == volp->vol[BACKVOL].ID)
301 snprintf(volstr, sizeof(volstr), "%s%s", volp->namep, ext);
303 cellp = cm_FindCellByID(bp->fid.cell, CM_FLAG_NOPROBE);
304 snprintf(volstr, sizeof(volstr), "%u", bp->fid.volume);
307 LogEvent(EVENTLOG_INFORMATION_TYPE, MSG_DIRTY_BUFFER_AT_SHUTDOWN,
308 cellp->name, volstr, bp->fid.vnode, bp->fid.unique,
309 bp->offset.QuadPart+bp->dirty_offset, bp->dirty_length);
312 /* advance the pointer so we don't loop forever */
313 lock_ObtainRead(&buf_globalLock);
317 lock_ReleaseMutex(&bp->mx);
318 } /* for loop over a bunch of buffers */
319 lock_ReleaseRead(&buf_globalLock);
324 /* incremental sync daemon. Writes all dirty buffers every 5000 ms */
325 void buf_IncrSyncer(long parm)
330 while (buf_ShutdownFlag == 0) {
333 i = SleepEx(5000, 1);
340 wasDirty = buf_Sync(1);
341 } /* whole daemon's while loop */
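/* Because the syncer sleeps in an alertable SleepEx(), it can be woken
 * early with a user-mode APC. A sketch, assuming the creator kept an open
 * handle to the syncer thread (this module closes its handle after
 * creation, so hThread here is hypothetical):
 *
 *     static void CALLBACK buf_NullAPC(ULONG_PTR unused) { }
 *     ...
 *     QueueUserAPC(buf_NullAPC, hThread, 0);  // SleepEx returns WAIT_IO_COMPLETION
 */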
345 buf_ValidateBuffers(void)
347 cm_buf_t * bp, *bpf, *bpa, *bpb;
348 afs_uint64 countb = 0, countf = 0, counta = 0;
350 if ((cm_data.buf_freeListp == NULL && cm_data.buf_freeListEndp != NULL) ||
351 (cm_data.buf_freeListp != NULL && cm_data.buf_freeListEndp == NULL)) {
352 afsi_log("cm_ValidateBuffers failure: inconsistent free list pointers");
353 fprintf(stderr, "cm_ValidateBuffers failure: inconsistent free list pointers\n");
357 for (bp = cm_data.buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
358 if (bp->magic != CM_BUF_MAGIC) {
359 afsi_log("cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
360 fprintf(stderr, "cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
366 if (countb > cm_data.buf_nbuffers) {
367 afsi_log("cm_ValidateBuffers failure: countb > cm_data.buf_nbuffers");
368 fprintf(stderr, "cm_ValidateBuffers failure: countb > cm_data.buf_nbuffers\n");
373 for (bp = cm_data.buf_freeListp; bp; bp=(cm_buf_t *) osi_QNext(&bp->q)) {
374 if (bp->magic != CM_BUF_MAGIC) {
375 afsi_log("cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
376 fprintf(stderr, "cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
382 if (countf > cm_data.buf_nbuffers) {
383 afsi_log("cm_ValidateBuffers failure: countf > cm_data.buf_nbuffers");
384 fprintf(stderr, "cm_ValidateBuffers failure: countf > cm_data.buf_nbuffers\n");
389 for (bp = cm_data.buf_allp; bp; bp=bp->allp) {
390 if (bp->magic != CM_BUF_MAGIC) {
391 afsi_log("cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
392 fprintf(stderr, "cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
398 if (counta > cm_data.buf_nbuffers) {
399 afsi_log("cm_ValidateBuffers failure: counta > cm_data.buf_nbuffers");
400 fprintf(stderr, "cm_ValidateBuffers failure: counta > cm_data.buf_nbuffers\n");
405 if (countb != countf) {
406 afsi_log("cm_ValidateBuffers failure: countb != countf");
407 fprintf(stderr, "cm_ValidateBuffers failure: countb != countf\n");
411 if (counta != cm_data.buf_nbuffers) {
412 afsi_log("cm_ValidateBuffers failure: counta != cm_data.buf_nbuffers");
413 fprintf(stderr, "cm_ValidateBuffers failure: counta != cm_data.buf_nbuffers\n");
420 void buf_Shutdown(void)
422 /* disable the buf_IncrSyncer() threads */
423 buf_ShutdownFlag = 1;
425 /* then force all dirty buffers to the file servers */
429 /* initialize the buffer package; called with no locks
430 * held during the initialization phase.
432 long buf_Init(int newFile, cm_buf_ops_t *opsp, afs_uint64 nbuffers)
434 static osi_once_t once;
443 cm_data.buf_nbuffers = nbuffers;
445 /* Have to be able to reserve a whole chunk */
446 if (((cm_data.buf_nbuffers - 3) * cm_data.buf_blockSize) < cm_chunkSize)
447 return CM_ERROR_TOOFEWBUFS;
450 /* recall for callouts */
453 if (osi_Once(&once)) {
454 /* initialize global locks */
455 lock_InitializeRWLock(&buf_globalLock, "Global buffer lock", LOCK_HIERARCHY_BUF_GLOBAL);
458 /* remember this for those who want to reset it */
459 cm_data.buf_nOrigBuffers = cm_data.buf_nbuffers;
461 /* lower hash size to a prime number */
462 cm_data.buf_hashSize = osi_PrimeLessThan((afs_uint32)(cm_data.buf_nbuffers/7 + 1));
464 /* create hash table */
465 memset((void *)cm_data.buf_scacheHashTablepp, 0, cm_data.buf_hashSize * sizeof(cm_buf_t *));
467 /* another hash table */
468 memset((void *)cm_data.buf_fileHashTablepp, 0, cm_data.buf_hashSize * sizeof(cm_buf_t *));
470 /* create buffer headers and put in free list */
471 bp = cm_data.bufHeaderBaseAddress;
472 data = cm_data.bufDataBaseAddress;
473 cm_data.buf_allp = NULL;
475 for (i=0; i<cm_data.buf_nbuffers; i++) {
476 osi_assertx(bp >= cm_data.bufHeaderBaseAddress && bp < (cm_buf_t *)cm_data.bufDataBaseAddress,
477 "invalid cm_buf_t address");
478 osi_assertx(data >= cm_data.bufDataBaseAddress && data < cm_data.bufEndOfData,
479 "invalid cm_buf_t data address");
481 /* allocate and zero some storage */
482 memset(bp, 0, sizeof(cm_buf_t));
483 bp->magic = CM_BUF_MAGIC;
484 /* thread on list of all buffers */
485 bp->allp = cm_data.buf_allp;
486 cm_data.buf_allp = bp;
488 osi_QAdd((osi_queue_t **)&cm_data.buf_freeListp, &bp->q);
489 bp->qFlags |= CM_BUF_QINLRU;
490 lock_InitializeMutex(&bp->mx, "Buffer mutex", LOCK_HIERARCHY_BUFFER);
492 /* grab appropriate number of bytes from aligned zone */
495 /* setup last buffer pointer */
497 cm_data.buf_freeListEndp = bp;
501 data += cm_data.buf_blockSize;
504 /* none reserved at first */
505 cm_data.buf_reservedBufs = 0;
507 /* just for safety's sake */
508 cm_data.buf_maxReservedBufs = cm_data.buf_nbuffers - 3;
510 bp = cm_data.bufHeaderBaseAddress;
511 data = cm_data.bufDataBaseAddress;
513 for (i=0; i<cm_data.buf_nbuffers; i++) {
514 lock_InitializeMutex(&bp->mx, "Buffer mutex", LOCK_HIERARCHY_BUFFER);
517 bp->waitRequests = 0;
518 bp->flags &= ~CM_BUF_WAITING;
524 buf_ValidateBufQueues();
528 /* init the buffer trace log */
529 buf_logp = osi_LogCreate("buffer", 1000);
530 osi_LogEnable(buf_logp);
535 /* and create the incr-syncer */
536 phandle = thrd_Create(0, 0,
537 (ThreadFunc) buf_IncrSyncer, 0, 0, &pid,
540 osi_assertx(phandle != NULL, "buf: can't create incremental sync proc");
541 CloseHandle(phandle);
545 buf_ValidateBufQueues();
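/* Sketch of how a caller wires up the package; the callout names here are
 * hypothetical, and in the real client cm_Init performs the equivalent:
 *
 *     static cm_buf_ops_t ops;
 *     long code;
 *
 *     ops.Readp = my_ReadData;           // fetch one block from the server
 *     ops.Writep = my_WriteData;         // store a dirty range to the server
 *     ops.Stabilizep = my_Stabilize;     // quiesce an scp before a flush
 *     ops.Unstabilizep = my_Unstabilize;
 *     code = buf_Init(newFile, &ops, nbuffers);
 *     if (code == CM_ERROR_TOOFEWBUFS)
 *         ...nbuffers cannot hold even one chunk...
 */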
550 /* add nbuffers to the buffer pool, if possible.
551 * Called with no locks held.
553 long buf_AddBuffers(afs_uint64 nbuffers)
555 /* The size of a virtual cache cannot be changed after it has
556 * been created. Subsequent calls to MapViewofFile() with
557 * an existing mapping object name would not allow the
558 * object to be resized. Return failure immediately.
560 * A similar problem now occurs with the persistent cache
561 * given that the memory mapped file now contains a complex
564 afsi_log("request to add %I64d buffers to the existing cache of size %I64d denied",
565 nbuffers, cm_data.buf_nbuffers);
567 return CM_ERROR_INVAL;
570 /* interface to set the number of buffers to an exact figure.
571 * Called with no locks held.
573 long buf_SetNBuffers(afs_uint64 nbuffers)
576 return CM_ERROR_INVAL;
577 if (nbuffers == cm_data.buf_nbuffers)
579 else if (nbuffers > cm_data.buf_nbuffers)
580 return buf_AddBuffers(nbuffers - cm_data.buf_nbuffers);
582 return CM_ERROR_INVAL;
585 /* wait for reading or writing to clear; called with write-locked
586 * buffer and unlocked scp and returns with locked buffer.
588 void buf_WaitIO(cm_scache_t * scp, cm_buf_t *bp)
593 osi_assertx(scp->magic == CM_SCACHE_MAGIC, "invalid cm_scache_t magic");
594 osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
597 /* if no IO is happening, we're done */
598 if (!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)))
601 /* otherwise I/O is happening, but some other thread is waiting for
602 * the I/O already. Wait for that guy to figure out what happened,
603 * and then check again.
605 if ( bp->flags & CM_BUF_WAITING ) {
608 osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING already set for 0x%p", bp);
610 osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING set for 0x%p", bp);
611 bp->flags |= CM_BUF_WAITING;
612 bp->waitCount = bp->waitRequests = 1;
614 osi_SleepM((LONG_PTR)bp, &bp->mx);
616 cm_UpdateServerPriority();
618 lock_ObtainMutex(&bp->mx);
619 osi_Log1(buf_logp, "buf_WaitIO conflict wait done for 0x%p", bp);
621 if (bp->waitCount == 0) {
622 osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING reset for 0x%p", bp);
623 bp->flags &= ~CM_BUF_WAITING;
624 bp->waitRequests = 0;
628 if (scp = cm_FindSCache(&bp->fid))
632 lock_ObtainRead(&scp->rw);
633 if (scp->flags & CM_SCACHEFLAG_WAITING) {
634 osi_Log1(buf_logp, "buf_WaitIO waking scp 0x%p", scp);
635 osi_Wakeup((LONG_PTR)&scp->flags);
637 lock_ReleaseRead(&scp->rw);
641 /* if we get here, the I/O is done, but we may have to wake up people waiting for
642 * the I/O to complete. Do so.
644 if (bp->flags & CM_BUF_WAITING) {
645 osi_Log1(buf_logp, "buf_WaitIO Waking bp 0x%p", bp);
646 osi_Wakeup((LONG_PTR) bp);
648 osi_Log1(buf_logp, "WaitIO finished wait for bp 0x%p", bp);
651 cm_ReleaseSCache(scp);
654 /* find a buffer, if any, for a particular file ID and offset. Assumes
655 * that buf_globalLock is write locked when called.
657 cm_buf_t *buf_FindLocked(struct cm_scache *scp, osi_hyper_t *offsetp)
662 i = BUF_HASH(&scp->fid, offsetp);
663 for(bp = cm_data.buf_scacheHashTablepp[i]; bp; bp=bp->hashp) {
664 if (cm_FidCmp(&scp->fid, &bp->fid) == 0
665 && offsetp->LowPart == bp->offset.LowPart
666 && offsetp->HighPart == bp->offset.HighPart) {
672 /* return whatever we found, if anything */
676 /* find a buffer with offset *offsetp for vnode *scp. Called
677 * with no locks held.
679 cm_buf_t *buf_Find(struct cm_scache *scp, osi_hyper_t *offsetp)
683 lock_ObtainRead(&buf_globalLock);
684 bp = buf_FindLocked(scp, offsetp);
685 lock_ReleaseRead(&buf_globalLock);
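/* Lookup sketch: buf_Find returns the buffer with a reference already
 * held (taken under buf_globalLock in buf_FindLocked), so the caller
 * pairs it with buf_Release:
 *
 *     bp = buf_Find(scp, &pageOffset);
 *     if (bp) {
 *         lock_ObtainMutex(&bp->mx);
 *         ... examine or modify the buffer ...
 *         lock_ReleaseMutex(&bp->mx);
 *         buf_Release(bp);
 *     }
 */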
690 /* find a buffer, if any, for a particular file ID and offset. Assumes
691 * that buf_globalLock is write locked when called. Uses the all buffer
694 cm_buf_t *buf_FindAllLocked(struct cm_scache *scp, osi_hyper_t *offsetp, afs_uint32 flags)
699 for(bp = cm_data.buf_allp; bp; bp=bp->allp) {
700 if (cm_FidCmp(&scp->fid, &bp->fid) == 0
701 && offsetp->LowPart == bp->offset.LowPart
702 && offsetp->HighPart == bp->offset.HighPart) {
708 for(bp = cm_data.buf_allp; bp; bp=bp->allp) {
709 if (cm_FidCmp(&scp->fid, &bp->fid) == 0) {
712 fileOffset = offsetp->QuadPart + cm_data.baseAddress;
713 if (fileOffset == bp->datap) {
720 /* return whatever we found, if anything */
724 /* find a buffer with offset *offsetp for vnode *scp. Called
725 * with no locks held. Use the all buffer list.
727 cm_buf_t *buf_FindAll(struct cm_scache *scp, osi_hyper_t *offsetp, afs_uint32 flags)
731 lock_ObtainRead(&buf_globalLock);
732 bp = buf_FindAllLocked(scp, offsetp, flags);
733 lock_ReleaseRead(&buf_globalLock);
738 /* start cleaning I/O on this buffer. Buffer must be write locked, and is returned
741 * Makes sure that there's only one person writing this block
742 * at any given time, and also ensures that the log is forced sufficiently far,
743 * if this buffer contains logged data.
745 * Returns non-zero if the buffer was dirty.
747 afs_uint32 buf_CleanAsyncLocked(cm_scache_t *scp, cm_buf_t *bp, cm_req_t *reqp,
748 afs_uint32 flags, afs_uint32 *pisdirty)
751 afs_uint32 isdirty = 0;
755 osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
757 if (scp = cm_FindSCache(&bp->fid))
761 osi_Log1(buf_logp, "buf_CleanAsyncLocked unable to start I/O - scp not found buf 0x%p", bp);
762 code = CM_ERROR_NOSUCHFILE;
765 while ((bp->flags & CM_BUF_DIRTY) == CM_BUF_DIRTY) {
767 lock_ReleaseMutex(&bp->mx);
769 osi_Log2(buf_logp, "buf_CleanAsyncLocked starts I/O on scp 0x%p buf 0x%p", scp, bp);
772 LargeIntegerAdd(offset, ConvertLongToLargeInteger(bp->dirty_offset));
773 code = (*cm_buf_opsp->Writep)(scp, &offset,
775 /* we might as well try to write all of the contiguous
776 * dirty buffers in one RPC
782 flags, bp->userp, reqp);
783 osi_Log3(buf_logp, "buf_CleanAsyncLocked I/O on scp 0x%p buf 0x%p, done=%d", scp, bp, code);
785 lock_ObtainMutex(&bp->mx);
786 /* if the Write routine returns No Such File, clear the dirty flag
787 * because we aren't going to be able to write this data to the file server.
790 if (code == CM_ERROR_NOSUCHFILE || code == CM_ERROR_BADFD || code == CM_ERROR_NOACCESS ||
791 code == CM_ERROR_QUOTA || code == CM_ERROR_SPACE || code == CM_ERROR_TOOBIG ||
792 code == CM_ERROR_READONLY || code == CM_ERROR_NOSUCHPATH){
793 bp->flags &= ~CM_BUF_DIRTY;
794 bp->flags |= CM_BUF_ERROR;
795 bp->dirty_offset = 0;
796 bp->dirty_length = 0;
798 bp->dataVersion = CM_BUF_VERSION_BAD;
804 /* Disk cache support */
805 /* write buffer to disk cache (synchronous for now) */
806 diskcache_Update(bp->dcp, bp->datap, cm_data.buf_blockSize, bp->dataVersion);
807 #endif /* DISKCACHE95 */
809 /* if we get here and retries are not permitted
810 * then we need to exit this loop regardless of
811 * whether or not we were able to clear the dirty bit
813 if (reqp->flags & CM_REQ_NORETRY)
816 /* Ditto if the hardDeadTimeout or idleTimeout was reached */
817 if (code == CM_ERROR_TIMEDOUT || code == CM_ERROR_ALLDOWN ||
818 code == CM_ERROR_ALLBUSY || code == CM_ERROR_ALLOFFLINE ||
819 code == CM_ERROR_CLOCKSKEW) {
825 cm_ReleaseSCache(scp);
827 /* if someone was waiting for the I/O that just completed or failed, wake them up.
830 if (bp->flags & CM_BUF_WAITING) {
831 /* turn off flags and wakeup users */
832 osi_Log1(buf_logp, "buf_WaitIO Waking bp 0x%p", bp);
833 osi_Wakeup((LONG_PTR) bp);
842 /* Called with a zero-ref count buffer and with the buf_globalLock write locked.
843 * Recycles the buffer, and leaves it ready for reuse with a ref count of 1.
844 * The buffer must already be clean, and no I/O should be happening to it.
846 void buf_Recycle(cm_buf_t *bp)
851 cm_buf_t *prevBp, *nextBp;
853 osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
855 /* if we get here, we know that the buffer still has a 0 ref count,
856 * and that it is clean and has no currently pending I/O. This is
857 * the dude to return.
858 * Remember that as long as the ref count is 0, we know that we won't
859 * have any lock conflicts, so we can grab the buffer lock out of
860 * order in the locking hierarchy.
862 osi_Log3( buf_logp, "buf_Recycle recycles 0x%p, off 0x%x:%08x",
863 bp, bp->offset.HighPart, bp->offset.LowPart);
865 osi_assertx(bp->refCount == 0, "cm_buf_t refcount != 0");
866 osi_assertx(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)),
867 "incorrect cm_buf_t flags");
868 lock_AssertWrite(&buf_globalLock);
870 if (bp->qFlags & CM_BUF_QINHASH) {
871 /* Remove from hash */
873 i = BUF_HASH(&bp->fid, &bp->offset);
874 lbpp = &(cm_data.buf_scacheHashTablepp[i]);
875 for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) {
880 /* we better find it */
881 osi_assertx(tbp != NULL, "buf_Recycle: hash table screwup");
883 *lbpp = bp->hashp; /* hash out */
886 /* Remove from file hash */
888 i = BUF_FILEHASH(&bp->fid);
889 prevBp = bp->fileHashBackp;
890 bp->fileHashBackp = NULL;
891 nextBp = bp->fileHashp;
892 bp->fileHashp = NULL;
894 prevBp->fileHashp = nextBp;
896 cm_data.buf_fileHashTablepp[i] = nextBp;
898 nextBp->fileHashBackp = prevBp;
900 bp->qFlags &= ~CM_BUF_QINHASH;
903 /* make the fid unrecognizable */
904 memset(&bp->fid, 0, sizeof(cm_fid_t));
906 /* clean up junk flags */
907 bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
908 bp->dataVersion = CM_BUF_VERSION_BAD; /* unknown so far */
911 /* recycle a buffer, removing it from the free list, hashing in its new identity
912 * and returning it write-locked so that no one can use it. Called without
913 * any locks held, and can return an error if it loses the race and
914 * finds that someone else created the desired buffer.
916 * If success is returned, the buffer is returned write-locked.
918 * May be called with null scp and offsetp, if we're just trying to reclaim some
919 * space from the buffer pool. In that case, the buffer will be returned
920 * without being hashed into the hash table.
922 long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf_t **bufpp)
924 cm_buf_t *bp; /* buffer we're dealing with */
925 cm_buf_t *nextBp; /* next buffer in file hash chain */
926 afs_uint32 i; /* temp */
929 buf_ValidateBufQueues();
934 lock_ObtainRead(&scp->bufCreateLock);
935 lock_ObtainWrite(&buf_globalLock);
936 /* check to see if we lost the race */
938 if (bp = buf_FindLocked(scp, offsetp)) {
939 /* Do not call buf_ReleaseLocked() because we
940 * do not want to allow the buffer to be added
943 afs_int32 refCount = InterlockedDecrement(&bp->refCount);
944 #ifdef DEBUG_REFCOUNT
945 osi_Log2(afsd_logp,"buf_GetNewLocked bp 0x%p ref %d", bp, refCount);
946 afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, refCount);
948 lock_ReleaseWrite(&buf_globalLock);
949 lock_ReleaseRead(&scp->bufCreateLock);
950 return CM_BUF_EXISTS;
954 /* does this fix the problem below? it's a simple solution. */
955 if (!cm_data.buf_freeListEndp)
957 lock_ReleaseWrite(&buf_globalLock);
958 lock_ReleaseRead(&scp->bufCreateLock);
959 osi_Log0(afsd_logp, "buf_GetNewLocked: Free Buffer List is empty - sleeping 200ms");
964 /* for debugging, assert free list isn't empty, although we
965 * really should try waiting for a running transaction to finish
966 * instead of this; or better, we should have a transaction
967 * throttler prevent us from entering this situation.
969 osi_assertx(cm_data.buf_freeListEndp != NULL, "buf_GetNewLocked: no free buffers");
971 /* look at all buffers in free list, some of which may temporarily
972 * have high refcounts and should then be skipped,
973 * starting cleaning I/O for those which are dirty. If we find
974 * a clean buffer, we rehash it, lock it and return it.
976 for(bp = cm_data.buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
977 /* check to see if it really has zero ref count. This
978 * code can bump refcounts, at least, so it may not be
981 if (bp->refCount > 0)
984 /* we don't have to lock buffer itself, since the ref
985 * count is 0 and we know it will stay zero as long as
986 * we hold the global lock.
989 /* Don't recycle a buffer held by the redirector. */
990 if (bp->qFlags & CM_BUF_QREDIR)
993 /* don't recycle someone in our own chunk */
994 if (!cm_FidCmp(&bp->fid, &scp->fid)
995 && (bp->offset.LowPart & (-cm_chunkSize))
996 == (offsetp->LowPart & (-cm_chunkSize)))
999 /* if this page is being filled (!) or cleaned, see if
1000 * the I/O has completed. If not, skip it, otherwise
1001 * do the final processing for the I/O.
1003 if (bp->flags & (CM_BUF_READING | CM_BUF_WRITING)) {
1004 /* probably shouldn't do this much work while
1005 * holding the big lock? Watch for contention
1011 if (bp->flags & CM_BUF_DIRTY) {
1012 /* if the buffer is dirty, start cleaning it and
1013 * move on to the next buffer. We do this with
1014 * just the lock required to minimize contention
1018 lock_ReleaseWrite(&buf_globalLock);
1019 lock_ReleaseRead(&scp->bufCreateLock);
1021 /* grab required lock and clean; this only
1022 * starts the I/O. By the time we're back,
1023 * it'll still be marked dirty, but it will also
1024 * have the WRITING flag set, so we won't get
1027 buf_CleanAsync(scp, bp, reqp, 0, NULL);
1029 /* now put it back and go around again */
1034 /* if we get here, we know that the buffer still has a 0
1035 * ref count, and that it is clean and has no currently
1036 * pending I/O. This is the dude to return.
1037 * Remember that as long as the ref count is 0, we know
1038 * that we won't have any lock conflicts, so we can grab
1039 * the buffer lock out of order in the locking hierarchy.
1043 /* now hash in as our new buffer, and give it the
1044 * appropriate label, if requested.
1047 bp->qFlags |= CM_BUF_QINHASH;
1052 bp->offset = *offsetp;
1053 i = BUF_HASH(&scp->fid, offsetp);
1054 bp->hashp = cm_data.buf_scacheHashTablepp[i];
1055 cm_data.buf_scacheHashTablepp[i] = bp;
1056 i = BUF_FILEHASH(&scp->fid);
1057 nextBp = cm_data.buf_fileHashTablepp[i];
1058 bp->fileHashp = nextBp;
1059 bp->fileHashBackp = NULL;
1061 nextBp->fileHashBackp = bp;
1062 cm_data.buf_fileHashTablepp[i] = bp;
1065 /* we should move it from the lru queue. It better still be there,
1066 * since we've held the global (big) lock since we found it there.
1068 osi_assertx(bp->qFlags & CM_BUF_QINLRU,
1069 "buf_GetNewLocked: LRU screwup");
1071 if (cm_data.buf_freeListEndp == bp) {
1072 /* we're the last guy in this queue, so maintain it */
1073 cm_data.buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
1075 osi_QRemove((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
1076 bp->qFlags &= ~CM_BUF_QINLRU;
1078 /* prepare to return it. Give it a refcount */
1080 #ifdef DEBUG_REFCOUNT
1081 osi_Log2(afsd_logp,"buf_GetNewLocked bp 0x%p ref %d", bp, 1);
1082 afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, 1);
1084 /* grab the mutex so that people don't use it
1085 * before the caller fills it with data. Again, no one
1086 * should have been able to get to this dude to lock it.
1088 if (!lock_TryMutex(&bp->mx)) {
1089 osi_Log2(afsd_logp, "buf_GetNewLocked bp 0x%p cannot be mutex locked. refCount %d should be 0",
1091 osi_panic("buf_GetNewLocked: TryMutex failed",__FILE__,__LINE__);
1094 lock_ReleaseWrite(&buf_globalLock);
1095 lock_ReleaseRead(&scp->bufCreateLock);
1100 buf_ValidateBufQueues();
1101 #endif /* TESTING */
1103 } /* for all buffers in lru queue */
1104 lock_ReleaseWrite(&buf_globalLock);
1105 lock_ReleaseRead(&scp->bufCreateLock);
1106 osi_Log0(afsd_logp, "buf_GetNewLocked: Free Buffer List has no buffers with a zero refcount - sleeping 100ms");
1107 Sleep(100); /* give some time for a buffer to be freed */
1108 } /* while loop over everything */
1112 /* get a page, returning it held but unlocked. Doesn't fill in the page
1113 * with I/O, since we're going to write the whole thing new.
1115 long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf_t **bufpp)
1119 osi_hyper_t pageOffset;
1123 pageOffset.HighPart = offsetp->HighPart;
1124 pageOffset.LowPart = offsetp->LowPart & ~(cm_data.buf_blockSize-1);
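/* e.g. with a 4096-byte buf_blockSize, an offset of 0x1234 masks down to
 * pageOffset 0x1000 (0x1234 & ~0xFFF); every offset within a page maps
 * to the same buffer.
 */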
1126 bp = buf_Find(scp, &pageOffset);
1128 /* lock it and break out */
1129 lock_ObtainMutex(&bp->mx);
1133 /* otherwise, we have to create a page */
1134 code = buf_GetNewLocked(scp, &pageOffset, reqp, &bp);
1136 /* check if the buffer was created in a race condition branch.
1137 * If so, go around so we can hold a reference to it.
1139 if (code == CM_BUF_EXISTS)
1142 /* something else went wrong */
1146 /* otherwise, we have a locked buffer that we just created */
1149 } /* big while loop */
1151 /* wait for reads */
1152 if (bp->flags & CM_BUF_READING)
1153 buf_WaitIO(scp, bp);
1155 /* once it has been read once, we can unlock it and return it, still
1156 * with its refcount held.
1158 lock_ReleaseMutex(&bp->mx);
1160 osi_Log4(buf_logp, "buf_GetNew returning bp 0x%p for scp 0x%p, offset 0x%x:%08x",
1161 bp, scp, offsetp->HighPart, offsetp->LowPart);
1165 /* get a page, returning it held but unlocked. Make sure it is complete */
1166 /* The scp must be unlocked when passed to this function */
1167 long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf_t **bufpp)
1171 osi_hyper_t pageOffset;
1172 unsigned long tcount;
1176 cm_diskcache_t *dcp;
1177 #endif /* DISKCACHE95 */
1180 pageOffset.HighPart = offsetp->HighPart;
1181 pageOffset.LowPart = offsetp->LowPart & ~(cm_data.buf_blockSize-1);
1185 buf_ValidateBufQueues();
1186 #endif /* TESTING */
1188 bp = buf_Find(scp, &pageOffset);
1190 /* lock it and break out */
1191 lock_ObtainMutex(&bp->mx);
1194 /* touch disk chunk to update LRU info */
1195 diskcache_Touch(bp->dcp);
1196 #endif /* DISKCACHE95 */
1200 /* otherwise, we have to create a page */
1201 code = buf_GetNewLocked(scp, &pageOffset, reqp, &bp);
1202 /* bp->mx is now held */
1204 /* check if the buffer was created in a race condition branch.
1205 * If so, go around so we can hold a reference to it.
1207 if (code == CM_BUF_EXISTS)
1210 /* something else went wrong */
1213 buf_ValidateBufQueues();
1214 #endif /* TESTING */
1218 /* otherwise, we have a locked buffer that we just created */
1221 } /* big while loop */
1223 /* if we get here, we have a locked buffer that may have just been
1224 * created, in which case it needs to be filled with data.
1227 /* load the page; freshly created pages should be idle */
1228 osi_assertx(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)), "incorrect cm_buf_t flags");
1230 /* start the I/O; may drop lock */
1231 bp->flags |= CM_BUF_READING;
1232 code = (*cm_buf_opsp->Readp)(bp, cm_data.buf_blockSize, &tcount, NULL);
1235 code = diskcache_Get(&bp->fid, &bp->offset, bp->datap, cm_data.buf_blockSize, &bp->dataVersion, &tcount, &dcp);
1236 bp->dcp = dcp; /* pointer to disk cache struct. */
1237 #endif /* DISKCACHE95 */
1240 /* failure or queued */
1241 if (code != ERROR_IO_PENDING) {
1243 bp->flags |= CM_BUF_ERROR;
1244 bp->flags &= ~CM_BUF_READING;
1245 if (bp->flags & CM_BUF_WAITING) {
1246 osi_Log1(buf_logp, "buf_Get Waking bp 0x%p", bp);
1247 osi_Wakeup((LONG_PTR) bp);
1249 lock_ReleaseMutex(&bp->mx);
1252 buf_ValidateBufQueues();
1253 #endif /* TESTING */
1257 /* otherwise, I/O completed instantly and we're done, except
1258 * for padding the xfr out with 0s and checking for EOF
1260 if (tcount < (unsigned long) cm_data.buf_blockSize) {
1261 memset(bp->datap+tcount, 0, cm_data.buf_blockSize - tcount);
1263 bp->flags |= CM_BUF_EOF;
1265 bp->flags &= ~CM_BUF_READING;
1266 if (bp->flags & CM_BUF_WAITING) {
1267 osi_Log1(buf_logp, "buf_Get Waking bp 0x%p", bp);
1268 osi_Wakeup((LONG_PTR) bp);
1274 /* wait for reads, either that which we started above, or that someone
1275 * else started. We don't care if we return a buffer being cleaned.
1277 if (bp->flags & CM_BUF_READING)
1278 buf_WaitIO(scp, bp);
1280 /* once it has been read once, we can unlock it and return it, still
1281 * with its refcount held.
1283 lock_ReleaseMutex(&bp->mx);
1286 /* now remove from queue; will be put in at the head (farthest from
1287 * being recycled) when we're done in buf_Release.
1289 lock_ObtainWrite(&buf_globalLock);
1290 if (bp->qFlags & CM_BUF_QINLRU) {
1291 if (cm_data.buf_freeListEndp == bp)
1292 cm_data.buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
1293 osi_QRemove((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
1294 bp->qFlags &= ~CM_BUF_QINLRU;
1296 lock_ReleaseWrite(&buf_globalLock);
1298 osi_Log4(buf_logp, "buf_Get returning bp 0x%p for scp 0x%p, offset 0x%x:%08x",
1299 bp, scp, offsetp->HighPart, offsetp->LowPart);
1301 buf_ValidateBufQueues();
1302 #endif /* TESTING */
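/* Caller-side sketch for buf_Get: the buffer comes back held but
 * unlocked, with its contents already read (or zero-padded at EOF):
 *
 *     code = buf_Get(scp, &offset, &req, &bufp);
 *     if (code == 0) {
 *         lock_ObtainMutex(&bufp->mx);      // the mutex guards the data
 *         memcpy(out, bufp->datap, cm_data.buf_blockSize);
 *         lock_ReleaseMutex(&bufp->mx);
 *         buf_Release(bufp);                // drop the reference from buf_Get
 *     }
 */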
1306 /* count # of elements in the free list;
1307 * we don't bother doing the proper locking for accessing dataVersion or flags
1308 * since it is a pain, and this is really just an advisory call. If you need
1309 * to do better at some point, rewrite this function.
1311 long buf_CountFreeList(void)
1317 lock_ObtainRead(&buf_globalLock);
1318 for(bufp = cm_data.buf_freeListp; bufp; bufp = (cm_buf_t *) osi_QNext(&bufp->q)) {
1319 /* if the buffer doesn't have an identity, or if the buffer
1320 * has been invalidated (by having its DV stomped upon), then
1321 * count it as free, since it isn't really being utilized.
1323 if (!(bufp->qFlags & CM_BUF_QINHASH) || bufp->dataVersion == CM_BUF_VERSION_BAD)
1326 lock_ReleaseRead(&buf_globalLock);
1330 /* clean a buffer synchronously */
1331 afs_uint32 buf_CleanAsync(cm_scache_t *scp, cm_buf_t *bp, cm_req_t *reqp, afs_uint32 flags, afs_uint32 *pisdirty)
1334 osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
1335 osi_assertx(!(flags & CM_BUF_WRITE_SCP_LOCKED), "scp->rw must not be held when calling buf_CleanAsync");
1337 lock_ObtainMutex(&bp->mx);
1338 code = buf_CleanAsyncLocked(scp, bp, reqp, flags, pisdirty);
1339 lock_ReleaseMutex(&bp->mx);
1344 /* wait for a buffer's cleaning to finish */
1345 void buf_CleanWait(cm_scache_t * scp, cm_buf_t *bp, afs_uint32 locked)
1347 osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
1350 lock_ObtainMutex(&bp->mx);
1351 if (bp->flags & CM_BUF_WRITING) {
1352 buf_WaitIO(scp, bp);
1355 lock_ReleaseMutex(&bp->mx);
1358 /* set the dirty flag on a buffer, and set associated write-ahead log,
1359 * if there is one. Allow one to be added to a buffer, but not changed.
1361 * The buffer must be locked before calling this routine.
1363 void buf_SetDirty(cm_buf_t *bp, afs_uint32 offset, afs_uint32 length, cm_user_t *userp)
1365 osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
1366 osi_assertx(bp->refCount > 0, "cm_buf_t refcount 0");
1368 if (bp->flags & CM_BUF_DIRTY) {
1370 osi_Log1(buf_logp, "buf_SetDirty 0x%p already dirty", bp);
1372 if (bp->dirty_offset <= offset) {
1373 if (bp->dirty_offset + bp->dirty_length >= offset + length) {
1374 /* dirty_length remains the same */
1376 bp->dirty_length = offset + length - bp->dirty_offset;
1378 } else /* bp->dirty_offset > offset */ {
1379 if (bp->dirty_offset + bp->dirty_length >= offset + length) {
1380 bp->dirty_length = bp->dirty_offset + bp->dirty_length - offset;
1382 bp->dirty_length = length;
1384 bp->dirty_offset = offset;
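/* Worked example: an existing dirty range [100, 300) (offset 100,
 * length 200) merged with a new write [50, 150) takes this branch:
 * dirty_length becomes 100 + 200 - 50 = 250 and dirty_offset becomes
 * 50, i.e. the union [50, 300).
 */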
1387 osi_Log1(buf_logp, "buf_SetDirty 0x%p", bp);
1390 bp->flags |= CM_BUF_DIRTY;
1392 /* and turn off EOF flag, since it has associated data now */
1393 bp->flags &= ~CM_BUF_EOF;
1395 bp->dirty_offset = offset;
1396 bp->dirty_length = length;
1398 /* and add to the dirty list.
1399 * we obtain a hold on the buffer for as long as it remains
1400 * in the list. Buffers are only removed from the list by
1401 * the buf_Sync function regardless of when else the
1402 * dirty flag might be cleared.
1404 * A double add should never happen, but just in case there is a bug
1405 * elsewhere, never add to the dirty list if the buffer is already in it.
1408 lock_ObtainWrite(&buf_globalLock);
1409 if (!(bp->qFlags & CM_BUF_QINDL)) {
1411 if (!cm_data.buf_dirtyListp) {
1412 cm_data.buf_dirtyListp = cm_data.buf_dirtyListEndp = bp;
1414 cm_data.buf_dirtyListEndp->dirtyp = bp;
1415 cm_data.buf_dirtyListEndp = bp;
1418 bp->qFlags |= CM_BUF_QINDL;
1420 lock_ReleaseWrite(&buf_globalLock);
1423 /* and record the last writer */
1424 if (bp->userp != userp) {
1427 cm_ReleaseUser(bp->userp);
1432 /* clean all buffers, reset log pointers and invalidate all buffers.
1433 * Called with no locks held, and returns with same.
1435 * This function is guaranteed to clean and remove the log ptr of all the
1436 * buffers that were dirty or had non-zero log ptrs before the call was
1437 * made. That's sufficient to clean up any garbage left around by recovery,
1438 * which is all we're counting on this for; there may be newly created buffers
1439 * added while we're running, but that should be OK.
1441 * In an environment where there are no transactions (artificially imposed, for
1442 * example, when switching the database to raw mode), this function is used to
1443 * make sure that all updates have been written to the disk. In that case, we don't
1444 * really require that we forget the log association between pages and logs, but
1445 * it also doesn't hurt. Since raw mode I/O goes through this buffer package, we don't
1446 * have to worry about invalidating data in the buffers.
1448 * This function is used at the end of recovery as paranoia to get the recovered
1449 * database out to disk. It removes all references to the recovery log and cleans all dirty buffers.
1452 long buf_CleanAndReset(void)
1458 lock_ObtainRead(&buf_globalLock);
1459 for(i=0; i<cm_data.buf_hashSize; i++) {
1460 for(bp = cm_data.buf_scacheHashTablepp[i]; bp; bp = bp->hashp) {
1461 if ((bp->flags & CM_BUF_DIRTY) == CM_BUF_DIRTY) {
1463 lock_ReleaseRead(&buf_globalLock);
1465 /* now no locks are held; clean buffer and go on */
1467 req.flags |= CM_REQ_NORETRY;
1469 buf_CleanAsync(NULL, bp, &req, 0, NULL);
1470 buf_CleanWait(NULL, bp, FALSE);
1472 /* relock and release buffer */
1473 lock_ObtainRead(&buf_globalLock);
1474 buf_ReleaseLocked(bp, FALSE);
1476 } /* over one bucket */
1477 } /* for loop over all hash buckets */
1480 lock_ReleaseRead(&buf_globalLock);
1483 buf_ValidateBufQueues();
1484 #endif /* TESTING */
1486 /* and we're done */
1490 /* called without global lock being held, reserves buffers for callers
1491 * that need more than one held (not locked) at once.
1493 void buf_ReserveBuffers(afs_uint64 nbuffers)
1495 lock_ObtainWrite(&buf_globalLock);
1497 if (cm_data.buf_reservedBufs + nbuffers > cm_data.buf_maxReservedBufs) {
1498 cm_data.buf_reserveWaiting = 1;
1499 osi_Log1(buf_logp, "buf_ReserveBuffers waiting for %d bufs", nbuffers);
1500 osi_SleepW((LONG_PTR) &cm_data.buf_reservedBufs, &buf_globalLock);
1501 lock_ObtainWrite(&buf_globalLock);
1504 cm_data.buf_reservedBufs += nbuffers;
1508 lock_ReleaseWrite(&buf_globalLock);
1511 int buf_TryReserveBuffers(afs_uint64 nbuffers)
1515 lock_ObtainWrite(&buf_globalLock);
1516 if (cm_data.buf_reservedBufs + nbuffers > cm_data.buf_maxReservedBufs) {
1520 cm_data.buf_reservedBufs += nbuffers;
1523 lock_ReleaseWrite(&buf_globalLock);
1527 /* called without global lock held, releases reservation held by
1528 * buf_ReserveBuffers.
1530 void buf_UnreserveBuffers(afs_uint64 nbuffers)
1532 lock_ObtainWrite(&buf_globalLock);
1533 cm_data.buf_reservedBufs -= nbuffers;
1534 if (cm_data.buf_reserveWaiting) {
1535 cm_data.buf_reserveWaiting = 0;
1536 osi_Wakeup((LONG_PTR) &cm_data.buf_reservedBufs);
1538 lock_ReleaseWrite(&buf_globalLock);
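/* Reservation sketch: a caller that must hold every page of a chunk at
 * once reserves first, so that concurrent callers cannot deadlock the
 * pool, and unreserves once the buffers have been released:
 *
 *     afs_uint64 n = cm_chunkSize / cm_data.buf_blockSize;
 *
 *     buf_ReserveBuffers(n);         // may sleep until space is available
 *     ... buf_Get() each page, use it, buf_Release() it ...
 *     buf_UnreserveBuffers(n);       // wakes any waiting reservers
 *
 * buf_TryReserveBuffers(n) is the non-blocking variant: it fails rather
 * than sleeping when the reservation would exceed buf_maxReservedBufs.
 */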
1541 /* truncate the buffers past sizep, zeroing out the page, if we don't
1542 * end on a page boundary.
1544 * Requires the scp->bufCreateLock to be write locked.
1546 long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
1550 cm_buf_t *nbufp; /* next buffer, if didRelease */
1556 /* assert that cm_bufCreateLock is held in write mode */
1557 lock_AssertWrite(&scp->bufCreateLock);
1559 i = BUF_FILEHASH(&scp->fid);
1561 lock_ObtainRead(&buf_globalLock);
1562 bufp = cm_data.buf_fileHashTablepp[i];
1564 lock_ReleaseRead(&buf_globalLock);
1568 buf_HoldLocked(bufp);
1569 lock_ReleaseRead(&buf_globalLock);
1571 lock_ObtainMutex(&bufp->mx);
1573 bufEnd.HighPart = 0;
1574 bufEnd.LowPart = cm_data.buf_blockSize;
1575 bufEnd = LargeIntegerAdd(bufEnd, bufp->offset);
1577 if (cm_FidCmp(&bufp->fid, &scp->fid) == 0 &&
1578 LargeIntegerLessThan(*sizep, bufEnd)) {
1579 buf_WaitIO(scp, bufp);
1581 lock_ObtainWrite(&scp->rw);
1583 /* make sure we have a callback (so we have the right value for
1584 * the length), and wait for it to be safe to do a truncate.
1586 code = cm_SyncOp(scp, bufp, userp, reqp, 0,
1587 CM_SCACHESYNC_NEEDCALLBACK
1588 | CM_SCACHESYNC_GETSTATUS
1589 | CM_SCACHESYNC_SETSIZE
1590 | CM_SCACHESYNC_BUFLOCKED);
1593 /* if we succeeded in our locking, and this applies to the right
1594 * file, and the truncate request overlaps the buffer either
1595 * totally or partially, then do something.
1597 if (code == 0 && cm_FidCmp(&bufp->fid, &scp->fid) == 0
1598 && LargeIntegerLessThan(*sizep, bufEnd)) {
1601 /* destroy the buffer, turning off its dirty bit, if
1602 * we're truncating the whole buffer. Otherwise, set
1603 * the dirty bit, and clear out the tail of the buffer
1604 * if we just overlap some.
1606 if (LargeIntegerLessThanOrEqualTo(*sizep, bufp->offset)) {
1607 /* truncating the entire page */
1608 bufp->flags &= ~CM_BUF_DIRTY;
1609 bufp->dirty_offset = 0;
1610 bufp->dirty_length = 0;
1611 bufp->dataVersion = CM_BUF_VERSION_BAD; /* known bad */
1612 bufp->dirtyCounter++;
1615 /* don't set dirty, since dirty implies
1616 * currently up-to-date. Don't need to do this,
1617 * since we'll update the length anyway.
1619 * Zero out remainder of the page, in case we
1620 * seek and write past EOF, and make this data
1623 bufferPos = sizep->LowPart & (cm_data.buf_blockSize - 1);
1624 osi_assertx(bufferPos != 0, "non-zero bufferPos");
1625 memset(bufp->datap + bufferPos, 0,
1626 cm_data.buf_blockSize - bufferPos);
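/* Worked example: truncating to size 10000 with a 4096-byte
 * buf_blockSize gives bufferPos = 10000 & 4095 = 1808, so bytes
 * 1808..4095 of the page at offset 8192 are zeroed.
 */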
1630 cm_SyncOpDone( scp, bufp,
1631 CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS
1632 | CM_SCACHESYNC_SETSIZE | CM_SCACHESYNC_BUFLOCKED);
1634 lock_ReleaseWrite(&scp->rw);
1635 lock_ReleaseMutex(&bufp->mx);
1638 nbufp = bufp->fileHashp;
1642 /* This forces the loop to end and the error code
1643 * to be returned. */
1651 buf_ValidateBufQueues();
1652 #endif /* TESTING */
1658 long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
1661 cm_buf_t *bp; /* buffer we're hacking on */
1665 afs_uint32 stable = 0;
1667 i = BUF_FILEHASH(&scp->fid);
1670 lock_ObtainRead(&buf_globalLock);
1671 bp = cm_data.buf_fileHashTablepp[i];
1674 lock_ReleaseRead(&buf_globalLock);
1676 for (; bp; bp = nbp) {
1677 didRelease = 0; /* haven't released this buffer yet */
1679 /* clean buffer synchronously */
1680 if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
1682 if (code == 0 && !stable && (bp->flags & CM_BUF_DIRTY)) {
1684 * we must stabilize the object to ensure that buffer
1685 * changes cannot occur while the flush is performed.
1686 * However, we do not want to Stabilize if we do not
1687 * need to because Stabilize obtains a callback.
1689 code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
1690 stable = (code == 0);
1693 if (code == CM_ERROR_BADFD) {
1694 /* if the scp's FID is bad it's because we received VNOVNODE
1695 * when attempting to FetchStatus before the write. This
1696 * page therefore contains data that can no longer be stored.
1698 lock_ObtainMutex(&bp->mx);
1699 bp->flags &= ~CM_BUF_DIRTY;
1700 bp->flags |= CM_BUF_ERROR;
1701 bp->error = CM_ERROR_BADFD;
1702 bp->dirty_offset = 0;
1703 bp->dirty_length = 0;
1704 bp->dataVersion = CM_BUF_VERSION_BAD; /* known bad */
1706 lock_ReleaseMutex(&bp->mx);
1707 } else if (!(scp->flags & CM_SCACHEFLAG_RO)) {
1712 lock_ObtainMutex(&bp->mx);
1714 /* start cleaning the buffer, and wait for it to finish */
1715 buf_CleanAsyncLocked(scp, bp, reqp, 0, NULL);
1716 buf_WaitIO(scp, bp);
1718 lock_ReleaseMutex(&bp->mx);
1721 /* actually, we only know that the buffer is clean if its ref
1722 * count is 1, since we don't have the buffer itself locked.
1724 if (!(bp->flags & CM_BUF_DIRTY)) {
1725 lock_ObtainWrite(&buf_globalLock);
1726 if (bp->refCount == 1) { /* bp is held above */
1727 nbp = bp->fileHashp;
1729 buf_HoldLocked(nbp);
1730 buf_ReleaseLocked(bp, TRUE);
1734 lock_ReleaseWrite(&buf_globalLock);
1740 lock_ObtainRead(&buf_globalLock);
1741 nbp = bp->fileHashp;
1743 buf_HoldLocked(nbp);
1744 buf_ReleaseLocked(bp, FALSE);
1745 lock_ReleaseRead(&buf_globalLock);
1747 } /* for loop over a bunch of buffers */
1750 (*cm_buf_opsp->Unstabilizep)(scp, userp);
1753 buf_ValidateBufQueues();
1754 #endif /* TESTING */
1760 /* Must be called with scp->rw held */
1761 long buf_ForceDataVersion(cm_scache_t * scp, afs_uint64 fromVersion, afs_uint64 toVersion)
1767 lock_AssertAny(&scp->rw);
1769 i = BUF_FILEHASH(&scp->fid);
1771 lock_ObtainRead(&buf_globalLock);
1773 for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp = bp->fileHashp) {
1774 if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
1775 if (bp->dataVersion == fromVersion) {
1776 bp->dataVersion = toVersion;
1781 lock_ReleaseRead(&buf_globalLock);
1789 long buf_CleanVnode(struct cm_scache *scp, cm_user_t *userp, cm_req_t *reqp)
1793 cm_buf_t *bp; /* buffer we're hacking on */
1794 cm_buf_t *nbp; /* next one */
1797 i = BUF_FILEHASH(&scp->fid);
1799 lock_ObtainRead(&buf_globalLock);
1800 bp = cm_data.buf_fileHashTablepp[i];
1803 lock_ReleaseRead(&buf_globalLock);
1804 for (; bp; bp = nbp) {
1805 /* clean buffer synchronously */
1806 if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
1807 lock_ObtainMutex(&bp->mx);
1808 if (bp->flags & CM_BUF_DIRTY) {
1809 if (userp && userp != bp->userp) {
1812 cm_ReleaseUser(bp->userp);
1817 case CM_ERROR_NOSUCHFILE:
1818 case CM_ERROR_BADFD:
1819 case CM_ERROR_NOACCESS:
1820 case CM_ERROR_QUOTA:
1821 case CM_ERROR_SPACE:
1822 case CM_ERROR_TOOBIG:
1823 case CM_ERROR_READONLY:
1824 case CM_ERROR_NOSUCHPATH:
1826 * Apply the previous fatal error to this buffer.
1827 * Do not waste the time attempting to store to
1828 * the file server when we know it will fail.
1830 bp->flags &= ~CM_BUF_DIRTY;
1831 bp->flags |= CM_BUF_ERROR;
1832 bp->dirty_offset = 0;
1833 bp->dirty_length = 0;
1835 bp->dataVersion = CM_BUF_VERSION_BAD;
1838 case CM_ERROR_TIMEDOUT:
1839 case CM_ERROR_ALLDOWN:
1840 case CM_ERROR_ALLBUSY:
1841 case CM_ERROR_ALLOFFLINE:
1842 case CM_ERROR_CLOCKSKEW:
1843 /* do not mark the buffer in error state but do
1844 * not attempt to complete the rest either.
1848 code = buf_CleanAsyncLocked(scp, bp, reqp, 0, &wasDirty);
1849 if (bp->flags & CM_BUF_ERROR) {
1855 buf_CleanWait(scp, bp, TRUE);
1857 lock_ReleaseMutex(&bp->mx);
1860 lock_ObtainRead(&buf_globalLock);
1861 nbp = bp->fileHashp;
1863 buf_HoldLocked(nbp);
1864 buf_ReleaseLocked(bp, FALSE);
1865 lock_ReleaseRead(&buf_globalLock);
1866 } /* for loop over a bunch of buffers */
1869 buf_ValidateBufQueues();
1870 #endif /* TESTING */
1878 buf_ValidateBufQueues(void)
1880 cm_buf_t * bp, *bpb, *bpf, *bpa;
1881 afs_uint32 countf=0, countb=0, counta=0;
1883 lock_ObtainRead(&buf_globalLock);
1884 for (bp = cm_data.buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
1885 if (bp->magic != CM_BUF_MAGIC)
1886 osi_panic("buf magic error",__FILE__,__LINE__);
1891 for (bp = cm_data.buf_freeListp; bp; bp=(cm_buf_t *) osi_QNext(&bp->q)) {
1892 if (bp->magic != CM_BUF_MAGIC)
1893 osi_panic("buf magic error",__FILE__,__LINE__);
1898 for (bp = cm_data.buf_allp; bp; bp=bp->allp) {
1899 if (bp->magic != CM_BUF_MAGIC)
1900 osi_panic("buf magic error",__FILE__,__LINE__);
1904 lock_ReleaseRead(&buf_globalLock);
1906 if (countb != countf)
1907 osi_panic("buf magic error",__FILE__,__LINE__);
1909 if (counta != cm_data.buf_nbuffers)
1910 osi_panic("buf magic error",__FILE__,__LINE__);
1912 #endif /* TESTING */
1914 /* dump the contents of the buf_scacheHashTablepp. */
1915 int cm_DumpBufHashTable(FILE *outputFile, char *cookie, int lock)
1922 if (cm_data.buf_scacheHashTablepp == NULL)
1926 lock_ObtainRead(&buf_globalLock);
1928 StringCbPrintfA(output, sizeof(output), "%s - dumping buf_HashTable - buf_hashSize=%d\r\n",
1929 cookie, cm_data.buf_hashSize);
1930 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1932 for (i = 0; i < cm_data.buf_hashSize; i++)
1934 for (bp = cm_data.buf_scacheHashTablepp[i]; bp; bp=bp->hashp)
1936 StringCbPrintfA(output, sizeof(output),
1937 "%s bp=0x%08X, hash=%d, fid (cell=%d, volume=%d, "
1938 "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
1939 "flags=0x%x, qFlags=0x%x cmFlags=0x%x, error=0x%x, refCount=%d\r\n",
1940 cookie, (void *)bp, i, bp->fid.cell, bp->fid.volume,
1941 bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
1942 bp->offset.LowPart, bp->dataVersion, bp->flags, bp->qFlags,
1943 bp->cmFlags, bp->error, bp->refCount);
1944 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1948 StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_HashTable.\r\n", cookie);
1949 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1951 StringCbPrintfA(output, sizeof(output), "%s - dumping buf_freeListEndp\r\n", cookie);
1952 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1953 for(bp = cm_data.buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
1954 StringCbPrintfA(output, sizeof(output),
1955 "%s bp=0x%08X, fid (cell=%d, volume=%d, "
1956 "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
1957 "flags=0x%x, qFlags=0x%x, cmFlags=0x%x, error=0x%x, refCount=%d\r\n",
1958 cookie, (void *)bp, bp->fid.cell, bp->fid.volume,
1959 bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
1960 bp->offset.LowPart, bp->dataVersion, bp->flags, bp->qFlags,
1961 bp->cmFlags, bp->error, bp->refCount);
1962 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1964 StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_FreeListEndp.\r\n", cookie);
1965 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1967 StringCbPrintfA(output, sizeof(output), "%s - dumping buf_dirtyListp\r\n", cookie);
1968 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1969 for(bp = cm_data.buf_dirtyListp; bp; bp=bp->dirtyp) {
1970 StringCbPrintfA(output, sizeof(output),
1971 "%s bp=0x%08X, fid (cell=%d, volume=%d, "
1972 "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
1973 "flags=0x%x, qFlags=0x%x, cmFlags=0x%x, error=0x%x, refCount=%d\r\n",
1974 cookie, (void *)bp, bp->fid.cell, bp->fid.volume,
1975 bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
1976 bp->offset.LowPart, bp->dataVersion, bp->flags, bp->qFlags,
1977 bp->cmFlags, bp->error, bp->refCount);
1978 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1980 StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_dirtyListp.\r\n", cookie);
1981 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1984 lock_ReleaseRead(&buf_globalLock);
1988 void buf_ForceTrace(BOOL flush)
1997 len = GetTempPath(sizeof(buf)-10, buf);
1998 StringCbCopyA(&buf[len], sizeof(buf)-len, "/afs-buffer.log");
1999 handle = CreateFile(buf, GENERIC_WRITE, FILE_SHARE_READ,
2000 NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
2001 if (handle == INVALID_HANDLE_VALUE) {
2002 osi_panic("Cannot create log file", __FILE__, __LINE__);
2004 osi_LogPrint(buf_logp, handle);
2006 FlushFileBuffers(handle);
2007 CloseHandle(handle);
2010 long buf_DirtyBuffersExist(cm_fid_t *fidp)
2013 afs_uint32 bcount = 0;
2016 i = BUF_FILEHASH(fidp);
2018 for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp=bp->fileHashp, bcount++) {
2019 if (!cm_FidCmp(fidp, &bp->fid) && (bp->flags & CM_BUF_DIRTY))
2026 long buf_CleanDirtyBuffers(cm_scache_t *scp)
2029 afs_uint32 bcount = 0;
2030 cm_fid_t * fidp = &scp->fid;
2032 for (bp = cm_data.buf_allp; bp; bp=bp->allp, bcount++) {
2033 if (!cm_FidCmp(fidp, &bp->fid) && (bp->flags & CM_BUF_DIRTY)) {
2035 lock_ObtainMutex(&bp->mx);
2036 bp->cmFlags &= ~CM_BUF_CMSTORING;
2037 bp->flags &= ~CM_BUF_DIRTY;
2038 bp->dirty_offset = 0;
2039 bp->dirty_length = 0;
2040 bp->flags |= CM_BUF_ERROR;
2041 bp->error = VNOVNODE;
2042 bp->dataVersion = CM_BUF_VERSION_BAD; /* bad */
2044 if (bp->flags & CM_BUF_WAITING) {
2045 osi_Log2(buf_logp, "BUF CleanDirtyBuffers Waking [scp 0x%p] bp 0x%p", scp, bp);
2046 osi_Wakeup((LONG_PTR) bp);
2048 lock_ReleaseMutex(&bp->mx);