2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 /* Copyright (C) 1994 Cazamar Systems, Inc. */
12 #include <afsconfig.h>
13 #include <afs/param.h>
25 #include "cm_memmap.h"
28 #define TRACE_BUFFER 1
31 extern void afsi_log(char *pattern, ...);
33 /* This module implements the buffer package used by the cache manager
34 * (cm). It is initialized by calling cm_Init, which calls buf_Init;
35 * it must be initialized before any of its main routines are called.
37 * Each buffer is hashed into a hash table by file ID and offset, and if its
38 * reference count is zero, it is also in a free list.
40 * There are two locks involved in buffer processing. The global lock
41 * buf_globalLock protects all of the global variables defined in this module,
42 * the reference counts and hash pointers in the actual cm_buf_t structures,
43 * and the LRU queue pointers in the buffer structures.
45 * The mutexes in the buffer structures protect the remaining fields in the
46 * buffers, as well as the data itself.
48 * The locking hierarchy here is this:
50 * - resv multiple simul. buffers reservation
51 * - lock buffer I/O flags
52 * - lock buffer's mutex
53 * - lock buf_globalLock
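 *
 * An illustrative sketch (not part of the original code) of a caller that
 * needs both a buffer's mutex and the global lock, acquired in the order
 * listed above (buffer mutex first, buf_globalLock last), as buf_Sync()
 * does when it moves a buffer off the dirty list:
 *
 *     lock_ObtainMutex(&bp->mx);
 *     lock_ObtainWrite(&buf_globalLock);
 *     ... update reference counts / queue pointers ...
 *     lock_ReleaseWrite(&buf_globalLock);
 *     lock_ReleaseMutex(&bp->mx);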
57 /* global debugging log */
58 osi_log_t *buf_logp = NULL;
60 /* Global lock protecting hash tables and free lists */
61 osi_rwlock_t buf_globalLock;
63 /* ptr to head of the free list (most recently used) and the
64 * tail (the guy to remove first). We use osi_Q* functions
65 * to put stuff in buf_freeListp, and maintain the end
69 /* a pointer to a list of all buffers, just so that we can find them
70 * easily for debugging, and for the incr syncer. Locked under
74 /* defaults setup; these variables may be manually assigned
75 * before calling cm_Init, as a way of changing these defaults.
78 /* callouts for reading and writing data, etc */
79 cm_buf_ops_t *cm_buf_opsp;
82 /* for experimental disk caching support in Win95 client */
83 cm_buf_t *buf_diskFreeListp;
84 cm_buf_t *buf_diskFreeListEndp;
85 cm_buf_t *buf_diskAllp;
86 extern int cm_diskCacheEnabled;
87 #endif /* DISKCACHE95 */
89 /* set this to 1 when we are terminating to prevent access attempts */
90 static int buf_ShutdownFlag = 0;
93 void buf_HoldLockedDbg(cm_buf_t *bp, char *file, long line)
95 void buf_HoldLocked(cm_buf_t *bp)
100 osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
101 refCount = InterlockedIncrement(&bp->refCount);
102 #ifdef DEBUG_REFCOUNT
103 osi_Log2(afsd_logp,"buf_HoldLocked bp 0x%p ref %d",bp, refCount);
104 afsi_log("%s:%d buf_HoldLocked bp 0x%p, ref %d", file, line, bp, refCount);
108 /* hold a reference to an already held buffer */
109 #ifdef DEBUG_REFCOUNT
110 void buf_HoldDbg(cm_buf_t *bp, char *file, long line)
112 void buf_Hold(cm_buf_t *bp)
117 lock_ObtainRead(&buf_globalLock);
118 osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
119 refCount = InterlockedIncrement(&bp->refCount);
120 #ifdef DEBUG_REFCOUNT
121 osi_Log2(afsd_logp,"buf_Hold bp 0x%p ref %d",bp, refCount);
122 afsi_log("%s:%d buf_Hold bp 0x%p, ref %d", file, line, bp, refCount);
124 lock_ReleaseRead(&buf_globalLock);
127 /* code to drop reference count while holding buf_globalLock */
128 #ifdef DEBUG_REFCOUNT
129 void buf_ReleaseLockedDbg(cm_buf_t *bp, afs_uint32 writeLocked, char *file, long line)
131 void buf_ReleaseLocked(cm_buf_t *bp, afs_uint32 writeLocked)
137 lock_AssertWrite(&buf_globalLock);
139 lock_AssertRead(&buf_globalLock);
141 /* ensure that we're in the LRU queue if our ref count is 0 */
142 osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
144 refCount = InterlockedDecrement(&bp->refCount);
145 #ifdef DEBUG_REFCOUNT
146 osi_Log3(afsd_logp,"buf_ReleaseLocked %s bp 0x%p ref %d",writeLocked?"write":"read", bp, refCount);
147 afsi_log("%s:%d buf_ReleaseLocked %s bp 0x%p, ref %d", file, line, writeLocked?"write":"read", bp, refCount);
151 osi_panic("buf refcount 0",__FILE__,__LINE__);
153 osi_assertx(refCount >= 0, "cm_buf_t refCount < 0");
157 * If we are read locked there could be a race condition
158 * with buf_Find() so we must obtain a write lock and
159 * double check that the refCount is actually zero
160 * before we remove the buffer from the LRU queue.
163 lock_ConvertRToW(&buf_globalLock);
165 if (bp->refCount == 0 &&
166 !(bp->qFlags & CM_BUF_QINLRU)) {
167 osi_QAdd((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
169 /* watch for transition from empty to one element */
170 if (!cm_data.buf_freeListEndp)
171 cm_data.buf_freeListEndp = cm_data.buf_freeListp;
172 _InterlockedOr(&bp->qFlags, CM_BUF_QINLRU);
176 lock_ConvertWToR(&buf_globalLock);
180 /* release a buffer. Buffer must be referenced, but unlocked. */
181 #ifdef DEBUG_REFCOUNT
182 void buf_ReleaseDbg(cm_buf_t *bp, char *file, long line)
184 void buf_Release(cm_buf_t *bp)
189 /* ensure that we're in the LRU queue if our ref count is 0 */
190 osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
192 refCount = InterlockedDecrement(&bp->refCount);
193 #ifdef DEBUG_REFCOUNT
194 osi_Log2(afsd_logp,"buf_Release bp 0x%p ref %d", bp, refCount);
195 afsi_log("%s:%d buf_Release bp 0x%p, ref %d", file, line, bp, refCount);
199 osi_panic("buf refcount 0",__FILE__,__LINE__);
201 osi_assertx(refCount >= 0, "cm_buf_t refCount < 0");
204 lock_ObtainWrite(&buf_globalLock);
205 if (bp->refCount == 0 &&
206 !(bp->qFlags & CM_BUF_QINLRU)) {
207 osi_QAdd((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
209 /* watch for transition from empty to one element */
210 if (!cm_data.buf_freeListEndp)
211 cm_data.buf_freeListEndp = cm_data.buf_freeListp;
212 _InterlockedOr(&bp->qFlags, CM_BUF_QINLRU);
214 lock_ReleaseWrite(&buf_globalLock);
219 buf_Sync(int quitOnShutdown)
221 cm_buf_t **bpp, *bp, *prevbp;
222 afs_uint32 wasDirty = 0;
225 /* go through all of the dirty buffers */
226 lock_ObtainRead(&buf_globalLock);
227 for (bpp = &cm_data.buf_dirtyListp, prevbp = NULL; (bp = *bpp) != NULL; ) {
228 if (quitOnShutdown && buf_ShutdownFlag)
231 lock_ReleaseRead(&buf_globalLock);
232 /* all dirty buffers are held when they are added to the
233 * dirty list. No need for an additional hold.
235 lock_ObtainMutex(&bp->mx);
237 if (bp->flags & CM_BUF_DIRTY && !(bp->qFlags & CM_BUF_QREDIR)) {
238 /* start cleaning the buffer; don't touch log pages since
239 * the log code counts on knowing exactly who is writing
240 * a log page at any given instant.
242 * only attempt to write the buffer if the volume might
248 volp = cm_GetVolumeByFID(&bp->fid);
249 switch (cm_GetVolumeStatus(volp, bp->fid.volume)) {
253 req.flags |= CM_REQ_NORETRY;
254 buf_CleanAsyncLocked(NULL, bp, &req, 0, &dirty);
260 /* the buffer may or may not have been dirty
261 * and if dirty may or may not have been cleaned
262 * successfully. check the dirty flag again.
264 if (!(bp->flags & CM_BUF_DIRTY)) {
265 /* remove the buffer from the dirty list */
266 lock_ObtainWrite(&buf_globalLock);
267 #ifdef DEBUG_REFCOUNT
268 if (bp->dirtyp == NULL && bp != cm_data.buf_dirtyListEndp) {
269 osi_Log1(afsd_logp,"buf_Sync bp 0x%p list corruption",bp);
270 afsi_log("buf_Sync bp 0x%p list corruption", bp);
275 _InterlockedAnd(&bp->qFlags, ~CM_BUF_QINDL);
276 if (cm_data.buf_dirtyListp == NULL)
277 cm_data.buf_dirtyListEndp = NULL;
278 else if (cm_data.buf_dirtyListEndp == bp)
279 cm_data.buf_dirtyListEndp = prevbp;
280 buf_ReleaseLocked(bp, TRUE);
281 lock_ConvertWToR(&buf_globalLock);
283 if (buf_ShutdownFlag) {
286 char volstr[VL_MAXNAMELEN+12]="";
289 volp = cm_GetVolumeByFID(&bp->fid);
292 if (bp->fid.volume == volp->vol[RWVOL].ID)
294 else if (bp->fid.volume == volp->vol[ROVOL].ID)
296 else if (bp->fid.volume == volp->vol[BACKVOL].ID)
300 snprintf(volstr, sizeof(volstr), "%s%s", volp->namep, ext);
302 cellp = cm_FindCellByID(bp->fid.cell, CM_FLAG_NOPROBE);
303 snprintf(volstr, sizeof(volstr), "%u", bp->fid.volume);
306 LogEvent(EVENTLOG_INFORMATION_TYPE, MSG_DIRTY_BUFFER_AT_SHUTDOWN,
307 cellp->name, volstr, bp->fid.vnode, bp->fid.unique,
308 bp->offset.QuadPart+bp->dirty_offset, bp->dirty_length);
311 /* advance the pointer so we don't loop forever */
312 lock_ObtainRead(&buf_globalLock);
316 lock_ReleaseMutex(&bp->mx);
317 } /* for loop over a bunch of buffers */
318 lock_ReleaseRead(&buf_globalLock);
323 /* incremental sync daemon. Writes all dirty buffers every 5000 ms */
324 void buf_IncrSyncer(long parm)
329 while (buf_ShutdownFlag == 0) {
331 i = SleepEx(5000, 1);
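/*
 * Descriptive note (added): the second SleepEx() argument (1 == alertable)
 * lets a queued user APC cut the 5 second wait short; in that case SleepEx()
 * returns WAIT_IO_COMPLETION rather than 0.
 */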
338 wasDirty = buf_Sync(1);
339 } /* whole daemon's while loop */
343 buf_ValidateBuffers(void)
345 cm_buf_t * bp, *bpf, *bpa, *bpb;
346 afs_uint64 countb = 0, countf = 0, counta = 0;
348 if ((cm_data.buf_freeListp == NULL && cm_data.buf_freeListEndp != NULL) ||
349 (cm_data.buf_freeListp != NULL && cm_data.buf_freeListEndp == NULL)) {
350 afsi_log("cm_ValidateBuffers failure: inconsistent free list pointers");
351 fprintf(stderr, "cm_ValidateBuffers failure: inconsistent free list pointers\n");
355 for (bp = cm_data.buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
356 if (bp->magic != CM_BUF_MAGIC) {
357 afsi_log("cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
358 fprintf(stderr, "cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
364 if (countb > cm_data.buf_nbuffers) {
365 afsi_log("cm_ValidateBuffers failure: countb > cm_data.buf_nbuffers");
366 fprintf(stderr, "cm_ValidateBuffers failure: countb > cm_data.buf_nbuffers\n");
371 for (bp = cm_data.buf_freeListp; bp; bp=(cm_buf_t *) osi_QNext(&bp->q)) {
372 if (bp->magic != CM_BUF_MAGIC) {
373 afsi_log("cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
374 fprintf(stderr, "cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
380 if (countf > cm_data.buf_nbuffers) {
381 afsi_log("cm_ValidateBuffers failure: countf > cm_data.buf_nbuffers");
382 fprintf(stderr, "cm_ValidateBuffers failure: countf > cm_data.buf_nbuffers\n");
387 for (bp = cm_data.buf_allp; bp; bp=bp->allp) {
388 if (bp->magic != CM_BUF_MAGIC) {
389 afsi_log("cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
390 fprintf(stderr, "cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
396 if (counta > cm_data.buf_nbuffers) {
397 afsi_log("cm_ValidateBuffers failure: counta > cm_data.buf_nbuffers");
398 fprintf(stderr, "cm_ValidateBuffers failure: counta > cm_data.buf_nbuffers\n");
403 if (countb != countf) {
404 afsi_log("cm_ValidateBuffers failure: countb != countf");
405 fprintf(stderr, "cm_ValidateBuffers failure: countb != countf\n");
409 if (counta != cm_data.buf_nbuffers) {
410 afsi_log("cm_ValidateBuffers failure: counta != cm_data.buf_nbuffers");
411 fprintf(stderr, "cm_ValidateBuffers failure: counta != cm_data.buf_nbuffers\n");
418 void buf_Shutdown(void)
420 /* disable the buf_IncrSyncer() threads */
421 buf_ShutdownFlag = 1;
423 /* then force all dirty buffers to the file servers */
427 /* initialize the buffer package; called with no locks
428 * held during the initialization phase.
430 long buf_Init(int newFile, cm_buf_ops_t *opsp, afs_uint64 nbuffers)
432 static osi_once_t once;
441 cm_data.buf_nbuffers = nbuffers;
443 /* Have to be able to reserve a whole chunk */
444 if (((cm_data.buf_nbuffers - 3) * cm_data.buf_blockSize) < cm_chunkSize)
445 return CM_ERROR_TOOFEWBUFS;
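/*
 * Worked example (illustrative numbers only): with a buf_blockSize of 4K and
 * a cm_chunkSize of 1M, the test above requires
 * (nbuffers - 3) * 4096 >= 1048576, i.e. at least 256 + 3 = 259 buffers.
 */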
448 /* recall for callouts */
451 if (osi_Once(&once)) {
452 /* initialize global locks */
453 lock_InitializeRWLock(&buf_globalLock, "Global buffer lock", LOCK_HIERARCHY_BUF_GLOBAL);
456 /* remember this for those who want to reset it */
457 cm_data.buf_nOrigBuffers = cm_data.buf_nbuffers;
459 /* lower hash size to a prime number */
460 cm_data.buf_hashSize = osi_PrimeLessThan((afs_uint32)(cm_data.buf_nbuffers/7 + 1));
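/*
 * Worked example (illustrative numbers only, and assuming osi_PrimeLessThan()
 * returns the largest prime below its argument, as its name suggests): with
 * 7,000 buffers, 7000/7 + 1 = 1001, and the nearest prime below that (997)
 * becomes the hash table size.
 */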
462 /* create hash table */
463 memset((void *)cm_data.buf_scacheHashTablepp, 0, cm_data.buf_hashSize * sizeof(cm_buf_t *));
465 /* another hash table */
466 memset((void *)cm_data.buf_fileHashTablepp, 0, cm_data.buf_hashSize * sizeof(cm_buf_t *));
468 /* create buffer headers and put in free list */
469 bp = cm_data.bufHeaderBaseAddress;
470 data = cm_data.bufDataBaseAddress;
471 cm_data.buf_allp = NULL;
473 for (i=0; i<cm_data.buf_nbuffers; i++) {
474 osi_assertx(bp >= cm_data.bufHeaderBaseAddress && bp < (cm_buf_t *)cm_data.bufDataBaseAddress,
475 "invalid cm_buf_t address");
476 osi_assertx(data >= cm_data.bufDataBaseAddress && data < cm_data.bufEndOfData,
477 "invalid cm_buf_t data address");
479 /* allocate and zero some storage */
480 memset(bp, 0, sizeof(cm_buf_t));
481 bp->magic = CM_BUF_MAGIC;
482 /* thread on list of all buffers */
483 bp->allp = cm_data.buf_allp;
484 cm_data.buf_allp = bp;
486 osi_QAdd((osi_queue_t **)&cm_data.buf_freeListp, &bp->q);
487 _InterlockedOr(&bp->qFlags, CM_BUF_QINLRU);
488 lock_InitializeMutex(&bp->mx, "Buffer mutex", LOCK_HIERARCHY_BUFFER);
490 /* grab appropriate number of bytes from aligned zone */
493 /* setup last buffer pointer */
495 cm_data.buf_freeListEndp = bp;
499 data += cm_data.buf_blockSize;
502 /* none reserved at first */
503 cm_data.buf_reservedBufs = 0;
505 /* just for safety's sake */
506 cm_data.buf_maxReservedBufs = cm_data.buf_nbuffers - 3;
508 bp = cm_data.bufHeaderBaseAddress;
509 data = cm_data.bufDataBaseAddress;
511 for (i=0; i<cm_data.buf_nbuffers; i++) {
512 lock_InitializeMutex(&bp->mx, "Buffer mutex", LOCK_HIERARCHY_BUFFER);
515 bp->waitRequests = 0;
516 _InterlockedAnd(&bp->flags, ~CM_BUF_WAITING);
522 buf_ValidateBufQueues();
526 /* init the buffer trace log */
527 buf_logp = osi_LogCreate("buffer", 1000);
528 osi_LogEnable(buf_logp);
533 /* and create the incr-syncer */
534 phandle = thrd_Create(0, 0,
535 (ThreadFunc) buf_IncrSyncer, 0, 0, &pid,
538 osi_assertx(phandle != NULL, "buf: can't create incremental sync proc");
539 CloseHandle(phandle);
543 buf_ValidateBufQueues();
548 /* add nbuffers to the buffer pool, if possible.
549 * Called with no locks held.
551 long buf_AddBuffers(afs_uint64 nbuffers)
553 /* The size of a virtual cache cannot be changed after it has
554 * been created. Subsequent calls to MapViewOfFile() with
555 * an existing mapping object name would not allow the
556 * object to be resized. Return failure immediately.
558 * A similar problem now occurs with the persistent cache
559 * given that the memory mapped file now contains a complex
562 afsi_log("request to add %I64u buffers to the existing cache of size %I64u denied",
563 nbuffers, cm_data.buf_nbuffers);
565 return CM_ERROR_INVAL;
568 /* interface to set the number of buffers to an exact figure.
569 * Called with no locks held.
571 long buf_SetNBuffers(afs_uint64 nbuffers)
574 return CM_ERROR_INVAL;
575 if (nbuffers == cm_data.buf_nbuffers)
577 else if (nbuffers > cm_data.buf_nbuffers)
578 return buf_AddBuffers(nbuffers - cm_data.buf_nbuffers);
580 return CM_ERROR_INVAL;
583 /* wait for reading or writing to clear; called with write-locked
584 * buffer and unlocked scp and returns with locked buffer.
586 void buf_WaitIO(cm_scache_t * scp, cm_buf_t *bp)
591 osi_assertx(scp->magic == CM_SCACHE_MAGIC, "invalid cm_scache_t magic");
592 osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
595 /* if no IO is happening, we're done */
596 if (!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)))
599 /* otherwise I/O is happening, but some other thread is waiting for
600 * the I/O already. Wait for that guy to figure out what happened,
601 * and then check again.
603 if ( bp->flags & CM_BUF_WAITING ) {
606 osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING already set for 0x%p", bp);
608 osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING set for 0x%p", bp);
609 _InterlockedOr(&bp->flags, CM_BUF_WAITING);
610 bp->waitCount = bp->waitRequests = 1;
612 osi_SleepM((LONG_PTR)bp, &bp->mx);
614 cm_UpdateServerPriority();
616 lock_ObtainMutex(&bp->mx);
617 osi_Log1(buf_logp, "buf_WaitIO conflict wait done for 0x%p", bp);
619 if (bp->waitCount == 0) {
620 osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING reset for 0x%p", bp);
621 _InterlockedAnd(&bp->flags, ~CM_BUF_WAITING);
622 bp->waitRequests = 0;
626 if ((scp = cm_FindSCache(&bp->fid)) != NULL)
630 lock_ObtainRead(&scp->rw);
631 if (scp->flags & CM_SCACHEFLAG_WAITING) {
632 osi_Log1(buf_logp, "buf_WaitIO waking scp 0x%p", scp);
633 osi_Wakeup((LONG_PTR)&scp->flags);
635 lock_ReleaseRead(&scp->rw);
639 /* if we get here, the IO is done, but we may have to wakeup people waiting for
640 * the I/O to complete. Do so.
642 if (bp->flags & CM_BUF_WAITING) {
643 osi_Log1(buf_logp, "buf_WaitIO Waking bp 0x%p", bp);
644 osi_Wakeup((LONG_PTR) bp);
646 osi_Log1(buf_logp, "WaitIO finished wait for bp 0x%p", bp);
649 cm_ReleaseSCache(scp);
652 /* find a buffer, if any, for a particular file ID and offset. Assumes
653 * that buf_globalLock is held (read or write) when called.
655 cm_buf_t *buf_FindLocked(struct cm_fid *fidp, osi_hyper_t *offsetp)
660 lock_AssertAny(&buf_globalLock);
662 i = BUF_HASH(fidp, offsetp);
663 for(bp = cm_data.buf_scacheHashTablepp[i]; bp; bp=bp->hashp) {
664 if (cm_FidCmp(fidp, &bp->fid) == 0
665 && offsetp->LowPart == bp->offset.LowPart
666 && offsetp->HighPart == bp->offset.HighPart) {
672 /* return whatever we found, if anything */
676 /* find a buffer with offset *offsetp for the file *fidp. Called
677 * with no locks held.
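 *
 * Illustrative usage sketch (not from the original source; assumes, as
 * buf_Get's use of buf_Find suggests, that a non-NULL result is returned
 * with a reference held, and that pageOffset is block-aligned as in
 * buf_Get()):
 *
 *     cm_buf_t *bufp = buf_Find(&scp->fid, &pageOffset);
 *     if (bufp) {
 *         lock_ObtainMutex(&bufp->mx);
 *         ... examine bufp->datap / bufp->dataVersion ...
 *         lock_ReleaseMutex(&bufp->mx);
 *         buf_Release(bufp);
 *     }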
679 cm_buf_t *buf_Find(struct cm_fid *fidp, osi_hyper_t *offsetp)
683 lock_ObtainRead(&buf_globalLock);
684 bp = buf_FindLocked(fidp, offsetp);
685 lock_ReleaseRead(&buf_globalLock);
690 /* find a buffer, if any, for a particular file ID and offset. Assumes
691 * that buf_globalLock is held (read or write) when called. Uses the all buffer
694 cm_buf_t *buf_FindAllLocked(struct cm_fid *fidp, osi_hyper_t *offsetp, afs_uint32 flags)
699 for(bp = cm_data.buf_allp; bp; bp=bp->allp) {
700 if (cm_FidCmp(fidp, &bp->fid) == 0
701 && offsetp->LowPart == bp->offset.LowPart
702 && offsetp->HighPart == bp->offset.HighPart) {
708 for(bp = cm_data.buf_allp; bp; bp=bp->allp) {
709 if (cm_FidCmp(fidp, &bp->fid) == 0) {
712 fileOffset = offsetp->QuadPart + cm_data.baseAddress;
713 if (fileOffset == bp->datap) {
720 /* return whatever we found, if anything */
724 /* find a buffer with offset *offsetp for the file *fidp. Called
725 * with no locks held. Use the all buffer list.
727 cm_buf_t *buf_FindAll(struct cm_fid *fidp, osi_hyper_t *offsetp, afs_uint32 flags)
731 lock_ObtainRead(&buf_globalLock);
732 bp = buf_FindAllLocked(fidp, offsetp, flags);
733 lock_ReleaseRead(&buf_globalLock);
738 /* start cleaning I/O on this buffer. Buffer must be write locked, and is returned
741 * Makes sure that there's only one person writing this block
742 * at any given time, and also ensures that the log is forced sufficiently far,
743 * if this buffer contains logged data.
745 * Returns non-zero if the buffer was dirty.
747 * 'scp' may or may not be NULL. If it is not NULL, the FID for both cm_scache_t
748 * and cm_buf_t must match.
750 afs_uint32 buf_CleanAsyncLocked(cm_scache_t *scp, cm_buf_t *bp, cm_req_t *reqp,
751 afs_uint32 flags, afs_uint32 *pisdirty)
754 afs_uint32 isdirty = 0;
758 osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
759 osi_assertx(scp == NULL || cm_FidCmp(&scp->fid, &bp->fid) == 0, "scp fid != bp fid");
762 * If the matching cm_scache_t was not provided as a parameter
763 * we must either find one or allocate a new one. It is possible
764 * that the cm_scache_t was recycled out of the cache even though
765 * a cm_buf_t with the same FID is in the cache.
768 if ((scp = cm_FindSCache(&bp->fid)) ||
769 (cm_GetSCache(&bp->fid, &scp,
770 bp->userp ? bp->userp : cm_rootUserp,
776 while ((bp->flags & CM_BUF_DIRTY) == CM_BUF_DIRTY) {
778 lock_ReleaseMutex(&bp->mx);
782 * If we didn't find a cm_scache_t object for bp->fid it means
783 * that we no longer have that FID in the cache. It does not
784 * mean that the object does not exist in the cell. That may
785 * in fact be the case but we don't know that until we attempt
786 * a FetchStatus on the FID.
788 osi_Log1(buf_logp, "buf_CleanAsyncLocked unable to start I/O - scp not found buf 0x%p", bp);
789 code = CM_ERROR_NOSUCHFILE;
791 osi_Log2(buf_logp, "buf_CleanAsyncLocked starts I/O on scp 0x%p buf 0x%p", scp, bp);
794 LargeIntegerAdd(offset, ConvertLongToLargeInteger(bp->dirty_offset));
795 code = (*cm_buf_opsp->Writep)(scp, &offset,
797 /* we might as well try to write all of the contiguous
798 * dirty buffers in one RPC
804 flags, bp->userp, reqp);
805 osi_Log3(buf_logp, "buf_CleanAsyncLocked I/O on scp 0x%p buf 0x%p, done=%d", scp, bp, code);
807 lock_ObtainMutex(&bp->mx);
808 /* if the Write routine returns No Such File, clear the dirty flag
809 * because we aren't going to be able to write this data to the file
812 if (code == CM_ERROR_NOSUCHFILE || code == CM_ERROR_BADFD || code == CM_ERROR_NOACCESS ||
813 code == CM_ERROR_QUOTA || code == CM_ERROR_SPACE || code == CM_ERROR_TOOBIG ||
814 code == CM_ERROR_READONLY || code == CM_ERROR_NOSUCHPATH){
815 _InterlockedAnd(&bp->flags, ~CM_BUF_DIRTY);
816 _InterlockedOr(&bp->flags, CM_BUF_ERROR);
817 bp->dirty_offset = 0;
818 bp->dirty_length = 0;
820 bp->dataVersion = CM_BUF_VERSION_BAD;
826 /* Disk cache support */
827 /* write buffer to disk cache (synchronous for now) */
828 diskcache_Update(bp->dcp, bp->datap, cm_data.buf_blockSize, bp->dataVersion);
829 #endif /* DISKCACHE95 */
831 /* if we get here and retries are not permitted
832 * then we need to exit this loop regardless of
833 * whether or not we were able to clear the dirty bit
835 if (reqp->flags & CM_REQ_NORETRY)
838 /* Ditto if the hardDeadTimeout or idleTimeout was reached */
839 if (code == CM_ERROR_TIMEDOUT || code == CM_ERROR_ALLDOWN ||
840 code == CM_ERROR_ALLBUSY || code == CM_ERROR_ALLOFFLINE ||
841 code == CM_ERROR_CLOCKSKEW) {
847 cm_ReleaseSCache(scp);
849 /* if someone was waiting for the I/O that just completed or failed,
852 if (bp->flags & CM_BUF_WAITING) {
853 /* turn off flags and wakeup users */
854 osi_Log1(buf_logp, "buf_WaitIO Waking bp 0x%p", bp);
855 osi_Wakeup((LONG_PTR) bp);
864 /* Called with a zero-ref count buffer and with the buf_globalLock write locked.
865 * recycles the buffer, and leaves it ready for reuse with a ref count of 1.
866 * The buffer must already be clean, and no I/O should be happening to it.
868 void buf_Recycle(cm_buf_t *bp)
873 cm_buf_t *prevBp, *nextBp;
875 osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
877 /* if we get here, we know that the buffer still has a 0 ref count,
878 * and that it is clean and has no currently pending I/O. This is
879 * the dude to return.
880 * Remember that as long as the ref count is 0, we know that we won't
881 * have any lock conflicts, so we can grab the buffer lock out of
882 * order in the locking hierarchy.
884 osi_Log3( buf_logp, "buf_Recycle recycles 0x%p, off 0x%x:%08x",
885 bp, bp->offset.HighPart, bp->offset.LowPart);
887 osi_assertx(bp->refCount == 0, "cm_buf_t refcount != 0");
888 osi_assertx(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)),
889 "incorrect cm_buf_t flags");
890 lock_AssertWrite(&buf_globalLock);
892 if (bp->qFlags & CM_BUF_QINHASH) {
893 /* Remove from hash */
895 i = BUF_HASH(&bp->fid, &bp->offset);
896 lbpp = &(cm_data.buf_scacheHashTablepp[i]);
897 for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) {
902 /* we better find it */
903 osi_assertx(tbp != NULL, "buf_Recycle: hash table screwup");
905 *lbpp = bp->hashp; /* hash out */
908 /* Remove from file hash */
910 i = BUF_FILEHASH(&bp->fid);
911 prevBp = bp->fileHashBackp;
912 bp->fileHashBackp = NULL;
913 nextBp = bp->fileHashp;
914 bp->fileHashp = NULL;
916 prevBp->fileHashp = nextBp;
918 cm_data.buf_fileHashTablepp[i] = nextBp;
920 nextBp->fileHashBackp = prevBp;
922 _InterlockedAnd(&bp->qFlags, ~CM_BUF_QINHASH);
925 /* make the fid unrecognizable */
926 memset(&bp->fid, 0, sizeof(cm_fid_t));
928 /* clean up junk flags */
929 _InterlockedAnd(&bp->flags, ~(CM_BUF_EOF | CM_BUF_ERROR));
930 bp->dataVersion = CM_BUF_VERSION_BAD; /* unknown so far */
933 /* recycle a buffer, removing it from the free list, hashing in its new identity
934 * and returning it write-locked so that no one can use it. Called without
935 * any locks held, and can return an error if it loses the race condition and
936 * finds that someone else created the desired buffer.
938 * If success is returned, the buffer is returned write-locked.
940 * May be called with null scp and offsetp, if we're just trying to reclaim some
941 * space from the buffer pool. In that case, the buffer will be returned
942 * without being hashed into the hash table.
944 long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf_t **bufpp)
946 cm_buf_t *bp; /* buffer we're dealing with */
947 cm_buf_t *nextBp; /* next buffer in file hash chain */
948 afs_uint32 i; /* temp */
951 buf_ValidateBufQueues();
956 lock_ObtainRead(&scp->bufCreateLock);
957 lock_ObtainWrite(&buf_globalLock);
958 /* check to see if we lost the race */
960 if ((bp = buf_FindLocked(&scp->fid, offsetp)) != NULL) {
961 /* Do not call buf_ReleaseLocked() because we
962 * do not want to allow the buffer to be added
965 afs_int32 refCount = InterlockedDecrement(&bp->refCount);
966 #ifdef DEBUG_REFCOUNT
967 osi_Log2(afsd_logp,"buf_GetNewLocked bp 0x%p ref %d", bp, refCount);
968 afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, refCount);
970 lock_ReleaseWrite(&buf_globalLock);
971 lock_ReleaseRead(&scp->bufCreateLock);
972 return CM_BUF_EXISTS;
976 /* does this fix the problem below? it's a simple solution. */
977 if (!cm_data.buf_freeListEndp)
979 lock_ReleaseWrite(&buf_globalLock);
980 lock_ReleaseRead(&scp->bufCreateLock);
981 osi_Log0(afsd_logp, "buf_GetNewLocked: Free Buffer List is empty - sleeping 200ms");
986 /* for debugging, assert free list isn't empty, although we
987 * really should try waiting for a running transaction to finish
988 * instead of this; or better, we should have a transaction
989 * throttler prevent us from entering this situation.
991 osi_assertx(cm_data.buf_freeListEndp != NULL, "buf_GetNewLocked: no free buffers");
993 /* look at all buffers in free list, some of which may temp.
994 * have high refcounts and which then should be skipped,
995 * starting cleaning I/O for those which are dirty. If we find
996 * a clean buffer, we rehash it, lock it and return it.
998 for(bp = cm_data.buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
999 /* check to see if it really has zero ref count. This
1000 * code can bump refcounts, at least, so it may not be
1003 if (bp->refCount > 0)
1006 /* we don't have to lock buffer itself, since the ref
1007 * count is 0 and we know it will stay zero as long as
1008 * we hold the global lock.
1011 /* Don't recycle a buffer held by the redirector. */
1012 if (bp->qFlags & CM_BUF_QREDIR)
1015 /* don't recycle someone in our own chunk */
1016 if (!cm_FidCmp(&bp->fid, &scp->fid)
1017 && (bp->offset.LowPart & (-cm_chunkSize))
1018 == (offsetp->LowPart & (-cm_chunkSize)))
1021 /* if this page is being filled (!) or cleaned, see if
1022 * the I/O has completed. If not, skip it, otherwise
1023 * do the final processing for the I/O.
1025 if (bp->flags & (CM_BUF_READING | CM_BUF_WRITING)) {
1026 /* probably shouldn't do this much work while
1027 * holding the big lock? Watch for contention
1033 if (bp->flags & CM_BUF_DIRTY) {
1034 /* if the buffer is dirty, start cleaning it and
1035 * move on to the next buffer. We do this with
1036 * just the lock required to minimize contention
1040 lock_ReleaseWrite(&buf_globalLock);
1041 lock_ReleaseRead(&scp->bufCreateLock);
1043 /* grab required lock and clean; this only
1044 * starts the I/O. By the time we're back,
1045 * it'll still be marked dirty, but it will also
1046 * have the WRITING flag set, so we won't get
1049 if (cm_FidCmp(&scp->fid, &bp->fid) == 0)
1050 buf_CleanAsync(scp, bp, reqp, 0, NULL);
1052 buf_CleanAsync(NULL, bp, reqp, 0, NULL);
1054 /* now put it back and go around again */
1059 /* if we get here, we know that the buffer still has a 0
1060 * ref count, and that it is clean and has no currently
1061 * pending I/O. This is the dude to return.
1062 * Remember that as long as the ref count is 0, we know
1063 * that we won't have any lock conflicts, so we can grab
1064 * the buffer lock out of order in the locking hierarchy.
1068 /* now hash in as our new buffer, and give it the
1069 * appropriate label, if requested.
1072 lock_AssertWrite(&buf_globalLock);
1074 _InterlockedOr(&bp->qFlags, CM_BUF_QINHASH);
1079 bp->offset = *offsetp;
1080 i = BUF_HASH(&scp->fid, offsetp);
1081 bp->hashp = cm_data.buf_scacheHashTablepp[i];
1082 cm_data.buf_scacheHashTablepp[i] = bp;
1083 i = BUF_FILEHASH(&scp->fid);
1084 nextBp = cm_data.buf_fileHashTablepp[i];
1085 bp->fileHashp = nextBp;
1086 bp->fileHashBackp = NULL;
1088 nextBp->fileHashBackp = bp;
1089 cm_data.buf_fileHashTablepp[i] = bp;
1092 /* we should move it from the lru queue. It better still be there,
1093 * since we've held the global (big) lock since we found it there.
1095 osi_assertx(bp->qFlags & CM_BUF_QINLRU,
1096 "buf_GetNewLocked: LRU screwup");
1098 if (cm_data.buf_freeListEndp == bp) {
1099 /* we're the last guy in this queue, so maintain it */
1100 cm_data.buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
1102 osi_QRemove((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
1103 _InterlockedAnd(&bp->qFlags, ~CM_BUF_QINLRU);
1105 /* prepare to return it. Give it a refcount */
1107 #ifdef DEBUG_REFCOUNT
1108 osi_Log2(afsd_logp,"buf_GetNewLocked bp 0x%p ref %d", bp, 1);
1109 afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, 1);
1111 /* grab the mutex so that people don't use it
1112 * before the caller fills it with data. Again, no one
1113 * should have been able to get to this dude to lock it.
1115 if (!lock_TryMutex(&bp->mx)) {
1116 osi_Log2(afsd_logp, "buf_GetNewLocked bp 0x%p cannot be mutex locked. refCount %d should be 0",
1118 osi_panic("buf_GetNewLocked: TryMutex failed",__FILE__,__LINE__);
1121 lock_ReleaseWrite(&buf_globalLock);
1122 lock_ReleaseRead(&scp->bufCreateLock);
1127 buf_ValidateBufQueues();
1128 #endif /* TESTING */
1130 } /* for all buffers in lru queue */
1131 lock_ReleaseWrite(&buf_globalLock);
1132 lock_ReleaseRead(&scp->bufCreateLock);
1133 osi_Log0(afsd_logp, "buf_GetNewLocked: Free Buffer List has no buffers with a zero refcount - sleeping 100ms");
1134 Sleep(100); /* give some time for a buffer to be freed */
1135 } /* while loop over everything */
1140 * get a page, returning it held but unlocked. the page may or may not
1141 * contain valid data.
1143 * The scp must be unlocked when passed in.
1145 long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf_t **bufpp)
1149 osi_hyper_t pageOffset;
1150 unsigned long tcount;
1154 cm_diskcache_t *dcp;
1155 #endif /* DISKCACHE95 */
1158 pageOffset.HighPart = offsetp->HighPart;
1159 pageOffset.LowPart = offsetp->LowPart & ~(cm_data.buf_blockSize-1);
1163 buf_ValidateBufQueues();
1164 #endif /* TESTING */
1166 bp = buf_Find(&scp->fid, &pageOffset);
1168 /* lock it and break out */
1169 lock_ObtainMutex(&bp->mx);
1172 /* touch disk chunk to update LRU info */
1173 diskcache_Touch(bp->dcp);
1174 #endif /* DISKCACHE95 */
1178 /* otherwise, we have to create a page */
1179 code = buf_GetNewLocked(scp, &pageOffset, reqp, &bp);
1182 /* the requested buffer was created */
1187 * the requested buffer existed by the time the
1188 * scp->bufCreateLock and buf_globalLock could be obtained.
1189 * loop again and permit buf_Find() to obtain a reference.
1194 * the requested buffer could not be created.
1195 * return the error to the caller.
1198 buf_ValidateBufQueues();
1199 #endif /* TESTING */
1202 } /* big while loop */
1204 /* if we get here, we have a locked buffer that may have just been
1205 * created, in which case it needs to be filled with data.
1208 /* load the page; freshly created pages should be idle */
1209 osi_assertx(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)), "incorrect cm_buf_t flags");
1212 * start the I/O; may drop lock. as of this writing, the only
1213 * implementation of Readp is cm_BufRead() which simply sets
1214 * tcount to 0 and returns success.
1216 _InterlockedOr(&bp->flags, CM_BUF_READING);
1217 code = (*cm_buf_opsp->Readp)(bp, cm_data.buf_blockSize, &tcount, NULL);
1220 code = diskcache_Get(&bp->fid, &bp->offset, bp->datap, cm_data.buf_blockSize, &bp->dataVersion, &tcount, &dcp);
1221 bp->dcp = dcp; /* pointer to disk cache struct. */
1222 #endif /* DISKCACHE95 */
1225 /* failure or queued */
1226 if (code != ERROR_IO_PENDING) {
1228 _InterlockedOr(&bp->flags, CM_BUF_ERROR);
1229 _InterlockedAnd(&bp->flags, ~CM_BUF_READING);
1230 if (bp->flags & CM_BUF_WAITING) {
1231 osi_Log1(buf_logp, "buf_Get Waking bp 0x%p", bp);
1232 osi_Wakeup((LONG_PTR) bp);
1234 lock_ReleaseMutex(&bp->mx);
1237 buf_ValidateBufQueues();
1238 #endif /* TESTING */
1243 * otherwise, I/O completed instantly and we're done, except
1244 * for padding the xfr out with 0s and checking for EOF
1246 if (tcount < (unsigned long) cm_data.buf_blockSize) {
1247 memset(bp->datap+tcount, 0, cm_data.buf_blockSize - tcount);
1249 _InterlockedOr(&bp->flags, CM_BUF_EOF);
1251 _InterlockedAnd(&bp->flags, ~CM_BUF_READING);
1252 if (bp->flags & CM_BUF_WAITING) {
1253 osi_Log1(buf_logp, "buf_Get Waking bp 0x%p", bp);
1254 osi_Wakeup((LONG_PTR) bp);
1259 /* wait for reads, either that which we started above, or that someone
1260 * else started. We don't care if we return a buffer being cleaned.
1262 if (bp->flags & CM_BUF_READING)
1263 buf_WaitIO(scp, bp);
1265 /* once it has been read once, we can unlock it and return it, still
1266 * with its refcount held.
1268 lock_ReleaseMutex(&bp->mx);
1271 /* now remove from queue; will be put in at the head (farthest from
1272 * being recycled) when we're done in buf_Release.
1274 lock_ObtainWrite(&buf_globalLock);
1275 if (bp->qFlags & CM_BUF_QINLRU) {
1276 if (cm_data.buf_freeListEndp == bp)
1277 cm_data.buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
1278 osi_QRemove((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
1279 _InterlockedAnd(&bp->qFlags, ~CM_BUF_QINLRU);
1281 lock_ReleaseWrite(&buf_globalLock);
1283 osi_Log4(buf_logp, "buf_Get returning bp 0x%p for scp 0x%p, offset 0x%x:%08x",
1284 bp, scp, offsetp->HighPart, offsetp->LowPart);
1286 buf_ValidateBufQueues();
1287 #endif /* TESTING */
1291 /* count # of elements in the free list;
1292 * we don't bother doing the proper locking for accessing dataVersion or flags
1293 * since it is a pain, and this is really just an advisory call. If you need
1294 * to do better at some point, rewrite this function.
1296 long buf_CountFreeList(void)
1302 lock_ObtainRead(&buf_globalLock);
1303 for(bufp = cm_data.buf_freeListp; bufp; bufp = (cm_buf_t *) osi_QNext(&bufp->q)) {
1304 /* if the buffer doesn't have an identity, or if the buffer
1305 * has been invalidated (by having its DV stomped upon), then
1306 * count it as free, since it isn't really being utilized.
1308 if (!(bufp->qFlags & CM_BUF_QINHASH) || bufp->dataVersion == CM_BUF_VERSION_BAD)
1311 lock_ReleaseRead(&buf_globalLock);
1315 /* clean a buffer synchronously */
1316 afs_uint32 buf_CleanAsync(cm_scache_t *scp, cm_buf_t *bp, cm_req_t *reqp, afs_uint32 flags, afs_uint32 *pisdirty)
1319 osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
1320 osi_assertx(!(flags & CM_BUF_WRITE_SCP_LOCKED), "scp->rw must not be held when calling buf_CleanAsync");
1322 lock_ObtainMutex(&bp->mx);
1323 code = buf_CleanAsyncLocked(scp, bp, reqp, flags, pisdirty);
1324 lock_ReleaseMutex(&bp->mx);
1329 /* wait for a buffer's cleaning to finish */
1330 void buf_CleanWait(cm_scache_t * scp, cm_buf_t *bp, afs_uint32 locked)
1332 osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
1335 lock_ObtainMutex(&bp->mx);
1336 if (bp->flags & CM_BUF_WRITING) {
1337 buf_WaitIO(scp, bp);
1340 lock_ReleaseMutex(&bp->mx);
1343 /* set the dirty flag on a buffer, and set associated write-ahead log,
1344 * if there is one. Allow one to be added to a buffer, but not changed.
1346 * The buffer must be locked before calling this routine.
1348 void buf_SetDirty(cm_buf_t *bp, cm_req_t *reqp, afs_uint32 offset, afs_uint32 length, cm_user_t *userp)
1350 osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
1351 osi_assertx(bp->refCount > 0, "cm_buf_t refcount 0");
1356 if (bp->flags & CM_BUF_DIRTY) {
1358 osi_Log1(buf_logp, "buf_SetDirty 0x%p already dirty", bp);
1360 if (bp->dirty_offset <= offset) {
1361 if (bp->dirty_offset + bp->dirty_length >= offset + length) {
1362 /* dirty_length remains the same */
1364 bp->dirty_length = offset + length - bp->dirty_offset;
1366 } else /* bp->dirty_offset > offset */ {
1367 if (bp->dirty_offset + bp->dirty_length >= offset + length) {
1368 bp->dirty_length = bp->dirty_offset + bp->dirty_length - offset;
1370 bp->dirty_length = length;
1372 bp->dirty_offset = offset;
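/*
 * Worked example (illustrative numbers only): if the buffer is already dirty
 * over bytes [100, 150) (dirty_offset 100, dirty_length 50) and the new write
 * covers bytes [80, 110) (offset 80, length 30), the branch above sets
 * dirty_offset = 80 and dirty_length = 100 + 50 - 80 = 70, so the merged
 * dirty range is [80, 150).
 */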
1375 osi_Log1(buf_logp, "buf_SetDirty 0x%p", bp);
1378 _InterlockedOr(&bp->flags, CM_BUF_DIRTY);
1380 /* and turn off EOF flag, since it has associated data now */
1381 _InterlockedAnd(&bp->flags, ~CM_BUF_EOF);
1383 bp->dirty_offset = offset;
1384 bp->dirty_length = length;
1386 /* and add to the dirty list.
1387 * we obtain a hold on the buffer for as long as it remains
1388 * in the list. buffers are only removed from the list by
1389 * the buf_IncrSyncer function regardless of when else the
1390 * dirty flag might be cleared.
1392 * This should never happen but just in case there is a bug
1393 * elsewhere, never add to the dirty list if the buffer is
1396 lock_ObtainWrite(&buf_globalLock);
1397 if (!(bp->qFlags & CM_BUF_QINDL)) {
1399 if (!cm_data.buf_dirtyListp) {
1400 cm_data.buf_dirtyListp = cm_data.buf_dirtyListEndp = bp;
1402 cm_data.buf_dirtyListEndp->dirtyp = bp;
1403 cm_data.buf_dirtyListEndp = bp;
1406 _InterlockedOr(&bp->qFlags, CM_BUF_QINDL);
1408 lock_ReleaseWrite(&buf_globalLock);
1411 /* and record the last writer */
1412 if (bp->userp != userp) {
1415 cm_ReleaseUser(bp->userp);
1420 /* clean all buffers, reset log pointers and invalidate all buffers.
1421 * Called with no locks held, and returns with same.
1423 * This function is guaranteed to clean and remove the log ptr of all the
1424 * buffers that were dirty or had non-zero log ptrs before the call was
1425 * made. That's sufficient to clean up any garbage left around by recovery,
1426 * which is all we're counting on this for; there may be newly created buffers
1427 * added while we're running, but that should be OK.
1429 * In an environment where there are no transactions (artificially imposed, for
1430 * example, when switching the database to raw mode), this function is used to
1431 * make sure that all updates have been written to the disk. In that case, we don't
1432 * really require that we forget the log association between pages and logs, but
1433 * it also doesn't hurt. Since raw mode I/O goes through this buffer package, we don't
1434 * have to worry about invalidating data in the buffers.
1436 * This function is used at the end of recovery as paranoia to get the recovered
1437 * database out to disk. It removes all references to the recovery log and cleans
1440 long buf_CleanAndReset(void)
1446 lock_ObtainRead(&buf_globalLock);
1447 for(i=0; i<cm_data.buf_hashSize; i++) {
1448 for(bp = cm_data.buf_scacheHashTablepp[i]; bp; bp = bp->hashp) {
1449 if ((bp->flags & CM_BUF_DIRTY) == CM_BUF_DIRTY) {
1451 lock_ReleaseRead(&buf_globalLock);
1453 /* now no locks are held; clean buffer and go on */
1455 req.flags |= CM_REQ_NORETRY;
1457 buf_CleanAsync(NULL, bp, &req, 0, NULL);
1458 buf_CleanWait(NULL, bp, FALSE);
1460 /* relock and release buffer */
1461 lock_ObtainRead(&buf_globalLock);
1462 buf_ReleaseLocked(bp, FALSE);
1464 } /* over one bucket */
1465 } /* for loop over all hash buckets */
1468 lock_ReleaseRead(&buf_globalLock);
1471 buf_ValidateBufQueues();
1472 #endif /* TESTING */
1474 /* and we're done */
1478 /* called without global lock being held, reserves buffers for callers
1479 * that need more than one held (not locked) at once.
1481 void buf_ReserveBuffers(afs_uint64 nbuffers)
1483 lock_ObtainWrite(&buf_globalLock);
1485 if (cm_data.buf_reservedBufs + nbuffers > cm_data.buf_maxReservedBufs) {
1486 cm_data.buf_reserveWaiting = 1;
1487 osi_Log1(buf_logp, "buf_ReserveBuffers waiting for %d bufs", nbuffers);
1488 osi_SleepW((LONG_PTR) &cm_data.buf_reservedBufs, &buf_globalLock);
1489 lock_ObtainWrite(&buf_globalLock);
1492 cm_data.buf_reservedBufs += nbuffers;
1496 lock_ReleaseWrite(&buf_globalLock);
1499 int buf_TryReserveBuffers(afs_uint64 nbuffers)
1503 lock_ObtainWrite(&buf_globalLock);
1504 if (cm_data.buf_reservedBufs + nbuffers > cm_data.buf_maxReservedBufs) {
1508 cm_data.buf_reservedBufs += nbuffers;
1511 lock_ReleaseWrite(&buf_globalLock);
1515 /* called without global lock held, releases reservation held by
1516 * buf_ReserveBuffers.
1518 void buf_UnreserveBuffers(afs_uint64 nbuffers)
1520 lock_ObtainWrite(&buf_globalLock);
1521 cm_data.buf_reservedBufs -= nbuffers;
1522 if (cm_data.buf_reserveWaiting) {
1523 cm_data.buf_reserveWaiting = 0;
1524 osi_Wakeup((LONG_PTR) &cm_data.buf_reservedBufs);
1526 lock_ReleaseWrite(&buf_globalLock);
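/*
 * Illustrative usage sketch (not from the original source; the offsets and
 * buffer names below are hypothetical): a caller that must hold several
 * buffers at once reserves them up front so the pool cannot be exhausted
 * mid-operation, and drops the reservation when done:
 *
 *     buf_ReserveBuffers(2);
 *     code = buf_Get(scp, &offset1, reqp, &bufp1);
 *     code = buf_Get(scp, &offset2, reqp, &bufp2);
 *     ... use both buffers ...
 *     buf_Release(bufp1);
 *     buf_Release(bufp2);
 *     buf_UnreserveBuffers(2);
 */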
1529 /* truncate the buffers past sizep, zeroing out the page if we don't
1530 * end on a page boundary.
1532 * Requires scp->bufCreateLock to be write locked.
1534 long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
1538 cm_buf_t *nbufp; /* next buffer, if didRelease */
1544 /* assert that scp->bufCreateLock is held in write mode */
1545 lock_AssertWrite(&scp->bufCreateLock);
1547 i = BUF_FILEHASH(&scp->fid);
1549 lock_ObtainRead(&buf_globalLock);
1550 bufp = cm_data.buf_fileHashTablepp[i];
1552 lock_ReleaseRead(&buf_globalLock);
1556 buf_HoldLocked(bufp);
1557 lock_ReleaseRead(&buf_globalLock);
1559 lock_ObtainMutex(&bufp->mx);
1561 bufEnd.HighPart = 0;
1562 bufEnd.LowPart = cm_data.buf_blockSize;
1563 bufEnd = LargeIntegerAdd(bufEnd, bufp->offset);
1565 if (cm_FidCmp(&bufp->fid, &scp->fid) == 0 &&
1566 LargeIntegerLessThan(*sizep, bufEnd)) {
1567 buf_WaitIO(scp, bufp);
1569 lock_ObtainWrite(&scp->rw);
1571 /* make sure we have a callback (so we have the right value for
1572 * the length), and wait for it to be safe to do a truncate.
1574 code = cm_SyncOp(scp, bufp, userp, reqp, 0,
1575 CM_SCACHESYNC_NEEDCALLBACK
1576 | CM_SCACHESYNC_GETSTATUS
1577 | CM_SCACHESYNC_SETSIZE
1578 | CM_SCACHESYNC_BUFLOCKED);
1581 /* if we succeeded in our locking, and this applies to the right
1582 * file, and the truncate request overlaps the buffer either
1583 * totally or partially, then do something.
1585 if (code == 0 && cm_FidCmp(&bufp->fid, &scp->fid) == 0
1586 && LargeIntegerLessThan(*sizep, bufEnd)) {
1589 /* destroy the buffer, turning off its dirty bit, if
1590 * we're truncating the whole buffer. Otherwise, set
1591 * the dirty bit, and clear out the tail of the buffer
1592 * if we just overlap some.
1594 if (LargeIntegerLessThanOrEqualTo(*sizep, bufp->offset)) {
1595 /* truncating the entire page */
1596 _InterlockedAnd(&bufp->flags, ~CM_BUF_DIRTY);
1597 bufp->dirty_offset = 0;
1598 bufp->dirty_length = 0;
1599 bufp->dataVersion = CM_BUF_VERSION_BAD; /* known bad */
1600 bufp->dirtyCounter++;
1603 /* don't set dirty, since dirty implies
1604 * currently up-to-date. Don't need to do this,
1605 * since we'll update the length anyway.
1607 * Zero out remainder of the page, in case we
1608 * seek and write past EOF, and make this data
1611 bufferPos = sizep->LowPart & (cm_data.buf_blockSize - 1);
1612 osi_assertx(bufferPos != 0, "non-zero bufferPos");
1613 memset(bufp->datap + bufferPos, 0,
1614 cm_data.buf_blockSize - bufferPos);
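/*
 * Worked example (illustrative numbers only, assuming a 4K buf_blockSize):
 * if the low bits of the new file length are 0x234, then
 * bufferPos = 0x234 & 0xFFF = 0x234, and the memset above zeroes the
 * remaining 0x1000 - 0x234 = 0xDCC bytes of the page.
 */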
1618 cm_SyncOpDone( scp, bufp,
1619 CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS
1620 | CM_SCACHESYNC_SETSIZE | CM_SCACHESYNC_BUFLOCKED);
1622 lock_ReleaseWrite(&scp->rw);
1623 lock_ReleaseMutex(&bufp->mx);
1626 nbufp = bufp->fileHashp;
1630 /* This forces the loop to end and the error code
1631 * to be returned. */
1639 buf_ValidateBufQueues();
1640 #endif /* TESTING */
1646 long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
1649 cm_buf_t *bp; /* buffer we're hacking on */
1653 afs_uint32 stable = 0;
1655 i = BUF_FILEHASH(&scp->fid);
1658 lock_ObtainRead(&buf_globalLock);
1659 bp = cm_data.buf_fileHashTablepp[i];
1662 lock_ReleaseRead(&buf_globalLock);
1664 for (; bp; bp = nbp) {
1665 didRelease = 0; /* haven't released this buffer yet */
1667 /* clean buffer synchronously */
1668 if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
1670 if (code == 0 && !stable && (bp->flags & CM_BUF_DIRTY)) {
1672 * we must stabilize the object to ensure that buffer
1673 * changes cannot occur while the flush is performed.
1674 * However, we do not want to Stabilize if we do not
1675 * need to because Stabilize obtains a callback.
1677 code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
1678 stable = (code == 0);
1681 if (code == CM_ERROR_BADFD) {
1682 /* if the scp's FID is bad it's because we received VNOVNODE
1683 * when attempting to FetchStatus before the write. This
1684 * page therefore contains data that can no longer be stored.
1686 lock_ObtainMutex(&bp->mx);
1687 _InterlockedAnd(&bp->flags, ~CM_BUF_DIRTY);
1688 _InterlockedOr(&bp->flags, CM_BUF_ERROR);
1689 bp->error = CM_ERROR_BADFD;
1690 bp->dirty_offset = 0;
1691 bp->dirty_length = 0;
1692 bp->dataVersion = CM_BUF_VERSION_BAD; /* known bad */
1694 lock_ReleaseMutex(&bp->mx);
1695 } else if (!(scp->flags & CM_SCACHEFLAG_RO)) {
1700 lock_ObtainMutex(&bp->mx);
1702 /* start cleaning the buffer, and wait for it to finish */
1703 buf_CleanAsyncLocked(scp, bp, reqp, 0, NULL);
1704 buf_WaitIO(scp, bp);
1706 lock_ReleaseMutex(&bp->mx);
1709 /* actually, we only know that the buffer is clean if its ref
1710 * count is 1, since we don't have the buffer itself locked.
1712 if (!(bp->flags & CM_BUF_DIRTY)) {
1713 lock_ObtainWrite(&buf_globalLock);
1714 if (bp->refCount == 1) { /* bp is held above */
1715 nbp = bp->fileHashp;
1717 buf_HoldLocked(nbp);
1718 buf_ReleaseLocked(bp, TRUE);
1722 lock_ReleaseWrite(&buf_globalLock);
1728 lock_ObtainRead(&buf_globalLock);
1729 nbp = bp->fileHashp;
1731 buf_HoldLocked(nbp);
1732 buf_ReleaseLocked(bp, FALSE);
1733 lock_ReleaseRead(&buf_globalLock);
1735 } /* for loop over a bunch of buffers */
1738 (*cm_buf_opsp->Unstabilizep)(scp, userp);
1741 buf_ValidateBufQueues();
1742 #endif /* TESTING */
1748 /* Must be called with scp->rw held */
1749 long buf_ForceDataVersion(cm_scache_t * scp, afs_uint64 fromVersion, afs_uint64 toVersion)
1755 lock_AssertAny(&scp->rw);
1757 i = BUF_FILEHASH(&scp->fid);
1759 lock_ObtainRead(&buf_globalLock);
1761 for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp = bp->fileHashp) {
1762 if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
1763 if (bp->dataVersion == fromVersion) {
1764 bp->dataVersion = toVersion;
1769 lock_ReleaseRead(&buf_globalLock);
1777 long buf_CleanVnode(struct cm_scache *scp, cm_user_t *userp, cm_req_t *reqp)
1781 cm_buf_t *bp; /* buffer we're hacking on */
1782 cm_buf_t *nbp; /* next one */
1785 i = BUF_FILEHASH(&scp->fid);
1787 lock_ObtainRead(&buf_globalLock);
1788 bp = cm_data.buf_fileHashTablepp[i];
1791 lock_ReleaseRead(&buf_globalLock);
1792 for (; bp; bp = nbp) {
1793 /* clean buffer synchronously */
1794 if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
1795 lock_ObtainMutex(&bp->mx);
1796 if (bp->flags & CM_BUF_DIRTY) {
1797 if (userp && userp != bp->userp) {
1800 cm_ReleaseUser(bp->userp);
1805 case CM_ERROR_NOSUCHFILE:
1806 case CM_ERROR_BADFD:
1807 case CM_ERROR_NOACCESS:
1808 case CM_ERROR_QUOTA:
1809 case CM_ERROR_SPACE:
1810 case CM_ERROR_TOOBIG:
1811 case CM_ERROR_READONLY:
1812 case CM_ERROR_NOSUCHPATH:
1814 * Apply the previous fatal error to this buffer.
1815 * Do not waste the time attempting to store to
1816 * the file server when we know it will fail.
1818 _InterlockedAnd(&bp->flags, ~CM_BUF_DIRTY);
1819 _InterlockedOr(&bp->flags, CM_BUF_ERROR);
1820 bp->dirty_offset = 0;
1821 bp->dirty_length = 0;
1823 bp->dataVersion = CM_BUF_VERSION_BAD;
1826 case CM_ERROR_TIMEDOUT:
1827 case CM_ERROR_ALLDOWN:
1828 case CM_ERROR_ALLBUSY:
1829 case CM_ERROR_ALLOFFLINE:
1830 case CM_ERROR_CLOCKSKEW:
1831 /* do not mark the buffer in error state but do
1832 * not attempt to complete the rest either.
1836 code = buf_CleanAsyncLocked(scp, bp, reqp, 0, &wasDirty);
1837 if (bp->flags & CM_BUF_ERROR) {
1843 buf_CleanWait(scp, bp, TRUE);
1845 lock_ReleaseMutex(&bp->mx);
1848 lock_ObtainRead(&buf_globalLock);
1849 nbp = bp->fileHashp;
1851 buf_HoldLocked(nbp);
1852 buf_ReleaseLocked(bp, FALSE);
1853 lock_ReleaseRead(&buf_globalLock);
1854 } /* for loop over a bunch of buffers */
1857 buf_ValidateBufQueues();
1858 #endif /* TESTING */
1866 buf_ValidateBufQueues(void)
1868 cm_buf_t * bp, *bpb, *bpf, *bpa;
1869 afs_uint32 countf=0, countb=0, counta=0;
1871 lock_ObtainRead(&buf_globalLock);
1872 for (bp = cm_data.buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
1873 if (bp->magic != CM_BUF_MAGIC)
1874 osi_panic("buf magic error",__FILE__,__LINE__);
1879 for (bp = cm_data.buf_freeListp; bp; bp=(cm_buf_t *) osi_QNext(&bp->q)) {
1880 if (bp->magic != CM_BUF_MAGIC)
1881 osi_panic("buf magic error",__FILE__,__LINE__);
1886 for (bp = cm_data.buf_allp; bp; bp=bp->allp) {
1887 if (bp->magic != CM_BUF_MAGIC)
1888 osi_panic("buf magic error",__FILE__,__LINE__);
1892 lock_ReleaseRead(&buf_globalLock);
1894 if (countb != countf)
1895 osi_panic("buf free list count mismatch",__FILE__,__LINE__);
1897 if (counta != cm_data.buf_nbuffers)
1898 osi_panic("buf total count mismatch",__FILE__,__LINE__);
1900 #endif /* TESTING */
1902 /* dump the contents of the buf_scacheHashTablepp. */
1903 int cm_DumpBufHashTable(FILE *outputFile, char *cookie, int lock)
1910 if (cm_data.buf_scacheHashTablepp == NULL)
1914 lock_ObtainRead(&buf_globalLock);
1916 StringCbPrintfA(output, sizeof(output), "%s - dumping buf_HashTable - buf_hashSize=%d\r\n",
1917 cookie, cm_data.buf_hashSize);
1918 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1920 for (i = 0; i < cm_data.buf_hashSize; i++)
1922 for (bp = cm_data.buf_scacheHashTablepp[i]; bp; bp=bp->hashp)
1924 StringCbPrintfA(output, sizeof(output),
1925 "%s bp=0x%08X, hash=%d, fid (cell=%d, volume=%d, "
1926 "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
1927 "flags=0x%x, qFlags=0x%x cmFlags=0x%x, error=0x%x, refCount=%d\r\n",
1928 cookie, (void *)bp, i, bp->fid.cell, bp->fid.volume,
1929 bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
1930 bp->offset.LowPart, bp->dataVersion, bp->flags, bp->qFlags,
1931 bp->cmFlags, bp->error, bp->refCount);
1932 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1936 StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_HashTable.\r\n", cookie);
1937 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1939 StringCbPrintfA(output, sizeof(output), "%s - dumping buf_freeListEndp\r\n", cookie);
1940 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1941 for(bp = cm_data.buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
1942 StringCbPrintfA(output, sizeof(output),
1943 "%s bp=0x%08X, fid (cell=%d, volume=%d, "
1944 "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
1945 "flags=0x%x, qFlags=0x%x, cmFlags=0x%x, error=0x%x, refCount=%d\r\n",
1946 cookie, (void *)bp, bp->fid.cell, bp->fid.volume,
1947 bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
1948 bp->offset.LowPart, bp->dataVersion, bp->flags, bp->qFlags,
1949 bp->cmFlags, bp->error, bp->refCount);
1950 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1952 StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_freeListEndp.\r\n", cookie);
1953 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1955 StringCbPrintfA(output, sizeof(output), "%s - dumping buf_dirtyListp\r\n", cookie);
1956 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1957 for(bp = cm_data.buf_dirtyListp; bp; bp=bp->dirtyp) {
1958 StringCbPrintfA(output, sizeof(output),
1959 "%s bp=0x%08X, fid (cell=%d, volume=%d, "
1960 "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
1961 "flags=0x%x, qFlags=0x%x, cmFlags=0x%x, error=0x%x, refCount=%d\r\n",
1962 cookie, (void *)bp, bp->fid.cell, bp->fid.volume,
1963 bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
1964 bp->offset.LowPart, bp->dataVersion, bp->flags, bp->qFlags,
1965 bp->cmFlags, bp->error, bp->refCount);
1966 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1968 StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_dirtyListp.\r\n", cookie);
1969 WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
1972 lock_ReleaseRead(&buf_globalLock);
1976 void buf_ForceTrace(BOOL flush)
1985 len = GetTempPath(sizeof(buf)-10, buf);
1986 StringCbCopyA(&buf[len], sizeof(buf)-len, "/afs-buffer.log");
1987 handle = CreateFile(buf, GENERIC_WRITE, FILE_SHARE_READ,
1988 NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
1989 if (handle == INVALID_HANDLE_VALUE) {
1990 osi_panic("Cannot create log file", __FILE__, __LINE__);
1992 osi_LogPrint(buf_logp, handle);
1994 FlushFileBuffers(handle);
1995 CloseHandle(handle);
1998 long buf_DirtyBuffersExist(cm_fid_t *fidp)
2001 afs_uint32 bcount = 0;
2004 i = BUF_FILEHASH(fidp);
2006 for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp=bp->fileHashp, bcount++) {
2007 if (!cm_FidCmp(fidp, &bp->fid) && (bp->flags & CM_BUF_DIRTY))
2014 long buf_CleanDirtyBuffers(cm_scache_t *scp)
2017 afs_uint32 bcount = 0;
2018 cm_fid_t * fidp = &scp->fid;
2020 for (bp = cm_data.buf_allp; bp; bp=bp->allp, bcount++) {
2021 if (!cm_FidCmp(fidp, &bp->fid) && (bp->flags & CM_BUF_DIRTY)) {
2023 lock_ObtainMutex(&bp->mx);
2024 _InterlockedAnd(&bp->cmFlags, ~CM_BUF_CMSTORING);
2025 _InterlockedAnd(&bp->flags, ~CM_BUF_DIRTY);
2026 bp->dirty_offset = 0;
2027 bp->dirty_length = 0;
2028 _InterlockedOr(&bp->flags, CM_BUF_ERROR);
2029 bp->error = VNOVNODE;
2030 bp->dataVersion = CM_BUF_VERSION_BAD; /* bad */
2032 if (bp->flags & CM_BUF_WAITING) {
2033 osi_Log2(buf_logp, "BUF CleanDirtyBuffers Waking [scp 0x%p] bp 0x%p", scp, bp);
2034 osi_Wakeup((LONG_PTR) bp);
2036 lock_ReleaseMutex(&bp->mx);