/*
 * Copyright 2000, International Business Machines Corporation and others.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/* Copyright (C) 1994 Cazamar Systems, Inc. */

#include <afsconfig.h>
#include <afs/param.h>

#include <hcrypto/md5.h>

#include "cm_memmap.h"

#define TRACE_BUFFER 1

extern void afsi_log(char *pattern, ...);

/* This module implements the buffer package used by the local transaction
 * system (cm).  It is initialized by calling cm_Init, which calls buf_Init;
 * it must be initialized before any of its main routines are called.
 *
 * Each buffer is hashed into a hash table by file ID and offset, and if its
 * reference count is zero, it is also in a free list.
 *
 * There are two locks involved in buffer processing.  The global lock
 * buf_globalLock protects all of the global variables defined in this module,
 * the reference counts and hash pointers in the actual cm_buf_t structures,
 * and the LRU queue pointers in the buffer structures.
 *
 * The mutexes in the buffer structures protect the remaining fields in the
 * buffers, as well as the data itself.
 *
 * The locking hierarchy here is this:
 *
 * - resv (reservation of multiple simultaneous buffers)
 * - lock buffer I/O flags
 * - lock buffer's mutex
 * - lock buf_globalLock
 */
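
/*
 * Illustrative sketch (not part of the build): how a caller is expected to
 * respect the hierarchy above when it needs both a buffer's mutex and the
 * global lock.  The helper name is hypothetical.
 */
#if 0
static void
example_LockOrdering(cm_buf_t *bp)
{
    lock_ObtainMutex(&bp->mx);          /* buffer mutex first ... */
    lock_ObtainWrite(&buf_globalLock);  /* ... then buf_globalLock, never the reverse */

    /* manipulate reference counts, hash or LRU pointers here */

    lock_ReleaseWrite(&buf_globalLock);
    lock_ReleaseMutex(&bp->mx);
}
#endif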
/* global debugging log */
osi_log_t *buf_logp = NULL;

/* Global lock protecting hash tables and free lists */
osi_rwlock_t buf_globalLock;

/* Global lock used to limit the number of RDR Release
 * Extents requests to one. */
osi_mutex_t buf_rdrReleaseExtentsLock;

/* ptr to head of the free list (most recently used) and the
 * tail (the guy to remove first).  We use osi_Q* functions
 * to put stuff in buf_freeListp, and maintain the end
 * pointer manually.
 */

/* a pointer to a list of all buffers, just so that we can find them
 * easily for debugging, and for the incr syncer.  Locked under
 * buf_globalLock.
 */

/* defaults setup; these variables may be assigned
 * before calling cm_Init, as a way of changing these defaults.
 */

/* callouts for reading and writing data, etc */
cm_buf_ops_t *cm_buf_opsp;

#ifdef DISKCACHE95
/* for experimental disk caching support in Win95 client */
cm_buf_t *buf_diskFreeListp;
cm_buf_t *buf_diskFreeListEndp;
cm_buf_t *buf_diskAllp;
extern int cm_diskCacheEnabled;
#endif /* DISKCACHE95 */

/* set this to 1 when we are terminating to prevent access attempts */
static int buf_ShutdownFlag = 0;

#ifdef DEBUG_REFCOUNT
void buf_HoldLockedDbg(cm_buf_t *bp, char *file, long line)
#else
void buf_HoldLocked(cm_buf_t *bp)
#endif
{
    afs_int32 refCount;

    osi_assertx(bp->magic == CM_BUF_MAGIC, "incorrect cm_buf_t magic");
    refCount = InterlockedIncrement(&bp->refCount);
#ifdef DEBUG_REFCOUNT
    osi_Log2(afsd_logp, "buf_HoldLocked bp 0x%p ref %d", bp, refCount);
    afsi_log("%s:%d buf_HoldLocked bp 0x%p, ref %d", file, line, bp, refCount);
#endif
}

/* hold a reference to an already held buffer */
#ifdef DEBUG_REFCOUNT
void buf_HoldDbg(cm_buf_t *bp, char *file, long line)
#else
void buf_Hold(cm_buf_t *bp)
#endif
{
    afs_int32 refCount;

    lock_ObtainRead(&buf_globalLock);
    osi_assertx(bp->magic == CM_BUF_MAGIC, "incorrect cm_buf_t magic");
    refCount = InterlockedIncrement(&bp->refCount);
#ifdef DEBUG_REFCOUNT
    osi_Log2(afsd_logp, "buf_Hold bp 0x%p ref %d", bp, refCount);
    afsi_log("%s:%d buf_Hold bp 0x%p, ref %d", file, line, bp, refCount);
#endif
    lock_ReleaseRead(&buf_globalLock);
}

/* code to drop reference count while holding buf_globalLock */
#ifdef DEBUG_REFCOUNT
void buf_ReleaseLockedDbg(cm_buf_t *bp, afs_uint32 writeLocked, char *file, long line)
#else
void buf_ReleaseLocked(cm_buf_t *bp, afs_uint32 writeLocked)
#endif
{
    afs_int32 refCount;

    if (writeLocked)
        lock_AssertWrite(&buf_globalLock);
    else
        lock_AssertRead(&buf_globalLock);

    /* ensure that we're in the LRU queue if our ref count is 0 */
    osi_assertx(bp->magic == CM_BUF_MAGIC, "incorrect cm_buf_t magic");

    refCount = InterlockedDecrement(&bp->refCount);
#ifdef DEBUG_REFCOUNT
    osi_Log3(afsd_logp, "buf_ReleaseLocked %s bp 0x%p ref %d", writeLocked ? "write" : "read", bp, refCount);
    afsi_log("%s:%d buf_ReleaseLocked %s bp 0x%p, ref %d", file, line, writeLocked ? "write" : "read", bp, refCount);
#endif
    if (refCount < 0)
        osi_panic("buf refcount 0", __FILE__, __LINE__);
    osi_assertx(refCount >= 0, "cm_buf_t refCount must not be negative");

    if (refCount == 0) {
        /*
         * If we are read locked there could be a race condition
         * with buf_Find() so we must obtain a write lock and
         * double check that the refCount is actually zero
         * before we remove the buffer from the LRU queue.
         */
        if (!writeLocked)
            lock_ConvertRToW(&buf_globalLock);

        if (bp->refCount == 0 &&
            !(bp->qFlags & (CM_BUF_QINLRU|CM_BUF_QREDIR))) {
            osi_QAddH( (osi_queue_t **) &cm_data.buf_freeListp,
                       (osi_queue_t **) &cm_data.buf_freeListEndp,
                       &bp->q);
            _InterlockedOr(&bp->qFlags, CM_BUF_QINLRU);
            buf_IncrementFreeCount();
        }

        if (!writeLocked)
            lock_ConvertWToR(&buf_globalLock);
    }
}

/* release a buffer.  Buffer must be referenced, but unlocked. */
#ifdef DEBUG_REFCOUNT
void buf_ReleaseDbg(cm_buf_t *bp, char *file, long line)
#else
void buf_Release(cm_buf_t *bp)
#endif
{
    afs_int32 refCount;

    /* ensure that we're in the LRU queue if our ref count is 0 */
    osi_assertx(bp->magic == CM_BUF_MAGIC, "incorrect cm_buf_t magic");

    refCount = InterlockedDecrement(&bp->refCount);
#ifdef DEBUG_REFCOUNT
    osi_Log2(afsd_logp, "buf_Release bp 0x%p ref %d", bp, refCount);
    afsi_log("%s:%d buf_Release bp 0x%p, ref %d", file, line, bp, refCount);
#endif
    if (refCount < 0)
        osi_panic("buf refcount 0", __FILE__, __LINE__);
    osi_assertx(refCount >= 0, "cm_buf_t refCount must not be negative");

    if (refCount == 0) {
        lock_ObtainWrite(&buf_globalLock);
        if (bp->refCount == 0 &&
            !(bp->qFlags & (CM_BUF_QINLRU|CM_BUF_QREDIR))) {
            osi_QAddH( (osi_queue_t **) &cm_data.buf_freeListp,
                       (osi_queue_t **) &cm_data.buf_freeListEndp,
                       &bp->q);
            _InterlockedOr(&bp->qFlags, CM_BUF_QINLRU);
            buf_IncrementFreeCount();
        }
        lock_ReleaseWrite(&buf_globalLock);
    }
}
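
/*
 * Illustrative sketch (not part of the build): the reference counting
 * contract.  Whoever takes a hold must pair it with a release; the last
 * release parks the buffer back on the LRU free list.
 */
#if 0
static void
example_HoldRelease(cm_buf_t *bp)
{
    buf_Hold(bp);       /* ref 0 -> 1: buffer cannot be recycled */

    /* ... safely use bp while the reference is held ... */

    buf_Release(bp);    /* ref 1 -> 0: buffer re-enters the LRU queue */
}
#endif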
buf_Sync(int quitOnShutdown)
{
    cm_buf_t **bpp, *bp, *prevbp;
    afs_uint32 wasDirty = 0;
    afs_uint32 dirty = 0;
    cm_volume_t *volp = NULL;
    cm_req_t req;

    cm_InitReq(&req);

    /* go through all of the dirty buffers */
    lock_ObtainRead(&buf_globalLock);
    for (bpp = &cm_data.buf_dirtyListp, prevbp = NULL; (bp = *bpp) != NULL; ) {
        if (quitOnShutdown && buf_ShutdownFlag)
            break;

        /*
         * If the buffer is held by the redirector we must fetch
         * it back in order to determine whether or not it is in
         * use.
         */
        if (bp->qFlags & CM_BUF_QREDIR) {
            osi_Log1(buf_logp, "buf_Sync buffer held by redirector bp 0x%p", bp);

            /* Request single buffer from the redirector */
            buf_RDRShakeAnExtentFree(bp, &req);
        }

        lock_ReleaseRead(&buf_globalLock);
        /*
         * all dirty buffers are held when they are added to the
         * dirty list.  No need for an additional hold.
         */
        lock_ObtainMutex(&bp->mx);

        if ((bp->flags & CM_BUF_DIRTY)) {
            /* start cleaning the buffer; don't touch log pages since
             * the log code counts on knowing exactly who is writing
             * a log page at any given instant.
             *
             * only attempt to write the buffer if the volume might
             * be online.
             */
            volp = cm_GetVolumeByFID(&bp->fid);
            switch (cm_GetVolumeStatus(volp, bp->fid.volume)) {
            /* (case labels elided) */
                req.flags |= CM_REQ_NORETRY;
                buf_CleanLocked(NULL, bp, &req, 0, &dirty);
                wasDirty |= dirty;
            }
        }

        /* the buffer may or may not have been dirty
         * and if dirty may or may not have been cleaned
         * successfully.  check the dirty flag again.
         */
        if (!(bp->flags & CM_BUF_DIRTY)) {
            /* remove the buffer from the dirty list */
            lock_ObtainWrite(&buf_globalLock);
#ifdef DEBUG_REFCOUNT
            if (bp->dirtyp == NULL && bp != cm_data.buf_dirtyListEndp) {
                osi_Log1(afsd_logp, "buf_Sync bp 0x%p list corruption", bp);
                afsi_log("buf_Sync bp 0x%p list corruption", bp);
            }
#endif
            *bpp = bp->dirtyp;
            bp->dirtyp = NULL;
            _InterlockedAnd(&bp->qFlags, ~CM_BUF_QINDL);
            if (cm_data.buf_dirtyListp == NULL)
                cm_data.buf_dirtyListEndp = NULL;
            else if (cm_data.buf_dirtyListEndp == bp)
                cm_data.buf_dirtyListEndp = prevbp;
            buf_ReleaseLocked(bp, TRUE);
            lock_ConvertWToR(&buf_globalLock);
        } else {
            if (buf_ShutdownFlag) {
                cm_cell_t *cellp;
                char volstr[VL_MAXNAMELEN+12]="";
                char *ext = "";

                volp = cm_GetVolumeByFID(&bp->fid);
                cellp = cm_FindCellByID(bp->fid.cell, CM_FLAG_NOPROBE);
                if (volp) {
                    if (bp->fid.volume == volp->vol[RWVOL].ID)
                        ext = "";
                    else if (bp->fid.volume == volp->vol[ROVOL].ID)
                        ext = ".readonly";
                    else if (bp->fid.volume == volp->vol[BACKVOL].ID)
                        ext = ".backup";
                    snprintf(volstr, sizeof(volstr), "%s%s", volp->namep, ext);
                } else {
                    snprintf(volstr, sizeof(volstr), "%u", bp->fid.volume);
                }

                LogEvent(EVENTLOG_INFORMATION_TYPE, MSG_DIRTY_BUFFER_AT_SHUTDOWN,
                         cellp->name, volstr, bp->fid.vnode, bp->fid.unique,
                         bp->offset.QuadPart+bp->dirty_offset, bp->dirty_length);
            }

            /* advance the pointer so we don't loop forever */
            lock_ObtainRead(&buf_globalLock);
            bpp = &bp->dirtyp;
            prevbp = bp;
        }
        lock_ReleaseMutex(&bp->mx);
    }   /* for loop over a bunch of buffers */
    lock_ReleaseRead(&buf_globalLock);

    return wasDirty;
}

/* incremental sync daemon.  Writes all dirty buffers every 5000 ms */
buf_IncrSyncer(void * parm)
{
    afs_uint32 wasDirty;
    long i;

    while (buf_ShutdownFlag == 0) {
        i = SleepEx(5000, 1);
        /* ... */
        wasDirty = buf_Sync(1);
    }   /* whole daemon's while loop */
}

buf_ValidateBuffers(void)
{
    cm_buf_t * bp, *bpf, *bpa, *bpb;
    afs_uint64 countb = 0, countf = 0, counta = 0, countr = 0;

    if ((cm_data.buf_freeListp == NULL && cm_data.buf_freeListEndp != NULL) ||
        (cm_data.buf_freeListp != NULL && cm_data.buf_freeListEndp == NULL)) {
        afsi_log("cm_ValidateBuffers failure: inconsistent free list pointers");
        fprintf(stderr, "cm_ValidateBuffers failure: inconsistent free list pointers\n");
        return -1;
    }

    for (bp = cm_data.buf_freeListEndp; bp; bp = (cm_buf_t *) osi_QPrev(&bp->q)) {
        if (bp->magic != CM_BUF_MAGIC) {
            afsi_log("cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
            fprintf(stderr, "cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
            return -1;
        }
        countb++;
        bpb = bp;

        if (countb > cm_data.buf_nbuffers) {
            afsi_log("cm_ValidateBuffers failure: countb > cm_data.buf_nbuffers");
            fprintf(stderr, "cm_ValidateBuffers failure: countb > cm_data.buf_nbuffers\n");
            return -1;
        }
    }

    for (bp = cm_data.buf_freeListp; bp; bp = (cm_buf_t *) osi_QNext(&bp->q)) {
        if (bp->magic != CM_BUF_MAGIC) {
            afsi_log("cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
            fprintf(stderr, "cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
            return -1;
        }
        countf++;
        bpf = bp;

        if (countf > cm_data.buf_nbuffers) {
            afsi_log("cm_ValidateBuffers failure: countf > cm_data.buf_nbuffers");
            fprintf(stderr, "cm_ValidateBuffers failure: countf > cm_data.buf_nbuffers\n");
            return -1;
        }
    }

    for ( bp = cm_data.buf_redirListp; bp; bp = (cm_buf_t *) osi_QNext(&bp->q)) {
        if (!(bp->qFlags & CM_BUF_QREDIR)) {
            afsi_log("CM_BUF_QREDIR not set on cm_buf_t in buf_redirListp");
            fprintf(stderr, "CM_BUF_QREDIR not set on cm_buf_t in buf_redirListp\n");
            return -1;
        }
        countr++;

        if (countr > cm_data.buf_nbuffers) {
            afsi_log("cm_ValidateBuffers failure: countr > cm_data.buf_nbuffers");
            fprintf(stderr, "cm_ValidateBuffers failure: countr > cm_data.buf_nbuffers\n");
            return -1;
        }
    }

    for (bp = cm_data.buf_allp; bp; bp = bp->allp) {
        if (bp->magic != CM_BUF_MAGIC) {
            afsi_log("cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC");
            fprintf(stderr, "cm_ValidateBuffers failure: bp->magic != CM_BUF_MAGIC\n");
            return -1;
        }
        counta++;
        bpa = bp;

        if (counta > cm_data.buf_nbuffers) {
            afsi_log("cm_ValidateBuffers failure: counta > cm_data.buf_nbuffers");
            fprintf(stderr, "cm_ValidateBuffers failure: counta > cm_data.buf_nbuffers\n");
            return -1;
        }
    }

    if (countb != countf) {
        afsi_log("cm_ValidateBuffers failure: countb != countf");
        fprintf(stderr, "cm_ValidateBuffers failure: countb != countf\n");
        return -1;
    }

    if (counta != cm_data.buf_nbuffers) {
        afsi_log("cm_ValidateBuffers failure: counta != cm_data.buf_nbuffers");
        fprintf(stderr, "cm_ValidateBuffers failure: counta != cm_data.buf_nbuffers\n");
        return -1;
    }

    return 0;
}

void buf_Shutdown(void)
{
    /* disable the buf_IncrSyncer() threads */
    buf_ShutdownFlag = 1;

    /* then force all dirty buffers to the file servers */
    buf_Sync(0);
}

/* initialize the buffer package; called with no locks
 * held during the initialization phase.
 */
long buf_Init(int newFile, cm_buf_ops_t *opsp, afs_uint64 nbuffers)
{
    static osi_once_t once;
    cm_buf_t *bp;
    pthread_t phandle;
    pthread_attr_t tattr;
    int pstatus;
    long i;
    char *data;

    cm_data.buf_nbuffers = nbuffers;

    /* Have to be able to reserve a whole chunk */
    if (((cm_data.buf_nbuffers - 3) * cm_data.buf_blockSize) < cm_chunkSize)
        return CM_ERROR_TOOFEWBUFS;
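
    /*
     * Worked example (illustrative values, not defaults): with a 4 KiB
     * buf_blockSize and a 1 MiB cm_chunkSize, the check above requires
     * (nbuffers - 3) * 4096 >= 1048576, i.e. at least 259 buffers, so
     * that a whole chunk can always be reserved.
     */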
    /* remember the callouts */
    cm_buf_opsp = opsp;

    if (osi_Once(&once)) {
        /* initialize global locks */
        lock_InitializeRWLock(&buf_globalLock, "Global buffer lock", LOCK_HIERARCHY_BUF_GLOBAL);
        lock_InitializeMutex(&buf_rdrReleaseExtentsLock, "RDR Release Extents lock", LOCK_HIERARCHY_RDR_EXTENTS);

        if (newFile) {
            /* remember this for those who want to reset it */
            cm_data.buf_nOrigBuffers = cm_data.buf_nbuffers;

            /* round the hash size up to a power of two */
            cm_data.buf_hashSize = cm_NextHighestPowerOf2((afs_uint32)(cm_data.buf_nbuffers/7));

            /* create hash table */
            memset((void *)cm_data.buf_scacheHashTablepp, 0, cm_data.buf_hashSize * sizeof(cm_buf_t *));

            /* another hash table */
            memset((void *)cm_data.buf_fileHashTablepp, 0, cm_data.buf_hashSize * sizeof(cm_buf_t *));

            /* create buffer headers and put in free list */
            bp = cm_data.bufHeaderBaseAddress;
            data = cm_data.bufDataBaseAddress;
            cm_data.buf_allp = NULL;

            for (i = 0; i < cm_data.buf_nbuffers; i++) {
                osi_assertx(bp >= cm_data.bufHeaderBaseAddress && bp < (cm_buf_t *)cm_data.bufDataBaseAddress,
                            "invalid cm_buf_t address");
                osi_assertx(data >= cm_data.bufDataBaseAddress && data < cm_data.bufEndOfData,
                            "invalid cm_buf_t data address");

                /* allocate and zero some storage */
                memset(bp, 0, sizeof(cm_buf_t));
                bp->magic = CM_BUF_MAGIC;
                /* thread on list of all buffers */
                bp->allp = cm_data.buf_allp;
                cm_data.buf_allp = bp;

                osi_QAddH( (osi_queue_t **) &cm_data.buf_freeListp,
                           (osi_queue_t **) &cm_data.buf_freeListEndp,
                           &bp->q);
                _InterlockedOr(&bp->qFlags, CM_BUF_QINLRU);
                buf_IncrementFreeCount();
                lock_InitializeMutex(&bp->mx, "Buffer mutex", LOCK_HIERARCHY_BUFFER);

                /* grab appropriate number of bytes from aligned zone */
                bp->datap = data;

                bp++;
                data += cm_data.buf_blockSize;
            }

            /* none reserved at first */
            cm_data.buf_reservedBufs = 0;

            /* just for safety's sake */
            cm_data.buf_maxReservedBufs = cm_data.buf_nbuffers - 3;
        } else {
            bp = cm_data.bufHeaderBaseAddress;
            data = cm_data.bufDataBaseAddress;

            for (i = 0; i < cm_data.buf_nbuffers; i++) {
                lock_InitializeMutex(&bp->mx, "Buffer mutex", LOCK_HIERARCHY_BUFFER);
                bp->waitCount = 0;
                bp->waitRequests = 0;
                _InterlockedAnd(&bp->flags, ~CM_BUF_WAITING);

                if (bp->qFlags & CM_BUF_QREDIR) {
                    /*
                     * extent was not returned by the file system driver.
                     */
                    bp->dataVersion = CM_BUF_VERSION_BAD;
                    _InterlockedAnd(&bp->qFlags, ~CM_BUF_QREDIR);
                    osi_QRemoveHT( (osi_queue_t **) &cm_data.buf_redirListp,
                                   (osi_queue_t **) &cm_data.buf_redirListEndp,
                                   &bp->redirq);
                    buf_DecrementRedirCount();
                    bp->redirq.nextp = bp->redirq.prevp = NULL;
                    bp->redirLastAccess = 0;
                    bp->redirReleaseRequested = 0;
                }
                bp++;
            }

            /*
             * There should be nothing left in cm_data.buf_redirListp
             * but double check just to be sure.
             */
            for ( bp = cm_data.buf_redirListp;
                  bp;
                  bp = cm_data.buf_redirListp)
            {
                /*
                 * extent was not returned by the file system driver.
                 */
                bp->dataVersion = CM_BUF_VERSION_BAD;
                _InterlockedAnd(&bp->qFlags, ~CM_BUF_QREDIR);
                osi_QRemoveHT( (osi_queue_t **) &cm_data.buf_redirListp,
                               (osi_queue_t **) &cm_data.buf_redirListEndp,
                               &bp->redirq);
                buf_DecrementRedirCount();
                bp->redirq.nextp = bp->redirq.prevp = NULL;
                bp->redirLastAccess = 0;
                bp->redirReleaseRequested = 0;
            }
        }

#ifdef TESTING
        buf_ValidateBufQueues();
#endif /* TESTING */

        /* init the buffer trace log */
        buf_logp = osi_LogCreate("buffer", 1000);
        osi_LogEnable(buf_logp);

        osi_EndOnce(&once);

        /* and create the incr-syncer */
        pthread_attr_init(&tattr);
        pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_DETACHED);

        pstatus = pthread_create(&phandle, &tattr, buf_IncrSyncer, 0);
        osi_assertx(pstatus == 0, "buf: can't create incremental sync proc");

        pthread_attr_destroy(&tattr);
    }

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */

    return 0;
}
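
/*
 * Illustrative sketch (not part of the build): a minimal initialization
 * sequence as cm_Init might perform it.  The field order of the ops table
 * and the stabilize entry points shown here are assumptions; only the
 * cm_BufRead/cm_BufWrite names are mentioned elsewhere in this file.
 */
#if 0
static cm_buf_ops_t example_ops = {
    cm_BufRead,          /* Readp */
    cm_BufWrite,         /* Writep */
    example_Stabilize,   /* Stabilizep: hypothetical */
    example_Unstabilize  /* Unstabilizep: hypothetical */
};

static long
example_InitBuffers(void)
{
    /* newFile != 0 means the cache file is fresh; 10000 is an arbitrary count */
    return buf_Init(1, &example_ops, 10000);
}
#endif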
/* add nbuffers to the buffer pool, if possible.
 * Called with no locks held.
 */
long buf_AddBuffers(afs_uint64 nbuffers)
{
    /* The size of a virtual cache cannot be changed after it has
     * been created.  Subsequent calls to MapViewOfFile() with
     * an existing mapping object name would not allow the
     * object to be resized.  Return failure immediately.
     *
     * A similar problem now occurs with the persistent cache
     * given that the memory mapped file now contains a complex
     * data structure.
     */
    afsi_log("request to add %I64u buffers to the existing cache of size %I64u denied",
             nbuffers, cm_data.buf_nbuffers);

    return CM_ERROR_INVAL;
}

/* interface to set the number of buffers to an exact figure.
 * Called with no locks held.
 */
long buf_SetNBuffers(afs_uint64 nbuffers)
{
    /* ... */
        return CM_ERROR_INVAL;
    if (nbuffers == cm_data.buf_nbuffers)
        return 0;
    else if (nbuffers > cm_data.buf_nbuffers)
        return buf_AddBuffers(nbuffers - cm_data.buf_nbuffers);
    else
        return CM_ERROR_INVAL;
}

/* wait for reading or writing to clear; called with write-locked
 * buffer and unlocked scp and returns with locked buffer.
 */
void buf_WaitIO(cm_scache_t * scp, cm_buf_t *bp)
{
    int release = 0;

    if (scp)
        osi_assertx(scp->magic == CM_SCACHE_MAGIC, "invalid cm_scache_t magic");
    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");

    while (1) {
        /* if no IO is happening, we're done */
        if (!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)))
            break;

        /* otherwise I/O is happening, but some other thread is waiting for
         * the I/O already.  Wait for that guy to figure out what happened,
         * and then check again.
         */
        if (bp->flags & CM_BUF_WAITING) {
            bp->waitCount++;
            bp->waitRequests++;
            osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING already set for 0x%p", bp);
        } else {
            osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING set for 0x%p", bp);
            _InterlockedOr(&bp->flags, CM_BUF_WAITING);
            bp->waitCount = bp->waitRequests = 1;
        }
        osi_SleepM((LONG_PTR)bp, &bp->mx);

        cm_UpdateServerPriority();

        lock_ObtainMutex(&bp->mx);
        osi_Log1(buf_logp, "buf_WaitIO conflict wait done for 0x%p", bp);
        bp->waitCount--;
        if (bp->waitCount == 0) {
            osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING reset for 0x%p", bp);
            _InterlockedAnd(&bp->flags, ~CM_BUF_WAITING);
            bp->waitRequests = 0;
        }

        if (scp == NULL) {
            if ((scp = cm_FindSCache(&bp->fid)) != NULL)
                release = 1;
        }
        if (scp) {
            lock_ObtainRead(&scp->rw);
            if (scp->flags & CM_SCACHEFLAG_WAITING) {
                osi_Log1(buf_logp, "buf_WaitIO waking scp 0x%p", scp);
                osi_Wakeup((LONG_PTR)&scp->flags);
            }
            lock_ReleaseRead(&scp->rw);
        }
    }

    /* if we get here, the IO is done, but we may have to wakeup people waiting for
     * the I/O to complete.  Do so.
     */
    if (bp->flags & CM_BUF_WAITING) {
        osi_Log1(buf_logp, "buf_WaitIO Waking bp 0x%p", bp);
        osi_Wakeup((LONG_PTR) bp);
    }
    osi_Log1(buf_logp, "WaitIO finished wait for bp 0x%p", bp);

    if (release)
        cm_ReleaseSCache(scp);
}

/* find a buffer, if any, for a particular file ID and offset.  Assumes
 * that buf_globalLock is held (read or write) when called.
 */
cm_buf_t *buf_FindLocked(struct cm_fid *fidp, osi_hyper_t *offsetp)
{
    afs_uint32 i;
    cm_buf_t *bp;

    lock_AssertAny(&buf_globalLock);

    i = BUF_HASH(fidp, offsetp);
    for (bp = cm_data.buf_scacheHashTablepp[i]; bp; bp = bp->hashp) {
        if (cm_FidCmp(fidp, &bp->fid) == 0
             && offsetp->LowPart == bp->offset.LowPart
             && offsetp->HighPart == bp->offset.HighPart) {
            buf_HoldLocked(bp);
            break;
        }
    }

    /* return whatever we found, if anything */
    return bp;
}

/* find a buffer with offset *offsetp for vnode *scp.  Called
 * with no locks held.
 */
cm_buf_t *buf_Find(struct cm_fid *fidp, osi_hyper_t *offsetp)
{
    cm_buf_t *bp;

    lock_ObtainRead(&buf_globalLock);
    bp = buf_FindLocked(fidp, offsetp);
    lock_ReleaseRead(&buf_globalLock);

    return bp;
}
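
/*
 * Illustrative sketch (not part of the build): looking up a buffer by FID
 * and offset.  buf_Find() returns the buffer held, so the caller must pair
 * the lookup with buf_Release().
 */
#if 0
static void
example_LookupBuffer(cm_scache_t *scp, osi_hyper_t *offsetp)
{
    cm_buf_t *bp;

    bp = buf_Find(&scp->fid, offsetp);
    if (bp) {
        /* inspect bp under bp->mx if its fields are needed */
        buf_Release(bp);
    }
}
#endif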
/* find a buffer, if any, for a particular file ID and offset.  Assumes
 * that buf_globalLock is held (read or write) when called.  Uses the all
 * buffer list.
 */
cm_buf_t *buf_FindAllLocked(struct cm_fid *fidp, osi_hyper_t *offsetp, afs_uint32 flags)
{
    cm_buf_t *bp;

    if (flags == 0) {
        for (bp = cm_data.buf_allp; bp; bp = bp->allp) {
            if (cm_FidCmp(fidp, &bp->fid) == 0
                 && offsetp->LowPart == bp->offset.LowPart
                 && offsetp->HighPart == bp->offset.HighPart) {
                buf_HoldLocked(bp);
                break;
            }
        }
    } else {
        for (bp = cm_data.buf_allp; bp; bp = bp->allp) {
            if (cm_FidCmp(fidp, &bp->fid) == 0) {
                char * fileOffset;

                fileOffset = offsetp->QuadPart + cm_data.baseAddress;
                if (fileOffset == bp->datap) {
                    buf_HoldLocked(bp);
                    break;
                }
            }
        }
    }

    /* return whatever we found, if anything */
    return bp;
}

/* find a buffer with offset *offsetp for vnode *scp.  Called
 * with no locks held.  Use the all buffer list.
 */
cm_buf_t *buf_FindAll(struct cm_fid *fidp, osi_hyper_t *offsetp, afs_uint32 flags)
{
    cm_buf_t *bp;

    lock_ObtainRead(&buf_globalLock);
    bp = buf_FindAllLocked(fidp, offsetp, flags);
    lock_ReleaseRead(&buf_globalLock);

    return bp;
}

/* start cleaning I/O on this buffer.  Buffer must be write locked, and is
 * returned write locked.
 *
 * Makes sure that there's only one person writing this block
 * at any given time, and also ensures that the log is forced sufficiently far,
 * if this buffer contains logged data.
 *
 * Returns non-zero if the buffer was dirty.
 *
 * 'scp' may or may not be NULL.  If it is not NULL, the FID for both cm_scache_t
 * and cm_buf_t must match.
 */
afs_uint32 buf_CleanLocked(cm_scache_t *scp, cm_buf_t *bp, cm_req_t *reqp,
                           afs_uint32 flags, afs_uint32 *pisdirty)
{
    afs_uint32 code = 0;
    afs_uint32 isdirty = 0;
    osi_hyper_t offset;
    int release_scp = 0;

    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
    osi_assertx(scp == NULL || cm_FidCmp(&scp->fid, &bp->fid) == 0, "scp fid != bp fid");
    /*
     * If the matching cm_scache_t was not provided as a parameter
     * we must either find one or allocate a new one.  It is possible
     * that the cm_scache_t was recycled out of the cache even though
     * a cm_buf_t with the same FID is in the cache.
     */
    if (scp == NULL) {
        if ((scp = cm_FindSCache(&bp->fid)) ||
            (cm_GetSCache(&bp->fid, &scp,
                          bp->userp ? bp->userp : cm_rootUserp,
                          reqp) == 0)) {
            release_scp = 1;
        }
    }

    while ((bp->flags & CM_BUF_DIRTY) == CM_BUF_DIRTY) {
        isdirty = 1;
        lock_ReleaseMutex(&bp->mx);

        if (scp == NULL) {
            /*
             * If we didn't find a cm_scache_t object for bp->fid it means
             * that we no longer have that FID in the cache.  It does not
             * mean that the object does not exist in the cell.  That may
             * in fact be the case but we don't know that until we attempt
             * a FetchStatus on the FID.
             */
            osi_Log1(buf_logp, "buf_CleanLocked unable to start I/O - scp not found buf 0x%p", bp);
            code = CM_ERROR_NOSUCHFILE;
        } else {
            osi_Log2(buf_logp, "buf_CleanLocked starts I/O on scp 0x%p buf 0x%p", scp, bp);

            offset = bp->offset;
            offset = LargeIntegerAdd(offset, ConvertLongToLargeInteger(bp->dirty_offset));
            /*
             * Only specify the dirty length of the current buffer in the call
             * to cm_BufWrite().  It is the responsibility of cm_BufWrite()
             * to determine if it is appropriate to fill a full chunk of data
             * when storing to the file server.
             */
            code = (*cm_buf_opsp->Writep)(scp, &offset,
                                          bp->dirty_length,
                                          flags, bp->userp, reqp);
            osi_Log3(buf_logp, "buf_CleanLocked I/O on scp 0x%p buf 0x%p, done=%d", scp, bp, code);
        }
        lock_ObtainMutex(&bp->mx);
        /* if the Write routine returns No Such File, clear the dirty flag
         * because we aren't going to be able to write this data to the file
         * server.
         */
        if (code == CM_ERROR_NOSUCHFILE || code == CM_ERROR_BADFD || code == CM_ERROR_NOACCESS ||
            code == CM_ERROR_QUOTA || code == CM_ERROR_SPACE || code == CM_ERROR_TOOBIG ||
            code == CM_ERROR_READONLY || code == CM_ERROR_NOSUCHPATH) {
            _InterlockedAnd(&bp->flags, ~CM_BUF_DIRTY);
            _InterlockedOr(&bp->flags, CM_BUF_ERROR);
            bp->dirty_offset = 0;
            bp->dirty_length = 0;
            bp->error = code;
            bp->dataVersion = CM_BUF_VERSION_BAD;
        }

#ifdef DISKCACHE95
        /* Disk cache support */
        /* write buffer to disk cache (synchronous for now) */
        diskcache_Update(bp->dcp, bp->datap, cm_data.buf_blockSize, bp->dataVersion);
#endif /* DISKCACHE95 */

        /* if we get here and retries are not permitted
         * then we need to exit this loop regardless of
         * whether or not we were able to clear the dirty bit
         */
        if (reqp->flags & CM_REQ_NORETRY)
            break;

        /* Ditto if the hardDeadTimeout or idleTimeout was reached */
        if (code == CM_ERROR_TIMEDOUT || code == CM_ERROR_ALLDOWN ||
            code == CM_ERROR_ALLBUSY || code == CM_ERROR_ALLOFFLINE ||
            code == CM_ERROR_CLOCKSKEW) {
            break;
        }
    }

    if (release_scp)
        cm_ReleaseSCache(scp);

    /* if someone was waiting for the I/O that just completed or failed,
     * wake them up.
     */
    if (bp->flags & CM_BUF_WAITING) {
        /* turn off flags and wakeup users */
        osi_Log1(buf_logp, "buf_WaitIO Waking bp 0x%p", bp);
        osi_Wakeup((LONG_PTR) bp);
    }

    if (pisdirty)
        *pisdirty = isdirty;

    return code;
}
/* Called with a zero-ref count buffer and with the buf_globalLock write locked.
 * recycles the buffer, and leaves it ready for reuse with a ref count of 1.
 * The buffer must already be clean, and no I/O should be happening to it.
 */
void buf_Recycle(cm_buf_t *bp)
{
    afs_uint32 i;
    cm_buf_t **lbpp;
    cm_buf_t *tbp;
    cm_buf_t *prevBp, *nextBp;

    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");

    osi_assertx(!(bp->qFlags & CM_BUF_QREDIR), "can't recycle redir held buffers");

    /* if we get here, we know that the buffer still has a 0 ref count,
     * and that it is clean and has no currently pending I/O.  This is
     * the dude to return.
     * Remember that as long as the ref count is 0, we know that we won't
     * have any lock conflicts, so we can grab the buffer lock out of
     * order in the locking hierarchy.
     */
    osi_Log3( buf_logp, "buf_Recycle recycles 0x%p, off 0x%x:%08x",
              bp, bp->offset.HighPart, bp->offset.LowPart);

    osi_assertx(bp->refCount == 0, "cm_buf_t refcount != 0");
    osi_assertx(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)),
                "incorrect cm_buf_t flags");
    lock_AssertWrite(&buf_globalLock);

    if (bp->qFlags & CM_BUF_QINHASH) {
        /* Remove from hash */

        i = BUF_HASH(&bp->fid, &bp->offset);
        lbpp = &(cm_data.buf_scacheHashTablepp[i]);
        for (tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = tbp->hashp) {
            if (tbp == bp)
                break;
        }

        /* we better find it */
        osi_assertx(tbp != NULL, "buf_Recycle: hash table screwup");

        *lbpp = bp->hashp;      /* hash out */

        /* Remove from file hash */

        i = BUF_FILEHASH(&bp->fid);
        prevBp = bp->fileHashBackp;
        bp->fileHashBackp = NULL;
        nextBp = bp->fileHashp;
        bp->fileHashp = NULL;
        if (prevBp)
            prevBp->fileHashp = nextBp;
        else
            cm_data.buf_fileHashTablepp[i] = nextBp;
        if (nextBp)
            nextBp->fileHashBackp = prevBp;

        _InterlockedAnd(&bp->qFlags, ~CM_BUF_QINHASH);
    }

    /* make the fid unrecognizable */
    memset(&bp->fid, 0, sizeof(cm_fid_t));

    /* clean up junk flags */
    _InterlockedAnd(&bp->flags, ~(CM_BUF_EOF | CM_BUF_ERROR));
    bp->dataVersion = CM_BUF_VERSION_BAD;       /* unknown so far */
}
/*
 * buf_RDRShakeAnExtentFree
 * called with buf_globalLock read locked
 */
buf_RDRShakeAnExtentFree(cm_buf_t *rbp, cm_req_t *reqp)
{
    afs_uint32 code = 0;
    LARGE_INTEGER heldExtents = {0,0};
    AFSFileExtentCB extentList[1];
    DWORD extentCount = 0;
    BOOL locked = FALSE;

    if (!(rbp->qFlags & CM_BUF_QREDIR))
        return 0;

    lock_ReleaseRead(&buf_globalLock);

    if (!lock_TryMutex(&buf_rdrReleaseExtentsLock)) {
        osi_Log0(afsd_logp, "Waiting for prior RDR_RequestExtentRelease request to complete");
        if (reqp->flags & CM_REQ_NORETRY) {
            code = CM_ERROR_WOULDBLOCK;
            goto done;
        }
        lock_ObtainMutex(&buf_rdrReleaseExtentsLock);
    }

    extentList[0].Flags = 0;
    extentList[0].Length = cm_data.blockSize;
    extentList[0].FileOffset.QuadPart = rbp->offset.QuadPart;
    extentList[0].CacheOffset.QuadPart = rbp->datap - cm_data.baseAddress;
    extentCount = 1;

    code = RDR_RequestExtentRelease(&rbp->fid, heldExtents, extentCount, extentList);

    lock_ReleaseMutex(&buf_rdrReleaseExtentsLock);

  done:
    lock_ObtainRead(&buf_globalLock);
    return code;
}
/*
 * buf_RDRShakeFileExtentsFree
 * requests all extents held by the redirector to be returned for
 * the specified cm_scache_t.  This function is called with no
 * locks held.
 */
buf_RDRShakeFileExtentsFree(cm_scache_t *rscp, cm_req_t *reqp)
{
    afs_uint32 code = 0;
    afs_uint64 n_redir = 0;

    if (!lock_TryMutex(&buf_rdrReleaseExtentsLock)) {
        osi_Log0(afsd_logp, "Waiting for prior RDR_RequestExtentRelease request to complete");
        if (reqp->flags & CM_REQ_NORETRY)
            return CM_ERROR_WOULDBLOCK;

        lock_ObtainMutex(&buf_rdrReleaseExtentsLock);
    }

    for ( code = CM_ERROR_RETRY; code == CM_ERROR_RETRY; ) {
        LARGE_INTEGER heldExtents = {0,0};
        AFSFileExtentCB extentList[1024];
        DWORD extentCount = 0;
        cm_buf_t *srbp;
        time_t now;

        /* only retry if a call to RDR_RequestExtentRelease says to */
        code = 0;
        lock_ObtainWrite(&buf_globalLock);

        if (rscp->redirBufCount == 0) {
            lock_ReleaseWrite(&buf_globalLock);
            break;
        }

        time(&now);
        for ( srbp = redirq_to_cm_buf_t(rscp->redirQueueT);
              srbp;
              srbp = ((code == 0 && extentCount == 0) ? redirq_to_cm_buf_t(rscp->redirQueueT) :
                      redirq_to_cm_buf_t(osi_QPrev(&srbp->redirq))))
        {
            extentList[extentCount].Flags = 0;
            extentList[extentCount].Length = cm_data.blockSize;
            extentList[extentCount].FileOffset.QuadPart = srbp->offset.QuadPart;
            extentList[extentCount].CacheOffset.QuadPart = srbp->datap - cm_data.baseAddress;
            srbp->redirReleaseRequested = now;
            extentCount++;

            if (extentCount == 1024) {
                lock_ReleaseWrite(&buf_globalLock);
                heldExtents.QuadPart = cm_data.buf_redirCount;
                code = RDR_RequestExtentRelease(&rscp->fid, heldExtents, extentCount, extentList);
                if (code == CM_ERROR_RETRY) {
                    /*
                     * The redirector either is not holding the extents or cannot let them
                     * go because they are otherwise in use.  At the moment, do nothing.
                     */
                }
                extentCount = 0;
                lock_ObtainWrite(&buf_globalLock);
            }
        }
        lock_ReleaseWrite(&buf_globalLock);

        if (code == 0 && extentCount > 0) {
            heldExtents.QuadPart = cm_data.buf_redirCount;
            code = RDR_RequestExtentRelease(&rscp->fid, heldExtents, extentCount, extentList);
        }

        if ((code == CM_ERROR_RETRY) && (reqp->flags & CM_REQ_NORETRY)) {
            code = CM_ERROR_WOULDBLOCK;
            break;
        }
    }

    lock_ReleaseMutex(&buf_rdrReleaseExtentsLock);
    return code;
}
buf_RDRShakeSomeExtentsFree(cm_req_t *reqp, afs_uint32 oneFid, afs_uint32 minage)
{
    afs_uint32 code = 0;

    if (!lock_TryMutex(&buf_rdrReleaseExtentsLock)) {
        if (reqp->flags & CM_REQ_NORETRY)
            return CM_ERROR_WOULDBLOCK;

        osi_Log0(afsd_logp, "Waiting for prior RDR_RequestExtentRelease request to complete");
        lock_ObtainMutex(&buf_rdrReleaseExtentsLock);
    }

    for ( code = CM_ERROR_RETRY; code == CM_ERROR_RETRY; ) {
        LARGE_INTEGER heldExtents;
        AFSFileExtentCB extentList[1024];
        DWORD extentCount = 0;
        cm_buf_t *rbp, *srbp;
        cm_scache_t *rscp;
        time_t now;
        BOOL locked = FALSE;

        /* only retry if a call to RDR_RequestExtentRelease says to */
        code = 0;
        lock_ObtainWrite(&buf_globalLock);
        locked = TRUE;

        time(&now);
        for ( rbp = cm_data.buf_redirListEndp;
              code == 0 && rbp && (!oneFid || extentCount == 0);
              rbp = (cm_buf_t *) osi_QPrev(&rbp->q))
        {
            if (rbp->redirLastAccess >= rbp->redirReleaseRequested) {
                rscp = cm_FindSCache(&rbp->fid);
                if (!rscp)
                    continue;

                for ( srbp = redirq_to_cm_buf_t(rscp->redirQueueT);
                      srbp && extentCount < 1024;
                      srbp = redirq_to_cm_buf_t(osi_QPrev(&srbp->redirq)))
                {
                    /*
                     * Do not request a release if we have already done so
                     * or if the extent was delivered to windows less than
                     * 'minage' seconds ago.
                     */
                    if (srbp->redirLastAccess >= srbp->redirReleaseRequested &&
                        srbp->redirLastAccess < now - minage) {
                        extentList[extentCount].Flags = 0;
                        extentList[extentCount].Length = cm_data.blockSize;
                        extentList[extentCount].FileOffset.QuadPart = srbp->offset.QuadPart;
                        extentList[extentCount].CacheOffset.QuadPart = srbp->datap - cm_data.baseAddress;
                        srbp->redirReleaseRequested = now;
                        extentCount++;
                    }
                }
                cm_ReleaseSCache(rscp);
            }

            if ( !oneFid && extentCount > 0) {
                lock_ReleaseWrite(&buf_globalLock);
                locked = FALSE;

                heldExtents.QuadPart = cm_data.buf_redirCount;
                code = RDR_RequestExtentRelease(&rbp->fid, heldExtents, extentCount, extentList);

                extentCount = 0;
                lock_ObtainWrite(&buf_globalLock);
                locked = TRUE;
            }
        }
        if (locked)
            lock_ReleaseWrite(&buf_globalLock);

        if (oneFid) {
            heldExtents.QuadPart = cm_data.buf_redirCount;
            if (rbp && extentCount)
                code = RDR_RequestExtentRelease(&rbp->fid, heldExtents, extentCount, extentList);
            else
                code = RDR_RequestExtentRelease(NULL, heldExtents, 1024, NULL);
        }

        if ((code == CM_ERROR_RETRY) && (reqp->flags & CM_REQ_NORETRY)) {
            code = CM_ERROR_WOULDBLOCK;
            break;
        }
    }

    lock_ReleaseMutex(&buf_rdrReleaseExtentsLock);
    return code;
}
/* returns 0 if the buffer does not exist, and non-0 if it does */
buf_ExistsLocked(struct cm_scache *scp, osi_hyper_t *offsetp)
{
    cm_buf_t *bp;

    if ((bp = buf_FindLocked(&scp->fid, offsetp)) != NULL) {
        /* Do not call buf_ReleaseLocked() because we
         * do not want to allow the buffer to be added
         * to the free list.
         */
        afs_int32 refCount = InterlockedDecrement(&bp->refCount);
#ifdef DEBUG_REFCOUNT
        osi_Log2(afsd_logp, "buf_ExistsLocked bp 0x%p ref %d", bp, refCount);
        afsi_log("%s:%d buf_ExistsLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, refCount);
#endif
        return CM_BUF_EXISTS;
    }

    return 0;
}
/* recycle a buffer, removing it from the free list, hashing in its new identity
 * and returning it write-locked so that no one can use it.  Called without
 * any locks held, and can return an error if it loses the race condition and
 * finds that someone else created the desired buffer.
 *
 * If success is returned, the buffer is returned write-locked.
 *
 * May be called with null scp and offsetp, if we're just trying to reclaim some
 * space from the buffer pool.  In that case, the buffer will be returned
 * without being hashed into the hash table.
 */
long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf_t **bufpp)
{
    cm_buf_t *bp;       /* buffer we're dealing with */
    cm_buf_t *nextBp;   /* next buffer in file hash chain */
    afs_uint32 i;       /* temp */
    afs_uint64 n_bufs, n_nonzero, n_busy, n_dirty, n_own;

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */

    while (1) {
        lock_ObtainRead(&scp->bufCreateLock);
        lock_ObtainWrite(&buf_globalLock);
        /* check to see if we lost the race */
        if (buf_ExistsLocked(scp, offsetp)) {
            lock_ReleaseWrite(&buf_globalLock);
            lock_ReleaseRead(&scp->bufCreateLock);
            return CM_BUF_EXISTS;
        }

        /* does this fix the problem below?  it's a simple solution. */
        if (!cm_data.buf_freeListEndp)
        {
            lock_ReleaseWrite(&buf_globalLock);
            lock_ReleaseRead(&scp->bufCreateLock);

            if ( RDR_Initialized )
                goto rdr_release;

            osi_Log0(afsd_logp, "buf_GetNewLocked: Free Buffer List is empty - sleeping 200ms");
            Sleep(200);
            continue;
        }

        /* for debugging, assert free list isn't empty, although we
         * really should try waiting for a running transaction to finish
         * instead of this; or better, we should have a transaction
         * throttler prevent us from entering this situation.
         */
        osi_assertx(cm_data.buf_freeListEndp != NULL, "buf_GetNewLocked: no free buffers");

        /* look at all buffers in free list, some of which may temp.
         * have high refcounts and which then should be skipped,
         * starting cleaning I/O for those which are dirty.  If we find
         * a clean buffer, we rehash it, lock it and return it.
         */
        n_bufs = n_nonzero = n_busy = n_dirty = n_own = 0;
        for (bp = cm_data.buf_freeListEndp; bp; bp = (cm_buf_t *) osi_QPrev(&bp->q)) {
            n_bufs++;

            /* check to see if it really has zero ref count.  This
             * code can bump refcounts, at least, so it may not be
             * zero.
             */
            if (bp->refCount > 0) {
                n_nonzero++;
                continue;
            }

            /* we don't have to lock buffer itself, since the ref
             * count is 0 and we know it will stay zero as long as
             * we hold the global lock.
             */

            /* don't recycle someone in our own chunk */
            if (!cm_FidCmp(&bp->fid, &scp->fid) &&
                bp->dataVersion >= scp->bufDataVersionLow &&
                bp->dataVersion <= scp->dataVersion &&
                (bp->offset.LowPart & (-cm_chunkSize)) == (offsetp->LowPart & (-cm_chunkSize))) {
                n_own++;
                continue;
            }

            /* if this page is being filled (!) or cleaned, see if
             * the I/O has completed.  If not, skip it, otherwise
             * do the final processing for the I/O.
             */
            if (bp->flags & (CM_BUF_READING | CM_BUF_WRITING)) {
                /* probably shouldn't do this much work while
                 * holding the big lock?  Watch for contention
                 * here.
                 */
                n_busy++;
                continue;
            }

            /* leave the buffer alone if held by the redirector */
            if (bp->qFlags & CM_BUF_QREDIR)
                continue;

            if (bp->flags & CM_BUF_DIRTY) {
                n_dirty++;

                /* if the buffer is dirty, start cleaning it and
                 * move on to the next buffer.  We do this with
                 * just the lock required to minimize contention
                 * on the big lock.
                 */
                buf_HoldLocked(bp);
                lock_ReleaseWrite(&buf_globalLock);
                lock_ReleaseRead(&scp->bufCreateLock);

                /*
                 * grab required lock and clean.
                 * previously the claim was that the cleaning
                 * operation was async which it is not.  It would
                 * be a good idea to use an async mechanism here
                 * but there is none at the moment other than
                 * the buf_IncrSyncer() thread.
                 */
                if (cm_FidCmp(&scp->fid, &bp->fid) == 0)
                    buf_Clean(scp, bp, reqp, 0, NULL);
                else
                    buf_Clean(NULL, bp, reqp, 0, NULL);

                /* now put it back and go around again */
                buf_Release(bp);

                /* but first obtain the locks we gave up
                 * before the buf_Clean() call */
                lock_ObtainRead(&scp->bufCreateLock);
                lock_ObtainWrite(&buf_globalLock);

                /*
                 * Since we dropped the locks we need to verify that
                 * another thread has not allocated the buffer for us.
                 */
                if (buf_ExistsLocked(scp, offsetp)) {
                    lock_ReleaseWrite(&buf_globalLock);
                    lock_ReleaseRead(&scp->bufCreateLock);
                    return CM_BUF_EXISTS;
                }
                continue;
            }

            osi_Log3(afsd_logp, "buf_GetNewLocked: scp 0x%p examined %u buffers before recycling bufp 0x%p",
                     scp, n_bufs, bp);
            osi_Log4(afsd_logp, "... nonzero %u; own %u; busy %u; dirty %u", n_nonzero, n_own, n_busy, n_dirty);

            /* if we get here, we know that the buffer still has a 0
             * ref count, and that it is clean and has no currently
             * pending I/O.  This is the dude to return.
             * Remember that as long as the ref count is 0, we know
             * that we won't have any lock conflicts, so we can grab
             * the buffer lock out of order in the locking hierarchy.
             */
            buf_Recycle(bp);

            /* now hash in as our new buffer, and give it the
             * appropriate label, if requested.
             */
            if (scp) {
                lock_AssertWrite(&buf_globalLock);

                _InterlockedOr(&bp->qFlags, CM_BUF_QINHASH);

                bp->fid = scp->fid;
                bp->offset = *offsetp;
                i = BUF_HASH(&scp->fid, offsetp);
                bp->hashp = cm_data.buf_scacheHashTablepp[i];
                cm_data.buf_scacheHashTablepp[i] = bp;
                i = BUF_FILEHASH(&scp->fid);
                nextBp = cm_data.buf_fileHashTablepp[i];
                bp->fileHashp = nextBp;
                bp->fileHashBackp = NULL;
                if (nextBp)
                    nextBp->fileHashBackp = bp;
                cm_data.buf_fileHashTablepp[i] = bp;
            }

            /* we should remove it from the lru queue.  It better still be there,
             * since we've held the global (big) lock since we found it there.
             */
            osi_assertx(bp->qFlags & CM_BUF_QINLRU,
                        "buf_GetNewLocked: LRU screwup");

            osi_QRemoveHT( (osi_queue_t **) &cm_data.buf_freeListp,
                           (osi_queue_t **) &cm_data.buf_freeListEndp,
                           &bp->q);
            _InterlockedAnd(&bp->qFlags, ~CM_BUF_QINLRU);
            buf_DecrementFreeCount();

            /* prepare to return it.  Give it a refcount */
            InterlockedIncrement(&bp->refCount);
#ifdef DEBUG_REFCOUNT
            osi_Log2(afsd_logp, "buf_GetNewLocked bp 0x%p ref %d", bp, 1);
            afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, 1);
#endif
            /* grab the mutex so that people don't use it
             * before the caller fills it with data.  Again, no one
             * should have been able to get to this dude to lock it.
             */
            if (!lock_TryMutex(&bp->mx)) {
                osi_Log2(afsd_logp, "buf_GetNewLocked bp 0x%p cannot be mutex locked. refCount %d should be 0",
                         bp, bp->refCount);
                osi_panic("buf_GetNewLocked: TryMutex failed",__FILE__,__LINE__);
            }

            *bufpp = bp;

            lock_ReleaseWrite(&buf_globalLock);
            lock_ReleaseRead(&scp->bufCreateLock);

#ifdef TESTING
            buf_ValidateBufQueues();
#endif /* TESTING */
            return 0;
        } /* for all buffers in lru queue */
        lock_ReleaseWrite(&buf_globalLock);
        lock_ReleaseRead(&scp->bufCreateLock);

        osi_Log1(afsd_logp, "buf_GetNewLocked: Free Buffer List has %u buffers, none free", n_bufs);
        osi_Log4(afsd_logp, "... nonzero %u; own %u; busy %u; dirty %u", n_nonzero, n_own, n_busy, n_dirty);

        if (RDR_Initialized) {
            afs_uint32 code;
          rdr_release:
            code = buf_RDRShakeSomeExtentsFree(reqp, TRUE, 2 /* seconds */);
            switch (code) {
            case CM_ERROR_RETRY:
            case 0:
                continue;
            case CM_ERROR_WOULDBLOCK:
                return CM_ERROR_WOULDBLOCK;
            }
        }

        Sleep(100);     /* give some time for a buffer to be freed */
    }   /* while loop over everything */
}
/*
 * get a page, returning it held but unlocked.  the page may or may not
 * contain valid data.
 *
 * The scp must be unlocked when passed in.
 */
long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf_t **bufpp)
{
    cm_buf_t *bp;
    long code = 0;
    int created = 0;
    osi_hyper_t pageOffset;
    unsigned long tcount;
#ifdef DISKCACHE95
    cm_diskcache_t *dcp;
#endif /* DISKCACHE95 */

    pageOffset.HighPart = offsetp->HighPart;
    pageOffset.LowPart = offsetp->LowPart & ~(cm_data.buf_blockSize-1);

    while (1) {
#ifdef TESTING
        buf_ValidateBufQueues();
#endif /* TESTING */

        bp = buf_Find(&scp->fid, &pageOffset);
        if (bp) {
            /* lock it and break out */
            lock_ObtainMutex(&bp->mx);

#ifdef DISKCACHE95
            /* touch disk chunk to update LRU info */
            diskcache_Touch(bp->dcp);
#endif /* DISKCACHE95 */
            break;
        }

        /* otherwise, we have to create a page */
        code = buf_GetNewLocked(scp, &pageOffset, reqp, &bp);
        switch (code) {
        case 0:
            /* the requested buffer was created */
            created = 1;
            break;
        case CM_BUF_EXISTS:
            /*
             * the requested buffer existed by the time the
             * scp->bufCreateLock and buf_globalLock could be obtained.
             * loop again and permit buf_Find() to obtain a reference.
             */
            continue;
        default:
            /*
             * the requested buffer could not be created.
             * return the error to the caller.
             */
#ifdef TESTING
            buf_ValidateBufQueues();
#endif /* TESTING */
            return code;
        }
        break;
    } /* big while loop */

    /* if we get here, we have a locked buffer that may have just been
     * created, in which case it needs to be filled with data.
     */
    if (created) {
        /* load the page; freshly created pages should be idle */
        osi_assertx(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)), "incorrect cm_buf_t flags");

        /*
         * start the I/O; may drop lock.  as of this writing, the only
         * implementation of Readp is cm_BufRead() which simply sets
         * tcount to 0 and returns success.
         */
        _InterlockedOr(&bp->flags, CM_BUF_READING);
        code = (*cm_buf_opsp->Readp)(bp, cm_data.buf_blockSize, &tcount, NULL);

#ifdef DISKCACHE95
        code = diskcache_Get(&bp->fid, &bp->offset, bp->datap, cm_data.buf_blockSize, &bp->dataVersion, &tcount, &dcp);
        bp->dcp = dcp;          /* pointer to disk cache struct. */
#endif /* DISKCACHE95 */

        if (code != 0) {
            /* failure or queued */

            /* unless cm_BufRead() is altered, this path cannot be hit */
            if (code != ERROR_IO_PENDING) {
                bp->error = code;
                _InterlockedOr(&bp->flags, CM_BUF_ERROR);
                _InterlockedAnd(&bp->flags, ~CM_BUF_READING);
                if (bp->flags & CM_BUF_WAITING) {
                    osi_Log1(buf_logp, "buf_Get Waking bp 0x%p", bp);
                    osi_Wakeup((LONG_PTR) bp);
                }
                lock_ReleaseMutex(&bp->mx);
                buf_Release(bp);
#ifdef TESTING
                buf_ValidateBufQueues();
#endif /* TESTING */
                return code;
            }
        } else {
            /*
             * otherwise, I/O completed instantly and we're done, except
             * for padding the xfr out with 0s and checking for EOF
             */
            if (tcount < (unsigned long) cm_data.buf_blockSize) {
                memset(bp->datap+tcount, 0, cm_data.buf_blockSize - tcount);
                if (tcount == 0)
                    _InterlockedOr(&bp->flags, CM_BUF_EOF);
            }
            _InterlockedAnd(&bp->flags, ~CM_BUF_READING);
            if (bp->flags & CM_BUF_WAITING) {
                osi_Log1(buf_logp, "buf_Get Waking bp 0x%p", bp);
                osi_Wakeup((LONG_PTR) bp);
            }
        }
    }

    /* wait for reads, either that which we started above, or that someone
     * else started.  We don't care if we return a buffer being cleaned.
     */
    if (bp->flags & CM_BUF_READING)
        buf_WaitIO(scp, bp);

    /* once it has been read once, we can unlock it and return it, still
     * with its refcount held.
     */
    lock_ReleaseMutex(&bp->mx);
    *bufpp = bp;

    /* now remove from queue; will be put in at the head (farthest from
     * being recycled) when we're done in buf_Release.
     */
    lock_ObtainWrite(&buf_globalLock);
    if (bp->qFlags & CM_BUF_QINLRU) {
        osi_QRemoveHT( (osi_queue_t **) &cm_data.buf_freeListp,
                       (osi_queue_t **) &cm_data.buf_freeListEndp,
                       &bp->q);
        _InterlockedAnd(&bp->qFlags, ~CM_BUF_QINLRU);
        buf_DecrementFreeCount();
    }
    lock_ReleaseWrite(&buf_globalLock);

    osi_Log4(buf_logp, "buf_Get returning bp 0x%p for scp 0x%p, offset 0x%x:%08x",
             bp, scp, offsetp->HighPart, offsetp->LowPart);
#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */
    return 0;
}
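
/*
 * Illustrative sketch (not part of the build): the typical buf_Get()
 * consumer pattern as a write path might use it.  Error handling is
 * elided; the helper name and the full-block dirty range are assumptions.
 */
#if 0
static long
example_WritePage(cm_scache_t *scp, osi_hyper_t *offsetp, cm_user_t *userp,
                  cm_req_t *reqp)
{
    cm_buf_t *bp;
    long code;

    /* returns the buffer held but unlocked */
    code = buf_Get(scp, offsetp, reqp, &bp);
    if (code)
        return code;

    lock_ObtainMutex(&bp->mx);
    /* ... modify bp->datap here ... */
    buf_SetDirty(bp, reqp, 0, cm_data.buf_blockSize, userp);
    lock_ReleaseMutex(&bp->mx);

    buf_Release(bp);
    return 0;
}
#endif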
/* clean a buffer synchronously */
afs_uint32 buf_Clean(cm_scache_t *scp, cm_buf_t *bp, cm_req_t *reqp, afs_uint32 flags, afs_uint32 *pisdirty)
{
    afs_uint32 code;

    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
    osi_assertx(!(flags & CM_BUF_WRITE_SCP_LOCKED), "scp->rw must not be held when calling buf_Clean");

    lock_ObtainMutex(&bp->mx);
    code = buf_CleanLocked(scp, bp, reqp, flags, pisdirty);
    lock_ReleaseMutex(&bp->mx);

    return code;
}

/* wait for a buffer's cleaning to finish */
void buf_CleanWait(cm_scache_t * scp, cm_buf_t *bp, afs_uint32 locked)
{
    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");

    if (!locked)
        lock_ObtainMutex(&bp->mx);
    if (bp->flags & CM_BUF_WRITING) {
        buf_WaitIO(scp, bp);
    }
    if (!locked)
        lock_ReleaseMutex(&bp->mx);
}

/* set the dirty flag on a buffer, and set associated write-ahead log,
 * if there is one.  Allow one to be added to a buffer, but not changed.
 *
 * The buffer must be locked before calling this routine.
 */
void buf_SetDirty(cm_buf_t *bp, cm_req_t *reqp, afs_uint32 offset, afs_uint32 length, cm_user_t *userp)
{
    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
    osi_assertx(bp->refCount > 0, "cm_buf_t refcount 0");

    if (bp->flags & CM_BUF_DIRTY) {

        osi_Log1(buf_logp, "buf_SetDirty 0x%p already dirty", bp);

        if (bp->dirty_offset <= offset) {
            if (bp->dirty_offset + bp->dirty_length >= offset + length) {
                /* dirty_length remains the same */
            } else {
                bp->dirty_length = offset + length - bp->dirty_offset;
            }
        } else /* bp->dirty_offset > offset */ {
            if (bp->dirty_offset + bp->dirty_length >= offset + length) {
                bp->dirty_length = bp->dirty_offset + bp->dirty_length - offset;
            } else {
                bp->dirty_length = length;
            }
            bp->dirty_offset = offset;
        }
    } else {
        osi_Log1(buf_logp, "buf_SetDirty 0x%p", bp);

        _InterlockedOr(&bp->flags, CM_BUF_DIRTY);

        /* and turn off EOF flag, since it has associated data now */
        _InterlockedAnd(&bp->flags, ~CM_BUF_EOF);

        bp->dirty_offset = offset;
        bp->dirty_length = length;
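
        /*
         * Worked example of the range merge in the dirty branch above
         * (illustrative values): if the buffer already has dirty_offset=512,
         * dirty_length=256 (bytes 512..767) and a new write dirties
         * offset=700, length=200 (bytes 700..899), the first branch applies
         * (512 <= 700) and dirty_length becomes 700 + 200 - 512 = 388,
         * i.e. the merged range covers bytes 512..899.
         */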
    }

    /*
     * if the request is not from the afs redirector,
     * add to the dirty list.  The redirector interface ensures
     * that a background store operation is queued for each and
     * every dirty extent that is released.  Therefore, the
     * buf_IncrSyncer thread is not required to ensure that
     * dirty buffers are written to the file server.
     *
     * we obtain a hold on the buffer for as long as it remains
     * in the list.  buffers are only removed from the list by
     * the buf_IncrSyncer function regardless of when else the
     * dirty flag might be cleared.
     *
     * This should never happen but just in case there is a bug
     * elsewhere, never add to the dirty list if the buffer is
     * already there.
     */
    if (!(reqp->flags & CM_REQ_SOURCE_REDIR)) {
        lock_ObtainWrite(&buf_globalLock);
        if (!(bp->qFlags & CM_BUF_QINDL)) {
            buf_HoldLocked(bp);
            if (!cm_data.buf_dirtyListp) {
                cm_data.buf_dirtyListp = cm_data.buf_dirtyListEndp = bp;
            } else {
                cm_data.buf_dirtyListEndp->dirtyp = bp;
                cm_data.buf_dirtyListEndp = bp;
            }
            cm_data.buf_dirtyListEndp->dirtyp = NULL;
            _InterlockedOr(&bp->qFlags, CM_BUF_QINDL);
        }
        lock_ReleaseWrite(&buf_globalLock);
    }

    /* and record the last writer */
    if (bp->userp != userp) {
        cm_HoldUser(userp);
        if (bp->userp)
            cm_ReleaseUser(bp->userp);
        bp->userp = userp;
    }
}

/* clean all buffers, reset log pointers and invalidate all buffers.
 * Called with no locks held, and returns with same.
 *
 * This function is guaranteed to clean and remove the log ptr of all the
 * buffers that were dirty or had non-zero log ptrs before the call was
 * made.  That's sufficient to clean up any garbage left around by recovery,
 * which is all we're counting on this for; there may be newly created buffers
 * added while we're running, but that should be OK.
 *
 * In an environment where there are no transactions (artificially imposed, for
 * example, when switching the database to raw mode), this function is used to
 * make sure that all updates have been written to the disk.  In that case, we don't
 * really require that we forget the log association between pages and logs, but
 * it also doesn't hurt.  Since raw mode I/O goes through this buffer package, we don't
 * have to worry about invalidating data in the buffers.
 *
 * This function is used at the end of recovery as paranoia to get the recovered
 * database out to disk.  It removes all references to the recovery log and cleans
 * all buffers.
 */
long buf_CleanAndReset(void)
{
    afs_uint32 i;
    cm_buf_t *bp;
    cm_req_t req;

    cm_InitReq(&req);

    lock_ObtainRead(&buf_globalLock);
    for (i=0; i<cm_data.buf_hashSize; i++) {
        for (bp = cm_data.buf_scacheHashTablepp[i]; bp; bp = bp->hashp) {
            if (bp->qFlags & CM_BUF_QREDIR) {
                osi_Log1(buf_logp, "buf_CleanAndReset buffer held by redirector bp 0x%p", bp);

                /* Request single extent from the redirector */
                buf_RDRShakeAnExtentFree(bp, &req);
            }

            if ((bp->flags & CM_BUF_DIRTY) == CM_BUF_DIRTY) {
                buf_HoldLocked(bp);
                lock_ReleaseRead(&buf_globalLock);

                /* now no locks are held; clean buffer and go on */
                req.flags |= CM_REQ_NORETRY;

                buf_Clean(NULL, bp, &req, 0, NULL);
                buf_CleanWait(NULL, bp, FALSE);

                /* relock and release buffer */
                lock_ObtainRead(&buf_globalLock);
                buf_ReleaseLocked(bp, FALSE);
            }
        } /* over one bucket */
    } /* for loop over all hash buckets */

    lock_ReleaseRead(&buf_globalLock);

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */

    /* and we're done */
    return 0;
}

/* called without global lock being held, reserves buffers for callers
 * that need more than one held (not locked) at once.
 */
void buf_ReserveBuffers(afs_uint64 nbuffers)
{
    lock_ObtainWrite(&buf_globalLock);
    while (1) {
        if (cm_data.buf_reservedBufs + nbuffers > cm_data.buf_maxReservedBufs) {
            cm_data.buf_reserveWaiting = 1;
            osi_Log1(buf_logp, "buf_ReserveBuffers waiting for %d bufs", nbuffers);
            osi_SleepW((LONG_PTR) &cm_data.buf_reservedBufs, &buf_globalLock);
            lock_ObtainWrite(&buf_globalLock);
        } else {
            cm_data.buf_reservedBufs += nbuffers;
            break;
        }
    }
    lock_ReleaseWrite(&buf_globalLock);
}

int buf_TryReserveBuffers(afs_uint64 nbuffers)
{
    int code;

    lock_ObtainWrite(&buf_globalLock);
    if (cm_data.buf_reservedBufs + nbuffers > cm_data.buf_maxReservedBufs) {
        code = 0;
    } else {
        cm_data.buf_reservedBufs += nbuffers;
        code = 1;
    }
    lock_ReleaseWrite(&buf_globalLock);
    return code;
}

/* called without global lock held, releases reservation held by
 * buf_ReserveBuffers.
 */
void buf_UnreserveBuffers(afs_uint64 nbuffers)
{
    lock_ObtainWrite(&buf_globalLock);
    cm_data.buf_reservedBufs -= nbuffers;
    if (cm_data.buf_reserveWaiting) {
        cm_data.buf_reserveWaiting = 0;
        osi_Wakeup((LONG_PTR) &cm_data.buf_reservedBufs);
    }
    lock_ReleaseWrite(&buf_globalLock);
}
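
/*
 * Illustrative sketch (not part of the build): reservations bracket
 * multi-buffer operations so that concurrent callers cannot exhaust the
 * pool and deadlock each other in buf_Get().
 */
#if 0
static void
example_ReservedOperation(void)
{
    buf_ReserveBuffers(2);   /* may sleep until 2 buffers can be promised */

    /* ... call buf_Get() up to two times and use the buffers ... */

    buf_UnreserveBuffers(2); /* wake anyone waiting in buf_ReserveBuffers */
}
#endif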
/* truncate the buffers past sizep, zeroing out the page, if we don't
 * end on a page boundary.
 *
 * Requires scp->bufCreateLock to be write locked.
 */
long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
                  osi_hyper_t *sizep)
{
    cm_buf_t *bufp;
    cm_buf_t *nbufp;            /* next buffer, if didRelease */
    osi_hyper_t bufEnd;
    long code = 0;
    long bufferPos;
    afs_uint32 i;
    afs_uint32 invalidate = 0;

    /* assert that scp->bufCreateLock is held in write mode */
    lock_AssertWrite(&scp->bufCreateLock);

    i = BUF_FILEHASH(&scp->fid);

    lock_ObtainRead(&buf_globalLock);
    bufp = cm_data.buf_fileHashTablepp[i];
    if (bufp == NULL) {
        lock_ReleaseRead(&buf_globalLock);
        return 0;
    }

    buf_HoldLocked(bufp);
    lock_ReleaseRead(&buf_globalLock);

    while (bufp) {
        lock_ObtainMutex(&bufp->mx);

        bufEnd.HighPart = 0;
        bufEnd.LowPart = cm_data.buf_blockSize;
        bufEnd = LargeIntegerAdd(bufEnd, bufp->offset);

        if (cm_FidCmp(&bufp->fid, &scp->fid) == 0 &&
            LargeIntegerLessThan(*sizep, bufEnd)) {
            buf_WaitIO(scp, bufp);
        }
        lock_ObtainWrite(&scp->rw);

        /* make sure we have a callback (so we have the right value for
         * the length), and wait for it to be safe to do a truncate.
         */
        code = cm_SyncOp(scp, bufp, userp, reqp, 0,
                         CM_SCACHESYNC_NEEDCALLBACK
                         | CM_SCACHESYNC_GETSTATUS
                         | CM_SCACHESYNC_SETSIZE
                         | CM_SCACHESYNC_BUFLOCKED);

        /* if we succeeded in our locking, and this applies to the right
         * file, and the truncate request overlaps the buffer either
         * totally or partially, then do something.
         */
        if (code == 0 && cm_FidCmp(&bufp->fid, &scp->fid) == 0
             && LargeIntegerLessThan(*sizep, bufEnd)) {

            /* destroy the buffer, turning off its dirty bit, if
             * we're truncating the whole buffer.  Otherwise, set
             * the dirty bit, and clear out the tail of the buffer
             * if we just overlap some.
             */
            if (LargeIntegerLessThanOrEqualTo(*sizep, bufp->offset)) {
                /* truncating the entire page */
                if (reqp->flags & CM_REQ_SOURCE_REDIR) {
                    /*
                     * Implicitly clear the redirector flag
                     * and release the matching hold.
                     */
                    if (bufp->qFlags & CM_BUF_QREDIR) {
                        osi_Log4(buf_logp, "buf_Truncate taking from file system bufp 0x%p vno 0x%x foffset 0x%x:%x",
                                 bufp, bufp->fid.vnode, bufp->offset.HighPart, bufp->offset.LowPart);
                        lock_ObtainWrite(&buf_globalLock);
                        if (bufp->qFlags & CM_BUF_QREDIR) {
                            buf_RemoveFromRedirQueue(scp, bufp);
                            buf_ReleaseLocked(bufp, TRUE);
                        }
                        lock_ReleaseWrite(&buf_globalLock);
                    }
                } else {
                    invalidate = 1;
                }
                _InterlockedAnd(&bufp->flags, ~CM_BUF_DIRTY);
                bufp->error = 0;
                bufp->dirty_offset = 0;
                bufp->dirty_length = 0;
                bufp->dataVersion = CM_BUF_VERSION_BAD; /* known bad */
                bufp->dirtyCounter++;
            } else {
                /* don't set dirty, since dirty implies
                 * currently up-to-date.  Don't need to do this,
                 * since we'll update the length anyway.
                 *
                 * Zero out remainder of the page, in case we
                 * seek and write past EOF, and make this data
                 * visible again.
                 */
                bufferPos = sizep->LowPart & (cm_data.buf_blockSize - 1);
                osi_assertx(bufferPos != 0, "non-zero bufferPos");
                memset(bufp->datap + bufferPos, 0,
                       cm_data.buf_blockSize - bufferPos);
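
                /*
                 * Worked example for this partial-page case (illustrative
                 * sizes): with a 4 KiB buf_blockSize, truncating a file to
                 * size 0x1800 gives bufferPos = 0x800 in the buffer at
                 * offset 0x1000, so bytes 0x800..0xFFF of that buffer are
                 * zeroed while bytes 0..0x7FF remain valid.
                 */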
            }
        }

        cm_SyncOpDone( scp, bufp,
                       CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS
                       | CM_SCACHESYNC_SETSIZE | CM_SCACHESYNC_BUFLOCKED);

        lock_ReleaseWrite(&scp->rw);
        lock_ReleaseMutex(&bufp->mx);

        if (!code) {
            nbufp = bufp->fileHashp;
            if (nbufp)
                buf_Hold(nbufp);
        } else {
            /* This forces the loop to end and the error code
             * to be returned. */
            nbufp = NULL;
        }
        buf_Release(bufp);
        bufp = nbufp;
    }

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */

    if (invalidate && RDR_Initialized)
        RDR_InvalidateObject(scp->fid.cell, scp->fid.volume, scp->fid.vnode,
                             scp->fid.unique, scp->fid.hash,
                             scp->fileType, AFS_INVALIDATE_SMB);

    /* done */
    return code;
}

long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
{
    long code = 0;
    cm_buf_t *bp;               /* buffer we're hacking on */
    cm_buf_t *nbp;
    int didRelease;
    afs_uint32 i;
    afs_uint32 stable = 0;

    i = BUF_FILEHASH(&scp->fid);

    nbp = NULL;
    lock_ObtainRead(&buf_globalLock);
    bp = cm_data.buf_fileHashTablepp[i];
    if (bp)
        buf_HoldLocked(bp);
    lock_ReleaseRead(&buf_globalLock);

    for (; bp; bp = nbp) {
        didRelease = 0;         /* haven't released this buffer yet */

        /* clean buffer synchronously */
        if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {

            if (code == 0 && !stable && (bp->flags & CM_BUF_DIRTY)) {
                /*
                 * we must stabilize the object to ensure that buffer
                 * changes cannot occur while the flush is performed.
                 * However, we do not want to Stabilize if we do not
                 * need to because Stabilize obtains a callback.
                 */
                code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
                stable = (code == 0);
            }

            if (code == CM_ERROR_BADFD) {
                /* if the scp's FID is bad it's because we received VNOVNODE
                 * when attempting to FetchStatus before the write.  This
                 * page therefore contains data that can no longer be stored.
                 */
                lock_ObtainMutex(&bp->mx);
                _InterlockedAnd(&bp->flags, ~CM_BUF_DIRTY);
                _InterlockedOr(&bp->flags, CM_BUF_ERROR);
                bp->error = CM_ERROR_BADFD;
                bp->dirty_offset = 0;
                bp->dirty_length = 0;
                bp->dataVersion = CM_BUF_VERSION_BAD;   /* known bad */
                bp->dirtyCounter++;
                lock_ReleaseMutex(&bp->mx);
            } else if (!(scp->flags & CM_SCACHEFLAG_RO)) {
                if (code == 0) {
                    lock_ObtainMutex(&bp->mx);

                    /* start cleaning the buffer, and wait for it to finish */
                    buf_CleanLocked(scp, bp, reqp, 0, NULL);
                    buf_WaitIO(scp, bp);

                    lock_ReleaseMutex(&bp->mx);
                }
            }

            /* actually, we only know that buffer is clean if ref
             * count is 1, since we don't have the buffer itself locked.
             */
            if (!(bp->flags & CM_BUF_DIRTY) && !(bp->qFlags & CM_BUF_QREDIR)) {
                lock_ObtainWrite(&buf_globalLock);
                if (!(bp->flags & CM_BUF_DIRTY) && !(bp->qFlags & CM_BUF_QREDIR)) {
                    if (bp->refCount == 1) {    /* bp is held above */
                        nbp = bp->fileHashp;
                        if (nbp)
                            buf_HoldLocked(nbp);
                        buf_ReleaseLocked(bp, TRUE);
                        didRelease = 1;
                        buf_Recycle(bp);
                    }
                }
                lock_ReleaseWrite(&buf_globalLock);
            }
        }

        if (!didRelease) {
            lock_ObtainRead(&buf_globalLock);
            nbp = bp->fileHashp;
            if (nbp)
                buf_HoldLocked(nbp);
            buf_ReleaseLocked(bp, FALSE);
            lock_ReleaseRead(&buf_globalLock);
        }
    }   /* for loop over a bunch of buffers */

    if (stable)
        (*cm_buf_opsp->Unstabilizep)(scp, userp);

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */

    return code;
}
/* Must be called with scp->rw held */
long buf_ForceDataVersion(cm_scache_t * scp, afs_uint64 fromVersion, afs_uint64 toVersion)
{
    long code = 0;
    cm_buf_t *bp;
    afs_uint32 i;

    lock_AssertAny(&scp->rw);

    i = BUF_FILEHASH(&scp->fid);

    lock_ObtainRead(&buf_globalLock);
    for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp = bp->fileHashp) {
        if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
            if (bp->dataVersion == fromVersion) {
                bp->dataVersion = toVersion;
            }
        }
    }
    lock_ReleaseRead(&buf_globalLock);

    return code;
}
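/*
 * Usage sketch (hypothetical, compiled out): when a store bumps a file's
 * data version without changing its contents, a caller already holding
 * scp->rw can retag the cached buffers instead of discarding them:
 */
#ifdef BUF_USAGE_EXAMPLES
static void example_RetagBuffers(cm_scache_t *scp, afs_uint64 oldDV)
{
    lock_ObtainWrite(&scp->rw);
    buf_ForceDataVersion(scp, oldDV, oldDV + 1);  /* buffers remain valid */
    lock_ReleaseWrite(&scp->rw);
}
#endif /* BUF_USAGE_EXAMPLES */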
long buf_CleanVnode(struct cm_scache *scp, cm_user_t *userp, cm_req_t *reqp)
{
    long code = 0;
    long i;
    cm_buf_t *bp;               /* buffer we're hacking on */
    cm_buf_t *nbp;              /* next one */
    afs_uint32 wasDirty = 0;

    if (RDR_Initialized && scp->redirBufCount > 0) {
        /* Retrieve all extents for this file from the redirector */
        buf_RDRShakeFileExtentsFree(scp, reqp);
    }

    i = BUF_FILEHASH(&scp->fid);

    lock_ObtainRead(&buf_globalLock);
    bp = cm_data.buf_fileHashTablepp[i];
    if (bp)
        buf_HoldLocked(bp);
    lock_ReleaseRead(&buf_globalLock);

    for (; bp; bp = nbp) {
        /* clean buffer synchronously */
        if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
            /*
             * If the buffer is held by the redirector we must fetch
             * it back in order to determine whether or not it is in
             * fact dirty.
             */
            lock_ObtainRead(&buf_globalLock);
            if (bp->qFlags & CM_BUF_QREDIR) {
                osi_Log1(buf_logp,"buf_CleanVnode buffer held by redirector bp 0x%p", bp);

                /* Retrieve single extent from the redirector */
                buf_RDRShakeAnExtentFree(bp, reqp);
            }
            lock_ReleaseRead(&buf_globalLock);

            lock_ObtainMutex(&bp->mx);
            if ((bp->flags & CM_BUF_DIRTY)) {
                if (userp && userp != bp->userp) {
                    cm_HoldUser(userp);
                    if (bp->userp)
                        cm_ReleaseUser(bp->userp);
                    bp->userp = userp;
                }

                switch (code) {
                case CM_ERROR_NOSUCHFILE:
                case CM_ERROR_BADFD:
                case CM_ERROR_NOACCESS:
                case CM_ERROR_QUOTA:
                case CM_ERROR_SPACE:
                case CM_ERROR_TOOBIG:
                case CM_ERROR_READONLY:
                case CM_ERROR_NOSUCHPATH:
                    /*
                     * Apply the previous fatal error to this buffer.
                     * Do not waste the time attempting to store to
                     * the file server when we know it will fail.
                     */
                    _InterlockedAnd(&bp->flags, ~CM_BUF_DIRTY);
                    _InterlockedOr(&bp->flags, CM_BUF_ERROR);
                    bp->dirty_offset = 0;
                    bp->dirty_length = 0;
                    bp->error = code;
                    bp->dataVersion = CM_BUF_VERSION_BAD;   /* known bad */
                    break;
                case CM_ERROR_TIMEDOUT:
                case CM_ERROR_ALLDOWN:
                case CM_ERROR_ALLBUSY:
                case CM_ERROR_ALLOFFLINE:
                case CM_ERROR_CLOCKSKEW:
                    /* do not mark the buffer in error state but do
                     * not attempt to complete the rest either.
                     */
                    break;
                default:
                    code = buf_CleanLocked(scp, bp, reqp, 0, &wasDirty);
                    if (bp->flags & CM_BUF_ERROR) {
                        code = bp->error;
                        if (code == 0)
                            code = -1;
                    }
                }
                buf_CleanWait(scp, bp, TRUE);
            }
            lock_ReleaseMutex(&bp->mx);
        }

        lock_ObtainRead(&buf_globalLock);
        nbp = bp->fileHashp;
        if (nbp)
            buf_HoldLocked(nbp);
        buf_ReleaseLocked(bp, FALSE);
        lock_ReleaseRead(&buf_globalLock);
    }   /* for loop over a bunch of buffers */

#ifdef TESTING
    buf_ValidateBufQueues();
#endif /* TESTING */

    return code;
}
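/*
 * Usage sketch (hypothetical, compiled out): buf_CleanVnode is the
 * whole-file write-back entry point; a flush-on-close path could drive
 * it like this, leaving buffers that hit transient errors dirty so a
 * later pass can retry them:
 */
#ifdef BUF_USAGE_EXAMPLES
static long example_FlushOnClose(cm_scache_t *scp, cm_user_t *userp)
{
    cm_req_t req;

    cm_InitReq(&req);
    return buf_CleanVnode(scp, userp, &req);
}
#endif /* BUF_USAGE_EXAMPLES */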
#ifdef TESTING
void
buf_ValidateBufQueues(void)
{
    cm_buf_t * bp, *bpb, *bpf, *bpa;
    afs_uint32 countf=0, countb=0, counta=0;

    lock_ObtainRead(&buf_globalLock);
    for (bp = cm_data.buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
        if (bp->magic != CM_BUF_MAGIC)
            osi_panic("buf magic error",__FILE__,__LINE__);
        countb++;
        bpb = bp;
    }
    for (bp = cm_data.buf_freeListp; bp; bp=(cm_buf_t *) osi_QNext(&bp->q)) {
        if (bp->magic != CM_BUF_MAGIC)
            osi_panic("buf magic error",__FILE__,__LINE__);
        countf++;
        bpf = bp;
    }
    for (bp = cm_data.buf_allp; bp; bp=bp->allp) {
        if (bp->magic != CM_BUF_MAGIC)
            osi_panic("buf magic error",__FILE__,__LINE__);
        counta++;
        bpa = bp;
    }
    lock_ReleaseRead(&buf_globalLock);

    /* the free list must agree when walked from both ends, and every
     * buffer must be present on the all-buffers list */
    if (countb != countf)
        osi_panic("buf free list count mismatch",__FILE__,__LINE__);

    if (counta != cm_data.buf_nbuffers)
        osi_panic("buf all-list count mismatch",__FILE__,__LINE__);
}
#endif /* TESTING */
/* dump the contents of the buf_scacheHashTablepp. */
int cm_DumpBufHashTable(FILE *outputFile, char *cookie, int lock)
{
    DWORD zilch;
    cm_buf_t *bp;
    char output[1024];
    afs_uint32 i;

    if (cm_data.buf_scacheHashTablepp == NULL)
        return -1;

    if (lock)
        lock_ObtainRead(&buf_globalLock);

    StringCbPrintfA(output, sizeof(output), "%s - dumping buf_HashTable - buf_hashSize=%d\r\n",
                    cookie, cm_data.buf_hashSize);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);

    for (i = 0; i < cm_data.buf_hashSize; i++)
    {
        for (bp = cm_data.buf_scacheHashTablepp[i]; bp; bp=bp->hashp)
        {
            StringCbPrintfA(output, sizeof(output),
                            "%s bp=0x%p, hash=%d, fid (cell=%d, volume=%d, "
                            "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
                            "flags=0x%x, qFlags=0x%x, cmFlags=0x%x, error=0x%x, refCount=%d\r\n",
                            cookie, (void *)bp, i, bp->fid.cell, bp->fid.volume,
                            bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
                            bp->offset.LowPart, bp->dataVersion, bp->flags, bp->qFlags,
                            bp->cmFlags, bp->error, bp->refCount);
            WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
        }
    }

    StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_HashTable.\r\n", cookie);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);

    StringCbPrintfA(output, sizeof(output), "%s - dumping buf_freeListEndp\r\n", cookie);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
    for (bp = cm_data.buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
        StringCbPrintfA(output, sizeof(output),
                        "%s bp=0x%p, fid (cell=%d, volume=%d, "
                        "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
                        "flags=0x%x, qFlags=0x%x, cmFlags=0x%x, error=0x%x, refCount=%d\r\n",
                        cookie, (void *)bp, bp->fid.cell, bp->fid.volume,
                        bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
                        bp->offset.LowPart, bp->dataVersion, bp->flags, bp->qFlags,
                        bp->cmFlags, bp->error, bp->refCount);
        WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
    }
    StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_freeListEndp.\r\n", cookie);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);

    StringCbPrintfA(output, sizeof(output), "%s - dumping buf_dirtyListp\r\n", cookie);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
    for (bp = cm_data.buf_dirtyListp; bp; bp=bp->dirtyp) {
        StringCbPrintfA(output, sizeof(output),
                        "%s bp=0x%p, fid (cell=%d, volume=%d, "
                        "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
                        "flags=0x%x, qFlags=0x%x, cmFlags=0x%x, error=0x%x, refCount=%d\r\n",
                        cookie, (void *)bp, bp->fid.cell, bp->fid.volume,
                        bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
                        bp->offset.LowPart, bp->dataVersion, bp->flags, bp->qFlags,
                        bp->cmFlags, bp->error, bp->refCount);
        WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
    }
    StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_dirtyListp.\r\n", cookie);
    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);

    if (lock)
        lock_ReleaseRead(&buf_globalLock);
    return 0;
}
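/*
 * Usage sketch (hypothetical, compiled out): the dump routine is meant to
 * be driven from a debug handler that owns the output handle; pass
 * lock=0 only when the caller already holds buf_globalLock.
 */
#ifdef BUF_USAGE_EXAMPLES
static void example_DumpBuffers(FILE *outputFile)
{
    cm_DumpBufHashTable(outputFile, "=buf=", 1 /* take buf_globalLock */);
}
#endif /* BUF_USAGE_EXAMPLES */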
void buf_ForceTrace(BOOL flush)
{
    HANDLE handle;
    int len;
    char buf[256];

    if (!buf_logp)
        return;

    len = GetTempPath(sizeof(buf)-10, buf);
    StringCbCopyA(&buf[len], sizeof(buf)-len, "/afs-buffer.log");
    handle = CreateFile(buf, GENERIC_WRITE, FILE_SHARE_READ,
                        NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
    if (handle == INVALID_HANDLE_VALUE) {
        osi_panic("Cannot create log file", __FILE__, __LINE__);
    }
    osi_LogPrint(buf_logp, handle);
    if (flush)
        FlushFileBuffers(handle);
    CloseHandle(handle);
}
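/*
 * Usage sketch (hypothetical, compiled out): a fatal-error handler would
 * snapshot the in-memory trace to %TEMP%/afs-buffer.log before dying:
 */
#ifdef BUF_USAGE_EXAMPLES
static void example_OnFatalError(void)
{
    buf_ForceTrace(TRUE);   /* TRUE forces FlushFileBuffers on the log */
}
#endif /* BUF_USAGE_EXAMPLES */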
long buf_DirtyBuffersExist(cm_fid_t *fidp)
{
    cm_buf_t *bp;
    afs_uint32 i, bcount = 0;
    long found = 0;

    i = BUF_FILEHASH(fidp);

    lock_ObtainRead(&buf_globalLock);
    for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp=bp->fileHashp, bcount++) {
        if (!cm_FidCmp(fidp, &bp->fid) && (bp->flags & CM_BUF_DIRTY)) {
            found = 1;
            break;
        }
    }
    lock_ReleaseRead(&buf_globalLock);
    return found;
}

long buf_RDRBuffersExist(cm_fid_t *fidp)
{
    cm_buf_t *bp;
    afs_uint32 i, bcount = 0;
    long found = 0;

    if (!RDR_Initialized)
        return 0;

    i = BUF_FILEHASH(fidp);

    lock_ObtainRead(&buf_globalLock);
    for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp=bp->fileHashp, bcount++) {
        if (!cm_FidCmp(fidp, &bp->fid) && (bp->qFlags & CM_BUF_QREDIR)) {
            found = 1;
            break;
        }
    }
    lock_ReleaseRead(&buf_globalLock);
    return found;
}
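/*
 * Usage sketch (hypothetical, compiled out): both predicates above are
 * cheap hash-chain walks, so callers can use them to skip heavier work:
 */
#ifdef BUF_USAGE_EXAMPLES
static int example_NeedsWriteBack(cm_scache_t *scp)
{
    /* nothing to store if no buffer for this FID is dirty */
    return buf_DirtyBuffersExist(&scp->fid) != 0;
}
#endif /* BUF_USAGE_EXAMPLES */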
long buf_ClearRDRFlag(cm_scache_t *scp, char *reason)
{
    cm_fid_t *fidp = &scp->fid;
    cm_buf_t *bp;
    afs_uint32 i, bcount = 0;

    i = BUF_FILEHASH(fidp);

    lock_ObtainWrite(&scp->rw);
    lock_ObtainRead(&buf_globalLock);
    for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp=bp->fileHashp, bcount++) {
        if (!cm_FidCmp(fidp, &bp->fid) && (bp->qFlags & CM_BUF_QREDIR)) {
            lock_ConvertRToW(&buf_globalLock);
            if (bp->qFlags & CM_BUF_QREDIR) {
                osi_Log4(buf_logp,"buf_ClearRDRFlag taking from file system bp 0x%p vno 0x%x foffset 0x%x:%x",
                          bp, bp->fid.vnode, bp->offset.HighPart, bp->offset.LowPart);
                buf_RemoveFromRedirQueue(scp, bp);
                buf_ReleaseLocked(bp, TRUE);
            }
            lock_ConvertWToR(&buf_globalLock);
        }
    }

    /* Confirm that there are none left */
    lock_ConvertRToW(&buf_globalLock);
    for ( bp = redirq_to_cm_buf_t(scp->redirQueueT);
          bp;
          bp = redirq_to_cm_buf_t(scp->redirQueueT))
    {
        if (bp->qFlags & CM_BUF_QREDIR) {
            osi_Log4(buf_logp,"buf_ClearRDRFlag taking from file system bufp 0x%p vno 0x%x foffset 0x%x:%x",
                      bp, bp->fid.vnode, bp->offset.HighPart, bp->offset.LowPart);
            buf_RemoveFromRedirQueue(scp, bp);
            buf_ReleaseLocked(bp, TRUE);
        }
    }
    lock_ReleaseWrite(&buf_globalLock);
    lock_ReleaseWrite(&scp->rw);

    return 0;
}
long buf_CleanDirtyBuffers(cm_scache_t *scp)
{
    cm_buf_t *bp;
    afs_uint32 bcount = 0;
    cm_fid_t * fidp = &scp->fid;

    for (bp = cm_data.buf_allp; bp; bp=bp->allp, bcount++) {
        if (!cm_FidCmp(fidp, &bp->fid) && (bp->flags & CM_BUF_DIRTY)) {
            buf_Hold(bp);
            lock_ObtainMutex(&bp->mx);
            _InterlockedAnd(&bp->cmFlags, ~CM_BUF_CMSTORING);
            _InterlockedAnd(&bp->flags, ~CM_BUF_DIRTY);
            bp->dirty_offset = 0;
            bp->dirty_length = 0;
            _InterlockedOr(&bp->flags, CM_BUF_ERROR);
            bp->error = VNOVNODE;
            bp->dataVersion = CM_BUF_VERSION_BAD; /* known bad */
            if (bp->flags & CM_BUF_WAITING) {
                osi_Log2(buf_logp, "BUF CleanDirtyBuffers Waking [scp 0x%x] bp 0x%x", scp, bp);
                osi_Wakeup((long) bp);  /* wake sleepers keyed on the buffer,
                                         * not on the local pointer variable */
            }
            lock_ReleaseMutex(&bp->mx);
            buf_Release(bp);
        }
    }
    return 0;
}
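/*
 * Usage sketch (hypothetical, compiled out): the natural trigger for the
 * discard above is VNOVNODE from the file server, i.e. the vnode is gone
 * and its dirty data can never be stored:
 */
#ifdef BUF_USAGE_EXAMPLES
static void example_HandleVNOVNODE(cm_scache_t *scp)
{
    if (buf_DirtyBuffersExist(&scp->fid))
        buf_CleanDirtyBuffers(scp);  /* mark dirty pages in error, wake waiters */
}
#endif /* BUF_USAGE_EXAMPLES */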
/*
 * The following routines will not be used on a
 * regular basis but are very useful in a variety
 * of scenarios when debugging data corruption.
 */
char *
buf_HexCheckSum(cm_buf_t * bp)
{
    int i, k;
    static char buf[33];
    static char tr[16] = {'0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'};

    for (i=0;i<16;i++) {
        k = bp->md5cksum[i];
        buf[i*2] = tr[k / 16];
        buf[i*2+1] = tr[k % 16];
    }
    buf[32] = '\0';
    return buf;
}

void
buf_ComputeCheckSum(cm_buf_t * bp)
{
    MD5_CTX md5;

    MD5_Init(&md5);
    MD5_Update(&md5, bp->datap, cm_data.blockSize);
    MD5_Final(bp->md5cksum, &md5);

    osi_Log4(buf_logp, "CheckSum bp 0x%p md5 %s, dirty: offset %u length %u",
             bp, osi_LogSaveString(buf_logp, buf_HexCheckSum(bp)),
             bp->dirty_offset, bp->dirty_length);
}

int
buf_ValidateCheckSum(cm_buf_t * bp)
{
    MD5_CTX md5;
    unsigned char tmp[16];

    MD5_Init(&md5);
    MD5_Update(&md5, bp->datap, cm_data.blockSize);
    MD5_Final(tmp, &md5);

    if (memcmp(tmp, bp->md5cksum, 16) == 0)
        return 1;
    return 0;
}
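/*
 * Usage sketch (hypothetical, compiled out): the intended use of the
 * checksum helpers is a compute/validate pair bracketing an operation
 * that must not modify the buffer.  The guard macro and function name
 * are assumptions, not part of the original module.
 */
#ifdef BUF_USAGE_EXAMPLES
static void example_ChecksumRoundTrip(cm_buf_t *bp)
{
    /* record the digest of the current contents */
    buf_ComputeCheckSum(bp);

    /* ... hand the buffer to code that must not alter bp->datap ... */

    /* a mismatch here means the data changed under us */
    if (!buf_ValidateCheckSum(bp))
        osi_panic("buffer checksum mismatch", __FILE__, __LINE__);
}
#endif /* BUF_USAGE_EXAMPLES */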
void
buf_InsertToRedirQueue(cm_scache_t *scp, cm_buf_t *bufp)
{
    lock_AssertWrite(&buf_globalLock);

    lock_ObtainMutex(&scp->redirMx);

    /* a buffer handed to the redirector leaves the buffer LRU */
    if (bufp->qFlags & CM_BUF_QINLRU) {
        _InterlockedAnd(&bufp->qFlags, ~CM_BUF_QINLRU);
        osi_QRemoveHT( (osi_queue_t **) &cm_data.buf_freeListp,
                       (osi_queue_t **) &cm_data.buf_freeListEndp,
                       &bufp->q);
        buf_DecrementFreeCount();
    }
    _InterlockedOr(&bufp->qFlags, CM_BUF_QREDIR);
    osi_QAddH( (osi_queue_t **) &cm_data.buf_redirListp,
               (osi_queue_t **) &cm_data.buf_redirListEndp,
               &bufp->q);
    buf_IncrementRedirCount();
    bufp->redirLastAccess = time(NULL);

    /* track the buffer on the per-vnode redirector queue as well */
    osi_QAddH( (osi_queue_t **) &scp->redirQueueH,
               (osi_queue_t **) &scp->redirQueueT,
               &bufp->redirq);
    scp->redirLastAccess = bufp->redirLastAccess;
    InterlockedIncrement(&scp->redirBufCount);

    lock_ReleaseMutex(&scp->redirMx);
}

void
buf_RemoveFromRedirQueue(cm_scache_t *scp, cm_buf_t *bufp)
{
    lock_AssertWrite(&buf_globalLock);

    lock_ObtainMutex(&scp->redirMx);

    _InterlockedAnd(&bufp->qFlags, ~CM_BUF_QREDIR);
    osi_QRemoveHT( (osi_queue_t **) &cm_data.buf_redirListp,
                   (osi_queue_t **) &cm_data.buf_redirListEndp,
                   &bufp->q);
    buf_DecrementRedirCount();

    osi_QRemoveHT( (osi_queue_t **) &scp->redirQueueH,
                   (osi_queue_t **) &scp->redirQueueT,
                   &bufp->redirq);
    InterlockedDecrement(&scp->redirBufCount);

    lock_ReleaseMutex(&scp->redirMx);
}

void
buf_MoveToHeadOfRedirQueue(cm_scache_t *scp, cm_buf_t *bufp)
{
    lock_AssertWrite(&buf_globalLock);
    osi_assertx(bufp->qFlags & CM_BUF_QREDIR,
                "buf_MoveToHeadOfRedirQueue buffer not held by redirector");

    lock_ObtainMutex(&scp->redirMx);

    /* requeue at the head of both lists to record the recent access */
    osi_QRemoveHT( (osi_queue_t **) &cm_data.buf_redirListp,
                   (osi_queue_t **) &cm_data.buf_redirListEndp,
                   &bufp->q);
    osi_QAddH( (osi_queue_t **) &cm_data.buf_redirListp,
               (osi_queue_t **) &cm_data.buf_redirListEndp,
               &bufp->q);
    bufp->redirLastAccess = time(NULL);

    osi_QRemoveHT( (osi_queue_t **) &scp->redirQueueH,
                   (osi_queue_t **) &scp->redirQueueT,
                   &bufp->redirq);
    osi_QAddH( (osi_queue_t **) &scp->redirQueueH,
               (osi_queue_t **) &scp->redirQueueT,
               &bufp->redirq);
    scp->redirLastAccess = bufp->redirLastAccess;

    lock_ReleaseMutex(&scp->redirMx);
}
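/*
 * Usage sketch (hypothetical, compiled out): together the three routines
 * above keep both redirector lists in most-recently-used order.  A
 * reference from the redirector would typically be refreshed like this,
 * under the usual lock ordering (buf_globalLock write-locked before
 * scp->redirMx):
 */
#ifdef BUF_USAGE_EXAMPLES
static void example_TouchRedirBuffer(cm_scache_t *scp, cm_buf_t *bufp)
{
    lock_ObtainWrite(&buf_globalLock);
    if (bufp->qFlags & CM_BUF_QREDIR)
        buf_MoveToHeadOfRedirQueue(scp, bufp);  /* refresh MRU position */
    lock_ReleaseWrite(&buf_globalLock);
}
#endif /* BUF_USAGE_EXAMPLES */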