windows-buf-redirector-20080528
diff --git a/src/WINNT/afsd/cm_buf.c b/src/WINNT/afsd/cm_buf.c
index 2052d3e..d97739f 100644
@@ -87,33 +87,81 @@ extern int cm_diskCacheEnabled;
 /* set this to 1 when we are terminating to prevent access attempts */
 static int buf_ShutdownFlag = 0;
 
+#ifdef DEBUG_REFCOUNT
+void buf_HoldLockedDbg(cm_buf_t *bp, char *file, long line)
+#else
 void buf_HoldLocked(cm_buf_t *bp)
+#endif
 {
-    osi_assert(bp->magic == CM_BUF_MAGIC);
-    bp->refCount++;
+    afs_int32 refCount;
+
+    osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
+    refCount = InterlockedIncrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+    osi_Log2(afsd_logp,"buf_HoldLocked bp 0x%p ref %d",bp, refCount);
+    afsi_log("%s:%d buf_HoldLocked bp 0x%p, ref %d", file, line, bp, refCount);
+#endif
 }
 
 /* hold a reference to an already held buffer */
+#ifdef DEBUG_REFCOUNT
+void buf_HoldDbg(cm_buf_t *bp, char *file, long line)
+#else
 void buf_Hold(cm_buf_t *bp)
+#endif
 {
-    lock_ObtainWrite(&buf_globalLock);
-    buf_HoldLocked(bp);
-    lock_ReleaseWrite(&buf_globalLock);
+    afs_int32 refCount;
+
+    lock_ObtainRead(&buf_globalLock);
+    osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
+    refCount = InterlockedIncrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+    osi_Log2(afsd_logp,"buf_Hold bp 0x%p ref %d",bp, refCount);
+    afsi_log("%s:%d buf_Hold bp 0x%p, ref %d", file, line, bp, refCount);
+#endif
+    lock_ReleaseRead(&buf_globalLock);
 }
 
 /* code to drop reference count while holding buf_globalLock */
-void buf_ReleaseLocked(cm_buf_t *bp)
+#ifdef DEBUG_REFCOUNT
+void buf_ReleaseLockedDbg(cm_buf_t *bp, afs_uint32 writeLocked, char *file, long line)
+#else
+void buf_ReleaseLocked(cm_buf_t *bp, afs_uint32 writeLocked)
+#endif
 {
+    afs_int32 refCount;
+
+    if (writeLocked)
+        lock_AssertWrite(&buf_globalLock);
+    else
+        lock_AssertRead(&buf_globalLock);
+
     /* ensure that we're in the LRU queue if our ref count is 0 */
-    osi_assert(bp->magic == CM_BUF_MAGIC);
+    osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
+
+    refCount = InterlockedDecrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+    osi_Log3(afsd_logp,"buf_ReleaseLocked %s bp 0x%p ref %d",writeLocked?"write":"read", bp, refCount);
+    afsi_log("%s:%d buf_ReleaseLocked %s bp 0x%p, ref %d", file, line, writeLocked?"write":"read", bp, refCount);
+#endif
 #ifdef DEBUG
-    if (bp->refCount == 0)
+    if (refCount < 0)
        osi_panic("buf refcount 0",__FILE__,__LINE__);;
 #else
-    osi_assert(bp->refCount > 0);
+    osi_assertx(refCount >= 0, "cm_buf_t refCount < 0");
 #endif
-    if (--bp->refCount == 0) {
-        if (!(bp->flags & CM_BUF_INLRU)) {
+    if (refCount == 0) {
+        /* 
+         * If we are read locked there could be a race condition
+         * with buf_Find() so we must obtain a write lock and
+         * double check that the refCount is actually zero
+         * before we add the buffer to the LRU queue.
+         */
+        if (!writeLocked)
+            lock_ConvertRToW(&buf_globalLock);
+
+        if (bp->refCount == 0 &&
+            !(bp->flags & CM_BUF_INLRU)) {
             osi_QAdd((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
 
             /* watch for transition from empty to one element */
@@ -121,68 +169,116 @@ void buf_ReleaseLocked(cm_buf_t *bp)
                 cm_data.buf_freeListEndp = cm_data.buf_freeListp;
             bp->flags |= CM_BUF_INLRU;
         }
+
+        if (!writeLocked)
+            lock_ConvertWToR(&buf_globalLock);
     }
 }       
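
The race described in the comment above drives the rest of the refcount rework: holds and releases now use interlocked operations under the global read lock, and only the transition to a zero refCount needs the write lock. A simplified sketch of the release pattern, assuming the same cm_buf.c environment (DEBUG_REFCOUNT logging and assertions omitted; an illustration of the code above, not a substitute for it):

    /* Drop a reference taken under the read lock.  If the count hits zero,
     * upgrade to the write lock and re-check, because a concurrent buf_Find()
     * running under the read lock may have re-held the buffer in the
     * meantime. */
    lock_ObtainRead(&buf_globalLock);
    if (InterlockedDecrement(&bp->refCount) == 0) {
        lock_ConvertRToW(&buf_globalLock);
        if (bp->refCount == 0 && !(bp->flags & CM_BUF_INLRU)) {
            osi_QAdd((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
            if (!cm_data.buf_freeListEndp)
                cm_data.buf_freeListEndp = cm_data.buf_freeListp;
            bp->flags |= CM_BUF_INLRU;
        }
        lock_ConvertWToR(&buf_globalLock);
    }
    lock_ReleaseRead(&buf_globalLock);
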
 
 /* release a buffer.  Buffer must be referenced, but unlocked. */
+#ifdef DEBUG_REFCOUNT
+void buf_ReleaseDbg(cm_buf_t *bp, char *file, long line)
+#else
 void buf_Release(cm_buf_t *bp)
+#endif
 {
-    lock_ObtainWrite(&buf_globalLock);
-    buf_ReleaseLocked(bp);
-    lock_ReleaseWrite(&buf_globalLock);
+    afs_int32 refCount;
+
+    /* ensure that we're in the LRU queue if our ref count is 0 */
+    osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
+
+    refCount = InterlockedDecrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+    osi_Log2(afsd_logp,"buf_Release bp 0x%p ref %d", bp, refCount);
+    afsi_log("%s:%d buf_ReleaseLocked bp 0x%p, ref %d", file, line, bp, refCount);
+#endif
+#ifdef DEBUG
+    if (refCount < 0)
+       osi_panic("buf refcount 0",__FILE__,__LINE__);;
+#else
+    osi_assertx(refCount >= 0, "cm_buf_t refCount == 0");
+#endif
+    if (refCount == 0) {
+        lock_ObtainWrite(&buf_globalLock);
+        if (bp->refCount == 0 && 
+            !(bp->flags & CM_BUF_INLRU)) {
+            osi_QAdd((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
+
+            /* watch for transition from empty to one element */
+            if (!cm_data.buf_freeListEndp)
+                cm_data.buf_freeListEndp = cm_data.buf_freeListp;
+            bp->flags |= CM_BUF_INLRU;
+        }
+        lock_ReleaseWrite(&buf_globalLock);
+    }
 }
 
-/* incremental sync daemon.  Writes 1/10th of all the buffers every 5000 ms */
+/* incremental sync daemon.  Writes all dirty buffers every 5000 ms */
 void buf_IncrSyncer(long parm)
 {
-    cm_buf_t *bp;                      /* buffer we're hacking on; held */
+    cm_buf_t **bpp, *bp, *prevbp;
     long i;                            /* counter */
-    long wasDirty;
+    long wasDirty = 0;
     cm_req_t req;
 
-    lock_ObtainWrite(&buf_globalLock);
-    bp = cm_data.buf_allp;
-    buf_HoldLocked(bp);
-    lock_ReleaseWrite(&buf_globalLock);
-    wasDirty = 0;
-
     while (buf_ShutdownFlag == 0) {
         if (!wasDirty) {
             i = SleepEx(5000, 1);
             if (i != 0) continue;
        }
 
-        if (buf_ShutdownFlag == 1)
-            return;
-
        wasDirty = 0;
 
-        /* now go through our percentage of the buffers */
-        for (i=0; i<cm_data.buf_nbuffers; i++) {
-            /* don't want its identity changing while we're
-             * messing with it, so must do all of this with
-             * bp held.
-             */
+        /* go through all of the dirty buffers */
+        lock_ObtainRead(&buf_globalLock);
+        for (bpp = &cm_data.buf_dirtyListp, prevbp = NULL; bp = *bpp; ) {
+            lock_ReleaseRead(&buf_globalLock);
+           /* all dirty buffers are held when they are added to the
+            * dirty list.  No need for an additional hold.
+            */
+            lock_ObtainMutex(&bp->mx);
 
-            /* start cleaning the buffer; don't touch log pages since
-             * the log code counts on knowing exactly who is writing
-             * a log page at any given instant.
-             */
-            cm_InitReq(&req);
-            req.flags |= CM_REQ_NORETRY;
-           wasDirty |= buf_CleanAsync(bp, &req);
+           if (bp->flags & CM_BUF_DIRTY) {
+               /* start cleaning the buffer; don't touch log pages since
+                * the log code counts on knowing exactly who is writing
+                * a log page at any given instant.
+                */
+               cm_InitReq(&req);
+               req.flags |= CM_REQ_NORETRY;
+               wasDirty |= buf_CleanAsyncLocked(bp, &req);
+           }
 
-            /* now advance to the next buffer; the allp chain never changes,
-             * and so can be followed even when holding no locks.
-             */
-            lock_ObtainWrite(&buf_globalLock);
-            buf_ReleaseLocked(bp);
-            bp = bp->allp;
-            if (!bp) 
-                bp = cm_data.buf_allp;
-           buf_HoldLocked(bp);
-            lock_ReleaseWrite(&buf_globalLock);
+           /* the buffer may or may not have been dirty, and if it was
+            * dirty it may or may not have been cleaned successfully.
+            * Check the dirty flag again.
+            */
+            if (!(bp->flags & CM_BUF_DIRTY)) {
+                /* remove the buffer from the dirty list */
+                lock_ObtainWrite(&buf_globalLock);
+#ifdef DEBUG_REFCOUNT
+                if (bp->dirtyp == NULL && bp != cm_data.buf_dirtyListEndp) {
+                    osi_Log1(afsd_logp,"buf_IncrSyncer bp 0x%p list corruption",bp);
+                    afsi_log("buf_IncrSyncer bp 0x%p list corruption", bp);
+                }
+#endif
+                *bpp = bp->dirtyp;
+                bp->dirtyp = NULL;
+                bp->flags &= ~CM_BUF_INDL;
+                if (cm_data.buf_dirtyListp == NULL)
+                    cm_data.buf_dirtyListEndp = NULL;
+                else if (cm_data.buf_dirtyListEndp == bp)
+                    cm_data.buf_dirtyListEndp = prevbp;
+                buf_ReleaseLocked(bp, TRUE);
+                lock_ConvertWToR(&buf_globalLock);
+            } else {
+                /* advance the pointer so we don't loop forever */
+                lock_ObtainRead(&buf_globalLock);
+                bpp = &bp->dirtyp;
+                prevbp = bp;
+            }
+            lock_ReleaseMutex(&bp->mx);
         }      /* for loop over a bunch of buffers */
+        lock_ReleaseRead(&buf_globalLock);
     }          /* whole daemon's while loop */
 }
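
buf_IncrSyncer walks the dirty list with a pointer-to-a-pointer so that a buffer which turned out clean can be spliced out without special-casing the list head, while prevbp tracks the candidate for the new tail. A self-contained sketch of the same idiom on a hypothetical node type (illustration only, not part of the patch):

    typedef struct node {
        struct node *next;
        int dirty;
    } node_t;

    /* Remove every clean node from a singly linked list while keeping the
     * tail pointer accurate -- the same shape as the dirtyp walk above. */
    static void prune_clean(node_t **headp, node_t **tailp)
    {
        node_t **npp, *np, *prev = NULL;

        for (npp = headp; (np = *npp) != NULL; ) {
            if (!np->dirty) {
                *npp = np->next;        /* unlink; works for the head as well */
                np->next = NULL;
                if (*headp == NULL)
                    *tailp = NULL;      /* list became empty */
                else if (*tailp == np)
                    *tailp = prev;      /* the old tail was removed */
            } else {
                prev = np;
                npp = &np->next;        /* keep this node and advance past it */
            }
        }
    }
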
 
@@ -300,10 +396,10 @@ long buf_Init(int newFile, cm_buf_ops_t *opsp, afs_uint64 nbuffers)
             cm_data.buf_nOrigBuffers = cm_data.buf_nbuffers;
  
             /* lower hash size to a prime number */
-            cm_data.buf_hashSize = osi_PrimeLessThan(CM_BUF_HASHSIZE);
+           cm_data.buf_hashSize = osi_PrimeLessThan((afs_uint32)(cm_data.buf_nbuffers/7 + 1));
  
             /* create hash table */
-            memset((void *)cm_data.buf_hashTablepp, 0, cm_data.buf_hashSize * sizeof(cm_buf_t *));
+            memset((void *)cm_data.buf_scacheHashTablepp, 0, cm_data.buf_hashSize * sizeof(cm_buf_t *));
             
             /* another hash table */
             memset((void *)cm_data.buf_fileHashTablepp, 0, cm_data.buf_hashSize * sizeof(cm_buf_t *));
@@ -314,8 +410,10 @@ long buf_Init(int newFile, cm_buf_ops_t *opsp, afs_uint64 nbuffers)
             cm_data.buf_allp = NULL;
             
             for (i=0; i<cm_data.buf_nbuffers; i++) {
-                osi_assert(bp >= cm_data.bufHeaderBaseAddress && bp < (cm_buf_t *)cm_data.bufDataBaseAddress);
-                osi_assert(data >= cm_data.bufDataBaseAddress && data < cm_data.bufEndOfData);
+                osi_assertx(bp >= cm_data.bufHeaderBaseAddress && bp < (cm_buf_t *)cm_data.bufDataBaseAddress, 
+                            "invalid cm_buf_t address");
+                osi_assertx(data >= cm_data.bufDataBaseAddress && data < cm_data.bufEndOfData,
+                            "invalid cm_buf_t data address");
                 
                 /* allocate and zero some storage */
                 memset(bp, 0, sizeof(cm_buf_t));
@@ -429,8 +527,8 @@ void buf_WaitIO(cm_scache_t * scp, cm_buf_t *bp)
     int release = 0;
 
     if (scp)
-        osi_assert(scp->magic == CM_SCACHE_MAGIC);
-    osi_assert(bp->magic == CM_BUF_MAGIC);
+        osi_assertx(scp->magic == CM_SCACHE_MAGIC, "invalid cm_scache_t magic");
+    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
 
     while (1) {
         /* if no IO is happening, we're done */
@@ -468,12 +566,12 @@ void buf_WaitIO(cm_scache_t * scp, cm_buf_t *bp)
                 release = 1;
         }
         if ( scp ) {
-            lock_ObtainMutex(&scp->mx);
+            lock_ObtainRead(&scp->rw);
             if (scp->flags & CM_SCACHEFLAG_WAITING) {
                 osi_Log1(buf_logp, "buf_WaitIO waking scp 0x%p", scp);
                 osi_Wakeup((LONG_PTR)&scp->flags);
             }
-           lock_ReleaseMutex(&scp->mx);
+           lock_ReleaseRead(&scp->rw);
         }
     }
         
@@ -495,11 +593,11 @@ void buf_WaitIO(cm_scache_t * scp, cm_buf_t *bp)
  */
 cm_buf_t *buf_FindLocked(struct cm_scache *scp, osi_hyper_t *offsetp)
 {
-    long i;
+    afs_uint32 i;
     cm_buf_t *bp;
 
     i = BUF_HASH(&scp->fid, offsetp);
-    for(bp = cm_data.buf_hashTablepp[i]; bp; bp=bp->hashp) {
+    for(bp = cm_data.buf_scacheHashTablepp[i]; bp; bp=bp->hashp) {
         if (cm_FidCmp(&scp->fid, &bp->fid) == 0
              && offsetp->LowPart == bp->offset.LowPart
              && offsetp->HighPart == bp->offset.HighPart) {
@@ -519,9 +617,9 @@ cm_buf_t *buf_Find(struct cm_scache *scp, osi_hyper_t *offsetp)
 {
     cm_buf_t *bp;
 
-    lock_ObtainWrite(&buf_globalLock);
+    lock_ObtainRead(&buf_globalLock);
     bp = buf_FindLocked(scp, offsetp);
-    lock_ReleaseWrite(&buf_globalLock);
+    lock_ReleaseRead(&buf_globalLock);
 
     return bp;
 }       
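
A hedged usage sketch of the lookup path after this change (the caller and the pageOffset variable are assumptions for illustration, not part of the patch): buf_Find now takes only the global read lock, and the reference it returns is dropped with buf_Release:

    /* hypothetical caller: look up a cached page, use it, drop the hold */
    cm_buf_t *bp = buf_Find(scp, &pageOffset);
    if (bp) {
        lock_ObtainMutex(&bp->mx);
        /* ... inspect or modify bp->datap for this page ... */
        lock_ReleaseMutex(&bp->mx);
        buf_Release(bp);            /* drop the reference buf_Find returned */
    }
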
@@ -540,8 +638,9 @@ long buf_CleanAsyncLocked(cm_buf_t *bp, cm_req_t *reqp)
     long code = 0;
     long isdirty = 0;
     cm_scache_t * scp = NULL;
+    osi_hyper_t offset;
 
-    osi_assert(bp->magic == CM_BUF_MAGIC);
+    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
 
     while ((bp->flags & CM_BUF_DIRTY) == CM_BUF_DIRTY) {
        isdirty = 1;
@@ -550,9 +649,19 @@ long buf_CleanAsyncLocked(cm_buf_t *bp, cm_req_t *reqp)
        scp = cm_FindSCache(&bp->fid);
        if (scp) {
            osi_Log2(buf_logp, "buf_CleanAsyncLocked starts I/O on scp 0x%p buf 0x%p", scp, bp);
-           code = (*cm_buf_opsp->Writep)(scp, &bp->offset,
-                                          cm_data.buf_blockSize, 0, bp->userp,
-                                          reqp);
+
+            offset = bp->offset;
+            offset = LargeIntegerAdd(offset, ConvertLongToLargeInteger(bp->dirty_offset));
+           code = (*cm_buf_opsp->Writep)(scp, &offset, 
+#if 1
+                                           /* we might as well try to write all of the contiguous 
+                                            * dirty buffers in one RPC 
+                                            */
+                                           cm_chunkSize,
+#else
+                                          bp->dirty_length, 
+#endif
+                                          0, bp->userp, reqp);
            osi_Log3(buf_logp, "buf_CleanAsyncLocked I/O on scp 0x%p buf 0x%p, done=%d", scp, bp, code);
 
            cm_ReleaseSCache(scp);
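
For reference, a sketch of the offset arithmetic feeding the Writep call above, assuming the dirty sub-range recorded by buf_SetDirty later in this patch (LargeIntegerAdd and ConvertLongToLargeInteger return their results rather than modifying their arguments):

    osi_hyper_t start;

    /* file position of the first dirty byte in this buffer */
    start = LargeIntegerAdd(bp->offset,
                            ConvertLongToLargeInteger(bp->dirty_offset));

    /* Passing cm_chunkSize as the length lets the store operation sweep up
     * any contiguous dirty buffers in the same chunk in a single RPC;
     * passing bp->dirty_length instead would write only the bytes this
     * buffer actually marked dirty. */
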
@@ -567,11 +676,13 @@ long buf_CleanAsyncLocked(cm_buf_t *bp, cm_req_t *reqp)
         * because we aren't going to be able to write this data to the file
         * server.
         */
-       if (code == CM_ERROR_NOSUCHFILE){
+       if (code == CM_ERROR_NOSUCHFILE || code == CM_ERROR_BADFD){
            bp->flags &= ~CM_BUF_DIRTY;
            bp->flags |= CM_BUF_ERROR;
-           bp->error = CM_ERROR_NOSUCHFILE;
-           bp->dataVersion = -1; /* bad */
+            bp->dirty_offset = 0;
+            bp->dirty_length = 0;
+           bp->error = code;
+           bp->dataVersion = CM_BUF_VERSION_BAD; /* bad */
            bp->dirtyCounter++;
        }
 
@@ -589,6 +700,11 @@ long buf_CleanAsyncLocked(cm_buf_t *bp, cm_req_t *reqp)
            break;
     };
 
+    if (!(bp->flags & CM_BUF_DIRTY)) {
+       /* remove buffer from dirty buffer queue */
+
+    }
+
     /* do logging after call to GetLastError, or else */
         
     /* if someone was waiting for the I/O that just completed or failed,
@@ -608,12 +724,12 @@ long buf_CleanAsyncLocked(cm_buf_t *bp, cm_req_t *reqp)
  */
 void buf_Recycle(cm_buf_t *bp)
 {
-    int i;
+    afs_uint32 i;
     cm_buf_t **lbpp;
     cm_buf_t *tbp;
     cm_buf_t *prevBp, *nextBp;
 
-    osi_assert(bp->magic == CM_BUF_MAGIC);
+    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
 
     /* if we get here, we know that the buffer still has a 0 ref count,
      * and that it is clean and has no currently pending I/O.  This is
@@ -622,32 +738,37 @@ void buf_Recycle(cm_buf_t *bp)
      * have any lock conflicts, so we can grab the buffer lock out of
      * order in the locking hierarchy.
      */
-    osi_Log2( buf_logp, "buf_Recycle recycles 0x%p, off 0x%x",
-              bp, bp->offset.LowPart);
+    osi_Log3( buf_logp, "buf_Recycle recycles 0x%p, off 0x%x:%08x",
+              bp, bp->offset.HighPart, bp->offset.LowPart);
 
-    osi_assert(bp->refCount == 0);
-    osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)));
+    osi_assertx(bp->refCount == 0, "cm_buf_t refcount != 0");
+    osi_assertx(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)),
+                "incorrect cm_buf_t flags");
     lock_AssertWrite(&buf_globalLock);
 
     if (bp->flags & CM_BUF_INHASH) {
         /* Remove from hash */
 
         i = BUF_HASH(&bp->fid, &bp->offset);
-        lbpp = &(cm_data.buf_hashTablepp[i]);
+        lbpp = &(cm_data.buf_scacheHashTablepp[i]);
         for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) {
-            if (tbp == bp) break;
+            if (tbp == bp) 
+                break;
         }
 
         /* we better find it */
         osi_assertx(tbp != NULL, "buf_Recycle: hash table screwup");
 
         *lbpp = bp->hashp;     /* hash out */
+        bp->hashp = NULL;
 
         /* Remove from file hash */
 
         i = BUF_FILEHASH(&bp->fid);
         prevBp = bp->fileHashBackp;
+        bp->fileHashBackp = NULL;
         nextBp = bp->fileHashp;
+        bp->fileHashp = NULL;
         if (prevBp)
             prevBp->fileHashp = nextBp;
         else
@@ -658,12 +779,6 @@ void buf_Recycle(cm_buf_t *bp)
         bp->flags &= ~CM_BUF_INHASH;
     }
 
-    /* bump the soft reference counter now, to invalidate softRefs; no
-     * wakeup is required since people don't sleep waiting for this
-     * counter to change.
-     */
-    bp->idCounter++;
-
     /* make the fid unrecognizable */
     memset(&bp->fid, 0, sizeof(cm_fid_t));
 }       
@@ -681,9 +796,9 @@ void buf_Recycle(cm_buf_t *bp)
  */
 long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
 {
-    cm_buf_t *bp;              /* buffer we're dealing with */
+    cm_buf_t *bp;      /* buffer we're dealing with */
     cm_buf_t *nextBp;  /* next buffer in file hash chain */
-    long i;                    /* temp */
+    afs_uint32 i;      /* temp */
     cm_req_t req;
 
     cm_InitReq(&req);  /* just in case */
@@ -694,6 +809,7 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bu
 
     while(1) {
       retry:
+        lock_ObtainRead(&scp->bufCreateLock);
         lock_ObtainWrite(&buf_globalLock);
         /* check to see if we lost the race */
         if (scp) {
@@ -702,19 +818,26 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bu
                 * do not want to allow the buffer to be added
                 * to the free list.
                 */
-                bp->refCount--;
+                afs_int32 refCount = InterlockedDecrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+                osi_Log2(afsd_logp,"buf_GetNewLocked bp 0x%p ref %d", bp, refCount);
+                afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, refCount);
+#endif
                 lock_ReleaseWrite(&buf_globalLock);
+                lock_ReleaseRead(&scp->bufCreateLock);
                 return CM_BUF_EXISTS;
             }
         }
 
        /* does this fix the problem below?  it's a simple solution. */
        if (!cm_data.buf_freeListEndp)
-           {
+       {
            lock_ReleaseWrite(&buf_globalLock);
+            lock_ReleaseRead(&scp->bufCreateLock);
+           osi_Log0(afsd_logp, "buf_GetNewLocked: Free Buffer List is empty - sleeping 200ms");
            Sleep(200);
            goto retry;
-           }
+       }
 
         /* for debugging, assert free list isn't empty, although we
          * really should try waiting for a running transaction to finish
@@ -741,6 +864,10 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bu
              * we hold the global lock.
              */
 
+            /* Don't recycle a buffer held by the redirector. */
+            if (bp->flags & CM_BUF_REDIR)
+                continue;
+
             /* don't recycle someone in our own chunk */
             if (!cm_FidCmp(&bp->fid, &scp->fid)
                  && (bp->offset.LowPart & (-cm_chunkSize))
@@ -767,6 +894,7 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bu
                  */
                 buf_HoldLocked(bp);
                 lock_ReleaseWrite(&buf_globalLock);
+                lock_ReleaseRead(&scp->bufCreateLock);
 
                 /* grab required lock and clean; this only
                  * starts the I/O.  By the time we're back,
@@ -792,7 +920,7 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bu
 
             /* clean up junk flags */
             bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
-            bp->dataVersion = -1;      /* unknown so far */
+            bp->dataVersion = CM_BUF_VERSION_BAD;      /* unknown so far */
 
             /* now hash in as our new buffer, and give it the
              * appropriate label, if requested.
@@ -805,8 +933,8 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bu
 #endif
                 bp->offset = *offsetp;
                 i = BUF_HASH(&scp->fid, offsetp);
-                bp->hashp = cm_data.buf_hashTablepp[i];
-                cm_data.buf_hashTablepp[i] = bp;
+                bp->hashp = cm_data.buf_scacheHashTablepp[i];
+                cm_data.buf_scacheHashTablepp[i] = bp;
                 i = BUF_FILEHASH(&scp->fid);
                 nextBp = cm_data.buf_fileHashTablepp[i];
                 bp->fileHashp = nextBp;
@@ -816,17 +944,12 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bu
                 cm_data.buf_fileHashTablepp[i] = bp;
             }
 
-            /* prepare to return it.  Start by giving it a good
-             * refcount */
-            bp->refCount = 1;
-                        
-            /* and since it has a non-zero ref count, we should move
-             * it from the lru queue.  It better be still there,
-             * since we've held the global (big) lock since we found
-             * it there.
+            /* we should move it from the lru queue.  It better still be there,
+             * since we've held the global (big) lock since we found it there.
              */
             osi_assertx(bp->flags & CM_BUF_INLRU,
                          "buf_GetNewLocked: LRU screwup");
+
             if (cm_data.buf_freeListEndp == bp) {
                 /* we're the last guy in this queue, so maintain it */
                 cm_data.buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
@@ -834,14 +957,24 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bu
             osi_QRemove((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
             bp->flags &= ~CM_BUF_INLRU;
 
-            /* finally, grab the mutex so that people don't use it
+            /* grab the mutex so that people don't use it
              * before the caller fills it with data.  Again, no one    
              * should have been able to get to this dude to lock it.
              */
-            osi_assertx(lock_TryMutex(&bp->mx),
-                         "buf_GetNewLocked: TryMutex failed");
+           if (!lock_TryMutex(&bp->mx)) {
+               osi_Log2(afsd_logp, "buf_GetNewLocked bp 0x%p cannot be mutex locked.  refCount %d should be 0",
+                        bp, bp->refCount);
+               osi_panic("buf_GetNewLocked: TryMutex failed",__FILE__,__LINE__);
+           }
 
+           /* prepare to return it.  Give it a refcount */
+            bp->refCount = 1;
+#ifdef DEBUG_REFCOUNT
+            osi_Log2(afsd_logp,"buf_GetNewLocked bp 0x%p ref %d", bp, 1);
+            afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, 1);
+#endif
             lock_ReleaseWrite(&buf_globalLock);
+            lock_ReleaseRead(&scp->bufCreateLock);
             *bufpp = bp;
 
 #ifdef TESTING
@@ -850,6 +983,9 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bu
             return 0;
         } /* for all buffers in lru queue */
         lock_ReleaseWrite(&buf_globalLock);
+        lock_ReleaseRead(&scp->bufCreateLock);
+       osi_Log0(afsd_logp, "buf_GetNewLocked: Free Buffer List has no buffers with a zero refcount - sleeping 100ms");
+       Sleep(100);             /* give some time for a buffer to be freed */
     }  /* while loop over everything */
     /* not reached */
 } /* the proc */
@@ -902,8 +1038,8 @@ long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
      */
     lock_ReleaseMutex(&bp->mx);
     *bufpp = bp;
-    osi_Log3(buf_logp, "buf_GetNew returning bp 0x%p for scp 0x%p, offset 0x%x",
-              bp, scp, offsetp->LowPart);
+    osi_Log4(buf_logp, "buf_GetNew returning bp 0x%p for scp 0x%p, offset 0x%x:%08x",
+              bp, scp, offsetp->HighPart, offsetp->LowPart);
     return 0;
 }
 
@@ -970,11 +1106,7 @@ long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
      */
     if (created) {
         /* load the page; freshly created pages should be idle */
-        osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)));
-
-        /* setup offset, event */
-        bp->over.Offset = bp->offset.LowPart;
-        bp->over.OffsetHigh = bp->offset.HighPart;
+        osi_assertx(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)), "incorrect cm_buf_t flags");
 
         /* start the I/O; may drop lock */
         bp->flags |= CM_BUF_READING;
@@ -1044,8 +1176,8 @@ long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
     }
     lock_ReleaseWrite(&buf_globalLock);
 
-    osi_Log3(buf_logp, "buf_Get returning bp 0x%p for scp 0x%p, offset 0x%x",
-              bp, scp, offsetp->LowPart);
+    osi_Log4(buf_logp, "buf_Get returning bp 0x%p for scp 0x%p, offset 0x%x:%08x",
+              bp, scp, offsetp->HighPart, offsetp->LowPart);
 #ifdef TESTING
     buf_ValidateBufQueues();
 #endif /* TESTING */
@@ -1069,7 +1201,7 @@ long buf_CountFreeList(void)
         * has been invalidated (by having its DV stomped upon), then
          * count it as free, since it isn't really being utilized.
          */
-        if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion <= 0)
+        if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion == CM_BUF_VERSION_BAD)
             count++;
     }       
     lock_ReleaseRead(&buf_globalLock);
@@ -1080,7 +1212,7 @@ long buf_CountFreeList(void)
 long buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp)
 {
     long code;
-    osi_assert(bp->magic == CM_BUF_MAGIC);
+    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
 
     lock_ObtainMutex(&bp->mx);
     code = buf_CleanAsyncLocked(bp, reqp);
@@ -1090,15 +1222,17 @@ long buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp)
 }       
 
 /* wait for a buffer's cleaning to finish */
-void buf_CleanWait(cm_scache_t * scp, cm_buf_t *bp)
+void buf_CleanWait(cm_scache_t * scp, cm_buf_t *bp, afs_uint32 locked)
 {
-    osi_assert(bp->magic == CM_BUF_MAGIC);
+    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
 
-    lock_ObtainMutex(&bp->mx);
+    if (!locked)
+        lock_ObtainMutex(&bp->mx);
     if (bp->flags & CM_BUF_WRITING) {
         buf_WaitIO(scp, bp);
     }
-    lock_ReleaseMutex(&bp->mx);
+    if (!locked)
+        lock_ReleaseMutex(&bp->mx);
 }       
 
 /* set the dirty flag on a buffer, and set associated write-ahead log,
@@ -1106,18 +1240,65 @@ void buf_CleanWait(cm_scache_t * scp, cm_buf_t *bp)
  *
  * The buffer must be locked before calling this routine.
  */
-void buf_SetDirty(cm_buf_t *bp)
+void buf_SetDirty(cm_buf_t *bp, afs_uint32 offset, afs_uint32 length)
 {
-    osi_assert(bp->magic == CM_BUF_MAGIC);
-    osi_assert(bp->refCount > 0);
-       
-    osi_Log1(buf_logp, "buf_SetDirty 0x%p", bp);
+    osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
+    osi_assertx(bp->refCount > 0, "cm_buf_t refcount 0");
+
+    if (bp->flags & CM_BUF_DIRTY) {
 
-    /* set dirty bit */
-    bp->flags |= CM_BUF_DIRTY;
+       osi_Log1(buf_logp, "buf_SetDirty 0x%p already dirty", bp);
 
-    /* and turn off EOF flag, since it has associated data now */
-    bp->flags &= ~CM_BUF_EOF;
+        if (bp->dirty_offset <= offset) {
+            if (bp->dirty_offset + bp->dirty_length >= offset + length) {
+                /* dirty_length remains the same */
+            } else {
+                bp->dirty_length = offset + length - bp->dirty_offset;
+            }
+        } else /* bp->dirty_offset > offset */ {
+            if (bp->dirty_offset + bp->dirty_length >= offset + length) {
+                bp->dirty_length = bp->dirty_offset + bp->dirty_length - offset;
+            } else {
+                bp->dirty_length = length;
+            }
+            bp->dirty_offset = offset;
+        }
+    } else {
+       osi_Log1(buf_logp, "buf_SetDirty 0x%p", bp);
+
+        /* set dirty bit */
+        bp->flags |= CM_BUF_DIRTY;
+
+        /* and turn off EOF flag, since it has associated data now */
+        bp->flags &= ~CM_BUF_EOF;
+
+        bp->dirty_offset = offset;
+        bp->dirty_length = length;
+
+        /* and add to the dirty list.  
+         * we obtain a hold on the buffer for as long as it remains 
+         * in the list.  buffers are only removed from the list by 
+         * the buf_IncrSyncer function regardless of when else the
+         * dirty flag might be cleared.
+         *
+         * A buffer should never already be on the list at this point,
+         * but just in case there is a bug elsewhere, never add it to
+         * the dirty list a second time.
+         */
+        lock_ObtainWrite(&buf_globalLock);
+        if (!(bp->flags & CM_BUF_INDL)) {
+            buf_HoldLocked(bp);
+            if (!cm_data.buf_dirtyListp) {
+                cm_data.buf_dirtyListp = cm_data.buf_dirtyListEndp = bp;
+            } else {
+                cm_data.buf_dirtyListEndp->dirtyp = bp;
+                cm_data.buf_dirtyListEndp = bp;
+            }
+            bp->dirtyp = NULL;
+            bp->flags |= CM_BUF_INDL;
+        }
+        lock_ReleaseWrite(&buf_globalLock);
+    }
 }
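
The merge above keeps a single [dirty_offset, dirty_offset + dirty_length) range per buffer and grows it to the union of the old range and the newly written one. A standalone sketch of the same arithmetic (hypothetical helper, not part of the patch):

    /* Grow an existing dirty range [*doffp, *doffp + *dlenp) so that it also
     * covers the newly written range [offset, offset + length).  Mirrors the
     * cases handled inline in buf_SetDirty above. */
    static void merge_dirty_range(afs_uint32 *doffp, afs_uint32 *dlenp,
                                  afs_uint32 offset, afs_uint32 length)
    {
        afs_uint32 old_end = *doffp + *dlenp;
        afs_uint32 new_end = offset + length;

        if (offset < *doffp)
            *doffp = offset;                        /* extend downward */
        *dlenp = (old_end > new_end ? old_end : new_end) - *doffp;
    }
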
 
 /* clean all buffers, reset log pointers and invalidate all buffers.
@@ -1142,33 +1323,33 @@ void buf_SetDirty(cm_buf_t *bp)
  */
 long buf_CleanAndReset(void)
 {
-    long i;
+    afs_uint32 i;
     cm_buf_t *bp;
     cm_req_t req;
 
-    lock_ObtainWrite(&buf_globalLock);
+    lock_ObtainRead(&buf_globalLock);
     for(i=0; i<cm_data.buf_hashSize; i++) {
-        for(bp = cm_data.buf_hashTablepp[i]; bp; bp = bp->hashp) {
+        for(bp = cm_data.buf_scacheHashTablepp[i]; bp; bp = bp->hashp) {
             if ((bp->flags & CM_BUF_DIRTY) == CM_BUF_DIRTY) {
                 buf_HoldLocked(bp);
-                lock_ReleaseWrite(&buf_globalLock);
+                lock_ReleaseRead(&buf_globalLock);
 
                 /* now no locks are held; clean buffer and go on */
                 cm_InitReq(&req);
                req.flags |= CM_REQ_NORETRY;
 
                buf_CleanAsync(bp, &req);
-               buf_CleanWait(NULL, bp);
+               buf_CleanWait(NULL, bp, FALSE);
 
                 /* relock and release buffer */
-                lock_ObtainWrite(&buf_globalLock);
-                buf_ReleaseLocked(bp);
+                lock_ObtainRead(&buf_globalLock);
+                buf_ReleaseLocked(bp, FALSE);
             } /* dirty */
         } /* over one bucket */
     }  /* for loop over all hash buckets */
 
     /* release locks */
-    lock_ReleaseWrite(&buf_globalLock);
+    lock_ReleaseRead(&buf_globalLock);
 
 #ifdef TESTING
     buf_ValidateBufQueues();
@@ -1242,23 +1423,23 @@ long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
     osi_hyper_t bufEnd;
     long code;
     long bufferPos;
-    long i;
+    afs_uint32 i;
 
     /* assert that cm_bufCreateLock is held in write mode */
     lock_AssertWrite(&scp->bufCreateLock);
 
     i = BUF_FILEHASH(&scp->fid);
 
-    lock_ObtainWrite(&buf_globalLock);
+    lock_ObtainRead(&buf_globalLock);
     bufp = cm_data.buf_fileHashTablepp[i];
     if (bufp == NULL) {
-        lock_ReleaseWrite(&buf_globalLock);
+        lock_ReleaseRead(&buf_globalLock);
         return 0;
     }
 
     buf_HoldLocked(bufp);
-    lock_ReleaseWrite(&buf_globalLock);
-    for(; bufp; bufp = nbufp) {
+    lock_ReleaseRead(&buf_globalLock);
+    while (bufp) {
         lock_ObtainMutex(&bufp->mx);
 
         bufEnd.HighPart = 0;
@@ -1269,7 +1450,7 @@ long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
              LargeIntegerLessThan(*sizep, bufEnd)) {
             buf_WaitIO(scp, bufp);
         }
-        lock_ObtainMutex(&scp->mx);
+        lock_ObtainWrite(&scp->rw);
        
         /* make sure we have a callback (so we have the right value for
          * the length), and wait for it to be safe to do a truncate.
@@ -1281,7 +1462,6 @@ long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
                           | CM_SCACHESYNC_BUFLOCKED);
 
        
-       lock_ObtainWrite(&buf_globalLock);
        /* if we succeeded in our locking, and this applies to the right
          * file, and the truncate request overlaps the buffer either
          * totally or partially, then do something.
@@ -1298,7 +1478,9 @@ long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
             if (LargeIntegerLessThanOrEqualTo(*sizep, bufp->offset)) {
                 /* truncating the entire page */
                 bufp->flags &= ~CM_BUF_DIRTY;
-                bufp->dataVersion = -1;        /* known bad */
+                bufp->dirty_offset = 0;
+                bufp->dirty_length = 0;
+                bufp->dataVersion = CM_BUF_VERSION_BAD;        /* known bad */
                 bufp->dirtyCounter++;
             }
             else {
@@ -1311,7 +1493,7 @@ long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
                  * visible again.
                  */
                 bufferPos = sizep->LowPart & (cm_data.buf_blockSize - 1);
-                osi_assert(bufferPos != 0);
+                osi_assertx(bufferPos != 0, "non-zero bufferPos");
                 memset(bufp->datap + bufferPos, 0,
                         cm_data.buf_blockSize - bufferPos);
             }
@@ -1321,20 +1503,20 @@ long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
                       CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS
                       | CM_SCACHESYNC_SETSIZE | CM_SCACHESYNC_BUFLOCKED);
 
-        lock_ReleaseMutex(&scp->mx);
+        lock_ReleaseWrite(&scp->rw);
         lock_ReleaseMutex(&bufp->mx);
     
        if (!code) {
            nbufp = bufp->fileHashp;
            if (nbufp) 
-               buf_HoldLocked(nbufp);
+               buf_Hold(nbufp);
        } else {
            /* This forces the loop to end and the error code
             * to be returned. */
            nbufp = NULL;
        }
-       buf_ReleaseLocked(bufp);
-       lock_ReleaseWrite(&buf_globalLock);
+       buf_Release(bufp);
+       bufp = nbufp;
     }
 
 #ifdef TESTING
@@ -1351,16 +1533,16 @@ long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
     cm_buf_t *bp;              /* buffer we're hacking on */
     cm_buf_t *nbp;
     int didRelease;
-    long i;
+    afs_uint32 i;
 
     i = BUF_FILEHASH(&scp->fid);
 
     code = 0;
-    lock_ObtainWrite(&buf_globalLock);
+    lock_ObtainRead(&buf_globalLock);
     bp = cm_data.buf_fileHashTablepp[i];
     if (bp) 
         buf_HoldLocked(bp);
-    lock_ReleaseWrite(&buf_globalLock);
+    lock_ReleaseRead(&buf_globalLock);
     
     for (; bp; bp = nbp) {
         didRelease = 0;        /* haven't released this buffer yet */
@@ -1374,37 +1556,60 @@ long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
             buf_WaitIO(scp, bp);
             lock_ReleaseMutex(&bp->mx);
 
-            code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
-            if (code) 
-                goto skip;
+            /* 
+             * if the error for the previous buffer was BADFD
+             * then all buffers for the FID are bad.  Do not
+             * attempt to stabilize.
+             */
+            if (code != CM_ERROR_BADFD) {
+                code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
+                if (code && code != CM_ERROR_BADFD) 
+                    goto skip;
+            }
+            if (code == CM_ERROR_BADFD) {
+                /* if the scp's FID is bad it's because we received VNOVNODE 
+                 * when attempting to FetchStatus before the write.  This
+                 * page therefore contains data that can no longer be stored.
+                 */
+                lock_ObtainMutex(&bp->mx);
+                bp->flags &= ~CM_BUF_DIRTY;
+                bp->flags |= CM_BUF_ERROR;
+                bp->error = CM_ERROR_BADFD;
+                bp->dirty_offset = 0;
+                bp->dirty_length = 0;
+                bp->dataVersion = CM_BUF_VERSION_BAD;  /* known bad */
+                bp->dirtyCounter++;
+                lock_ReleaseMutex(&bp->mx);
+            }
 
-            lock_ObtainWrite(&buf_globalLock);
             /* actually, we only know that buffer is clean if ref
              * count is 1, since we don't have buffer itself locked.
              */
             if (!(bp->flags & CM_BUF_DIRTY)) {
+                lock_ObtainWrite(&buf_globalLock);
                 if (bp->refCount == 1) {       /* bp is held above */
                     nbp = bp->fileHashp;
                     if (nbp) 
                         buf_HoldLocked(nbp);
-                    buf_ReleaseLocked(bp);
+                    buf_ReleaseLocked(bp, TRUE);
                     didRelease = 1;
                     buf_Recycle(bp);
                 }
+                lock_ReleaseWrite(&buf_globalLock);
             }
-            lock_ReleaseWrite(&buf_globalLock);
 
-            (*cm_buf_opsp->Unstabilizep)(scp, userp);
+           if (code == 0)
+               (*cm_buf_opsp->Unstabilizep)(scp, userp);
         }
 
       skip:
         if (!didRelease) {
-            lock_ObtainWrite(&buf_globalLock);
+            lock_ObtainRead(&buf_globalLock);
             nbp = bp->fileHashp;
            if (nbp)
                 buf_HoldLocked(nbp);
-            buf_ReleaseLocked(bp);
-            lock_ReleaseWrite(&buf_globalLock);
+            buf_ReleaseLocked(bp, FALSE);
+            lock_ReleaseRead(&buf_globalLock);
         }
     }  /* for loop over a bunch of buffers */
 
@@ -1416,50 +1621,78 @@ long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
     return code;
 }       
 
+/* Must be called with scp->rw held */
+long buf_ForceDataVersion(cm_scache_t * scp, afs_uint64 fromVersion, afs_uint64 toVersion)
+{
+    cm_buf_t * bp;
+    afs_uint32 i;
+    int found = 0;
+
+    lock_AssertAny(&scp->rw);
+
+    i = BUF_FILEHASH(&scp->fid);
+
+    lock_ObtainRead(&buf_globalLock);
+
+    for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp = bp->fileHashp) {
+        if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
+            if (bp->dataVersion == fromVersion) {
+                bp->dataVersion = toVersion;
+                found = 1;
+            }
+        }
+    }
+    lock_ReleaseRead(&buf_globalLock);
+
+    if (found)
+        return 0;
+    else
+        return ENOENT;
+}
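
A hedged usage sketch for buf_ForceDataVersion (the call site and the oldDV/newDV locals are assumptions for illustration, not part of this patch): after a store moves a file from one data version to the next on the file server, buffers that already contain the stored data can be promoted in place rather than treated as stale:

    afs_uint64 oldDV, newDV;    /* data versions before and after the store,
                                 * assumed known from the store reply */

    lock_ObtainWrite(&scp->rw);         /* scp->rw must be held, per the comment above */
    if (buf_ForceDataVersion(scp, oldDV, newDV) == 0) {
        /* at least one buffer was promoted from oldDV to newDV */
    }
    lock_ReleaseWrite(&scp->rw);
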
+
 long buf_CleanVnode(struct cm_scache *scp, cm_user_t *userp, cm_req_t *reqp)
 {
-    long code;
+    long code = 0;
+    long wasDirty = 0;
     cm_buf_t *bp;              /* buffer we're hacking on */
     cm_buf_t *nbp;             /* next one */
-    long i;
+    afs_uint32 i;
 
     i = BUF_FILEHASH(&scp->fid);
 
-    code = 0;
-    lock_ObtainWrite(&buf_globalLock);
+    lock_ObtainRead(&buf_globalLock);
     bp = cm_data.buf_fileHashTablepp[i];
     if (bp) 
         buf_HoldLocked(bp);
-    lock_ReleaseWrite(&buf_globalLock);
+    lock_ReleaseRead(&buf_globalLock);
     for (; bp; bp = nbp) {
         /* clean buffer synchronously */
         if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
-            if (userp) {
-                cm_HoldUser(userp);
-                lock_ObtainMutex(&bp->mx);
-                if (bp->userp) 
-                    cm_ReleaseUser(bp->userp);
-                bp->userp = userp;
-                lock_ReleaseMutex(&bp->mx);
-            }   
-            code = buf_CleanAsync(bp, reqp);
-           buf_CleanWait(scp, bp);
             lock_ObtainMutex(&bp->mx);
-            if (bp->flags & CM_BUF_ERROR) {
-                if (code == 0 || code == -1) 
+            if (bp->flags & CM_BUF_DIRTY) {
+                if (userp) {
+                    cm_HoldUser(userp);
+                    if (bp->userp) 
+                        cm_ReleaseUser(bp->userp);
+                    bp->userp = userp;
+                }   
+                wasDirty = buf_CleanAsyncLocked(bp, reqp);
+                buf_CleanWait(scp, bp, TRUE);
+                if (bp->flags & CM_BUF_ERROR) {
                     code = bp->error;
-                if (code == 0) 
-                    code = -1;
+                    if (code == 0) 
+                        code = -1;
+                }
             }
             lock_ReleaseMutex(&bp->mx);
         }
 
-        lock_ObtainWrite(&buf_globalLock);
+        lock_ObtainRead(&buf_globalLock);
         nbp = bp->fileHashp;
         if (nbp) 
             buf_HoldLocked(nbp);
-        buf_ReleaseLocked(bp);
-        lock_ReleaseWrite(&buf_globalLock);
+        buf_ReleaseLocked(bp, FALSE);
+        lock_ReleaseRead(&buf_globalLock);
     }  /* for loop over a bunch of buffers */
 
 #ifdef TESTING
@@ -1508,39 +1741,73 @@ buf_ValidateBufQueues(void)
 }
 #endif /* TESTING */
 
-/* dump the contents of the buf_hashTablepp. */
+/* dump the contents of the buf_scacheHashTablepp. */
 int cm_DumpBufHashTable(FILE *outputFile, char *cookie, int lock)
 {
     int zilch;
     cm_buf_t *bp;
     char output[1024];
-    int i;
+    afs_uint32 i;
   
-    if (cm_data.buf_hashTablepp == NULL)
+    if (cm_data.buf_scacheHashTablepp == NULL)
         return -1;
 
     if (lock)
         lock_ObtainRead(&buf_globalLock);
   
-    StringCbPrintfA(output, sizeof(output), "%s - dumping buf_HashTable - buf_hashSize=%d\n", 
+    StringCbPrintfA(output, sizeof(output), "%s - dumping buf_HashTable - buf_hashSize=%d\r\n", 
                     cookie, cm_data.buf_hashSize);
     WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
   
     for (i = 0; i < cm_data.buf_hashSize; i++)
     {
-        for (bp = cm_data.buf_hashTablepp[i]; bp; bp=bp->hashp) 
+        for (bp = cm_data.buf_scacheHashTablepp[i]; bp; bp=bp->hashp) 
         {
-            if (bp->refCount)
-            {
-                StringCbPrintfA(output, sizeof(output), "vnode=%d, unique=%d), size=%d refCount=%d\n", 
-                        cookie, (void *)bp, i, bp->fid.cell, bp->fid.volume, 
-                        bp->fid.vnode, bp->fid.unique, bp->size, bp->refCount);
-                WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
-            }
+           StringCbPrintfA(output, sizeof(output), 
+                           "%s bp=0x%08X, hash=%d, fid (cell=%d, volume=%d, "
+                           "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
+                           "flags=0x%x, cmFlags=0x%x, refCount=%d\r\n",
+                            cookie, (void *)bp, i, bp->fid.cell, bp->fid.volume, 
+                            bp->fid.vnode, bp->fid.unique, bp->offset.HighPart, 
+                            bp->offset.LowPart, bp->dataVersion, bp->flags, 
+                            bp->cmFlags, bp->refCount);
+           WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
         }
     }
   
-    StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_HashTable.\n", cookie);
+    StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_HashTable.\r\n", cookie);
+    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+
+    StringCbPrintfA(output, sizeof(output), "%s - dumping buf_freeListEndp\r\n", cookie);
+    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+    for(bp = cm_data.buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
+       StringCbPrintfA(output, sizeof(output), 
+                        "%s bp=0x%08X, fid (cell=%d, volume=%d, "
+                        "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
+                        "flags=0x%x, cmFlags=0x%x, refCount=%d\r\n",
+                        cookie, (void *)bp, bp->fid.cell, bp->fid.volume, 
+                        bp->fid.vnode, bp->fid.unique, bp->offset.HighPart, 
+                        bp->offset.LowPart, bp->dataVersion, bp->flags, 
+                        bp->cmFlags, bp->refCount);
+       WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+    }
+    StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_FreeListEndp.\r\n", cookie);
+    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+
+    StringCbPrintfA(output, sizeof(output), "%s - dumping buf_dirtyListp\r\n", cookie);
+    WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+    for(bp = cm_data.buf_dirtyListp; bp; bp=bp->dirtyp) {
+       StringCbPrintfA(output, sizeof(output), 
+                        "%s bp=0x%08X, fid (cell=%d, volume=%d, "
+                        "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
+                        "flags=0x%x, cmFlags=0x%x, refCount=%d\r\n",
+                        cookie, (void *)bp, bp->fid.cell, bp->fid.volume, 
+                        bp->fid.vnode, bp->fid.unique, bp->offset.HighPart, 
+                        bp->offset.LowPart, bp->dataVersion, bp->flags, 
+                        bp->cmFlags, bp->refCount);
+       WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
+    }
+    StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_dirtyListp.\r\n", cookie);
     WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
 
     if (lock)
@@ -1574,8 +1841,11 @@ long buf_DirtyBuffersExist(cm_fid_t *fidp)
 {
     cm_buf_t *bp;
     afs_uint32 bcount = 0;
+    afs_uint32 i;
 
-    for (bp = cm_data.buf_allp; bp; bp=bp->allp, bcount++) {
+    i = BUF_FILEHASH(fidp);
+
+    for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp=bp->fileHashp, bcount++) {
        if (!cm_FidCmp(fidp, &bp->fid) && (bp->flags & CM_BUF_DIRTY))
            return 1;
     }
@@ -1591,13 +1861,15 @@ long buf_CleanDirtyBuffers(cm_scache_t *scp)
 
     for (bp = cm_data.buf_allp; bp; bp=bp->allp, bcount++) {
        if (!cm_FidCmp(fidp, &bp->fid) && (bp->flags & CM_BUF_DIRTY)) {
-               buf_Hold(bp);
+            buf_Hold(bp);
            lock_ObtainMutex(&bp->mx);
            bp->cmFlags &= ~CM_BUF_CMSTORING;
            bp->flags &= ~CM_BUF_DIRTY;
+            bp->dirty_offset = 0;
+            bp->dirty_length = 0;
            bp->flags |= CM_BUF_ERROR;
            bp->error = VNOVNODE;
-           bp->dataVersion = -1; /* bad */
+           bp->dataVersion = CM_BUF_VERSION_BAD; /* bad */
            bp->dirtyCounter++;
            if (bp->flags & CM_BUF_WAITING) {
                osi_Log2(buf_logp, "BUF CleanDirtyBuffers Waking [scp 0x%x] bp 0x%x", scp, bp);