Windows: Add buf_FindAll() and buf_FindAllLocked()
src/WINNT/afsd/cm_buf.c (openafs.git)
index b2fc84a..5b37eed 100644
@@ -87,22 +87,47 @@ extern int cm_diskCacheEnabled;
 /* set this to 1 when we are terminating to prevent access attempts */
 static int buf_ShutdownFlag = 0;
 
+#ifdef DEBUG_REFCOUNT
+void buf_HoldLockedDbg(cm_buf_t *bp, char *file, long line)
+#else
 void buf_HoldLocked(cm_buf_t *bp)
+#endif
 {
+    afs_int32 refCount;
+
     osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
-    InterlockedIncrement(&bp->refCount);
+    refCount = InterlockedIncrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+    osi_Log2(afsd_logp,"buf_HoldLocked bp 0x%p ref %d",bp, refCount);
+    afsi_log("%s:%d buf_HoldLocked bp 0x%p, ref %d", file, line, bp, refCount);
+#endif
 }
 
 /* hold a reference to an already held buffer */
+#ifdef DEBUG_REFCOUNT
+void buf_HoldDbg(cm_buf_t *bp, char *file, long line)
+#else
 void buf_Hold(cm_buf_t *bp)
+#endif
 {
+    afs_int32 refCount;
+
     lock_ObtainRead(&buf_globalLock);
-    buf_HoldLocked(bp);
+    osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
+    refCount = InterlockedIncrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+    osi_Log2(afsd_logp,"buf_Hold bp 0x%p ref %d",bp, refCount);
+    afsi_log("%s:%d buf_Hold bp 0x%p, ref %d", file, line, bp, refCount);
+#endif
     lock_ReleaseRead(&buf_globalLock);
 }
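
With DEBUG_REFCOUNT defined, the hold/release entry points compile as *Dbg variants that take the caller's file and line. Callers throughout cm_buf.c still use the undecorated names, so cm_buf.h presumably maps them onto the Dbg variants with __FILE__/__LINE__; a sketch of that glue (the exact macro form is an assumption, it is not part of this diff):

    #ifdef DEBUG_REFCOUNT
    extern void buf_HoldLockedDbg(cm_buf_t *bp, char *file, long line);
    extern void buf_HoldDbg(cm_buf_t *bp, char *file, long line);
    #define buf_HoldLocked(bp) buf_HoldLockedDbg((bp), __FILE__, __LINE__)
    #define buf_Hold(bp)       buf_HoldDbg((bp), __FILE__, __LINE__)
    #else
    extern void buf_HoldLocked(cm_buf_t *bp);
    extern void buf_Hold(cm_buf_t *bp);
    #endif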
 
 /* code to drop reference count while holding buf_globalLock */
+#ifdef DEBUG_REFCOUNT
+void buf_ReleaseLockedDbg(cm_buf_t *bp, afs_uint32 writeLocked, char *file, long line)
+#else
 void buf_ReleaseLocked(cm_buf_t *bp, afs_uint32 writeLocked)
+#endif
 {
     afs_int32 refCount;
 
@@ -115,6 +140,10 @@ void buf_ReleaseLocked(cm_buf_t *bp, afs_uint32 writeLocked)
     osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
 
     refCount = InterlockedDecrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+    osi_Log3(afsd_logp,"buf_ReleaseLocked %s bp 0x%p ref %d",writeLocked?"write":"read", bp, refCount);
+    afsi_log("%s:%d buf_ReleaseLocked %s bp 0x%p, ref %d", file, line, writeLocked?"write":"read", bp, refCount);
+#endif
 #ifdef DEBUG
     if (refCount < 0)
        osi_panic("buf refcount 0",__FILE__,__LINE__);;
@@ -147,7 +176,11 @@ void buf_ReleaseLocked(cm_buf_t *bp, afs_uint32 writeLocked)
 }       
 
 /* release a buffer.  Buffer must be referenced, but unlocked. */
+#ifdef DEBUG_REFCOUNT
+void buf_ReleaseDbg(cm_buf_t *bp, char *file, long line)
+#else
 void buf_Release(cm_buf_t *bp)
+#endif
 {
     afs_int32 refCount;
 
@@ -155,6 +188,10 @@ void buf_Release(cm_buf_t *bp)
     osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
 
     refCount = InterlockedDecrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+    osi_Log2(afsd_logp,"buf_Release bp 0x%p ref %d", bp, refCount);
+    afsi_log("%s:%d buf_Release bp 0x%p, ref %d", file, line, bp, refCount);
+#endif
 #ifdef DEBUG
     if (refCount < 0)
        osi_panic("buf refcount 0",__FILE__,__LINE__);;
@@ -176,65 +213,129 @@ void buf_Release(cm_buf_t *bp)
     }
 }
 
-/* incremental sync daemon.  Writes all dirty buffers every 5000 ms */
-void buf_IncrSyncer(long parm)
+long
+buf_Sync(int quitOnShutdown)
 {
-    cm_buf_t **bpp, *bp;
-    long i;                            /* counter */
-    long wasDirty = 0;
+    cm_buf_t **bpp, *bp, *prevbp;
+    afs_uint32 wasDirty = 0;
     cm_req_t req;
 
-    while (buf_ShutdownFlag == 0) {
-        if (!wasDirty) {
-            i = SleepEx(5000, 1);
-            if (i != 0) continue;
-       }
+    /* go through all of the dirty buffers */
+    lock_ObtainRead(&buf_globalLock);
+    for (bpp = &cm_data.buf_dirtyListp, prevbp = NULL; bp = *bpp; ) {
+        if (quitOnShutdown && buf_ShutdownFlag)
+            break;
 
-       wasDirty = 0;
+        lock_ReleaseRead(&buf_globalLock);
+        /* all dirty buffers are held when they are added to the
+        * dirty list.  No need for an additional hold.
+        */
+        lock_ObtainMutex(&bp->mx);
 
-        /* now go through our percentage of the buffers */
-        for (bpp = &cm_data.buf_dirtyListp; bp = *bpp; ) {
+        if (bp->flags & CM_BUF_DIRTY && !(bp->flags & CM_BUF_REDIR)) {
+            /* start cleaning the buffer; don't touch log pages since
+             * the log code counts on knowing exactly who is writing
+             * a log page at any given instant.
+             *
+             * only attempt to write the buffer if the volume might
+             * be online.
+             */
+            afs_uint32 dirty;
+            cm_volume_t * volp;
 
-           /* all dirty buffers are held when they are added to the
-            * dirty list.  No need for an additional hold.
-            */
+            volp = cm_GetVolumeByFID(&bp->fid);
+            switch (cm_GetVolumeStatus(volp, bp->fid.volume)) {
+            case vl_online:
+            case vl_unknown:
+                cm_InitReq(&req);
+                req.flags |= CM_REQ_NORETRY;
+                buf_CleanAsyncLocked(bp, &req, &dirty);
+                wasDirty |= dirty;
+            }
+            cm_PutVolume(volp);
+        }
 
-           if (bp->flags & CM_BUF_DIRTY) {
-               /* start cleaning the buffer; don't touch log pages since
-                * the log code counts on knowing exactly who is writing
-                * a log page at any given instant.
-                */
-               cm_InitReq(&req);
-               req.flags |= CM_REQ_NORETRY;
-               wasDirty |= buf_CleanAsync(bp, &req);
-           }
+        /* the buffer may or may not have been dirty
+        * and if dirty may or may not have been cleaned
+        * successfully.  check the dirty flag again.  
+        */
+        if (!(bp->flags & CM_BUF_DIRTY)) {
+            /* remove the buffer from the dirty list */
+            lock_ObtainWrite(&buf_globalLock);
+#ifdef DEBUG_REFCOUNT
+            if (bp->dirtyp == NULL && bp != cm_data.buf_dirtyListEndp) {
+                osi_Log1(afsd_logp,"buf_Sync bp 0x%p list corruption",bp);
+                afsi_log("buf_Sync bp 0x%p list corruption", bp);
+            }
+#endif
+            *bpp = bp->dirtyp;
+            bp->dirtyp = NULL;
+            bp->flags &= ~CM_BUF_INDL;
+            if (cm_data.buf_dirtyListp == NULL)
+                cm_data.buf_dirtyListEndp = NULL;
+            else if (cm_data.buf_dirtyListEndp == bp)
+                cm_data.buf_dirtyListEndp = prevbp;
+            buf_ReleaseLocked(bp, TRUE);
+            lock_ConvertWToR(&buf_globalLock);
+        } else {
+            if (buf_ShutdownFlag) {
+                cm_cell_t *cellp;
+                cm_volume_t *volp;
+                char volstr[VL_MAXNAMELEN+12]="";
+                char *ext = "";
+
+                volp = cm_GetVolumeByFID(&bp->fid);
+                if (volp) {
+                    cellp = volp->cellp;
+                    if (bp->fid.volume == volp->vol[RWVOL].ID)
+                        ext = "";
+                    else if (bp->fid.volume == volp->vol[ROVOL].ID)
+                        ext = ".readonly";
+                    else if (bp->fid.volume == volp->vol[BACKVOL].ID)
+                        ext = ".backup";
+                    else
+                        ext = ".nomatch";
+                    snprintf(volstr, sizeof(volstr), "%s%s", volp->namep, ext);
+                } else {
+                    cellp = cm_FindCellByID(bp->fid.cell, CM_FLAG_NOPROBE);
+                    snprintf(volstr, sizeof(volstr), "%u", bp->fid.volume);
+                }
 
-           /* the buffer may or may not have been dirty
-            * and if dirty may or may not have been cleaned
-            * successfully.  check the dirty flag again.  
-            */
-           if (!(bp->flags & CM_BUF_DIRTY)) {
-               lock_ObtainMutex(&bp->mx);
-               if (!(bp->flags & CM_BUF_DIRTY)) {
-                   /* remove the buffer from the dirty list */
-                   lock_ObtainWrite(&buf_globalLock);
-                   *bpp = bp->dirtyp;
-                   bp->dirtyp = NULL;
-                   if (cm_data.buf_dirtyListp == NULL)
-                       cm_data.buf_dirtyListEndp = NULL;
-                   buf_ReleaseLocked(bp, TRUE);
-                   lock_ReleaseWrite(&buf_globalLock);
-               } else {
-                   /* advance the pointer so we don't loop forever */
-                   bpp = &bp->dirtyp;
-               }
-               lock_ReleaseMutex(&bp->mx);
-           } else {
-               /* advance the pointer so we don't loop forever */
-               bpp = &bp->dirtyp;
-           }
-        }      /* for loop over a bunch of buffers */
-    }          /* whole daemon's while loop */
+                LogEvent(EVENTLOG_INFORMATION_TYPE, MSG_DIRTY_BUFFER_AT_SHUTDOWN, 
+                         cellp ? cellp->name : "", volstr, bp->fid.vnode, bp->fid.unique, 
+                         bp->offset.QuadPart+bp->dirty_offset, bp->dirty_length);
+            }
+
+            /* advance the pointer so we don't loop forever */
+            lock_ObtainRead(&buf_globalLock);
+            bpp = &bp->dirtyp;
+            prevbp = bp;
+        }
+        lock_ReleaseMutex(&bp->mx);
+    }  /* for loop over a bunch of buffers */
+    lock_ReleaseRead(&buf_globalLock);
+
+    return wasDirty;
+}
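
buf_Sync() walks cm_data.buf_dirtyListp with a pointer-to-pointer cursor (bpp) and a trailing prevbp, so a cleaned buffer can be spliced out without special-casing the list head, and buf_dirtyListEndp can be repaired when the tail is removed. A standalone illustration of that unlink pattern, using toy types rather than the cm_buf_t structures:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int dirty; struct node *next; };

    /* Remove every clean node in one pass; no special case for the head. */
    static void prune_clean(struct node **headp, struct node **tailp)
    {
        struct node **npp, *np, *prev = NULL;

        for (npp = headp; (np = *npp) != NULL; ) {
            if (!np->dirty) {
                *npp = np->next;        /* splice np out of the list */
                if (*headp == NULL)
                    *tailp = NULL;      /* list is now empty */
                else if (*tailp == np)
                    *tailp = prev;      /* removed the old tail */
                free(np);
            } else {
                prev = np;              /* keep np; advance the cursor */
                npp = &np->next;
            }
        }
    }

    int main(void)
    {
        struct node *head = NULL, *tail = NULL, *n;
        int i;

        for (i = 0; i < 5; i++) {       /* build a short list, alternating dirty */
            n = calloc(1, sizeof(*n));
            n->dirty = i & 1;
            if (tail) tail->next = n; else head = n;
            tail = n;
        }
        prune_clean(&head, &tail);
        for (n = head; n; n = n->next)
            printf("dirty=%d\n", n->dirty);
        return 0;
    }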
+
+/* incremental sync daemon.  Writes all dirty buffers every 5000 ms, or after 50 ms if the previous pass found dirty buffers */
+void buf_IncrSyncer(long parm)
+{
+    long wasDirty = 0;
+    long i;
+
+    while (buf_ShutdownFlag == 0) {
+
+        if (!wasDirty) {
+           i = SleepEx(5000, 1);
+           if (i != 0) 
+                continue;
+       } else {
+            Sleep(50);
+        }
+
+        wasDirty = buf_Sync(1);
+    } /* whole daemon's while loop */
 }
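
The daemon sleeps alertably (SleepEx(5000, 1)) when the previous pass found nothing dirty, so another thread can cut the five-second wait short by queueing a user APC; SleepEx() then returns non-zero (WAIT_IO_COMPLETION) and the loop re-checks buf_ShutdownFlag. A hedged sketch of such a waker, which is not part of this patch:

    #include <windows.h>

    /* Empty APC routine: its only job is to make an alertable SleepEx() return. */
    static VOID CALLBACK wake_apc(ULONG_PTR param)
    {
        (void)param;
    }

    /* Hypothetical helper: hThread would be the handle of the thread running
     * buf_IncrSyncer(); queueing the APC wakes it early. */
    static void wake_syncer(HANDLE hThread)
    {
        QueueUserAPC(wake_apc, hThread, 0);
    }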
 
 long
@@ -314,8 +415,12 @@ buf_ValidateBuffers(void)
 }
 
 void buf_Shutdown(void)  
-{                        
+{  
+    /* disable the buf_IncrSyncer() threads */
     buf_ShutdownFlag = 1;
+
+    /* then force all dirty buffers to the file servers */
+    buf_Sync(0);
 }                        
 
 /* initialize the buffer package; called with no locks
@@ -344,7 +449,7 @@ long buf_Init(int newFile, cm_buf_ops_t *opsp, afs_uint64 nbuffers)
 
     if (osi_Once(&once)) {
         /* initialize global locks */
-        lock_InitializeRWLock(&buf_globalLock, "Global buffer lock");
+        lock_InitializeRWLock(&buf_globalLock, "Global buffer lock", LOCK_HIERARCHY_BUF_GLOBAL);
 
         if ( newFile ) {
             /* remember this for those who want to reset it */
@@ -379,7 +484,7 @@ long buf_Init(int newFile, cm_buf_ops_t *opsp, afs_uint64 nbuffers)
                 
                 osi_QAdd((osi_queue_t **)&cm_data.buf_freeListp, &bp->q);
                 bp->flags |= CM_BUF_INLRU;
-                lock_InitializeMutex(&bp->mx, "Buffer mutex");
+                lock_InitializeMutex(&bp->mx, "Buffer mutex", LOCK_HIERARCHY_BUFFER);
                 
                 /* grab appropriate number of bytes from aligned zone */
                 bp->datap = data;
@@ -403,7 +508,7 @@ long buf_Init(int newFile, cm_buf_ops_t *opsp, afs_uint64 nbuffers)
             data = cm_data.bufDataBaseAddress;
             
             for (i=0; i<cm_data.buf_nbuffers; i++) {
-                lock_InitializeMutex(&bp->mx, "Buffer mutex");
+                lock_InitializeMutex(&bp->mx, "Buffer mutex", LOCK_HIERARCHY_BUFFER);
                 bp->userp = NULL;
                 bp->waitCount = 0;
                 bp->waitRequests = 0;
@@ -521,12 +626,12 @@ void buf_WaitIO(cm_scache_t * scp, cm_buf_t *bp)
                 release = 1;
         }
         if ( scp ) {
-            lock_ObtainMutex(&scp->mx);
+            lock_ObtainRead(&scp->rw);
             if (scp->flags & CM_SCACHEFLAG_WAITING) {
                 osi_Log1(buf_logp, "buf_WaitIO waking scp 0x%p", scp);
                 osi_Wakeup((LONG_PTR)&scp->flags);
             }
-           lock_ReleaseMutex(&scp->mx);
+           lock_ReleaseRead(&scp->rw);
         }
     }
         
@@ -579,6 +684,54 @@ cm_buf_t *buf_Find(struct cm_scache *scp, osi_hyper_t *offsetp)
     return bp;
 }       
 
+/* find a buffer, if any, for a particular file ID and offset.  Assumes
+ * that buf_globalLock is held (read or write) when called.  Uses the all
+ * buffer list.
+ */
+cm_buf_t *buf_FindAllLocked(struct cm_scache *scp, osi_hyper_t *offsetp, afs_uint32 flags)
+{
+    cm_buf_t *bp;
+
+    if (flags == 0) {
+        for(bp = cm_data.buf_allp; bp; bp=bp->allp) {
+            if (cm_FidCmp(&scp->fid, &bp->fid) == 0
+                 && offsetp->LowPart == bp->offset.LowPart
+                 && offsetp->HighPart == bp->offset.HighPart) {
+                buf_HoldLocked(bp);
+                break;
+            }
+        }
+    } else {
+        for(bp = cm_data.buf_allp; bp; bp=bp->allp) {
+            if (cm_FidCmp(&scp->fid, &bp->fid) == 0) {
+                char * fileOffset;
+                
+                fileOffset = offsetp->QuadPart + cm_data.baseAddress;
+                if (fileOffset == bp->datap) {
+                    buf_HoldLocked(bp);
+                    break;
+                }
+            }
+        }
+    }
+    /* return whatever we found, if anything */
+    return bp;
+}
+
+/* find a buffer with offset *offsetp for vnode *scp.  Called
+ * with no locks held.  Use the all buffer list.
+ */
+cm_buf_t *buf_FindAll(struct cm_scache *scp, osi_hyper_t *offsetp, afs_uint32 flags)
+{
+    cm_buf_t *bp;
+
+    lock_ObtainRead(&buf_globalLock);
+    bp = buf_FindAllLocked(scp, offsetp, flags);
+    lock_ReleaseRead(&buf_globalLock);
+
+    return bp;
+}       
+
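
buf_FindAllLocked() supports two lookup modes: with flags == 0, *offsetp is a file offset matched against bp->offset; with a non-zero flags, *offsetp is treated as an offset relative to cm_data.baseAddress (the start of the mapped cache) and is matched against bp->datap. A hypothetical caller sketch, illustrative names only; returned buffers are held and must be released:

    /* Sketch: probe for a buffer first by file offset, then by cache-data offset. */
    static cm_buf_t *
    example_lookup(cm_scache_t *scp, osi_hyper_t fileOffset, osi_hyper_t dataOffset)
    {
        cm_buf_t *bp;

        bp = buf_FindAll(scp, &fileOffset, 0);      /* match on bp->offset */
        if (bp == NULL)
            bp = buf_FindAll(scp, &dataOffset, 1);  /* match on bp->datap */

        return bp;      /* NULL, or a buffer carrying an extra reference */
    }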
 /* start cleaning I/O on this buffer.  Buffer must be write locked, and is returned
  * write-locked.
  *
@@ -588,10 +741,10 @@ cm_buf_t *buf_Find(struct cm_scache *scp, osi_hyper_t *offsetp)
  *
  * Returns non-zero if the buffer was dirty.
  */
-long buf_CleanAsyncLocked(cm_buf_t *bp, cm_req_t *reqp)
+afs_uint32 buf_CleanAsyncLocked(cm_buf_t *bp, cm_req_t *reqp, afs_uint32 *pisdirty)
 {
-    long code = 0;
-    long isdirty = 0;
+    afs_uint32 code = 0;
+    afs_uint32 isdirty = 0;
     cm_scache_t * scp = NULL;
     osi_hyper_t offset;
 
@@ -607,7 +760,16 @@ long buf_CleanAsyncLocked(cm_buf_t *bp, cm_req_t *reqp)
 
             offset = bp->offset;
             LargeIntegerAdd(offset, ConvertLongToLargeInteger(bp->dirty_offset));
-           code = (*cm_buf_opsp->Writep)(scp, &offset, bp->dirty_length, 0, bp->userp, reqp);
+           code = (*cm_buf_opsp->Writep)(scp, &offset, 
+#if 1
+                                           /* we might as well try to write all of the contiguous 
+                                            * dirty buffers in one RPC 
+                                            */
+                                           cm_chunkSize,
+#else
+                                          bp->dirty_length, 
+#endif
+                                          0, bp->userp, reqp);
            osi_Log3(buf_logp, "buf_CleanAsyncLocked I/O on scp 0x%p buf 0x%p, done=%d", scp, bp, code);
 
            cm_ReleaseSCache(scp);
@@ -622,14 +784,17 @@ long buf_CleanAsyncLocked(cm_buf_t *bp, cm_req_t *reqp)
         * because we aren't going to be able to write this data to the file
         * server.
         */
-       if (code == CM_ERROR_NOSUCHFILE || code == CM_ERROR_BADFD){
+       if (code == CM_ERROR_NOSUCHFILE || code == CM_ERROR_BADFD || code == CM_ERROR_NOACCESS || 
+            code == CM_ERROR_QUOTA || code == CM_ERROR_SPACE || code == CM_ERROR_TOOBIG || 
+            code == CM_ERROR_READONLY || code == CM_ERROR_NOSUCHPATH){
            bp->flags &= ~CM_BUF_DIRTY;
            bp->flags |= CM_BUF_ERROR;
             bp->dirty_offset = 0;
             bp->dirty_length = 0;
            bp->error = code;
-           bp->dataVersion = -1; /* bad */
+           bp->dataVersion = CM_BUF_VERSION_BAD;
            bp->dirtyCounter++;
+            break;
        }
 
 #ifdef DISKCACHE95
@@ -644,15 +809,15 @@ long buf_CleanAsyncLocked(cm_buf_t *bp, cm_req_t *reqp)
         */
        if (reqp->flags & CM_REQ_NORETRY)
            break;
-    };
-
-    if (!(bp->flags & CM_BUF_DIRTY)) {
-       /* remove buffer from dirty buffer queue */
 
+        /* Ditto if the hardDeadTimeout or idleTimeout was reached */
+        if (code == CM_ERROR_TIMEDOUT || code == CM_ERROR_ALLDOWN ||
+            code == CM_ERROR_ALLBUSY || code == CM_ERROR_ALLOFFLINE ||
+            code == CM_ERROR_CLOCKSKEW) {
+            break;
+        }
     }
 
-    /* do logging after call to GetLastError, or else */
-        
     /* if someone was waiting for the I/O that just completed or failed,
      * wake them up.
      */
@@ -661,7 +826,11 @@ long buf_CleanAsyncLocked(cm_buf_t *bp, cm_req_t *reqp)
         osi_Log1(buf_logp, "buf_WaitIO Waking bp 0x%p", bp);
         osi_Wakeup((LONG_PTR) bp);
     }
-    return isdirty;
+
+    if (pisdirty)
+        *pisdirty = isdirty;
+
+    return code;
 }
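
Note the changed calling convention: buf_CleanAsyncLocked() (and buf_CleanAsync() below) now return the store error code, and the old "was it dirty" result comes back through the optional *pisdirty out-parameter, which buf_Sync() passes and other callers pass as NULL. A sketch of a caller under the new convention (the helper name is illustrative):

    /* Sketch: flush one unlocked buffer and examine both results separately. */
    static afs_uint32
    example_flush(cm_buf_t *bp, cm_req_t *reqp)
    {
        afs_uint32 dirty = 0;
        afs_uint32 code;

        code = buf_CleanAsync(bp, reqp, &dirty);   /* takes and drops bp->mx */
        if (code) {
            /* store failed; bp may now carry CM_BUF_ERROR with bp->error == code */
        } else if (dirty) {
            /* buffer data was actually written back to the file server */
        }
        return code;
    }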
 
 /* Called with a zero-ref count buffer and with the buf_globalLock write locked.
@@ -725,12 +894,6 @@ void buf_Recycle(cm_buf_t *bp)
         bp->flags &= ~CM_BUF_INHASH;
     }
 
-    /* bump the soft reference counter now, to invalidate softRefs; no
-     * wakeup is required since people don't sleep waiting for this
-     * counter to change.
-     */
-    bp->idCounter++;
-
     /* make the fid unrecognizable */
     memset(&bp->fid, 0, sizeof(cm_fid_t));
 }       
@@ -746,14 +909,11 @@ void buf_Recycle(cm_buf_t *bp)
  * space from the buffer pool.  In that case, the buffer will be returned
  * without being hashed into the hash table.
  */
-long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
+long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf_t **bufpp)
 {
     cm_buf_t *bp;      /* buffer we're dealing with */
     cm_buf_t *nextBp;  /* next buffer in file hash chain */
     afs_uint32 i;      /* temp */
-    cm_req_t req;
-
-    cm_InitReq(&req);  /* just in case */
 
 #ifdef TESTING
     buf_ValidateBufQueues();
@@ -770,7 +930,11 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bu
                 * do not want to allow the buffer to be added
                 * to the free list.
                 */
-                bp->refCount--;
+                afs_int32 refCount = InterlockedDecrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+                osi_Log2(afsd_logp,"buf_GetNewLocked bp 0x%p ref %d", bp, refCount);
+                afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, refCount);
+#endif
                 lock_ReleaseWrite(&buf_globalLock);
                 lock_ReleaseRead(&scp->bufCreateLock);
                 return CM_BUF_EXISTS;
@@ -812,6 +976,10 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bu
              * we hold the global lock.
              */
 
+            /* Don't recycle a buffer held by the redirector. */
+            if (bp->flags & CM_BUF_REDIR)
+                continue;
+
             /* don't recycle someone in our own chunk */
             if (!cm_FidCmp(&bp->fid, &scp->fid)
                  && (bp->offset.LowPart & (-cm_chunkSize))
@@ -846,7 +1014,7 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bu
                  * have the WRITING flag set, so we won't get
                  * back here.
                  */
-                buf_CleanAsync(bp, &req);
+                buf_CleanAsync(bp, reqp, NULL);
 
                 /* now put it back and go around again */
                 buf_Release(bp);
@@ -864,7 +1032,7 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bu
 
             /* clean up junk flags */
             bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
-            bp->dataVersion = -1;      /* unknown so far */
+            bp->dataVersion = CM_BUF_VERSION_BAD;      /* unknown so far */
 
             /* now hash in as our new buffer, and give it the
              * appropriate label, if requested.
@@ -901,21 +1069,25 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bu
             osi_QRemove((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
             bp->flags &= ~CM_BUF_INLRU;
 
+            /* prepare to return it.  Give it a refcount */
+            bp->refCount = 1;
+#ifdef DEBUG_REFCOUNT
+            osi_Log2(afsd_logp,"buf_GetNewLocked bp 0x%p ref %d", bp, 1);
+            afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, 1);
+#endif
             /* grab the mutex so that people don't use it
              * before the caller fills it with data.  Again, no one    
              * should have been able to get to this dude to lock it.
              */
            if (!lock_TryMutex(&bp->mx)) {
                osi_Log2(afsd_logp, "buf_GetNewLocked bp 0x%p cannot be mutex locked.  refCount %d should be 0",
-                        bp, bp->refCount);
+                         bp, bp->refCount);
                osi_panic("buf_GetNewLocked: TryMutex failed",__FILE__,__LINE__);
            }
 
-           /* prepare to return it.  Give it a refcount */
-            bp->refCount = 1;
-                        
             lock_ReleaseWrite(&buf_globalLock);
             lock_ReleaseRead(&scp->bufCreateLock);
+
             *bufpp = bp;
 
 #ifdef TESTING
@@ -934,7 +1106,7 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bu
 /* get a page, returning it held but unlocked.  Doesn't fill in the page
  * with I/O, since we're going to write the whole thing new.
  */
-long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
+long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf_t **bufpp)
 {
     cm_buf_t *bp;
     long code;
@@ -953,7 +1125,7 @@ long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
         }
 
         /* otherwise, we have to create a page */
-        code = buf_GetNewLocked(scp, &pageOffset, &bp);
+        code = buf_GetNewLocked(scp, &pageOffset, reqp, &bp);
 
         /* check if the buffer was created in a race condition branch.
          * If so, go around so we can hold a reference to it. 
@@ -986,7 +1158,7 @@ long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
 
 /* get a page, returning it held but unlocked.  Make sure it is complete */
 /* The scp must be unlocked when passed to this function */
-long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
+long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf_t **bufpp)
 {
     cm_buf_t *bp;
     long code;
@@ -1020,7 +1192,7 @@ long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
         }
 
         /* otherwise, we have to create a page */
-        code = buf_GetNewLocked(scp, &pageOffset, &bp);
+        code = buf_GetNewLocked(scp, &pageOffset, reqp, &bp);
        /* bp->mx is now held */
 
         /* check if the buffer was created in a race condition branch.
@@ -1142,7 +1314,7 @@ long buf_CountFreeList(void)
          * has been invalidate (by having its DV stomped upon), then
          * count it as free, since it isn't really being utilized.
          */
-        if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion <= 0)
+        if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion == CM_BUF_VERSION_BAD)
             count++;
     }       
     lock_ReleaseRead(&buf_globalLock);
@@ -1150,28 +1322,30 @@ long buf_CountFreeList(void)
 }
 
 /* clean a buffer synchronously */
-long buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp)
+afs_uint32 buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp, afs_uint32 *pisdirty)
 {
     long code;
     osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
 
     lock_ObtainMutex(&bp->mx);
-    code = buf_CleanAsyncLocked(bp, reqp);
+    code = buf_CleanAsyncLocked(bp, reqp, pisdirty);
     lock_ReleaseMutex(&bp->mx);
 
     return code;
 }       
 
 /* wait for a buffer's cleaning to finish */
-void buf_CleanWait(cm_scache_t * scp, cm_buf_t *bp)
+void buf_CleanWait(cm_scache_t * scp, cm_buf_t *bp, afs_uint32 locked)
 {
     osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
 
-    lock_ObtainMutex(&bp->mx);
+    if (!locked)
+        lock_ObtainMutex(&bp->mx);
     if (bp->flags & CM_BUF_WRITING) {
         buf_WaitIO(scp, bp);
     }
-    lock_ReleaseMutex(&bp->mx);
+    if (!locked)
+        lock_ReleaseMutex(&bp->mx);
 }       
 
 /* set the dirty flag on a buffer, and set associated write-ahead log,
@@ -1179,7 +1353,7 @@ void buf_CleanWait(cm_scache_t * scp, cm_buf_t *bp)
  *
  * The buffer must be locked before calling this routine.
  */
-void buf_SetDirty(cm_buf_t *bp, afs_uint32 offset, afs_uint32 length)
+void buf_SetDirty(cm_buf_t *bp, afs_uint32 offset, afs_uint32 length, cm_user_t *userp)
 {
     osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
     osi_assertx(bp->refCount > 0, "cm_buf_t refcount 0");
@@ -1225,7 +1399,7 @@ void buf_SetDirty(cm_buf_t *bp, afs_uint32 offset, afs_uint32 length)
          * already there.
          */
         lock_ObtainWrite(&buf_globalLock);
-        if (bp->dirtyp == NULL && cm_data.buf_dirtyListEndp != bp) {
+        if (!(bp->flags & CM_BUF_INDL)) {
             buf_HoldLocked(bp);
             if (!cm_data.buf_dirtyListp) {
                 cm_data.buf_dirtyListp = cm_data.buf_dirtyListEndp = bp;
@@ -1234,9 +1408,18 @@ void buf_SetDirty(cm_buf_t *bp, afs_uint32 offset, afs_uint32 length)
                 cm_data.buf_dirtyListEndp = bp;
             }
             bp->dirtyp = NULL;
+            bp->flags |= CM_BUF_INDL;
         }
         lock_ReleaseWrite(&buf_globalLock);
     }
+
+    /* and record the last writer */
+    if (bp->userp != userp) {
+        cm_HoldUser(userp);
+        if (bp->userp) 
+            cm_ReleaseUser(bp->userp);
+        bp->userp = userp;
+    }
 }
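
buf_SetDirty() now tracks dirty-list membership with the explicit CM_BUF_INDL flag and records the last writer by swapping the cm_user_t reference, taking the new hold before dropping the old one. A minimal generic sketch of that swap ordering (toy refcount type, not cm_user_t):

    #include <assert.h>

    struct ref { int count; };

    static void ref_hold(struct ref *r)    { r->count++; }
    static void ref_release(struct ref *r) { assert(r->count > 0); r->count--; }

    /* Replace *slot with newr: hold the incoming reference before releasing
     * the outgoing one, mirroring the order used in buf_SetDirty(). */
    static void ref_swap(struct ref **slot, struct ref *newr)
    {
        if (*slot != newr) {
            ref_hold(newr);
            if (*slot)
                ref_release(*slot);
            *slot = newr;
        }
    }

    int main(void)
    {
        struct ref a = { 1 }, b = { 1 };
        struct ref *slot = &a;              /* slot currently holds a ref on a */

        ref_swap(&slot, &b);                /* a.count -> 0, b.count -> 2 */
        return (a.count == 0 && b.count == 2) ? 0 : 1;
    }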
 
 /* clean all buffers, reset log pointers and invalidate all buffers.
@@ -1276,8 +1459,8 @@ long buf_CleanAndReset(void)
                 cm_InitReq(&req);
                req.flags |= CM_REQ_NORETRY;
 
-               buf_CleanAsync(bp, &req);
-               buf_CleanWait(NULL, bp);
+               buf_CleanAsync(bp, &req, NULL);
+               buf_CleanWait(NULL, bp, FALSE);
 
                 /* relock and release buffer */
                 lock_ObtainRead(&buf_globalLock);
@@ -1388,7 +1571,7 @@ long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
              LargeIntegerLessThan(*sizep, bufEnd)) {
             buf_WaitIO(scp, bufp);
         }
-        lock_ObtainMutex(&scp->mx);
+        lock_ObtainWrite(&scp->rw);
        
         /* make sure we have a callback (so we have the right value for
          * the length), and wait for it to be safe to do a truncate.
@@ -1418,7 +1601,7 @@ long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
                 bufp->flags &= ~CM_BUF_DIRTY;
                 bufp->dirty_offset = 0;
                 bufp->dirty_length = 0;
-                bufp->dataVersion = -1;        /* known bad */
+                bufp->dataVersion = CM_BUF_VERSION_BAD;        /* known bad */
                 bufp->dirtyCounter++;
             }
             else {
@@ -1441,7 +1624,7 @@ long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
                       CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS
                       | CM_SCACHESYNC_SETSIZE | CM_SCACHESYNC_BUFLOCKED);
 
-        lock_ReleaseMutex(&scp->mx);
+        lock_ReleaseWrite(&scp->rw);
         lock_ReleaseMutex(&bufp->mx);
     
        if (!code) {
@@ -1490,14 +1673,20 @@ long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
             lock_ObtainMutex(&bp->mx);
 
             /* start cleaning the buffer, and wait for it to finish */
-            buf_CleanAsyncLocked(bp, reqp);
+            buf_CleanAsyncLocked(bp, reqp, NULL);
             buf_WaitIO(scp, bp);
             lock_ReleaseMutex(&bp->mx);
 
-            code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
-            if (code && code != CM_ERROR_BADFD) 
-                goto skip;
-
+            /* 
+             * if the error for the previous buffer was BADFD
+             * then all buffers for the FID are bad.  Do not
+             * attempt to stabalize.
+             */
+            if (code != CM_ERROR_BADFD) {
+                code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
+                if (code && code != CM_ERROR_BADFD) 
+                    goto skip;
+            }
             if (code == CM_ERROR_BADFD) {
                 /* if the scp's FID is bad its because we received VNOVNODE 
                  * when attempting to FetchStatus before the write.  This
@@ -1509,7 +1698,7 @@ long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
                 bp->error = CM_ERROR_BADFD;
                 bp->dirty_offset = 0;
                 bp->dirty_length = 0;
-                bp->dataVersion = -1;  /* known bad */
+                bp->dataVersion = CM_BUF_VERSION_BAD;  /* known bad */
                 bp->dirtyCounter++;
                 lock_ReleaseMutex(&bp->mx);
             }
@@ -1553,14 +1742,14 @@ long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
     return code;
 }       
 
-/* Must be called with scp->mx held */
+/* Must be called with scp->rw held */
 long buf_ForceDataVersion(cm_scache_t * scp, afs_uint64 fromVersion, afs_uint64 toVersion)
 {
     cm_buf_t * bp;
     afs_uint32 i;
     int found = 0;
 
-    lock_AssertMutex(&scp->mx);
+    lock_AssertAny(&scp->rw);
 
     i = BUF_FILEHASH(&scp->fid);
 
@@ -1600,21 +1789,55 @@ long buf_CleanVnode(struct cm_scache *scp, cm_user_t *userp, cm_req_t *reqp)
     for (; bp; bp = nbp) {
         /* clean buffer synchronously */
         if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
-            if (userp) {
-                cm_HoldUser(userp);
-                lock_ObtainMutex(&bp->mx);
-                if (bp->userp) 
-                    cm_ReleaseUser(bp->userp);
-                bp->userp = userp;
-                lock_ReleaseMutex(&bp->mx);
-            }   
-            wasDirty = buf_CleanAsync(bp, reqp);
-           buf_CleanWait(scp, bp);
             lock_ObtainMutex(&bp->mx);
-            if (bp->flags & CM_BUF_ERROR) {
-               code = bp->error;
-                if (code == 0) 
-                    code = -1;
+            if (bp->flags & CM_BUF_DIRTY) {
+                if (userp && userp != bp->userp) {
+                    cm_HoldUser(userp);
+                    if (bp->userp) 
+                        cm_ReleaseUser(bp->userp);
+                    bp->userp = userp;
+                }   
+
+                switch (code) {
+                case CM_ERROR_NOSUCHFILE:
+                case CM_ERROR_BADFD:
+                case CM_ERROR_NOACCESS:
+                case CM_ERROR_QUOTA:
+                case CM_ERROR_SPACE:
+                case CM_ERROR_TOOBIG:
+                case CM_ERROR_READONLY:
+                case CM_ERROR_NOSUCHPATH:
+                    /* 
+                     * Apply the previous fatal error to this buffer.
+                     * Do not waste the time attempting to store to
+                     * the file server when we know it will fail.
+                     */
+                    bp->flags &= ~CM_BUF_DIRTY;
+                    bp->flags |= CM_BUF_ERROR;
+                    bp->dirty_offset = 0;
+                    bp->dirty_length = 0;
+                    bp->error = code;
+                    bp->dataVersion = CM_BUF_VERSION_BAD;
+                    bp->dirtyCounter++;
+                    break;
+                case CM_ERROR_TIMEDOUT:
+                case CM_ERROR_ALLDOWN:
+                case CM_ERROR_ALLBUSY:
+                case CM_ERROR_ALLOFFLINE:
+                case CM_ERROR_CLOCKSKEW:
+                    /* do not mark the buffer in error state but do
+                     * not attempt to complete the rest either.
+                     */
+                    break;
+                default:
+                    code = buf_CleanAsyncLocked(bp, reqp, &wasDirty);
+                    if (bp->flags & CM_BUF_ERROR) {
+                        code = bp->error;
+                        if (code == 0)
+                            code = -1;
+                    }
+                }
+                buf_CleanWait(scp, bp, TRUE);
             }
             lock_ReleaseMutex(&bp->mx);
         }
@@ -1698,11 +1921,11 @@ int cm_DumpBufHashTable(FILE *outputFile, char *cookie, int lock)
            StringCbPrintfA(output, sizeof(output), 
                            "%s bp=0x%08X, hash=%d, fid (cell=%d, volume=%d, "
                            "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
-                           "flags=0x%x, cmFlags=0x%x, refCount=%d\r\n",
+                           "flags=0x%x, cmFlags=0x%x, error=0x%x, refCount=%d\r\n",
                             cookie, (void *)bp, i, bp->fid.cell, bp->fid.volume, 
                             bp->fid.vnode, bp->fid.unique, bp->offset.HighPart, 
                             bp->offset.LowPart, bp->dataVersion, bp->flags, 
-                            bp->cmFlags, bp->refCount);
+                            bp->cmFlags, bp->error, bp->refCount);
            WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
         }
     }
@@ -1716,30 +1939,30 @@ int cm_DumpBufHashTable(FILE *outputFile, char *cookie, int lock)
        StringCbPrintfA(output, sizeof(output), 
                         "%s bp=0x%08X, fid (cell=%d, volume=%d, "
                         "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
-                        "flags=0x%x, cmFlags=0x%x, refCount=%d\r\n",
+                        "flags=0x%x, cmFlags=0x%x, error=0x%x, refCount=%d\r\n",
                         cookie, (void *)bp, bp->fid.cell, bp->fid.volume, 
                         bp->fid.vnode, bp->fid.unique, bp->offset.HighPart, 
                         bp->offset.LowPart, bp->dataVersion, bp->flags, 
-                        bp->cmFlags, bp->refCount);
+                        bp->cmFlags, bp->error, bp->refCount);
        WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
     }
     StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_FreeListEndp.\r\n", cookie);
     WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
 
-    StringCbPrintfA(output, sizeof(output), "%s - dumping buf_dirtyListEndp\r\n", cookie);
+    StringCbPrintfA(output, sizeof(output), "%s - dumping buf_dirtyListp\r\n", cookie);
     WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
-    for(bp = cm_data.buf_dirtyListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
+    for(bp = cm_data.buf_dirtyListp; bp; bp=bp->dirtyp) {
        StringCbPrintfA(output, sizeof(output), 
                         "%s bp=0x%08X, fid (cell=%d, volume=%d, "
                         "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
-                        "flags=0x%x, cmFlags=0x%x, refCount=%d\r\n",
+                        "flags=0x%x, cmFlags=0x%x, error=0x%x, refCount=%d\r\n",
                         cookie, (void *)bp, bp->fid.cell, bp->fid.volume, 
                         bp->fid.vnode, bp->fid.unique, bp->offset.HighPart, 
                         bp->offset.LowPart, bp->dataVersion, bp->flags, 
-                        bp->cmFlags, bp->refCount);
+                        bp->cmFlags, bp->error, bp->refCount);
        WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
     }
-    StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_dirtyListEndp.\r\n", cookie);
+    StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_dirtyListp.\r\n", cookie);
     WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
 
     if (lock)
@@ -1801,7 +2024,7 @@ long buf_CleanDirtyBuffers(cm_scache_t *scp)
             bp->dirty_length = 0;
            bp->flags |= CM_BUF_ERROR;
            bp->error = VNOVNODE;
-           bp->dataVersion = -1; /* bad */
+           bp->dataVersion = CM_BUF_VERSION_BAD; /* bad */
            bp->dirtyCounter++;
            if (bp->flags & CM_BUF_WAITING) {
                osi_Log2(buf_logp, "BUF CleanDirtyBuffers Waking [scp 0x%x] bp 0x%x", scp, bp);