Windows: validate buffer hash tables in cm_MergeStatus
diff --git a/src/WINNT/afsd/cm_buf.c b/src/WINNT/afsd/cm_buf.c
index e62a20b..73d64f8 100644
--- a/src/WINNT/afsd/cm_buf.c
+++ b/src/WINNT/afsd/cm_buf.c
@@ -164,12 +164,10 @@ void buf_ReleaseLocked(cm_buf_t *bp, afs_uint32 writeLocked)
 
         if (bp->refCount == 0 &&
             !(bp->qFlags & CM_BUF_QINLRU)) {
-            osi_QAdd((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
-
-            /* watch for transition from empty to one element */
-            if (!cm_data.buf_freeListEndp)
-                cm_data.buf_freeListEndp = cm_data.buf_freeListp;
-            bp->qFlags |= CM_BUF_QINLRU;
+            osi_QAddH( (osi_queue_t **) &cm_data.buf_freeListp,
+                       (osi_queue_t **) &cm_data.buf_freeListEndp,
+                       &bp->q);
+            _InterlockedOr(&bp->qFlags, CM_BUF_QINLRU);
         }
 
         if (!writeLocked)
@@ -204,12 +202,10 @@ void buf_Release(cm_buf_t *bp)
         lock_ObtainWrite(&buf_globalLock);
         if (bp->refCount == 0 &&
             !(bp->qFlags & CM_BUF_QINLRU)) {
-            osi_QAdd((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
-
-            /* watch for transition from empty to one element */
-            if (!cm_data.buf_freeListEndp)
-                cm_data.buf_freeListEndp = cm_data.buf_freeListp;
-            bp->qFlags |= CM_BUF_QINLRU;
+            osi_QAddH( (osi_queue_t **) &cm_data.buf_freeListp,
+                       (osi_queue_t **) &cm_data.buf_freeListEndp,
+                       &bp->q);
+            _InterlockedOr(&bp->qFlags, CM_BUF_QINLRU);
         }
         lock_ReleaseWrite(&buf_globalLock);
     }
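The two hunks above replace osi_QAdd() plus the hand-rolled "transition from empty to one element" check with osi_QAddH(), which maintains the head and tail pointers in a single call. Below is a minimal sketch of the behaviour such a helper is expected to provide; the sketch_* names and the nextp/prevp fields are illustrative stand-ins, not the real osi_queue definitions.

/* Illustrative sketch only: push an element onto the head of a doubly
 * linked list while keeping the tail pointer current, i.e. the manual
 * empty-to-one-element bookkeeping removed above. */
typedef struct sketch_queue {
    struct sketch_queue *nextp;
    struct sketch_queue *prevp;
} sketch_queue_t;

static void
sketch_QAddH(sketch_queue_t **headpp, sketch_queue_t **tailpp, sketch_queue_t *ep)
{
    ep->prevp = NULL;
    ep->nextp = *headpp;
    if (*headpp)
        (*headpp)->prevp = ep;
    *headpp = ep;
    if (*tailpp == NULL)        /* list was empty; the new element is also the tail */
        *tailpp = ep;
}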
@@ -266,13 +262,13 @@ buf_Sync(int quitOnShutdown)
             lock_ObtainWrite(&buf_globalLock);
 #ifdef DEBUG_REFCOUNT
             if (bp->dirtyp == NULL && bp != cm_data.buf_dirtyListEndp) {
-                osi_Log1(afsd_logp,"buf_IncrSyncer bp 0x%p list corruption",bp);
-                afsi_log("buf_IncrSyncer bp 0x%p list corruption", bp);
+                osi_Log1(afsd_logp,"buf_Sync bp 0x%p list corruption",bp);
+                afsi_log("buf_Sync bp 0x%p list corruption", bp);
             }
 #endif
             *bpp = bp->dirtyp;
             bp->dirtyp = NULL;
-            bp->qFlags &= ~CM_BUF_QINDL;
+            _InterlockedAnd(&bp->qFlags, ~CM_BUF_QINDL);
             if (cm_data.buf_dirtyListp == NULL)
                 cm_data.buf_dirtyListEndp = NULL;
             else if (cm_data.buf_dirtyListEndp == bp)
@@ -327,7 +323,6 @@ void buf_IncrSyncer(long parm)
     long i;
 
     while (buf_ShutdownFlag == 0) {
-
         if (!wasDirty) {
            i = SleepEx(5000, 1);
            if (i != 0)
@@ -484,17 +479,15 @@ long buf_Init(int newFile, cm_buf_ops_t *opsp, afs_uint64 nbuffers)
                 bp->allp = cm_data.buf_allp;
                 cm_data.buf_allp = bp;
 
-                osi_QAdd((osi_queue_t **)&cm_data.buf_freeListp, &bp->q);
-                bp->qFlags |= CM_BUF_QINLRU;
+                osi_QAddH( (osi_queue_t **) &cm_data.buf_freeListp,
+                           (osi_queue_t **) &cm_data.buf_freeListEndp,
+                           &bp->q);
+                _InterlockedOr(&bp->qFlags, CM_BUF_QINLRU);
                 lock_InitializeMutex(&bp->mx, "Buffer mutex", LOCK_HIERARCHY_BUFFER);
 
                 /* grab appropriate number of bytes from aligned zone */
                 bp->datap = data;
 
-                /* setup last buffer pointer */
-                if (i == 0)
-                    cm_data.buf_freeListEndp = bp;
-
                 /* next */
                 bp++;
                 data += cm_data.buf_blockSize;
@@ -514,7 +507,7 @@ long buf_Init(int newFile, cm_buf_ops_t *opsp, afs_uint64 nbuffers)
                 bp->userp = NULL;
                 bp->waitCount = 0;
                 bp->waitRequests = 0;
-                bp->flags &= ~CM_BUF_WAITING;
+                _InterlockedAnd(&bp->flags, ~CM_BUF_WAITING);
                 bp++;
             }
         }
@@ -607,7 +600,7 @@ void buf_WaitIO(cm_scache_t * scp, cm_buf_t *bp)
             osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING already set for 0x%p", bp);
         } else {
             osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING set for 0x%p", bp);
-            bp->flags |= CM_BUF_WAITING;
+            _InterlockedOr(&bp->flags, CM_BUF_WAITING);
             bp->waitCount = bp->waitRequests = 1;
         }
         osi_SleepM((LONG_PTR)bp, &bp->mx);
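Throughout this change, plain bit updates such as bp->flags |= CM_BUF_WAITING become _InterlockedOr()/_InterlockedAnd(). The plain operators compile to a non-atomic read-modify-write, so two threads touching different bits of the same flags word can silently drop each other's update; the MSVC intrinsics perform the whole update atomically. A minimal sketch of the pattern, with sketch_* names standing in for the cm_buf_t field and CM_BUF_* bits:

#include <intrin.h>

/* Illustrative only: set and clear one flag bit atomically so a concurrent
 * writer to another bit of the same word cannot lose this update. */
static volatile long sketch_flags;          /* stands in for bp->flags      */
#define SKETCH_BUF_WAITING 0x01             /* stands in for CM_BUF_WAITING */

static void sketch_set_waiting(void)   { _InterlockedOr(&sketch_flags, SKETCH_BUF_WAITING); }
static void sketch_clear_waiting(void) { _InterlockedAnd(&sketch_flags, ~SKETCH_BUF_WAITING); }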
@@ -619,7 +612,7 @@ void buf_WaitIO(cm_scache_t * scp, cm_buf_t *bp)
         bp->waitCount--;
         if (bp->waitCount == 0) {
             osi_Log1(buf_logp, "buf_WaitIO CM_BUF_WAITING reset for 0x%p", bp);
-            bp->flags &= ~CM_BUF_WAITING;
+            _InterlockedAnd(&bp->flags, ~CM_BUF_WAITING);
             bp->waitRequests = 0;
         }
 
@@ -653,16 +646,16 @@ void buf_WaitIO(cm_scache_t * scp, cm_buf_t *bp)
 /* find a buffer, if any, for a particular file ID and offset.  Assumes
  * that buf_globalLock is write locked when called.
  */
-cm_buf_t *buf_FindLocked(struct cm_scache *scp, osi_hyper_t *offsetp)
+cm_buf_t *buf_FindLocked(struct cm_fid *fidp, osi_hyper_t *offsetp)
 {
     afs_uint32 i;
     cm_buf_t *bp;
 
     lock_AssertAny(&buf_globalLock);
 
-    i = BUF_HASH(&scp->fid, offsetp);
+    i = BUF_HASH(fidp, offsetp);
     for(bp = cm_data.buf_scacheHashTablepp[i]; bp; bp=bp->hashp) {
-        if (cm_FidCmp(&scp->fid, &bp->fid) == 0
+        if (cm_FidCmp(fidp, &bp->fid) == 0
              && offsetp->LowPart == bp->offset.LowPart
              && offsetp->HighPart == bp->offset.HighPart) {
             buf_HoldLocked(bp);
@@ -677,12 +670,12 @@ cm_buf_t *buf_FindLocked(struct cm_scache *scp, osi_hyper_t *offsetp)
 /* find a buffer with offset *offsetp for vnode *scp.  Called
  * with no locks held.
  */
-cm_buf_t *buf_Find(struct cm_scache *scp, osi_hyper_t *offsetp)
+cm_buf_t *buf_Find(struct cm_fid *fidp, osi_hyper_t *offsetp)
 {
     cm_buf_t *bp;
 
     lock_ObtainRead(&buf_globalLock);
-    bp = buf_FindLocked(scp, offsetp);
+    bp = buf_FindLocked(fidp, offsetp);
     lock_ReleaseRead(&buf_globalLock);
 
     return bp;
@@ -692,13 +685,13 @@ cm_buf_t *buf_Find(struct cm_scache *scp, osi_hyper_t *offsetp)
  * that buf_globalLock is write locked when called.  Uses the all buffer
  * list.
  */
-cm_buf_t *buf_FindAllLocked(struct cm_scache *scp, osi_hyper_t *offsetp, afs_uint32 flags)
+cm_buf_t *buf_FindAllLocked(struct cm_fid *fidp, osi_hyper_t *offsetp, afs_uint32 flags)
 {
     cm_buf_t *bp;
 
     if (flags == 0) {
         for(bp = cm_data.buf_allp; bp; bp=bp->allp) {
-            if (cm_FidCmp(&scp->fid, &bp->fid) == 0
+            if (cm_FidCmp(fidp, &bp->fid) == 0
                  && offsetp->LowPart == bp->offset.LowPart
                  && offsetp->HighPart == bp->offset.HighPart) {
                 buf_HoldLocked(bp);
@@ -707,7 +700,7 @@ cm_buf_t *buf_FindAllLocked(struct cm_scache *scp, osi_hyper_t *offsetp, afs_uin
         }
     } else {
         for(bp = cm_data.buf_allp; bp; bp=bp->allp) {
-            if (cm_FidCmp(&scp->fid, &bp->fid) == 0) {
+            if (cm_FidCmp(fidp, &bp->fid) == 0) {
                 char * fileOffset;
 
                 fileOffset = offsetp->QuadPart + cm_data.baseAddress;
@@ -725,12 +718,12 @@ cm_buf_t *buf_FindAllLocked(struct cm_scache *scp, osi_hyper_t *offsetp, afs_uin
 /* find a buffer with offset *offsetp for vnode *scp.  Called
  * with no locks held.  Use the all buffer list.
  */
-cm_buf_t *buf_FindAll(struct cm_scache *scp, osi_hyper_t *offsetp, afs_uint32 flags)
+cm_buf_t *buf_FindAll(struct cm_fid *fidp, osi_hyper_t *offsetp, afs_uint32 flags)
 {
     cm_buf_t *bp;
 
     lock_ObtainRead(&buf_globalLock);
-    bp = buf_FindAllLocked(scp, offsetp, flags);
+    bp = buf_FindAllLocked(fidp, offsetp, flags);
     lock_ReleaseRead(&buf_globalLock);
 
     return bp;
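The buf_Find*() family now takes a cm_fid_t * instead of a cm_scache_t *, so code that holds only a FID (and no scache entry) can search the buffer hash directly; existing callers pass the embedded fid, as the buf_Get() hunk further below shows. A hedged sketch of the two call styles (the helper and variable names are illustrative only):

/* Illustrative call sites; scp, fidp and pageOffset stand for whatever the
 * caller already has in scope. */
static void
sketch_lookup(cm_scache_t *scp, cm_fid_t *fidp, osi_hyper_t *pageOffset)
{
    cm_buf_t *bufp;

    bufp = buf_Find(&scp->fid, pageOffset);    /* caller holding a cm_scache_t */
    if (bufp)
        buf_Release(bufp);

    bufp = buf_Find(fidp, pageOffset);         /* caller holding only a cm_fid_t */
    if (bufp)
        buf_Release(bufp);
}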
@@ -813,8 +806,8 @@ afs_uint32 buf_CleanAsyncLocked(cm_scache_t *scp, cm_buf_t *bp, cm_req_t *reqp,
        if (code == CM_ERROR_NOSUCHFILE || code == CM_ERROR_BADFD || code == CM_ERROR_NOACCESS ||
             code == CM_ERROR_QUOTA || code == CM_ERROR_SPACE || code == CM_ERROR_TOOBIG ||
             code == CM_ERROR_READONLY || code == CM_ERROR_NOSUCHPATH){
-           bp->flags &= ~CM_BUF_DIRTY;
-           bp->flags |= CM_BUF_ERROR;
+           _InterlockedAnd(&bp->flags, ~CM_BUF_DIRTY);
+           _InterlockedOr(&bp->flags, CM_BUF_ERROR);
             bp->dirty_offset = 0;
             bp->dirty_length = 0;
            bp->error = code;
@@ -895,7 +888,7 @@ void buf_Recycle(cm_buf_t *bp)
 
         i = BUF_HASH(&bp->fid, &bp->offset);
         lbpp = &(cm_data.buf_scacheHashTablepp[i]);
-        for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) {
+        for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = tbp->hashp) {
             if (tbp == bp)
                 break;
         }
@@ -920,14 +913,14 @@ void buf_Recycle(cm_buf_t *bp)
         if (nextBp)
             nextBp->fileHashBackp = prevBp;
 
-        bp->qFlags &= ~CM_BUF_QINHASH;
+        _InterlockedAnd(&bp->qFlags, ~CM_BUF_QINHASH);
     }
 
     /* make the fid unrecognizable */
     memset(&bp->fid, 0, sizeof(cm_fid_t));
 
     /* clean up junk flags */
-    bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
+    _InterlockedAnd(&bp->flags, ~(CM_BUF_EOF | CM_BUF_ERROR));
     bp->dataVersion = CM_BUF_VERSION_BAD;      /* unknown so far */
 }
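The loop rewrite in buf_Recycle() above is behaviour-preserving (tbp = *lbpp and tbp = tbp->hashp read the same pointer once lbpp has been advanced); it simply makes the walk read like the standard unlink-by-indirection idiom. For reference, a minimal sketch of that idiom with simplified stand-in types, not the real cm_buf_t:

/* Illustrative sketch: walk a singly linked hash chain keeping a pointer to
 * the link that points at the current element, so unlinking is a single
 * store and the chain head needs no special case. */
typedef struct sketch_buf {
    struct sketch_buf *hashp;
} sketch_buf_t;

static void
sketch_hash_remove(sketch_buf_t **chainpp, sketch_buf_t *bp)
{
    sketch_buf_t **lbpp, *tbp;

    for (lbpp = chainpp, tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = tbp->hashp) {
        if (tbp == bp) {
            *lbpp = bp->hashp;      /* splice bp out of the chain */
            bp->hashp = NULL;
            return;
        }
    }
}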
 
@@ -958,7 +951,7 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *req
         lock_ObtainWrite(&buf_globalLock);
         /* check to see if we lost the race */
         if (scp) {
-            if (bp = buf_FindLocked(scp, offsetp)) {
+            if (bp = buf_FindLocked(&scp->fid, offsetp)) {
                /* Do not call buf_ReleaseLocked() because we
                 * do not want to allow the buffer to be added
                 * to the free list.
@@ -1072,7 +1065,7 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *req
             if (scp) {
                 lock_AssertWrite(&buf_globalLock);
 
-                bp->qFlags |= CM_BUF_QINHASH;
+                _InterlockedOr(&bp->qFlags, CM_BUF_QINHASH);
                 bp->fid = scp->fid;
 #ifdef DEBUG
                bp->scp = scp;
@@ -1090,18 +1083,16 @@ long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *req
                 cm_data.buf_fileHashTablepp[i] = bp;
             }
 
-            /* we should move it from the lru queue.  It better still be there,
+            /* we should remove it from the lru queue.  It better still be there,
              * since we've held the global (big) lock since we found it there.
              */
             osi_assertx(bp->qFlags & CM_BUF_QINLRU,
                          "buf_GetNewLocked: LRU screwup");
 
-            if (cm_data.buf_freeListEndp == bp) {
-                /* we're the last guy in this queue, so maintain it */
-                cm_data.buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
-            }
-            osi_QRemove((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
-            bp->qFlags &= ~CM_BUF_QINLRU;
+            osi_QRemoveHT( (osi_queue_t **) &cm_data.buf_freeListp,
+                           (osi_queue_t **) &cm_data.buf_freeListEndp,
+                           &bp->q);
+            _InterlockedAnd(&bp->qFlags, ~CM_BUF_QINLRU);
 
             /* prepare to return it.  Give it a refcount */
             bp->refCount = 1;
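osi_QRemoveHT() is the removal counterpart of osi_QAddH(): it unlinks the element and fixes up both the head and the tail pointer, replacing the explicit "we're the last guy in this queue" check each call site used to carry. A minimal sketch of the expected behaviour, using the same illustrative sketch_queue_t as above rather than the real osi_queue code:

static void
sketch_QRemoveHT(sketch_queue_t **headpp, sketch_queue_t **tailpp, sketch_queue_t *ep)
{
    if (ep->prevp)
        ep->prevp->nextp = ep->nextp;
    else
        *headpp = ep->nextp;        /* removing the head */

    if (ep->nextp)
        ep->nextp->prevp = ep->prevp;
    else
        *tailpp = ep->prevp;        /* removing the tail */

    ep->nextp = ep->prevp = NULL;
}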
@@ -1164,7 +1155,7 @@ long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf
         buf_ValidateBufQueues();
 #endif /* TESTING */
 
-        bp = buf_Find(scp, &pageOffset);
+        bp = buf_Find(&scp->fid, &pageOffset);
         if (bp) {
             /* lock it and break out */
             lock_ObtainMutex(&bp->mx);
@@ -1214,7 +1205,7 @@ long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf
          * implementation of Readp is cm_BufRead() which simply sets
          * tcount to 0 and returns success.
          */
-        bp->flags |= CM_BUF_READING;
+        _InterlockedOr(&bp->flags, CM_BUF_READING);
         code = (*cm_buf_opsp->Readp)(bp, cm_data.buf_blockSize, &tcount, NULL);
 
 #ifdef DISKCACHE95
@@ -1226,8 +1217,8 @@ long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf
             /* failure or queued */
             if (code != ERROR_IO_PENDING) {
                 bp->error = code;
-                bp->flags |= CM_BUF_ERROR;
-                bp->flags &= ~CM_BUF_READING;
+                _InterlockedOr(&bp->flags, CM_BUF_ERROR);
+                _InterlockedAnd(&bp->flags, ~CM_BUF_READING);
                 if (bp->flags & CM_BUF_WAITING) {
                     osi_Log1(buf_logp, "buf_Get Waking bp 0x%p", bp);
                     osi_Wakeup((LONG_PTR) bp);
@@ -1247,9 +1238,9 @@ long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf
             if (tcount < (unsigned long) cm_data.buf_blockSize) {
                 memset(bp->datap+tcount, 0, cm_data.buf_blockSize - tcount);
                 if (tcount == 0)
-                    bp->flags |= CM_BUF_EOF;
+                    _InterlockedOr(&bp->flags, CM_BUF_EOF);
             }
-            bp->flags &= ~CM_BUF_READING;
+            _InterlockedAnd(&bp->flags, ~CM_BUF_READING);
             if (bp->flags & CM_BUF_WAITING) {
                 osi_Log1(buf_logp, "buf_Get Waking bp 0x%p", bp);
                 osi_Wakeup((LONG_PTR) bp);
@@ -1274,10 +1265,10 @@ long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf
      */
     lock_ObtainWrite(&buf_globalLock);
     if (bp->qFlags & CM_BUF_QINLRU) {
-        if (cm_data.buf_freeListEndp == bp)
-            cm_data.buf_freeListEndp = (cm_buf_t *) osi_QPrev(&bp->q);
-        osi_QRemove((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
-        bp->qFlags &= ~CM_BUF_QINLRU;
+        osi_QRemoveHT( (osi_queue_t **) &cm_data.buf_freeListp,
+                       (osi_queue_t **) &cm_data.buf_freeListEndp,
+                       &bp->q);
+        _InterlockedAnd(&bp->qFlags, ~CM_BUF_QINLRU);
     }
     lock_ReleaseWrite(&buf_globalLock);
 
@@ -1346,11 +1337,14 @@ void buf_CleanWait(cm_scache_t * scp, cm_buf_t *bp, afs_uint32 locked)
  *
  * The buffer must be locked before calling this routine.
  */
-void buf_SetDirty(cm_buf_t *bp, afs_uint32 offset, afs_uint32 length, cm_user_t *userp)
+void buf_SetDirty(cm_buf_t *bp, cm_req_t *reqp, afs_uint32 offset, afs_uint32 length, cm_user_t *userp)
 {
     osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
     osi_assertx(bp->refCount > 0, "cm_buf_t refcount 0");
 
+    if (length == 0)
+        return;
+
     if (bp->flags & CM_BUF_DIRTY) {
 
        osi_Log1(buf_logp, "buf_SetDirty 0x%p already dirty", bp);
@@ -1373,10 +1367,10 @@ void buf_SetDirty(cm_buf_t *bp, afs_uint32 offset, afs_uint32 length, cm_user_t
        osi_Log1(buf_logp, "buf_SetDirty 0x%p", bp);
 
         /* set dirty bit */
-        bp->flags |= CM_BUF_DIRTY;
+        _InterlockedOr(&bp->flags, CM_BUF_DIRTY);
 
         /* and turn off EOF flag, since it has associated data now */
-        bp->flags &= ~CM_BUF_EOF;
+        _InterlockedAnd(&bp->flags, ~CM_BUF_EOF);
 
         bp->dirty_offset = offset;
         bp->dirty_length = length;
@@ -1401,7 +1395,7 @@ void buf_SetDirty(cm_buf_t *bp, afs_uint32 offset, afs_uint32 length, cm_user_t
                 cm_data.buf_dirtyListEndp = bp;
             }
             bp->dirtyp = NULL;
-            bp->qFlags |= CM_BUF_QINDL;
+            _InterlockedOr(&bp->qFlags, CM_BUF_QINDL);
         }
         lock_ReleaseWrite(&buf_globalLock);
     }
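buf_SetDirty() now takes the request context as an additional parameter, and a zero-length range returns before the buffer is marked dirty or queued to the dirty list. A hedged sketch of an adjusted caller; cm_InitReq() is the usual way a cm_req_t is set up in this client, and the surrounding names are illustrative:

/* Illustrative caller only: a zero-length update is now a no-op inside
 * buf_SetDirty(), so callers need no guard of their own. */
static void
sketch_mark_dirty(cm_buf_t *bufp, cm_user_t *userp, afs_uint32 offset, afs_uint32 length)
{
    cm_req_t req;

    cm_InitReq(&req);
    buf_SetDirty(bufp, &req, offset, length, userp);   /* no-op when length == 0 */
}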
@@ -1591,7 +1585,7 @@ long buf_Truncate(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp,
              */
             if (LargeIntegerLessThanOrEqualTo(*sizep, bufp->offset)) {
                 /* truncating the entire page */
-                bufp->flags &= ~CM_BUF_DIRTY;
+                _InterlockedAnd(&bufp->flags, ~CM_BUF_DIRTY);
                 bufp->dirty_offset = 0;
                 bufp->dirty_length = 0;
                 bufp->dataVersion = CM_BUF_VERSION_BAD;        /* known bad */
@@ -1682,8 +1676,8 @@ long buf_FlushCleanPages(cm_scache_t *scp, cm_user_t *userp, cm_req_t *reqp)
                  * page therefore contains data that can no longer be stored.
                  */
                 lock_ObtainMutex(&bp->mx);
-                bp->flags &= ~CM_BUF_DIRTY;
-                bp->flags |= CM_BUF_ERROR;
+                _InterlockedAnd(&bp->flags, ~CM_BUF_DIRTY);
+                _InterlockedOr(&bp->flags, CM_BUF_ERROR);
                 bp->error = CM_ERROR_BADFD;
                 bp->dirty_offset = 0;
                 bp->dirty_length = 0;
@@ -1813,8 +1807,8 @@ long buf_CleanVnode(struct cm_scache *scp, cm_user_t *userp, cm_req_t *reqp)
                      * Do not waste the time attempting to store to
                      * the file server when we know it will fail.
                      */
-                    bp->flags &= ~CM_BUF_DIRTY;
-                    bp->flags |= CM_BUF_ERROR;
+                    _InterlockedAnd(&bp->flags, ~CM_BUF_DIRTY);
+                    _InterlockedOr(&bp->flags, CM_BUF_ERROR);
                     bp->dirty_offset = 0;
                     bp->dirty_length = 0;
                     bp->error = code;
@@ -2019,11 +2013,11 @@ long buf_CleanDirtyBuffers(cm_scache_t *scp)
        if (!cm_FidCmp(fidp, &bp->fid) && (bp->flags & CM_BUF_DIRTY)) {
             buf_Hold(bp);
            lock_ObtainMutex(&bp->mx);
-           bp->cmFlags &= ~CM_BUF_CMSTORING;
-           bp->flags &= ~CM_BUF_DIRTY;
+           _InterlockedAnd(&bp->cmFlags, ~CM_BUF_CMSTORING);
+           _InterlockedAnd(&bp->flags, ~CM_BUF_DIRTY);
             bp->dirty_offset = 0;
             bp->dirty_length = 0;
-           bp->flags |= CM_BUF_ERROR;
+           _InterlockedOr(&bp->flags, CM_BUF_ERROR);
            bp->error = VNOVNODE;
            bp->dataVersion = CM_BUF_VERSION_BAD; /* bad */
            bp->dirtyCounter++;