/* set this to 1 when we are terminating to prevent access attempts */
static int buf_ShutdownFlag = 0;
+#ifdef DEBUG_REFCOUNT
+void buf_HoldLockedDbg(cm_buf_t *bp, char *file, long line)
+#else
void buf_HoldLocked(cm_buf_t *bp)
+#endif
{
- osi_assert(bp->magic == CM_BUF_MAGIC);
- bp->refCount++;
+ afs_int32 refCount;
+
+ osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
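+    /* the reference count is maintained with interlocked operations
+     * so that it can be adjusted safely by holders of either the read
+     * or the write buf_globalLock.
+     */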
+ refCount = InterlockedIncrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+ osi_Log2(afsd_logp,"buf_HoldLocked bp 0x%p ref %d",bp, refCount);
+ afsi_log("%s:%d buf_HoldLocked bp 0x%p, ref %d", file, line, bp, refCount);
+#endif
}
/* hold a reference to an already held buffer */
+#ifdef DEBUG_REFCOUNT
+void buf_HoldDbg(cm_buf_t *bp, char *file, long line)
+#else
void buf_Hold(cm_buf_t *bp)
+#endif
{
- lock_ObtainWrite(&buf_globalLock);
- buf_HoldLocked(bp);
- lock_ReleaseWrite(&buf_globalLock);
+ afs_int32 refCount;
+
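+    /* a read lock is sufficient here because the reference count is
+     * updated with an interlocked operation.
+     */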
+ lock_ObtainRead(&buf_globalLock);
+ osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
+ refCount = InterlockedIncrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+ osi_Log2(afsd_logp,"buf_Hold bp 0x%p ref %d",bp, refCount);
+ afsi_log("%s:%d buf_Hold bp 0x%p, ref %d", file, line, bp, refCount);
+#endif
+ lock_ReleaseRead(&buf_globalLock);
}
/* code to drop reference count while holding buf_globalLock */
-void buf_ReleaseLocked(cm_buf_t *bp)
+#ifdef DEBUG_REFCOUNT
+void buf_ReleaseLockedDbg(cm_buf_t *bp, afs_uint32 writeLocked, char *file, long line)
+#else
+void buf_ReleaseLocked(cm_buf_t *bp, afs_uint32 writeLocked)
+#endif
{
+ afs_int32 refCount;
+
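+    /* the caller declares how buf_globalLock is held; assert it so
+     * that a mismatch is caught immediately.
+     */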
+ if (writeLocked)
+ lock_AssertWrite(&buf_globalLock);
+ else
+ lock_AssertRead(&buf_globalLock);
+
/* ensure that we're in the LRU queue if our ref count is 0 */
- osi_assert(bp->magic == CM_BUF_MAGIC);
+ osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
+
+ refCount = InterlockedDecrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+ osi_Log3(afsd_logp,"buf_ReleaseLocked %s bp 0x%p ref %d",writeLocked?"write":"read", bp, refCount);
+ afsi_log("%s:%d buf_ReleaseLocked %s bp 0x%p, ref %d", file, line, writeLocked?"write":"read", bp, refCount);
+#endif
#ifdef DEBUG
- if (bp->refCount == 0)
+ if (refCount < 0)
osi_panic("buf refcount 0",__FILE__,__LINE__);;
#else
- osi_assert(bp->refCount > 0);
+    osi_assertx(refCount >= 0, "cm_buf_t refCount underflow");
#endif
- if (--bp->refCount == 0) {
- if (!(bp->flags & CM_BUF_INLRU)) {
+ if (refCount == 0) {
+ /*
+ * If we are read locked there could be a race condition
+ * with buf_Find() so we must obtain a write lock and
+ * double check that the refCount is actually zero
+ * before we remove the buffer from the LRU queue.
+ */
+ if (!writeLocked)
+ lock_ConvertRToW(&buf_globalLock);
+
+ if (bp->refCount == 0 &&
+ !(bp->flags & CM_BUF_INLRU)) {
osi_QAdd((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
/* watch for transition from empty to one element */
cm_data.buf_freeListEndp = cm_data.buf_freeListp;
bp->flags |= CM_BUF_INLRU;
}
+
+ if (!writeLocked)
+ lock_ConvertWToR(&buf_globalLock);
}
}
/* release a buffer. Buffer must be referenced, but unlocked. */
+#ifdef DEBUG_REFCOUNT
+void buf_ReleaseDbg(cm_buf_t *bp, char *file, long line)
+#else
void buf_Release(cm_buf_t *bp)
+#endif
{
- lock_ObtainWrite(&buf_globalLock);
- buf_ReleaseLocked(bp);
- lock_ReleaseWrite(&buf_globalLock);
+ afs_int32 refCount;
+
+ /* ensure that we're in the LRU queue if our ref count is 0 */
+ osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
+
+ refCount = InterlockedDecrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+ osi_Log2(afsd_logp,"buf_Release bp 0x%p ref %d", bp, refCount);
+    afsi_log("%s:%d buf_Release bp 0x%p, ref %d", file, line, bp, refCount);
+#endif
+#ifdef DEBUG
+ if (refCount < 0)
+        osi_panic("buf refcount 0",__FILE__,__LINE__);
+#else
+    osi_assertx(refCount >= 0, "cm_buf_t refCount underflow");
+#endif
+ if (refCount == 0) {
+ lock_ObtainWrite(&buf_globalLock);
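+        /* the buffer may have been re-held by buf_Find() before we
+         * acquired the write lock, so double check the refCount
+         * before adding the buffer to the LRU queue.
+         */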
+ if (bp->refCount == 0 &&
+ !(bp->flags & CM_BUF_INLRU)) {
+ osi_QAdd((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
+
+ /* watch for transition from empty to one element */
+ if (!cm_data.buf_freeListEndp)
+ cm_data.buf_freeListEndp = cm_data.buf_freeListp;
+ bp->flags |= CM_BUF_INLRU;
+ }
+ lock_ReleaseWrite(&buf_globalLock);
+ }
}
-/* incremental sync daemon. Writes all dirty buffers every 5000 ms */
-void buf_IncrSyncer(long parm)
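+/* walk the dirty buffer list and write each dirty buffer back to the
+ * file server.  If quitOnShutdown is set, stop as soon as
+ * buf_ShutdownFlag is raised.  Returns non-zero if any buffer was
+ * found dirty.
+ */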
+long
+buf_Sync(int quitOnShutdown)
{
- cm_buf_t **bpp, *bp;
- long i; /* counter */
- long wasDirty = 0;
+ cm_buf_t **bpp, *bp, *prevbp;
+ afs_uint32 wasDirty = 0;
cm_req_t req;
- while (buf_ShutdownFlag == 0) {
- if (!wasDirty) {
- i = SleepEx(5000, 1);
- if (i != 0) continue;
- }
+ /* go through all of the dirty buffers */
+ lock_ObtainRead(&buf_globalLock);
+ for (bpp = &cm_data.buf_dirtyListp, prevbp = NULL; bp = *bpp; ) {
+ if (quitOnShutdown && buf_ShutdownFlag)
+ break;
- wasDirty = 0;
+ lock_ReleaseRead(&buf_globalLock);
+ /* all dirty buffers are held when they are added to the
+ * dirty list. No need for an additional hold.
+ */
+ lock_ObtainMutex(&bp->mx);
- /* now go through our percentage of the buffers */
- for (bpp = &cm_data.buf_dirtyListp; bp = *bpp; ) {
+ if (bp->flags & CM_BUF_DIRTY && !(bp->flags & CM_BUF_REDIR)) {
+ /* start cleaning the buffer; don't touch log pages since
+ * the log code counts on knowing exactly who is writing
+ * a log page at any given instant.
+ *
+ * only attempt to write the buffer if the volume might
+ * be online.
+ */
+ afs_uint32 dirty;
+ cm_volume_t * volp;
- /* all dirty buffers are held when they are added to the
- * dirty list. No need for an additional hold.
- */
+ volp = cm_GetVolumeByFID(&bp->fid);
+ switch (cm_GetVolumeStatus(volp, bp->fid.volume)) {
+ case vl_online:
+ case vl_unknown:
+ cm_InitReq(&req);
+ req.flags |= CM_REQ_NORETRY;
+ buf_CleanAsyncLocked(bp, &req, &dirty);
+ wasDirty |= dirty;
+ }
+ cm_PutVolume(volp);
+ }
- if (bp->flags & CM_BUF_DIRTY) {
- /* start cleaning the buffer; don't touch log pages since
- * the log code counts on knowing exactly who is writing
- * a log page at any given instant.
- */
- cm_InitReq(&req);
- req.flags |= CM_REQ_NORETRY;
- wasDirty |= buf_CleanAsync(bp, &req);
- }
+        /* the buffer may or may not have been dirty, and if dirty it
+         * may or may not have been cleaned successfully.  Check the
+         * dirty flag again.
+         */
+ if (!(bp->flags & CM_BUF_DIRTY)) {
+ /* remove the buffer from the dirty list */
+ lock_ObtainWrite(&buf_globalLock);
+#ifdef DEBUG_REFCOUNT
+ if (bp->dirtyp == NULL && bp != cm_data.buf_dirtyListEndp) {
+                osi_Log1(afsd_logp,"buf_Sync bp 0x%p list corruption",bp);
+                afsi_log("buf_Sync bp 0x%p list corruption", bp);
+ }
+#endif
+ *bpp = bp->dirtyp;
+ bp->dirtyp = NULL;
+ bp->flags &= ~CM_BUF_INDL;
+ if (cm_data.buf_dirtyListp == NULL)
+ cm_data.buf_dirtyListEndp = NULL;
+ else if (cm_data.buf_dirtyListEndp == bp)
+ cm_data.buf_dirtyListEndp = prevbp;
+ buf_ReleaseLocked(bp, TRUE);
+ lock_ConvertWToR(&buf_globalLock);
+ } else {
+ if (buf_ShutdownFlag) {
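+                /* identify the cell and volume of each buffer that is
+                 * still dirty at shutdown so the potential data loss
+                 * can be recorded in the event log below.
+                 */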
+ cm_cell_t *cellp;
+ cm_volume_t *volp;
+ char volstr[VL_MAXNAMELEN+12]="";
+ char *ext = "";
+
+ volp = cm_GetVolumeByFID(&bp->fid);
+ if (volp) {
+ cellp = volp->cellp;
+ if (bp->fid.volume == volp->vol[RWVOL].ID)
+ ext = "";
+ else if (bp->fid.volume == volp->vol[ROVOL].ID)
+ ext = ".readonly";
+ else if (bp->fid.volume == volp->vol[BACKVOL].ID)
+ ext = ".backup";
+ else
+ ext = ".nomatch";
+ snprintf(volstr, sizeof(volstr), "%s%s", volp->namep, ext);
+ } else {
+ cellp = cm_FindCellByID(bp->fid.cell, CM_FLAG_NOPROBE);
+ snprintf(volstr, sizeof(volstr), "%u", bp->fid.volume);
+ }
- /* the buffer may or may not have been dirty
- * and if dirty may or may not have been cleaned
- * successfully. check the dirty flag again.
- */
- if (!(bp->flags & CM_BUF_DIRTY)) {
- lock_ObtainMutex(&bp->mx);
- if (!(bp->flags & CM_BUF_DIRTY)) {
- /* remove the buffer from the dirty list */
- lock_ObtainWrite(&buf_globalLock);
- *bpp = bp->dirtyp;
- bp->dirtyp = NULL;
- if (cm_data.buf_dirtyListp == NULL)
- cm_data.buf_dirtyListEndp = NULL;
- buf_ReleaseLocked(bp);
- lock_ReleaseWrite(&buf_globalLock);
- } else {
- /* advance the pointer so we don't loop forever */
- bpp = &bp->dirtyp;
- }
- lock_ReleaseMutex(&bp->mx);
- } else {
- /* advance the pointer so we don't loop forever */
- bpp = &bp->dirtyp;
- }
- } /* for loop over a bunch of buffers */
- } /* whole daemon's while loop */
+ LogEvent(EVENTLOG_INFORMATION_TYPE, MSG_DIRTY_BUFFER_AT_SHUTDOWN,
+                     cellp ? cellp->name : "", volstr, bp->fid.vnode, bp->fid.unique,
+ bp->offset.QuadPart+bp->dirty_offset, bp->dirty_length);
+ }
+
+ /* advance the pointer so we don't loop forever */
+ lock_ObtainRead(&buf_globalLock);
+ bpp = &bp->dirtyp;
+ prevbp = bp;
+ }
+ lock_ReleaseMutex(&bp->mx);
+ } /* for loop over a bunch of buffers */
+ lock_ReleaseRead(&buf_globalLock);
+
+ return wasDirty;
+}
+
+/* incremental sync daemon. Writes all dirty buffers every 5000 ms */
+void buf_IncrSyncer(long parm)
+{
+ long wasDirty = 0;
+ long i;
+
+ while (buf_ShutdownFlag == 0) {
+
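+        /* if the last pass wrote nothing, sleep alertably for the
+         * full five second interval; otherwise nap briefly before
+         * syncing again.
+         */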
+ if (!wasDirty) {
+ i = SleepEx(5000, 1);
+ if (i != 0)
+ continue;
+ } else {
+ Sleep(50);
+ }
+
+ wasDirty = buf_Sync(1);
+ } /* whole daemon's while loop */
}
long
}
void buf_Shutdown(void)
-{
+{
+ /* disable the buf_IncrSyncer() threads */
buf_ShutdownFlag = 1;
+
+ /* then force all dirty buffers to the file servers */
+ buf_Sync(0);
}
/* initialize the buffer package; called with no locks
if (osi_Once(&once)) {
/* initialize global locks */
- lock_InitializeRWLock(&buf_globalLock, "Global buffer lock");
+ lock_InitializeRWLock(&buf_globalLock, "Global buffer lock", LOCK_HIERARCHY_BUF_GLOBAL);
if ( newFile ) {
/* remember this for those who want to reset it */
cm_data.buf_allp = NULL;
for (i=0; i<cm_data.buf_nbuffers; i++) {
- osi_assert(bp >= cm_data.bufHeaderBaseAddress && bp < (cm_buf_t *)cm_data.bufDataBaseAddress);
- osi_assert(data >= cm_data.bufDataBaseAddress && data < cm_data.bufEndOfData);
+ osi_assertx(bp >= cm_data.bufHeaderBaseAddress && bp < (cm_buf_t *)cm_data.bufDataBaseAddress,
+ "invalid cm_buf_t address");
+ osi_assertx(data >= cm_data.bufDataBaseAddress && data < cm_data.bufEndOfData,
+ "invalid cm_buf_t data address");
/* allocate and zero some storage */
memset(bp, 0, sizeof(cm_buf_t));
osi_QAdd((osi_queue_t **)&cm_data.buf_freeListp, &bp->q);
bp->flags |= CM_BUF_INLRU;
- lock_InitializeMutex(&bp->mx, "Buffer mutex");
+ lock_InitializeMutex(&bp->mx, "Buffer mutex", LOCK_HIERARCHY_BUFFER);
/* grab appropriate number of bytes from aligned zone */
bp->datap = data;
data = cm_data.bufDataBaseAddress;
for (i=0; i<cm_data.buf_nbuffers; i++) {
- lock_InitializeMutex(&bp->mx, "Buffer mutex");
+ lock_InitializeMutex(&bp->mx, "Buffer mutex", LOCK_HIERARCHY_BUFFER);
bp->userp = NULL;
bp->waitCount = 0;
bp->waitRequests = 0;
int release = 0;
if (scp)
- osi_assert(scp->magic == CM_SCACHE_MAGIC);
- osi_assert(bp->magic == CM_BUF_MAGIC);
+ osi_assertx(scp->magic == CM_SCACHE_MAGIC, "invalid cm_scache_t magic");
+ osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
while (1) {
/* if no IO is happening, we're done */
}
osi_SleepM((LONG_PTR)bp, &bp->mx);
- smb_UpdateServerPriority();
+ cm_UpdateServerPriority();
lock_ObtainMutex(&bp->mx);
osi_Log1(buf_logp, "buf_WaitIO conflict wait done for 0x%p", bp);
release = 1;
}
if ( scp ) {
- lock_ObtainMutex(&scp->mx);
+ lock_ObtainRead(&scp->rw);
if (scp->flags & CM_SCACHEFLAG_WAITING) {
osi_Log1(buf_logp, "buf_WaitIO waking scp 0x%p", scp);
osi_Wakeup((LONG_PTR)&scp->flags);
}
- lock_ReleaseMutex(&scp->mx);
+ lock_ReleaseRead(&scp->rw);
}
}
{
cm_buf_t *bp;
- lock_ObtainWrite(&buf_globalLock);
+ lock_ObtainRead(&buf_globalLock);
bp = buf_FindLocked(scp, offsetp);
- lock_ReleaseWrite(&buf_globalLock);
+ lock_ReleaseRead(&buf_globalLock);
+
+ return bp;
+}
+
+/* find a buffer, if any, for a particular file ID and offset. Assumes
+ * that buf_globalLock is at least read locked when called.  Uses the
+ * all buffer list.
+ */
+cm_buf_t *buf_FindAllLocked(struct cm_scache *scp, osi_hyper_t *offsetp, afs_uint32 flags)
+{
+ cm_buf_t *bp;
+
+ if (flags == 0) {
+ for(bp = cm_data.buf_allp; bp; bp=bp->allp) {
+ if (cm_FidCmp(&scp->fid, &bp->fid) == 0
+ && offsetp->LowPart == bp->offset.LowPart
+ && offsetp->HighPart == bp->offset.HighPart) {
+ buf_HoldLocked(bp);
+ break;
+ }
+ }
+ } else {
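+        /* when flags are non-zero, *offsetp is interpreted as the
+         * offset of the buffer's data from cm_data.baseAddress rather
+         * than as a file offset.
+         */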
+ for(bp = cm_data.buf_allp; bp; bp=bp->allp) {
+ if (cm_FidCmp(&scp->fid, &bp->fid) == 0) {
+ char * fileOffset;
+
+ fileOffset = offsetp->QuadPart + cm_data.baseAddress;
+ if (fileOffset == bp->datap) {
+ buf_HoldLocked(bp);
+ break;
+ }
+ }
+ }
+ }
+ /* return whatever we found, if anything */
+ return bp;
+}
+
+/* find a buffer with offset *offsetp for vnode *scp. Called
+ * with no locks held. Use the all buffer list.
+ */
+cm_buf_t *buf_FindAll(struct cm_scache *scp, osi_hyper_t *offsetp, afs_uint32 flags)
+{
+ cm_buf_t *bp;
+
+ lock_ObtainRead(&buf_globalLock);
+ bp = buf_FindAllLocked(scp, offsetp, flags);
+ lock_ReleaseRead(&buf_globalLock);
return bp;
}
*
* Returns non-zero if the buffer was dirty.
*/
-long buf_CleanAsyncLocked(cm_buf_t *bp, cm_req_t *reqp)
+afs_uint32 buf_CleanAsyncLocked(cm_buf_t *bp, cm_req_t *reqp, afs_uint32 *pisdirty)
{
- long code = 0;
- long isdirty = 0;
+ afs_uint32 code = 0;
+ afs_uint32 isdirty = 0;
cm_scache_t * scp = NULL;
osi_hyper_t offset;
- osi_assert(bp->magic == CM_BUF_MAGIC);
+ osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
while ((bp->flags & CM_BUF_DIRTY) == CM_BUF_DIRTY) {
isdirty = 1;
offset = bp->offset;
LargeIntegerAdd(offset, ConvertLongToLargeInteger(bp->dirty_offset));
- code = (*cm_buf_opsp->Writep)(scp, &offset, bp->dirty_length, 0, bp->userp, reqp);
+ code = (*cm_buf_opsp->Writep)(scp, &offset,
+#if 1
+ /* we might as well try to write all of the contiguous
+ * dirty buffers in one RPC
+ */
+ cm_chunkSize,
+#else
+ bp->dirty_length,
+#endif
+ 0, bp->userp, reqp);
osi_Log3(buf_logp, "buf_CleanAsyncLocked I/O on scp 0x%p buf 0x%p, done=%d", scp, bp, code);
cm_ReleaseSCache(scp);
* because we aren't going to be able to write this data to the file
* server.
*/
- if (code == CM_ERROR_NOSUCHFILE || code == CM_ERROR_BADFD){
+ if (code == CM_ERROR_NOSUCHFILE || code == CM_ERROR_BADFD || code == CM_ERROR_NOACCESS ||
+ code == CM_ERROR_QUOTA || code == CM_ERROR_SPACE || code == CM_ERROR_TOOBIG ||
+ code == CM_ERROR_READONLY || code == CM_ERROR_NOSUCHPATH){
bp->flags &= ~CM_BUF_DIRTY;
bp->flags |= CM_BUF_ERROR;
bp->dirty_offset = 0;
bp->dirty_length = 0;
bp->error = code;
- bp->dataVersion = -1; /* bad */
+ bp->dataVersion = CM_BUF_VERSION_BAD;
bp->dirtyCounter++;
+ break;
}
#ifdef DISKCACHE95
*/
if (reqp->flags & CM_REQ_NORETRY)
break;
- };
-
- if (!(bp->flags & CM_BUF_DIRTY)) {
- /* remove buffer from dirty buffer queue */
+ /* Ditto if the hardDeadTimeout or idleTimeout was reached */
+ if (code == CM_ERROR_TIMEDOUT || code == CM_ERROR_ALLDOWN ||
+ code == CM_ERROR_ALLBUSY || code == CM_ERROR_ALLOFFLINE ||
+ code == CM_ERROR_CLOCKSKEW) {
+ break;
+ }
}
- /* do logging after call to GetLastError, or else */
-
/* if someone was waiting for the I/O that just completed or failed,
* wake them up.
*/
osi_Log1(buf_logp, "buf_WaitIO Waking bp 0x%p", bp);
osi_Wakeup((LONG_PTR) bp);
}
- return isdirty;
+
+ if (pisdirty)
+ *pisdirty = isdirty;
+
+ return code;
}
/* Called with a zero-ref count buffer and with the buf_globalLock write locked.
*/
void buf_Recycle(cm_buf_t *bp)
{
- int i;
+ afs_uint32 i;
cm_buf_t **lbpp;
cm_buf_t *tbp;
cm_buf_t *prevBp, *nextBp;
- osi_assert(bp->magic == CM_BUF_MAGIC);
+ osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
/* if we get here, we know that the buffer still has a 0 ref count,
* and that it is clean and has no currently pending I/O. This is
osi_Log3( buf_logp, "buf_Recycle recycles 0x%p, off 0x%x:%08x",
bp, bp->offset.HighPart, bp->offset.LowPart);
- osi_assert(bp->refCount == 0);
- osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)));
+ osi_assertx(bp->refCount == 0, "cm_buf_t refcount != 0");
+ osi_assertx(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING | CM_BUF_DIRTY)),
+ "incorrect cm_buf_t flags");
lock_AssertWrite(&buf_globalLock);
if (bp->flags & CM_BUF_INHASH) {
i = BUF_HASH(&bp->fid, &bp->offset);
lbpp = &(cm_data.buf_scacheHashTablepp[i]);
for(tbp = *lbpp; tbp; lbpp = &tbp->hashp, tbp = *lbpp) {
- if (tbp == bp) break;
+ if (tbp == bp)
+ break;
}
/* we better find it */
osi_assertx(tbp != NULL, "buf_Recycle: hash table screwup");
*lbpp = bp->hashp; /* hash out */
+ bp->hashp = NULL;
/* Remove from file hash */
i = BUF_FILEHASH(&bp->fid);
prevBp = bp->fileHashBackp;
+ bp->fileHashBackp = NULL;
nextBp = bp->fileHashp;
+ bp->fileHashp = NULL;
if (prevBp)
prevBp->fileHashp = nextBp;
else
bp->flags &= ~CM_BUF_INHASH;
}
- /* bump the soft reference counter now, to invalidate softRefs; no
- * wakeup is required since people don't sleep waiting for this
- * counter to change.
- */
- bp->idCounter++;
-
/* make the fid unrecognizable */
memset(&bp->fid, 0, sizeof(cm_fid_t));
+
+ /* clean up junk flags */
+ bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
+ bp->dataVersion = CM_BUF_VERSION_BAD; /* unknown so far */
}
/* recycle a buffer, removing it from the free list, hashing in its new identity
* space from the buffer pool. In that case, the buffer will be returned
* without being hashed into the hash table.
*/
-long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
+long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf_t **bufpp)
{
- cm_buf_t *bp; /* buffer we're dealing with */
+ cm_buf_t *bp; /* buffer we're dealing with */
cm_buf_t *nextBp; /* next buffer in file hash chain */
- long i; /* temp */
- cm_req_t req;
-
- cm_InitReq(&req); /* just in case */
+ afs_uint32 i; /* temp */
#ifdef TESTING
buf_ValidateBufQueues();
while(1) {
retry:
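+        /* scp->bufCreateLock is held for read across buffer creation;
+         * the truncate path asserts it for write, so truncation cannot
+         * proceed while new buffers are being added for the file.
+         */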
+ lock_ObtainRead(&scp->bufCreateLock);
lock_ObtainWrite(&buf_globalLock);
/* check to see if we lost the race */
if (scp) {
* do not want to allow the buffer to be added
* to the free list.
*/
- bp->refCount--;
+ afs_int32 refCount = InterlockedDecrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+ osi_Log2(afsd_logp,"buf_GetNewLocked bp 0x%p ref %d", bp, refCount);
+ afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, refCount);
+#endif
lock_ReleaseWrite(&buf_globalLock);
+ lock_ReleaseRead(&scp->bufCreateLock);
return CM_BUF_EXISTS;
}
}
if (!cm_data.buf_freeListEndp)
{
lock_ReleaseWrite(&buf_globalLock);
+ lock_ReleaseRead(&scp->bufCreateLock);
osi_Log0(afsd_logp, "buf_GetNewLocked: Free Buffer List is empty - sleeping 200ms");
Sleep(200);
goto retry;
* we hold the global lock.
*/
+ /* Don't recycle a buffer held by the redirector. */
+ if (bp->flags & CM_BUF_REDIR)
+ continue;
+
/* don't recycle someone in our own chunk */
if (!cm_FidCmp(&bp->fid, &scp->fid)
&& (bp->offset.LowPart & (-cm_chunkSize))
*/
buf_HoldLocked(bp);
lock_ReleaseWrite(&buf_globalLock);
+ lock_ReleaseRead(&scp->bufCreateLock);
/* grab required lock and clean; this only
* starts the I/O. By the time we're back,
* have the WRITING flag set, so we won't get
* back here.
*/
- buf_CleanAsync(bp, &req);
+ buf_CleanAsync(bp, reqp, NULL);
/* now put it back and go around again */
buf_Release(bp);
*/
buf_Recycle(bp);
- /* clean up junk flags */
- bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
- bp->dataVersion = -1; /* unknown so far */
-
/* now hash in as our new buffer, and give it the
* appropriate label, if requested.
*/
osi_QRemove((osi_queue_t **) &cm_data.buf_freeListp, &bp->q);
bp->flags &= ~CM_BUF_INLRU;
+ /* prepare to return it. Give it a refcount */
+ bp->refCount = 1;
+#ifdef DEBUG_REFCOUNT
+ osi_Log2(afsd_logp,"buf_GetNewLocked bp 0x%p ref %d", bp, 1);
+ afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, 1);
+#endif
/* grab the mutex so that people don't use it
* before the caller fills it with data. Again, no one
* should have been able to get to this dude to lock it.
*/
if (!lock_TryMutex(&bp->mx)) {
osi_Log2(afsd_logp, "buf_GetNewLocked bp 0x%p cannot be mutex locked. refCount %d should be 0",
- bp, bp->refCount);
+ bp, bp->refCount);
osi_panic("buf_GetNewLocked: TryMutex failed",__FILE__,__LINE__);
}
- /* prepare to return it. Give it a refcount */
- bp->refCount = 1;
-
lock_ReleaseWrite(&buf_globalLock);
+ lock_ReleaseRead(&scp->bufCreateLock);
+
*bufpp = bp;
#ifdef TESTING
return 0;
} /* for all buffers in lru queue */
lock_ReleaseWrite(&buf_globalLock);
+ lock_ReleaseRead(&scp->bufCreateLock);
osi_Log0(afsd_logp, "buf_GetNewLocked: Free Buffer List has no buffers with a zero refcount - sleeping 100ms");
Sleep(100); /* give some time for a buffer to be freed */
} /* while loop over everything */
/* get a page, returning it held but unlocked. Doesn't fill in the page
* with I/O, since we're going to write the whole thing new.
*/
-long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
+long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf_t **bufpp)
{
cm_buf_t *bp;
long code;
}
/* otherwise, we have to create a page */
- code = buf_GetNewLocked(scp, &pageOffset, &bp);
+ code = buf_GetNewLocked(scp, &pageOffset, reqp, &bp);
/* check if the buffer was created in a race condition branch.
* If so, go around so we can hold a reference to it.
/* get a page, returning it held but unlocked. Make sure it is complete */
/* The scp must be unlocked when passed to this function */
-long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
+long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf_t **bufpp)
{
cm_buf_t *bp;
long code;
}
/* otherwise, we have to create a page */
- code = buf_GetNewLocked(scp, &pageOffset, &bp);
+ code = buf_GetNewLocked(scp, &pageOffset, reqp, &bp);
/* bp->mx is now held */
/* check if the buffer was created in a race condition branch.
*/
if (created) {
/* load the page; freshly created pages should be idle */
- osi_assert(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)));
+ osi_assertx(!(bp->flags & (CM_BUF_READING | CM_BUF_WRITING)), "incorrect cm_buf_t flags");
/* start the I/O; may drop lock */
bp->flags |= CM_BUF_READING;
* has been invalidate (by having its DV stomped upon), then
* count it as free, since it isn't really being utilized.
*/
- if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion <= 0)
+ if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion == CM_BUF_VERSION_BAD)
count++;
}
lock_ReleaseRead(&buf_globalLock);
}
/* clean a buffer synchronously */
-long buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp)
+afs_uint32 buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp, afs_uint32 *pisdirty)
{
long code;
- osi_assert(bp->magic == CM_BUF_MAGIC);
+ osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
lock_ObtainMutex(&bp->mx);
- code = buf_CleanAsyncLocked(bp, reqp);
+ code = buf_CleanAsyncLocked(bp, reqp, pisdirty);
lock_ReleaseMutex(&bp->mx);
return code;
}
/* wait for a buffer's cleaning to finish */
-void buf_CleanWait(cm_scache_t * scp, cm_buf_t *bp)
+void buf_CleanWait(cm_scache_t * scp, cm_buf_t *bp, afs_uint32 locked)
{
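+    /* when 'locked' is set the caller already holds bp->mx */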
- osi_assert(bp->magic == CM_BUF_MAGIC);
+ osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
- lock_ObtainMutex(&bp->mx);
+ if (!locked)
+ lock_ObtainMutex(&bp->mx);
if (bp->flags & CM_BUF_WRITING) {
buf_WaitIO(scp, bp);
}
- lock_ReleaseMutex(&bp->mx);
+ if (!locked)
+ lock_ReleaseMutex(&bp->mx);
}
/* set the dirty flag on a buffer, and set associated write-ahead log,
*
* The buffer must be locked before calling this routine.
*/
-void buf_SetDirty(cm_buf_t *bp, afs_uint32 offset, afs_uint32 length)
+void buf_SetDirty(cm_buf_t *bp, afs_uint32 offset, afs_uint32 length, cm_user_t *userp)
{
- osi_assert(bp->magic == CM_BUF_MAGIC);
- osi_assert(bp->refCount > 0);
+ osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
+ osi_assertx(bp->refCount > 0, "cm_buf_t refcount 0");
- lock_ObtainWrite(&buf_globalLock);
if (bp->flags & CM_BUF_DIRTY) {
osi_Log1(buf_logp, "buf_SetDirty 0x%p already dirty", bp);
* elsewhere, never add to the dirty list if the buffer is
* already there.
*/
- if (bp->dirtyp == NULL && cm_data.buf_dirtyListEndp != bp) {
+ lock_ObtainWrite(&buf_globalLock);
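+        /* CM_BUF_INDL records dirty list membership explicitly,
+         * replacing the old inference from bp->dirtyp and
+         * buf_dirtyListEndp.
+         */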
+ if (!(bp->flags & CM_BUF_INDL)) {
buf_HoldLocked(bp);
if (!cm_data.buf_dirtyListp) {
cm_data.buf_dirtyListp = cm_data.buf_dirtyListEndp = bp;
cm_data.buf_dirtyListEndp = bp;
}
bp->dirtyp = NULL;
+ bp->flags |= CM_BUF_INDL;
}
+ lock_ReleaseWrite(&buf_globalLock);
+ }
+
+ /* and record the last writer */
+ if (bp->userp != userp) {
+ cm_HoldUser(userp);
+ if (bp->userp)
+ cm_ReleaseUser(bp->userp);
+ bp->userp = userp;
}
- lock_ReleaseWrite(&buf_globalLock);
}
/* clean all buffers, reset log pointers and invalidate all buffers.
cm_buf_t *bp;
cm_req_t req;
- lock_ObtainWrite(&buf_globalLock);
+ lock_ObtainRead(&buf_globalLock);
for(i=0; i<cm_data.buf_hashSize; i++) {
for(bp = cm_data.buf_scacheHashTablepp[i]; bp; bp = bp->hashp) {
if ((bp->flags & CM_BUF_DIRTY) == CM_BUF_DIRTY) {
buf_HoldLocked(bp);
- lock_ReleaseWrite(&buf_globalLock);
+ lock_ReleaseRead(&buf_globalLock);
/* now no locks are held; clean buffer and go on */
cm_InitReq(&req);
req.flags |= CM_REQ_NORETRY;
- buf_CleanAsync(bp, &req);
- buf_CleanWait(NULL, bp);
+ buf_CleanAsync(bp, &req, NULL);
+ buf_CleanWait(NULL, bp, FALSE);
/* relock and release buffer */
- lock_ObtainWrite(&buf_globalLock);
- buf_ReleaseLocked(bp);
+ lock_ObtainRead(&buf_globalLock);
+ buf_ReleaseLocked(bp, FALSE);
} /* dirty */
} /* over one bucket */
} /* for loop over all hash buckets */
/* release locks */
- lock_ReleaseWrite(&buf_globalLock);
+ lock_ReleaseRead(&buf_globalLock);
#ifdef TESTING
buf_ValidateBufQueues();
osi_hyper_t bufEnd;
long code;
long bufferPos;
- long i;
+ afs_uint32 i;
/* assert that cm_bufCreateLock is held in write mode */
lock_AssertWrite(&scp->bufCreateLock);
i = BUF_FILEHASH(&scp->fid);
- lock_ObtainWrite(&buf_globalLock);
+ lock_ObtainRead(&buf_globalLock);
bufp = cm_data.buf_fileHashTablepp[i];
if (bufp == NULL) {
- lock_ReleaseWrite(&buf_globalLock);
+ lock_ReleaseRead(&buf_globalLock);
return 0;
}
buf_HoldLocked(bufp);
- lock_ReleaseWrite(&buf_globalLock);
+ lock_ReleaseRead(&buf_globalLock);
while (bufp) {
lock_ObtainMutex(&bufp->mx);
LargeIntegerLessThan(*sizep, bufEnd)) {
buf_WaitIO(scp, bufp);
}
- lock_ObtainMutex(&scp->mx);
+ lock_ObtainWrite(&scp->rw);
/* make sure we have a callback (so we have the right value for
* the length), and wait for it to be safe to do a truncate.
| CM_SCACHESYNC_BUFLOCKED);
- lock_ObtainWrite(&buf_globalLock);
/* if we succeeded in our locking, and this applies to the right
* file, and the truncate request overlaps the buffer either
* totally or partially, then do something.
bufp->flags &= ~CM_BUF_DIRTY;
bufp->dirty_offset = 0;
bufp->dirty_length = 0;
- bufp->dataVersion = -1; /* known bad */
+ bufp->dataVersion = CM_BUF_VERSION_BAD; /* known bad */
bufp->dirtyCounter++;
}
else {
* visible again.
*/
bufferPos = sizep->LowPart & (cm_data.buf_blockSize - 1);
- osi_assert(bufferPos != 0);
+ osi_assertx(bufferPos != 0, "non-zero bufferPos");
memset(bufp->datap + bufferPos, 0,
cm_data.buf_blockSize - bufferPos);
}
CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS
| CM_SCACHESYNC_SETSIZE | CM_SCACHESYNC_BUFLOCKED);
- lock_ReleaseMutex(&scp->mx);
+ lock_ReleaseWrite(&scp->rw);
lock_ReleaseMutex(&bufp->mx);
if (!code) {
nbufp = bufp->fileHashp;
if (nbufp)
- buf_HoldLocked(nbufp);
+ buf_Hold(nbufp);
} else {
/* This forces the loop to end and the error code
* to be returned. */
nbufp = NULL;
}
- buf_ReleaseLocked(bufp);
- lock_ReleaseWrite(&buf_globalLock);
+ buf_Release(bufp);
bufp = nbufp;
}
cm_buf_t *bp; /* buffer we're hacking on */
cm_buf_t *nbp;
int didRelease;
- long i;
+ afs_uint32 i;
i = BUF_FILEHASH(&scp->fid);
code = 0;
- lock_ObtainWrite(&buf_globalLock);
+ lock_ObtainRead(&buf_globalLock);
bp = cm_data.buf_fileHashTablepp[i];
if (bp)
buf_HoldLocked(bp);
- lock_ReleaseWrite(&buf_globalLock);
+ lock_ReleaseRead(&buf_globalLock);
for (; bp; bp = nbp) {
didRelease = 0; /* haven't released this buffer yet */
lock_ObtainMutex(&bp->mx);
/* start cleaning the buffer, and wait for it to finish */
- buf_CleanAsyncLocked(bp, reqp);
+ buf_CleanAsyncLocked(bp, reqp, NULL);
buf_WaitIO(scp, bp);
lock_ReleaseMutex(&bp->mx);
- code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
- if (code && code != CM_ERROR_BADFD)
- goto skip;
-
- /* if the scp's FID is bad its because we received VNOVNODE
- * when attempting to FetchStatus before the write. This
- * page therefore contains data that can no longer be stored.
- */
- lock_ObtainMutex(&bp->mx);
- bp->flags &= ~CM_BUF_DIRTY;
- bp->flags |= CM_BUF_ERROR;
- bp->error = code;
- bp->dirty_offset = 0;
- bp->dirty_length = 0;
- bp->dataVersion = -1; /* known bad */
- bp->dirtyCounter++;
- lock_ReleaseMutex(&bp->mx);
+ /*
+ * if the error for the previous buffer was BADFD
+ * then all buffers for the FID are bad. Do not
+             * attempt to stabilize.
+ */
+ if (code != CM_ERROR_BADFD) {
+ code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
+ if (code && code != CM_ERROR_BADFD)
+ goto skip;
+ }
+ if (code == CM_ERROR_BADFD) {
+ /* if the scp's FID is bad its because we received VNOVNODE
+ * when attempting to FetchStatus before the write. This
+ * page therefore contains data that can no longer be stored.
+ */
+ lock_ObtainMutex(&bp->mx);
+ bp->flags &= ~CM_BUF_DIRTY;
+ bp->flags |= CM_BUF_ERROR;
+ bp->error = CM_ERROR_BADFD;
+ bp->dirty_offset = 0;
+ bp->dirty_length = 0;
+ bp->dataVersion = CM_BUF_VERSION_BAD; /* known bad */
+ bp->dirtyCounter++;
+ lock_ReleaseMutex(&bp->mx);
+ }
- lock_ObtainWrite(&buf_globalLock);
/* actually, we only know that buffer is clean if ref
* count is 1, since we don't have buffer itself locked.
*/
if (!(bp->flags & CM_BUF_DIRTY)) {
+ lock_ObtainWrite(&buf_globalLock);
if (bp->refCount == 1) { /* bp is held above */
nbp = bp->fileHashp;
if (nbp)
buf_HoldLocked(nbp);
- buf_ReleaseLocked(bp);
+ buf_ReleaseLocked(bp, TRUE);
didRelease = 1;
buf_Recycle(bp);
}
+ lock_ReleaseWrite(&buf_globalLock);
}
- lock_ReleaseWrite(&buf_globalLock);
- if (code != CM_ERROR_BADFD)
+ if (code == 0)
(*cm_buf_opsp->Unstabilizep)(scp, userp);
}
skip:
if (!didRelease) {
- lock_ObtainWrite(&buf_globalLock);
+ lock_ObtainRead(&buf_globalLock);
nbp = bp->fileHashp;
if (nbp)
buf_HoldLocked(nbp);
- buf_ReleaseLocked(bp);
- lock_ReleaseWrite(&buf_globalLock);
+ buf_ReleaseLocked(bp, FALSE);
+ lock_ReleaseRead(&buf_globalLock);
}
} /* for loop over a bunch of buffers */
return code;
}
-/* Must be called with scp->mx held */
-long buf_ForceDataVersion(cm_scache_t * scp, afs_uint32 fromVersion, afs_uint32 toVersion)
+/* Must be called with scp->rw held */
+long buf_ForceDataVersion(cm_scache_t * scp, afs_uint64 fromVersion, afs_uint64 toVersion)
{
cm_buf_t * bp;
- cm_buf_t * nbp;
- unsigned int i;
+ afs_uint32 i;
int found = 0;
+ lock_AssertAny(&scp->rw);
+
i = BUF_FILEHASH(&scp->fid);
- lock_ObtainWrite(&buf_globalLock);
+ lock_ObtainRead(&buf_globalLock);
for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp = bp->fileHashp) {
if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
}
}
}
- lock_ReleaseWrite(&buf_globalLock);
+ lock_ReleaseRead(&buf_globalLock);
if (found)
return 0;
long wasDirty = 0;
cm_buf_t *bp; /* buffer we're hacking on */
cm_buf_t *nbp; /* next one */
- long i;
+ afs_uint32 i;
i = BUF_FILEHASH(&scp->fid);
- lock_ObtainWrite(&buf_globalLock);
+ lock_ObtainRead(&buf_globalLock);
bp = cm_data.buf_fileHashTablepp[i];
if (bp)
buf_HoldLocked(bp);
- lock_ReleaseWrite(&buf_globalLock);
+ lock_ReleaseRead(&buf_globalLock);
for (; bp; bp = nbp) {
/* clean buffer synchronously */
if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
- if (userp) {
- cm_HoldUser(userp);
- lock_ObtainMutex(&bp->mx);
- if (bp->userp)
- cm_ReleaseUser(bp->userp);
- bp->userp = userp;
- lock_ReleaseMutex(&bp->mx);
- }
- wasDirty = buf_CleanAsync(bp, reqp);
- buf_CleanWait(scp, bp);
lock_ObtainMutex(&bp->mx);
- if (bp->flags & CM_BUF_ERROR) {
- code = bp->error;
- if (code == 0)
- code = -1;
+ if (bp->flags & CM_BUF_DIRTY) {
+ if (userp && userp != bp->userp) {
+ cm_HoldUser(userp);
+ if (bp->userp)
+ cm_ReleaseUser(bp->userp);
+ bp->userp = userp;
+ }
+
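+            /* 'code' still holds the result of cleaning the previous
+             * buffer; a fatal error there applies to every remaining
+             * buffer for this file.
+             */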
+ switch (code) {
+ case CM_ERROR_NOSUCHFILE:
+ case CM_ERROR_BADFD:
+ case CM_ERROR_NOACCESS:
+ case CM_ERROR_QUOTA:
+ case CM_ERROR_SPACE:
+ case CM_ERROR_TOOBIG:
+ case CM_ERROR_READONLY:
+ case CM_ERROR_NOSUCHPATH:
+ /*
+ * Apply the previous fatal error to this buffer.
+ * Do not waste the time attempting to store to
+ * the file server when we know it will fail.
+ */
+ bp->flags &= ~CM_BUF_DIRTY;
+ bp->flags |= CM_BUF_ERROR;
+ bp->dirty_offset = 0;
+ bp->dirty_length = 0;
+ bp->error = code;
+ bp->dataVersion = CM_BUF_VERSION_BAD;
+ bp->dirtyCounter++;
+ break;
+ case CM_ERROR_TIMEDOUT:
+ case CM_ERROR_ALLDOWN:
+ case CM_ERROR_ALLBUSY:
+ case CM_ERROR_ALLOFFLINE:
+ case CM_ERROR_CLOCKSKEW:
+ /* do not mark the buffer in error state but do
+ * not attempt to complete the rest either.
+ */
+ break;
+ default:
+ code = buf_CleanAsyncLocked(bp, reqp, &wasDirty);
+ if (bp->flags & CM_BUF_ERROR) {
+ code = bp->error;
+ if (code == 0)
+ code = -1;
+ }
+ }
+ buf_CleanWait(scp, bp, TRUE);
}
lock_ReleaseMutex(&bp->mx);
}
- lock_ObtainWrite(&buf_globalLock);
+ lock_ObtainRead(&buf_globalLock);
nbp = bp->fileHashp;
if (nbp)
buf_HoldLocked(nbp);
- buf_ReleaseLocked(bp);
- lock_ReleaseWrite(&buf_globalLock);
+ buf_ReleaseLocked(bp, FALSE);
+ lock_ReleaseRead(&buf_globalLock);
} /* for loop over a bunch of buffers */
#ifdef TESTING
{
StringCbPrintfA(output, sizeof(output),
"%s bp=0x%08X, hash=%d, fid (cell=%d, volume=%d, "
- "vnode=%d, unique=%d), offset=%x:%08x, dv=%d, "
- "flags=0x%x, cmFlags=0x%x, refCount=%d\r\n",
+ "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
+ "flags=0x%x, cmFlags=0x%x, error=0x%x, refCount=%d\r\n",
cookie, (void *)bp, i, bp->fid.cell, bp->fid.volume,
bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
bp->offset.LowPart, bp->dataVersion, bp->flags,
- bp->cmFlags, bp->refCount);
+ bp->cmFlags, bp->error, bp->refCount);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
}
}
for(bp = cm_data.buf_freeListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
StringCbPrintfA(output, sizeof(output),
"%s bp=0x%08X, fid (cell=%d, volume=%d, "
- "vnode=%d, unique=%d), offset=%x:%08x, dv=%d, "
- "flags=0x%x, cmFlags=0x%x, refCount=%d\r\n",
+ "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
+ "flags=0x%x, cmFlags=0x%x, error=0x%x, refCount=%d\r\n",
cookie, (void *)bp, bp->fid.cell, bp->fid.volume,
bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
bp->offset.LowPart, bp->dataVersion, bp->flags,
- bp->cmFlags, bp->refCount);
+ bp->cmFlags, bp->error, bp->refCount);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
}
StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_FreeListEndp.\r\n", cookie);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
- StringCbPrintfA(output, sizeof(output), "%s - dumping buf_dirtyListEndp\r\n", cookie);
+ StringCbPrintfA(output, sizeof(output), "%s - dumping buf_dirtyListp\r\n", cookie);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
- for(bp = cm_data.buf_dirtyListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
+ for(bp = cm_data.buf_dirtyListp; bp; bp=bp->dirtyp) {
StringCbPrintfA(output, sizeof(output),
"%s bp=0x%08X, fid (cell=%d, volume=%d, "
- "vnode=%d, unique=%d), offset=%x:%08x, dv=%d, "
- "flags=0x%x, cmFlags=0x%x, refCount=%d\r\n",
+ "vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
+ "flags=0x%x, cmFlags=0x%x, error=0x%x, refCount=%d\r\n",
cookie, (void *)bp, bp->fid.cell, bp->fid.volume,
bp->fid.vnode, bp->fid.unique, bp->offset.HighPart,
bp->offset.LowPart, bp->dataVersion, bp->flags,
- bp->cmFlags, bp->refCount);
+ bp->cmFlags, bp->error, bp->refCount);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
}
- StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_dirtyListEndp.\r\n", cookie);
+ StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_dirtyListp.\r\n", cookie);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
if (lock)
i = BUF_FILEHASH(fidp);
- for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp=bp->allp, bcount++) {
+ for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp=bp->fileHashp, bcount++) {
if (!cm_FidCmp(fidp, &bp->fid) && (bp->flags & CM_BUF_DIRTY))
return 1;
}
bp->dirty_length = 0;
bp->flags |= CM_BUF_ERROR;
bp->error = VNOVNODE;
- bp->dataVersion = -1; /* bad */
+ bp->dataVersion = CM_BUF_VERSION_BAD; /* bad */
bp->dirtyCounter++;
if (bp->flags & CM_BUF_WAITING) {
osi_Log2(buf_logp, "BUF CleanDirtyBuffers Waking [scp 0x%x] bp 0x%x", scp, bp);