/* set this to 1 when we are terminating to prevent access attempts */
static int buf_ShutdownFlag = 0;
+/*
+ * buf_HoldLocked - add a reference to a buffer.
+ *
+ * The caller must already hold buf_globalLock (read or write).  The
+ * increment uses InterlockedIncrement so it is safe against racing
+ * releases.  When DEBUG_REFCOUNT is defined the Dbg variant is built
+ * instead, logging the caller's file/line with the new count.
+ */
+#ifdef DEBUG_REFCOUNT
+void buf_HoldLockedDbg(cm_buf_t *bp, char *file, long line)
+#else
void buf_HoldLocked(cm_buf_t *bp)
+#endif
{
+ afs_int32 refCount;
+
osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
- InterlockedIncrement(&bp->refCount);
+ refCount = InterlockedIncrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+ osi_Log2(afsd_logp,"buf_HoldLocked bp 0x%p ref %d",bp, refCount);
+ afsi_log("%s:%d buf_HoldLocked bp 0x%p, ref %d", file, line, bp, refCount);
+#endif
}
/* hold a reference to an already held buffer */
+/*
+ * buf_Hold - add a reference while holding no locks.
+ *
+ * Obtains buf_globalLock (read) around the interlocked increment.
+ * The hold logic is inlined rather than calling buf_HoldLocked so
+ * that, under DEBUG_REFCOUNT, the log entry is attributed to buf_Hold
+ * and to the original caller's file/line.
+ */
+#ifdef DEBUG_REFCOUNT
+void buf_HoldDbg(cm_buf_t *bp, char *file, long line)
+#else
void buf_Hold(cm_buf_t *bp)
+#endif
{
+ afs_int32 refCount;
+
lock_ObtainRead(&buf_globalLock);
- buf_HoldLocked(bp);
+ osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
+ refCount = InterlockedIncrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+ osi_Log2(afsd_logp,"buf_Hold bp 0x%p ref %d",bp, refCount);
+ afsi_log("%s:%d buf_Hold bp 0x%p, ref %d", file, line, bp, refCount);
+#endif
lock_ReleaseRead(&buf_globalLock);
}
/* code to drop reference count while holding buf_globalLock */
+#ifdef DEBUG_REFCOUNT
+void buf_ReleaseLockedDbg(cm_buf_t *bp, afs_uint32 writeLocked, char *file, long line)
+#else
void buf_ReleaseLocked(cm_buf_t *bp, afs_uint32 writeLocked)
+#endif
{
afs_int32 refCount;
osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
refCount = InterlockedDecrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+ osi_Log3(afsd_logp,"buf_ReleaseLocked %s bp 0x%p ref %d",writeLocked?"write":"read", bp, refCount);
+ afsi_log("%s:%d buf_ReleaseLocked %s bp 0x%p, ref %d", file, line, writeLocked?"write":"read", bp, refCount);
+#endif
#ifdef DEBUG
if (refCount < 0)
osi_panic("buf refcount 0",__FILE__,__LINE__);;
}
/* release a buffer. Buffer must be referenced, but unlocked. */
+#ifdef DEBUG_REFCOUNT
+void buf_ReleaseDbg(cm_buf_t *bp, char *file, long line)
+#else
void buf_Release(cm_buf_t *bp)
+#endif
{
afs_int32 refCount;
osi_assertx(bp->magic == CM_BUF_MAGIC,"incorrect cm_buf_t magic");
refCount = InterlockedDecrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+ osi_Log2(afsd_logp,"buf_Release bp 0x%p ref %d", bp, refCount);
+ afsi_log("%s:%d buf_Release bp 0x%p, ref %d", file, line, bp, refCount);
+#endif
#ifdef DEBUG
if (refCount < 0)
osi_panic("buf refcount 0",__FILE__,__LINE__);;
/* incremental sync daemon. Writes all dirty buffers every 5000 ms */
void buf_IncrSyncer(long parm)
{
- cm_buf_t **bpp, *bp;
+ cm_buf_t **bpp, *bp, *prevbp;
long i; /* counter */
long wasDirty = 0;
cm_req_t req;
wasDirty = 0;
- /* now go through our percentage of the buffers */
- for (bpp = &cm_data.buf_dirtyListp; bp = *bpp; ) {
-
+ /* go through all of the dirty buffers */
+ lock_ObtainRead(&buf_globalLock);
+ for (bpp = &cm_data.buf_dirtyListp, prevbp = NULL; bp = *bpp; ) {
+ lock_ReleaseRead(&buf_globalLock);
/* all dirty buffers are held when they are added to the
* dirty list. No need for an additional hold.
*/
+ lock_ObtainMutex(&bp->mx);
if (bp->flags & CM_BUF_DIRTY) {
/* start cleaning the buffer; don't touch log pages since
*/
cm_InitReq(&req);
req.flags |= CM_REQ_NORETRY;
- wasDirty |= buf_CleanAsync(bp, &req);
+ wasDirty |= buf_CleanAsyncLocked(bp, &req);
}
/* the buffer may or may not have been dirty
* and if dirty may or may not have been cleaned
* successfully. check the dirty flag again.
*/
- if (!(bp->flags & CM_BUF_DIRTY)) {
- lock_ObtainMutex(&bp->mx);
- if (!(bp->flags & CM_BUF_DIRTY)) {
- /* remove the buffer from the dirty list */
- lock_ObtainWrite(&buf_globalLock);
- *bpp = bp->dirtyp;
- bp->dirtyp = NULL;
- if (cm_data.buf_dirtyListp == NULL)
- cm_data.buf_dirtyListEndp = NULL;
- buf_ReleaseLocked(bp, TRUE);
- lock_ReleaseWrite(&buf_globalLock);
- } else {
- /* advance the pointer so we don't loop forever */
- bpp = &bp->dirtyp;
- }
- lock_ReleaseMutex(&bp->mx);
- } else {
- /* advance the pointer so we don't loop forever */
- bpp = &bp->dirtyp;
- }
+ if (!(bp->flags & CM_BUF_DIRTY)) {
+ /* remove the buffer from the dirty list */
+ lock_ObtainWrite(&buf_globalLock);
+#ifdef DEBUG_REFCOUNT
+ if (bp->dirtyp == NULL && bp != cm_data.buf_dirtyListEndp) {
+ osi_Log1(afsd_logp,"buf_IncrSyncer bp 0x%p list corruption",bp);
+ afsi_log("buf_IncrSyncer bp 0x%p list corruption", bp);
+ }
+#endif
+ *bpp = bp->dirtyp;
+ bp->dirtyp = NULL;
+ bp->flags &= ~CM_BUF_INDL;
+ if (cm_data.buf_dirtyListp == NULL)
+ cm_data.buf_dirtyListEndp = NULL;
+ else if (cm_data.buf_dirtyListEndp == bp)
+ cm_data.buf_dirtyListEndp = prevbp;
+ buf_ReleaseLocked(bp, TRUE);
+ lock_ConvertWToR(&buf_globalLock);
+ } else {
+ /* advance the pointer so we don't loop forever */
+ lock_ObtainRead(&buf_globalLock);
+ bpp = &bp->dirtyp;
+ prevbp = bp;
+ }
+ lock_ReleaseMutex(&bp->mx);
} /* for loop over a bunch of buffers */
+ lock_ReleaseRead(&buf_globalLock);
} /* whole daemon's while loop */
}
release = 1;
}
if ( scp ) {
- lock_ObtainMutex(&scp->mx);
+ lock_ObtainRead(&scp->rw);
if (scp->flags & CM_SCACHEFLAG_WAITING) {
osi_Log1(buf_logp, "buf_WaitIO waking scp 0x%p", scp);
osi_Wakeup((LONG_PTR)&scp->flags);
}
- lock_ReleaseMutex(&scp->mx);
+ lock_ReleaseRead(&scp->rw);
}
}
offset = bp->offset;
LargeIntegerAdd(offset, ConvertLongToLargeInteger(bp->dirty_offset));
- code = (*cm_buf_opsp->Writep)(scp, &offset, bp->dirty_length, 0, bp->userp, reqp);
+ code = (*cm_buf_opsp->Writep)(scp, &offset,
+#if 1
+ /* we might as well try to write all of the contiguous
+ * dirty buffers in one RPC
+ */
+ cm_chunkSize,
+#else
+ bp->dirty_length,
+#endif
+ 0, bp->userp, reqp);
osi_Log3(buf_logp, "buf_CleanAsyncLocked I/O on scp 0x%p buf 0x%p, done=%d", scp, bp, code);
cm_ReleaseSCache(scp);
* because we aren't going to be able to write this data to the file
* server.
*/
- if (code == CM_ERROR_NOSUCHFILE || code == CM_ERROR_BADFD){
+ if (code == CM_ERROR_NOSUCHFILE || code == CM_ERROR_BADFD || code == CM_ERROR_NOACCESS){
bp->flags &= ~CM_BUF_DIRTY;
bp->flags |= CM_BUF_ERROR;
bp->dirty_offset = 0;
bp->dirty_length = 0;
bp->error = code;
- bp->dataVersion = -1; /* bad */
+ bp->dataVersion = CM_BUF_VERSION_BAD; /* bad */
bp->dirtyCounter++;
}
bp->flags &= ~CM_BUF_INHASH;
}
- /* bump the soft reference counter now, to invalidate softRefs; no
- * wakeup is required since people don't sleep waiting for this
- * counter to change.
- */
- bp->idCounter++;
-
/* make the fid unrecognizable */
memset(&bp->fid, 0, sizeof(cm_fid_t));
}
* do not want to allow the buffer to be added
* to the free list.
*/
- bp->refCount--;
+ afs_int32 refCount = InterlockedDecrement(&bp->refCount);
+#ifdef DEBUG_REFCOUNT
+ osi_Log2(afsd_logp,"buf_GetNewLocked bp 0x%p ref %d", bp, refCount);
+ afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, refCount);
+#endif
lock_ReleaseWrite(&buf_globalLock);
lock_ReleaseRead(&scp->bufCreateLock);
return CM_BUF_EXISTS;
* we hold the global lock.
*/
+ /* Don't recycle a buffer held by the redirector. */
+ if (bp->flags & CM_BUF_REDIR)
+ continue;
+
/* don't recycle someone in our own chunk */
if (!cm_FidCmp(&bp->fid, &scp->fid)
&& (bp->offset.LowPart & (-cm_chunkSize))
/* clean up junk flags */
bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
- bp->dataVersion = -1; /* unknown so far */
+ bp->dataVersion = CM_BUF_VERSION_BAD; /* unknown so far */
/* now hash in as our new buffer, and give it the
* appropriate label, if requested.
/* prepare to return it. Give it a refcount */
bp->refCount = 1;
-
+#ifdef DEBUG_REFCOUNT
+ osi_Log2(afsd_logp,"buf_GetNewLocked bp 0x%p ref %d", bp, 1);
+ afsi_log("%s:%d buf_GetNewLocked bp 0x%p, ref %d", __FILE__, __LINE__, bp, 1);
+#endif
lock_ReleaseWrite(&buf_globalLock);
lock_ReleaseRead(&scp->bufCreateLock);
*bufpp = bp;
 * has been invalidated (by having its DV stomped upon), then
* count it as free, since it isn't really being utilized.
*/
- if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion <= 0)
+ if (!(bufp->flags & CM_BUF_INHASH) || bufp->dataVersion == CM_BUF_VERSION_BAD)
count++;
}
lock_ReleaseRead(&buf_globalLock);
}
/* wait for a buffer's cleaning to finish */
+/*
+ * buf_CleanWait - block until any in-progress write of bp completes.
+ *
+ * scp may be NULL (it is passed through to buf_WaitIO).  The new
+ * 'locked' flag tells us whether the caller already holds bp->mx:
+ * if non-zero, the mutex is held on entry and remains held on return;
+ * otherwise it is obtained and released here.
+ */
-void buf_CleanWait(cm_scache_t * scp, cm_buf_t *bp)
+void buf_CleanWait(cm_scache_t * scp, cm_buf_t *bp, afs_uint32 locked)
{
osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
- lock_ObtainMutex(&bp->mx);
+ if (!locked)
+ lock_ObtainMutex(&bp->mx);
if (bp->flags & CM_BUF_WRITING) {
buf_WaitIO(scp, bp);
}
- lock_ReleaseMutex(&bp->mx);
+ if (!locked)
+ lock_ReleaseMutex(&bp->mx);
}
/* set the dirty flag on a buffer, and set associated write-ahead log,
* already there.
*/
lock_ObtainWrite(&buf_globalLock);
- if (bp->dirtyp == NULL && cm_data.buf_dirtyListEndp != bp) {
+ if (!(bp->flags & CM_BUF_INDL)) {
buf_HoldLocked(bp);
if (!cm_data.buf_dirtyListp) {
cm_data.buf_dirtyListp = cm_data.buf_dirtyListEndp = bp;
cm_data.buf_dirtyListEndp = bp;
}
bp->dirtyp = NULL;
+ bp->flags |= CM_BUF_INDL;
}
lock_ReleaseWrite(&buf_globalLock);
}
req.flags |= CM_REQ_NORETRY;
buf_CleanAsync(bp, &req);
- buf_CleanWait(NULL, bp);
+ buf_CleanWait(NULL, bp, FALSE);
/* relock and release buffer */
lock_ObtainRead(&buf_globalLock);
LargeIntegerLessThan(*sizep, bufEnd)) {
buf_WaitIO(scp, bufp);
}
- lock_ObtainMutex(&scp->mx);
+ lock_ObtainWrite(&scp->rw);
/* make sure we have a callback (so we have the right value for
* the length), and wait for it to be safe to do a truncate.
bufp->flags &= ~CM_BUF_DIRTY;
bufp->dirty_offset = 0;
bufp->dirty_length = 0;
- bufp->dataVersion = -1; /* known bad */
+ bufp->dataVersion = CM_BUF_VERSION_BAD; /* known bad */
bufp->dirtyCounter++;
}
else {
CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS
| CM_SCACHESYNC_SETSIZE | CM_SCACHESYNC_BUFLOCKED);
- lock_ReleaseMutex(&scp->mx);
+ lock_ReleaseWrite(&scp->rw);
lock_ReleaseMutex(&bufp->mx);
if (!code) {
buf_WaitIO(scp, bp);
lock_ReleaseMutex(&bp->mx);
- code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
- if (code && code != CM_ERROR_BADFD)
- goto skip;
-
- /* if the scp's FID is bad its because we received VNOVNODE
- * when attempting to FetchStatus before the write. This
- * page therefore contains data that can no longer be stored.
- */
- lock_ObtainMutex(&bp->mx);
- bp->flags &= ~CM_BUF_DIRTY;
- bp->flags |= CM_BUF_ERROR;
- bp->error = code;
- bp->dirty_offset = 0;
- bp->dirty_length = 0;
- bp->dataVersion = -1; /* known bad */
- bp->dirtyCounter++;
- lock_ReleaseMutex(&bp->mx);
+ /*
+ * if the error for the previous buffer was BADFD
+ * then all buffers for the FID are bad. Do not
+ * attempt to stabilize.
+ */
+ if (code != CM_ERROR_BADFD) {
+ code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
+ if (code && code != CM_ERROR_BADFD)
+ goto skip;
+ }
+ if (code == CM_ERROR_BADFD) {
+ /* if the scp's FID is bad its because we received VNOVNODE
+ * when attempting to FetchStatus before the write. This
+ * page therefore contains data that can no longer be stored.
+ */
+ lock_ObtainMutex(&bp->mx);
+ bp->flags &= ~CM_BUF_DIRTY;
+ bp->flags |= CM_BUF_ERROR;
+ bp->error = CM_ERROR_BADFD;
+ bp->dirty_offset = 0;
+ bp->dirty_length = 0;
+ bp->dataVersion = CM_BUF_VERSION_BAD; /* known bad */
+ bp->dirtyCounter++;
+ lock_ReleaseMutex(&bp->mx);
+ }
/* actually, we only know that buffer is clean if ref
* count is 1, since we don't have buffer itself locked.
lock_ReleaseWrite(&buf_globalLock);
}
- if (code != CM_ERROR_BADFD)
+ if (code == 0)
(*cm_buf_opsp->Unstabilizep)(scp, userp);
}
return code;
}
-/* Must be called with scp->mx held */
+/* Must be called with scp->rw held */
long buf_ForceDataVersion(cm_scache_t * scp, afs_uint64 fromVersion, afs_uint64 toVersion)
{
cm_buf_t * bp;
afs_uint32 i;
int found = 0;
- lock_AssertMutex(&scp->mx);
+ lock_AssertAny(&scp->rw);
i = BUF_FILEHASH(&scp->fid);
for (; bp; bp = nbp) {
/* clean buffer synchronously */
if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
- if (userp) {
- cm_HoldUser(userp);
- lock_ObtainMutex(&bp->mx);
- if (bp->userp)
- cm_ReleaseUser(bp->userp);
- bp->userp = userp;
- lock_ReleaseMutex(&bp->mx);
- }
- wasDirty = buf_CleanAsync(bp, reqp);
- buf_CleanWait(scp, bp);
lock_ObtainMutex(&bp->mx);
- if (bp->flags & CM_BUF_ERROR) {
- code = bp->error;
- if (code == 0)
- code = -1;
+ if (bp->flags & CM_BUF_DIRTY) {
+ if (userp) {
+ cm_HoldUser(userp);
+ if (bp->userp)
+ cm_ReleaseUser(bp->userp);
+ bp->userp = userp;
+ }
+ wasDirty = buf_CleanAsyncLocked(bp, reqp);
+ buf_CleanWait(scp, bp, TRUE);
+ if (bp->flags & CM_BUF_ERROR) {
+ code = bp->error;
+ if (code == 0)
+ code = -1;
+ }
}
lock_ReleaseMutex(&bp->mx);
}
StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_FreeListEndp.\r\n", cookie);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
- StringCbPrintfA(output, sizeof(output), "%s - dumping buf_dirtyListEndp\r\n", cookie);
+ StringCbPrintfA(output, sizeof(output), "%s - dumping buf_dirtyListp\r\n", cookie);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
- for(bp = cm_data.buf_dirtyListEndp; bp; bp=(cm_buf_t *) osi_QPrev(&bp->q)) {
+ for(bp = cm_data.buf_dirtyListp; bp; bp=bp->dirtyp) {
StringCbPrintfA(output, sizeof(output),
"%s bp=0x%08X, fid (cell=%d, volume=%d, "
"vnode=%d, unique=%d), offset=%x:%08x, dv=%I64d, "
bp->cmFlags, bp->refCount);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
}
- StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_dirtyListEndp.\r\n", cookie);
+ StringCbPrintfA(output, sizeof(output), "%s - Done dumping buf_dirtyListp.\r\n", cookie);
WriteFile(outputFile, output, (DWORD)strlen(output), &zilch, NULL);
if (lock)
bp->dirty_length = 0;
bp->flags |= CM_BUF_ERROR;
bp->error = VNOVNODE;
- bp->dataVersion = -1; /* bad */
+ bp->dataVersion = CM_BUF_VERSION_BAD; /* bad */
bp->dirtyCounter++;
if (bp->flags & CM_BUF_WAITING) {
osi_Log2(buf_logp, "BUF CleanDirtyBuffers Waking [scp 0x%x] bp 0x%x", scp, bp);