if (bp->flags & CM_BUF_DIRTY && !(bp->flags & CM_BUF_REDIR)) {
/* start cleaning the buffer; don't touch log pages since
- * the log code counts on knowing exactly who is writing
- * a log page at any given instant.
- */
+ * the log code counts on knowing exactly who is writing
+ * a log page at any given instant.
+ *
+ * only attempt to write the buffer if the volume might
+ * be online.
+ */
afs_uint32 dirty;
+ cm_volume_t * volp;
- cm_InitReq(&req);
- req.flags |= CM_REQ_NORETRY;
- buf_CleanAsyncLocked(bp, &req, &dirty);
- wasDirty |= dirty;
+ volp = cm_GetVolumeByFID(&bp->fid);
+ switch (cm_GetVolumeStatus(volp, bp->fid.volume)) {
+ case vl_online:
+ case vl_unknown:
+ cm_InitReq(&req);
+ req.flags |= CM_REQ_NORETRY;
+ buf_CleanAsyncLocked(bp, &req, &dirty);
+ wasDirty |= dirty;
+ }
+ cm_PutVolume(volp);
}
/* the buffer may or may not have been dirty
buf_ReleaseLocked(bp, TRUE);
lock_ConvertWToR(&buf_globalLock);
} else {
+ if (buf_ShutdownFlag) {
+ cm_cell_t *cellp;
+ cm_volume_t *volp;
+ char volstr[VL_MAXNAMELEN+12]="";
+ char *ext = "";
+
+ volp = cm_GetVolumeByFID(&bp->fid);
+ if (volp) {
+ cellp = volp->cellp;
+ if (bp->fid.volume == volp->vol[RWVOL].ID)
+ ext = "";
+ else if (bp->fid.volume == volp->vol[ROVOL].ID)
+ ext = ".readonly";
+ else if (bp->fid.volume == volp->vol[BACKVOL].ID)
+ ext = ".backup";
+ else
+ ext = ".nomatch";
+ snprintf(volstr, sizeof(volstr), "%s%s", volp->namep, ext);
+ } else {
+ cellp = cm_FindCellByID(bp->fid.cell, CM_FLAG_NOPROBE);
+ snprintf(volstr, sizeof(volstr), "%u", bp->fid.volume);
+ }
+
+ LogEvent(EVENTLOG_INFORMATION_TYPE, MSG_DIRTY_BUFFER_AT_SHUTDOWN,
+ cellp->name, volstr, bp->fid.vnode, bp->fid.unique,
+ bp->offset.QuadPart+bp->dirty_offset, bp->dirty_length);
+ }
+
/* advance the pointer so we don't loop forever */
lock_ObtainRead(&buf_globalLock);
bpp = &bp->dirtyp;
i = SleepEx(5000, 1);
if (i != 0)
continue;
- }
+ } else {
+ Sleep(50);
+ }
wasDirty = buf_Sync(1);
} /* whole daemon's while loop */
}
osi_SleepM((LONG_PTR)bp, &bp->mx);
- smb_UpdateServerPriority();
+ cm_UpdateServerPriority();
lock_ObtainMutex(&bp->mx);
osi_Log1(buf_logp, "buf_WaitIO conflict wait done for 0x%p", bp);
return bp;
}
+/* find a buffer, if any, for a particular file ID and offset. Assumes
+ * that buf_globalLock is write locked when called. Uses the all buffer
+ * list.
+ *
+ * Returns the matching buffer with a hold reference (via
+ * buf_HoldLocked), or NULL if no buffer matches.
+ */
+cm_buf_t *buf_FindAllLocked(struct cm_scache *scp, osi_hyper_t *offsetp, afs_uint32 flags)
+{
+ cm_buf_t *bp;
+
+ if (flags == 0) {
+ /* flags == 0: match on the fid plus the buffer's recorded
+ * file offset (both 32-bit halves must match). */
+ for(bp = cm_data.buf_allp; bp; bp=bp->allp) {
+ if (cm_FidCmp(&scp->fid, &bp->fid) == 0
+ && offsetp->LowPart == bp->offset.LowPart
+ && offsetp->HighPart == bp->offset.HighPart) {
+ buf_HoldLocked(bp);
+ break;
+ }
+ }
+ } else {
+ /* flags != 0: match on the buffer's data address instead of its
+ * offset field; *offsetp is translated to an address inside the
+ * mapped cache by adding cm_data.baseAddress and compared to
+ * bp->datap. NOTE(review): presumably used when the caller has a
+ * cache-relative data offset rather than a file offset -- confirm
+ * against callers. */
+ for(bp = cm_data.buf_allp; bp; bp=bp->allp) {
+ if (cm_FidCmp(&scp->fid, &bp->fid) == 0) {
+ char * fileOffset;
+
+ fileOffset = offsetp->QuadPart + cm_data.baseAddress;
+ if (fileOffset == bp->datap) {
+ buf_HoldLocked(bp);
+ break;
+ }
+ }
+ }
+ }
+ /* return whatever we found, if anything */
+ /* (bp is NULL when either scan completed without a match) */
+ return bp;
+}
+
+/* find a buffer with offset *offsetp for vnode *scp. Called
+ * with no locks held. Use the all buffer list.
+ *
+ * Locking wrapper around buf_FindAllLocked: takes buf_globalLock
+ * (read) only for the duration of the scan. The returned buffer, if
+ * any, carries the hold reference acquired by buf_FindAllLocked;
+ * returns NULL when no buffer matches.
+ */
+cm_buf_t *buf_FindAll(struct cm_scache *scp, osi_hyper_t *offsetp, afs_uint32 flags)
+{
+ cm_buf_t *bp;
+
+ lock_ObtainRead(&buf_globalLock);
+ bp = buf_FindAllLocked(scp, offsetp, flags);
+ lock_ReleaseRead(&buf_globalLock);
+
+ return bp;
+}
+
/* start cleaning I/O on this buffer. Buffer must be write locked, and is returned
* write-locked.
*
*/
if (reqp->flags & CM_REQ_NORETRY)
break;
- };
+
+ /* Ditto if the hardDeadTimeout or idleTimeout was reached */
+ if (code == CM_ERROR_TIMEDOUT || code == CM_ERROR_ALLDOWN ||
+ code == CM_ERROR_ALLBUSY || code == CM_ERROR_ALLOFFLINE ||
+ code == CM_ERROR_CLOCKSKEW) {
+ break;
+ }
+ }
/* if someone was waiting for the I/O that just completed or failed,
* wake them up.
/* make the fid unrecognizable */
memset(&bp->fid, 0, sizeof(cm_fid_t));
+
+ /* clean up junk flags */
+ bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
+ bp->dataVersion = CM_BUF_VERSION_BAD; /* unknown so far */
}
/* recycle a buffer, removing it from the free list, hashing in its new identity
* space from the buffer pool. In that case, the buffer will be returned
* without being hashed into the hash table.
*/
-long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
+long buf_GetNewLocked(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf_t **bufpp)
{
cm_buf_t *bp; /* buffer we're dealing with */
cm_buf_t *nextBp; /* next buffer in file hash chain */
afs_uint32 i; /* temp */
- cm_req_t req;
-
- cm_InitReq(&req); /* just in case */
#ifdef TESTING
buf_ValidateBufQueues();
* have the WRITING flag set, so we won't get
* back here.
*/
- buf_CleanAsync(bp, &req, NULL);
+ buf_CleanAsync(bp, reqp, NULL);
/* now put it back and go around again */
buf_Release(bp);
*/
buf_Recycle(bp);
- /* clean up junk flags */
- bp->flags &= ~(CM_BUF_EOF | CM_BUF_ERROR);
- bp->dataVersion = CM_BUF_VERSION_BAD; /* unknown so far */
-
/* now hash in as our new buffer, and give it the
* appropriate label, if requested.
*/
/* get a page, returning it held but unlocked. Doesn't fill in the page
* with I/O, since we're going to write the whole thing new.
*/
-long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
+long buf_GetNew(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf_t **bufpp)
{
cm_buf_t *bp;
long code;
}
/* otherwise, we have to create a page */
- code = buf_GetNewLocked(scp, &pageOffset, &bp);
+ code = buf_GetNewLocked(scp, &pageOffset, reqp, &bp);
/* check if the buffer was created in a race condition branch.
* If so, go around so we can hold a reference to it.
/* get a page, returning it held but unlocked. Make sure it is complete */
/* The scp must be unlocked when passed to this function */
-long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_buf_t **bufpp)
+long buf_Get(struct cm_scache *scp, osi_hyper_t *offsetp, cm_req_t *reqp, cm_buf_t **bufpp)
{
cm_buf_t *bp;
long code;
}
/* otherwise, we have to create a page */
- code = buf_GetNewLocked(scp, &pageOffset, &bp);
+ code = buf_GetNewLocked(scp, &pageOffset, reqp, &bp);
/* bp->mx is now held */
/* check if the buffer was created in a race condition branch.
}
/* clean a buffer synchronously */
-long buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp, afs_uint32 *pisdirty)
+afs_uint32 buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp, afs_uint32 *pisdirty)
{
long code;
osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
bp->dataVersion = CM_BUF_VERSION_BAD;
bp->dirtyCounter++;
break;
+ case CM_ERROR_TIMEDOUT:
+ case CM_ERROR_ALLDOWN:
+ case CM_ERROR_ALLBUSY:
+ case CM_ERROR_ALLOFFLINE:
+ case CM_ERROR_CLOCKSKEW:
+ /* do not mark the buffer in error state but do
+ * not attempt to complete the rest either.
+ */
+ break;
default:
code = buf_CleanAsyncLocked(bp, reqp, &wasDirty);
if (bp->flags & CM_BUF_ERROR) {
i = BUF_FILEHASH(fidp);
- for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp=bp->allp, bcount++) {
+ for (bp = cm_data.buf_fileHashTablepp[i]; bp; bp=bp->fileHashp, bcount++) {
if (!cm_FidCmp(fidp, &bp->fid) && (bp->flags & CM_BUF_DIRTY))
return 1;
}