case vl_unknown:
cm_InitReq(&req);
req.flags |= CM_REQ_NORETRY;
- buf_CleanAsyncLocked(bp, &req, &dirty);
+ buf_CleanAsyncLocked(NULL, bp, &req, 0, &dirty);
wasDirty |= dirty;
}
cm_PutVolume(volp);
*
* Returns non-zero if the buffer was dirty.
*/
-afs_uint32 buf_CleanAsyncLocked(cm_buf_t *bp, cm_req_t *reqp, afs_uint32 *pisdirty)
+afs_uint32 buf_CleanAsyncLocked(cm_scache_t *scp, cm_buf_t *bp, cm_req_t *reqp,
+ afs_uint32 flags, afs_uint32 *pisdirty)
{
afs_uint32 code = 0;
afs_uint32 isdirty = 0;
- cm_scache_t * scp = NULL;
osi_hyper_t offset;
+ int release_scp = 0;
osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
+ /* Use the caller-provided scp when available; otherwise look it up
+  * from the buffer's FID and remember to drop the reference we took. */
+ if (scp == NULL) {
+     scp = cm_FindSCache(&bp->fid);
+     if (scp)
+         release_scp = 1;
+ }
+
+ if (!scp) {
+     osi_Log1(buf_logp, "buf_CleanAsyncLocked unable to start I/O - scp not found buf 0x%p", bp);
+     code = CM_ERROR_NOSUCHFILE;
+     /* NOTE(review): control continues into the dirty-buffer loop below with
+      * scp == NULL; the Writep implementation asserts scp != NULL, so confirm
+      * the NOSUCHFILE path cannot reach (*cm_buf_opsp->Writep)() or guard the
+      * I/O call accordingly. */
+ }
+
while ((bp->flags & CM_BUF_DIRTY) == CM_BUF_DIRTY) {
- isdirty = 1;
+ isdirty = 1;
lock_ReleaseMutex(&bp->mx);
- scp = cm_FindSCache(&bp->fid);
- if (scp) {
- osi_Log2(buf_logp, "buf_CleanAsyncLocked starts I/O on scp 0x%p buf 0x%p", scp, bp);
+ osi_Log2(buf_logp, "buf_CleanAsyncLocked starts I/O on scp 0x%p buf 0x%p", scp, bp);
- offset = bp->offset;
- LargeIntegerAdd(offset, ConvertLongToLargeInteger(bp->dirty_offset));
- code = (*cm_buf_opsp->Writep)(scp, &offset,
+ offset = bp->offset;
+ LargeIntegerAdd(offset, ConvertLongToLargeInteger(bp->dirty_offset));
+ code = (*cm_buf_opsp->Writep)(scp, &offset,
#if 1
- /* we might as well try to write all of the contiguous
- * dirty buffers in one RPC
- */
- cm_chunkSize,
+ /* we might as well try to write all of the contiguous
+ * dirty buffers in one RPC
+ */
+ cm_chunkSize,
#else
- bp->dirty_length,
+ bp->dirty_length,
#endif
- 0, bp->userp, reqp);
- osi_Log3(buf_logp, "buf_CleanAsyncLocked I/O on scp 0x%p buf 0x%p, done=%d", scp, bp, code);
+ flags, bp->userp, reqp);
+ osi_Log3(buf_logp, "buf_CleanAsyncLocked I/O on scp 0x%p buf 0x%p, done=%d", scp, bp, code);
- cm_ReleaseSCache(scp);
- scp = NULL;
- } else {
- osi_Log1(buf_logp, "buf_CleanAsyncLocked unable to start I/O - scp not found buf 0x%p", bp);
- code = CM_ERROR_NOSUCHFILE;
- }
-
- lock_ObtainMutex(&bp->mx);
+ lock_ObtainMutex(&bp->mx);
/* if the Write routine returns No Such File, clear the dirty flag
* because we aren't going to be able to write this data to the file
* server.
}
}
+ if (release_scp)
+ cm_ReleaseSCache(scp);
+
/* if someone was waiting for the I/O that just completed or failed,
* wake them up.
*/
* have the WRITING flag set, so we won't get
* back here.
*/
- buf_CleanAsync(bp, reqp, NULL);
+ buf_CleanAsync(scp, bp, reqp, 0, NULL);
/* now put it back and go around again */
buf_Release(bp);
}
/* clean a buffer synchronously */
-afs_uint32 buf_CleanAsync(cm_buf_t *bp, cm_req_t *reqp, afs_uint32 *pisdirty)
+afs_uint32 buf_CleanAsync(cm_scache_t *scp, cm_buf_t *bp, cm_req_t *reqp, afs_uint32 flags, afs_uint32 *pisdirty)
{
long code;
osi_assertx(bp->magic == CM_BUF_MAGIC, "invalid cm_buf_t magic");
+ osi_assertx(!(flags & CM_BUF_WRITE_SCP_LOCKED), "scp->rw must not be held when calling buf_CleanAsync");
lock_ObtainMutex(&bp->mx);
- code = buf_CleanAsyncLocked(bp, reqp, pisdirty);
+ code = buf_CleanAsyncLocked(scp, bp, reqp, flags, pisdirty);
lock_ReleaseMutex(&bp->mx);
return code;
cm_InitReq(&req);
req.flags |= CM_REQ_NORETRY;
- buf_CleanAsync(bp, &req, NULL);
+ buf_CleanAsync(NULL, bp, &req, 0, NULL);
buf_CleanWait(NULL, bp, FALSE);
/* relock and release buffer */
/* clean buffer synchronously */
if (cm_FidCmp(&bp->fid, &scp->fid) == 0) {
- /*
- * if the object is not located on a read/write volume
- * we must stabilize the object to ensure that buffer
- * changes cannot occur while the flush is performed.
- */
- if (!stable && code == 0 && !(scp->flags & CM_SCACHEFLAG_RO)) {
+ if (code == 0 && !stable && (bp->flags & CM_BUF_DIRTY)) {
+ /*
+ * we must stabilize the object to ensure that buffer
+ * changes cannot occur while the flush is performed.
+ * However, we do not want to Stabilize if we do not
+ * need to because Stabilize obtains a callback.
+ */
code = (*cm_buf_opsp->Stabilizep)(scp, userp, reqp);
stable = (code == 0);
}
- lock_ObtainMutex(&bp->mx);
-
- /* start cleaning the buffer, and wait for it to finish */
- buf_CleanAsyncLocked(bp, reqp, NULL);
- buf_WaitIO(scp, bp);
- lock_ReleaseMutex(&bp->mx);
-
if (code == CM_ERROR_BADFD) {
/* if the scp's FID is bad its because we received VNOVNODE
* when attempting to FetchStatus before the write. This
bp->dataVersion = CM_BUF_VERSION_BAD; /* known bad */
bp->dirtyCounter++;
lock_ReleaseMutex(&bp->mx);
- } else if (!stable && code) {
- goto skip;
+ } else if (!(scp->flags & CM_SCACHEFLAG_RO)) {
+ if (code) {
+ goto skip;
+ }
+
+ lock_ObtainMutex(&bp->mx);
+
+ /* start cleaning the buffer, and wait for it to finish */
+ buf_CleanAsyncLocked(scp, bp, reqp, 0, NULL);
+ buf_WaitIO(scp, bp);
+
+ lock_ReleaseMutex(&bp->mx);
}
/* actually, we only know that buffer is clean if ref
*/
break;
default:
- code = buf_CleanAsyncLocked(bp, reqp, &wasDirty);
+ code = buf_CleanAsyncLocked(scp, bp, reqp, 0, &wasDirty);
if (bp->flags & CM_BUF_ERROR) {
code = bp->error;
if (code == 0)
#define CM_BUF_EOF 0x80 /* read 0 bytes; used for detecting EOF */
typedef struct cm_buf_ops {
- long (*Writep)(void *, osi_hyper_t *, long, long, struct cm_user *,
- struct cm_req *);
- long (*Readp)(cm_buf_t *, long, long *, struct cm_user *);
- long (*Stabilizep)(void *, struct cm_user *, struct cm_req *);
- long (*Unstabilizep)(void *, struct cm_user *);
+ long (*Writep)(void *vscp, osi_hyper_t *offsetp,
+ long length, long flags,
+ struct cm_user *userp,
+ struct cm_req *reqp);
+ long (*Readp)(cm_buf_t *bufp, long length,
+ long *bytesReadp, struct cm_user *userp);
+ long (*Stabilizep)(void *vscp, struct cm_user *userp, struct cm_req *reqp);
+ long (*Unstabilizep)(void *vscp, struct cm_user *userp);
} cm_buf_ops_t;
+/* Flag for buf_CleanAsync*() 'flags' (forwarded to Writep): the caller
+ * already holds scp->rw write-locked, so the store path must not
+ * re-acquire or release it. */
+#define CM_BUF_WRITE_SCP_LOCKED 0x1
+
/* global locks */
extern osi_rwlock_t buf_globalLock;
extern long buf_GetNew(struct cm_scache *, osi_hyper_t *, cm_req_t *, cm_buf_t **);
-extern afs_uint32 buf_CleanAsyncLocked(cm_buf_t *, cm_req_t *, afs_uint32 *);
+extern afs_uint32 buf_CleanAsyncLocked(cm_scache_t *scp, cm_buf_t *bp,
+                                       cm_req_t *reqp, afs_uint32 flags,
+                                       afs_uint32 *pisdirty);
-extern afs_uint32 buf_CleanAsync(cm_buf_t *, cm_req_t *, afs_uint32 *);
+extern afs_uint32 buf_CleanAsync(cm_scache_t *scp, cm_buf_t *bp,
+                                 cm_req_t *reqp, afs_uint32 flags,
+                                 afs_uint32 *pisdirty);
extern void buf_CleanWait(cm_scache_t *, cm_buf_t *, afs_uint32 locked);
cm_bulkIO_t biod; /* bulk IO descriptor */
int require_64bit_ops = 0;
int call_was_64bit = 0;
+ int scp_locked = flags & CM_BUF_WRITE_SCP_LOCKED;
osi_assertx(userp != NULL, "null cm_user_t");
osi_assertx(scp != NULL, "null cm_scache_t");
* drops lots of locks, and may indeed return a properly initialized
* buffer, although more likely it will just return a new, empty, buffer.
*/
-
- lock_ObtainWrite(&scp->rw);
+ if (!scp_locked)
+ lock_ObtainWrite(&scp->rw);
if (scp->flags & CM_SCACHEFLAG_DELETED) {
- lock_ReleaseWrite(&scp->rw);
+ if (!scp_locked)
+ lock_ReleaseWrite(&scp->rw);
return CM_ERROR_NOSUCHFILE;
}
if (code) {
osi_Log1(afsd_logp, "cm_SetupStoreBIOD code %x", code);
cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
- lock_ReleaseWrite(&scp->rw);
+ if (!scp_locked)
+ lock_ReleaseWrite(&scp->rw);
return code;
}
osi_Log0(afsd_logp, "cm_SetupStoreBIOD length 0");
cm_ReleaseBIOD(&biod, 1, 0, 1); /* should be a NOOP */
cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_STOREDATA_EXCL);
- lock_ReleaseWrite(&scp->rw);
+ if (!scp_locked)
+ lock_ReleaseWrite(&scp->rw);
return 0;
}
else if (code == CM_ERROR_QUOTA)
scp->flags |= CM_SCACHEFLAG_OVERQUOTA;
}
- lock_ReleaseWrite(&scp->rw);
+ if (!scp_locked)
+ lock_ReleaseWrite(&scp->rw);
return code;
}