/* first, start by finding out whether we have a directory or something
* else, so we can find what object's ACL we need.
*/
- if (!cm_HaveCallback(scp)) {
+ if (scp->fileType == CM_SCACHETYPE_DIRECTORY || !cm_HaveCallback(scp)) {
code = cm_SyncOp(scp, NULL, userp, reqp, 0,
- CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
+ CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_FORCECB);
if (code)
return code;
code = cm_SyncOp(aclScp, NULL, userp, reqp, 0,
CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
if (!code) {
- code = cm_GetCallback(aclScp, userp, reqp, 1);
- cm_SyncOpDone(aclScp, NULL, CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS);
+#if 0
+ /* cm_GetCallback was called by cm_SyncOp */
+ code = cm_GetCallback(aclScp, userp, reqp, 1);
+#endif
+ cm_SyncOpDone(aclScp, NULL,
+ CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_GETSTATUS | CM_SCACHESYNC_FORCECB);
}
lock_ReleaseMutex(&aclScp->mx);
}
cm_ReleaseSCache(aclScp);
lock_ObtainMutex(&scp->mx);
- } else if (!got_cb) {
+ }
+#if 0
+ else if (!got_cb) {
+ /* cm_GetCallback was called by cm_SyncOp */
code = cm_GetCallback(scp, userp, reqp, 1);
}
+#endif
_done:
if (got_cb)
long cm_GetCallback(cm_scache_t *scp, struct cm_user *userp,
struct cm_req *reqp, long flags)
{
- long code;
+ long code = 0;
cm_conn_t *connp = NULL;
AFSFetchStatus afsStatus;
AFSVolSync volSync;
int mustCall;
cm_fid_t sfid;
struct rx_connection * callp = NULL;
+ int syncop_done = 0;
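+ /* set once cm_SyncOp has granted FETCHSTATUS|GETCALLBACK, so the sync state is released exactly once after the loop */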
osi_Log4(afsd_logp, "GetCallback scp 0x%p cell %d vol %d flags %lX",
scp, scp->fid.cell, scp->fid.volume, flags);
mustCall = (flags & 1);
cm_AFSFidFromFid(&tfid, &scp->fid);
while (1) {
- if (!mustCall && cm_HaveCallback(scp)) {
- osi_Log3(afsd_logp, "GetCallback Complete scp 0x%p cell %d vol %d",
- scp, scp->fid.cell, scp->fid.volume);
- return 0;
- }
+ if (!mustCall && cm_HaveCallback(scp))
+ break;
/* turn off mustCall, since it has now forced us past the check above */
mustCall = 0;
- /* 20060929 jaltman - We are being called from within cm_SyncOp.
- * if we call cm_SyncOp again and another thread has attempted
- * to obtain current status CM_SCACHEFLAG_WAITING will be set
- * and we will deadlock.
- */
/* otherwise, we have to make an RPC to get the status */
- cm_SyncOp(scp, NULL, userp, reqp, 0,
- CM_SCACHESYNC_FETCHSTATUS | CM_SCACHESYNC_GETCALLBACK);
+ if (!syncop_done) {
+ code = cm_SyncOp(scp, NULL, userp, reqp, 0,
+ CM_SCACHESYNC_FETCHSTATUS | CM_SCACHESYNC_GETCALLBACK);
+ if (code)
+ break;
+ syncop_done = 1;
+ }
cm_StartCallbackGrantingCall(scp, &cbr);
sfid = scp->fid;
lock_ReleaseMutex(&scp->mx);
} else {
cm_EndCallbackGrantingCall(NULL, &cbr, NULL, 0);
}
- /* 20060929 jaltman - don't deadlock */
- cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_FETCHSTATUS | CM_SCACHESYNC_GETCALLBACK);
-
- /* now check to see if we got an error */
- if (code) {
- osi_Log2(afsd_logp, "GetCallback Failed code 0x%x scp 0x%p -->",code, scp);
- osi_Log4(afsd_logp, " cell %u vol %u vn %u uniq %u",
- scp->fid.cell, scp->fid.volume, scp->fid.vnode, scp->fid.unique);
- return code;
- }
+
+ /* if we got an error, break out and return it to the caller */
+ if (code)
+ break;
+ }
+
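+ /* release the sync state obtained above before logging the result and returning */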
+ if (syncop_done)
+ cm_SyncOpDone(scp, NULL, CM_SCACHESYNC_FETCHSTATUS | CM_SCACHESYNC_GETCALLBACK);
+
+ if (code) {
+ osi_Log2(afsd_logp, "GetCallback Failed code 0x%x scp 0x%p -->",code, scp);
+ osi_Log4(afsd_logp, " cell %u vol %u vn %u uniq %u",
+ scp->fid.cell, scp->fid.volume, scp->fid.vnode, scp->fid.unique);
+ } else {
+ osi_Log3(afsd_logp, "GetCallback Complete scp 0x%p cell %d vol %d",
+ scp, scp->fid.cell, scp->fid.volume);
}
+
+ return code;
}
/* called periodically by cm_daemon to shut down use of expired callbacks */
code = RegQueryValueEx(parmKey, "ConnDeadTimeout", NULL, NULL,
(BYTE *) &dwValue, &dummyLen);
if (code == ERROR_SUCCESS)
- ConnDeadtimeout = dwValue;
+ ConnDeadtimeout = (unsigned short)dwValue;
dummyLen = sizeof(DWORD);
code = RegQueryValueEx(parmKey, "HardDeadTimeout", NULL, NULL,
(BYTE *) &dwValue, &dummyLen);
if (code == ERROR_SUCCESS)
- HardDeadtimeout = dwValue;
+ HardDeadtimeout = (unsigned short)dwValue;
afsi_log("HardDeadTimeout is %d", HardDeadtimeout);
RegCloseKey(parmKey);
}
afsi_log("lanmanworkstation : SessTimeout %d", RDRtimeout);
if (ConnDeadtimeout == 0)
- ConnDeadtimeout = RDRtimeout / 2;
+ ConnDeadtimeout = (unsigned short) (RDRtimeout / 2);
afsi_log("ConnDeadTimeout is %d", ConnDeadtimeout);
if (HardDeadtimeout == 0)
- HardDeadtimeout = RDRtimeout;
+ HardDeadtimeout = (unsigned short) RDRtimeout;
afsi_log("HardDeadTimeout is %d", HardDeadtimeout);
osi_EndOnce(&once);
lock_ObtainMutex(&bufp->mx);
lock_ObtainMutex(&scp->mx);
- flags = CM_SCACHESYNC_NEEDCALLBACK
- | CM_SCACHESYNC_GETSTATUS
+ flags = CM_SCACHESYNC_GETSTATUS
| CM_SCACHESYNC_STOREDATA
| CM_SCACHESYNC_BUFLOCKED;
code = cm_SyncOp(scp, bufp, userp, reqp, 0, flags);
thyper.HighPart = 0;
scanEnd = LargeIntegerAdd(scanStart, thyper);
- flags = CM_SCACHESYNC_NEEDCALLBACK
- | CM_SCACHESYNC_GETSTATUS
+ flags = CM_SCACHESYNC_GETSTATUS
| CM_SCACHESYNC_STOREDATA
| CM_SCACHESYNC_BUFLOCKED
| CM_SCACHESYNC_NOWAIT;
break;
}
- flags = CM_SCACHESYNC_NEEDCALLBACK | CM_SCACHESYNC_FETCHDATA
- | CM_SCACHESYNC_BUFLOCKED;
+ flags = CM_SCACHESYNC_FETCHDATA | CM_SCACHESYNC_BUFLOCKED;
if (!isFirst)
flags |= CM_SCACHESYNC_NOWAIT;
if (biop->reserved)
buf_UnreserveBuffers(cm_chunkSize / cm_data.buf_blockSize);
- flags = CM_SCACHESYNC_NEEDCALLBACK;
if (isStore)
- flags |= CM_SCACHESYNC_STOREDATA;
+ flags = CM_SCACHESYNC_STOREDATA;
else
- flags |= CM_SCACHESYNC_FETCHDATA;
+ flags = CM_SCACHESYNC_FETCHDATA;
scp = biop->scp;
- for(qdp = biop->bufListp; qdp; qdp = nqdp) {
- /* lookup next guy first, since we're going to free this one */
- nqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
+ if (biop->bufListp) {
+ for(qdp = biop->bufListp; qdp; qdp = nqdp) {
+ /* lookup next guy first, since we're going to free this one */
+ nqdp = (osi_queueData_t *) osi_QNext(&qdp->q);
- /* extract buffer and free queue data */
- bufp = osi_GetQData(qdp);
- osi_QRemoveHT((osi_queue_t **) &biop->bufListp,
- (osi_queue_t **) &biop->bufListEndp,
- &qdp->q);
- osi_QDFree(qdp);
-
- /* now, mark I/O as done, unlock the buffer and release it */
- lock_ObtainMutex(&bufp->mx);
- lock_ObtainMutex(&scp->mx);
- cm_SyncOpDone(scp, bufp, flags);
+ /* extract buffer and free queue data */
+ bufp = osi_GetQData(qdp);
+ osi_QRemoveHT((osi_queue_t **) &biop->bufListp,
+ (osi_queue_t **) &biop->bufListEndp,
+ &qdp->q);
+ osi_QDFree(qdp);
+
+ /* now, mark I/O as done, unlock the buffer and release it */
+ lock_ObtainMutex(&bufp->mx);
+ lock_ObtainMutex(&scp->mx);
+ cm_SyncOpDone(scp, bufp, flags);
- /* turn off writing and wakeup users */
- if (isStore) {
- if (bufp->flags & CM_BUF_WAITING) {
- osi_Log2(afsd_logp, "cm_ReleaseBIOD Waking [scp 0x%p] bp 0x%p", scp, bufp);
- osi_Wakeup((LONG_PTR) bufp);
- }
- bufp->flags &= ~(CM_BUF_WRITING | CM_BUF_DIRTY);
- }
-
- lock_ReleaseMutex(&scp->mx);
- lock_ReleaseMutex(&bufp->mx);
- buf_Release(bufp);
- bufp = NULL;
+ /* turn off writing and wakeup users */
+ if (isStore) {
+ if (bufp->flags & CM_BUF_WAITING) {
+ osi_Log2(afsd_logp, "cm_ReleaseBIOD Waking [scp 0x%p] bp 0x%p", scp, bufp);
+ osi_Wakeup((LONG_PTR) bufp);
+ }
+ bufp->flags &= ~(CM_BUF_WRITING | CM_BUF_DIRTY);
+ }
+
+ lock_ReleaseMutex(&scp->mx);
+ lock_ReleaseMutex(&bufp->mx);
+ buf_Release(bufp);
+ bufp = NULL;
+ }
+ } else {
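+ /* no buffers were queued on this BIOD; still clear the store/fetch sync state on the scp */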
+ lock_ObtainMutex(&scp->mx);
+ cm_SyncOpDone(scp, NULL, flags);
+ lock_ReleaseMutex(&scp->mx);
}
/* clean things out */
/* There were no deleted scache objects that we could use. Try to find
* one that simply hasn't been used in a while.
*/
- while (1) {
for ( scp = cm_data.scacheLRULastp;
scp;
scp = (cm_scache_t *) osi_QPrev(&scp->q))
}
osi_Log1(afsd_logp, "GetNewSCache all scache entries in use (retry = %d)", retry);
- /* If get here it means that every scache is either in use or has dirty buffers.
- * We used to panic. Now we will give up our lock and wait.
- */
- if (++retry < 10) {
- lock_ReleaseWrite(&cm_scacheLock);
- Sleep(1000);
- lock_ObtainWrite(&cm_scacheLock);
- } else {
return NULL;
- }
- } /* forever */
}
/* if we get here, we should allocate a new scache entry. We either are below
cm_buf_t *tbufp;
afs_uint32 outRights;
int bufLocked;
+ afs_uint32 sleep_scp_flags = 0;
+ afs_uint32 sleep_buf_cmflags = 0;
+ afs_uint32 sleep_scp_bufs = 0;
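+ /* the three fields above record the state that last put this thread to sleep, for debugging stuck waiters */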
/* lookup this first */
bufLocked = flags & CM_SCACHESYNC_BUFLOCKED;
cm_fakeDirCallback < 2)
#endif /* AFS_FREELANCE_CLIENT */
) {
- if (!cm_HaveCallback(scp)) {
+ if ((flags & CM_SCACHESYNC_FORCECB) || !cm_HaveCallback(scp)) {
osi_Log1(afsd_logp, "CM SyncOp getting callback on scp 0x%p",
scp);
if (bufLocked)
}
if (code)
return code;
+ flags &= ~CM_SCACHESYNC_FORCECB; /* only force once */
continue;
}
}
if (flags & CM_SCACHESYNC_NOWAIT)
return CM_ERROR_WOULDBLOCK;
+ sleep_scp_flags = scp->flags; /* so we know why we slept */
+ sleep_buf_cmflags = bufp ? bufp->cmFlags : 0;
+ sleep_scp_bufs = (scp->bufReadsp ? 1 : 0) | (scp->bufWritesp ? 2 : 0);
+
/* wait here, then try again */
osi_Log1(afsd_logp, "CM SyncOp sleeping scp 0x%p", scp);
if ( scp->flags & CM_SCACHEFLAG_WAITING ) {
osi_queueData_t *qdp;
cm_buf_t *tbufp;
+ lock_AssertMutex(&scp->mx);
+
/* now, update the recorded state for RPC-type calls */
if (flags & CM_SCACHESYNC_FETCHSTATUS)
scp->flags &= ~CM_SCACHEFLAG_FETCHING;
|| error == 122 /* EDQUOT on Linux */
|| error == 1133) /* EDQUOT on Irix */
error = CM_ERROR_QUOTA;
- else if (error == VNOVNODE) {
-#ifdef COMMENT
+ else if (error == VNOVNODE)
error = CM_ERROR_BADFD;
-#else
- error = CM_ERROR_RETRY;
-#endif
- } else if (error == 21)
+ else if (error == 21)
return CM_ERROR_ISDIR;
return error;
}
{
smb_vc_t *vcp;
+ lock_ObtainWrite(&smb_globalLock); /* for numVCs */
lock_ObtainWrite(&smb_rctLock);
for (vcp = smb_allVCsp; vcp; vcp=vcp->nextp) {
if (vcp->magic != SMB_VC_MAGIC)
if (!vcp && (flags & SMB_FLAG_CREATE)) {
vcp = malloc(sizeof(*vcp));
memset(vcp, 0, sizeof(*vcp));
- lock_ObtainWrite(&smb_globalLock);
vcp->vcID = ++numVCs;
- lock_ReleaseWrite(&smb_globalLock);
vcp->magic = SMB_VC_MAGIC;
vcp->refCount = 2; /* smb_allVCsp and caller */
vcp->tidCounter = 1;
memset(vcp->encKey, 0, MSV1_0_CHALLENGE_LENGTH);
if (numVCs >= CM_SESSION_RESERVED) {
- lock_ObtainWrite(&smb_globalLock);
numVCs = 0;
- lock_ReleaseWrite(&smb_globalLock);
osi_Log0(smb_logp, "WARNING: numVCs wrapping around");
}
}
lock_ReleaseWrite(&smb_rctLock);
+ lock_ReleaseWrite(&smb_globalLock);
return vcp;
}