/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License. For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */
/* Thread local storage index for lock tracking */
static DWORD tls_LockRefH = 0;
static DWORD tls_LockRefT = 0;
+static BOOLEAN lockOrderValidation = 0;
+static osi_lock_ref_t * lock_ref_FreeListp = NULL;
+static osi_lock_ref_t * lock_ref_FreeListEndp = NULL;
+CRITICAL_SECTION lock_ref_CS;
void osi_BaseInit(void)
{
for(i=0; i<OSI_MUTEXHASHSIZE; i++)
InitializeCriticalSection(&osi_baseAtomicCS[i]);
- if ((tls_LockRefH = TlsAlloc()) == TLS_OUT_OF_INDEXES)
- osi_panic("TlsAlloc(tls_LockRefH) failure", __FILE__, __LINE__);
+ if ((tls_LockRefH = TlsAlloc()) == TLS_OUT_OF_INDEXES)
+ osi_panic("TlsAlloc(tls_LockRefH) failure", __FILE__, __LINE__);
- if ((tls_LockRefT = TlsAlloc()) == TLS_OUT_OF_INDEXES)
- osi_panic("TlsAlloc(tls_LockRefT) failure", __FILE__, __LINE__);
-}
+ if ((tls_LockRefT = TlsAlloc()) == TLS_OUT_OF_INDEXES)
+ osi_panic("TlsAlloc(tls_LockRefT) failure", __FILE__, __LINE__);
-osi_lock_ref_t *lock_GetLockRef(void * lockp, char type)
+ InitializeCriticalSection(&lock_ref_CS);
+}
+
+void
+osi_SetLockOrderValidation(int on)
{
- osi_lock_ref_t * lockRefp = (osi_lock_ref_t *)malloc(sizeof(osi_lock_ref_t));
+ lockOrderValidation = (BOOLEAN)on;
+}
+
+static osi_lock_ref_t *
+lock_GetLockRef(void * lockp, char type)
+{
+ osi_lock_ref_t * lockRefp = NULL;
+
+ EnterCriticalSection(&lock_ref_CS);
+ if (lock_ref_FreeListp) {
+ lockRefp = lock_ref_FreeListp;
+ osi_QRemoveHT( (osi_queue_t **) &lock_ref_FreeListp,
+ (osi_queue_t **) &lock_ref_FreeListEndp,
+ &lockRefp->q);
+ }
+ LeaveCriticalSection(&lock_ref_CS);
+
+ if (lockRefp == NULL)
+ lockRefp = (osi_lock_ref_t *)malloc(sizeof(osi_lock_ref_t));
memset(lockRefp, 0, sizeof(osi_lock_ref_t));
lockRefp->type = type;
return lockRefp;
}
+static void
+lock_FreeLockRef(osi_lock_ref_t * lockRefp)
+{
+ EnterCriticalSection(&lock_ref_CS);
+ osi_QAddH( (osi_queue_t **) &lock_ref_FreeListp,
+ (osi_queue_t **) &lock_ref_FreeListEndp,
+ &lockRefp->q);
+ LeaveCriticalSection(&lock_ref_CS);
+}
+
void lock_VerifyOrderRW(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_rwlock_t *lockp)
{
char msg[512];
CRITICAL_SECTION *csp;
osi_queue_t * lockRefH, *lockRefT;
osi_lock_ref_t *lockRefp;
-
+
if ((i=lockp->type) != 0) {
if (i >= 0 && i < OSI_NLOCKTYPES)
(osi_lockOps[i]->ObtainWriteProc)(lockp);
return;
}
- lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
- lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
+ if (lockOrderValidation) {
+ lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
+ lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
- if (lockp->level != 0)
- lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
+ if (lockp->level != 0)
+ lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
+ }
/* otherwise we're the fast base type */
csp = &osi_baseAtomicCS[lockp->atomicIndex];
EnterCriticalSection(csp);
/* here we have the fast lock, so see if we can obtain the real lock */
- if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
+ if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
(lockp->readers > 0)) {
lockp->waiters++;
- osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
+ osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
lockp->waiters--;
osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
- }
- else {
+ } else {
/* if we're here, all clear to set the lock */
lockp->flags |= OSI_LOCKFLAG_EXCL;
+ lockp->tid[0] = thrd_Current();
}
-
- lockp->tid = thrd_Current();
-
LeaveCriticalSection(csp);
- lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
- osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
- TlsSetValue(tls_LockRefH, lockRefH);
- TlsSetValue(tls_LockRefT, lockRefT);
-}
+ if (lockOrderValidation) {
+ lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
+ osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
+ TlsSetValue(tls_LockRefH, lockRefH);
+ TlsSetValue(tls_LockRefT, lockRefT);
+ }
+}
/*
 * lock_ObtainRead -- acquire lockp for shared (read) access.
 *
 * Non-base lock types are dispatched through osi_lockOps.  For the fast
 * base type, the hashed atomic critical section guards the lock word:
 * if a writer holds the lock or writers are waiting, the thread sleeps
 * on the turnstile; otherwise the reader count is bumped immediately.
 *
 * Only the first OSI_RWLOCK_THREADS reader thread ids are recorded in
 * lockp->tid[]; additional readers are counted but not recorded.  The
 * "already held" assertion loop is therefore bounded by both the reader
 * count and the array size -- the unbounded form read past the end of
 * tid[] whenever readers exceeded OSI_RWLOCK_THREADS.
 *
 * When lock-order validation is enabled, a lock reference is linked
 * onto the calling thread's TLS queue after the lock is obtained.
 */
void lock_ObtainRead(osi_rwlock_t *lockp)
{
    long i;     /* was undeclared in the visible block */
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainReadProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* bound by OSI_RWLOCK_THREADS: only that many tids are recorded */
    for (i = 0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++) {
        osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, lockp->tid, csp);
        lockp->waiters--;
        osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0);
    } else {
        /* if we're here, all clear to set the lock */
        if (++lockp->readers <= OSI_RWLOCK_THREADS)
            lockp->tid[lockp->readers-1] = tid;
    }
    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
/* NOTE(review): this hunk is a diff fragment -- the opening brace, the
 * 'long i;' declaration, the csp assignment and EnterCriticalSection
 * appear to be elided context lines.  Review notes only; code unchanged. */
void lock_ReleaseRead(osi_rwlock_t *lockp)
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
-
+    DWORD tid = thrd_Current();
+
    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseReadProc)(lockp);
        return;
    }
-    if (lockp->level != 0) {
+    if (lockOrderValidation && lockp->level != 0) {
+        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
-                free(lockRefp);
+                lock_FreeLockRef(lockRefp);
+                found = 1;
                break;
            }
        }
+        osi_assertx(found, "read lock not found in TLS queue");
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    osi_assertx(lockp->readers > 0, "read lock not held");
/* NOTE(review): the removal loop below walks i < lockp->readers, but
 * tid[] has only OSI_RWLOCK_THREADS slots (see lock_ObtainRead, which
 * records at most that many).  When readers > OSI_RWLOCK_THREADS both
 * tid[i] and tid[i+1] are read/written out of bounds.  Bound both the
 * outer and inner loops with '&& i < OSI_RWLOCK_THREADS' (inner:
 * OSI_RWLOCK_THREADS - 1) -- confirm against the struct definition. */
+    for ( i=0; i < lockp->readers; i++) {
+        if ( lockp->tid[i] == tid ) {
+            for ( ; i < lockp->readers - 1; i++)
+                lockp->tid[i] = lockp->tid[i+1];
+            lockp->tid[i] = 0;
+            break;
+        }
+    }
+
    /* releasing a read lock can allow readers or writers */
    if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
        return;
    }
- if (lockp->level != 0) {
+ if (lockOrderValidation && lockp->level != 0) {
+ int found = 0;
lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
- free(lockRefp);
+ lock_FreeLockRef(lockRefp);
+ found = 1;
break;
}
}
+ osi_assertx(found, "write lock not found in TLS queue");
TlsSetValue(tls_LockRefH, lockRefH);
TlsSetValue(tls_LockRefT, lockRefT);
EnterCriticalSection(csp);
osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
+ osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");
- lockp->tid = 0;
+ lockp->tid[0] = 0;
lockp->flags &= ~OSI_LOCKFLAG_EXCL;
if (!osi_TEmpty(&lockp->d.turn)) {
/* and finally release the big lock */
LeaveCriticalSection(csp);
}
-}
+}
void lock_ConvertWToR(osi_rwlock_t *lockp)
{
EnterCriticalSection(csp);
osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
+ osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");
/* convert write lock to read lock */
lockp->flags &= ~OSI_LOCKFLAG_EXCL;
lockp->readers++;
- lockp->tid = 0;
-
if (!osi_TEmpty(&lockp->d.turn)) {
osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);
}
{
long i;
CRITICAL_SECTION *csp;
+ DWORD tid = thrd_Current();
if ((i = lockp->type) != 0) {
if (i >= 0 && i < OSI_NLOCKTYPES)
osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held");
osi_assertx(lockp->readers > 0, "read lock not held");
+ for ( i=0; i < lockp->readers; i++) {
+ if ( lockp->tid[i] == tid ) {
+ for ( ; i < lockp->readers - 1; i++)
+ lockp->tid[i] = lockp->tid[i+1];
+ lockp->tid[i] = 0;
+ break;
+ }
+ }
+
if (--lockp->readers == 0) {
/* convert read lock to write lock */
lockp->flags |= OSI_LOCKFLAG_EXCL;
+ lockp->tid[0] = tid;
} else {
lockp->waiters++;
- osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
+ osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
lockp->waiters--;
osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
}
- lockp->tid = thrd_Current();
LeaveCriticalSection(csp);
-}
+}
void lock_ObtainMutex(struct osi_mutex *lockp)
{
CRITICAL_SECTION *csp;
osi_queue_t * lockRefH, *lockRefT;
osi_lock_ref_t *lockRefp;
-
+
if ((i=lockp->type) != 0) {
if (i >= 0 && i < OSI_NLOCKTYPES)
(osi_lockOps[i]->ObtainMutexProc)(lockp);
return;
}
- lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
- lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
+ if (lockOrderValidation) {
+ lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
+ lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
- if (lockp->level != 0)
- lock_VerifyOrderMX(lockRefH, lockRefT, lockp);
+ if (lockp->level != 0)
+ lock_VerifyOrderMX(lockRefH, lockRefT, lockp);
+ }
/* otherwise we're the fast base type */
csp = &osi_baseAtomicCS[lockp->atomicIndex];
/* here we have the fast lock, so see if we can obtain the real lock */
if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
lockp->waiters++;
- osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
+ osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, &lockp->tid, csp);
lockp->waiters--;
osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
- }
- else {
+ } else {
/* if we're here, all clear to set the lock */
lockp->flags |= OSI_LOCKFLAG_EXCL;
+ lockp->tid = thrd_Current();
}
- lockp->tid = thrd_Current();
LeaveCriticalSection(csp);
- lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
- osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
- TlsSetValue(tls_LockRefH, lockRefH);
- TlsSetValue(tls_LockRefT, lockRefT);
+ if (lockOrderValidation) {
+ lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
+ osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
+ TlsSetValue(tls_LockRefH, lockRefH);
+ TlsSetValue(tls_LockRefT, lockRefT);
+ }
}
void lock_ReleaseMutex(struct osi_mutex *lockp)
return;
}
- if (lockp->level != 0) {
+ if (lockOrderValidation && lockp->level != 0) {
+ int found = 0;
lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
- free(lockRefp);
+ lock_FreeLockRef(lockRefp);
+ found = 1;
break;
}
}
-
+
+ osi_assertx(found, "mutex lock not found in TLS queue");
TlsSetValue(tls_LockRefH, lockRefH);
TlsSetValue(tls_LockRefT, lockRefT);
}
EnterCriticalSection(csp);
osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held");
+ osi_assertx(lockp->tid == thrd_Current(), "mutex not held by current thread");
lockp->flags &= ~OSI_LOCKFLAG_EXCL;
lockp->tid = 0;
/* and finally release the big lock */
LeaveCriticalSection(csp);
}
-}
+}
/* NOTE(review): diff fragment -- locals, the type-dispatch prologue,
 * EnterCriticalSection and the contended branch are elided context.
 * Review notes only; code unchanged. */
int lock_TryRead(struct osi_rwlock *lockp)
{
    if (i >= 0 && i < OSI_NLOCKTYPES)
        return (osi_lockOps[i]->TryReadProc)(lockp);
-    lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
-    lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
+    if (lockOrderValidation) {
+        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
+        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
-    if (lockp->level != 0) {
-        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
-            if (lockRefp->type == OSI_LOCK_RW) {
-                osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
+        if (lockp->level != 0) {
+            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
+                if (lockRefp->type == OSI_LOCK_RW) {
+                    osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
+                }
            }
        }
    }
}
    else {
        /* if we're here, all clear to set the lock */
-        lockp->readers++;
/* NOTE(review): off-by-one -- lock_ObtainRead uses '<=' here, so with
 * '<' the last tid[] slot (index OSI_RWLOCK_THREADS-1) is never
 * recorded on the try path.  Should read '<= OSI_RWLOCK_THREADS' for
 * consistency -- confirm. */
+        if (++lockp->readers < OSI_RWLOCK_THREADS)
+            lockp->tid[lockp->readers-1] = thrd_Current();
        i = 1;
    }
    LeaveCriticalSection(csp);
-    if (i) {
+    if (lockOrderValidation && i) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
/* NOTE(review): only tls_LockRefH is written back here; other paths
 * also call TlsSetValue(tls_LockRefT, lockRefT).  Possibly an elided
 * context line -- verify the tail slot is updated too. */
        TlsSetValue(tls_LockRefH, lockRefH);
    }
    return i;
-}
+}
int lock_TryWrite(struct osi_rwlock *lockp)
if (i >= 0 && i < OSI_NLOCKTYPES)
return (osi_lockOps[i]->TryWriteProc)(lockp);
- lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
- lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
+ if (lockOrderValidation) {
+ lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
+ lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
- if (lockp->level != 0) {
- for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
- if (lockRefp->type == OSI_LOCK_RW) {
- osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
+ if (lockp->level != 0) {
+ for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
+ if (lockRefp->type == OSI_LOCK_RW) {
+ osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
+ }
}
}
}
else {
/* if we're here, all clear to set the lock */
lockp->flags |= OSI_LOCKFLAG_EXCL;
+ lockp->tid[0] = thrd_Current();
i = 1;
}
- if (i)
- lockp->tid = thrd_Current();
-
LeaveCriticalSection(csp);
- if (i) {
+ if (lockOrderValidation && i) {
lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
TlsSetValue(tls_LockRefH, lockRefH);
if (i >= 0 && i < OSI_NLOCKTYPES)
return (osi_lockOps[i]->TryMutexProc)(lockp);
- lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
- lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
+ if (lockOrderValidation) {
+ lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
+ lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
- if (lockp->level != 0) {
- for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
- if (lockRefp->type == OSI_LOCK_MUTEX) {
- osi_assertx(lockRefp->mx != lockp, "Mutex already held");
+ if (lockp->level != 0) {
+ for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
+ if (lockRefp->type == OSI_LOCK_MUTEX) {
+ osi_assertx(lockRefp->mx != lockp, "Mutex already held");
+ }
}
}
}
else {
/* if we're here, all clear to set the lock */
lockp->flags |= OSI_LOCKFLAG_EXCL;
+ lockp->tid = thrd_Current();
i = 1;
}
- if (i)
- lockp->tid = thrd_Current();
-
LeaveCriticalSection(csp);
- if (i) {
+ if (lockOrderValidation && i) {
lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
TlsSetValue(tls_LockRefH, lockRefH);
CRITICAL_SECTION *csp;
osi_queue_t * lockRefH, *lockRefT;
osi_lock_ref_t *lockRefp;
+ DWORD tid = thrd_Current();
if ((i = lockp->type) != 0) {
if (i >= 0 && i < OSI_NLOCKTYPES)
return;
}
- if (lockp->level != 0) {
+ if (lockOrderValidation && lockp->level != 0) {
lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
- free(lockRefp);
+ lock_FreeLockRef(lockRefp);
break;
}
}
osi_assertx(lockp->readers > 0, "osi_SleepR: not held");
+ for ( i=0; i < lockp->readers; i++) {
+ if ( lockp->tid[i] == tid ) {
+ for ( ; i < lockp->readers - 1; i++)
+ lockp->tid[i] = lockp->tid[i+1];
+ lockp->tid[i] = 0;
+ break;
+ }
+ }
+
/* XXX better to get the list of things to wakeup from TSignalForMLs, and
* then do the wakeup after SleepSpin releases the low-level mutex.
*/
/* now call into scheduler to sleep atomically with releasing spin lock */
osi_SleepSpin(sleepVal, csp);
-}
+}
void osi_SleepW(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
CRITICAL_SECTION *csp;
osi_queue_t * lockRefH, *lockRefT;
osi_lock_ref_t *lockRefp;
+ DWORD tid = thrd_Current();
if ((i = lockp->type) != 0) {
if (i >= 0 && i < OSI_NLOCKTYPES)
return;
}
- if (lockp->level != 0) {
+ if (lockOrderValidation && lockp->level != 0) {
lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
- free(lockRefp);
+ lock_FreeLockRef(lockRefp);
break;
}
}
osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");
lockp->flags &= ~OSI_LOCKFLAG_EXCL;
+ lockp->tid[0] = 0;
if (!osi_TEmpty(&lockp->d.turn)) {
osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
}
return;
}
- if (lockp->level != 0) {
+ if (lockOrderValidation && lockp->level != 0) {
lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
- free(lockRefp);
+ lock_FreeLockRef(lockRefp);
break;
}
}
-
+
TlsSetValue(tls_LockRefH, lockRefH);
TlsSetValue(tls_LockRefT, lockRefT);
}
EnterCriticalSection(csp);
osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM not held");
-
+
lockp->flags &= ~OSI_LOCKFLAG_EXCL;
+ lockp->tid = 0;
if (!osi_TEmpty(&lockp->d.turn)) {
osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
}
if ((i=lockp->type) != 0)
if (i >= 0 && i < OSI_NLOCKTYPES)
(osi_lockOps[i]->FinalizeRWLockProc)(lockp);
-}
+}
void lock_FinalizeMutex(osi_mutex_t *lockp)
-{
+{
long i;
if ((i=lockp->type) != 0)
if (i >= 0 && i < OSI_NLOCKTYPES)
(osi_lockOps[i]->FinalizeMutexProc)(lockp);
-}
+}
void lock_InitializeMutex(osi_mutex_t *mp, char *namep, unsigned short level)
{
(osi_lockOps[i]->InitializeRWLockProc)(mp, namep, level);
return;
}
-
+
/* otherwise we have the base case, which requires no special
* initialization.
*/
- mp->type = 0;
- mp->flags = 0;
+ memset(mp, 0, sizeof(osi_rwlock_t));
mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
- mp->readers = 0;
- mp->tid = 0;
mp->level = level;
osi_TInit(&mp->d.turn);
return;
EnterCriticalSection(csp);
/* here we have the fast lock, so see if we can obtain the real lock */
- if (lp->flags & OSI_LOCKFLAG_EXCL)
+ if (lp->flags & OSI_LOCKFLAG_EXCL)
i = OSI_RWLOCK_WRITEHELD;
- else
+ else
i = 0;
- if (lp->readers > 0)
+ if (lp->readers > 0)
i |= OSI_RWLOCK_READHELD;
LeaveCriticalSection(csp);
return i;
}
-int lock_GetMutexState(struct osi_mutex *mp)
+int lock_GetMutexState(struct osi_mutex *mp)
{
long i;
CRITICAL_SECTION *csp;