/* TLS slot indices for the per-thread lock-reference list head and tail
 * (used by the lock-order validation machinery).
 * NOTE(review): only tls_LockRefT is visibly allocated in osi_BaseInit;
 * confirm tls_LockRefH is allocated somewhere. */
static DWORD tls_LockRefH = 0;
static DWORD tls_LockRefT = 0;
/* Non-zero when lock-order validation is enabled; toggled via
 * osi_SetLockOrderValidation(). */
static BOOLEAN lockOrderValidation = 0;
/* Free list of recycled osi_lock_ref_t entries (head/tail), guarded by
 * lock_ref_CS.  Entries are recycled instead of malloc/free'd per use. */
+static osi_lock_ref_t * lock_ref_FreeListp = NULL;
+static osi_lock_ref_t * lock_ref_FreeListEndp = NULL;
+CRITICAL_SECTION lock_ref_CS;
void osi_BaseInit(void)
{
if ((tls_LockRefT = TlsAlloc()) == TLS_OUT_OF_INDEXES)
osi_panic("TlsAlloc(tls_LockRefT) failure", __FILE__, __LINE__);
+
+ InitializeCriticalSection(&lock_ref_CS);
}
-void osi_SetLockOrderValidation(int on)
+void
+osi_SetLockOrderValidation(int on)
{
lockOrderValidation = (BOOLEAN)on;
}
-osi_lock_ref_t *lock_GetLockRef(void * lockp, char type)
+static osi_lock_ref_t *
+lock_GetLockRef(void * lockp, char type)
{
- osi_lock_ref_t * lockRefp = (osi_lock_ref_t *)malloc(sizeof(osi_lock_ref_t));
+ osi_lock_ref_t * lockRefp = NULL;
+
+ EnterCriticalSection(&lock_ref_CS);
+ if (lock_ref_FreeListp) {
+ lockRefp = lock_ref_FreeListp;
+ osi_QRemoveHT( (osi_queue_t **) &lock_ref_FreeListp,
+ (osi_queue_t **) &lock_ref_FreeListEndp,
+ &lockRefp->q);
+ }
+ LeaveCriticalSection(&lock_ref_CS);
+
+ if (lockRefp == NULL)
+ lockRefp = (osi_lock_ref_t *)malloc(sizeof(osi_lock_ref_t));
memset(lockRefp, 0, sizeof(osi_lock_ref_t));
lockRefp->type = type;
return lockRefp;
}
+static void
+lock_FreeLockRef(osi_lock_ref_t * lockRefp)
+{
+ EnterCriticalSection(&lock_ref_CS);
+ osi_QAddH( (osi_queue_t **) &lock_ref_FreeListp,
+ (osi_queue_t **) &lock_ref_FreeListEndp,
+ &lockRefp->q);
+ LeaveCriticalSection(&lock_ref_CS);
+}
+
void lock_VerifyOrderRW(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_rwlock_t *lockp)
{
char msg[512];
if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
(lockp->readers > 0)) {
lockp->waiters++;
- osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
+ osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
lockp->waiters--;
osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
- }
- else {
+ } else {
/* if we're here, all clear to set the lock */
lockp->flags |= OSI_LOCKFLAG_EXCL;
+ lockp->tid[0] = thrd_Current();
}
-
- lockp->tid = thrd_Current();
-
LeaveCriticalSection(csp);
if (lockOrderValidation) {
CRITICAL_SECTION *csp;
osi_queue_t * lockRefH, *lockRefT;
osi_lock_ref_t *lockRefp;
+ DWORD tid = thrd_Current();
if ((i=lockp->type) != 0) {
if (i >= 0 && i < OSI_NLOCKTYPES)
csp = &osi_baseAtomicCS[lockp->atomicIndex];
EnterCriticalSection(csp);
+ for ( i=0; i < lockp->readers; i++ ) {
+ osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
+ }
+
/* here we have the fast lock, so see if we can obtain the real lock */
if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
lockp->waiters++;
- osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, csp);
+ osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, lockp->tid, csp);
lockp->waiters--;
osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0);
- }
- else {
+ } else {
/* if we're here, all clear to set the lock */
- lockp->readers++;
+ if (++lockp->readers <= OSI_RWLOCK_THREADS)
+ lockp->tid[lockp->readers-1] = tid;
}
-
LeaveCriticalSection(csp);
if (lockOrderValidation) {
CRITICAL_SECTION *csp;
osi_queue_t * lockRefH, *lockRefT;
osi_lock_ref_t *lockRefp;
+ DWORD tid = thrd_Current();
if ((i = lockp->type) != 0) {
if (i >= 0 && i < OSI_NLOCKTYPES)
for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
- free(lockRefp);
+ lock_FreeLockRef(lockRefp);
found = 1;
break;
}
osi_assertx(lockp->readers > 0, "read lock not held");
+ for ( i=0; i < lockp->readers; i++) {
+ if ( lockp->tid[i] == tid ) {
+ for ( ; i < lockp->readers - 1; i++)
+ lockp->tid[i] = lockp->tid[i+1];
+ lockp->tid[i] = 0;
+ break;
+ }
+ }
+
/* releasing a read lock can allow readers or writers */
if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
osi_TSignalForMLs(&lockp->d.turn, 0, csp);
for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
- free(lockRefp);
+ lock_FreeLockRef(lockRefp);
found = 1;
break;
}
EnterCriticalSection(csp);
osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
- osi_assertx(lockp->tid == thrd_Current(), "write lock not held by current thread");
+ osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");
- lockp->tid = 0;
+ lockp->tid[0] = 0;
lockp->flags &= ~OSI_LOCKFLAG_EXCL;
if (!osi_TEmpty(&lockp->d.turn)) {
EnterCriticalSection(csp);
osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
- osi_assertx(lockp->tid == thrd_Current(), "write lock not held by current thread");
+ osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");
/* convert write lock to read lock */
lockp->flags &= ~OSI_LOCKFLAG_EXCL;
lockp->readers++;
- lockp->tid = 0;
-
if (!osi_TEmpty(&lockp->d.turn)) {
osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);
}
{
long i;
CRITICAL_SECTION *csp;
+ DWORD tid = thrd_Current();
if ((i = lockp->type) != 0) {
if (i >= 0 && i < OSI_NLOCKTYPES)
osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held");
osi_assertx(lockp->readers > 0, "read lock not held");
+ for ( i=0; i < lockp->readers; i++) {
+ if ( lockp->tid[i] == tid ) {
+ for ( ; i < lockp->readers - 1; i++)
+ lockp->tid[i] = lockp->tid[i+1];
+ lockp->tid[i] = 0;
+ break;
+ }
+ }
+
if (--lockp->readers == 0) {
/* convert read lock to write lock */
lockp->flags |= OSI_LOCKFLAG_EXCL;
+ lockp->tid[0] = tid;
} else {
lockp->waiters++;
- osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
+ osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
lockp->waiters--;
osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
}
- lockp->tid = thrd_Current();
LeaveCriticalSection(csp);
}
/* here we have the fast lock, so see if we can obtain the real lock */
if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
lockp->waiters++;
- osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
+ osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, &lockp->tid, csp);
lockp->waiters--;
osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
- }
- else {
+ } else {
/* if we're here, all clear to set the lock */
lockp->flags |= OSI_LOCKFLAG_EXCL;
+ lockp->tid = thrd_Current();
}
- lockp->tid = thrd_Current();
LeaveCriticalSection(csp);
if (lockOrderValidation) {
for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
- free(lockRefp);
+ lock_FreeLockRef(lockRefp);
found = 1;
break;
}
}
else {
/* if we're here, all clear to set the lock */
- lockp->readers++;
+ if (++lockp->readers < OSI_RWLOCK_THREADS)
+ lockp->tid[lockp->readers-1] = thrd_Current();
i = 1;
}
else {
/* if we're here, all clear to set the lock */
lockp->flags |= OSI_LOCKFLAG_EXCL;
+ lockp->tid[0] = thrd_Current();
i = 1;
}
- if (i)
- lockp->tid = thrd_Current();
-
LeaveCriticalSection(csp);
if (lockOrderValidation && i) {
else {
/* if we're here, all clear to set the lock */
lockp->flags |= OSI_LOCKFLAG_EXCL;
+ lockp->tid = thrd_Current();
i = 1;
}
- if (i)
- lockp->tid = thrd_Current();
-
LeaveCriticalSection(csp);
if (lockOrderValidation && i) {
CRITICAL_SECTION *csp;
osi_queue_t * lockRefH, *lockRefT;
osi_lock_ref_t *lockRefp;
+ DWORD tid = thrd_Current();
if ((i = lockp->type) != 0) {
if (i >= 0 && i < OSI_NLOCKTYPES)
for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
- free(lockRefp);
+ lock_FreeLockRef(lockRefp);
break;
}
}
osi_assertx(lockp->readers > 0, "osi_SleepR: not held");
+ for ( i=0; i < lockp->readers; i++) {
+ if ( lockp->tid[i] == tid ) {
+ for ( ; i < lockp->readers - 1; i++)
+ lockp->tid[i] = lockp->tid[i+1];
+ lockp->tid[i] = 0;
+ break;
+ }
+ }
+
/* XXX better to get the list of things to wakeup from TSignalForMLs, and
* then do the wakeup after SleepSpin releases the low-level mutex.
*/
CRITICAL_SECTION *csp;
osi_queue_t * lockRefH, *lockRefT;
osi_lock_ref_t *lockRefp;
+ DWORD tid = thrd_Current();
if ((i = lockp->type) != 0) {
if (i >= 0 && i < OSI_NLOCKTYPES)
for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
- free(lockRefp);
+ lock_FreeLockRef(lockRefp);
break;
}
}
osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");
lockp->flags &= ~OSI_LOCKFLAG_EXCL;
+ lockp->tid[0] = 0;
if (!osi_TEmpty(&lockp->d.turn)) {
osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
}
for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
- free(lockRefp);
+ lock_FreeLockRef(lockRefp);
break;
}
}
osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM not held");
lockp->flags &= ~OSI_LOCKFLAG_EXCL;
+ lockp->tid = 0;
if (!osi_TEmpty(&lockp->d.turn)) {
osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
}
/* otherwise we have the base case, which requires no special
* initialization.
*/
- mp->type = 0;
- mp->flags = 0;
+ memset(mp, 0, sizeof(osi_rwlock_t));
mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
- mp->readers = 0;
- mp->tid = 0;
mp->level = level;
osi_TInit(&mp->d.turn);
return;