/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/* Copyright (C) 1994 Cazamar Systems, Inc. */
#include <afs/param.h>
/* atomicity-providing critical sections */
CRITICAL_SECTION osi_baseAtomicCS[OSI_MUTEXHASHSIZE];
static long atomicIndexCounter = 0;

/* thread-local storage indices for lock-order tracking */
static DWORD tls_LockRefH = 0;
static DWORD tls_LockRefT = 0;
static BOOLEAN lockOrderValidation = 0;
static osi_lock_ref_t * lock_ref_FreeListp = NULL;
static osi_lock_ref_t * lock_ref_FreeListEndp = NULL;
CRITICAL_SECTION lock_ref_CS;
void osi_BaseInit(void)
{
    int i;

    for (i = 0; i < OSI_MUTEXHASHSIZE; i++)
        InitializeCriticalSectionAndSpinCount(&osi_baseAtomicCS[i], 4000);

    if ((tls_LockRefH = TlsAlloc()) == TLS_OUT_OF_INDEXES)
        osi_panic("TlsAlloc(tls_LockRefH) failure", __FILE__, __LINE__);

    if ((tls_LockRefT = TlsAlloc()) == TLS_OUT_OF_INDEXES)
        osi_panic("TlsAlloc(tls_LockRefT) failure", __FILE__, __LINE__);

    InitializeCriticalSectionAndSpinCount(&lock_ref_CS, 4000);
}
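/*
 * Editor's sketch (not in the original source): a minimal start-up sequence,
 * assuming the surrounding osi package.  osi_BaseInit() must run once before
 * any base-type lock below is touched.  The OSI_EXAMPLES guard and the
 * example_* names are hypothetical, for illustration only.
 */
#ifdef OSI_EXAMPLES
static void example_Startup(void)
{
    osi_BaseInit();                 /* hashed CSs + TLS slots */
    osi_SetLockOrderValidation(1);  /* enable lock-hierarchy checking */
}
#endif /* OSI_EXAMPLES */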
void
osi_SetLockOrderValidation(int on)
{
    lockOrderValidation = (BOOLEAN)on;
}
/*
 * Substitutes for the _InterlockedAnd/_InterlockedOr intrinsics, built
 * from a compare-exchange retry loop.
 */
static void
osi_InterlockedAnd(LONG * pdest, LONG value)
{
    LONG orig, current, new;

    current = *pdest;
    do {
        orig = current;
        new = orig & value;
        current = _InterlockedCompareExchange(pdest, new, orig);
    } while (orig != current);
}

static void
osi_InterlockedOr(LONG * pdest, LONG value)
{
    LONG orig, current, new;

    current = *pdest;
    do {
        orig = current;
        new = orig | value;
        current = _InterlockedCompareExchange(pdest, new, orig);
    } while (orig != current);
}

#define _InterlockedOr   osi_InterlockedOr
#define _InterlockedAnd  osi_InterlockedAnd
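/*
 * Editor's sketch (hypothetical OSI_EXAMPLES guard and example_* names):
 * the loops above retry _InterlockedCompareExchange until no other thread
 * modified the word between the read and the swap, which is all the lock
 * code below needs to set or clear flag bits atomically.
 */
#ifdef OSI_EXAMPLES
static LONG example_flags;

static void example_FlagBits(void)
{
    _InterlockedOr(&example_flags, 0x1);    /* atomically set bit 0 */
    _InterlockedAnd(&example_flags, ~0x1);  /* atomically clear bit 0 */
}
#endif /* OSI_EXAMPLES */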
static osi_lock_ref_t *
lock_GetLockRef(void * lockp, char type)
{
    osi_lock_ref_t * lockRefp = NULL;

    EnterCriticalSection(&lock_ref_CS);
    if (lock_ref_FreeListp) {
        lockRefp = lock_ref_FreeListp;
        osi_QRemoveHT( (osi_queue_t **) &lock_ref_FreeListp,
                       (osi_queue_t **) &lock_ref_FreeListEndp,
                       &lockRefp->q);
    }
    LeaveCriticalSection(&lock_ref_CS);

    if (lockRefp == NULL)
        lockRefp = (osi_lock_ref_t *)malloc(sizeof(osi_lock_ref_t));

    memset(lockRefp, 0, sizeof(osi_lock_ref_t));
    lockRefp->type = type;

    switch (type) {
    case OSI_LOCK_MUTEX:
        lockRefp->mx = lockp;
        break;
    case OSI_LOCK_RW:
        lockRefp->rw = lockp;
        break;
    default:
        osi_panic("Invalid Lock Type", __FILE__, __LINE__);
    }

    return lockRefp;
}
static void
lock_FreeLockRef(osi_lock_ref_t * lockRefp)
{
    EnterCriticalSection(&lock_ref_CS);
    osi_QAddH( (osi_queue_t **) &lock_ref_FreeListp,
               (osi_queue_t **) &lock_ref_FreeListEndp,
               &lockRefp->q);
    LeaveCriticalSection(&lock_ref_CS);
}
void lock_VerifyOrderRW(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_rwlock_t *lockp)
{
    char msg[512];
    osi_lock_ref_t * lockRefp;

    for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
        if (lockRefp->type == OSI_LOCK_RW) {
            if (lockRefp->rw == lockp) {
                sprintf(msg, "RW Lock 0x%p level %d already held", lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
            if (lockRefp->rw->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation: held lock 0x%p level %d > requested lock 0x%p level %d",
                        lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        } else {
            if (lockRefp->mx->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation: held lock 0x%p level %d > requested lock 0x%p level %d",
                        lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
            osi_assertx(lockRefp->mx->level <= lockp->level, "Lock hierarchy violation");
        }
    }
}
void lock_VerifyOrderMX(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_mutex_t *lockp)
{
    char msg[512];
    osi_lock_ref_t * lockRefp;

    for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
        if (lockRefp->type == OSI_LOCK_MUTEX) {
            if (lockRefp->mx == lockp) {
                sprintf(msg, "MX Lock 0x%p level %d already held", lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
            if (lockRefp->mx->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation: held lock 0x%p level %d > requested lock 0x%p level %d",
                        lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        } else {
            if (lockRefp->rw->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation: held lock 0x%p level %d > requested lock 0x%p level %d",
                        lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        }
    }
}
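/*
 * Editor's sketch (not in the original source): how level numbers feed the
 * two verifiers above.  Locks must be acquired in nondecreasing level order;
 * requesting a lock whose level is below that of any held lock panics.  The
 * OSI_EXAMPLES guard and example_* names are hypothetical; the two locks are
 * assumed initialized with levels 1 and 2 (see example_InitLocks below).
 */
#ifdef OSI_EXAMPLES
static osi_rwlock_t example_treeLock;  /* level 1: acquire first */
static osi_mutex_t  example_bufMx;     /* level 2: acquire second */

static void example_OrderedAcquire(void)
{
    lock_ObtainRead(&example_treeLock);  /* level 1 now held */
    lock_ObtainMutex(&example_bufMx);    /* 2 >= 1: validation passes */
    lock_ReleaseMutex(&example_bufMx);
    lock_ReleaseRead(&example_treeLock);
}
#endif /* OSI_EXAMPLES */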
void lock_ObtainWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainWriteProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* a thread must not request a lock it already holds */
    if (lockp->flags & OSI_LOCKFLAG_EXCL) {
        osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
    } else {
        for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++ ) {
            osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
        (lockp->readers > 0)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    } else {
        /* if we're here, all clear to set the lock */
        _InterlockedOr(&lockp->flags, OSI_LOCKFLAG_EXCL);
        lockp->tid[0] = tid;
    }

    osi_assertx(lockp->readers == 0, "write lock readers present");

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
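/*
 * Editor's sketch (hypothetical OSI_EXAMPLES guard and example_* names):
 * the basic writer pattern for a base-type RW lock, assumed to have been
 * set up with lock_InitializeRWLock().
 */
#ifdef OSI_EXAMPLES
static osi_rwlock_t example_countLock;
static long example_count;

static void example_Increment(void)
{
    lock_ObtainWrite(&example_countLock);   /* exclusive access */
    example_count++;
    lock_ReleaseWrite(&example_countLock);
}
#endif /* OSI_EXAMPLES */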
void lock_ObtainRead(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainReadProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* a thread must not request a lock it already holds */
    if (lockp->flags & OSI_LOCKFLAG_EXCL) {
        osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
    } else {
        for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++ ) {
            osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, lockp->tid, csp);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0);
    } else {
        /* if we're here, all clear to set the lock */
        lockp->readers++;
        if (lockp->readers <= OSI_RWLOCK_THREADS)
            lockp->tid[lockp->readers-1] = tid;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
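/*
 * Editor's sketch, reusing the hypothetical declarations above: the reader
 * side.  Any number of readers may hold the lock concurrently; none may
 * overlap with a writer.
 */
#ifdef OSI_EXAMPLES
static long example_ReadCount(void)
{
    long value;

    lock_ObtainRead(&example_countLock);
    value = example_count;                  /* consistent snapshot */
    lock_ReleaseRead(&example_countLock);
    return value;
}
#endif /* OSI_EXAMPLES */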
void lock_ReleaseRead(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseReadProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;

        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "read lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->readers > 0, "read lock not held");

    /* drop this thread from the list of recorded reader tids */
    for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < (lockp->readers - 1) && i < (OSI_RWLOCK_THREADS - 1); i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    lockp->readers--;

    /* releasing a read lock can allow writers */
    if (lockp->readers == 0 && lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    } else {
        osi_assertx(lockp->readers >= 0, "read lock underflow");

        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
void lock_ReleaseWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseWriteProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;

        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "write lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");

    lockp->tid[0] = 0;

    _InterlockedAnd( &lockp->flags, ~OSI_LOCKFLAG_EXCL);
    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    } else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
void lock_ConvertWToR(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertWToRProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");

    /* convert write lock to read lock */
    _InterlockedAnd(&lockp->flags, ~OSI_LOCKFLAG_EXCL);
    lockp->readers++;

    osi_assertx(lockp->readers == 1, "read lock not one");

    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);
    } else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
void lock_ConvertRToW(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertRToWProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held");
    osi_assertx(lockp->readers > 0, "read lock not held");

    /* drop this thread from the list of recorded reader tids */
    for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < (lockp->readers - 1) && i < (OSI_RWLOCK_THREADS - 1); i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    if (--(lockp->readers) == 0) {
        /* convert read lock to write lock */
        _InterlockedOr(&lockp->flags, OSI_LOCKFLAG_EXCL);
        lockp->tid[0] = tid;
    } else {
        osi_assertx(lockp->readers > 0, "read lock underflow");

        lockp->waiters++;
        osi_TWaitExt(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp, FALSE);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    }

    LeaveCriticalSection(csp);
}
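/*
 * Editor's sketch (hypothetical names, OSI_EXAMPLES guard): the usual
 * read-mostly pattern built on the two conversion routines.  Because
 * lock_ConvertRToW may have to wait for other readers to drain, another
 * writer can run first, so the guarding condition must be re-checked
 * after the upgrade.
 */
#ifdef OSI_EXAMPLES
static osi_rwlock_t example_cacheLock;
static int example_cacheValid;

static void example_EnsureCache(void)
{
    lock_ObtainRead(&example_cacheLock);
    if (!example_cacheValid) {
        lock_ConvertRToW(&example_cacheLock);
        if (!example_cacheValid)            /* re-check after the upgrade */
            example_cacheValid = 1;         /* ... rebuild the cache ... */
        lock_ConvertWToR(&example_cacheLock);
    }
    lock_ReleaseRead(&example_cacheLock);
}
#endif /* OSI_EXAMPLES */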
void lock_ObtainMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainMutexProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderMX(lockRefH, lockRefT, lockp);
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, &lockp->tid, csp);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
    } else {
        /* if we're here, all clear to set the lock */
        _InterlockedOr(&lockp->flags, OSI_LOCKFLAG_EXCL);
        lockp->tid = thrd_Current();
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
void lock_ReleaseMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseMutexProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;

        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }

        osi_assertx(found, "mutex lock not found in TLS queue");
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held");
    osi_assertx(lockp->tid == thrd_Current(), "mutex not held by current thread");

    lockp->tid = 0;
    _InterlockedAnd(&lockp->flags, ~OSI_LOCKFLAG_EXCL);

    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    } else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
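/*
 * Editor's sketch (hypothetical names, OSI_EXAMPLES guard): plain mutex
 * usage.  Unlike the RW lock, at most one thread is inside at a time and
 * the mutex's tid field records the single owner.
 */
#ifdef OSI_EXAMPLES
static osi_mutex_t example_statsMx;
static long example_statsCalls;

static void example_CountCall(void)
{
    lock_ObtainMutex(&example_statsMx);
    example_statsCalls++;
    lock_ReleaseMutex(&example_statsMx);
}
#endif /* OSI_EXAMPLES */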
int lock_TryRead(struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryReadProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_RW) {
                    osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
                }
            }
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        i = 0;
    } else {
        /* if we're here, all clear to set the lock */
        lockp->readers++;
        if (lockp->readers < OSI_RWLOCK_THREADS)
            lockp->tid[lockp->readers-1] = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}
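/*
 * Editor's sketch (hypothetical names, OSI_EXAMPLES guard): the try-lock
 * returns 0 instead of blocking, so callers poll or fall back to other
 * work.  Uses the example declarations introduced earlier.
 */
#ifdef OSI_EXAMPLES
static int example_TryPeek(osi_rwlock_t *lp, long *valuep)
{
    if (!lock_TryRead(lp))
        return 0;               /* busy: caller retries later */
    *valuep = example_count;    /* read shared state under the lock */
    lock_ReleaseRead(lp);
    return 1;
}
#endif /* OSI_EXAMPLES */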
int lock_TryWrite(struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryWriteProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_RW) {
                    osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
                }
            }
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)
         || (lockp->readers > 0)) {
        i = 0;
    } else {
        /* if we're here, all clear to set the lock */
        _InterlockedOr(&lockp->flags, OSI_LOCKFLAG_EXCL);
        lockp->tid[0] = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}
int lock_TryMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryMutexProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_MUTEX) {
                    osi_assertx(lockRefp->mx != lockp, "Mutex already held");
                }
            }
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        i = 0;
    } else {
        /* if we're here, all clear to set the lock */
        _InterlockedOr(&lockp->flags, OSI_LOCKFLAG_EXCL);
        lockp->tid = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}
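/*
 * Editor's sketch (hypothetical names, OSI_EXAMPLES guard): a common use of
 * try-locks is acquiring a second lock out of its natural order without
 * risking deadlock, backing off and retrying when it is contended.
 */
#ifdef OSI_EXAMPLES
static void example_AcquireBoth(osi_mutex_t *firstMx, osi_mutex_t *secondMx)
{
    lock_ObtainMutex(firstMx);
    while (!lock_TryMutex(secondMx)) {
        lock_ReleaseMutex(firstMx);     /* back off to avoid deadlock */
        lock_ObtainMutex(firstMx);      /* then try again */
    }
    /* ... both mutexes held here ... */
    lock_ReleaseMutex(secondMx);
    lock_ReleaseMutex(firstMx);
}
#endif /* OSI_EXAMPLES */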
void osi_SleepR(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepRProc)(sleepVal, lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->readers > 0, "osi_SleepR: not held");

    /* drop this thread from the list of recorded reader tids */
    for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < (lockp->readers - 1) && i < (OSI_RWLOCK_THREADS - 1); i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    /* XXX better to get the list of things to wakeup from TSignalForMLs, and
     * then do the wakeup after SleepSpin releases the low-level mutex.
     */
    if (--(lockp->readers) == 0 && lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* now call into scheduler to sleep atomically with releasing spin lock */
    osi_SleepSpin(sleepVal, csp);
}
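/*
 * Editor's sketch (hypothetical names, OSI_EXAMPLES guard): osi_SleepR is
 * the condition-wait half of a sleep/wakeup pair; it atomically releases
 * the read lock and sleeps on an address.  The lock is NOT reacquired on
 * wakeup, so callers loop, re-taking the lock and re-checking the condition.
 */
#ifdef OSI_EXAMPLES
static osi_rwlock_t example_queueLock;
static int example_queueLen;

static void example_WaitForWork(void)
{
    lock_ObtainRead(&example_queueLock);
    while (example_queueLen == 0) {
        osi_SleepR((LONG_PTR)&example_queueLen, &example_queueLock);
        lock_ObtainRead(&example_queueLock);    /* sleep dropped the lock */
    }
    /* ... work is available ... */
    lock_ReleaseRead(&example_queueLock);
}
#endif /* OSI_EXAMPLES */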
void osi_SleepW(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepWProc)(sleepVal, lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");

    lockp->tid[0] = 0;
    _InterlockedAnd(&lockp->flags, ~OSI_LOCKFLAG_EXCL);

    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* and finally release the big lock */
    osi_SleepSpin(sleepVal, csp);
}
void osi_SleepM(LONG_PTR sleepVal, struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepMProc)(sleepVal, lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM: not held");

    lockp->tid = 0;
    _InterlockedAnd(&lockp->flags, ~OSI_LOCKFLAG_EXCL);

    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* and finally release the big lock */
    osi_SleepSpin(sleepVal, csp);
}
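/*
 * Editor's sketch, completing the pair above: the producer updates shared
 * state under the write lock and then wakes sleepers with osi_Wakeup from
 * the osi sleep package (assumed signature: osi_Wakeup(LONG_PTR)).
 */
#ifdef OSI_EXAMPLES
static void example_PostWork(void)
{
    lock_ObtainWrite(&example_queueLock);
    example_queueLen++;
    lock_ReleaseWrite(&example_queueLock);
    osi_Wakeup((LONG_PTR)&example_queueLen);    /* rouse example_WaitForWork */
}
#endif /* OSI_EXAMPLES */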
void lock_FinalizeRWLock(osi_rwlock_t *lockp)
{
    long i;

    if ((i = lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->FinalizeRWLockProc)(lockp);
}
void lock_FinalizeMutex(osi_mutex_t *lockp)
{
    long i;

    if ((i = lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->FinalizeMutexProc)(lockp);
}
void lock_InitializeMutex(osi_mutex_t *mp, char *namep, unsigned short level)
{
    int i;

    if ((i = osi_lockTypeDefault) > 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->InitializeMutexProc)(mp, namep, level);
        return;
    }

    /*
     * otherwise we have the base case, which requires no special
     * initialization.
     */
    memset(mp, 0, sizeof(osi_mutex_t));
    mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
    mp->level = level;
    osi_TInit(&mp->d.turn);
}
void lock_InitializeRWLock(osi_rwlock_t *mp, char *namep, unsigned short level)
{
    int i;

    if ((i = osi_lockTypeDefault) > 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->InitializeRWLockProc)(mp, namep, level);
        return;
    }

    /*
     * otherwise we have the base case, which requires no special
     * initialization.
     */
    memset(mp, 0, sizeof(osi_rwlock_t));
    mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
    mp->level = level;
    osi_TInit(&mp->d.turn);
}
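/*
 * Editor's sketch (hypothetical names, OSI_EXAMPLES guard): initialization
 * with explicit levels.  Level 0 exempts a lock from order validation;
 * nonzero levels must be acquired in nondecreasing order, as enforced by
 * lock_VerifyOrderRW/lock_VerifyOrderMX above.
 */
#ifdef OSI_EXAMPLES
static void example_InitLocks(void)
{
    lock_InitializeRWLock(&example_treeLock, "example tree lock", 1);
    lock_InitializeMutex(&example_bufMx, "example buf mutex", 2);
}
#endif /* OSI_EXAMPLES */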
int lock_GetRWLockState(osi_rwlock_t *lp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i = lp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->GetRWLockState)(lp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so report the state of the real lock */
    i = 0;
    if (lp->flags & OSI_LOCKFLAG_EXCL)
        i = OSI_RWLOCK_WRITEHELD;
    if (lp->readers > 0)
        i |= OSI_RWLOCK_READHELD;

    LeaveCriticalSection(csp);

    return i;
}
int lock_GetMutexState(struct osi_mutex *mp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i = mp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->GetMutexState)(mp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[mp->atomicIndex];
    EnterCriticalSection(csp);

    i = 0;
    if (mp->flags & OSI_LOCKFLAG_EXCL)
        i = OSI_MUTEX_HELD;

    LeaveCriticalSection(csp);

    return i;
}
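/*
 * Editor's sketch (hypothetical names, OSI_EXAMPLES guard): the state
 * queries are handy in debug assertions when a function requires its
 * caller to hold a lock.
 */
#ifdef OSI_EXAMPLES
static void example_RequireWriteHeld(osi_rwlock_t *lp)
{
    osi_assertx(lock_GetRWLockState(lp) & OSI_RWLOCK_WRITEHELD,
                "example: caller must hold the write lock");
}
#endif /* OSI_EXAMPLES */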