/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/* Copyright (C) 1994 Cazamar Systems, Inc. */
#include <afs/param.h>
#include <afs/stds.h>

#include <windows.h>
#include <stdio.h>      /* sprintf */
#include <stdlib.h>     /* malloc */
#include <string.h>     /* memset */

#include "osi.h"
/* atomicity-providing critical sections */
CRITICAL_SECTION osi_baseAtomicCS[OSI_MUTEXHASHSIZE];
static long atomicIndexCounter = 0;

/* Thread local storage indexes for per-thread lock reference tracking */
static DWORD tls_LockRefH = 0;
static DWORD tls_LockRefT = 0;
static BOOLEAN lockOrderValidation = 0;
static osi_lock_ref_t * lock_ref_FreeListp = NULL;
static osi_lock_ref_t * lock_ref_FreeListEndp = NULL;
CRITICAL_SECTION lock_ref_CS;
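
/*
 * Base lock implementation.  A lock whose type field is zero is a
 * "base" lock: its state words are protected by one of the
 * OSI_MUTEXHASHSIZE critical sections above, selected by the lock's
 * atomicIndex, and blocked threads are parked on the lock's turnstile
 * (d.turn) and woken via osi_TSignalForMLs.  Nonzero types dispatch
 * through the osi_lockOps vector to a registered lock package.
 *
 * Illustrative usage (the lock name and level are hypothetical):
 *
 *     osi_rwlock_t confLock;
 *     lock_InitializeRWLock(&confLock, "config lock", 3);
 *     lock_ObtainRead(&confLock);
 *     ...read the shared state...
 *     lock_ReleaseRead(&confLock);
 */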
/* osi_BaseInit must be called once, before any other routine in this
 * file is used; it sets up the critical sections and TLS indexes that
 * the base locks depend on. */
void osi_BaseInit(void)
{
    int i;

    for (i = 0; i < OSI_MUTEXHASHSIZE; i++)
        InitializeCriticalSection(&osi_baseAtomicCS[i]);

    if ((tls_LockRefH = TlsAlloc()) == TLS_OUT_OF_INDEXES)
        osi_panic("TlsAlloc(tls_LockRefH) failure", __FILE__, __LINE__);

    if ((tls_LockRefT = TlsAlloc()) == TLS_OUT_OF_INDEXES)
        osi_panic("TlsAlloc(tls_LockRefT) failure", __FILE__, __LINE__);

    InitializeCriticalSection(&lock_ref_CS);
}
void
osi_SetLockOrderValidation(int on)
{
    lockOrderValidation = (BOOLEAN)on;
}
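
/*
 * lock_GetLockRef and lock_FreeLockRef manage the per-thread lock
 * reference records used by lock order validation.  Freed records are
 * kept on a global free list (guarded by lock_ref_CS) so that steady
 * state operation does not hit malloc.
 */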
static osi_lock_ref_t *
lock_GetLockRef(void * lockp, char type)
{
    osi_lock_ref_t * lockRefp = NULL;

    EnterCriticalSection(&lock_ref_CS);
    if (lock_ref_FreeListp) {
        lockRefp = lock_ref_FreeListp;
        osi_QRemoveHT( (osi_queue_t **) &lock_ref_FreeListp,
                       (osi_queue_t **) &lock_ref_FreeListEndp,
                       &lockRefp->q);
    }
    LeaveCriticalSection(&lock_ref_CS);

    if (lockRefp == NULL)
        lockRefp = (osi_lock_ref_t *)malloc(sizeof(osi_lock_ref_t));

    memset(lockRefp, 0, sizeof(osi_lock_ref_t));
    lockRefp->type = type;
    switch (type) {
    case OSI_LOCK_MUTEX:
        lockRefp->mx = lockp;
        break;
    case OSI_LOCK_RW:
        lockRefp->rw = lockp;
        break;
    default:
        osi_panic("Invalid Lock Type", __FILE__, __LINE__);
    }

    return lockRefp;
}
static void
lock_FreeLockRef(osi_lock_ref_t * lockRefp)
{
    EnterCriticalSection(&lock_ref_CS);
    osi_QAddH( (osi_queue_t **) &lock_ref_FreeListp,
               (osi_queue_t **) &lock_ref_FreeListEndp,
               &lockRefp->q);
    LeaveCriticalSection(&lock_ref_CS);
}
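
/*
 * Lock order validation: every lock is assigned a level at
 * initialization time, and a thread may only acquire locks in
 * non-decreasing level order.  lock_VerifyOrderRW and
 * lock_VerifyOrderMX walk the calling thread's list of held lock
 * references and panic on a duplicate acquisition or a hierarchy
 * violation.  Locks with level 0 are exempt from checking.
 */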
void lock_VerifyOrderRW(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_rwlock_t *lockp)
{
    char msg[512];
    osi_lock_ref_t * lockRefp;

    for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
        if (lockRefp->type == OSI_LOCK_RW) {
            if (lockRefp->rw == lockp) {
                sprintf(msg, "RW Lock 0x%p level %d already held", lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
            if (lockRefp->rw->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                        lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        } else {
            if (lockRefp->mx->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                        lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        }
    }
}
void lock_VerifyOrderMX(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_mutex_t *lockp)
{
    char msg[512];
    osi_lock_ref_t * lockRefp;

    for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
        if (lockRefp->type == OSI_LOCK_MUTEX) {
            if (lockRefp->mx == lockp) {
                sprintf(msg, "MX Lock 0x%p level %d already held", lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
            if (lockRefp->mx->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                        lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        } else {
            if (lockRefp->rw->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                        lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        }
    }
}
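
/*
 * Public lock operations.  Each entry point first checks lockp->type:
 * a nonzero type dispatches through the osi_lockOps vector to a
 * registered lock package; type zero is the fast base implementation,
 * which runs under the lock's hashed atomicity critical section.
 */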
void lock_ObtainWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainWriteProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
        (lockp->readers > 0)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
        lockp->waiters--;
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
    }

    lockp->tid = thrd_Current();

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
void lock_ObtainRead(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainReadProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, csp);
        lockp->waiters--;
        osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0);
    } else {
        /* if we're here, all clear to set the lock */
        lockp->readers++;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
void lock_ReleaseRead(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseReadProc)(lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;

        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "read lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->readers > 0, "read lock not held");

    /* releasing a read lock can allow readers or writers */
    if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
        /* osi_TSignalForMLs releases the atomicity critical section for us */
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    } else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
void lock_ReleaseWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseWriteProc)(lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;

        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "write lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid == thrd_Current(), "write lock not held by current thread");

    lockp->tid = 0;

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    if (!osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    } else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
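
/*
 * Lock conversions.  lock_ConvertWToR atomically demotes a held write
 * lock to a read lock, waking any waiting readers; lock_ConvertRToW
 * promotes a held read lock to a write lock, sleeping until any other
 * readers have drained.
 */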
void lock_ConvertWToR(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertWToRProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid == thrd_Current(), "write lock not held by current thread");

    /* convert write lock to read lock */
    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->readers++;

    lockp->tid = 0;

    if (!osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);
    } else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
void lock_ConvertRToW(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertRToWProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held");
    osi_assertx(lockp->readers > 0, "read lock not held");

    if (--lockp->readers == 0) {
        /* convert read lock to write lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
    } else {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
        lockp->waiters--;
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    }

    lockp->tid = thrd_Current();
    LeaveCriticalSection(csp);
}
void lock_ObtainMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainMutexProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderMX(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
        lockp->waiters--;
        osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
    }

    lockp->tid = thrd_Current();
    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
void lock_ReleaseMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseMutexProc)(lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;

        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }

        osi_assertx(found, "mutex lock not found in TLS queue");
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held");
    osi_assertx(lockp->tid == thrd_Current(), "mutex not held by current thread");

    lockp->tid = 0;

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    if (!osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    } else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
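
/*
 * Non-blocking acquisition.  lock_TryRead, lock_TryWrite and
 * lock_TryMutex return 1 if the lock was obtained and 0 if it was
 * busy; they never sleep.
 */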
int lock_TryRead(struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryReadProc)(lockp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_RW) {
                    osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
                }
            }
        }
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        i = 0;
    } else {
        /* if we're here, all clear to set the lock */
        lockp->readers++;
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}
int lock_TryWrite(struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryWriteProc)(lockp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_RW) {
                    osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
                }
            }
        }
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)
         || (lockp->readers > 0)) {
        i = 0;
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        i = 1;
    }

    if (i)
        lockp->tid = thrd_Current();

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}
int lock_TryMutex(struct osi_mutex *lockp) {
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryMutexProc)(lockp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_MUTEX) {
                    osi_assertx(lockRefp->mx != lockp, "Mutex already held");
                }
            }
        }
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        i = 0;
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        i = 1;
    }

    if (i)
        lockp->tid = thrd_Current();

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}
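
/*
 * The osi_Sleep* routines atomically drop the given lock and put the
 * calling thread to sleep on sleepVal (to be woken by osi_Wakeup).
 * The lock is not reacquired on wakeup; callers must re-obtain it
 * themselves if they still need it.
 */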
void osi_SleepR(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepRProc)(sleepVal, lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->readers > 0, "osi_SleepR: not held");

    /* XXX better to get the list of things to wakeup from TSignalForMLs, and
     * then do the wakeup after SleepSpin releases the low-level mutex.
     */
    if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* now call into scheduler to sleep atomically with releasing spin lock */
    osi_SleepSpin(sleepVal, csp);
}
void osi_SleepW(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepWProc)(sleepVal, lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    if (!osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* and finally release the big lock */
    osi_SleepSpin(sleepVal, csp);
}
void osi_SleepM(LONG_PTR sleepVal, struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepMProc)(sleepVal, lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM not held");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    if (!osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* and finally release the big lock */
    osi_SleepSpin(sleepVal, csp);
}
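
/*
 * Finalization.  Base locks keep no dynamically allocated state of
 * their own, so only registered lock packages get a finalize callout.
 */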
void lock_FinalizeRWLock(osi_rwlock_t *lockp)
{
    long i;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->FinalizeRWLockProc)(lockp);
}
void lock_FinalizeMutex(osi_mutex_t *lockp)
{
    long i;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->FinalizeMutexProc)(lockp);
}
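
/*
 * Initialization.  If a default lock package has been registered
 * (osi_lockTypeDefault > 0), the new lock is handed to it; otherwise
 * the lock becomes a base lock: its state is zeroed, it is assigned a
 * slot in the hashed critical section table, and its turnstile is set
 * up.  The level parameter is recorded for lock order validation.
 */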
void lock_InitializeMutex(osi_mutex_t *mp, char *namep, unsigned short level)
{
    int i;

    if ((i = osi_lockTypeDefault) > 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->InitializeMutexProc)(mp, namep, level);
        return;
    }

    /* otherwise we have the base case, which requires no special
     * initialization.
     */
    memset(mp, 0, sizeof(osi_mutex_t));
    mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
    mp->level = level;
    osi_TInit(&mp->d.turn);
}
void lock_InitializeRWLock(osi_rwlock_t *mp, char *namep, unsigned short level)
{
    int i;

    if ((i = osi_lockTypeDefault) > 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->InitializeRWLockProc)(mp, namep, level);
        return;
    }

    /* otherwise we have the base case, which requires no special
     * initialization.
     */
    memset(mp, 0, sizeof(osi_rwlock_t));
    mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
    mp->level = level;
    osi_TInit(&mp->d.turn);
}
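
/*
 * State queries.  The returned value is a snapshot taken under the
 * atomicity critical section; the lock state may change as soon as
 * the section is released, so the result is advisory only.
 */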
int lock_GetRWLockState(osi_rwlock_t *lp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i=lp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->GetRWLockState)(lp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so check the state of the real lock */
    i = 0;
    if (lp->flags & OSI_LOCKFLAG_EXCL)
        i = OSI_RWLOCK_WRITEHELD;
    if (lp->readers > 0)
        i |= OSI_RWLOCK_READHELD;

    LeaveCriticalSection(csp);

    return i;
}
int lock_GetMutexState(struct osi_mutex *mp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i=mp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->GetMutexState)(mp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[mp->atomicIndex];
    EnterCriticalSection(csp);

    if (mp->flags & OSI_LOCKFLAG_EXCL)
        i = OSI_MUTEX_HELD;
    else
        i = 0;

    LeaveCriticalSection(csp);

    return i;
}