2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 /* Copyright (C) 1994 Cazamar Systems, Inc. */
13 #include <afs/param.h>
21 /* atomicity-providing critical sections: a small hash table of Win32
 * critical sections; each lock structure picks one via its atomicIndex
 * field (assigned round-robin from atomicIndexCounter at init time). */
22 CRITICAL_SECTION osi_baseAtomicCS[OSI_MUTEXHASHSIZE];
23 static long atomicIndexCounter = 0;
25 /* Thread local storage index for lock tracking: per-thread head/tail of the
 * queue of osi_lock_ref_t records describing the locks this thread holds;
 * consulted only when lockOrderValidation is enabled. */
26 static DWORD tls_LockRefH = 0;
27 static DWORD tls_LockRefT = 0;
28 static BOOLEAN lockOrderValidation = 0;
/* Free list of recycled osi_lock_ref_t records, protected by lock_ref_CS. */
29 static osi_lock_ref_t * lock_ref_FreeListp = NULL;
30 static osi_lock_ref_t * lock_ref_FreeListEndp = NULL;
31 CRITICAL_SECTION lock_ref_CS;
/*
 * osi_BaseInit -- one-time initialization of the base lock package:
 * creates the hashed atomicity critical sections, allocates the two TLS
 * indices used for lock-order tracking, and initializes the free-list lock.
 * Panics (via osi_panic) if TLS allocation fails.
 */
33 void osi_BaseInit(void)
37 for(i=0; i<OSI_MUTEXHASHSIZE; i++)
38 InitializeCriticalSection(&osi_baseAtomicCS[i]);
40 if ((tls_LockRefH = TlsAlloc()) == TLS_OUT_OF_INDEXES)
41 osi_panic("TlsAlloc(tls_LockRefH) failure", __FILE__, __LINE__);
43 if ((tls_LockRefT = TlsAlloc()) == TLS_OUT_OF_INDEXES)
44 osi_panic("TlsAlloc(tls_LockRefT) failure", __FILE__, __LINE__);
46 InitializeCriticalSection(&lock_ref_CS);
/*
 * osi_SetLockOrderValidation -- enable (non-zero) or disable (zero) the
 * per-thread lock-order/hierarchy checking performed by the lock_Obtain*
 * and lock_Release* routines below.
 */
50 osi_SetLockOrderValidation(int on)
52 lockOrderValidation = (BOOLEAN)on;
/*
 * lock_GetLockRef -- obtain an osi_lock_ref_t describing (lockp, type),
 * preferring a recycled record from lock_ref_FreeListp (under lock_ref_CS)
 * and falling back to malloc.  The record is zeroed and tagged with type;
 * an unrecognized type panics.
 * NOTE(review): the lines that store lockp into the type-specific union
 * member appear truncated from this view -- confirm against full source.
 */
55 static osi_lock_ref_t *
56 lock_GetLockRef(void * lockp, char type)
58 osi_lock_ref_t * lockRefp = NULL;
60 EnterCriticalSection(&lock_ref_CS);
61 if (lock_ref_FreeListp) {
62 lockRefp = lock_ref_FreeListp;
63 osi_QRemoveHT( (osi_queue_t **) &lock_ref_FreeListp,
64 (osi_queue_t **) &lock_ref_FreeListEndp,
67 LeaveCriticalSection(&lock_ref_CS);
/* free list was empty: allocate a fresh record */
70 lockRefp = (osi_lock_ref_t *)malloc(sizeof(osi_lock_ref_t));
72 memset(lockRefp, 0, sizeof(osi_lock_ref_t));
73 lockRefp->type = type;
82 osi_panic("Invalid Lock Type", __FILE__, __LINE__);
/*
 * lock_FreeLockRef -- return a lock-reference record to the shared free
 * list (head insertion) under lock_ref_CS; records are recycled by
 * lock_GetLockRef rather than freed.
 */
89 lock_FreeLockRef(osi_lock_ref_t * lockRefp)
91 EnterCriticalSection(&lock_ref_CS);
92 osi_QAddH( (osi_queue_t **) &lock_ref_FreeListp,
93 (osi_queue_t **) &lock_ref_FreeListEndp,
95 LeaveCriticalSection(&lock_ref_CS);
/*
 * lock_VerifyOrderRW -- walk the calling thread's held-lock queue
 * (lockRefH..lockRefT) and panic if acquiring RW lock lockp would violate
 * the lock hierarchy: either lockp is already held, or some held lock
 * (RW or mutex) has a level greater than lockp->level.
 */
98 void lock_VerifyOrderRW(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_rwlock_t *lockp)
101 osi_lock_ref_t * lockRefp;
103 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
104 if (lockRefp->type == OSI_LOCK_RW) {
/* re-acquiring the same RW lock is a fatal error */
105 if (lockRefp->rw == lockp) {
106 sprintf(msg, "RW Lock 0x%p level %d already held", lockp, lockp->level);
107 osi_panic(msg, __FILE__, __LINE__);
109 if (lockRefp->rw->level > lockp->level) {
110 sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
111 lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
112 osi_panic(msg, __FILE__, __LINE__);
/* held mutexes are checked against the same level ordering */
115 if (lockRefp->mx->level > lockp->level) {
116 sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
117 lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
118 osi_panic(msg, __FILE__, __LINE__);
120 osi_assertx(lockRefp->mx->level <= lockp->level, "Lock hierarchy violation");
/*
 * lock_VerifyOrderMX -- mutex counterpart of lock_VerifyOrderRW: panic if
 * mutex lockp is already held by this thread or if any held lock's level
 * exceeds lockp->level.
 */
125 void lock_VerifyOrderMX(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_mutex_t *lockp)
128 osi_lock_ref_t * lockRefp;
130 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
131 if (lockRefp->type == OSI_LOCK_MUTEX) {
/* re-acquiring the same mutex is a fatal error */
132 if (lockRefp->mx == lockp) {
133 sprintf(msg, "MX Lock 0x%p level %d already held", lockp, lockp->level);
134 osi_panic(msg, __FILE__, __LINE__);
136 if (lockRefp->mx->level > lockp->level) {
137 sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
138 lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
139 osi_panic(msg, __FILE__, __LINE__);
/* held RW locks participate in the same level ordering */
142 if (lockRefp->rw->level > lockp->level) {
143 sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
144 lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
145 osi_panic(msg, __FILE__, __LINE__);
/*
 * lock_ObtainWrite -- acquire RW lock lockp exclusively.  Non-base lock
 * types are dispatched through osi_lockOps; the base type serializes on
 * the hashed atomic critical section, asserts the lock is not already
 * held by this thread, and blocks in osi_TWait while there are waiters,
 * an exclusive holder, or active readers.  On success records the lock
 * in the thread's TLS held-lock queue when order validation is on.
 */
151 void lock_ObtainWrite(osi_rwlock_t *lockp)
154 CRITICAL_SECTION *csp;
155 osi_queue_t * lockRefH, *lockRefT;
156 osi_lock_ref_t *lockRefp;
157 DWORD tid = thrd_Current();
/* non-zero type: delegate to the registered lock ops vector */
159 if ((i=lockp->type) != 0) {
160 if (i >= 0 && i < OSI_NLOCKTYPES)
161 (osi_lockOps[i]->ObtainWriteProc)(lockp);
165 if (lockOrderValidation) {
166 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
167 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* level-0 locks are exempt from hierarchy checking */
169 if (lockp->level != 0)
170 lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
173 /* otherwise we're the fast base type */
174 csp = &osi_baseAtomicCS[lockp->atomicIndex];
175 EnterCriticalSection(csp);
/* self-deadlock checks: not already the writer, not among the readers */
177 if (lockp->flags & OSI_LOCKFLAG_EXCL) {
178 osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
180 for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++ ) {
181 osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
185 /* here we have the fast lock, so see if we can obtain the real lock */
186 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
187 (lockp->readers > 0)) {
/* osi_TWait sleeps on the turnstile and releases/reacquires csp */
189 osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
191 osi_assertx(lockp->waiters >= 0, "waiters underflow");
192 osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
194 /* if we're here, all clear to set the lock */
195 lockp->flags |= OSI_LOCKFLAG_EXCL;
198 osi_assertx(lockp->readers == 0, "write lock readers present");
200 LeaveCriticalSection(csp);
/* record ownership in this thread's TLS queue for order validation */
202 if (lockOrderValidation) {
203 lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
204 osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
205 TlsSetValue(tls_LockRefH, lockRefH);
206 TlsSetValue(tls_LockRefT, lockRefT);
/*
 * lock_ObtainRead -- acquire RW lock lockp shared.  Same dispatch and
 * order-validation pattern as lock_ObtainWrite; blocks only while there
 * are waiters or an exclusive holder (other readers are compatible).
 * Records the caller's tid in lockp->tid[] for up to OSI_RWLOCK_THREADS
 * concurrent readers.
 */
210 void lock_ObtainRead(osi_rwlock_t *lockp)
213 CRITICAL_SECTION *csp;
214 osi_queue_t * lockRefH, *lockRefT;
215 osi_lock_ref_t *lockRefp;
216 DWORD tid = thrd_Current();
/* non-zero type: delegate to the registered lock ops vector */
218 if ((i=lockp->type) != 0) {
219 if (i >= 0 && i < OSI_NLOCKTYPES)
220 (osi_lockOps[i]->ObtainReadProc)(lockp);
224 if (lockOrderValidation) {
225 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
226 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
228 if (lockp->level != 0)
229 lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
232 /* otherwise we're the fast base type */
233 csp = &osi_baseAtomicCS[lockp->atomicIndex];
234 EnterCriticalSection(csp);
/* self-deadlock checks: not the writer, not already a reader */
236 if (lockp->flags & OSI_LOCKFLAG_EXCL) {
237 osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
239 for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++ ) {
240 osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
244 /* here we have the fast lock, so see if we can obtain the real lock */
245 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
247 osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, lockp->tid, csp);
249 osi_assertx(lockp->waiters >= 0, "waiters underflow");
250 osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0);
252 /* if we're here, all clear to set the lock */
/* record tid only while the reader count fits the tid[] table */
253 if (++lockp->readers <= OSI_RWLOCK_THREADS)
254 lockp->tid[lockp->readers-1] = tid;
256 LeaveCriticalSection(csp);
258 if (lockOrderValidation) {
259 lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
260 osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
261 TlsSetValue(tls_LockRefH, lockRefH);
262 TlsSetValue(tls_LockRefT, lockRefT);
/*
 * lock_ReleaseRead -- drop a shared hold on lockp: remove the caller's
 * tid from the reader table, decrement the reader count, and when the
 * count reaches zero wake turnstile waiters via osi_TSignalForMLs.
 * Under order validation, also removes the matching record from the
 * thread's TLS held-lock queue (panics if it is not found).
 */
266 void lock_ReleaseRead(osi_rwlock_t *lockp)
269 CRITICAL_SECTION *csp;
270 osi_queue_t * lockRefH, *lockRefT;
271 osi_lock_ref_t *lockRefp;
272 DWORD tid = thrd_Current();
/* non-zero type: delegate to the registered lock ops vector */
274 if ((i = lockp->type) != 0) {
275 if (i >= 0 && i < OSI_NLOCKTYPES)
276 (osi_lockOps[i]->ReleaseReadProc)(lockp);
280 /* otherwise we're the fast base type */
281 csp = &osi_baseAtomicCS[lockp->atomicIndex];
282 EnterCriticalSection(csp);
284 if (lockOrderValidation && lockp->level != 0) {
286 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
287 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* find and recycle this lock's record in the thread's held-lock queue */
289 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
290 if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
291 osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
292 lock_FreeLockRef(lockRefp);
297 osi_assertx(found, "read lock not found in TLS queue");
299 TlsSetValue(tls_LockRefH, lockRefH);
300 TlsSetValue(tls_LockRefT, lockRefT);
303 osi_assertx(lockp->readers > 0, "read lock not held");
/* compact the tid table: shift later entries down over our slot */
305 for ( i=0; i < lockp->readers; i++) {
306 if ( lockp->tid[i] == tid ) {
307 for ( ; i < lockp->readers - 1; i++)
308 lockp->tid[i] = lockp->tid[i+1];
314 /* releasing a read lock can allow readers or writers */
315 if (--(lockp->readers) == 0 && !osi_TEmpty(&lockp->d.turn)) {
316 osi_TSignalForMLs(&lockp->d.turn, 0, csp);
319 osi_assertx(lockp->readers >= 0, "read lock underflow");
321 /* and finally release the big lock */
322 LeaveCriticalSection(csp);
/*
 * lock_ReleaseWrite -- drop the exclusive hold on lockp: asserts the
 * caller is the recorded writer (tid[0]), clears OSI_LOCKFLAG_EXCL, and
 * wakes any turnstile waiters.  Under order validation, removes the
 * matching record from the thread's TLS held-lock queue.
 */
326 void lock_ReleaseWrite(osi_rwlock_t *lockp)
329 CRITICAL_SECTION *csp;
330 osi_queue_t * lockRefH, *lockRefT;
331 osi_lock_ref_t *lockRefp;
/* non-zero type: delegate to the registered lock ops vector */
333 if ((i = lockp->type) != 0) {
334 if (i >= 0 && i < OSI_NLOCKTYPES)
335 (osi_lockOps[i]->ReleaseWriteProc)(lockp);
339 /* otherwise we're the fast base type */
340 csp = &osi_baseAtomicCS[lockp->atomicIndex];
341 EnterCriticalSection(csp);
343 if (lockOrderValidation && lockp->level != 0) {
345 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
346 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* find and recycle this lock's record in the thread's held-lock queue */
348 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
349 if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
350 osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
351 lock_FreeLockRef(lockRefp);
356 osi_assertx(found, "write lock not found in TLS queue");
358 TlsSetValue(tls_LockRefH, lockRefH);
359 TlsSetValue(tls_LockRefT, lockRefT);
362 osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
363 osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");
367 lockp->flags &= ~OSI_LOCKFLAG_EXCL;
/* wake readers and/or the next writer parked on the turnstile */
368 if (!osi_TEmpty(&lockp->d.turn)) {
369 osi_TSignalForMLs(&lockp->d.turn, 0, csp);
372 /* and finally release the big lock */
373 LeaveCriticalSection(csp);
/*
 * lock_ConvertWToR -- atomically downgrade lockp from exclusive to shared:
 * the caller (asserted to be the current writer) becomes a reader without
 * ever releasing the lock, then other waiting readers are admitted.
 */
377 void lock_ConvertWToR(osi_rwlock_t *lockp)
380 CRITICAL_SECTION *csp;
/* non-zero type: delegate to the registered lock ops vector */
382 if ((i = lockp->type) != 0) {
383 if (i >= 0 && i < OSI_NLOCKTYPES)
384 (osi_lockOps[i]->ConvertWToRProc)(lockp);
388 /* otherwise we're the fast base type */
389 csp = &osi_baseAtomicCS[lockp->atomicIndex];
390 EnterCriticalSection(csp);
392 osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
393 osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");
395 /* convert write lock to read lock */
396 lockp->flags &= ~OSI_LOCKFLAG_EXCL;
399 osi_assertx(lockp->readers == 1, "read lock not one")(
401 if (!osi_TEmpty(&lockp->d.turn)) {
/* second arg 1 tells TSignalForMLs readers remain, so writers stay parked */
402 osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);
405 /* and finally release the big lock */
406 LeaveCriticalSection(csp);
/*
 * lock_ConvertRToW -- upgrade lockp from shared to exclusive: removes the
 * caller from the reader table; if it was the last reader the exclusive
 * flag is set immediately, otherwise the thread parks in osi_TWait until
 * it can take the lock as writer.
 */
410 void lock_ConvertRToW(osi_rwlock_t *lockp)
413 CRITICAL_SECTION *csp;
414 DWORD tid = thrd_Current();
/* non-zero type: delegate to the registered lock ops vector */
416 if ((i = lockp->type) != 0) {
417 if (i >= 0 && i < OSI_NLOCKTYPES)
418 (osi_lockOps[i]->ConvertRToWProc)(lockp);
422 /* otherwise we're the fast base type */
423 csp = &osi_baseAtomicCS[lockp->atomicIndex];
424 EnterCriticalSection(csp);
426 osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held");
427 osi_assertx(lockp->readers > 0, "read lock not held");
/* compact the tid table: shift later entries down over our slot */
429 for ( i=0; i < lockp->readers; i++) {
430 if ( lockp->tid[i] == tid ) {
431 for ( ; i < lockp->readers - 1; i++)
432 lockp->tid[i] = lockp->tid[i+1];
438 if (--(lockp->readers) == 0) {
439 /* convert read lock to write lock */
440 lockp->flags |= OSI_LOCKFLAG_EXCL;
443 osi_assertx(lockp->readers > 0, "read lock underflow");
/* other readers remain: park until we are granted the write lock */
446 osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
448 osi_assertx(lockp->waiters >= 0, "waiters underflow");
449 osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
452 LeaveCriticalSection(csp);
/*
 * lock_ObtainMutex -- acquire mutex lockp.  Non-base types dispatch via
 * osi_lockOps; the base type blocks in osi_TWait while the mutex has
 * waiters or is held, then sets OSI_LOCKFLAG_EXCL and records the owning
 * tid.  Under order validation the mutex is entered into the thread's
 * TLS held-lock queue.
 */
455 void lock_ObtainMutex(struct osi_mutex *lockp)
458 CRITICAL_SECTION *csp;
459 osi_queue_t * lockRefH, *lockRefT;
460 osi_lock_ref_t *lockRefp;
/* non-zero type: delegate to the registered lock ops vector */
462 if ((i=lockp->type) != 0) {
463 if (i >= 0 && i < OSI_NLOCKTYPES)
464 (osi_lockOps[i]->ObtainMutexProc)(lockp);
468 /* otherwise we're the fast base type */
469 csp = &osi_baseAtomicCS[lockp->atomicIndex];
470 EnterCriticalSection(csp);
472 if (lockOrderValidation) {
473 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
474 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
476 if (lockp->level != 0)
477 lock_VerifyOrderMX(lockRefH, lockRefT, lockp);
480 /* here we have the fast lock, so see if we can obtain the real lock */
481 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
483 osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, &lockp->tid, csp);
485 osi_assertx(lockp->waiters >= 0, "waiters underflow");
486 osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
488 /* if we're here, all clear to set the lock */
489 lockp->flags |= OSI_LOCKFLAG_EXCL;
/* mutexes track a single owning thread, unlike the RW tid[] table */
490 lockp->tid = thrd_Current();
493 LeaveCriticalSection(csp);
495 if (lockOrderValidation) {
496 lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
497 osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
498 TlsSetValue(tls_LockRefH, lockRefH);
499 TlsSetValue(tls_LockRefT, lockRefT);
/*
 * lock_ReleaseMutex -- release mutex lockp: asserts the caller is the
 * recorded owner, clears OSI_LOCKFLAG_EXCL, and wakes turnstile waiters.
 * Under order validation, removes the matching record from the thread's
 * TLS held-lock queue (panics if absent).
 */
503 void lock_ReleaseMutex(struct osi_mutex *lockp)
506 CRITICAL_SECTION *csp;
507 osi_queue_t * lockRefH, *lockRefT;
508 osi_lock_ref_t *lockRefp;
/* non-zero type: delegate to the registered lock ops vector */
510 if ((i = lockp->type) != 0) {
511 if (i >= 0 && i < OSI_NLOCKTYPES)
512 (osi_lockOps[i]->ReleaseMutexProc)(lockp);
516 /* otherwise we're the fast base type */
517 csp = &osi_baseAtomicCS[lockp->atomicIndex];
518 EnterCriticalSection(csp);
520 if (lockOrderValidation && lockp->level != 0) {
522 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
523 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* find and recycle this mutex's record in the thread's held-lock queue */
525 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
526 if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
527 osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
528 lock_FreeLockRef(lockRefp);
534 osi_assertx(found, "mutex lock not found in TLS queue");
535 TlsSetValue(tls_LockRefH, lockRefH);
536 TlsSetValue(tls_LockRefT, lockRefT);
539 osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held");
540 osi_assertx(lockp->tid == thrd_Current(), "mutex not held by current thread");
542 lockp->flags &= ~OSI_LOCKFLAG_EXCL;
544 if (!osi_TEmpty(&lockp->d.turn)) {
545 osi_TSignalForMLs(&lockp->d.turn, 0, csp);
548 /* and finally release the big lock */
549 LeaveCriticalSection(csp);
/*
 * lock_TryRead -- non-blocking shared acquire of lockp.  Returns the
 * dispatch result for non-base types; for the base type, fails (without
 * blocking) when the lock has waiters or an exclusive holder, otherwise
 * takes a read hold and records the caller's tid.  Return value is
 * non-zero on success (flag variable partly truncated from this view).
 */
553 int lock_TryRead(struct osi_rwlock *lockp)
556 CRITICAL_SECTION *csp;
557 osi_queue_t * lockRefH, *lockRefT;
558 osi_lock_ref_t *lockRefp;
/* non-zero type: delegate to the registered lock ops vector */
560 if ((i=lockp->type) != 0)
561 if (i >= 0 && i < OSI_NLOCKTYPES)
562 return (osi_lockOps[i]->TryReadProc)(lockp);
564 /* otherwise we're the fast base type */
565 csp = &osi_baseAtomicCS[lockp->atomicIndex];
566 EnterCriticalSection(csp);
568 if (lockOrderValidation) {
569 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
570 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* Try* does no level-ordering check, only a re-entry check */
572 if (lockp->level != 0) {
573 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
574 if (lockRefp->type == OSI_LOCK_RW) {
575 osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
581 /* here we have the fast lock, so see if we can obtain the real lock */
582 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
586 /* if we're here, all clear to set the lock */
/* NOTE(review): this uses `<` where lock_ObtainRead uses `<=` for the
 * same readers/tid[] bookkeeping -- looks like an off-by-one that skips
 * recording the tid of the OSI_RWLOCK_THREADS-th reader; confirm against
 * upstream before changing. */
587 if (++(lockp->readers) < OSI_RWLOCK_THREADS)
588 lockp->tid[lockp->readers-1] = thrd_Current();
592 LeaveCriticalSection(csp);
/* `i` here is the success flag: only record the lock if we got it */
594 if (lockOrderValidation && i) {
595 lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
596 osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
597 TlsSetValue(tls_LockRefH, lockRefH);
598 TlsSetValue(tls_LockRefT, lockRefT);
/*
 * lock_TryWrite -- non-blocking exclusive acquire of lockp.  Fails when
 * the lock has waiters, an exclusive holder, or any readers; on success
 * sets OSI_LOCKFLAG_EXCL and records the caller as tid[0].  Returns
 * non-zero on success.
 */
605 int lock_TryWrite(struct osi_rwlock *lockp)
608 CRITICAL_SECTION *csp;
609 osi_queue_t * lockRefH, *lockRefT;
610 osi_lock_ref_t *lockRefp;
/* non-zero type: delegate to the registered lock ops vector */
612 if ((i=lockp->type) != 0)
613 if (i >= 0 && i < OSI_NLOCKTYPES)
614 return (osi_lockOps[i]->TryWriteProc)(lockp);
616 /* otherwise we're the fast base type */
617 csp = &osi_baseAtomicCS[lockp->atomicIndex];
618 EnterCriticalSection(csp);
620 if (lockOrderValidation) {
621 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
622 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* Try* does no level-ordering check, only a re-entry check */
624 if (lockp->level != 0) {
625 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
626 if (lockRefp->type == OSI_LOCK_RW) {
627 osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
633 /* here we have the fast lock, so see if we can obtain the real lock */
634 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)
635 || (lockp->readers > 0)) {
639 /* if we're here, all clear to set the lock */
640 lockp->flags |= OSI_LOCKFLAG_EXCL;
/* writer identity lives in tid[0] for the base RW lock */
641 lockp->tid[0] = thrd_Current();
645 LeaveCriticalSection(csp);
/* `i` here is the success flag: only record the lock if we got it */
647 if (lockOrderValidation && i) {
648 lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
649 osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
650 TlsSetValue(tls_LockRefH, lockRefH);
651 TlsSetValue(tls_LockRefT, lockRefT);
/*
 * lock_TryMutex -- non-blocking acquire of mutex lockp.  Fails when the
 * mutex has waiters or is already held; on success sets the exclusive
 * flag and records the owning tid.  Returns non-zero on success.
 */
658 int lock_TryMutex(struct osi_mutex *lockp) {
660 CRITICAL_SECTION *csp;
661 osi_queue_t * lockRefH, *lockRefT;
662 osi_lock_ref_t *lockRefp;
/* non-zero type: delegate to the registered lock ops vector */
664 if ((i=lockp->type) != 0)
665 if (i >= 0 && i < OSI_NLOCKTYPES)
666 return (osi_lockOps[i]->TryMutexProc)(lockp);
668 /* otherwise we're the fast base type */
669 csp = &osi_baseAtomicCS[lockp->atomicIndex];
670 EnterCriticalSection(csp);
672 if (lockOrderValidation) {
673 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
674 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* Try* does no level-ordering check, only a re-entry check */
676 if (lockp->level != 0) {
677 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
678 if (lockRefp->type == OSI_LOCK_MUTEX) {
679 osi_assertx(lockRefp->mx != lockp, "Mutex already held");
685 /* here we have the fast lock, so see if we can obtain the real lock */
686 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
690 /* if we're here, all clear to set the lock */
691 lockp->flags |= OSI_LOCKFLAG_EXCL;
692 lockp->tid = thrd_Current();
696 LeaveCriticalSection(csp);
/* `i` here is the success flag: only record the lock if we got it */
698 if (lockOrderValidation && i) {
699 lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
700 osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
701 TlsSetValue(tls_LockRefH, lockRefH);
702 TlsSetValue(tls_LockRefT, lockRefT);
/*
 * osi_SleepR -- atomically release a read hold on lockp and sleep on
 * sleepVal: removes the caller from the reader table, wakes waiters if
 * it was the last reader, then calls osi_SleepSpin which sleeps while
 * releasing the atomic critical section.  The TLS held-lock record is
 * removed first when order validation is on.
 */
708 void osi_SleepR(LONG_PTR sleepVal, struct osi_rwlock *lockp)
711 CRITICAL_SECTION *csp;
712 osi_queue_t * lockRefH, *lockRefT;
713 osi_lock_ref_t *lockRefp;
714 DWORD tid = thrd_Current();
/* non-zero type: delegate to the registered lock ops vector */
716 if ((i = lockp->type) != 0) {
717 if (i >= 0 && i < OSI_NLOCKTYPES)
718 (osi_lockOps[i]->SleepRProc)(sleepVal, lockp);
722 /* otherwise we're the fast base type */
723 csp = &osi_baseAtomicCS[lockp->atomicIndex];
724 EnterCriticalSection(csp);
726 if (lockOrderValidation && lockp->level != 0) {
727 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
728 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* we are about to drop the lock, so drop its TLS tracking record too */
730 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
731 if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
732 osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
733 lock_FreeLockRef(lockRefp);
738 TlsSetValue(tls_LockRefH, lockRefH);
739 TlsSetValue(tls_LockRefT, lockRefT);
742 osi_assertx(lockp->readers > 0, "osi_SleepR: not held");
/* compact the tid table: shift later entries down over our slot */
744 for ( i=0; i < lockp->readers; i++) {
745 if ( lockp->tid[i] == tid ) {
746 for ( ; i < lockp->readers - 1; i++)
747 lockp->tid[i] = lockp->tid[i+1];
753 /* XXX better to get the list of things to wakeup from TSignalForMLs, and
754 * then do the wakeup after SleepSpin releases the low-level mutex.
756 if (--(lockp->readers) == 0 && !osi_TEmpty(&lockp->d.turn)) {
/* NULL csp: do not release the critical section here; SleepSpin does it */
757 osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
760 /* now call into scheduler to sleep atomically with releasing spin lock */
761 osi_SleepSpin(sleepVal, csp);
/*
 * osi_SleepW -- atomically release the write hold on lockp and sleep on
 * sleepVal: clears OSI_LOCKFLAG_EXCL, wakes turnstile waiters, then
 * sleeps via osi_SleepSpin which releases the atomic critical section.
 * The TLS held-lock record is removed first when order validation is on.
 */
764 void osi_SleepW(LONG_PTR sleepVal, struct osi_rwlock *lockp)
767 CRITICAL_SECTION *csp;
768 osi_queue_t * lockRefH, *lockRefT;
769 osi_lock_ref_t *lockRefp;
770 DWORD tid = thrd_Current();
/* non-zero type: delegate to the registered lock ops vector */
772 if ((i = lockp->type) != 0) {
773 if (i >= 0 && i < OSI_NLOCKTYPES)
774 (osi_lockOps[i]->SleepWProc)(sleepVal, lockp);
778 /* otherwise we're the fast base type */
779 csp = &osi_baseAtomicCS[lockp->atomicIndex];
780 EnterCriticalSection(csp);
782 if (lockOrderValidation && lockp->level != 0) {
783 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
784 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* we are about to drop the lock, so drop its TLS tracking record too */
786 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
787 if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
788 osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
789 lock_FreeLockRef(lockRefp);
794 TlsSetValue(tls_LockRefH, lockRefH);
795 TlsSetValue(tls_LockRefT, lockRefT);
798 osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");
800 lockp->flags &= ~OSI_LOCKFLAG_EXCL;
802 if (!osi_TEmpty(&lockp->d.turn)) {
/* NULL csp: do not release the critical section here; SleepSpin does it */
803 osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
806 /* and finally release the big lock */
807 osi_SleepSpin(sleepVal, csp);
/*
 * osi_SleepM -- atomically release mutex lockp and sleep on sleepVal:
 * mutex counterpart of osi_SleepW.  Clears the exclusive flag, wakes
 * waiters, then sleeps via osi_SleepSpin which releases the atomic
 * critical section.
 */
810 void osi_SleepM(LONG_PTR sleepVal, struct osi_mutex *lockp)
813 CRITICAL_SECTION *csp;
814 osi_queue_t * lockRefH, *lockRefT;
815 osi_lock_ref_t *lockRefp;
/* non-zero type: delegate to the registered lock ops vector */
817 if ((i = lockp->type) != 0) {
818 if (i >= 0 && i < OSI_NLOCKTYPES)
819 (osi_lockOps[i]->SleepMProc)(sleepVal, lockp);
823 /* otherwise we're the fast base type */
824 csp = &osi_baseAtomicCS[lockp->atomicIndex];
825 EnterCriticalSection(csp);
827 if (lockOrderValidation && lockp->level != 0) {
828 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
829 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* we are about to drop the mutex, so drop its TLS tracking record too */
831 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
832 if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
833 osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
834 lock_FreeLockRef(lockRefp);
839 TlsSetValue(tls_LockRefH, lockRefH);
840 TlsSetValue(tls_LockRefT, lockRefT);
843 osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM not held");
845 lockp->flags &= ~OSI_LOCKFLAG_EXCL;
847 if (!osi_TEmpty(&lockp->d.turn)) {
/* NULL csp: do not release the critical section here; SleepSpin does it */
848 osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
851 /* and finally release the big lock */
852 osi_SleepSpin(sleepVal, csp);
/*
 * lock_FinalizeRWLock -- tear down lockp.  For the base type there is
 * nothing to free; non-base types are finalized through osi_lockOps.
 */
855 void lock_FinalizeRWLock(osi_rwlock_t *lockp)
859 if ((i=lockp->type) != 0)
860 if (i >= 0 && i < OSI_NLOCKTYPES)
861 (osi_lockOps[i]->FinalizeRWLockProc)(lockp);
/*
 * lock_FinalizeMutex -- tear down mutex lockp.  For the base type there
 * is nothing to free; non-base types are finalized through osi_lockOps.
 */
864 void lock_FinalizeMutex(osi_mutex_t *lockp)
868 if ((i=lockp->type) != 0)
869 if (i >= 0 && i < OSI_NLOCKTYPES)
870 (osi_lockOps[i]->FinalizeMutexProc)(lockp);
/*
 * lock_InitializeMutex -- initialize mutex mp with the given debug name
 * and hierarchy level.  If a non-base default lock type is configured,
 * initialization is delegated through osi_lockOps; otherwise the struct
 * is zeroed, assigned a hashed atomicity slot, and its turnstile set up.
 */
873 void lock_InitializeMutex(osi_mutex_t *mp, char *namep, unsigned short level)
877 if ((i = osi_lockTypeDefault) > 0) {
878 if (i >= 0 && i < OSI_NLOCKTYPES)
879 (osi_lockOps[i]->InitializeMutexProc)(mp, namep, level);
884 * otherwise we have the base case, which requires no special
887 memset(mp, 0, sizeof(osi_mutex_t));
/* round-robin assignment spreads locks across the atomic CS hash table */
888 mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
890 osi_TInit(&mp->d.turn);
/*
 * lock_InitializeRWLock -- initialize RW lock mp with the given debug
 * name and hierarchy level; mirrors lock_InitializeMutex for the RW
 * lock structure.
 */
894 void lock_InitializeRWLock(osi_rwlock_t *mp, char *namep, unsigned short level)
898 if ((i = osi_lockTypeDefault) > 0) {
899 if (i >= 0 && i < OSI_NLOCKTYPES)
900 (osi_lockOps[i]->InitializeRWLockProc)(mp, namep, level);
904 /* otherwise we have the base case, which requires no special
907 memset(mp, 0, sizeof(osi_rwlock_t));
/* round-robin assignment spreads locks across the atomic CS hash table */
908 mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
910 osi_TInit(&mp->d.turn);
/*
 * lock_GetRWLockState -- snapshot lp's state as a bitmask of
 * OSI_RWLOCK_WRITEHELD / OSI_RWLOCK_READHELD, sampled under the lock's
 * atomicity critical section.  Advisory only: the state may change as
 * soon as the critical section is released.
 */
914 int lock_GetRWLockState(osi_rwlock_t *lp)
917 CRITICAL_SECTION *csp;
/* non-zero type: delegate to the registered lock ops vector */
919 if ((i=lp->type) != 0)
920 if (i >= 0 && i < OSI_NLOCKTYPES)
921 return (osi_lockOps[i]->GetRWLockState)(lp);
923 /* otherwise we're the fast base type */
924 csp = &osi_baseAtomicCS[lp->atomicIndex];
925 EnterCriticalSection(csp);
927 /* here we have the fast lock, so see if we can obtain the real lock */
928 if (lp->flags & OSI_LOCKFLAG_EXCL)
929 i = OSI_RWLOCK_WRITEHELD;
/* reader-held bit is OR'd in (condition line truncated from this view) */
933 i |= OSI_RWLOCK_READHELD;
935 LeaveCriticalSection(csp);
/*
 * lock_GetMutexState -- snapshot mp's held state, sampled under the
 * mutex's atomicity critical section.  Advisory only, like
 * lock_GetRWLockState; the tail of this function (result assignment and
 * return) continues past this view.
 */
940 int lock_GetMutexState(struct osi_mutex *mp)
943 CRITICAL_SECTION *csp;
/* non-zero type: delegate to the registered lock ops vector */
945 if ((i=mp->type) != 0)
946 if (i >= 0 && i < OSI_NLOCKTYPES)
947 return (osi_lockOps[i]->GetMutexState)(mp);
949 /* otherwise we're the fast base type */
950 csp = &osi_baseAtomicCS[mp->atomicIndex];
951 EnterCriticalSection(csp);
953 if (mp->flags & OSI_LOCKFLAG_EXCL)
958 LeaveCriticalSection(csp);