2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 /* Copyright (C) 1994 Cazamar Systems, Inc. */
13 #include <afs/param.h>
21 /* atomicity-providing critical sections */
22 CRITICAL_SECTION osi_baseAtomicCS[OSI_MUTEXHASHSIZE];
/* counter advanced with InterlockedIncrement by lock_InitializeMutex /
 * lock_InitializeRWLock to spread locks across the osi_baseAtomicCS buckets */
23 static long atomicIndexCounter = 0;
25 /* Thread local storage index for lock tracking */
/* TLS slots holding the head/tail of the per-thread queue of
 * osi_lock_ref_t records (locks currently held by the thread);
 * allocated in osi_BaseInit */
26 static DWORD tls_LockRefH = 0;
27 static DWORD tls_LockRefT = 0;
/* when non-zero, lock acquire/release paths maintain the TLS queue and
 * check lock-level ordering; toggled by osi_SetLockOrderValidation */
28 static BOOLEAN lockOrderValidation = 0;
/* free list of recycled osi_lock_ref_t records, guarded by lock_ref_CS */
29 static osi_lock_ref_t * lock_ref_FreeListp = NULL;
30 static osi_lock_ref_t * lock_ref_FreeListEndp = NULL;
31 CRITICAL_SECTION lock_ref_CS;
/*
 * osi_BaseInit -- one-time initialization of the base lock package.
 * Initializes the atomicity critical sections, allocates the two TLS
 * slots used for lock-order tracking, and initializes the free-list CS.
 * Panics (osi_panic) if TLS allocation fails.
 * NOTE(review): several original lines are elided in this view.
 */
33 void osi_BaseInit(void)
37 for(i=0; i<OSI_MUTEXHASHSIZE; i++)
38 InitializeCriticalSection(&osi_baseAtomicCS[i]);
40 if ((tls_LockRefH = TlsAlloc()) == TLS_OUT_OF_INDEXES)
41 osi_panic("TlsAlloc(tls_LockRefH) failure", __FILE__, __LINE__);
43 if ((tls_LockRefT = TlsAlloc()) == TLS_OUT_OF_INDEXES)
44 osi_panic("TlsAlloc(tls_LockRefT) failure", __FILE__, __LINE__);
46 InitializeCriticalSection(&lock_ref_CS);
/*
 * osi_SetLockOrderValidation -- enable (non-zero) or disable (zero) lock
 * hierarchy checking for subsequent lock operations.
 * NOTE(review): return type / surrounding lines are elided in this view.
 */
50 osi_SetLockOrderValidation(int on)
52 lockOrderValidation = (BOOLEAN)on;
/*
 * lock_GetLockRef -- obtain an osi_lock_ref_t describing (lockp, type),
 * recycling one from lock_ref_FreeListp under lock_ref_CS when possible,
 * otherwise malloc'ing a fresh zeroed record.
 * type: OSI_LOCK_RW or OSI_LOCK_MUTEX; anything else panics (the switch
 * storing lockp into the rw/mx member is partially elided in this view).
 * NOTE(review): the malloc return value does not appear to be checked
 * before the memset in the visible lines -- confirm against full source.
 */
55 static osi_lock_ref_t *
56 lock_GetLockRef(void * lockp, char type)
58 osi_lock_ref_t * lockRefp = NULL;
60 EnterCriticalSection(&lock_ref_CS);
61 if (lock_ref_FreeListp) {
62 lockRefp = lock_ref_FreeListp;
/* unlink the recycled record from the free list */
63 osi_QRemoveHT( (osi_queue_t **) &lock_ref_FreeListp,
64 (osi_queue_t **) &lock_ref_FreeListEndp,
67 LeaveCriticalSection(&lock_ref_CS);
/* no free record available: allocate a new one */
70 lockRefp = (osi_lock_ref_t *)malloc(sizeof(osi_lock_ref_t));
72 memset(lockRefp, 0, sizeof(osi_lock_ref_t));
73 lockRefp->type = type;
82 osi_panic("Invalid Lock Type", __FILE__, __LINE__);
/*
 * lock_FreeLockRef -- return a lock reference record to the global free
 * list (head insertion) under lock_ref_CS; records are never free()d here.
 * NOTE(review): return type line and the queue-element argument to
 * osi_QAddH are elided in this view.
 */
89 lock_FreeLockRef(osi_lock_ref_t * lockRefp)
91 EnterCriticalSection(&lock_ref_CS);
92 osi_QAddH( (osi_queue_t **) &lock_ref_FreeListp,
93 (osi_queue_t **) &lock_ref_FreeListEndp,
95 LeaveCriticalSection(&lock_ref_CS);
/*
 * lock_VerifyOrderRW -- validate that acquiring rwlock 'lockp' respects
 * the lock hierarchy given the thread's currently-held locks (the queue
 * from lockRefH to lockRefT).  Panics if 'lockp' is already held by this
 * thread, or if any held lock (rw or mutex) has a level greater than
 * lockp->level.  Caller must pass this thread's TLS queue.
 * NOTE(review): the declaration of 'msg' and several braces are elided.
 */
98 void lock_VerifyOrderRW(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_rwlock_t *lockp)
101 osi_lock_ref_t * lockRefp;
/* walk every lock reference currently held by this thread */
103 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
104 if (lockRefp->type == OSI_LOCK_RW) {
/* re-acquiring the same rwlock is always fatal */
105 if (lockRefp->rw == lockp) {
106 sprintf(msg, "RW Lock 0x%p level %d already held", lockp, lockp->level);
107 osi_panic(msg, __FILE__, __LINE__);
/* held rw lock must not outrank the one being requested */
109 if (lockRefp->rw->level > lockp->level) {
110 sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
111 lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
112 osi_panic(msg, __FILE__, __LINE__);
/* same ordering rule applied to held mutexes (mutex branch header elided) */
115 if (lockRefp->mx->level > lockp->level) {
116 sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
117 lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
118 osi_panic(msg, __FILE__, __LINE__);
120 osi_assertx(lockRefp->mx->level <= lockp->level, "Lock hierarchy violation");
/*
 * lock_VerifyOrderMX -- mutex counterpart of lock_VerifyOrderRW: panics
 * if mutex 'lockp' is already held by this thread or if any held lock's
 * level exceeds lockp->level.  Caller passes this thread's TLS queue.
 * NOTE(review): the declaration of 'msg' and several braces are elided.
 */
125 void lock_VerifyOrderMX(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_mutex_t *lockp)
128 osi_lock_ref_t * lockRefp;
/* walk every lock reference currently held by this thread */
130 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
131 if (lockRefp->type == OSI_LOCK_MUTEX) {
/* re-acquiring the same mutex is always fatal */
132 if (lockRefp->mx == lockp) {
133 sprintf(msg, "MX Lock 0x%p level %d already held", lockp, lockp->level);
134 osi_panic(msg, __FILE__, __LINE__);
/* held mutex must not outrank the one being requested */
136 if (lockRefp->mx->level > lockp->level) {
137 sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
138 lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
139 osi_panic(msg, __FILE__, __LINE__);
/* same check for held rw locks (rw branch header elided in this view) */
142 if (lockRefp->rw->level > lockp->level) {
143 sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
144 lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
145 osi_panic(msg, __FILE__, __LINE__);
/*
 * lock_ObtainWrite -- acquire 'lockp' exclusively.  Non-zero lockp->type
 * dispatches to that type's ObtainWriteProc and (per the visible pattern)
 * the base path below is skipped.  Base path: enter the per-lock atomic
 * CS, optionally verify lock ordering, block in osi_TWait while there are
 * waiters, a writer, or readers, then set OSI_LOCKFLAG_EXCL and record
 * the owning thread id.  Records the lock in the TLS queue when
 * lockOrderValidation is on.
 * NOTE(review): 'i', the tid[0] assignment and various braces/returns
 * are elided in this view.
 */
151 void lock_ObtainWrite(osi_rwlock_t *lockp)
154 CRITICAL_SECTION *csp;
155 osi_queue_t * lockRefH, *lockRefT;
156 osi_lock_ref_t *lockRefp;
157 DWORD tid = thrd_Current();
/* non-base lock types are handled entirely by their registered ops */
159 if ((i=lockp->type) != 0) {
160 if (i >= 0 && i < OSI_NLOCKTYPES)
161 (osi_lockOps[i]->ObtainWriteProc)(lockp);
165 if (lockOrderValidation) {
166 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
167 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* level 0 locks are exempt from ordering checks */
169 if (lockp->level != 0)
170 lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
173 /* otherwise we're the fast base type */
174 csp = &osi_baseAtomicCS[lockp->atomicIndex];
175 EnterCriticalSection(csp);
/* self-deadlock checks: we must not already hold this lock */
177 if (lockp->flags & OSI_LOCKFLAG_EXCL) {
178 osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
182 for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++ ) {
183 osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
188 /* here we have the fast lock, so see if we can obtain the real lock */
189 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
190 (lockp->readers > 0)) {
/* osi_TWait releases csp while sleeping and reacquires it on wakeup */
192 osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
194 osi_assertx(lockp->waiters >= 0, "waiters underflow");
195 osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
197 /* if we're here, all clear to set the lock */
198 lockp->flags |= OSI_LOCKFLAG_EXCL;
201 osi_assertx(lockp->readers == 0, "write lock readers present");
203 LeaveCriticalSection(csp);
/* record the acquisition in this thread's TLS lock queue */
205 if (lockOrderValidation) {
206 lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
207 osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
208 TlsSetValue(tls_LockRefH, lockRefH);
209 TlsSetValue(tls_LockRefT, lockRefT);
/*
 * lock_ObtainRead -- acquire 'lockp' shared.  Non-zero lockp->type
 * dispatches to ObtainReadProc.  Base path: under the per-lock atomic CS,
 * optionally verify ordering, block while there are waiters or a writer,
 * then record this thread's id in the tid[] array (first
 * OSI_RWLOCK_THREADS readers only).  Records the lock in the TLS queue
 * when lockOrderValidation is on.
 * NOTE(review): the readers increment and several braces are elided in
 * this view; tid[readers-1] indexing presumes readers was incremented
 * just above -- confirm against full source.
 */
213 void lock_ObtainRead(osi_rwlock_t *lockp)
216 CRITICAL_SECTION *csp;
217 osi_queue_t * lockRefH, *lockRefT;
218 osi_lock_ref_t *lockRefp;
219 DWORD tid = thrd_Current();
/* non-base lock types are handled entirely by their registered ops */
221 if ((i=lockp->type) != 0) {
222 if (i >= 0 && i < OSI_NLOCKTYPES)
223 (osi_lockOps[i]->ObtainReadProc)(lockp);
227 if (lockOrderValidation) {
228 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
229 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
231 if (lockp->level != 0)
232 lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
235 /* otherwise we're the fast base type */
236 csp = &osi_baseAtomicCS[lockp->atomicIndex];
237 EnterCriticalSection(csp);
/* self-deadlock checks: must not already hold this lock in any mode */
239 if (lockp->flags & OSI_LOCKFLAG_EXCL) {
240 osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
244 for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++ ) {
245 osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
250 /* here we have the fast lock, so see if we can obtain the real lock */
251 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
/* sleep until a reader slot is granted; csp released during the wait */
253 osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, lockp->tid, csp);
255 osi_assertx(lockp->waiters >= 0, "waiters underflow");
256 osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0);
258 /* if we're here, all clear to set the lock */
/* only the first OSI_RWLOCK_THREADS readers are tracked by thread id */
261 if (lockp->readers <= OSI_RWLOCK_THREADS)
262 lockp->tid[lockp->readers-1] = tid;
265 LeaveCriticalSection(csp);
/* record the acquisition in this thread's TLS lock queue */
267 if (lockOrderValidation) {
268 lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
269 osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
270 TlsSetValue(tls_LockRefH, lockRefH);
271 TlsSetValue(tls_LockRefT, lockRefT);
/*
 * lock_ReleaseRead -- release a shared hold on 'lockp'.  Non-zero
 * lockp->type dispatches to ReleaseReadProc.  Base path: under the
 * per-lock atomic CS, remove this lock from the TLS ordering queue (if
 * validation is on), remove this thread's id from tid[] by shifting the
 * tail down, and wake waiters when the last reader leaves.
 * NOTE(review): 'found', the readers decrement and several braces are
 * elided in this view.
 */
275 void lock_ReleaseRead(osi_rwlock_t *lockp)
278 CRITICAL_SECTION *csp;
279 osi_queue_t * lockRefH, *lockRefT;
280 osi_lock_ref_t *lockRefp;
281 DWORD tid = thrd_Current();
/* non-base lock types are handled entirely by their registered ops */
283 if ((i = lockp->type) != 0) {
284 if (i >= 0 && i < OSI_NLOCKTYPES)
285 (osi_lockOps[i]->ReleaseReadProc)(lockp);
289 /* otherwise we're the fast base type */
290 csp = &osi_baseAtomicCS[lockp->atomicIndex];
291 EnterCriticalSection(csp);
293 if (lockOrderValidation && lockp->level != 0) {
295 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
296 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* find and recycle this lock's reference record in the TLS queue */
298 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
299 if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
300 osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
301 lock_FreeLockRef(lockRefp);
306 osi_assertx(found, "read lock not found in TLS queue");
308 TlsSetValue(tls_LockRefH, lockRefH);
309 TlsSetValue(tls_LockRefT, lockRefT);
312 osi_assertx(lockp->readers > 0, "read lock not held");
/* locate this thread in tid[] and compact the array over its slot */
315 for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++) {
316 if ( lockp->tid[i] == tid ) {
317 for ( ; i < (lockp->readers - 1) && i < (OSI_RWLOCK_THREADS - 1); i++)
318 lockp->tid[i] = lockp->tid[i+1];
327 /* releasing a read lock can allow writers */
328 if (lockp->readers == 0 && lockp->waiters) {
329 osi_TSignalForMLs(&lockp->d.turn, 0, csp);
332 osi_assertx(lockp->readers >= 0, "read lock underflow");
334 /* and finally release the big lock */
335 LeaveCriticalSection(csp);
/*
 * lock_ReleaseWrite -- release an exclusive hold on 'lockp'.  Non-zero
 * lockp->type dispatches to ReleaseWriteProc.  Base path: under the
 * per-lock atomic CS, remove this lock from the TLS ordering queue (if
 * validation is on), assert the lock is write-held by this thread, clear
 * OSI_LOCKFLAG_EXCL and wake any waiters.
 * NOTE(review): 'found', the tid[0] clear and several braces are elided
 * in this view.
 */
339 void lock_ReleaseWrite(osi_rwlock_t *lockp)
342 CRITICAL_SECTION *csp;
343 osi_queue_t * lockRefH, *lockRefT;
344 osi_lock_ref_t *lockRefp;
/* non-base lock types are handled entirely by their registered ops */
346 if ((i = lockp->type) != 0) {
347 if (i >= 0 && i < OSI_NLOCKTYPES)
348 (osi_lockOps[i]->ReleaseWriteProc)(lockp);
352 /* otherwise we're the fast base type */
353 csp = &osi_baseAtomicCS[lockp->atomicIndex];
354 EnterCriticalSection(csp);
356 if (lockOrderValidation && lockp->level != 0) {
358 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
359 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* find and recycle this lock's reference record in the TLS queue */
361 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
362 if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
363 osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
364 lock_FreeLockRef(lockRefp);
369 osi_assertx(found, "write lock not found in TLS queue");
371 TlsSetValue(tls_LockRefH, lockRefH);
372 TlsSetValue(tls_LockRefT, lockRefT);
/* ownership sanity checks before releasing */
375 osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
376 osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");
379 lockp->flags &= ~OSI_LOCKFLAG_EXCL;
380 if (lockp->waiters) {
381 osi_TSignalForMLs(&lockp->d.turn, 0, csp);
384 /* and finally release the big lock */
385 LeaveCriticalSection(csp);
/*
 * lock_ConvertWToR -- atomically downgrade a held write lock to a read
 * lock.  Non-zero lockp->type dispatches to ConvertWToRProc.  Base path:
 * assert write ownership, clear OSI_LOCKFLAG_EXCL, become the single
 * reader, and wake waiters with the "still have readers" flag so only
 * compatible waiters run.  No TLS queue update is needed: the lock
 * remains held by this thread.
 * NOTE(review): 'i' and the readers-increment/tid lines are elided.
 */
389 void lock_ConvertWToR(osi_rwlock_t *lockp)
392 CRITICAL_SECTION *csp;
/* non-base lock types are handled entirely by their registered ops */
394 if ((i = lockp->type) != 0) {
395 if (i >= 0 && i < OSI_NLOCKTYPES)
396 (osi_lockOps[i]->ConvertWToRProc)(lockp);
400 /* otherwise we're the fast base type */
401 csp = &osi_baseAtomicCS[lockp->atomicIndex];
402 EnterCriticalSection(csp);
404 osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
405 osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");
407 /* convert write lock to read lock */
408 lockp->flags &= ~OSI_LOCKFLAG_EXCL;
412 osi_assertx(lockp->readers == 1, "read lock not one");
414 if (lockp->waiters) {
415 osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);
418 /* and finally release the big lock */
419 LeaveCriticalSection(csp);
/*
 * lock_ConvertRToW -- upgrade a held read lock to a write lock.
 * Non-zero lockp->type dispatches to ConvertRToWProc.  Base path: assert
 * read (not write) ownership, drop this thread from tid[], decrement
 * readers; if we were the last reader take the write lock immediately,
 * otherwise wait via osi_TWaitExt until granted exclusivity.
 * NOTE(review): 'i', the waiters bookkeeping and several braces are
 * elided in this view.
 */
423 void lock_ConvertRToW(osi_rwlock_t *lockp)
426 CRITICAL_SECTION *csp;
427 DWORD tid = thrd_Current();
/* non-base lock types are handled entirely by their registered ops */
429 if ((i = lockp->type) != 0) {
430 if (i >= 0 && i < OSI_NLOCKTYPES)
431 (osi_lockOps[i]->ConvertRToWProc)(lockp);
435 /* otherwise we're the fast base type */
436 csp = &osi_baseAtomicCS[lockp->atomicIndex];
437 EnterCriticalSection(csp);
439 osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held");
440 osi_assertx(lockp->readers > 0, "read lock not held");
/* locate this thread in tid[] and compact the array over its slot */
443 for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++) {
444 if ( lockp->tid[i] == tid ) {
445 for ( ; i < (lockp->readers - 1) && i < (OSI_RWLOCK_THREADS - 1); i++)
446 lockp->tid[i] = lockp->tid[i+1];
/* last reader out: we can take the write lock without sleeping */
453 if (--(lockp->readers) == 0) {
454 /* convert read lock to write lock */
455 lockp->flags |= OSI_LOCKFLAG_EXCL;
458 osi_assertx(lockp->readers > 0, "read lock underflow");
/* other readers remain: sleep until the write lock is granted to us */
461 osi_TWaitExt(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp, FALSE);
463 osi_assertx(lockp->waiters >= 0, "waiters underflow");
464 osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
467 LeaveCriticalSection(csp);
/*
 * lock_ObtainMutex -- acquire mutex 'lockp'.  Non-zero lockp->type
 * dispatches to ObtainMutexProc.  Base path: under the per-lock atomic
 * CS, optionally verify lock ordering, block in osi_TWait while the
 * mutex is held or contended, then set OSI_LOCKFLAG_EXCL and record the
 * owner thread id.  Records the lock in the TLS queue when
 * lockOrderValidation is on.
 * NOTE(review): 'i' and several braces are elided in this view.
 */
470 void lock_ObtainMutex(struct osi_mutex *lockp)
473 CRITICAL_SECTION *csp;
474 osi_queue_t * lockRefH, *lockRefT;
475 osi_lock_ref_t *lockRefp;
/* non-base lock types are handled entirely by their registered ops */
477 if ((i=lockp->type) != 0) {
478 if (i >= 0 && i < OSI_NLOCKTYPES)
479 (osi_lockOps[i]->ObtainMutexProc)(lockp);
483 /* otherwise we're the fast base type */
484 csp = &osi_baseAtomicCS[lockp->atomicIndex];
485 EnterCriticalSection(csp);
487 if (lockOrderValidation) {
488 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
489 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* level 0 locks are exempt from ordering checks */
491 if (lockp->level != 0)
492 lock_VerifyOrderMX(lockRefH, lockRefT, lockp);
495 /* here we have the fast lock, so see if we can obtain the real lock */
496 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
/* sleep until the mutex is granted; csp released during the wait */
498 osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, &lockp->tid, csp);
500 osi_assertx(lockp->waiters >= 0, "waiters underflow");
501 osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
503 /* if we're here, all clear to set the lock */
504 lockp->flags |= OSI_LOCKFLAG_EXCL;
505 lockp->tid = thrd_Current();
508 LeaveCriticalSection(csp);
/* record the acquisition in this thread's TLS lock queue */
510 if (lockOrderValidation) {
511 lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
512 osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
513 TlsSetValue(tls_LockRefH, lockRefH);
514 TlsSetValue(tls_LockRefT, lockRefT);
/*
 * lock_ReleaseMutex -- release mutex 'lockp'.  Non-zero lockp->type
 * dispatches to ReleaseMutexProc.  Base path: under the per-lock atomic
 * CS, remove this mutex from the TLS ordering queue (if validation is
 * on), assert ownership by the current thread, clear OSI_LOCKFLAG_EXCL
 * and wake any waiters.
 * NOTE(review): 'found', the tid clear and several braces are elided in
 * this view.
 */
518 void lock_ReleaseMutex(struct osi_mutex *lockp)
521 CRITICAL_SECTION *csp;
522 osi_queue_t * lockRefH, *lockRefT;
523 osi_lock_ref_t *lockRefp;
/* non-base lock types are handled entirely by their registered ops */
525 if ((i = lockp->type) != 0) {
526 if (i >= 0 && i < OSI_NLOCKTYPES)
527 (osi_lockOps[i]->ReleaseMutexProc)(lockp);
531 /* otherwise we're the fast base type */
532 csp = &osi_baseAtomicCS[lockp->atomicIndex];
533 EnterCriticalSection(csp);
535 if (lockOrderValidation && lockp->level != 0) {
537 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
538 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* find and recycle this mutex's reference record in the TLS queue */
540 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
541 if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
542 osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
543 lock_FreeLockRef(lockRefp);
549 osi_assertx(found, "mutex lock not found in TLS queue");
550 TlsSetValue(tls_LockRefH, lockRefH);
551 TlsSetValue(tls_LockRefT, lockRefT);
/* ownership sanity checks before releasing */
554 osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held");
555 osi_assertx(lockp->tid == thrd_Current(), "mutex not held by current thread");
557 lockp->flags &= ~OSI_LOCKFLAG_EXCL;
559 if (lockp->waiters) {
560 osi_TSignalForMLs(&lockp->d.turn, 0, csp);
563 /* and finally release the big lock */
564 LeaveCriticalSection(csp);
/*
 * lock_TryRead -- non-blocking attempt to acquire 'lockp' shared.
 * Returns the non-base TryReadProc result for typed locks; for the base
 * type the visible code records success/failure in 'i' (elided lines)
 * and returns it.  On success the caller's thread id is stored in tid[]
 * and the lock is recorded in the TLS ordering queue.
 * NOTE(review): declaration of 'i', the readers increment, the i=0/i=1
 * branches and the final return are elided.  Also note the bound here is
 * 'readers < OSI_RWLOCK_THREADS' while lock_ObtainRead uses '<=' for the
 * same tid[readers-1] store -- confirm which is intended against full
 * source; the elided increment placement determines correctness.
 */
568 int lock_TryRead(struct osi_rwlock *lockp)
571 CRITICAL_SECTION *csp;
572 osi_queue_t * lockRefH, *lockRefT;
573 osi_lock_ref_t *lockRefp;
/* non-base lock types delegate and return immediately */
575 if ((i=lockp->type) != 0)
576 if (i >= 0 && i < OSI_NLOCKTYPES)
577 return (osi_lockOps[i]->TryReadProc)(lockp);
579 /* otherwise we're the fast base type */
580 csp = &osi_baseAtomicCS[lockp->atomicIndex];
581 EnterCriticalSection(csp);
583 if (lockOrderValidation) {
584 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
585 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* Try* only asserts non-reacquisition; hierarchy order is not checked */
587 if (lockp->level != 0) {
588 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
589 if (lockRefp->type == OSI_LOCK_RW) {
590 osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
596 /* here we have the fast lock, so see if we can obtain the real lock */
/* contended or write-held: fail without blocking (failure branch elided) */
597 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
601 /* if we're here, all clear to set the lock */
604 if (lockp->readers < OSI_RWLOCK_THREADS)
605 lockp->tid[lockp->readers-1] = thrd_Current();
610 LeaveCriticalSection(csp);
/* on success ('i' non-zero), record the lock in the TLS queue */
612 if (lockOrderValidation && i) {
613 lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
614 osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
615 TlsSetValue(tls_LockRefH, lockRefH);
616 TlsSetValue(tls_LockRefT, lockRefT);
/*
 * lock_TryWrite -- non-blocking attempt to acquire 'lockp' exclusively.
 * Returns the non-base TryWriteProc result for typed locks; for the base
 * type, fails (without blocking) if there are waiters, a writer, or any
 * readers, otherwise sets OSI_LOCKFLAG_EXCL and records the owner in
 * tid[0].  On success the lock is recorded in the TLS ordering queue.
 * NOTE(review): declaration of 'i', the success/failure assignments to
 * 'i', and the final return are elided in this view.
 */
623 int lock_TryWrite(struct osi_rwlock *lockp)
626 CRITICAL_SECTION *csp;
627 osi_queue_t * lockRefH, *lockRefT;
628 osi_lock_ref_t *lockRefp;
/* non-base lock types delegate and return immediately */
630 if ((i=lockp->type) != 0)
631 if (i >= 0 && i < OSI_NLOCKTYPES)
632 return (osi_lockOps[i]->TryWriteProc)(lockp);
634 /* otherwise we're the fast base type */
635 csp = &osi_baseAtomicCS[lockp->atomicIndex];
636 EnterCriticalSection(csp);
638 if (lockOrderValidation) {
639 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
640 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* Try* only asserts non-reacquisition; hierarchy order is not checked */
642 if (lockp->level != 0) {
643 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
644 if (lockRefp->type == OSI_LOCK_RW) {
645 osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
651 /* here we have the fast lock, so see if we can obtain the real lock */
/* any contention means immediate failure (failure branch elided) */
652 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)
653 || (lockp->readers > 0)) {
657 /* if we're here, all clear to set the lock */
658 lockp->flags |= OSI_LOCKFLAG_EXCL;
659 lockp->tid[0] = thrd_Current();
663 LeaveCriticalSection(csp);
/* on success ('i' non-zero), record the lock in the TLS queue */
665 if (lockOrderValidation && i) {
666 lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
667 osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
668 TlsSetValue(tls_LockRefH, lockRefH);
669 TlsSetValue(tls_LockRefT, lockRefT);
/*
 * lock_TryMutex -- non-blocking attempt to acquire mutex 'lockp'.
 * Returns the non-base TryMutexProc result for typed locks; for the base
 * type, fails if held or contended, otherwise sets OSI_LOCKFLAG_EXCL and
 * records the owner thread id.  On success the mutex is recorded in the
 * TLS ordering queue.
 * NOTE(review): declaration of 'i', the success/failure assignments to
 * 'i', and the final return are elided in this view.
 */
676 int lock_TryMutex(struct osi_mutex *lockp) {
678 CRITICAL_SECTION *csp;
679 osi_queue_t * lockRefH, *lockRefT;
680 osi_lock_ref_t *lockRefp;
/* non-base lock types delegate and return immediately */
682 if ((i=lockp->type) != 0)
683 if (i >= 0 && i < OSI_NLOCKTYPES)
684 return (osi_lockOps[i]->TryMutexProc)(lockp);
686 /* otherwise we're the fast base type */
687 csp = &osi_baseAtomicCS[lockp->atomicIndex];
688 EnterCriticalSection(csp);
690 if (lockOrderValidation) {
691 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
692 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* Try* only asserts non-reacquisition; hierarchy order is not checked */
694 if (lockp->level != 0) {
695 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
696 if (lockRefp->type == OSI_LOCK_MUTEX) {
697 osi_assertx(lockRefp->mx != lockp, "Mutex already held");
703 /* here we have the fast lock, so see if we can obtain the real lock */
/* held or contended: fail without blocking (failure branch elided) */
704 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
708 /* if we're here, all clear to set the lock */
709 lockp->flags |= OSI_LOCKFLAG_EXCL;
710 lockp->tid = thrd_Current();
714 LeaveCriticalSection(csp);
/* on success ('i' non-zero), record the lock in the TLS queue */
716 if (lockOrderValidation && i) {
717 lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
718 osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
719 TlsSetValue(tls_LockRefH, lockRefH);
720 TlsSetValue(tls_LockRefT, lockRefT);
/*
 * osi_SleepR -- atomically release a read-held 'lockp' and sleep on
 * 'sleepVal'.  Non-zero lockp->type dispatches to SleepRProc.  Base
 * path: remove the lock from the TLS ordering queue, drop this thread
 * from tid[], decrement readers, signal waiters if we were the last
 * reader (with NULL csp -- see XXX note below), then osi_SleepSpin
 * sleeps while releasing the atomic CS.
 * NOTE(review): 'i' and several braces are elided in this view.
 */
726 void osi_SleepR(LONG_PTR sleepVal, struct osi_rwlock *lockp)
729 CRITICAL_SECTION *csp;
730 osi_queue_t * lockRefH, *lockRefT;
731 osi_lock_ref_t *lockRefp;
732 DWORD tid = thrd_Current();
/* non-base lock types are handled entirely by their registered ops */
734 if ((i = lockp->type) != 0) {
735 if (i >= 0 && i < OSI_NLOCKTYPES)
736 (osi_lockOps[i]->SleepRProc)(sleepVal, lockp);
740 /* otherwise we're the fast base type */
741 csp = &osi_baseAtomicCS[lockp->atomicIndex];
742 EnterCriticalSection(csp);
743 
744 if (lockOrderValidation && lockp->level != 0) {
745 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
746 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* find and recycle this lock's reference record in the TLS queue */
748 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
749 if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
750 osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
751 lock_FreeLockRef(lockRefp);
756 TlsSetValue(tls_LockRefH, lockRefH);
757 TlsSetValue(tls_LockRefT, lockRefT);
760 osi_assertx(lockp->readers > 0, "osi_SleepR: not held");
/* locate this thread in tid[] and compact the array over its slot */
763 for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++) {
764 if ( lockp->tid[i] == tid ) {
765 for ( ; i < (lockp->readers - 1) && i < (OSI_RWLOCK_THREADS - 1); i++)
766 lockp->tid[i] = lockp->tid[i+1];
773 /* XXX better to get the list of things to wakeup from TSignalForMLs, and
774 * then do the wakeup after SleepSpin releases the low-level mutex.
/* NULL csp: the atomic CS stays held until osi_SleepSpin releases it */
776 if (--(lockp->readers) == 0 && lockp->waiters) {
777 osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
780 /* now call into scheduler to sleep atomically with releasing spin lock */
781 osi_SleepSpin(sleepVal, csp);
/*
 * osi_SleepW -- atomically release a write-held 'lockp' and sleep on
 * 'sleepVal'.  Non-zero lockp->type dispatches to SleepWProc.  Base
 * path: remove the lock from the TLS ordering queue, assert write
 * ownership, clear OSI_LOCKFLAG_EXCL, signal waiters (NULL csp: the
 * atomic CS is released by osi_SleepSpin), then sleep.
 * NOTE(review): 'i', the tid[0] clear and several braces are elided.
 */
784 void osi_SleepW(LONG_PTR sleepVal, struct osi_rwlock *lockp)
787 CRITICAL_SECTION *csp;
788 osi_queue_t * lockRefH, *lockRefT;
789 osi_lock_ref_t *lockRefp;
790 DWORD tid = thrd_Current();
/* non-base lock types are handled entirely by their registered ops */
792 if ((i = lockp->type) != 0) {
793 if (i >= 0 && i < OSI_NLOCKTYPES)
794 (osi_lockOps[i]->SleepWProc)(sleepVal, lockp);
798 /* otherwise we're the fast base type */
799 csp = &osi_baseAtomicCS[lockp->atomicIndex];
800 EnterCriticalSection(csp);
802 if (lockOrderValidation && lockp->level != 0) {
803 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
804 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* find and recycle this lock's reference record in the TLS queue */
806 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
807 if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
808 osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
809 lock_FreeLockRef(lockRefp);
814 TlsSetValue(tls_LockRefH, lockRefH);
815 TlsSetValue(tls_LockRefT, lockRefT);
818 osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");
820 lockp->flags &= ~OSI_LOCKFLAG_EXCL;
/* NULL csp: the atomic CS stays held until osi_SleepSpin releases it */
822 if (lockp->waiters) {
823 osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
826 /* and finally release the big lock */
827 osi_SleepSpin(sleepVal, csp);
/*
 * osi_SleepM -- atomically release held mutex 'lockp' and sleep on
 * 'sleepVal'.  Non-zero lockp->type dispatches to SleepMProc.  Base
 * path: remove the mutex from the TLS ordering queue, assert ownership,
 * clear OSI_LOCKFLAG_EXCL, signal waiters (NULL csp: the atomic CS is
 * released by osi_SleepSpin), then sleep.
 * NOTE(review): 'i', the tid clear and several braces are elided.
 */
830 void osi_SleepM(LONG_PTR sleepVal, struct osi_mutex *lockp)
833 CRITICAL_SECTION *csp;
834 osi_queue_t * lockRefH, *lockRefT;
835 osi_lock_ref_t *lockRefp;
/* non-base lock types are handled entirely by their registered ops */
837 if ((i = lockp->type) != 0) {
838 if (i >= 0 && i < OSI_NLOCKTYPES)
839 (osi_lockOps[i]->SleepMProc)(sleepVal, lockp);
843 /* otherwise we're the fast base type */
844 csp = &osi_baseAtomicCS[lockp->atomicIndex];
845 EnterCriticalSection(csp);
847 if (lockOrderValidation && lockp->level != 0) {
848 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
849 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
/* find and recycle this mutex's reference record in the TLS queue */
851 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
852 if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
853 osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
854 lock_FreeLockRef(lockRefp);
859 TlsSetValue(tls_LockRefH, lockRefH);
860 TlsSetValue(tls_LockRefT, lockRefT);
863 osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM not held");
865 lockp->flags &= ~OSI_LOCKFLAG_EXCL;
/* NULL csp: the atomic CS stays held until osi_SleepSpin releases it */
867 if (lockp->waiters) {
868 osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
871 /* and finally release the big lock */
872 osi_SleepSpin(sleepVal, csp);
/*
 * lock_FinalizeRWLock -- tear down a typed rwlock via its registered
 * FinalizeRWLockProc.  Visible code is a no-op for the base type
 * (type == 0).  NOTE(review): declaration of 'i' is elided.
 */
875 void lock_FinalizeRWLock(osi_rwlock_t *lockp)
879 if ((i=lockp->type) != 0)
880 if (i >= 0 && i < OSI_NLOCKTYPES)
881 (osi_lockOps[i]->FinalizeRWLockProc)(lockp);
/*
 * lock_FinalizeMutex -- tear down a typed mutex via its registered
 * FinalizeMutexProc.  Visible code is a no-op for the base type
 * (type == 0).  NOTE(review): declaration of 'i' is elided.
 */
884 void lock_FinalizeMutex(osi_mutex_t *lockp)
888 if ((i=lockp->type) != 0)
889 if (i >= 0 && i < OSI_NLOCKTYPES)
890 (osi_lockOps[i]->FinalizeMutexProc)(lockp);
/*
 * lock_InitializeMutex -- initialize mutex 'mp' with debugging name
 * 'namep' and ordering level 'level'.  If a default lock type is
 * configured (osi_lockTypeDefault > 0) it delegates to that type's
 * InitializeMutexProc; otherwise the base case zeroes the mutex, assigns
 * an atomicIndex bucket (round-robin via InterlockedIncrement so
 * concurrent initializers are safe), and initializes the turnstile.
 * NOTE(review): the lines storing namep/level into mp are elided.
 */
893 void lock_InitializeMutex(osi_mutex_t *mp, char *namep, unsigned short level)
897 if ((i = osi_lockTypeDefault) > 0) {
898 if (i >= 0 && i < OSI_NLOCKTYPES)
899 (osi_lockOps[i]->InitializeMutexProc)(mp, namep, level);
904 * otherwise we have the base case, which requires no special
907 memset(mp, 0, sizeof(osi_mutex_t));
908 mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
910 osi_TInit(&mp->d.turn);
/*
 * lock_InitializeRWLock -- initialize rwlock 'mp' with debugging name
 * 'namep' and ordering level 'level'.  If a default lock type is
 * configured it delegates to that type's InitializeRWLockProc; otherwise
 * the base case zeroes the lock, assigns an atomicIndex bucket
 * (round-robin via InterlockedIncrement), and initializes the turnstile.
 * NOTE(review): the lines storing namep/level into mp are elided.
 */
914 void lock_InitializeRWLock(osi_rwlock_t *mp, char *namep, unsigned short level)
918 if ((i = osi_lockTypeDefault) > 0) {
919 if (i >= 0 && i < OSI_NLOCKTYPES)
920 (osi_lockOps[i]->InitializeRWLockProc)(mp, namep, level);
924 /* otherwise we have the base case, which requires no special
927 memset(mp, 0, sizeof(osi_rwlock_t));
928 mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
930 osi_TInit(&mp->d.turn);
/*
 * lock_GetRWLockState -- snapshot the state of rwlock 'lp'.  Typed locks
 * delegate to GetRWLockState.  For the base type, builds a bitmask in
 * 'i' under the atomic CS: OSI_RWLOCK_WRITEHELD when write-held and
 * OSI_RWLOCK_READHELD when readers exist (the readers test and the final
 * return of 'i' are elided in this view).  The snapshot may be stale the
 * moment the CS is released.
 */
934 int lock_GetRWLockState(osi_rwlock_t *lp)
937 CRITICAL_SECTION *csp;
/* non-base lock types delegate and return immediately */
939 if ((i=lp->type) != 0)
940 if (i >= 0 && i < OSI_NLOCKTYPES)
941 return (osi_lockOps[i]->GetRWLockState)(lp);
943 /* otherwise we're the fast base type */
944 csp = &osi_baseAtomicCS[lp->atomicIndex];
945 EnterCriticalSection(csp);
947 /* here we have the fast lock, so see if we can obtain the real lock */
948 if (lp->flags & OSI_LOCKFLAG_EXCL)
949 i = OSI_RWLOCK_WRITEHELD;
/* reader test (elided) sets the read-held bit */
953 i |= OSI_RWLOCK_READHELD;
955 LeaveCriticalSection(csp);
960 int lock_GetMutexState(struct osi_mutex *mp)
963 CRITICAL_SECTION *csp;
965 if ((i=mp->type) != 0)
966 if (i >= 0 && i < OSI_NLOCKTYPES)
967 return (osi_lockOps[i]->GetMutexState)(mp);
969 /* otherwise we're the fast base type */
970 csp = &osi_baseAtomicCS[mp->atomicIndex];
971 EnterCriticalSection(csp);
973 if (mp->flags & OSI_LOCKFLAG_EXCL)
978 LeaveCriticalSection(csp);