2 * Copyright 2000, International Business Machines Corporation and others.
5 * This software has been released under the terms of the IBM Public
6 * License. For details, see the LICENSE file in the top-level source
7 * directory or online at http://www.openafs.org/dl/license10.html
10 /* Copyright (C) 1994 Cazamar Systems, Inc. */
13 #include <afs/param.h>
/* atomicity-providing critical sections */
CRITICAL_SECTION osi_baseAtomicCS[OSI_MUTEXHASHSIZE];
/* monotonically increasing counter used to assign each new lock an
 * atomicIndex hash bucket (see lock_InitializeMutex/lock_InitializeRWLock) */
static long atomicIndexCounter = 0;

/* Thread local storage index for lock tracking */
static DWORD tls_LockRefH = 0;   /* TLS slot: head of this thread's held-lock queue */
static DWORD tls_LockRefT = 0;   /* TLS slot: tail of this thread's held-lock queue */
/* non-zero enables lock-order (hierarchy) validation in the lock_* routines */
static BOOLEAN lockOrderValidation = 0;
/* free list of recycled osi_lock_ref_t records; guarded by lock_ref_CS */
static osi_lock_ref_t * lock_ref_FreeListp = NULL;
static osi_lock_ref_t * lock_ref_FreeListEndp = NULL;
CRITICAL_SECTION lock_ref_CS;
/* One-time initialization of the base lock package: creates the hashed
 * array of atomic critical sections, allocates the two TLS slots used for
 * per-thread held-lock tracking, and initializes the free-list lock.
 * Panics if either TLS slot cannot be allocated. */
void osi_BaseInit(void)
    for(i=0; i<OSI_MUTEXHASHSIZE; i++)
        InitializeCriticalSection(&osi_baseAtomicCS[i]);

    if ((tls_LockRefH = TlsAlloc()) == TLS_OUT_OF_INDEXES)
        osi_panic("TlsAlloc(tls_LockRefH) failure", __FILE__, __LINE__);

    if ((tls_LockRefT = TlsAlloc()) == TLS_OUT_OF_INDEXES)
        osi_panic("TlsAlloc(tls_LockRefT) failure", __FILE__, __LINE__);

    InitializeCriticalSection(&lock_ref_CS);
/* Enable (non-zero) or disable (zero) lock-order validation for all
 * subsequent lock operations in this process. */
osi_SetLockOrderValidation(int on)
    lockOrderValidation = (BOOLEAN)on;
/* Obtain an osi_lock_ref_t describing lockp for the calling thread's
 * held-lock queue.  A recycled record is popped from the free list under
 * lock_ref_CS when available; otherwise one is malloc'd.  The record is
 * zeroed, tagged with 'type' (OSI_LOCK_MUTEX or OSI_LOCK_RW), and any
 * other type value panics. */
static osi_lock_ref_t *
lock_GetLockRef(void * lockp, char type)
    osi_lock_ref_t * lockRefp = NULL;

    EnterCriticalSection(&lock_ref_CS);
    if (lock_ref_FreeListp) {
        /* reuse a record from the free list */
        lockRefp = lock_ref_FreeListp;
        osi_QRemoveHT( (osi_queue_t **) &lock_ref_FreeListp,
                       (osi_queue_t **) &lock_ref_FreeListEndp,
    LeaveCriticalSection(&lock_ref_CS);

    /* fall back to the heap when the free list was empty */
    lockRefp = (osi_lock_ref_t *)malloc(sizeof(osi_lock_ref_t));

    memset(lockRefp, 0, sizeof(osi_lock_ref_t));
    lockRefp->type = type;
    osi_panic("Invalid Lock Type", __FILE__, __LINE__);
/* Return a lock-ref record to the shared free list (it is recycled by
 * lock_GetLockRef rather than freed to the heap). */
lock_FreeLockRef(osi_lock_ref_t * lockRefp)
    EnterCriticalSection(&lock_ref_CS);
    osi_QAddH( (osi_queue_t **) &lock_ref_FreeListp,
               (osi_queue_t **) &lock_ref_FreeListEndp,
    LeaveCriticalSection(&lock_ref_CS);
/* Lock-order validation for a read/write lock about to be acquired.
 * Walks the calling thread's held-lock queue (lockRefH..lockRefT) and
 * panics if lockp is already held, or if any held lock (rw or mutex) has
 * a level greater than lockp's level (hierarchy violation). */
void lock_VerifyOrderRW(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_rwlock_t *lockp)
    osi_lock_ref_t * lockRefp;

    for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
        if (lockRefp->type == OSI_LOCK_RW) {
            if (lockRefp->rw == lockp) {
                /* re-acquisition of the same rw lock is always fatal */
                sprintf(msg, "RW Lock 0x%p level %d already held", lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            if (lockRefp->rw->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                        lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            /* held mutexes must also obey the hierarchy */
            if (lockRefp->mx->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                        lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            osi_assertx(lockRefp->mx->level <= lockp->level, "Lock hierarchy violation");
/* Lock-order validation for a mutex about to be acquired.  Same policy as
 * lock_VerifyOrderRW: panic if lockp is already held by this thread, or if
 * any lock already held has a level greater than lockp's level. */
void lock_VerifyOrderMX(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_mutex_t *lockp)
    osi_lock_ref_t * lockRefp;

    for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
        if (lockRefp->type == OSI_LOCK_MUTEX) {
            if (lockRefp->mx == lockp) {
                /* re-acquisition of the same mutex is always fatal */
                sprintf(msg, "MX Lock 0x%p level %d already held", lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            if (lockRefp->mx->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                        lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            /* held rw locks must also obey the hierarchy */
            if (lockRefp->rw->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                        lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
/* Acquire lockp for exclusive (write) access, blocking until available.
 * Non-base lock types are dispatched through osi_lockOps[].  The base
 * implementation serializes on the lock's hashed critical section,
 * asserts the caller does not already hold the lock in either mode,
 * waits on the turnstile if there are waiters/readers/a writer, then
 * sets OSI_LOCKFLAG_EXCL.  With lockOrderValidation on, the lock is
 * checked against and then added to the thread's TLS held-lock queue. */
void lock_ObtainWrite(osi_rwlock_t *lockp)
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    /* delegate non-base lock types to their registered ops vector */
    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainWriteProc)(lockp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        /* level 0 locks are exempt from ordering checks */
        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* deadlock self-checks: caller must not already hold this lock */
    if (lockp->flags & OSI_LOCKFLAG_EXCL) {
        osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
    for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++ ) {
        osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
        (lockp->readers > 0)) {
        /* block on the turnstile; csp is released/reacquired inside */
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));

    /* if we're here, all clear to set the lock */
    lockp->flags |= OSI_LOCKFLAG_EXCL;

    osi_assertx(lockp->readers == 0, "write lock readers present");

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        /* record the acquisition in this thread's held-lock queue */
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
/* Acquire lockp for shared (read) access, blocking until available.
 * Non-base lock types dispatch through osi_lockOps[].  The base path
 * asserts the caller holds the lock in neither mode, waits on the
 * turnstile while there are waiters or a writer, then increments the
 * reader count and records the caller's tid (first OSI_RWLOCK_THREADS
 * readers only).  Lock-order validation mirrors lock_ObtainWrite. */
void lock_ObtainRead(osi_rwlock_t *lockp)
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    /* delegate non-base lock types to their registered ops vector */
    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainReadProc)(lockp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* deadlock self-checks: caller must not already hold this lock */
    if (lockp->flags & OSI_LOCKFLAG_EXCL) {
        osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
    for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++ ) {
        osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        /* block on the turnstile; csp is released/reacquired inside */
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, lockp->tid, csp);
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0);

    /* if we're here, all clear to set the lock */
    /* only the first OSI_RWLOCK_THREADS readers have their tid recorded */
    if (++lockp->readers <= OSI_RWLOCK_THREADS)
        lockp->tid[lockp->readers-1] = tid;

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        /* record the acquisition in this thread's held-lock queue */
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
/* Release a shared (read) hold on lockp.  Removes the caller's tid from
 * the recorded-reader array (shifting later entries down), removes the
 * matching entry from the TLS held-lock queue when validation is on, and
 * signals the turnstile when the last reader leaves and writers wait. */
void lock_ReleaseRead(osi_rwlock_t *lockp)
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    /* delegate non-base lock types to their registered ops vector */
    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseReadProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        /* drop this lock's entry from the thread's held-lock queue */
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);

        osi_assertx(found, "read lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);

    osi_assertx(lockp->readers > 0, "read lock not held");

    /* remove caller's tid from the recorded-reader array, compacting it */
    for ( i=0; i < lockp->readers; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];

    /* releasing a read lock can allow writers */
    if (lockp->readers == 0 && lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);

    osi_assertx(lockp->readers >= 0, "read lock underflow");

    /* and finally release the big lock */
    LeaveCriticalSection(csp);
/* Release an exclusive (write) hold on lockp.  Asserts the lock is held
 * exclusively by the calling thread, clears OSI_LOCKFLAG_EXCL, removes
 * the TLS held-lock entry when validation is on, and signals the
 * turnstile if any thread is waiting. */
void lock_ReleaseWrite(osi_rwlock_t *lockp)
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    /* delegate non-base lock types to their registered ops vector */
    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseWriteProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        /* drop this lock's entry from the thread's held-lock queue */
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);

        osi_assertx(found, "write lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);

    /* and finally release the big lock */
    LeaveCriticalSection(csp);
/* Atomically downgrade lockp from an exclusive (write) hold to a shared
 * (read) hold.  Asserts the caller holds the write lock, clears the
 * exclusive flag, and signals the turnstile (telling it readers remain)
 * so waiting readers can proceed. */
void lock_ConvertWToR(osi_rwlock_t *lockp)
    CRITICAL_SECTION *csp;

    /* delegate non-base lock types to their registered ops vector */
    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertWToRProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");

    /* convert write lock to read lock */
    lockp->flags &= ~OSI_LOCKFLAG_EXCL;

    osi_assertx(lockp->readers == 1, "read lock not one");

    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);

    /* and finally release the big lock */
    LeaveCriticalSection(csp);
/* Upgrade lockp from a shared (read) hold to an exclusive (write) hold.
 * The caller's read hold is removed first; if other readers remain the
 * thread blocks on the turnstile until it can take the lock exclusively.
 * Asserts the lock is read-held (and not write-held) on entry. */
void lock_ConvertRToW(osi_rwlock_t *lockp)
    CRITICAL_SECTION *csp;
    DWORD tid = thrd_Current();

    /* delegate non-base lock types to their registered ops vector */
    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertRToWProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held");
    osi_assertx(lockp->readers > 0, "read lock not held");

    /* remove caller's tid from the recorded-reader array, compacting it */
    for ( i=0; i < lockp->readers; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];

    if (--(lockp->readers) == 0) {
        /* convert read lock to write lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;

    osi_assertx(lockp->readers > 0, "read lock underflow");

    /* other readers remain: wait on the turnstile for exclusivity */
    osi_TWaitExt(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp, FALSE);

    osi_assertx(lockp->waiters >= 0, "waiters underflow");
    osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));

    LeaveCriticalSection(csp);
/* Acquire the mutex lockp, blocking until available.  Non-base lock
 * types dispatch through osi_lockOps[].  The base path waits on the
 * turnstile while the mutex is held or has waiters, then marks the
 * mutex exclusive and records the owning thread id.  With
 * lockOrderValidation on, the mutex is checked against and added to the
 * thread's TLS held-lock queue. */
void lock_ObtainMutex(struct osi_mutex *lockp)
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    /* delegate non-base lock types to their registered ops vector */
    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainMutexProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderMX(lockRefH, lockRefT, lockp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        /* block on the turnstile; csp is released/reacquired inside */
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, &lockp->tid, csp);
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);

    /* if we're here, all clear to set the lock */
    lockp->flags |= OSI_LOCKFLAG_EXCL;
    lockp->tid = thrd_Current();

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        /* record the acquisition in this thread's held-lock queue */
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
/* Release the mutex lockp.  Asserts the mutex is held by the calling
 * thread, clears the exclusive flag, removes the TLS held-lock entry
 * when validation is on, and signals the turnstile if threads wait. */
void lock_ReleaseMutex(struct osi_mutex *lockp)
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    /* delegate non-base lock types to their registered ops vector */
    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseMutexProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        /* drop this lock's entry from the thread's held-lock queue */
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);

        osi_assertx(found, "mutex lock not found in TLS queue");
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held");
    osi_assertx(lockp->tid == thrd_Current(), "mutex not held by current thread");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;

    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);

    /* and finally release the big lock */
    LeaveCriticalSection(csp);
555 int lock_TryRead(struct osi_rwlock *lockp)
558 CRITICAL_SECTION *csp;
559 osi_queue_t * lockRefH, *lockRefT;
560 osi_lock_ref_t *lockRefp;
562 if ((i=lockp->type) != 0)
563 if (i >= 0 && i < OSI_NLOCKTYPES)
564 return (osi_lockOps[i]->TryReadProc)(lockp);
566 /* otherwise we're the fast base type */
567 csp = &osi_baseAtomicCS[lockp->atomicIndex];
568 EnterCriticalSection(csp);
570 if (lockOrderValidation) {
571 lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
572 lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
574 if (lockp->level != 0) {
575 for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
576 if (lockRefp->type == OSI_LOCK_RW) {
577 osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
583 /* here we have the fast lock, so see if we can obtain the real lock */
584 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
588 /* if we're here, all clear to set the lock */
589 if (++(lockp->readers) < OSI_RWLOCK_THREADS)
590 lockp->tid[lockp->readers-1] = thrd_Current();
594 LeaveCriticalSection(csp);
596 if (lockOrderValidation && i) {
597 lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
598 osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
599 TlsSetValue(tls_LockRefH, lockRefH);
600 TlsSetValue(tls_LockRefT, lockRefT);
/* Try to acquire lockp for exclusive (write) access without blocking.
 * Returns non-zero on success, 0 if the lock has waiters, is write-held,
 * or has readers.  On success OSI_LOCKFLAG_EXCL is set and tid[0]
 * records the owner; the TLS held-lock queue is updated when
 * lockOrderValidation is on. */
int lock_TryWrite(struct osi_rwlock *lockp)
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    /* delegate non-base lock types to their registered ops vector */
    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryWriteProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            /* re-acquisition of a lock we already hold is fatal */
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_RW) {
                    osi_assertx(lockRefp->rw != lockp, "RW Lock already held");

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)
         || (lockp->readers > 0)) {

    /* if we're here, all clear to set the lock */
    lockp->flags |= OSI_LOCKFLAG_EXCL;
    lockp->tid[0] = thrd_Current();

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        /* record the acquisition in this thread's held-lock queue */
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
/* Try to acquire the mutex lockp without blocking.  Returns non-zero on
 * success, 0 if the mutex is held or has waiters.  On success the
 * exclusive flag is set and tid records the owner; the TLS held-lock
 * queue is updated when lockOrderValidation is on. */
int lock_TryMutex(struct osi_mutex *lockp) {
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    /* delegate non-base lock types to their registered ops vector */
    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryMutexProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            /* re-acquisition of a mutex we already hold is fatal */
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_MUTEX) {
                    osi_assertx(lockRefp->mx != lockp, "Mutex already held");

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {

    /* if we're here, all clear to set the lock */
    lockp->flags |= OSI_LOCKFLAG_EXCL;
    lockp->tid = thrd_Current();

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        /* record the acquisition in this thread's held-lock queue */
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
/* Atomically release a read hold on lockp and sleep on sleepVal.
 * The caller's tid is removed from the reader array, the TLS held-lock
 * entry is dropped when validation is on, waiting writers are signalled
 * if this was the last reader, and osi_SleepSpin then sleeps while
 * releasing the per-lock critical section atomically. */
void osi_SleepR(LONG_PTR sleepVal, struct osi_rwlock *lockp)
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    /* delegate non-base lock types to their registered ops vector */
    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepRProc)(sleepVal, lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        /* drop this lock's entry from the thread's held-lock queue */
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);

    osi_assertx(lockp->readers > 0, "osi_SleepR: not held");

    /* remove caller's tid from the recorded-reader array, compacting it */
    for ( i=0; i < lockp->readers; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];

    /* XXX better to get the list of things to wakeup from TSignalForMLs, and
     * then do the wakeup after SleepSpin releases the low-level mutex.
     */
    if (--(lockp->readers) == 0 && lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);

    /* now call into scheduler to sleep atomically with releasing spin lock */
    osi_SleepSpin(sleepVal, csp);
/* Atomically release a write hold on lockp and sleep on sleepVal.
 * Asserts the write lock is held, clears the exclusive flag, drops the
 * TLS held-lock entry when validation is on, signals waiters, and then
 * sleeps via osi_SleepSpin while releasing the per-lock critical
 * section atomically. */
void osi_SleepW(LONG_PTR sleepVal, struct osi_rwlock *lockp)
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    /* delegate non-base lock types to their registered ops vector */
    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepWProc)(sleepVal, lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        /* drop this lock's entry from the thread's held-lock queue */
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;

    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);

    /* and finally release the big lock */
    osi_SleepSpin(sleepVal, csp);
/* Atomically release the mutex lockp and sleep on sleepVal.  Asserts the
 * mutex is held, clears the exclusive flag, drops the TLS held-lock
 * entry when validation is on, signals waiters, and sleeps via
 * osi_SleepSpin while releasing the per-lock critical section. */
void osi_SleepM(LONG_PTR sleepVal, struct osi_mutex *lockp)
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    /* delegate non-base lock types to their registered ops vector */
    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepMProc)(sleepVal, lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        /* drop this lock's entry from the thread's held-lock queue */
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM not held");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;

    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);

    /* and finally release the big lock */
    osi_SleepSpin(sleepVal, csp);
/* Finalize an rwlock.  Only non-base lock types need work, dispatched
 * through osi_lockOps[]; the base type requires no teardown. */
void lock_FinalizeRWLock(osi_rwlock_t *lockp)
    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->FinalizeRWLockProc)(lockp);
/* Finalize a mutex.  Only non-base lock types need work, dispatched
 * through osi_lockOps[]; the base type requires no teardown. */
void lock_FinalizeMutex(osi_mutex_t *lockp)
    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->FinalizeMutexProc)(lockp);
/* Initialize the mutex mp with the given name and lock-order level.
 * If a non-base default lock type is configured, initialization is
 * delegated to its ops vector.  The base case zeroes the structure,
 * assigns an atomicIndex hash bucket round-robin via the shared
 * interlocked counter, and initializes the turnstile. */
void lock_InitializeMutex(osi_mutex_t *mp, char *namep, unsigned short level)
    if ((i = osi_lockTypeDefault) > 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->InitializeMutexProc)(mp, namep, level);

    /*
     * otherwise we have the base case, which requires no special
     */
    memset(mp, 0, sizeof(osi_mutex_t));
    mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);

    osi_TInit(&mp->d.turn);
/* Initialize the rwlock mp with the given name and lock-order level.
 * Mirrors lock_InitializeMutex: delegate to the default lock type's ops
 * vector when configured, otherwise zero the structure, assign an
 * atomicIndex bucket, and initialize the turnstile. */
void lock_InitializeRWLock(osi_rwlock_t *mp, char *namep, unsigned short level)
    if ((i = osi_lockTypeDefault) > 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->InitializeRWLockProc)(mp, namep, level);

    /* otherwise we have the base case, which requires no special
     */
    memset(mp, 0, sizeof(osi_rwlock_t));
    mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);

    osi_TInit(&mp->d.turn);
/* Return a snapshot of lp's state as OSI_RWLOCK_* bits
 * (write-held and/or read-held).  The snapshot is taken under the
 * lock's atomic critical section but may be stale by the time the
 * caller inspects it. */
int lock_GetRWLockState(osi_rwlock_t *lp)
    CRITICAL_SECTION *csp;

    /* delegate non-base lock types to their registered ops vector */
    if ((i=lp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->GetRWLockState)(lp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lp->flags & OSI_LOCKFLAG_EXCL)
        i = OSI_RWLOCK_WRITEHELD;
    i |= OSI_RWLOCK_READHELD;

    LeaveCriticalSection(csp);
/* Return a snapshot of the mutex mp's state (held / not held), taken
 * under the lock's atomic critical section.  As with
 * lock_GetRWLockState, the result may be stale by the time it is used. */
int lock_GetMutexState(struct osi_mutex *mp)
    CRITICAL_SECTION *csp;

    /* delegate non-base lock types to their registered ops vector */
    if ((i=mp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->GetMutexState)(mp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[mp->atomicIndex];
    EnterCriticalSection(csp);

    if (mp->flags & OSI_LOCKFLAG_EXCL)

    LeaveCriticalSection(csp);