/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/* Copyright (C) 1994 Cazamar Systems, Inc. */


#include <afs/param.h>
#include <afs/stds.h>

#include <windows.h>
#include "osi.h"
#include <assert.h>
#include <stdio.h>

/* atomicity-providing critical sections */
CRITICAL_SECTION osi_baseAtomicCS[OSI_MUTEXHASHSIZE];
static long     atomicIndexCounter = 0;

/* Thread local storage index for lock tracking */
static DWORD tls_LockRefH = 0;
static DWORD tls_LockRefT = 0;
static BOOLEAN lockOrderValidation = 0;
static osi_lock_ref_t * lock_ref_FreeListp = NULL;
static osi_lock_ref_t * lock_ref_FreeListEndp = NULL;
CRITICAL_SECTION lock_ref_CS;

void osi_BaseInit(void)
{
    int i;

    for(i=0; i<OSI_MUTEXHASHSIZE; i++)
        InitializeCriticalSection(&osi_baseAtomicCS[i]);

    if ((tls_LockRefH = TlsAlloc()) == TLS_OUT_OF_INDEXES)
        osi_panic("TlsAlloc(tls_LockRefH) failure", __FILE__, __LINE__);

    if ((tls_LockRefT = TlsAlloc()) == TLS_OUT_OF_INDEXES)
        osi_panic("TlsAlloc(tls_LockRefT) failure", __FILE__, __LINE__);

    InitializeCriticalSection(&lock_ref_CS);
}
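
/*
 * Illustrative usage (hypothetical names): a client typically calls
 * osi_BaseInit() once at startup, before any lock is created or used,
 * and may then enable lock order validation for debugging, e.g.
 *
 *     osi_BaseInit();
 *     osi_SetLockOrderValidation(1);
 *     lock_InitializeRWLock(&someLock, "some lock", SOME_LOCK_LEVEL);
 *
 * someLock and SOME_LOCK_LEVEL are placeholders, not names used by
 * this library.
 */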

void
osi_SetLockOrderValidation(int on)
{
    lockOrderValidation = (BOOLEAN)on;
}

static osi_lock_ref_t *
lock_GetLockRef(void * lockp, char type)
{
    osi_lock_ref_t * lockRefp = NULL;

    EnterCriticalSection(&lock_ref_CS);
    if (lock_ref_FreeListp) {
        lockRefp = lock_ref_FreeListp;
        osi_QRemoveHT( (osi_queue_t **) &lock_ref_FreeListp,
                       (osi_queue_t **) &lock_ref_FreeListEndp,
                       &lockRefp->q);
    }
    LeaveCriticalSection(&lock_ref_CS);

    if (lockRefp == NULL) {
        lockRefp = (osi_lock_ref_t *)malloc(sizeof(osi_lock_ref_t));
        osi_assertx(lockRefp != NULL, "lock_GetLockRef: malloc failure");
    }

    memset(lockRefp, 0, sizeof(osi_lock_ref_t));
    lockRefp->type = type;
    switch (type) {
    case OSI_LOCK_MUTEX:
        lockRefp->mx = lockp;
        break;
    case OSI_LOCK_RW:
        lockRefp->rw = lockp;
        break;
    default:
        osi_panic("Invalid Lock Type", __FILE__, __LINE__);
    }

    return lockRefp;
}

static void
lock_FreeLockRef(osi_lock_ref_t * lockRefp)
{
    EnterCriticalSection(&lock_ref_CS);
    osi_QAddH( (osi_queue_t **) &lock_ref_FreeListp,
               (osi_queue_t **) &lock_ref_FreeListEndp,
               &lockRefp->q);
    LeaveCriticalSection(&lock_ref_CS);
}
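
/*
 * Design note: released lock references are recycled through
 * lock_ref_FreeListp rather than freed, so steady-state lock order
 * validation does not hit the heap on every Obtain/Release pair; the
 * malloc in lock_GetLockRef runs only when the free list is empty.
 */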

void lock_VerifyOrderRW(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_rwlock_t *lockp)
{
    char msg[512];
    osi_lock_ref_t * lockRefp;

    for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
        if (lockRefp->type == OSI_LOCK_RW) {
            if (lockRefp->rw == lockp) {
                sprintf(msg, "RW Lock 0x%p level %d already held", lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
            if (lockRefp->rw->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                         lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        } else {
            if (lockRefp->mx->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                         lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        }
    }
}

void lock_VerifyOrderMX(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_mutex_t *lockp)
{
    char msg[512];
    osi_lock_ref_t * lockRefp;

    for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
        if (lockRefp->type == OSI_LOCK_MUTEX) {
            if (lockRefp->mx == lockp) {
                sprintf(msg, "MX Lock 0x%p level %d already held", lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
            if (lockRefp->mx->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                         lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        } else {
            if (lockRefp->rw->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                         lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        }
    }
}
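
/*
 * Illustrative example (hypothetical names): with lock order validation
 * enabled, locks must be acquired in non-decreasing level order.  Given
 * two locks initialized as
 *
 *     lock_InitializeRWLock(&dirLock, "dir lock", 2);
 *     lock_InitializeMutex(&bufMutex, "buf mutex", 5);
 *
 * acquiring dirLock and then bufMutex passes validation (2 <= 5), while
 * acquiring bufMutex first and then dirLock panics with a hierarchy
 * violation.  Locks initialized with level 0 are exempt from the check.
 */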

void lock_ObtainWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainWriteProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockp->flags & OSI_LOCKFLAG_EXCL) {
        osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
    }
#ifdef DEBUG
    else {
        for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++ ) {
            osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
        }
    }
#endif

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
        (lockp->readers > 0)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = tid;
    }
    osi_assertx(lockp->readers == 0, "write lock readers present");

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
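
/*
 * Illustrative usage (hypothetical names): obtain/release calls must be
 * strictly paired on the same thread, e.g.
 *
 *     lock_ObtainWrite(&objp->rw);
 *     ... mutate state protected by the lock ...
 *     lock_ReleaseWrite(&objp->rw);
 *
 * Re-acquiring a write lock the calling thread already holds asserts
 * rather than deadlocks, as does (in DEBUG builds) requesting the write
 * lock while holding it for reading.
 */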

void lock_ObtainRead(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainReadProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockp->flags & OSI_LOCKFLAG_EXCL) {
        osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
    }
#ifdef DEBUG
    else {
        for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++ ) {
            osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
        }
    }
#endif

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, lockp->tid, csp);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0);
    } else {
        /* if we're here, all clear to set the lock */
        lockp->readers++;
#ifdef DEBUG
        if (lockp->readers <= OSI_RWLOCK_THREADS)
            lockp->tid[lockp->readers-1] = tid;
#endif
    }
    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}

void lock_ReleaseRead(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseReadProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "read lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->readers > 0, "read lock not held");

#ifdef DEBUG
    for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < (lockp->readers - 1) && i < (OSI_RWLOCK_THREADS - 1); i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }
#endif

    lockp->readers--;

    /* releasing a read lock can allow writers */
    if (lockp->readers == 0 && lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        osi_assertx(lockp->readers >= 0, "read lock underflow");

        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}

void lock_ReleaseWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseWriteProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "write lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");
    lockp->tid[0] = 0;

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}

void lock_ConvertWToR(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertWToRProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");

    /* convert write lock to read lock */
    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid[0] = 0;
    lockp->readers++;

    osi_assertx(lockp->readers == 1, "read lock not one");

    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}

void lock_ConvertRToW(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertRToWProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held");
    osi_assertx(lockp->readers > 0, "read lock not held");

#ifdef DEBUG
    for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < (lockp->readers - 1) && i < (OSI_RWLOCK_THREADS - 1); i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }
#endif

    if (--(lockp->readers) == 0) {
        /* convert read lock to write lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = tid;
    } else {
        osi_assertx(lockp->readers > 0, "read lock underflow");

        lockp->waiters++;
        osi_TWaitExt(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp, FALSE);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    }

    LeaveCriticalSection(csp);
}
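
/*
 * Caveat (illustrative, hypothetical names): lock_ConvertRToW is not
 * atomic when other readers are present; the caller sleeps until the
 * remaining readers drain, and other threads may be granted the lock in
 * the interim.  State observed under the read lock should therefore be
 * revalidated once the conversion returns, along the lines of
 *
 *     lock_ObtainRead(&objp->rw);
 *     lock_ConvertRToW(&objp->rw);
 *     if (needsUpdate(objp))      // recheck under the write lock
 *         update(objp);
 *     lock_ReleaseWrite(&objp->rw);
 */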

void lock_ObtainMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainMutexProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderMX(lockRefH, lockRefT, lockp);
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, &lockp->tid, csp);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid = thrd_Current();
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}

void lock_ReleaseMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseMutexProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }

        osi_assertx(found, "mutex lock not found in TLS queue");
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held");
    osi_assertx(lockp->tid == thrd_Current(), "mutex not held by current thread");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid = 0;
    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}

int lock_TryRead(struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryReadProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_RW) {
                    osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
                }
            }
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        i = 0;
    }
    else {
        /* if we're here, all clear to set the lock */
        lockp->readers++;
#ifdef DEBUG
        if (lockp->readers <= OSI_RWLOCK_THREADS)
            lockp->tid[lockp->readers-1] = thrd_Current();
#endif
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}


int lock_TryWrite(struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryWriteProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_RW) {
                    osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
                }
            }
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)
         || (lockp->readers > 0)) {
        i = 0;
    }
    else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}


int lock_TryMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryMutexProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_MUTEX) {
                    osi_assertx(lockRefp->mx != lockp, "Mutex already held");
                }
            }
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        i = 0;
    }
    else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}
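
/*
 * Illustrative pattern (hypothetical names): the Try variants return 0
 * without blocking when the lock is unavailable, and they do not run
 * the lock hierarchy check (only the already-held check), so they can
 * be used to acquire a lower-level lock while a higher-level one is
 * already held, backing off on failure:
 *
 *     lock_ObtainMutex(&highLevelMutex);        // higher level
 *     if (!lock_TryMutex(&lowLevelMutex)) {     // lower level
 *         lock_ReleaseMutex(&highLevelMutex);
 *         lock_ObtainMutex(&lowLevelMutex);     // block in legal order
 *         lock_ObtainMutex(&highLevelMutex);
 *     }
 */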

void osi_SleepR(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepRProc)(sleepVal, lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->readers > 0, "osi_SleepR: not held");

#ifdef DEBUG
    for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < (lockp->readers - 1) && i < (OSI_RWLOCK_THREADS - 1); i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }
#endif

    /* XXX better to get the list of things to wakeup from TSignalForMLs, and
     * then do the wakeup after SleepSpin releases the low-level mutex.
     */
    if (--(lockp->readers) == 0 && lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* now call into scheduler to sleep atomically with releasing spin lock */
    osi_SleepSpin(sleepVal, csp);
}
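
/*
 * Illustrative pattern (hypothetical names): the osi_Sleep* routines
 * atomically drop the lock and sleep on sleepVal, typically the address
 * of the datum being waited on; another thread is then expected to wake
 * the sleeper with the same value (via this library's wakeup routine,
 * osi_Wakeup).  The lock is NOT reacquired on wakeup, so callers must
 * re-obtain it and recheck the condition:
 *
 *     lock_ObtainRead(&objp->rw);
 *     while (!(objp->flags & OBJ_READY)) {        // hypothetical flag
 *         osi_SleepR((LONG_PTR)objp, &objp->rw);  // drops the lock
 *         lock_ObtainRead(&objp->rw);             // re-obtain, recheck
 *     }
 *     lock_ReleaseRead(&objp->rw);
 *
 *     // producer side:
 *     osi_Wakeup((LONG_PTR)objp);
 */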

void osi_SleepW(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepWProc)(sleepVal, lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid[0] = 0;
    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* and finally release the big lock */
    osi_SleepSpin(sleepVal, csp);
}

void osi_SleepM(LONG_PTR sleepVal, struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepMProc)(sleepVal, lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM not held");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid = 0;
    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* and finally release the big lock */
    osi_SleepSpin(sleepVal, csp);
}

void lock_FinalizeRWLock(osi_rwlock_t *lockp)
{
    long i;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->FinalizeRWLockProc)(lockp);
}

void lock_FinalizeMutex(osi_mutex_t *lockp)
{
    long i;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->FinalizeMutexProc)(lockp);
}

void lock_InitializeMutex(osi_mutex_t *mp, char *namep, unsigned short level)
{
    int i;

    if ((i = osi_lockTypeDefault) > 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->InitializeMutexProc)(mp, namep, level);
        return;
    }

    /*
     * otherwise we have the base case, which requires no special
     * initialization.
     */
    memset(mp, 0, sizeof(osi_mutex_t));
    mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
    mp->level = level;
    osi_TInit(&mp->d.turn);
    return;
}

void lock_InitializeRWLock(osi_rwlock_t *mp, char *namep, unsigned short level)
{
    int i;

    if ((i = osi_lockTypeDefault) > 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->InitializeRWLockProc)(mp, namep, level);
        return;
    }

    /* otherwise we have the base case, which requires no special
     * initialization.
     */
    memset(mp, 0, sizeof(osi_rwlock_t));
    mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
    mp->level = level;
    osi_TInit(&mp->d.turn);
    return;
}
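
/*
 * Design note: base locks do not own a Windows synchronization object
 * apiece.  Each lock is assigned, round-robin via atomicIndexCounter,
 * one of the OSI_MUTEXHASHSIZE shared critical sections in
 * osi_baseAtomicCS, which serializes the short flag/counter updates
 * throughout this file.  Unrelated locks that hash to the same index
 * therefore contend briefly with each other, trading a little
 * contention for a much smaller per-lock footprint.
 */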

int lock_GetRWLockState(osi_rwlock_t *lp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i=lp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->GetRWLockState)(lp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so we can read the state consistently */
    if (lp->flags & OSI_LOCKFLAG_EXCL)
        i = OSI_RWLOCK_WRITEHELD;
    else
        i = 0;
    if (lp->readers > 0)
        i |= OSI_RWLOCK_READHELD;

    LeaveCriticalSection(csp);

    return i;
}
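
/*
 * Note: the state returned by lock_GetRWLockState (and lock_GetMutexState
 * below) is a snapshot taken under the atomic critical section; by the
 * time the caller examines it, other threads may already have changed
 * the lock.  It is suitable for diagnostics and assertions, not for
 * making locking decisions.
 */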

int lock_GetMutexState(struct osi_mutex *mp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i=mp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->GetMutexState)(mp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[mp->atomicIndex];
    EnterCriticalSection(csp);

    if (mp->flags & OSI_LOCKFLAG_EXCL)
        i = OSI_MUTEX_HELD;
    else
        i = 0;

    LeaveCriticalSection(csp);

    return i;
}