Windows: add osi_TWaitExt(), fix osi_TWait()
[openafs.git] / src / WINNT / client_osi / osibasel.c
1 /*
2  * Copyright 2000, International Business Machines Corporation and others.
3  * All Rights Reserved.
4  *
5  * This software has been released under the terms of the IBM Public
6  * License.  For details, see the LICENSE file in the top-level source
7  * directory or online at http://www.openafs.org/dl/license10.html
8  */
9
10 /* Copyright (C) 1994 Cazamar Systems, Inc. */
11
12
13 #include <afs/param.h>
14 #include <afs/stds.h>
15
16 #include <windows.h>
17 #include "osi.h"
18 #include <assert.h>
19 #include <stdio.h>
20
/* atomicity-providing critical sections */
CRITICAL_SECTION osi_baseAtomicCS[OSI_MUTEXHASHSIZE];
/* monotonically incremented to spread locks across the hash buckets above */
static long     atomicIndexCounter = 0;

/* Thread local storage index for lock tracking */
static DWORD tls_LockRefH = 0;    /* TLS slot: head of per-thread held-lock queue */
static DWORD tls_LockRefT = 0;    /* TLS slot: tail of per-thread held-lock queue */
static BOOLEAN lockOrderValidation = 0;  /* non-zero enables lock hierarchy checks */
static osi_lock_ref_t * lock_ref_FreeListp = NULL;    /* recycled lock refs, head */
static osi_lock_ref_t * lock_ref_FreeListEndp = NULL; /* recycled lock refs, tail */
CRITICAL_SECTION lock_ref_CS;     /* guards the lock ref free list above */
32
33 void osi_BaseInit(void)
34 {
35     int i;
36
37     for(i=0; i<OSI_MUTEXHASHSIZE; i++)
38         InitializeCriticalSection(&osi_baseAtomicCS[i]);
39
40     if ((tls_LockRefH = TlsAlloc()) == TLS_OUT_OF_INDEXES)
41         osi_panic("TlsAlloc(tls_LockRefH) failure", __FILE__, __LINE__);
42
43     if ((tls_LockRefT = TlsAlloc()) == TLS_OUT_OF_INDEXES)
44         osi_panic("TlsAlloc(tls_LockRefT) failure", __FILE__, __LINE__);
45
46     InitializeCriticalSection(&lock_ref_CS);
47 }
48
/* Enable (non-zero) or disable (zero) per-thread lock order validation.
 * Intended to be called before threads start taking locks. */
void
osi_SetLockOrderValidation(int on)
{
    lockOrderValidation = (BOOLEAN)on;
}
54
55 static osi_lock_ref_t *
56 lock_GetLockRef(void * lockp, char type)
57 {
58     osi_lock_ref_t * lockRefp = NULL;
59
60     EnterCriticalSection(&lock_ref_CS);
61     if (lock_ref_FreeListp) {
62         lockRefp = lock_ref_FreeListp;
63         osi_QRemoveHT( (osi_queue_t **) &lock_ref_FreeListp,
64                        (osi_queue_t **) &lock_ref_FreeListEndp,
65                        &lockRefp->q);
66     }
67     LeaveCriticalSection(&lock_ref_CS);
68
69     if (lockRefp == NULL)
70         lockRefp = (osi_lock_ref_t *)malloc(sizeof(osi_lock_ref_t));
71
72     memset(lockRefp, 0, sizeof(osi_lock_ref_t));
73     lockRefp->type = type;
74     switch (type) {
75     case OSI_LOCK_MUTEX:
76         lockRefp->mx = lockp;
77         break;
78     case OSI_LOCK_RW:
79         lockRefp->rw = lockp;
80         break;
81     default:
82         osi_panic("Invalid Lock Type", __FILE__, __LINE__);
83     }
84
85     return lockRefp;
86 }
87
88 static void
89 lock_FreeLockRef(osi_lock_ref_t * lockRefp)
90 {
91     EnterCriticalSection(&lock_ref_CS);
92     osi_QAddH( (osi_queue_t **) &lock_ref_FreeListp,
93                (osi_queue_t **) &lock_ref_FreeListEndp,
94                &lockRefp->q);
95     LeaveCriticalSection(&lock_ref_CS);
96 }
97
98 void lock_VerifyOrderRW(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_rwlock_t *lockp)
99 {
100     char msg[512];
101     osi_lock_ref_t * lockRefp;
102
103     for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
104         if (lockRefp->type == OSI_LOCK_RW) {
105             if (lockRefp->rw == lockp) {
106                 sprintf(msg, "RW Lock 0x%p level %d already held", lockp, lockp->level);
107                 osi_panic(msg, __FILE__, __LINE__);
108             }
109             if (lockRefp->rw->level > lockp->level) {
110                 sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
111                          lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
112                 osi_panic(msg, __FILE__, __LINE__);
113             }
114         } else {
115             if (lockRefp->mx->level > lockp->level) {
116                 sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
117                          lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
118                 osi_panic(msg, __FILE__, __LINE__);
119             }
120             osi_assertx(lockRefp->mx->level <= lockp->level, "Lock hierarchy violation");
121         }
122     }
123 }
124
125 void lock_VerifyOrderMX(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_mutex_t *lockp)
126 {
127     char msg[512];
128     osi_lock_ref_t * lockRefp;
129
130     for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
131         if (lockRefp->type == OSI_LOCK_MUTEX) {
132             if (lockRefp->mx == lockp) {
133                 sprintf(msg, "MX Lock 0x%p level %d already held", lockp, lockp->level);
134                 osi_panic(msg, __FILE__, __LINE__);
135             }
136             if (lockRefp->mx->level > lockp->level) {
137                 sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
138                          lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
139                 osi_panic(msg, __FILE__, __LINE__);
140             }
141         } else {
142             if (lockRefp->rw->level > lockp->level) {
143                 sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
144                          lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
145                 osi_panic(msg, __FILE__, __LINE__);
146             }
147         }
148     }
149 }
150
/* lock_ObtainWrite
 *
 * Acquire rwlock lockp for exclusive (write) access, blocking until no
 * writer and no readers hold it.  Non-base lock types are dispatched
 * through the osi_lockOps vector.
 */
void lock_ObtainWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    /* non-zero type: delegate to the registered lock implementation */
    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainWriteProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        /* level 0 locks are exempt from ordering checks */
        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* recursion check: panic if this thread already holds lockp */
    if (lockp->flags & OSI_LOCKFLAG_EXCL) {
        osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
    } else {
        for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++ ) {
            osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
        (lockp->readers > 0)) {
        lockp->waiters++;
        /* osi_TWait drops and reacquires csp while sleeping; the waker
         * grants us the write lock before we return */
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = tid;
    }
    osi_assertx(lockp->readers == 0, "write lock readers present");

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        /* record this lock in the per-thread held-lock queue */
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
209
/* lock_ObtainRead
 *
 * Acquire rwlock lockp for shared (read) access, blocking while a
 * writer holds it or writers are waiting.  Non-base lock types are
 * dispatched through the osi_lockOps vector.
 */
void lock_ObtainRead(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    /* non-zero type: delegate to the registered lock implementation */
    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainReadProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        /* level 0 locks are exempt from ordering checks */
        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* recursion check: panic if this thread already holds lockp */
    if (lockp->flags & OSI_LOCKFLAG_EXCL) {
        osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
    } else {
        for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++ ) {
            osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        lockp->waiters++;
        /* osi_TWait drops and reacquires csp while sleeping; the waker
         * registers us as a reader before we return */
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, lockp->tid, csp);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0);
    } else {
        /* if we're here, all clear to set the lock; only the first
         * OSI_RWLOCK_THREADS readers are recorded in tid[] (readers may
         * legitimately exceed the array size) */
        if (++lockp->readers <= OSI_RWLOCK_THREADS)
            lockp->tid[lockp->readers-1] = tid;
    }
    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        /* record this lock in the per-thread held-lock queue */
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
265
266 void lock_ReleaseRead(osi_rwlock_t *lockp)
267 {
268     long i;
269     CRITICAL_SECTION *csp;
270     osi_queue_t * lockRefH, *lockRefT;
271     osi_lock_ref_t *lockRefp;
272     DWORD tid = thrd_Current();
273
274     if ((i = lockp->type) != 0) {
275         if (i >= 0 && i < OSI_NLOCKTYPES)
276             (osi_lockOps[i]->ReleaseReadProc)(lockp);
277         return;
278     }
279
280     /* otherwise we're the fast base type */
281     csp = &osi_baseAtomicCS[lockp->atomicIndex];
282     EnterCriticalSection(csp);
283
284     if (lockOrderValidation && lockp->level != 0) {
285         int found = 0;
286         lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
287         lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
288
289         for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
290             if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
291                 osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
292                 lock_FreeLockRef(lockRefp);
293                 found = 1;
294                 break;
295             }
296         }
297         osi_assertx(found, "read lock not found in TLS queue");
298
299         TlsSetValue(tls_LockRefH, lockRefH);
300         TlsSetValue(tls_LockRefT, lockRefT);
301     }
302
303     osi_assertx(lockp->readers > 0, "read lock not held");
304
305     for ( i=0; i < lockp->readers; i++) {
306         if ( lockp->tid[i] == tid ) {
307             for ( ; i < lockp->readers - 1; i++)
308                 lockp->tid[i] = lockp->tid[i+1];
309             lockp->tid[i] = 0;
310             break;
311         }
312     }
313
314         lockp->readers--;
315
316     /* releasing a read lock can allow writers */
317     if (lockp->readers == 0 && lockp->waiters) {
318         osi_TSignalForMLs(&lockp->d.turn, 0, csp);
319     }
320     else {
321         osi_assertx(lockp->readers >= 0, "read lock underflow");
322
323         /* and finally release the big lock */
324         LeaveCriticalSection(csp);
325     }
326 }
327
/* lock_ReleaseWrite
 *
 * Release the exclusive (write) hold on rwlock lockp and wake any
 * waiting threads.  Panics if the write lock is not held by the calling
 * thread.  Non-base lock types are dispatched through osi_lockOps.
 */
void lock_ReleaseWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseWriteProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        /* drop this lock from the per-thread held-lock queue */
        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "write lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");

    lockp->tid[0] = 0;

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    if (lockp->waiters) {
        /* osi_TSignalForMLs releases csp on our behalf */
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
378
/* lock_ConvertWToR
 *
 * Atomically downgrade rwlock lockp from an exclusive (write) hold to a
 * shared (read) hold by the calling thread, waking waiters that are
 * compatible with readers.  Panics if the write lock is not held by the
 * caller.  Non-base lock types are dispatched through osi_lockOps.
 */
void lock_ConvertWToR(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertWToRProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");

    /* convert write lock to read lock; tid[0] already records us */
    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->readers++;

    osi_assertx(lockp->readers == 1, "read lock not one");

    if (lockp->waiters) {
        /* osi_TSignalForMLs releases csp on our behalf */
        osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
411
412 void lock_ConvertRToW(osi_rwlock_t *lockp)
413 {
414     long i;
415     CRITICAL_SECTION *csp;
416     DWORD tid = thrd_Current();
417
418     if ((i = lockp->type) != 0) {
419         if (i >= 0 && i < OSI_NLOCKTYPES)
420             (osi_lockOps[i]->ConvertRToWProc)(lockp);
421         return;
422     }
423
424     /* otherwise we're the fast base type */
425     csp = &osi_baseAtomicCS[lockp->atomicIndex];
426     EnterCriticalSection(csp);
427
428     osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held");
429     osi_assertx(lockp->readers > 0, "read lock not held");
430
431     for ( i=0; i < lockp->readers; i++) {
432         if ( lockp->tid[i] == tid ) {
433             for ( ; i < lockp->readers - 1; i++)
434                 lockp->tid[i] = lockp->tid[i+1];
435             lockp->tid[i] = 0;
436             break;
437         }
438     }
439
440     if (--(lockp->readers) == 0) {
441         /* convert read lock to write lock */
442         lockp->flags |= OSI_LOCKFLAG_EXCL;
443         lockp->tid[0] = tid;
444     } else {
445         osi_assertx(lockp->readers > 0, "read lock underflow");
446
447         lockp->waiters++;
448         osi_TWaitExt(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp, FALSE);
449         lockp->waiters--;
450         osi_assertx(lockp->waiters >= 0, "waiters underflow");
451         osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
452     }
453
454     LeaveCriticalSection(csp);
455 }
456
/* lock_ObtainMutex
 *
 * Acquire mutex lockp, blocking until it is free.  Non-base lock types
 * are dispatched through the osi_lockOps vector.
 */
void lock_ObtainMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainMutexProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        /* level 0 locks are exempt from ordering checks */
        if (lockp->level != 0)
            lock_VerifyOrderMX(lockRefH, lockRefT, lockp);
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        lockp->waiters++;
        /* osi_TWait drops and reacquires csp while sleeping; the waker
         * grants us the mutex (and records our tid) before we return */
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, &lockp->tid, csp);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid = thrd_Current();
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        /* record this lock in the per-thread held-lock queue */
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
504
/* lock_ReleaseMutex
 *
 * Release mutex lockp and wake a waiter if any.  Panics if the mutex is
 * not held by the calling thread.  Non-base lock types are dispatched
 * through the osi_lockOps vector.
 */
void lock_ReleaseMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseMutexProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        /* drop this lock from the per-thread held-lock queue */
        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }

        osi_assertx(found, "mutex lock not found in TLS queue");
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held");
    osi_assertx(lockp->tid == thrd_Current(), "mutex not held by current thread");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid = 0;
    if (lockp->waiters) {
        /* osi_TSignalForMLs releases csp on our behalf */
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
554
555 int lock_TryRead(struct osi_rwlock *lockp)
556 {
557     long i;
558     CRITICAL_SECTION *csp;
559     osi_queue_t * lockRefH, *lockRefT;
560     osi_lock_ref_t *lockRefp;
561
562     if ((i=lockp->type) != 0)
563         if (i >= 0 && i < OSI_NLOCKTYPES)
564             return (osi_lockOps[i]->TryReadProc)(lockp);
565
566     /* otherwise we're the fast base type */
567     csp = &osi_baseAtomicCS[lockp->atomicIndex];
568     EnterCriticalSection(csp);
569
570     if (lockOrderValidation) {
571         lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
572         lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
573
574         if (lockp->level != 0) {
575             for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
576                 if (lockRefp->type == OSI_LOCK_RW) {
577                     osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
578                 }
579             }
580         }
581     }
582
583     /* here we have the fast lock, so see if we can obtain the real lock */
584     if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
585         i = 0;
586     }
587     else {
588         /* if we're here, all clear to set the lock */
589         if (++(lockp->readers) < OSI_RWLOCK_THREADS)
590             lockp->tid[lockp->readers-1] = thrd_Current();
591         i = 1;
592     }
593
594     LeaveCriticalSection(csp);
595
596     if (lockOrderValidation && i) {
597         lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
598         osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
599         TlsSetValue(tls_LockRefH, lockRefH);
600         TlsSetValue(tls_LockRefT, lockRefT);
601     }
602
603     return i;
604 }
605
606
/* lock_TryWrite
 *
 * Attempt to acquire rwlock lockp for exclusive (write) access without
 * blocking.  Returns 1 on success, 0 if the lock is held (read or
 * write) or has waiters.  Non-base lock types are dispatched through
 * the osi_lockOps vector.
 * NOTE(review): if lockp->type is non-zero but out of range, control
 * falls through to the base-type path -- confirm this is intended; the
 * same pattern appears in the other Try/Get functions.
 */
int lock_TryWrite(struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryWriteProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            /* a Try on an already-held lock is always a usage error */
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_RW) {
                    osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
                }
            }
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)
         || (lockp->readers > 0)) {
        i = 0;
    }
    else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        /* record this lock in the per-thread held-lock queue */
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}
658
659
/* lock_TryMutex
 *
 * Attempt to acquire mutex lockp without blocking.  Returns 1 on
 * success, 0 if the mutex is held or has waiters.  Non-base lock types
 * are dispatched through the osi_lockOps vector.
 */
int lock_TryMutex(struct osi_mutex *lockp) {
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryMutexProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            /* a Try on an already-held lock is always a usage error */
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_MUTEX) {
                    osi_assertx(lockRefp->mx != lockp, "Mutex already held");
                }
            }
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        i = 0;
    }
    else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        /* record this lock in the per-thread held-lock queue */
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}
709
710 void osi_SleepR(LONG_PTR sleepVal, struct osi_rwlock *lockp)
711 {
712     long i;
713     CRITICAL_SECTION *csp;
714     osi_queue_t * lockRefH, *lockRefT;
715     osi_lock_ref_t *lockRefp;
716     DWORD tid = thrd_Current();
717
718     if ((i = lockp->type) != 0) {
719         if (i >= 0 && i < OSI_NLOCKTYPES)
720             (osi_lockOps[i]->SleepRProc)(sleepVal, lockp);
721         return;
722     }
723
724     /* otherwise we're the fast base type */
725     csp = &osi_baseAtomicCS[lockp->atomicIndex];
726     EnterCriticalSection(csp);
727
728     if (lockOrderValidation && lockp->level != 0) {
729         lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
730         lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
731
732         for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
733             if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
734                 osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
735                 lock_FreeLockRef(lockRefp);
736                 break;
737             }
738         }
739
740         TlsSetValue(tls_LockRefH, lockRefH);
741         TlsSetValue(tls_LockRefT, lockRefT);
742     }
743
744     osi_assertx(lockp->readers > 0, "osi_SleepR: not held");
745
746     for ( i=0; i < lockp->readers; i++) {
747         if ( lockp->tid[i] == tid ) {
748             for ( ; i < lockp->readers - 1; i++)
749                 lockp->tid[i] = lockp->tid[i+1];
750             lockp->tid[i] = 0;
751             break;
752         }
753     }
754
755     /* XXX better to get the list of things to wakeup from TSignalForMLs, and
756      * then do the wakeup after SleepSpin releases the low-level mutex.
757      */
758     if (--(lockp->readers) == 0 && lockp->waiters) {
759         osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
760     }
761
762     /* now call into scheduler to sleep atomically with releasing spin lock */
763     osi_SleepSpin(sleepVal, csp);
764 }
765
/* osi_SleepW
 *
 * Atomically release a write (exclusive) hold on lockp and sleep on
 * sleepVal (to be awakened by osi_Wakeup).  Panics if the write lock is
 * not held.  Non-base lock types are dispatched through osi_lockOps.
 */
void osi_SleepW(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepWProc)(sleepVal, lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        /* drop this lock from the per-thread held-lock queue */
        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid[0] = 0;
    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* and finally release the big lock */
    osi_SleepSpin(sleepVal, csp);
}
811
/* osi_SleepM
 *
 * Atomically release mutex lockp and sleep on sleepVal (to be awakened
 * by osi_Wakeup).  Panics if the mutex is not held.  Non-base lock
 * types are dispatched through osi_lockOps.
 */
void osi_SleepM(LONG_PTR sleepVal, struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepMProc)(sleepVal, lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        /* drop this lock from the per-thread held-lock queue */
        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM not held");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid = 0;
    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* and finally release the big lock */
    osi_SleepSpin(sleepVal, csp);
}
856
857 void lock_FinalizeRWLock(osi_rwlock_t *lockp)
858 {
859     long i;
860
861     if ((i=lockp->type) != 0)
862         if (i >= 0 && i < OSI_NLOCKTYPES)
863             (osi_lockOps[i]->FinalizeRWLockProc)(lockp);
864 }
865
866 void lock_FinalizeMutex(osi_mutex_t *lockp)
867 {
868     long i;
869
870     if ((i=lockp->type) != 0)
871         if (i >= 0 && i < OSI_NLOCKTYPES)
872             (osi_lockOps[i]->FinalizeMutexProc)(lockp);
873 }
874
875 void lock_InitializeMutex(osi_mutex_t *mp, char *namep, unsigned short level)
876 {
877     int i;
878
879     if ((i = osi_lockTypeDefault) > 0) {
880         if (i >= 0 && i < OSI_NLOCKTYPES)
881             (osi_lockOps[i]->InitializeMutexProc)(mp, namep, level);
882         return;
883     }
884
885     /*
886      * otherwise we have the base case, which requires no special
887      * initialization.
888      */
889     memset(mp, 0, sizeof(osi_mutex_t));
890     mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
891     mp->level = level;
892     osi_TInit(&mp->d.turn);
893     return;
894 }
895
896 void lock_InitializeRWLock(osi_rwlock_t *mp, char *namep, unsigned short level)
897 {
898     int i;
899
900     if ((i = osi_lockTypeDefault) > 0) {
901         if (i >= 0 && i < OSI_NLOCKTYPES)
902             (osi_lockOps[i]->InitializeRWLockProc)(mp, namep, level);
903         return;
904     }
905
906     /* otherwise we have the base case, which requires no special
907      * initialization.
908      */
909     memset(mp, 0, sizeof(osi_rwlock_t));
910     mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
911     mp->level = level;
912     osi_TInit(&mp->d.turn);
913     return;
914 }
915
916 int lock_GetRWLockState(osi_rwlock_t *lp)
917 {
918     long i;
919     CRITICAL_SECTION *csp;
920
921     if ((i=lp->type) != 0)
922         if (i >= 0 && i < OSI_NLOCKTYPES)
923             return (osi_lockOps[i]->GetRWLockState)(lp);
924
925     /* otherwise we're the fast base type */
926     csp = &osi_baseAtomicCS[lp->atomicIndex];
927     EnterCriticalSection(csp);
928
929     /* here we have the fast lock, so see if we can obtain the real lock */
930     if (lp->flags & OSI_LOCKFLAG_EXCL)
931         i = OSI_RWLOCK_WRITEHELD;
932     else
933         i = 0;
934     if (lp->readers > 0)
935         i |= OSI_RWLOCK_READHELD;
936
937     LeaveCriticalSection(csp);
938
939     return i;
940 }
941
942 int lock_GetMutexState(struct osi_mutex *mp)
943 {
944     long i;
945     CRITICAL_SECTION *csp;
946
947     if ((i=mp->type) != 0)
948         if (i >= 0 && i < OSI_NLOCKTYPES)
949             return (osi_lockOps[i]->GetMutexState)(mp);
950
951     /* otherwise we're the fast base type */
952     csp = &osi_baseAtomicCS[mp->atomicIndex];
953     EnterCriticalSection(csp);
954
955     if (mp->flags & OSI_LOCKFLAG_EXCL)
956         i = OSI_MUTEX_HELD;
957     else
958         i = 0;
959
960     LeaveCriticalSection(csp);
961
962     return i;
963 }