/*
 * Copyright 2000, International Business Machines Corporation and others.
 * All Rights Reserved.
 *
 * This software has been released under the terms of the IBM Public
 * License.  For details, see the LICENSE file in the top-level source
 * directory or online at http://www.openafs.org/dl/license10.html
 */

/* Copyright (C) 1994 Cazamar Systems, Inc. */


#include <afs/param.h>
#include <afs/stds.h>

#include <windows.h>
#include "osi.h"
#include <assert.h>
#include <stdio.h>

/* atomicity-providing critical sections */
CRITICAL_SECTION osi_baseAtomicCS[OSI_MUTEXHASHSIZE];
static long     atomicIndexCounter = 0;

/* Thread local storage index for lock tracking */
static DWORD tls_LockRefH = 0;
static DWORD tls_LockRefT = 0;
static BOOLEAN lockOrderValidation = 0;
static osi_lock_ref_t * lock_ref_FreeListp = NULL;
static osi_lock_ref_t * lock_ref_FreeListEndp = NULL;
CRITICAL_SECTION lock_ref_CS;

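/*
 * When lock order validation is enabled via osi_SetLockOrderValidation(),
 * each thread records every base lock it holds on a per-thread queue of
 * osi_lock_ref_t entries whose head and tail live in the two TLS slots
 * above.  A minimal sketch of how a caller might turn this on at startup
 * (the call site is assumed, not part of this module):
 *
 *     osi_BaseInit();                  // allocate TLS slots, init sections
 *     osi_SetLockOrderValidation(1);   // debug builds: enforce lock levels
 */
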
void osi_BaseInit(void)
{
    int i;

    for(i=0; i<OSI_MUTEXHASHSIZE; i++)
        InitializeCriticalSection(&osi_baseAtomicCS[i]);

    if ((tls_LockRefH = TlsAlloc()) == TLS_OUT_OF_INDEXES)
        osi_panic("TlsAlloc(tls_LockRefH) failure", __FILE__, __LINE__);

    if ((tls_LockRefT = TlsAlloc()) == TLS_OUT_OF_INDEXES)
        osi_panic("TlsAlloc(tls_LockRefT) failure", __FILE__, __LINE__);

    InitializeCriticalSection(&lock_ref_CS);
}

void
osi_SetLockOrderValidation(int on)
{
    lockOrderValidation = (BOOLEAN)on;
}

static osi_lock_ref_t *
lock_GetLockRef(void * lockp, char type)
{
    osi_lock_ref_t * lockRefp = NULL;

    EnterCriticalSection(&lock_ref_CS);
    if (lock_ref_FreeListp) {
        lockRefp = lock_ref_FreeListp;
        osi_QRemoveHT( (osi_queue_t **) &lock_ref_FreeListp,
                       (osi_queue_t **) &lock_ref_FreeListEndp,
                       &lockRefp->q);
    }
    LeaveCriticalSection(&lock_ref_CS);

    if (lockRefp == NULL) {
        lockRefp = (osi_lock_ref_t *)malloc(sizeof(osi_lock_ref_t));
        if (lockRefp == NULL)
            osi_panic("lock_GetLockRef: out of memory", __FILE__, __LINE__);
    }

    memset(lockRefp, 0, sizeof(osi_lock_ref_t));
    lockRefp->type = type;
    switch (type) {
    case OSI_LOCK_MUTEX:
        lockRefp->mx = lockp;
        break;
    case OSI_LOCK_RW:
        lockRefp->rw = lockp;
        break;
    default:
        osi_panic("Invalid Lock Type", __FILE__, __LINE__);
    }

    return lockRefp;
}

static void
lock_FreeLockRef(osi_lock_ref_t * lockRefp)
{
    EnterCriticalSection(&lock_ref_CS);
    osi_QAddH( (osi_queue_t **) &lock_ref_FreeListp,
               (osi_queue_t **) &lock_ref_FreeListEndp,
               &lockRefp->q);
    LeaveCriticalSection(&lock_ref_CS);
}

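/*
 * Lock references are recycled through the free list above rather than
 * freed, avoiding malloc/free traffic on every acquire/release while
 * validation is enabled.  The free list is protected by lock_ref_CS and
 * never by the locks being tracked, so it cannot distort lock ordering.
 */
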
void lock_VerifyOrderRW(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_rwlock_t *lockp)
{
    char msg[512];
    osi_lock_ref_t * lockRefp;

    for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
        if (lockRefp->type == OSI_LOCK_RW) {
            if (lockRefp->rw == lockp) {
                sprintf(msg, "RW Lock 0x%p level %d already held", lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
            if (lockRefp->rw->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                         lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        } else {
            if (lockRefp->mx->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                         lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        }
    }
}

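/*
 * The rule enforced above: a thread may only acquire locks in
 * non-decreasing level order, and never the same lock twice.  For
 * example, with two hypothetical locks
 *
 *     lock_InitializeRWLock(&scache_lock, "scache", 2);
 *     lock_InitializeMutex(&buf_mutex, "buf", 5);
 *
 * a thread holding buf_mutex (level 5) panics if it requests scache_lock
 * (level 2), while the reverse order is allowed.  Level-0 locks are
 * exempt from validation.
 */
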
void lock_VerifyOrderMX(osi_queue_t *lockRefH, osi_queue_t *lockRefT, osi_mutex_t *lockp)
{
    char msg[512];
    osi_lock_ref_t * lockRefp;

    for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
        if (lockRefp->type == OSI_LOCK_MUTEX) {
            if (lockRefp->mx == lockp) {
                sprintf(msg, "MX Lock 0x%p level %d already held", lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
            if (lockRefp->mx->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                         lockRefp->mx, lockRefp->mx->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        } else {
            if (lockRefp->rw->level > lockp->level) {
                sprintf(msg, "Lock hierarchy violation Held lock 0x%p level %d > Requested lock 0x%p level %d",
                         lockRefp->rw, lockRefp->rw->level, lockp, lockp->level);
                osi_panic(msg, __FILE__, __LINE__);
            }
        }
    }
}

void lock_ObtainWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainWriteProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
        (lockp->readers > 0)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
        lockp->waiters--;
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = thrd_Current();
    }
    LeaveCriticalSection(csp);

    /* track this lock only when it participates in order validation;
     * the release paths only remove refs for locks with level != 0 */
    if (lockOrderValidation && lockp->level != 0) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}

void lock_ObtainRead(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainReadProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    for ( i=0; i < lockp->readers; i++ ) {
        osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, lockp->tid, csp);
        lockp->waiters--;
        osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0);
    } else {
        /* if we're here, all clear to set the lock */
        if (++lockp->readers <= OSI_RWLOCK_THREADS)
            lockp->tid[lockp->readers-1] = tid;
    }
    LeaveCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}

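/*
 * Typical base-lock usage, as an illustrative sketch (the lock name and
 * the code around it are assumptions, not part of this module):
 *
 *     osi_rwlock_t cfg_lock;
 *     lock_InitializeRWLock(&cfg_lock, "config", 0);
 *
 *     lock_ObtainRead(&cfg_lock);      // many concurrent readers
 *     ...read shared state...
 *     lock_ReleaseRead(&cfg_lock);
 *
 *     lock_ObtainWrite(&cfg_lock);     // one exclusive writer
 *     ...mutate shared state...
 *     lock_ReleaseWrite(&cfg_lock);
 */
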
void lock_ReleaseRead(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseReadProc)(lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "read lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->readers > 0, "read lock not held");

    for ( i=0; i < lockp->readers; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    /* releasing a read lock can allow readers or writers */
    if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}

void lock_ReleaseWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseWriteProc)(lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "write lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");

    lockp->tid[0] = 0;

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    if (!osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}

void lock_ConvertWToR(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertWToRProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");

    /* convert write lock to read lock */
    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->readers++;

    if (!osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}

void lock_ConvertRToW(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertRToWProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held");
    osi_assertx(lockp->readers > 0, "read lock not held");

    for ( i=0; i < lockp->readers; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    if (--lockp->readers == 0) {
        /* convert read lock to write lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = tid;
    } else {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
        lockp->waiters--;
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    }

    LeaveCriticalSection(csp);
}

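/*
 * Note on the conversions above: lock_ConvertWToR downgrades without ever
 * releasing the lock, but lock_ConvertRToW must wait whenever other
 * readers remain, so state observed under the read lock may change before
 * the write lock is granted.  Callers should revalidate after converting;
 * an illustrative sketch (names are assumptions):
 *
 *     lock_ObtainRead(&cfg_lock);
 *     if (needs_update) {
 *         lock_ConvertRToW(&cfg_lock);
 *         if (needs_update)       // recheck: we may have slept meanwhile
 *             do_update();
 *         lock_ReleaseWrite(&cfg_lock);
 *     } else {
 *         lock_ReleaseRead(&cfg_lock);
 *     }
 */
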
void lock_ObtainMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainMutexProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderMX(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, &lockp->tid, csp);
        lockp->waiters--;
        osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid = thrd_Current();
    }
    LeaveCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}

void lock_ReleaseMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseMutexProc)(lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }

        osi_assertx(found, "mutex lock not found in TLS queue");
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held");
    osi_assertx(lockp->tid == thrd_Current(), "mutex not held by current thread");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid = 0;
    if (!osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}

int lock_TryRead(struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryReadProc)(lockp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_RW) {
                    osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
                }
            }
        }
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        i = 0;
    }
    else {
        /* if we're here, all clear to set the lock */
        /* <= here: tid[] has OSI_RWLOCK_THREADS slots, as in lock_ObtainRead */
        if (++lockp->readers <= OSI_RWLOCK_THREADS)
            lockp->tid[lockp->readers-1] = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i && lockp->level != 0) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}


int lock_TryWrite(struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryWriteProc)(lockp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_RW) {
                    osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
                }
            }
        }
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)
         || (lockp->readers > 0)) {
        i = 0;
    }
    else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i && lockp->level != 0) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}


int lock_TryMutex(struct osi_mutex *lockp) {
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryMutexProc)(lockp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_MUTEX) {
                    osi_assertx(lockRefp->mx != lockp, "Mutex already held");
                }
            }
        }
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        i = 0;
    }
    else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i && lockp->level != 0) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
    return i;
}

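/*
 * The Try* variants never block: they return 1 and record ownership if
 * the lock was free, and 0 otherwise.  A common pattern (sketch, caller
 * code assumed):
 *
 *     if (lock_TryMutex(&buf_mutex)) {
 *         ...fast path: got the lock without waiting...
 *         lock_ReleaseMutex(&buf_mutex);
 *     } else {
 *         ...defer the work, or fall back to lock_ObtainMutex()...
 *     }
 */
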
void osi_SleepR(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepRProc)(sleepVal, lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->readers > 0, "osi_SleepR: not held");

    for ( i=0; i < lockp->readers; i++) {
        if ( lockp->tid[i] == tid ) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    /* XXX better to get the list of things to wakeup from TSignalForMLs, and
     * then do the wakeup after SleepSpin releases the low-level mutex.
     */
    if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* now call into scheduler to sleep atomically with releasing spin lock */
    osi_SleepSpin(sleepVal, csp);
}

void osi_SleepW(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepWProc)(sleepVal, lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid[0] = 0;
    if (!osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* now call into scheduler to sleep atomically with releasing spin lock */
    osi_SleepSpin(sleepVal, csp);
}

void osi_SleepM(LONG_PTR sleepVal, struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepMProc)(sleepVal, lockp);
        return;
    }

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_MUTEX && lockRefp->mx == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM not held");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid = 0;
    if (!osi_TEmpty(&lockp->d.turn)) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* now call into scheduler to sleep atomically with releasing spin lock */
    osi_SleepSpin(sleepVal, csp);
}

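/*
 * The osi_Sleep* routines atomically drop the caller's lock and put the
 * thread to sleep on sleepVal; a cooperating thread wakes it by calling
 * osi_Wakeup() (the counterpart in the osi sleep package) with the same
 * value.  Note that the lock is NOT reacquired on wakeup.  Illustrative
 * condition-wait loop (names are assumptions):
 *
 *     lock_ObtainMutex(&buf_mutex);
 *     while (!buffer_ready) {
 *         osi_SleepM((LONG_PTR)&buffer_ready, &buf_mutex); // mutex dropped
 *         lock_ObtainMutex(&buf_mutex);                    // re-obtain, recheck
 *     }
 *     lock_ReleaseMutex(&buf_mutex);
 *
 *     // waker side, after setting buffer_ready under the mutex:
 *     osi_Wakeup((LONG_PTR)&buffer_ready);
 */
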
void lock_FinalizeRWLock(osi_rwlock_t *lockp)
{
    long i;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->FinalizeRWLockProc)(lockp);
}

void lock_FinalizeMutex(osi_mutex_t *lockp)
{
    long i;

    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->FinalizeMutexProc)(lockp);
}

void lock_InitializeMutex(osi_mutex_t *mp, char *namep, unsigned short level)
{
    int i;

    if ((i = osi_lockTypeDefault) > 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->InitializeMutexProc)(mp, namep, level);
        return;
    }

    /* otherwise we have the base case, which requires no special
     * initialization.
     */
    mp->type = 0;
    mp->flags = 0;
    mp->tid = 0;
    mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
    mp->level = level;
    osi_TInit(&mp->d.turn);
    return;
}

void lock_InitializeRWLock(osi_rwlock_t *mp, char *namep, unsigned short level)
{
    int i;

    if ((i = osi_lockTypeDefault) > 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->InitializeRWLockProc)(mp, namep, level);
        return;
    }

    /* otherwise we have the base case, which requires no special
     * initialization.
     */
    memset(mp, 0, sizeof(osi_rwlock_t));
    mp->atomicIndex = (unsigned short)(InterlockedIncrement(&atomicIndexCounter) % OSI_MUTEXHASHSIZE);
    mp->level = level;
    osi_TInit(&mp->d.turn);
    return;
}

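/*
 * Base locks do not own a private critical section: atomicIndex hashes
 * each lock onto one of the OSI_MUTEXHASHSIZE shared sections in
 * osi_baseAtomicCS, so unrelated locks may briefly contend on the same
 * section.  Illustrative initialization (names are assumptions):
 *
 *     osi_mutex_t m;
 *     osi_rwlock_t rw;
 *     lock_InitializeMutex(&m, "my mutex", 0);    // level 0: no order checks
 *     lock_InitializeRWLock(&rw, "my rwlock", 3); // level 3: order enforced
 */
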
int lock_GetRWLockState(osi_rwlock_t *lp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i=lp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->GetRWLockState)(lp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lp->atomicIndex];
    EnterCriticalSection(csp);

    /* here we have the fast lock, so read out the lock state */
    if (lp->flags & OSI_LOCKFLAG_EXCL)
        i = OSI_RWLOCK_WRITEHELD;
    else
        i = 0;
    if (lp->readers > 0)
        i |= OSI_RWLOCK_READHELD;

    LeaveCriticalSection(csp);

    return i;
}

int lock_GetMutexState(struct osi_mutex *mp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i=mp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->GetMutexState)(mp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[mp->atomicIndex];
    EnterCriticalSection(csp);

    if (mp->flags & OSI_LOCKFLAG_EXCL)
        i = OSI_MUTEX_HELD;
    else
        i = 0;

    LeaveCriticalSection(csp);

    return i;
}