/*
 * Copyright (C) 1998, 1989 Transarc Corporation - All rights reserved
 *
 * (C) COPYRIGHT IBM CORPORATION 1987, 1988
 * LICENSED MATERIALS - PROPERTY OF IBM
 *
 *
 */

/* Copyright (C) 1994 Cazamar Systems, Inc. */


#include <afs/param.h>
#include <afs/stds.h>

#include <windows.h>
#include "osi.h"
#include <assert.h>

/* atomicity-providing critical sections */
CRITICAL_SECTION osi_baseAtomicCS[OSI_MUTEXHASHSIZE];

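/*
 * Note on the scheme used throughout this file (descriptive comment added for
 * clarity; details inferred from the code below): every routine first checks
 * lockp->type.  A nonzero type is dispatched through the osi_lockOps[]
 * vtable; type 0 is the "fast base" implementation defined here.  Base locks
 * protect their state (flags, readers, waiters and the d.turn turnstile) by
 * entering the critical section selected by their atomicIndex, which
 * osi_MUTEXHASH assigns at initialization time; hashing locks onto
 * OSI_MUTEXHASHSIZE sections presumably just spreads contention across
 * unrelated locks.  osi_BaseInit, below, must run before any base lock is
 * used, since it initializes these critical sections.
 */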
void osi_BaseInit(void)
{
        int i;

        for(i=0; i<OSI_MUTEXHASHSIZE; i++)
                InitializeCriticalSection(&osi_baseAtomicCS[i]);
}

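/*
 * lock_ObtainWrite: acquire a base read/write lock exclusively.  If a writer
 * already holds the lock, readers are present, or waiters are already queued,
 * the caller is queued on the lock's turnstile via osi_TWait, which evidently
 * releases csp while the thread sleeps and reacquires it before returning
 * (the assert after the wait shows the lock is handed over already marked
 * OSI_LOCKFLAG_EXCL).
 *
 * Illustrative use (sketch only; the lock name below is hypothetical):
 *
 *      osi_rwlock_t exampleLock;
 *      lock_InitializeRWLock(&exampleLock, "example lock");
 *      ...
 *      lock_ObtainWrite(&exampleLock);
 *      // update the shared structure
 *      lock_ReleaseWrite(&exampleLock);
 */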
void lock_ObtainWrite(osi_rwlock_t *lockp)
{
        long i;
        CRITICAL_SECTION *csp;

        if ((i=lockp->type) != 0) {
                (osi_lockOps[i]->ObtainWriteProc)(lockp);
                return;
        }

        /* otherwise we're the fast base type */
        csp = &osi_baseAtomicCS[lockp->atomicIndex];
        EnterCriticalSection(csp);

        /* here we have the fast lock, so see if we can obtain the real lock */
        if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)
                || (lockp->readers > 0)) {
                lockp->waiters++;
                osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
                lockp->waiters--;
                osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
        }
        else {
                /* if we're here, all clear to set the lock */
                lockp->flags |= OSI_LOCKFLAG_EXCL;
        }

        LeaveCriticalSection(csp);
}

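/*
 * lock_ObtainRead: acquire a base read/write lock shared.  Any number of
 * readers may hold the lock at once; a new reader waits only if a writer
 * holds the lock or other waiters are already queued (the latter presumably
 * keeps queued writers from being starved by a stream of new readers).  When
 * a sleeping reader is resumed, the wakeup path has already incremented
 * lockp->readers on its behalf, as the assert after osi_TWait indicates.
 */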
void lock_ObtainRead(osi_rwlock_t *lockp)
{
        long i;
        CRITICAL_SECTION *csp;

        if ((i=lockp->type) != 0) {
                (osi_lockOps[i]->ObtainReadProc)(lockp);
                return;
        }

        /* otherwise we're the fast base type */
        csp = &osi_baseAtomicCS[lockp->atomicIndex];
        EnterCriticalSection(csp);

        /* here we have the fast lock, so see if we can obtain the real lock */
        if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
                lockp->waiters++;
                osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, csp);
                lockp->waiters--;
                osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0);
        }
        else {
                /* if we're here, all clear to set the lock */
                lockp->readers++;
        }
        LeaveCriticalSection(csp);
}

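/*
 * lock_ReleaseRead / lock_ReleaseWrite: drop a base lock.  When the release
 * makes the lock available and waiters are queued on the turnstile,
 * osi_TSignalForMLs is handed csp and evidently releases it after waking the
 * appropriate waiters; otherwise the critical section is released directly.
 * Only one of the two paths leaves the critical section.
 */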
void lock_ReleaseRead(osi_rwlock_t *lockp)
{
        long i;
        CRITICAL_SECTION *csp;

        if ((i = lockp->type) != 0) {
                (osi_lockOps[i]->ReleaseReadProc)(lockp);
                return;
        }

        /* otherwise we're the fast base type */
        csp = &osi_baseAtomicCS[lockp->atomicIndex];
        EnterCriticalSection(csp);

        osi_assertx(lockp->readers > 0, "read lock not held");

        /* releasing a read lock can allow readers or writers */
        if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
                osi_TSignalForMLs(&lockp->d.turn, 0, csp);
        }
        else {
                /* and finally release the big lock */
                LeaveCriticalSection(csp);
        }
}

void lock_ReleaseWrite(osi_rwlock_t *lockp)
{
        long i;
        CRITICAL_SECTION *csp;

        if ((i = lockp->type) != 0) {
                (osi_lockOps[i]->ReleaseWriteProc)(lockp);
                return;
        }

        /* otherwise we're the fast base type */
        csp = &osi_baseAtomicCS[lockp->atomicIndex];
        EnterCriticalSection(csp);

        osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");

        lockp->flags &= ~OSI_LOCKFLAG_EXCL;
        if (!osi_TEmpty(&lockp->d.turn)) {
                osi_TSignalForMLs(&lockp->d.turn, 0, csp);
        }
        else {
                /* and finally release the big lock */
                LeaveCriticalSection(csp);
        }
}

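/*
 * lock_ConvertWToR: downgrade a held write lock to a read lock without ever
 * releasing it.  The write bit is cleared and a reader reference is taken
 * under the same critical section, so no other writer can slip in between.
 * The second argument to osi_TSignalForMLs (commented "still have readers"
 * in the call below) apparently tells it to wake only waiters compatible
 * with readers still holding the lock.
 */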
void lock_ConvertWToR(osi_rwlock_t *lockp)
{
        long i;
        CRITICAL_SECTION *csp;

        if ((i = lockp->type) != 0) {
                (osi_lockOps[i]->ConvertWToRProc)(lockp);
                return;
        }

        /* otherwise we're the fast base type */
        csp = &osi_baseAtomicCS[lockp->atomicIndex];
        EnterCriticalSection(csp);

        osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");

        /* convert write lock to read lock */
        lockp->flags &= ~OSI_LOCKFLAG_EXCL;
        lockp->readers++;

        if (!osi_TEmpty(&lockp->d.turn)) {
                osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);
        }
        else {
                /* and finally release the big lock */
                LeaveCriticalSection(csp);
        }
}

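/*
 * lock_ObtainMutex / lock_ReleaseMutex: base mutexes reuse the same machinery
 * as the write side of a read/write lock; the holder is recorded with
 * OSI_LOCKFLAG_EXCL and contending threads queue on the turnstile.
 *
 * Illustrative use (sketch only; the mutex name below is hypothetical):
 *
 *      osi_mutex_t exampleMutex;
 *      lock_InitializeMutex(&exampleMutex, "example mutex");
 *      ...
 *      lock_ObtainMutex(&exampleMutex);
 *      // touch the protected state
 *      lock_ReleaseMutex(&exampleMutex);
 */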
void lock_ObtainMutex(struct osi_mutex *lockp)
{
        long i;
        CRITICAL_SECTION *csp;

        if ((i=lockp->type) != 0) {
                (osi_lockOps[i]->ObtainMutexProc)(lockp);
                return;
        }

        /* otherwise we're the fast base type */
        csp = &osi_baseAtomicCS[lockp->atomicIndex];
        EnterCriticalSection(csp);

        /* here we have the fast lock, so see if we can obtain the real lock */
        if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
                lockp->waiters++;
                osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
                lockp->waiters--;
                osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
        }
        else {
                /* if we're here, all clear to set the lock */
                lockp->flags |= OSI_LOCKFLAG_EXCL;
        }
        LeaveCriticalSection(csp);
}

void lock_ReleaseMutex(struct osi_mutex *lockp)
{
        long i;
        CRITICAL_SECTION *csp;

        if ((i = lockp->type) != 0) {
                (osi_lockOps[i]->ReleaseMutexProc)(lockp);
                return;
        }

        /* otherwise we're the fast base type */
        csp = &osi_baseAtomicCS[lockp->atomicIndex];
        EnterCriticalSection(csp);

        osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held");

        lockp->flags &= ~OSI_LOCKFLAG_EXCL;
        if (!osi_TEmpty(&lockp->d.turn)) {
                osi_TSignalForMLs(&lockp->d.turn, 0, csp);
        }
        else {
                /* and finally release the big lock */
                LeaveCriticalSection(csp);
        }
}

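/*
 * lock_TryRead / lock_TryWrite / lock_TryMutex: non-blocking variants.  They
 * take the lock only if it can be granted immediately (and, like the blocking
 * forms, defer to already-queued waiters), returning 1 on success and 0
 * otherwise.
 *
 * Illustrative use (sketch only; the lock name below is hypothetical):
 *
 *      if (lock_TryWrite(&exampleLock)) {
 *              // got the lock without blocking
 *              lock_ReleaseWrite(&exampleLock);
 *      }
 */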
int lock_TryRead(struct osi_rwlock *lockp)
{
        long i;
        CRITICAL_SECTION *csp;

        if ((i=lockp->type) != 0)
                return (osi_lockOps[i]->TryReadProc)(lockp);

        /* otherwise we're the fast base type */
        csp = &osi_baseAtomicCS[lockp->atomicIndex];
        EnterCriticalSection(csp);

        /* here we have the fast lock, so see if we can obtain the real lock */
        if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
                i = 0;
        }
        else {
                /* if we're here, all clear to set the lock */
                lockp->readers++;
                i = 1;
        }

        LeaveCriticalSection(csp);

        return i;
}


int lock_TryWrite(struct osi_rwlock *lockp)
{
        long i;
        CRITICAL_SECTION *csp;

        if ((i=lockp->type) != 0)
                return (osi_lockOps[i]->TryWriteProc)(lockp);

        /* otherwise we're the fast base type */
        csp = &osi_baseAtomicCS[lockp->atomicIndex];
        EnterCriticalSection(csp);

        /* here we have the fast lock, so see if we can obtain the real lock */
        if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)
                || (lockp->readers > 0)) {
                i = 0;
        }
        else {
                /* if we're here, all clear to set the lock */
                lockp->flags |= OSI_LOCKFLAG_EXCL;
                i = 1;
        }

        LeaveCriticalSection(csp);

        return i;
}


int lock_TryMutex(struct osi_mutex *lockp)
{
        long i;
        CRITICAL_SECTION *csp;

        if ((i=lockp->type) != 0)
                return (osi_lockOps[i]->TryMutexProc)(lockp);

        /* otherwise we're the fast base type */
        csp = &osi_baseAtomicCS[lockp->atomicIndex];
        EnterCriticalSection(csp);

        /* here we have the fast lock, so see if we can obtain the real lock */
        if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
                i = 0;
        }
        else {
                /* if we're here, all clear to set the lock */
                lockp->flags |= OSI_LOCKFLAG_EXCL;
                i = 1;
        }

        LeaveCriticalSection(csp);

        return i;
}

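/*
 * osi_SleepR / osi_SleepW / osi_SleepM: atomically release the caller's lock
 * and sleep on sleepVal.  The held lock is dropped (waking any now-eligible
 * waiters), and then osi_SleepSpin is called with the base critical section
 * still held so that the release of csp and the sleep happen as one step;
 * presumably the caller reacquires the lock itself after being woken.  Note
 * the existing XXX comment in osi_SleepR: osi_TSignalForMLs is called here
 * with a NULL csp, so the wakeups happen before the low-level mutex is
 * released.
 */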
void osi_SleepR(long sleepVal, struct osi_rwlock *lockp)
{
        long i;
        CRITICAL_SECTION *csp;

        if ((i = lockp->type) != 0) {
                (osi_lockOps[i]->SleepRProc)(sleepVal, lockp);
                return;
        }

        /* otherwise we're the fast base type */
        csp = &osi_baseAtomicCS[lockp->atomicIndex];
        EnterCriticalSection(csp);

        osi_assertx(lockp->readers > 0, "osi_SleepR: not held");

        /* XXX better to get the list of things to wakeup from TSignalForMLs, and
         * then do the wakeup after SleepSpin releases the low-level mutex.
         */
        if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
                osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
        }

        /* now call into scheduler to sleep atomically with releasing spin lock */
        osi_SleepSpin(sleepVal, csp);
}

void osi_SleepW(long sleepVal, struct osi_rwlock *lockp)
{
        long i;
        CRITICAL_SECTION *csp;

        if ((i = lockp->type) != 0) {
                (osi_lockOps[i]->SleepWProc)(sleepVal, lockp);
                return;
        }

        /* otherwise we're the fast base type */
        csp = &osi_baseAtomicCS[lockp->atomicIndex];
        EnterCriticalSection(csp);

        osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");

        lockp->flags &= ~OSI_LOCKFLAG_EXCL;
        if (!osi_TEmpty(&lockp->d.turn)) {
                osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
        }

        /* now call into scheduler to sleep atomically with releasing spin lock */
        osi_SleepSpin(sleepVal, csp);
}

void osi_SleepM(long sleepVal, struct osi_mutex *lockp)
{
        long i;
        CRITICAL_SECTION *csp;

        if ((i = lockp->type) != 0) {
                (osi_lockOps[i]->SleepMProc)(sleepVal, lockp);
                return;
        }

        /* otherwise we're the fast base type */
        csp = &osi_baseAtomicCS[lockp->atomicIndex];
        EnterCriticalSection(csp);

        osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM: not held");

        lockp->flags &= ~OSI_LOCKFLAG_EXCL;
        if (!osi_TEmpty(&lockp->d.turn)) {
                osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
        }

        /* now call into scheduler to sleep atomically with releasing spin lock */
        osi_SleepSpin(sleepVal, csp);
}

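/*
 * Finalization is a no-op for base locks (type 0); presumably only the
 * vtable-based lock types have per-lock state worth tearing down.
 */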
void lock_FinalizeRWLock(osi_rwlock_t *lockp)
{
        long i;

        if ((i=lockp->type) != 0)
                (osi_lockOps[i]->FinalizeRWLockProc)(lockp);
}

void lock_FinalizeMutex(osi_mutex_t *lockp)
{
        long i;

        if ((i=lockp->type) != 0)
                (osi_lockOps[i]->FinalizeMutexProc)(lockp);
}

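/*
 * lock_InitializeMutex / lock_InitializeRWLock: when osi_lockTypeDefault
 * selects a registered lock package, initialization is delegated to its
 * InitializeMutexProc / InitializeRWLockProc (which also receives the name).
 * Otherwise the base fields are set up directly: type 0, cleared flags, an
 * atomicIndex presumably hashed from the lock's address by osi_MUTEXHASH,
 * and an empty turnstile; the name is unused in the base case.
 */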
void lock_InitializeMutex(osi_mutex_t *mp, char *namep)
{
        int i;

        if ((i = osi_lockTypeDefault) > 0) {
                (osi_lockOps[i]->InitializeMutexProc)(mp, namep);
                return;
        }

        /* otherwise we have the base case, which requires no special
         * initialization.
         */
        mp->type = 0;
        mp->flags = 0;
        mp->atomicIndex = osi_MUTEXHASH(mp);
        osi_TInit(&mp->d.turn);
        return;
}

void lock_InitializeRWLock(osi_rwlock_t *mp, char *namep)
{
        int i;

        if ((i = osi_lockTypeDefault) > 0) {
                (osi_lockOps[i]->InitializeRWLockProc)(mp, namep);
                return;
        }

        /* otherwise we have the base case, which requires no special
         * initialization.
         */
        mp->type = 0;
        mp->flags = 0;
        mp->atomicIndex = osi_MUTEXHASH(mp);
        mp->readers = 0;
        osi_TInit(&mp->d.turn);
        return;
}

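/*
 * lock_GetRWLockState / lock_GetMutexState: report whether a lock is held
 * (OSI_RWLOCK_WRITEHELD / OSI_RWLOCK_READHELD bits, or OSI_MUTEX_HELD).  The
 * state is sampled under the base critical section, but it is only a
 * snapshot: by the time the caller examines the result, the lock may already
 * have changed hands.
 */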
int lock_GetRWLockState(osi_rwlock_t *lp)
{
        long i;
        CRITICAL_SECTION *csp;

        if ((i=lp->type) != 0)
                return (osi_lockOps[i]->GetRWLockState)(lp);

        /* otherwise we're the fast base type */
        csp = &osi_baseAtomicCS[lp->atomicIndex];
        EnterCriticalSection(csp);

        /* here we have the fast lock, so we can read the lock state consistently */
        if (lp->flags & OSI_LOCKFLAG_EXCL) i = OSI_RWLOCK_WRITEHELD;
        else i = 0;
        if (lp->readers > 0) i |= OSI_RWLOCK_READHELD;

        LeaveCriticalSection(csp);

        return i;
}

int lock_GetMutexState(struct osi_mutex *mp)
{
        long i;
        CRITICAL_SECTION *csp;

        if ((i=mp->type) != 0)
                return (osi_lockOps[i]->GetMutexState)(mp);

        /* otherwise we're the fast base type */
        csp = &osi_baseAtomicCS[mp->atomicIndex];
        EnterCriticalSection(csp);

        if (mp->flags & OSI_LOCKFLAG_EXCL)
                i = OSI_MUTEX_HELD;
        else
                i = 0;

        LeaveCriticalSection(csp);

        return i;
}