2 * Copyright (C) 1998, 1989 Transarc Corporation - All rights reserved
4 * (C) COPYRIGHT IBM CORPORATION 1987, 1988
5 * LICENSED MATERIALS - PROPERTY OF IBM
10 /* Copyright (C) 1994 Cazamar Systems, Inc. */
13 #include <afs/param.h>
20 /* atomicity-providing critical sections */
/* Pool of Win32 critical sections shared by all "base"-type locks and
 * mutexes; a lock hashes into this pool via its atomicIndex field.
 * NOTE(review): each CS appears to guard only the lock's own bookkeeping
 * fields (flags/readers/waiters), never caller data -- confirm against
 * the osi headers. */
21 CRITICAL_SECTION osi_baseAtomicCS[OSI_MUTEXHASHSIZE];
/*
 * osi_BaseInit -- one-time initialization for the base lock package:
 * initializes every critical section in the osi_baseAtomicCS pool.
 * Must run before any base-type lock is obtained.
 * NOTE(review): gaps in the embedded numbering (23 -> 27) indicate elided
 * lines here (opening brace and the loop counter declaration).
 */
23 void osi_BaseInit(void)
27 for(i=0; i<OSI_MUTEXHASHSIZE; i++)
28 InitializeCriticalSection(&osi_baseAtomicCS[i]);
/*
 * lock_ObtainWrite -- acquire an osi rwlock for exclusive (write) access.
 *
 * A non-zero lockp->type dispatches to the registered lock-ops vector;
 * type 0 is the fast "base" implementation below, serialized by this
 * lock's hashed critical section.  If any writer, reader, or queued
 * waiter exists, the caller sleeps on the lock's turnstile
 * (OSI_SLEEPINFO_W4WRITE); otherwise OSI_LOCKFLAG_EXCL is set directly.
 * NOTE(review): numbering gaps show elided lines (the `long i;` decl,
 * a return after the dispatch, presumably waiters++/-- bracketing
 * osi_TWait, and the else separating the wait path from the direct-set
 * path -- confirm against the unsampled file).
 */
31 void lock_ObtainWrite(osi_rwlock_t *lockp)
34 CRITICAL_SECTION *csp;
36 if ((i=lockp->type) != 0) {
37 (osi_lockOps[i]->ObtainWriteProc)(lockp);
41 /* otherwise we're the fast base type */
42 csp = &osi_baseAtomicCS[lockp->atomicIndex];
43 EnterCriticalSection(csp);
45 /* here we have the fast lock, so see if we can obtain the real lock */
46 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)
47 || (lockp->readers > 0)) {
/* osi_TWait releases and reacquires csp around the sleep; on return the
 * turnstile has granted us the write lock, as the assert checks. */
49 osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
51 osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
54 /* if we're here, all clear to set the lock */
55 lockp->flags |= OSI_LOCKFLAG_EXCL;
58 LeaveCriticalSection(csp);
/*
 * lock_ObtainRead -- acquire an osi rwlock for shared (read) access.
 *
 * Typed locks dispatch through the lock-ops vector; the base path runs
 * under the hashed critical section.  A new reader blocks not only on a
 * held write lock but also whenever waiters are queued (waiters > 0) --
 * presumably so queued writers are not starved by a reader stream;
 * confirm against osi_TWait/osi_TSignalForMLs.
 * NOTE(review): numbering gaps show elided lines (decls, return after
 * dispatch, the else branch that bumps lockp->readers on the fast path).
 */
61 void lock_ObtainRead(osi_rwlock_t *lockp)
64 CRITICAL_SECTION *csp;
66 if ((i=lockp->type) != 0) {
67 (osi_lockOps[i]->ObtainReadProc)(lockp);
71 /* otherwise we're the fast base type */
72 csp = &osi_baseAtomicCS[lockp->atomicIndex];
73 EnterCriticalSection(csp);
75 /* here we have the fast lock, so see if we can obtain the real lock */
76 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
/* sleeping path: the waker credits us in lockp->readers (the &readers
 * argument), as the assert on wakeup verifies */
78 osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4READ, &lockp->readers, csp);
80 osi_assert(!(lockp->flags & OSI_LOCKFLAG_EXCL) && lockp->readers > 0);
83 /* if we're here, all clear to set the lock */
86 LeaveCriticalSection(csp);
/*
 * lock_ReleaseRead -- drop one shared (read) hold on an osi rwlock.
 *
 * Typed locks dispatch through the lock-ops vector.  On the base path,
 * the reader count is decremented under the hashed critical section;
 * when the last reader leaves and sleepers are queued on the turnstile,
 * osi_TSignalForMLs wakes them (second arg 0 = no readers remain, so
 * a waiting writer may be granted the lock).
 * NOTE(review): numbering gaps show elided lines (decls, return after
 * dispatch, closing braces).
 */
89 void lock_ReleaseRead(osi_rwlock_t *lockp)
92 CRITICAL_SECTION *csp;
94 if ((i = lockp->type) != 0) {
95 (osi_lockOps[i]->ReleaseReadProc)(lockp);
99 /* otherwise we're the fast base type */
100 csp = &osi_baseAtomicCS[lockp->atomicIndex];
101 EnterCriticalSection(csp);
103 osi_assertx(lockp->readers > 0, "read lock not held");
105 /* releasing a read lock can allow readers or writers */
106 if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
107 osi_TSignalForMLs(&lockp->d.turn, 0, csp);
110 /* and finally release the big lock */
111 LeaveCriticalSection(csp);
/*
 * lock_ReleaseWrite -- release exclusive (write) access to an osi rwlock.
 *
 * Typed locks dispatch through the lock-ops vector.  On the base path,
 * OSI_LOCKFLAG_EXCL is cleared under the hashed critical section and any
 * sleepers on the turnstile are awakened via osi_TSignalForMLs (second
 * arg 0 = no readers remain, so either readers or one writer may go).
 * NOTE(review): numbering gaps show elided lines (decls, return after
 * dispatch, closing braces).
 */
115 void lock_ReleaseWrite(osi_rwlock_t *lockp)
118 CRITICAL_SECTION *csp;
120 if ((i = lockp->type) != 0) {
121 (osi_lockOps[i]->ReleaseWriteProc)(lockp);
125 /* otherwise we're the fast base type */
126 csp = &osi_baseAtomicCS[lockp->atomicIndex];
127 EnterCriticalSection(csp);
129 osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
131 lockp->flags &= ~OSI_LOCKFLAG_EXCL;
132 if (!osi_TEmpty(&lockp->d.turn)) {
133 osi_TSignalForMLs(&lockp->d.turn, 0, csp);
136 /* and finally release the big lock */
137 LeaveCriticalSection(csp);
/*
 * lock_ConvertWToR -- atomically downgrade a held write lock to a read
 * lock, without ever releasing the lock in between.
 *
 * Typed locks dispatch through the lock-ops vector.  On the base path,
 * OSI_LOCKFLAG_EXCL is cleared and sleepers are signalled with the
 * "still have readers" flag set to 1, so only waiting readers (not a
 * writer) can be admitted alongside us.
 * NOTE(review): numbering gaps show elided lines; the elided line(s)
 * between 158 and 161 presumably increment lockp->readers to record our
 * new shared hold -- confirm against the unsampled file.
 */
141 void lock_ConvertWToR(osi_rwlock_t *lockp)
144 CRITICAL_SECTION *csp;
146 if ((i = lockp->type) != 0) {
147 (osi_lockOps[i]->ConvertWToRProc)(lockp);
151 /* otherwise we're the fast base type */
152 csp = &osi_baseAtomicCS[lockp->atomicIndex];
153 EnterCriticalSection(csp);
155 osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
157 /* convert write lock to read lock */
158 lockp->flags &= ~OSI_LOCKFLAG_EXCL;
161 if (!osi_TEmpty(&lockp->d.turn)) {
162 osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);
165 /* and finally release the big lock */
166 LeaveCriticalSection(csp);
/*
 * lock_ObtainMutex -- acquire an osi mutex (exclusive only; no readers).
 *
 * Same shape as lock_ObtainWrite but for the mutex type: typed mutexes
 * dispatch through the lock-ops vector; the base path sleeps on the
 * turnstile (OSI_SLEEPINFO_W4WRITE) while the mutex is held or waiters
 * are queued, otherwise sets OSI_LOCKFLAG_EXCL directly.
 * NOTE(review): numbering gaps show elided lines (decls, return after
 * dispatch, presumably waiters++/-- around osi_TWait and the else
 * separating wait path from direct-set path).
 */
170 void lock_ObtainMutex(struct osi_mutex *lockp)
173 CRITICAL_SECTION *csp;
175 if ((i=lockp->type) != 0) {
176 (osi_lockOps[i]->ObtainMutexProc)(lockp);
180 /* otherwise we're the fast base type */
181 csp = &osi_baseAtomicCS[lockp->atomicIndex];
182 EnterCriticalSection(csp);
184 /* here we have the fast lock, so see if we can obtain the real lock */
185 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
187 osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, csp);
189 osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
192 /* if we're here, all clear to set the lock */
193 lockp->flags |= OSI_LOCKFLAG_EXCL;
195 LeaveCriticalSection(csp);
/*
 * lock_ReleaseMutex -- release an osi mutex.
 *
 * Typed mutexes dispatch through the lock-ops vector.  On the base path,
 * OSI_LOCKFLAG_EXCL is cleared under the hashed critical section and any
 * turnstile sleepers are awakened via osi_TSignalForMLs.
 * NOTE(review): numbering gaps show elided lines (decls, return after
 * dispatch, closing braces).
 */
198 void lock_ReleaseMutex(struct osi_mutex *lockp)
201 CRITICAL_SECTION *csp;
203 if ((i = lockp->type) != 0) {
204 (osi_lockOps[i]->ReleaseMutexProc)(lockp);
208 /* otherwise we're the fast base type */
209 csp = &osi_baseAtomicCS[lockp->atomicIndex];
210 EnterCriticalSection(csp);
212 osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "mutex not held");
214 lockp->flags &= ~OSI_LOCKFLAG_EXCL;
215 if (!osi_TEmpty(&lockp->d.turn)) {
216 osi_TSignalForMLs(&lockp->d.turn, 0, csp);
219 /* and finally release the big lock */
220 LeaveCriticalSection(csp);
/*
 * lock_TryRead -- non-blocking attempt to obtain a read lock.
 * Returns nonzero on success, 0 if the lock was busy (for typed locks,
 * whatever the ops-vector TryReadProc returns).
 *
 * The base path tests availability under the hashed critical section;
 * like lock_ObtainRead, queued waiters also make the attempt fail.
 * NOTE(review): the success/failure bookkeeping is elided from this view
 * (presumably i=0 in the busy branch, readers++ and i=1 otherwise,
 * followed by `return i;`) -- confirm against the unsampled file.
 */
224 int lock_TryRead(struct osi_rwlock *lockp)
227 CRITICAL_SECTION *csp;
229 if ((i=lockp->type) != 0)
230 return (osi_lockOps[i]->TryReadProc)(lockp);
232 /* otherwise we're the fast base type */
233 csp = &osi_baseAtomicCS[lockp->atomicIndex];
234 EnterCriticalSection(csp);
236 /* here we have the fast lock, so see if we can obtain the real lock */
237 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
241 /* if we're here, all clear to set the lock */
246 LeaveCriticalSection(csp);
/*
 * lock_TryWrite -- non-blocking attempt to obtain a write lock.
 * Returns nonzero on success, 0 if the lock was busy (for typed locks,
 * whatever the ops-vector TryWriteProc returns).
 *
 * The base path fails if any writer, reader, or queued waiter exists;
 * on success it sets OSI_LOCKFLAG_EXCL.
 * NOTE(review): result bookkeeping is elided from this view (presumably
 * i=0 in the busy branch, i=1 on success, then `return i;`) -- confirm
 * against the unsampled file.
 */
252 int lock_TryWrite(struct osi_rwlock *lockp)
255 CRITICAL_SECTION *csp;
257 if ((i=lockp->type) != 0)
258 return (osi_lockOps[i]->TryWriteProc)(lockp);
260 /* otherwise we're the fast base type */
261 csp = &osi_baseAtomicCS[lockp->atomicIndex];
262 EnterCriticalSection(csp);
264 /* here we have the fast lock, so see if we can obtain the real lock */
265 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)
266 || (lockp->readers > 0)) {
270 /* if we're here, all clear to set the lock */
271 lockp->flags |= OSI_LOCKFLAG_EXCL;
275 LeaveCriticalSection(csp);
/*
 * lock_TryMutex -- non-blocking attempt to obtain an osi mutex.
 * Returns nonzero on success, 0 if the mutex was busy (for typed
 * mutexes, whatever the ops-vector TryMutexProc returns).
 *
 * The base path fails if the mutex is held or waiters are queued;
 * on success it sets OSI_LOCKFLAG_EXCL.
 * NOTE(review): result bookkeeping is elided from this view (presumably
 * i=0 in the busy branch, i=1 on success, then `return i;`).
 */
281 int lock_TryMutex(struct osi_mutex *lockp) {
283 CRITICAL_SECTION *csp;
285 if ((i=lockp->type) != 0)
286 return (osi_lockOps[i]->TryMutexProc)(lockp);
288 /* otherwise we're the fast base type */
289 csp = &osi_baseAtomicCS[lockp->atomicIndex];
290 EnterCriticalSection(csp);
292 /* here we have the fast lock, so see if we can obtain the real lock */
293 if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
297 /* if we're here, all clear to set the lock */
298 lockp->flags |= OSI_LOCKFLAG_EXCL;
302 LeaveCriticalSection(csp);
/*
 * osi_SleepR -- atomically release a held read lock and sleep on the
 * event value sleepVal (condition-variable style wait).
 *
 * Typed locks dispatch through the lock-ops vector.  On the base path
 * the reader count is dropped under the hashed critical section; if we
 * were the last reader and sleepers are queued, they are signalled with
 * a NULL csp (the CS is still held here and is released later by
 * osi_SleepSpin, which sleeps atomically with that release).
 * NOTE(review): numbering gaps show elided lines (decls, return after
 * dispatch, closing braces).
 */
307 void osi_SleepR(long sleepVal, struct osi_rwlock *lockp)
310 CRITICAL_SECTION *csp;
312 if ((i = lockp->type) != 0) {
313 (osi_lockOps[i]->SleepRProc)(sleepVal, lockp);
317 /* otherwise we're the fast base type */
318 csp = &osi_baseAtomicCS[lockp->atomicIndex];
319 EnterCriticalSection(csp);
321 osi_assertx(lockp->readers > 0, "osi_SleepR: not held");
323 /* XXX better to get the list of things to wakeup from TSignalForMLs, and
324 * then do the wakeup after SleepSpin releases the low-level mutex.
326 if (--lockp->readers == 0 && !osi_TEmpty(&lockp->d.turn)) {
327 osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
330 /* now call into scheduler to sleep atomically with releasing spin lock */
331 osi_SleepSpin(sleepVal, csp);
/*
 * osi_SleepW -- atomically release a held write lock and sleep on the
 * event value sleepVal.
 *
 * Typed locks dispatch through the lock-ops vector.  On the base path,
 * OSI_LOCKFLAG_EXCL is cleared under the hashed critical section, queued
 * sleepers are signalled with a NULL csp (the CS is released by
 * osi_SleepSpin, which sleeps atomically with that release).
 * NOTE(review): numbering gaps show elided lines (decls, return after
 * dispatch, closing braces).
 */
334 void osi_SleepW(long sleepVal, struct osi_rwlock *lockp)
337 CRITICAL_SECTION *csp;
339 if ((i = lockp->type) != 0) {
340 (osi_lockOps[i]->SleepWProc)(sleepVal, lockp);
344 /* otherwise we're the fast base type */
345 csp = &osi_baseAtomicCS[lockp->atomicIndex];
346 EnterCriticalSection(csp);
348 osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");
350 lockp->flags &= ~OSI_LOCKFLAG_EXCL;
351 if (!osi_TEmpty(&lockp->d.turn)) {
352 osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
355 /* and finally release the big lock */
356 osi_SleepSpin(sleepVal, csp);
/*
 * osi_SleepM -- atomically release a held osi mutex and sleep on the
 * event value sleepVal.  Mirrors osi_SleepW for the mutex type.
 *
 * Typed mutexes dispatch through the lock-ops vector.  On the base path,
 * OSI_LOCKFLAG_EXCL is cleared, queued sleepers are signalled with a
 * NULL csp (the CS is released by osi_SleepSpin, which sleeps atomically
 * with that release).
 * NOTE(review): numbering gaps show elided lines (decls, return after
 * dispatch, closing braces).
 */
359 void osi_SleepM(long sleepVal, struct osi_mutex *lockp)
362 CRITICAL_SECTION *csp;
364 if ((i = lockp->type) != 0) {
365 (osi_lockOps[i]->SleepMProc)(sleepVal, lockp);
369 /* otherwise we're the fast base type */
370 csp = &osi_baseAtomicCS[lockp->atomicIndex];
371 EnterCriticalSection(csp);
373 osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepM not held");
375 lockp->flags &= ~OSI_LOCKFLAG_EXCL;
376 if (!osi_TEmpty(&lockp->d.turn)) {
377 osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
380 /* and finally release the big lock */
381 osi_SleepSpin(sleepVal, csp);
/*
 * lock_FinalizeRWLock -- tear down an rwlock.  Only typed locks need
 * per-type finalization (via the ops vector); base-type locks use the
 * shared osi_baseAtomicCS pool and so require no per-lock cleanup.
 */
384 void lock_FinalizeRWLock(osi_rwlock_t *lockp)
388 if ((i=lockp->type) != 0)
389 (osi_lockOps[i]->FinalizeRWLockProc)(lockp);
/*
 * lock_FinalizeMutex -- tear down a mutex.  Only typed mutexes need
 * per-type finalization (via the ops vector); base-type mutexes require
 * no per-lock cleanup.
 */
392 void lock_FinalizeMutex(osi_mutex_t *lockp)
396 if ((i=lockp->type) != 0)
397 (osi_lockOps[i]->FinalizeMutexProc)(lockp);
/*
 * lock_InitializeMutex -- initialize mutex *mp with optional debug name
 * namep.  If a non-base default lock type is configured
 * (osi_lockTypeDefault > 0), initialization is delegated to that type's
 * ops vector; otherwise the base-type setup assigns the hashed
 * atomicIndex and initializes the turnstile.
 * NOTE(review): numbering gaps show elided lines (decls, return after
 * dispatch, and presumably zeroing of the structure before line 414).
 */
400 void lock_InitializeMutex(osi_mutex_t *mp, char *namep)
404 if ((i = osi_lockTypeDefault) > 0) {
405 (osi_lockOps[i]->InitializeMutexProc)(mp, namep);
409 /* otherwise we have the base case, which requires no special
414 mp->atomicIndex = osi_MUTEXHASH(mp);
415 osi_TInit(&mp->d.turn);
/*
 * lock_InitializeRWLock -- initialize rwlock *mp with optional debug
 * name namep.  Same shape as lock_InitializeMutex: delegate to the
 * configured default lock type's ops vector if one is set, otherwise
 * perform base-type setup (hashed atomicIndex + turnstile init).
 * NOTE(review): numbering gaps show elided lines (decls, return after
 * dispatch, and presumably zeroing of the structure before line 433).
 */
419 void lock_InitializeRWLock(osi_rwlock_t *mp, char *namep)
423 if ((i = osi_lockTypeDefault) > 0) {
424 (osi_lockOps[i]->InitializeRWLockProc)(mp, namep);
428 /* otherwise we have the base case, which requires no special
433 mp->atomicIndex = osi_MUTEXHASH(mp);
435 osi_TInit(&mp->d.turn);
/*
 * lock_GetRWLockState -- return a snapshot of lp's state as a bitmask of
 * OSI_RWLOCK_WRITEHELD / OSI_RWLOCK_READHELD.  Typed locks dispatch
 * through the ops vector; the base path samples flags/readers under the
 * hashed critical section.  The snapshot is advisory only -- the state
 * may change as soon as the critical section is released.
 * NOTE(review): the initialization of i to 0 and the `return i;` are
 * elided from this view (numbering gaps) -- confirm against the
 * unsampled file.
 */
439 int lock_GetRWLockState(osi_rwlock_t *lp)
442 CRITICAL_SECTION *csp;
444 if ((i=lp->type) != 0)
445 return (osi_lockOps[i]->GetRWLockState)(lp);
447 /* otherwise we're the fast base type */
448 csp = &osi_baseAtomicCS[lp->atomicIndex];
449 EnterCriticalSection(csp);
451 /* here we have the fast lock, so see if we can obtain the real lock */
452 if (lp->flags & OSI_LOCKFLAG_EXCL) i = OSI_RWLOCK_WRITEHELD;
454 if (lp->readers > 0) i |= OSI_RWLOCK_READHELD;
456 LeaveCriticalSection(csp);
461 int lock_GetMutexState(struct osi_mutex *mp) {
463 CRITICAL_SECTION *csp;
465 if ((i=mp->type) != 0)
466 return (osi_lockOps[i]->GetMutexState)(mp);
468 /* otherwise we're the fast base type */
469 csp = &osi_baseAtomicCS[mp->atomicIndex];
470 EnterCriticalSection(csp);
472 if (mp->flags & OSI_LOCKFLAG_EXCL)
477 LeaveCriticalSection(csp);