RX: No userspace atomic_ops in Solaris pre-10
[openafs.git] / src / rx / rx_atomic.h
/*
 * Copyright (c) 2010 Your Filesystem Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef OPENAFS_RX_ATOMIC_H
#define OPENAFS_RX_ATOMIC_H 1

#define RX_ATOMIC_INIT(i) { (i) }

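/*
 * Example usage (an illustrative sketch only; the counter and function
 * names below are hypothetical and not part of this header):
 *
 *     static rx_atomic_t nWaiters = RX_ATOMIC_INIT(0);
 *
 *     void
 *     waiter_enter(void) {
 *         rx_atomic_inc(&nWaiters);
 *     }
 *
 *     int
 *     waiter_exit(void) {
 *         return rx_atomic_dec_and_read(&nWaiters);  // 0 => last waiter gone
 *     }
 */
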
#ifdef AFS_NT40_ENV
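/* Windows: the Interlocked* intrinsics give us atomic increment, decrement
 * and add; set and read are plain stores/loads of the volatile int. */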
typedef struct {
    volatile int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    InterlockedIncrement(&atomic->var);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return InterlockedIncrement(&atomic->var);
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    InterlockedExchangeAdd(&atomic->var, change);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    InterlockedDecrement(&atomic->var);
}

static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    return InterlockedDecrement(&atomic->var);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    InterlockedExchangeAdd(&atomic->var, 0 - change);
}

#elif defined(AFS_DARWIN80_ENV) || defined(AFS_USR_DARWIN80_ENV)
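/* Mac OS X: use the OSAtomic*32 operations from <libkern/OSAtomic.h>.
 * Kernel builds spell these OSIncrementAtomic / OSAddAtomic /
 * OSDecrementAtomic, so map the user-space names onto them there. */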

#include <libkern/OSAtomic.h>
#if defined(KERNEL) && !defined(UKERNEL)
#define OSAtomicIncrement32 OSIncrementAtomic
#define OSAtomicAdd32 OSAddAtomic
#define OSAtomicDecrement32 OSDecrementAtomic
#endif

typedef struct {
    volatile int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    OSAtomicIncrement32(&atomic->var);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return OSAtomicIncrement32(&atomic->var);
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    OSAtomicAdd32(change, &atomic->var);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    OSAtomicDecrement32(&atomic->var);
}

static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    return OSAtomicDecrement32(&atomic->var);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    OSAtomicAdd32(0 - change, &atomic->var);
}
#elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
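/* Linux kernel: wrap the kernel's own atomic_t primitives directly. */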
#include <asm/atomic.h>

typedef atomic_t rx_atomic_t;

#define rx_atomic_set(X, V)       atomic_set(X, V)
#define rx_atomic_read(X)         atomic_read(X)
#define rx_atomic_inc(X)          atomic_inc(X)
#define rx_atomic_inc_and_read(X) atomic_inc_return(X)
#define rx_atomic_add(X, V)       atomic_add(V, X)
#define rx_atomic_dec(X)          atomic_dec(X)
#define rx_atomic_dec_and_read(X) atomic_dec_return(X)
#define rx_atomic_sub(X, V)       atomic_sub(V, X)

#elif defined(AFS_SUN510_ENV) || (defined(AFS_SUN58_ENV) && defined(KERNEL) && !defined(UKERNEL))
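/* Solaris: the atomic_ops routines are only available to user space from
 * Solaris 10 onwards, so on earlier releases we use them in the kernel only;
 * pre-10 user-space builds fall through to one of the later cases. */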

# if defined(KERNEL) && !defined(UKERNEL)
#  include <sys/atomic.h>
# else
#  include <atomic.h>
# endif

typedef struct {
    volatile unsigned int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    atomic_inc_32(&atomic->var);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return atomic_inc_32_nv(&atomic->var);
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    atomic_add_32(&atomic->var, change);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    atomic_dec_32(&atomic->var);
}

static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    return atomic_dec_32_nv(&atomic->var);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    atomic_add_32(&atomic->var, 0 - change);
}

#elif defined(__GNUC__) && defined(HAVE_SYNC_FETCH_AND_ADD)
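/* Other platforms built with gcc: use the __sync_* atomic builtins when the
 * build has found them (HAVE_SYNC_FETCH_AND_ADD). */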

typedef struct {
    volatile int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    (void)__sync_fetch_and_add(&atomic->var, 1);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return __sync_add_and_fetch(&atomic->var, 1);
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    (void)__sync_fetch_and_add(&atomic->var, change);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    (void)__sync_fetch_and_sub(&atomic->var, 1);
}

static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    return __sync_sub_and_fetch(&atomic->var, 1);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    (void)__sync_fetch_and_sub(&atomic->var, change);
}

#else

/* If we're on a platform where we have no idea how to do atomics, fall back
 * to a single process-wide mutex that protects all atomic variables. This
 * won't be the quickest thing ever.
 */

#ifdef RX_ENABLE_LOCKS
extern afs_kmutex_t rx_atomic_mutex;
#endif
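
/* Note: rx_atomic_mutex is assumed to be defined and MUTEX_INIT'd elsewhere
 * in the RX library before any of these fallback operations are used. */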

typedef struct {
    int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var = val;
    MUTEX_EXIT(&rx_atomic_mutex);
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    int out;

    MUTEX_ENTER(&rx_atomic_mutex);
    out = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);

    return out;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var++;
    MUTEX_EXIT(&rx_atomic_mutex);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    int retval;
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var++;
    retval = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);
    return retval;
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var += change;
    MUTEX_EXIT(&rx_atomic_mutex);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var--;
    MUTEX_EXIT(&rx_atomic_mutex);
}

static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    int retval;
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var--;
    retval = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);
    return retval;
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var -= change;
    MUTEX_EXIT(&rx_atomic_mutex);
}

#endif

#endif /* OPENAFS_RX_ATOMIC_H */