rx: remove rx_atomic bitops
src/rx/rx_atomic.h
/*
 * Copyright (c) 2010 Your Filesystem Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef OPENAFS_RX_ATOMIC_H
#define OPENAFS_RX_ATOMIC_H 1

#define RX_ATOMIC_INIT(i) { (i) }

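/*
 * Each platform branch below implements the same small interface over
 * rx_atomic_t: rx_atomic_set(), rx_atomic_read(), rx_atomic_inc(),
 * rx_atomic_inc_and_read(), rx_atomic_add(), rx_atomic_add_and_read(),
 * rx_atomic_dec(), rx_atomic_dec_and_read() and rx_atomic_sub(). The
 * *_and_read variants return the value after the operation has been applied.
 *
 * A minimal usage sketch (the counter name is illustrative only):
 *
 *     rx_atomic_t refcnt = RX_ATOMIC_INIT(1);
 *
 *     rx_atomic_inc(&refcnt);
 *     if (rx_atomic_dec_and_read(&refcnt) == 0) {
 *         (last reference dropped)
 *     }
 */
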
#ifdef AFS_NT40_ENV
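/* Windows: the Interlocked* intrinsics provide the atomic operations. */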
typedef struct {
    volatile int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    InterlockedIncrement(&atomic->var);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return InterlockedIncrement(&atomic->var);
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    InterlockedExchangeAdd(&atomic->var, change);
}

static_inline int
rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
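    /* InterlockedExchangeAdd returns the value the variable held before the
     * addition, so add change to hand back the updated value. */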
    return InterlockedExchangeAdd(&atomic->var, change) + change;
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    InterlockedDecrement(&atomic->var);
}

static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    return InterlockedDecrement(&atomic->var);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    InterlockedExchangeAdd(&atomic->var, 0 - change);
}

#elif defined(AFS_AIX61_ENV) || defined(AFS_USR_AIX61_ENV)
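/* AIX 6.1: fetch_and_add() from <sys/atomic_op.h>; it returns the value the
 * variable held before the addition. */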
#include <sys/atomic_op.h>

typedef struct {
    volatile int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    fetch_and_add(&atomic->var, 1);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return (fetch_and_add(&atomic->var, 1) + 1);
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    fetch_and_add(&atomic->var, change);
}

static_inline int
rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
    return (fetch_and_add(&atomic->var, change) + change);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    fetch_and_add(&atomic->var, -1);
}

static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    return (fetch_and_add(&atomic->var, -1) - 1);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    fetch_and_add(&atomic->var, -change);
}

#elif defined(AFS_DARWIN80_ENV) || defined(AFS_USR_DARWIN80_ENV)
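/* Darwin: the OSAtomic*32 functions, which return the new value of the
 * variable. */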

# if (defined(AFS_DARWIN160_ENV) || defined(AFS_USR_DARWIN160_ENV)) && !defined(KERNEL)
#  define OSATOMIC_USE_INLINED 1
# endif

# include <libkern/OSAtomic.h>

# if defined(KERNEL) && !defined(UKERNEL)
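/* In the kernel, OSIncrementAtomic(), OSAddAtomic() and OSDecrementAtomic()
 * return the value the variable held before the operation, so wrap them to
 * match the userspace OSAtomic*32 functions, which return the new value. */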
static_inline int
OSAtomicIncrement32(volatile int *value)
{
    return OSIncrementAtomic(value) + 1;
}

static_inline int
OSAtomicAdd32(int amount, volatile int *value)
{
    return OSAddAtomic(amount, value) + amount;
}

static_inline int
OSAtomicDecrement32(volatile int *value)
{
    return OSDecrementAtomic(value) - 1;
}

# endif

typedef struct {
    volatile int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    OSAtomicIncrement32(&atomic->var);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return OSAtomicIncrement32(&atomic->var);
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    OSAtomicAdd32(change, &atomic->var);
}

static_inline int
rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
    return OSAtomicAdd32(change, &atomic->var);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    OSAtomicDecrement32(&atomic->var);
}

static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    return OSAtomicDecrement32(&atomic->var);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    OSAtomicAdd32(0 - change, &atomic->var);
}
#elif defined(AFS_LINUX26_ENV) && defined(KERNEL)
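/* Linux kernel: wrap the kernel's own atomic_t operations directly. */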
#include <asm/atomic.h>

typedef atomic_t rx_atomic_t;

#define rx_atomic_set(X, V)       atomic_set(X, V)
#define rx_atomic_read(X)         atomic_read(X)
#define rx_atomic_inc(X)          atomic_inc(X)
#define rx_atomic_inc_and_read(X) atomic_inc_return(X)
#define rx_atomic_add(X, V)       atomic_add(V, X)
#define rx_atomic_add_and_read(X, V) atomic_add_return(V, X)
#define rx_atomic_dec(X)          atomic_dec(X)
#define rx_atomic_dec_and_read(X) atomic_dec_return(X)
#define rx_atomic_sub(X, V)       atomic_sub(V, X)

#elif defined(AFS_SUN510_ENV) || (defined(AFS_SUN5_ENV) && defined(KERNEL) && !defined(UKERNEL))
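/* Solaris: atomic_add_32() and friends; the *_nv variants return the new
 * value of the variable. */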

# if defined(KERNEL) && !defined(UKERNEL)
#  include <sys/atomic.h>
# else
#  include <atomic.h>
# endif

#ifndef AFS_SUN510_ENV
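/* Solaris releases prior to 5.10 lack the inc/dec convenience wrappers, so
 * build them from atomic_add_32() and atomic_add_32_nv(). */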
# define atomic_inc_32(X)    atomic_add_32((X), 1)
# define atomic_inc_32_nv(X) atomic_add_32_nv((X), 1)
# define atomic_dec_32(X)    atomic_add_32((X), -1)
# define atomic_dec_32_nv(X) atomic_add_32_nv((X), -1)
#endif

typedef struct {
    volatile unsigned int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    atomic_inc_32(&atomic->var);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return atomic_inc_32_nv(&atomic->var);
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    atomic_add_32(&atomic->var, change);
}

static_inline int
rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
    return atomic_add_32_nv(&atomic->var, change);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    atomic_dec_32(&atomic->var);
}

static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    return atomic_dec_32_nv(&atomic->var);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    atomic_add_32(&atomic->var, 0 - change);
}

#elif defined(__GNUC__) && defined(HAVE_SYNC_FETCH_AND_ADD)
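/* Any other gcc-compatible compiler: the __sync atomic builtins. */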

typedef struct {
    volatile int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    (void)__sync_fetch_and_add(&atomic->var, 1);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return __sync_add_and_fetch(&atomic->var, 1);
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    (void)__sync_fetch_and_add(&atomic->var, change);
}

static_inline int
rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
    /* Use the add-and-fetch form so the new value is returned, matching the
     * rx_atomic_add_and_read implementations on the other platforms. */
    return __sync_add_and_fetch(&atomic->var, change);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    (void)__sync_fetch_and_sub(&atomic->var, 1);
}

static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    return __sync_sub_and_fetch(&atomic->var, 1);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    (void)__sync_fetch_and_sub(&atomic->var, change);
}

#else

/* If we're on a platform where we have no idea how to do atomics,
 * then we fall back to using a single process wide mutex to protect
 * all atomic variables. This won't be the quickest thing ever.
 */

#ifdef RX_ENABLE_LOCKS
extern afs_kmutex_t rx_atomic_mutex;
#endif

typedef struct {
    int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var = val;
    MUTEX_EXIT(&rx_atomic_mutex);
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    int out;

    MUTEX_ENTER(&rx_atomic_mutex);
    out = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);

    return out;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var++;
    MUTEX_EXIT(&rx_atomic_mutex);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    int retval;
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var++;
    retval = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);
    return retval;
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var += change;
    MUTEX_EXIT(&rx_atomic_mutex);
}

static_inline int
rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
    int retval;

    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var += change;
    retval = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);

    return retval;
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var--;
    MUTEX_EXIT(&rx_atomic_mutex);
}

static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    int retval;
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var--;
    retval = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);
    return retval;
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var -= change;
    MUTEX_EXIT(&rx_atomic_mutex);
}

#endif

#endif