51e2f76f6cc8a2b1b4d158f0ac57b03d229b410a
[openafs.git] / src / rx / rx_atomic.h
1 /*
2  * Copyright (c) 2010 Your Filesystem Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR `AS IS'' AND ANY EXPRESS OR
14  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
16  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
19  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
20  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23  */
24
25 #ifndef OPENAFS_RX_ATOMIC_H
26 #define OPENAFS_RX_ATOMIC_H 1
27
28 #define RX_ATOMIC_INIT(i) { (i) }
29
30 #ifdef AFS_NT40_ENV
/* Windows implementation, built on the Interlocked* API. */
typedef struct {
    volatile int var;   /* volatile: force real memory accesses on plain reads/writes */
} rx_atomic_t;

/* Set the counter to 'val'.  Plain (non-interlocked) store; provides no
 * atomicity versus concurrent modifiers and no memory-ordering guarantee. */
static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

/* Read the current value.  Plain load; no ordering guarantees. */
static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

/* Atomically add 1 to the counter. */
static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    InterlockedIncrement(&atomic->var);
}

/* Atomically add 1 and return the post-increment value. */
static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return InterlockedIncrement(&atomic->var);
}

/* Atomically add 'change' to the counter. */
static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    InterlockedExchangeAdd(&atomic->var, change);
}

/* Atomically add 'change' and return the post-add value.
 * InterlockedExchangeAdd returns the value held *before* the addition,
 * so 'change' is added again to produce the new value. */
static_inline int
rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
    return InterlockedExchangeAdd(&atomic->var, change) + change;
}

/* Atomically subtract 1 from the counter. */
static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    InterlockedDecrement(&atomic->var);
}

/* Atomically subtract 1 and return the post-decrement value. */
static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    return InterlockedDecrement(&atomic->var);
}

/* Atomically subtract 'change' (implemented as adding its negation). */
static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    InterlockedExchangeAdd(&atomic->var, 0 - change);
}
79
80 #elif defined(AFS_DARWIN80_ENV) || defined(AFS_USR_DARWIN80_ENV)
81
82 #include <libkern/OSAtomic.h>
83 #if defined(KERNEL) && !defined(UKERNEL)
84 #define OSAtomicIncrement32 OSIncrementAtomic
85 #define OSAtomicAdd32 OSAddAtomic
86 #define OSAtomicDecrement32 OSDecrementAtomic
87 #endif
88
89 typedef struct {
90     volatile int var;
91 } rx_atomic_t;
92
93 static_inline void
94 rx_atomic_set(rx_atomic_t *atomic, int val) {
95     atomic->var = val;
96 }
97
98 static_inline int
99 rx_atomic_read(rx_atomic_t *atomic) {
100     return atomic->var;
101 }
102
103 static_inline void
104 rx_atomic_inc(rx_atomic_t *atomic) {
105     OSAtomicIncrement32(&atomic->var);
106 }
107
108 static_inline int
109 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
110     return OSAtomicIncrement32(&atomic->var);
111 }
112
113 static_inline void
114 rx_atomic_add(rx_atomic_t *atomic, int change) {
115     OSAtomicAdd32(change, &atomic->var);
116 }
117
118 static_inline int
119 rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
120     return OSAtomicAdd32(change, &atomic->var);
121 }
122
123 static_inline void
124 rx_atomic_dec(rx_atomic_t *atomic) {
125     OSAtomicDecrement32(&atomic->var);
126 }
127
128 static_inline int
129 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
130     return OSAtomicDecrement32(&atomic->var);
131 }
132
133 static_inline void
134 rx_atomic_sub(rx_atomic_t *atomic, int change) {
135     OSAtomicAdd32(0 - change, &atomic->var);
136 }
137 #elif defined(AFS_LINUX26_ENV) && defined(KERNEL)
138 #include <asm/atomic.h>
139
140 typedef atomic_t rx_atomic_t;
141
142 #define rx_atomic_set(X, V)       atomic_set(X, V)
143 #define rx_atomic_read(X)         atomic_read(X)
144 #define rx_atomic_inc(X)          atomic_inc(X)
145 #define rx_atomic_inc_and_read(X) atomic_inc_return(X)
146 #define rx_atomic_add(X, V)       atomic_add(V, X)
147 #define rx_atomic_add_and_read(X, V) atomic_add_return(V, X);
148 #define rx_atomic_dec(X)          atomic_dec(X)
149 #define rx_atomic_dec_and_read(X) atomic_dec_return(X)
150 #define rx_atomic_sub(X, V)       atomic_sub(V, X)
151
152 #elif defined(AFS_SUN510_ENV) || (defined(AFS_SUN5_ENV) && defined(KERNEL) && !defined(UKERNEL))
153
# if defined(KERNEL) && !defined(UKERNEL)
#  include <sys/atomic.h>
# else
#  include <atomic.h>
# endif

/* Solaris releases prior to 5.10 lack the inc/dec primitives;
 * synthesise them from atomic_add_32(_nv).  The _nv variants return the
 * new (post-operation) value. */
#ifndef AFS_SUN510_ENV
# define atomic_inc_32(X)    atomic_add_32((X), 1)
# define atomic_inc_32_nv(X) atomic_add_32_nv((X), 1)
# define atomic_dec_32(X)    atomic_add_32((X), -1)
# define atomic_dec_32_nv(X) atomic_add_32_nv((X), -1)
#endif

typedef struct {
    volatile unsigned int var;  /* unsigned: the atomic_*_32 family operates on uint32_t */
} rx_atomic_t;

/* Set the counter to 'val'.  Plain store; no atomicity versus concurrent
 * modifiers and no memory-ordering guarantee. */
static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

/* Read the current value.  Plain load; no ordering guarantees. */
static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

/* Atomically add 1 to the counter. */
static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    atomic_inc_32(&atomic->var);
}

/* Atomically add 1 and return the post-increment value. */
static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return atomic_inc_32_nv(&atomic->var);
}

/* Atomically add 'change' to the counter. */
static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    atomic_add_32(&atomic->var, change);
}

/* Atomically add 'change' and return the post-add value. */
static_inline int
rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
    return atomic_add_32_nv(&atomic->var, change);
}

/* Atomically subtract 1 from the counter. */
static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    atomic_dec_32(&atomic->var);
}

/* Atomically subtract 1 and return the post-decrement value. */
static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    return atomic_dec_32_nv(&atomic->var);
}

/* Atomically subtract 'change' (implemented as adding its negation). */
static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    atomic_add_32(&atomic->var, 0 - change);
}
215
216 #elif defined(__GNUC__) && defined(HAVE_SYNC_FETCH_AND_ADD)
217
218 typedef struct {
219    volatile int var;
220 } rx_atomic_t;
221
222 static_inline void
223 rx_atomic_set(rx_atomic_t *atomic, int val) {
224     atomic->var = val;
225 }
226
227 static_inline int
228 rx_atomic_read(rx_atomic_t *atomic) {
229     return atomic->var;
230 }
231
232 static_inline void
233 rx_atomic_inc(rx_atomic_t *atomic) {
234     (void)__sync_fetch_and_add(&atomic->var, 1);
235 }
236
237 static_inline int
238 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
239     return __sync_add_and_fetch(&atomic->var, 1);
240 }
241
242 static_inline void
243 rx_atomic_add(rx_atomic_t *atomic, int change) {
244     (void)__sync_fetch_and_add(&atomic->var, change);
245 }
246
247 static_inline int
248 rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
249     return __sync_fetch_and_add(&atomic->var, change);
250 }
251
252 static_inline void
253 rx_atomic_dec(rx_atomic_t *atomic) {
254     (void)__sync_fetch_and_sub(&atomic->var, 1);
255 }
256
257 static_inline int
258 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
259     return __sync_sub_and_fetch(&atomic->var, 1);
260 }
261
262 static_inline void
263 rx_atomic_sub(rx_atomic_t *atomic, int change) {
264     (void)__sync_fetch_and_sub(&atomic->var, change);
265 }
266
267 #else
268
269 /* If we're on a platform where we have no idea how to do atomics,
270  * then we fall back to using a single process wide mutex to protect
271  * all atomic variables. This won't be the quickest thing ever.
272  */
273
#ifdef RX_ENABLE_LOCKS
/* Single process-wide lock serialising every atomic operation below;
 * defined in the rx library proper. */
extern afs_kmutex_t rx_atomic_mutex;
#endif

typedef struct {
    int var;    /* no volatile needed: all access happens under rx_atomic_mutex */
} rx_atomic_t;

/* Set the counter to 'val' under the global atomic mutex. */
static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var = val;
    MUTEX_EXIT(&rx_atomic_mutex);
}

/* Read the current value under the global atomic mutex. */
static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    int out;

    MUTEX_ENTER(&rx_atomic_mutex);
    out = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);

    return out;
}

/* Atomically (via the mutex) add 1 to the counter. */
static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
   MUTEX_ENTER(&rx_atomic_mutex);
   atomic->var++;
   MUTEX_EXIT(&rx_atomic_mutex);
}

/* Atomically add 1 and return the post-increment value. */
static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    int retval;
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var++;
    retval = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);
    return retval;
}

/* Atomically add 'change' to the counter. */
static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var += change;
    MUTEX_EXIT(&rx_atomic_mutex);
}

/* Atomically add 'change' and return the post-add value. */
static_inline int
rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
    int retval;

    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var += change;
    retval = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);

    return retval;
}

/* Atomically subtract 1 from the counter. */
static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var--;
    MUTEX_EXIT(&rx_atomic_mutex);
}

/* Atomically subtract 1 and return the post-decrement value. */
static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    int retval;
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var--;
    retval = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);
    return retval;
}


/* Atomically subtract 'change' from the counter. */
static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var -= change;
    MUTEX_EXIT(&rx_atomic_mutex);
}
360
361 #endif
362
363 #endif