FBSD: deal with kernel API rename
[openafs.git] / src / rx / rx_atomic.h
/*
 * Copyright (c) 2010 Your Filesystem Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef OPENAFS_RX_ATOMIC_H
#define OPENAFS_RX_ATOMIC_H 1

#define RX_ATOMIC_INIT(i) { (i) }
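/*
 * rx_atomic_t provides a small, platform-independent atomic integer type
 * for Rx, selecting a native implementation below where one is available
 * and falling back to a mutex-protected counter otherwise.
 *
 * Illustrative usage sketch (hypothetical counter, not part of this header):
 *
 *     static rx_atomic_t rx_example_refs = RX_ATOMIC_INIT(0);
 *
 *     rx_atomic_inc(&rx_example_refs);
 *     if (rx_atomic_dec_and_read(&rx_example_refs) == 0)
 *         ; // last reference dropped
 */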

#ifdef AFS_NT40_ENV
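/* Windows: wrap the Interlocked* intrinsics.  Plain assignment and a plain
 * read serve for set/read; the Interlocked* calls provide the atomic
 * read-modify-write operations. */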
typedef struct {
    volatile int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    InterlockedIncrement(&atomic->var);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return InterlockedIncrement(&atomic->var);
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    InterlockedExchangeAdd(&atomic->var, change);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    InterlockedDecrement(&atomic->var);
}

static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    return InterlockedDecrement(&atomic->var);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    InterlockedExchangeAdd(&atomic->var, 0 - change);
}

#elif defined(AFS_DARWIN80_ENV) || defined(AFS_USR_DARWIN80_ENV)
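/* Darwin: use the OSAtomic* primitives.  In the kernel (but not UKERNEL)
 * the userland names are mapped onto the kernel's OSIncrementAtomic,
 * OSAddAtomic and OSDecrementAtomic equivalents. */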

#include <libkern/OSAtomic.h>
#if defined(KERNEL) && !defined(UKERNEL)
#define OSAtomicIncrement32 OSIncrementAtomic
#define OSAtomicAdd32 OSAddAtomic
#define OSAtomicDecrement32 OSDecrementAtomic
#endif

typedef struct {
    volatile int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    OSAtomicIncrement32(&atomic->var);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return OSAtomicIncrement32(&atomic->var);
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    OSAtomicAdd32(change, &atomic->var);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    OSAtomicDecrement32(&atomic->var);
}

static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    return OSAtomicDecrement32(&atomic->var);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    OSAtomicAdd32(0 - change, &atomic->var);
}
#elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
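/* Linux kernel: rx_atomic_t is simply the kernel's own atomic_t, with the
 * rx_atomic_* operations mapped directly onto the atomic_* API. */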
#include <asm/atomic.h>

typedef atomic_t rx_atomic_t;

#define rx_atomic_set(X, V)       atomic_set(X, V)
#define rx_atomic_read(X)         atomic_read(X)
#define rx_atomic_inc(X)          atomic_inc(X)
#define rx_atomic_inc_and_read(X) atomic_inc_return(X)
#define rx_atomic_add(X, V)       atomic_add(V, X)
#define rx_atomic_dec(X)          atomic_dec(X)
#define rx_atomic_dec_and_read(X) atomic_dec_return(X)
#define rx_atomic_sub(X, V)       atomic_sub(V, X)

#elif defined(AFS_SUN510_ENV) || (defined(AFS_SUN5_ENV) && defined(KERNEL) && !defined(UKERNEL))
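/* Solaris: use the atomic_*_32 operations, from <sys/atomic.h> in the
 * kernel and <atomic.h> in userland.  Where AFS_SUN510_ENV is not defined
 * the inc/dec variants are not assumed to exist, so they are built from
 * atomic_add_32 below. */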

# if defined(KERNEL) && !defined(UKERNEL)
#  include <sys/atomic.h>
# else
#  include <atomic.h>
# endif

#ifndef AFS_SUN510_ENV
# define atomic_inc_32(X)    atomic_add_32((X), 1)
# define atomic_inc_32_nv(X) atomic_add_32_nv((X), 1)
# define atomic_dec_32(X)    atomic_add_32((X), -1)
# define atomic_dec_32_nv(X) atomic_add_32_nv((X), -1)
#endif

typedef struct {
    volatile unsigned int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    atomic_inc_32(&atomic->var);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return atomic_inc_32_nv(&atomic->var);
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    atomic_add_32(&atomic->var, change);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    atomic_dec_32(&atomic->var);
}

static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    return atomic_dec_32_nv(&atomic->var);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    atomic_add_32(&atomic->var, 0 - change);
}

#elif defined(__GNUC__) && defined(HAVE_SYNC_FETCH_AND_ADD)
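/* Other platforms built with GCC: use the __sync_* atomic builtins when
 * configure has detected them (HAVE_SYNC_FETCH_AND_ADD). */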

typedef struct {
    volatile int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    (void)__sync_fetch_and_add(&atomic->var, 1);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return __sync_add_and_fetch(&atomic->var, 1);
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    (void)__sync_fetch_and_add(&atomic->var, change);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    (void)__sync_fetch_and_sub(&atomic->var, 1);
}

static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    return __sync_sub_and_fetch(&atomic->var, 1);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    (void)__sync_fetch_and_sub(&atomic->var, change);
}

#else

/* If we're on a platform where we have no idea how to do atomics,
 * then we fall back to using a single process-wide mutex to protect
 * all atomic variables. This won't be the quickest thing ever.
 */
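/* The process-wide mutex used by this fallback is only declared below; it
 * is assumed to be initialised elsewhere during Rx startup before any of
 * these operations are called. */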

#ifdef RX_ENABLE_LOCKS
extern afs_kmutex_t rx_atomic_mutex;
#endif

typedef struct {
    int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var = val;
    MUTEX_EXIT(&rx_atomic_mutex);
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    int out;

    MUTEX_ENTER(&rx_atomic_mutex);
    out = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);

    return out;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var++;
    MUTEX_EXIT(&rx_atomic_mutex);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    int retval;
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var++;
    retval = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);
    return retval;
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var += change;
    MUTEX_EXIT(&rx_atomic_mutex);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var--;
    MUTEX_EXIT(&rx_atomic_mutex);
}

static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    int retval;
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var--;
    retval = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);
    return retval;
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var -= change;
    MUTEX_EXIT(&rx_atomic_mutex);
}

#endif

#endif /* OPENAFS_RX_ATOMIC_H */