Linux: correct use of atomic_add and atomic_sub functions
[openafs.git] / src / rx / rx_atomic.h
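The Linux kernel's atomic_add() and atomic_sub() take the value as their first argument and the atomic_t pointer as their second, which is the reverse of the rx_atomic_*() calling convention, so the Linux wrapper macros must swap their arguments. A minimal sketch of the mapping, assuming the standard atomic_t prototypes:

    /* Linux kernel atomic API: value first, pointer second. */
    void atomic_add(int i, atomic_t *v);
    void atomic_sub(int i, atomic_t *v);

    /* rx_atomic_* takes the pointer first, so X and V are swapped. */
    #define rx_atomic_add(X, V) atomic_add(V, X)
    #define rx_atomic_sub(X, V) atomic_sub(V, X)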
/*
 * Copyright (c) 2010 Your Filesystem Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

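/* Static initialiser for an rx_atomic_t, e.g.
 *     rx_atomic_t counter = RX_ATOMIC_INIT(0);
 */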
#define RX_ATOMIC_INIT(i) { (i) }

#ifdef AFS_NT40_ENV
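/* Windows: implement the operations with the Win32 Interlocked*()
 * intrinsics. */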
typedef struct {
    volatile int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    InterlockedIncrement(&atomic->var);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return InterlockedIncrement(&atomic->var);
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    InterlockedExchangeAdd(&atomic->var, change);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    InterlockedDecrement(&atomic->var);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    InterlockedExchangeAdd(&atomic->var, 0 - change);
}

#elif defined(AFS_DARWIN80_ENV) || defined(AFS_USR_DARWIN80_ENV)

#include <libkern/OSAtomic.h>
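/* In the kernel the 32-bit operations are spelled OS*Atomic() rather
 * than OSAtomic*32(), so map the userland names onto the kernel ones. */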
#if defined(KERNEL) && !defined(UKERNEL)
#define OSAtomicIncrement32 OSIncrementAtomic
#define OSAtomicAdd32 OSAddAtomic
#define OSAtomicDecrement32 OSDecrementAtomic
#endif

typedef struct {
    volatile int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    OSAtomicIncrement32(&atomic->var);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return OSAtomicIncrement32(&atomic->var);
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    OSAtomicAdd32(change, &atomic->var);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    OSAtomicDecrement32(&atomic->var);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    OSAtomicAdd32(0 - change, &atomic->var);
}
#elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
#include <asm/atomic.h>

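/* The kernel's atomic_add() and atomic_sub() take the value first and
 * the atomic_t pointer second, the reverse of the rx_atomic_*()
 * argument order, so the add and sub macros swap their arguments. */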
typedef atomic_t rx_atomic_t;

#define rx_atomic_set(X, V)       atomic_set(X, V)
#define rx_atomic_read(X)         atomic_read(X)
#define rx_atomic_inc(X)          atomic_inc(X)
#define rx_atomic_inc_and_read(X) atomic_inc_return(X)
#define rx_atomic_add(X, V)       atomic_add(V, X)
#define rx_atomic_dec(X)          atomic_dec(X)
#define rx_atomic_sub(X, V)       atomic_sub(V, X)

#elif defined(AFS_SUN58_ENV)

#if defined(KERNEL) && !defined(UKERNEL)
#include <sys/atomic.h>
#else
#include <atomic.h>
#endif
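
/* Solaris: use the atomic_*_32() operations. */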
typedef struct {
    volatile int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    atomic_inc_32(&atomic->var);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return atomic_inc_32_nv(&atomic->var);
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    atomic_add_32(&atomic->var, change);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    atomic_dec_32(&atomic->var);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    atomic_add_32(&atomic->var, 0 - change);
}

#elif defined(__GNUC__) && defined(HAVE_SYNC_FETCH_AND_ADD)
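/* Any other platform where GCC's __sync_*() atomic builtins are
 * available. */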

typedef struct {
    volatile int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    (void)__sync_fetch_and_add(&atomic->var, 1);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return __sync_add_and_fetch(&atomic->var, 1);
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    (void)__sync_fetch_and_add(&atomic->var, change);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    (void)__sync_fetch_and_sub(&atomic->var, 1);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    (void)__sync_fetch_and_sub(&atomic->var, change);
}

#else

/* If we're on a platform where we have no idea how to do atomics,
 * then we fall back to using a single process wide mutex to protect
 * all atomic variables. This won't be the quickest thing ever.
 */

#ifdef RX_ENABLE_LOCKS
extern afs_kmutex_t rx_atomic_mutex;
#endif

typedef struct {
    int var;
} rx_atomic_t;

static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var = val;
    MUTEX_EXIT(&rx_atomic_mutex);
}

static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    int out;

    MUTEX_ENTER(&rx_atomic_mutex);
    out = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);

    return out;
}

static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var++;
    MUTEX_EXIT(&rx_atomic_mutex);
}

static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    int retval;
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var++;
    retval = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);
    return retval;
}

static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var += change;
    MUTEX_EXIT(&rx_atomic_mutex);
}

static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var--;
    MUTEX_EXIT(&rx_atomic_mutex);
}

static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var -= change;
    MUTEX_EXIT(&rx_atomic_mutex);
}

#endif