/*
 * rx: Move bytesSent + bytesRcvd into app only data
 * [openafs.git] / src / rx / rx_atomic.h
 */
1 /*
2  * Copyright (c) 2010 Your Filesystem Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR `AS IS'' AND ANY EXPRESS OR
14  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
16  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
19  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
20  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
23  */
24
25 #ifndef OPENAFS_RX_ATOMIC_H
26 #define OPENAFS_RX_ATOMIC_H 1
27
28 #define RX_ATOMIC_INIT(i) { (i) }
29
30 #ifdef AFS_NT40_ENV
31 typedef struct {
32     volatile int var;
33 } rx_atomic_t;
34
35 static_inline void
36 rx_atomic_set(rx_atomic_t *atomic, int val) {
37     atomic->var = val;
38 }
39
40 static_inline int
41 rx_atomic_read(rx_atomic_t *atomic) {
42     return atomic->var;
43 }
44
45 static_inline void
46 rx_atomic_inc(rx_atomic_t *atomic) {
47     InterlockedIncrement(&atomic->var);
48 }
49
50 static_inline int
51 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
52     return InterlockedIncrement(&atomic->var);
53 }
54
55 static_inline void
56 rx_atomic_add(rx_atomic_t *atomic, int change) {
57     InterlockedExchangeAdd(&atomic->var, change);
58 }
59
60 static_inline int
61 rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
62     return InterlockedExchangeAdd(&atomic->var, change) + change;
63 }
64
65 static_inline void
66 rx_atomic_dec(rx_atomic_t *atomic) {
67     InterlockedDecrement(&atomic->var);
68 }
69
70 static_inline int
71 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
72     return InterlockedDecrement(&atomic->var);
73 }
74
75 static_inline void
76 rx_atomic_sub(rx_atomic_t *atomic, int change) {
77     InterlockedExchangeAdd(&atomic->var, 0 - change);
78 }
79
80 #elif defined(AFS_AIX61_ENV) || defined(AFS_USR_AIX61_ENV)
81 #include <sys/atomic_op.h>
82
83 typedef struct {
84    volatile int var;
85 } rx_atomic_t;
86
87 static_inline void
88 rx_atomic_set(rx_atomic_t *atomic, int val) {
89     atomic->var = val;
90 }
91
92 static_inline int
93 rx_atomic_read(rx_atomic_t *atomic) {
94     return atomic->var;
95 }
96
97 static_inline void
98 rx_atomic_inc(rx_atomic_t *atomic) {
99     fetch_and_add(&atomic->var, 1);
100 }
101
102 static_inline int
103 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
104     return (fetch_and_add(&atomic->var, 1) + 1);
105 }
106
107 static_inline void
108 rx_atomic_add(rx_atomic_t *atomic, int change) {
109     fetch_and_add(&atomic->var, change);
110 }
111
112 static_inline int
113 rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
114     return (fetch_and_add(&atomic->var, change) + change);
115 }
116
117 static_inline void
118 rx_atomic_dec(rx_atomic_t *atomic) {
119     fetch_and_add(&atomic->var, -1);
120 }
121
122 static_inline int
123 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
124     return (fetch_and_add(&atomic->var, -1) - 1);
125 }
126
127 static_inline void
128 rx_atomic_sub(rx_atomic_t *atomic, int change) {
129     fetch_and_add(&atomic->var, -change);
130 }
131
132 #elif defined(AFS_DARWIN80_ENV) || defined(AFS_USR_DARWIN80_ENV)
133
134 #include <libkern/OSAtomic.h>
135 #if defined(KERNEL) && !defined(UKERNEL)
136 #define OSAtomicIncrement32 OSIncrementAtomic
137 #define OSAtomicAdd32 OSAddAtomic
138 #define OSAtomicDecrement32 OSDecrementAtomic
139 #endif
140
141 typedef struct {
142     volatile int var;
143 } rx_atomic_t;
144
145 static_inline void
146 rx_atomic_set(rx_atomic_t *atomic, int val) {
147     atomic->var = val;
148 }
149
150 static_inline int
151 rx_atomic_read(rx_atomic_t *atomic) {
152     return atomic->var;
153 }
154
155 static_inline void
156 rx_atomic_inc(rx_atomic_t *atomic) {
157     OSAtomicIncrement32(&atomic->var);
158 }
159
160 static_inline int
161 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
162     return OSAtomicIncrement32(&atomic->var);
163 }
164
165 static_inline void
166 rx_atomic_add(rx_atomic_t *atomic, int change) {
167     OSAtomicAdd32(change, &atomic->var);
168 }
169
170 static_inline int
171 rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
172     return OSAtomicAdd32(change, &atomic->var);
173 }
174
175 static_inline void
176 rx_atomic_dec(rx_atomic_t *atomic) {
177     OSAtomicDecrement32(&atomic->var);
178 }
179
180 static_inline int
181 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
182     return OSAtomicDecrement32(&atomic->var);
183 }
184
185 static_inline void
186 rx_atomic_sub(rx_atomic_t *atomic, int change) {
187     OSAtomicAdd32(0 - change, &atomic->var);
188 }
189 #elif defined(AFS_LINUX26_ENV) && defined(KERNEL)
190 #include <asm/atomic.h>
191
192 typedef atomic_t rx_atomic_t;
193
194 #define rx_atomic_set(X, V)       atomic_set(X, V)
195 #define rx_atomic_read(X)         atomic_read(X)
196 #define rx_atomic_inc(X)          atomic_inc(X)
197 #define rx_atomic_inc_and_read(X) atomic_inc_return(X)
198 #define rx_atomic_add(X, V)       atomic_add(V, X)
199 #define rx_atomic_add_and_read(X, V) atomic_add_return(V, X);
200 #define rx_atomic_dec(X)          atomic_dec(X)
201 #define rx_atomic_dec_and_read(X) atomic_dec_return(X)
202 #define rx_atomic_sub(X, V)       atomic_sub(V, X)
203
204 #elif defined(AFS_SUN510_ENV) || (defined(AFS_SUN5_ENV) && defined(KERNEL) && !defined(UKERNEL))
205
206 # if defined(KERNEL) && !defined(UKERNEL)
207 #  include <sys/atomic.h>
208 # else
209 #  include <atomic.h>
210 # endif
211
212 #ifndef AFS_SUN510_ENV
213 # define atomic_inc_32(X)    atomic_add_32((X), 1)
214 # define atomic_inc_32_nv(X) atomic_add_32_nv((X), 1)
215 # define atomic_dec_32(X)    atomic_add_32((X), -1)
216 # define atomic_dec_32_nv(X) atomic_add_32_nv((X), -1)
217 #endif
218
typedef struct {
    volatile unsigned int var;  /* unsigned: Solaris atomic_*_32 routines operate on uint32_t */
} rx_atomic_t;

/* Plain store of a new value; the int argument is converted to unsigned. */
static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

/* Plain load; the unsigned counter is converted back to int for callers. */
static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

/* Atomically add 1; result discarded. */
static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    atomic_inc_32(&atomic->var);
}

/* Atomically add 1 and return the new value (the _nv variant). */
static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return atomic_inc_32_nv(&atomic->var);
}

/* Atomically add 'change'; result discarded. */
static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    atomic_add_32(&atomic->var, change);
}

/* Atomically add 'change' and return the new value (the _nv variant). */
static_inline int
rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
    return atomic_add_32_nv(&atomic->var, change);
}

/* Atomically subtract 1; result discarded. */
static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    atomic_dec_32(&atomic->var);
}

/* Atomically subtract 1 and return the new value (the _nv variant). */
static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    return atomic_dec_32_nv(&atomic->var);
}

/* Atomically subtract 'change' by adding its negation; result discarded. */
static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    atomic_add_32(&atomic->var, 0 - change);
}
267
268 #elif defined(__GNUC__) && defined(HAVE_SYNC_FETCH_AND_ADD)
269
typedef struct {
   volatile int var;
} rx_atomic_t;

/* Plain store; used for initialisation rather than concurrent update. */
static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

/* Plain load of the current value. */
static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

/* Atomically add 1; result discarded. */
static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    (void)__sync_fetch_and_add(&atomic->var, 1);
}

/* Atomically add 1 and return the new value. */
static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return __sync_add_and_fetch(&atomic->var, 1);
}

/* Atomically add 'change'; result discarded. */
static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    (void)__sync_fetch_and_add(&atomic->var, change);
}

/*
 * Atomically add 'change' and return the NEW (post-add) value.
 *
 * Bug fix: this previously used __sync_fetch_and_add, which returns the
 * value from *before* the addition. Every other backend in this header
 * returns the post-add value (InterlockedExchangeAdd + change,
 * fetch_and_add + change, OSAtomicAdd32, atomic_add_return,
 * atomic_add_32_nv, and the mutex fallback), so use __sync_add_and_fetch.
 */
static_inline int
rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
    return __sync_add_and_fetch(&atomic->var, change);
}

/* Atomically subtract 1; result discarded. */
static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    (void)__sync_fetch_and_sub(&atomic->var, 1);
}

/* Atomically subtract 1 and return the new value. */
static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    return __sync_sub_and_fetch(&atomic->var, 1);
}

/* Atomically subtract 'change'; result discarded. */
static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    (void)__sync_fetch_and_sub(&atomic->var, change);
}
318
319 #else
320
321 /* If we're on a platform where we have no idea how to do atomics,
322  * then we fall back to using a single process wide mutex to protect
323  * all atomic variables. This won't be the quickest thing ever.
324  */
325
326 #ifdef RX_ENABLE_LOCKS
327 extern afs_kmutex_t rx_atomic_mutex;
328 #endif
329
typedef struct {
    int var;  /* protected by the process-wide rx_atomic_mutex */
} rx_atomic_t;

/* Store a new value under the global atomic mutex.
 * NOTE(review): when RX_ENABLE_LOCKS is not defined, MUTEX_ENTER/MUTEX_EXIT
 * are presumably no-op macros — confirm against the build configuration. */
static_inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var = val;
    MUTEX_EXIT(&rx_atomic_mutex);
}

/* Read the current value under the global atomic mutex. */
static_inline int
rx_atomic_read(rx_atomic_t *atomic) {
    int out;

    MUTEX_ENTER(&rx_atomic_mutex);
    out = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);

    return out;
}

/* Add 1 under the global atomic mutex; result discarded. */
static_inline void
rx_atomic_inc(rx_atomic_t *atomic) {
   MUTEX_ENTER(&rx_atomic_mutex);
   atomic->var++;
   MUTEX_EXIT(&rx_atomic_mutex);
}

/* Add 1 and return the new value, both under the global atomic mutex. */
static_inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    int retval;
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var++;
    retval = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);
    return retval;
}

/* Add 'change' under the global atomic mutex; result discarded. */
static_inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var += change;
    MUTEX_EXIT(&rx_atomic_mutex);
}

/* Add 'change' and return the new value, both under the global atomic mutex. */
static_inline int
rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
    int retval;

    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var += change;
    retval = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);

    return retval;
}

/* Subtract 1 under the global atomic mutex; result discarded. */
static_inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var--;
    MUTEX_EXIT(&rx_atomic_mutex);
}

/* Subtract 1 and return the new value, both under the global atomic mutex. */
static_inline int
rx_atomic_dec_and_read(rx_atomic_t *atomic) {
    int retval;
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var--;
    retval = atomic->var;
    MUTEX_EXIT(&rx_atomic_mutex);
    return retval;
}


/* Subtract 'change' under the global atomic mutex; result discarded. */
static_inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    MUTEX_ENTER(&rx_atomic_mutex);
    atomic->var -= change;
    MUTEX_EXIT(&rx_atomic_mutex);
}
412
413 #endif
414
415 #endif