/*
 * Copyright (c) 2010 Your Filesystem Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
25 #ifndef OPENAFS_RX_ATOMIC_H
26 #define OPENAFS_RX_ATOMIC_H 1
28 #define RX_ATOMIC_INIT(i) { (i) }
36 rx_atomic_set(rx_atomic_t *atomic, int val) {
41 rx_atomic_read(rx_atomic_t *atomic) {
46 rx_atomic_inc(rx_atomic_t *atomic) {
47 InterlockedIncrement(&atomic->var);
51 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
52 return InterlockedIncrement(&atomic->var);
56 rx_atomic_add(rx_atomic_t *atomic, int change) {
57 InterlockedExchangeAdd(&atomic->var, change);
61 rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
62 return InterlockedExchangeAdd(&atomic->var, change) + change;
66 rx_atomic_dec(rx_atomic_t *atomic) {
67 InterlockedDecrement(&atomic->var);
71 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
72 return InterlockedDecrement(&atomic->var);
76 rx_atomic_sub(rx_atomic_t *atomic, int change) {
77 InterlockedExchangeAdd(&atomic->var, 0 - change);
80 #elif defined(AFS_AIX61_ENV) || defined(AFS_USR_AIX61_ENV)
81 #include <sys/atomic_op.h>
88 rx_atomic_set(rx_atomic_t *atomic, int val) {
93 rx_atomic_read(rx_atomic_t *atomic) {
98 rx_atomic_inc(rx_atomic_t *atomic) {
99 fetch_and_add(&atomic->var, 1);
103 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
104 return (fetch_and_add(&atomic->var, 1) + 1);
108 rx_atomic_add(rx_atomic_t *atomic, int change) {
109 fetch_and_add(&atomic->var, change);
113 rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
114 return (fetch_and_add(&atomic->var, change) + change);
118 rx_atomic_dec(rx_atomic_t *atomic) {
119 fetch_and_add(&atomic->var, -1);
123 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
124 return (fetch_and_add(&atomic->var, -1) - 1);
128 rx_atomic_sub(rx_atomic_t *atomic, int change) {
129 fetch_and_add(&atomic->var, -change);
132 #elif defined(AFS_DARWIN80_ENV) || defined(AFS_USR_DARWIN80_ENV)
134 #include <libkern/OSAtomic.h>
135 #if defined(KERNEL) && !defined(UKERNEL)
136 #define OSAtomicIncrement32 OSIncrementAtomic
137 #define OSAtomicAdd32 OSAddAtomic
138 #define OSAtomicDecrement32 OSDecrementAtomic
146 rx_atomic_set(rx_atomic_t *atomic, int val) {
151 rx_atomic_read(rx_atomic_t *atomic) {
156 rx_atomic_inc(rx_atomic_t *atomic) {
157 OSAtomicIncrement32(&atomic->var);
161 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
162 return OSAtomicIncrement32(&atomic->var);
166 rx_atomic_add(rx_atomic_t *atomic, int change) {
167 OSAtomicAdd32(change, &atomic->var);
171 rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
172 return OSAtomicAdd32(change, &atomic->var);
176 rx_atomic_dec(rx_atomic_t *atomic) {
177 OSAtomicDecrement32(&atomic->var);
181 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
182 return OSAtomicDecrement32(&atomic->var);
186 rx_atomic_sub(rx_atomic_t *atomic, int change) {
187 OSAtomicAdd32(0 - change, &atomic->var);
189 #elif defined(AFS_LINUX26_ENV) && defined(KERNEL)
190 #include <asm/atomic.h>
192 typedef atomic_t rx_atomic_t;
194 #define rx_atomic_set(X, V) atomic_set(X, V)
195 #define rx_atomic_read(X) atomic_read(X)
196 #define rx_atomic_inc(X) atomic_inc(X)
197 #define rx_atomic_inc_and_read(X) atomic_inc_return(X)
198 #define rx_atomic_add(X, V) atomic_add(V, X)
199 #define rx_atomic_add_and_read(X, V) atomic_add_return(V, X);
200 #define rx_atomic_dec(X) atomic_dec(X)
201 #define rx_atomic_dec_and_read(X) atomic_dec_return(X)
202 #define rx_atomic_sub(X, V) atomic_sub(V, X)
204 #elif defined(AFS_SUN510_ENV) || (defined(AFS_SUN5_ENV) && defined(KERNEL) && !defined(UKERNEL))
206 # if defined(KERNEL) && !defined(UKERNEL)
207 # include <sys/atomic.h>
212 #ifndef AFS_SUN510_ENV
213 # define atomic_inc_32(X) atomic_add_32((X), 1)
214 # define atomic_inc_32_nv(X) atomic_add_32_nv((X), 1)
215 # define atomic_dec_32(X) atomic_add_32((X), -1)
216 # define atomic_dec_32_nv(X) atomic_add_32_nv((X), -1)
220 volatile unsigned int var;
224 rx_atomic_set(rx_atomic_t *atomic, int val) {
229 rx_atomic_read(rx_atomic_t *atomic) {
234 rx_atomic_inc(rx_atomic_t *atomic) {
235 atomic_inc_32(&atomic->var);
239 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
240 return atomic_inc_32_nv(&atomic->var);
244 rx_atomic_add(rx_atomic_t *atomic, int change) {
245 atomic_add_32(&atomic->var, change);
249 rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
250 return atomic_add_32_nv(&atomic->var, change);
254 rx_atomic_dec(rx_atomic_t *atomic) {
255 atomic_dec_32(&atomic->var);
259 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
260 return atomic_dec_32_nv(&atomic->var);
264 rx_atomic_sub(rx_atomic_t *atomic, int change) {
265 atomic_add_32(&atomic->var, 0 - change);
268 #elif defined(__GNUC__) && defined(HAVE_SYNC_FETCH_AND_ADD)
275 rx_atomic_set(rx_atomic_t *atomic, int val) {
280 rx_atomic_read(rx_atomic_t *atomic) {
285 rx_atomic_inc(rx_atomic_t *atomic) {
286 (void)__sync_fetch_and_add(&atomic->var, 1);
290 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
291 return __sync_add_and_fetch(&atomic->var, 1);
295 rx_atomic_add(rx_atomic_t *atomic, int change) {
296 (void)__sync_fetch_and_add(&atomic->var, change);
300 rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
301 return __sync_fetch_and_add(&atomic->var, change);
305 rx_atomic_dec(rx_atomic_t *atomic) {
306 (void)__sync_fetch_and_sub(&atomic->var, 1);
310 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
311 return __sync_sub_and_fetch(&atomic->var, 1);
315 rx_atomic_sub(rx_atomic_t *atomic, int change) {
316 (void)__sync_fetch_and_sub(&atomic->var, change);
321 /* If we're on a platform where we have no idea how to do atomics,
322 * then we fall back to using a single process wide mutex to protect
323 * all atomic variables. This won't be the quickest thing ever.
326 #ifdef RX_ENABLE_LOCKS
327 extern afs_kmutex_t rx_atomic_mutex;
335 rx_atomic_set(rx_atomic_t *atomic, int val) {
336 MUTEX_ENTER(&rx_atomic_mutex);
338 MUTEX_EXIT(&rx_atomic_mutex);
342 rx_atomic_read(rx_atomic_t *atomic) {
345 MUTEX_ENTER(&rx_atomic_mutex);
347 MUTEX_EXIT(&rx_atomic_mutex);
353 rx_atomic_inc(rx_atomic_t *atomic) {
354 MUTEX_ENTER(&rx_atomic_mutex);
356 MUTEX_EXIT(&rx_atomic_mutex);
360 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
362 MUTEX_ENTER(&rx_atomic_mutex);
364 retval = atomic->var;
365 MUTEX_EXIT(&rx_atomic_mutex);
370 rx_atomic_add(rx_atomic_t *atomic, int change) {
371 MUTEX_ENTER(&rx_atomic_mutex);
372 atomic->var += change;
373 MUTEX_EXIT(&rx_atomic_mutex);
377 rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
380 MUTEX_ENTER(&rx_atomic_mutex);
381 atomic->var += change;
382 retval = atomic->var;
383 MUTEX_EXIT(&rx_atomic_mutex);
389 rx_atomic_dec(rx_atomic_t *atomic) {
390 MUTEX_ENTER(&rx_atomic_mutex);
392 MUTEX_EXIT(&rx_atomic_mutex);
396 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
398 MUTEX_ENTER(&rx_atomic_mutex);
400 retval = atomic->var;
401 MUTEX_EXIT(&rx_atomic_mutex);
407 rx_atomic_sub(rx_atomic_t *atomic, int change) {
408 MUTEX_ENTER(&rx_atomic_mutex);
409 atomic->var -= change;
410 MUTEX_EXIT(&rx_atomic_mutex);