/*
 * Copyright (c) 2010 Your Filesystem Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
25 #ifndef OPENAFS_RX_ATOMIC_H
26 #define OPENAFS_RX_ATOMIC_H 1
28 #define RX_ATOMIC_INIT(i) { (i) }
36 rx_atomic_set(rx_atomic_t *atomic, int val) {
41 rx_atomic_read(rx_atomic_t *atomic) {
46 rx_atomic_inc(rx_atomic_t *atomic) {
47 InterlockedIncrement(&atomic->var);
51 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
52 return InterlockedIncrement(&atomic->var);
56 rx_atomic_add(rx_atomic_t *atomic, int change) {
57 InterlockedExchangeAdd(&atomic->var, change);
61 rx_atomic_dec(rx_atomic_t *atomic) {
62 InterlockedDecrement(&atomic->var);
66 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
67 return InterlockedDecrement(&atomic->var);
71 rx_atomic_sub(rx_atomic_t *atomic, int change) {
72 InterlockedExchangeAdd(&atomic->var, 0 - change);
75 #elif defined(AFS_DARWIN80_ENV) || defined(AFS_USR_DARWIN80_ENV)
77 #include <libkern/OSAtomic.h>
78 #if defined(KERNEL) && !defined(UKERNEL)
79 #define OSAtomicIncrement32 OSIncrementAtomic
80 #define OSAtomicAdd32 OSAddAtomic
81 #define OSAtomicDecrement32 OSDecrementAtomic
89 rx_atomic_set(rx_atomic_t *atomic, int val) {
94 rx_atomic_read(rx_atomic_t *atomic) {
99 rx_atomic_inc(rx_atomic_t *atomic) {
100 OSAtomicIncrement32(&atomic->var);
104 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
105 return OSAtomicIncrement32(&atomic->var);
109 rx_atomic_add(rx_atomic_t *atomic, int change) {
110 OSAtomicAdd32(change, &atomic->var);
114 rx_atomic_dec(rx_atomic_t *atomic) {
115 OSAtomicDecrement32(&atomic->var);
119 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
120 return OSAtomicDecrement32(&atomic->var);
124 rx_atomic_sub(rx_atomic_t *atomic, int change) {
125 OSAtomicAdd32(0 - change, &atomic->var);
127 #elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
128 #include <asm/atomic.h>
130 typedef atomic_t rx_atomic_t;
132 #define rx_atomic_set(X) atomic_set(X)
133 #define rx_atomic_read(X) atomic_read(X)
134 #define rx_atomic_inc(X) atomic_inc(X)
135 #define rx_atomic_inc_and_read(X) atomic_inc_return(X)
136 #define rx_atomic_add(X, V) atomic_add(V, X)
137 #define rx_atomic_dec(X) atomic_dec(X)
138 #define rx_atomic_sub(X, V) atomic_sub(V, X)
140 #elif defined(AFS_SUN510_ENV) || (defined(AFS_SUN58_ENV) && defined(KERNEL) && !defined(UKERNEL))
142 # if defined(KERNEL) && !defined(UKERNEL)
143 # include <sys/atomic.h>
149 volatile unsigned int var;
153 rx_atomic_set(rx_atomic_t *atomic, int val) {
158 rx_atomic_read(rx_atomic_t *atomic) {
163 rx_atomic_inc(rx_atomic_t *atomic) {
164 atomic_inc_32(&atomic->var);
168 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
169 return atomic_inc_32_nv(&atomic->var);
173 rx_atomic_add(rx_atomic_t *atomic, int change) {
174 atomic_add_32(&atomic->var, change);
178 rx_atomic_dec(rx_atomic_t *atomic) {
179 atomic_dec_32(&atomic->var);
183 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
184 return atomic_dec_32_nv(&atomic->var);
188 rx_atomic_sub(rx_atomic_t *atomic, int change) {
189 atomic_add_32(&atomic->var, 0 - change);
192 #elif defined(__GNUC__) && defined(HAVE_SYNC_FETCH_AND_ADD)
199 rx_atomic_set(rx_atomic_t *atomic, int val) {
204 rx_atomic_read(rx_atomic_t *atomic) {
209 rx_atomic_inc(rx_atomic_t *atomic) {
210 (void)__sync_fetch_and_add(&atomic->var, 1);
214 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
215 return __sync_add_and_fetch(&atomic->var, 1);
219 rx_atomic_add(rx_atomic_t *atomic, int change) {
220 (void)__sync_fetch_and_add(&atomic->var, change);
224 rx_atomic_dec(rx_atomic_t *atomic) {
225 (void)__sync_fetch_and_sub(&atomic->var, 1);
229 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
230 return __sync_sub_and_fetch(&atomic->var, 1);
234 rx_atomic_sub(rx_atomic_t *atomic, int change) {
235 (void)__sync_fetch_and_sub(&atomic->var, change);
240 /* If we're on a platform where we have no idea how to do atomics,
241 * then we fall back to using a single process wide mutex to protect
242 * all atomic variables. This won't be the quickest thing ever.
245 #ifdef RX_ENABLE_LOCKS
246 extern afs_kmutex_t rx_atomic_mutex;
254 rx_atomic_set(rx_atomic_t *atomic, int val) {
255 MUTEX_ENTER(&rx_atomic_mutex);
257 MUTEX_EXIT(&rx_atomic_mutex);
261 rx_atomic_read(rx_atomic_t *atomic) {
264 MUTEX_ENTER(&rx_atomic_mutex);
266 MUTEX_EXIT(&rx_atomic_mutex);
272 rx_atomic_inc(rx_atomic_t *atomic) {
273 MUTEX_ENTER(&rx_atomic_mutex);
275 MUTEX_EXIT(&rx_atomic_mutex);
279 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
281 MUTEX_ENTER(&rx_atomic_mutex);
283 retval = atomic->var;
284 MUTEX_EXIT(&rx_atomic_mutex);
289 rx_atomic_add(rx_atomic_t *atomic, int change) {
290 MUTEX_ENTER(&rx_atomic_mutex);
291 atomic->var += change;
292 MUTEX_EXIT(&rx_atomic_mutex);
296 rx_atomic_dec(rx_atomic_t *atomic) {
297 MUTEX_ENTER(&rx_atomic_mutex);
299 MUTEX_EXIT(&rx_atomic_mutex);
303 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
305 MUTEX_ENTER(&rx_atomic_mutex);
307 retval = atomic->var;
308 MUTEX_EXIT(&rx_atomic_mutex);
314 rx_atomic_sub(rx_atomic_t *atomic, int change) {
315 MUTEX_ENTER(&rx_atomic_mutex);
316 atomic->var -= change;
317 MUTEX_EXIT(&rx_atomic_mutex);