/*
 * Copyright (c) 2010 Your Filesystem Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
25 #define RX_ATOMIC_INIT(i) { (i) }
33 rx_atomic_set(rx_atomic_t *atomic, int val) {
38 rx_atomic_read(rx_atomic_t *atomic) {
43 rx_atomic_inc(rx_atomic_t *atomic) {
44 InterlockedIncrement(&atomic->var);
48 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
49 return InterlockedIncrement(&atomic->var);
53 rx_atomic_add(rx_atomic_t *atomic, int change) {
54 InterlockedExchangeAdd(&atomic->var, change);
58 rx_atomic_dec(rx_atomic_t *atomic) {
59 InterlockedDecrement(&atomic->var);
63 rx_atomic_sub(rx_atomic_t *atomic, int change) {
64 InterlockedExchangeAdd(&atomic->var, 0 - change);
67 #elif defined(AFS_DARWIN80_ENV) || defined(AFS_USR_DARWIN80_ENV)
69 #include <libkern/OSAtomic.h>
75 rx_atomic_set(rx_atomic_t *atomic, int val) {
80 rx_atomic_read(rx_atomic_t *atomic) {
85 rx_atomic_inc(rx_atomic_t *atomic) {
86 OSAtomicIncrement32(&atomic->var);
90 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
91 return OSAtomicIncrement32(&atomic->var);
95 rx_atomic_add(rx_atomic_t *atomic, int change) {
96 OSAtomicAdd32(change, &atomic->var);
100 rx_atomic_dec(rx_atomic_t *atomic) {
101 OSAtomicDecrement32(&atomic->var);
105 rx_atomic_sub(rx_atomic_t *atomic, int change) {
106 OSAtomicAdd32(0 - change, &atomic->var);
108 #elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
109 #include <asm/atomic.h>
111 typedef atomic_t rx_atomic_t;
113 #define rx_atomic_set(X) atomic_set(X)
114 #define rx_atomic_read(X) atomic_read(X)
115 #define rx_atomic_inc(X) atomic_inc(X)
116 #define rx_atomic_inc_and_read(X) atomic_inc_return(X)
117 #define rx_atomic_add(X, V) atomic_add(X, V)
118 #define rx_atomic_dec(X) atomic_dec(X)
119 #define rx_atomic_sub(X, V) atomic_sub(X, V)
121 #elif defined(AFS_SUN58_ENV)
127 rx_atomic_set(rx_atomic_t *atomic, int val) {
132 rx_atomic_read(rx_atomic_t *atomic) {
137 rx_atomic_inc(rx_atomic_t *atomic) {
138 atomic_inc_32(&atomic->var);
142 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
143 return atomic_inc_32_nv(&atomic->var);
147 rx_atomic_add(rx_atomic_t *atomic, int change) {
148 atomic_add_32(&atomic->var, change);
152 rx_atomic_dec(rx_atomic_t *atomic) {
153 atomic_dec_32(&atomic->var);
157 rx_atomic_sub(rx_atomic_t *atomic, int change) {
158 atomic_add_32(&object, 0 - change);
161 #elif defined(__GNUC__) && defined(HAVE_SYNC_FETCH_AND_ADD)
/*
 * Any gcc-compatible compiler that provides the legacy __sync builtins:
 * use them directly.
 */
typedef struct {
    volatile int var;
} rx_atomic_t;

/* Static initializer, e.g.: rx_atomic_t c = RX_ATOMIC_INIT(0); */
#define RX_ATOMIC_INIT(i) { (i) }

/* Store 'val'. A plain store: not atomic w.r.t. concurrent RMW ops
 * (matches the behavior of the other ports in this file). */
static inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

/* Return the current value (plain load). */
static inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

/* Atomically add one; old value is discarded. */
static inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    (void)__sync_fetch_and_add(&atomic->var, 1);
}

/* Atomically add one and return the NEW value
 * (add_and_fetch, not fetch_and_add). */
static inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return __sync_add_and_fetch(&atomic->var, 1);
}

/* Atomically add 'change'. */
static inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    (void)__sync_fetch_and_add(&atomic->var, change);
}

/* Atomically subtract one. */
static inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    (void)__sync_fetch_and_sub(&atomic->var, 1);
}

/* Atomically subtract 'change'. */
static inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    (void)__sync_fetch_and_sub(&atomic->var, change);
}
204 /* If we're on a platform where we have no idea how to do atomics,
205 * then we fall back to using a single process wide mutex to protect
206 * all atomic variables. This won't be the quickest thing ever.
209 #ifdef RX_ENABLE_LOCKS
210 extern afs_kmutex_t rx_atomic_mutex;
218 rx_atomic_set(rx_atomic_t *atomic, int val) {
219 MUTEX_ENTER(&rx_atomic_mutex);
221 MUTEX_EXIT(&rx_atomic_mutex);
225 rx_atomic_read(rx_atomic_t *atomic) {
228 MUTEX_ENTER(&rx_atomic_mutex);
230 MUTEX_EXIT(&rx_atomic_mutex);
236 rx_atomic_inc(rx_atomic_t *atomic) {
237 MUTEX_ENTER(&rx_atomic_mutex);
239 MUTEX_EXIT(&rx_atomic_mutex);
243 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
245 MUTEX_ENTER(&rx_atomic_mutex);
247 retval = atomic->var;
248 MUTEX_EXIT(&rx_atomic_mutex);
253 rx_atomic_add(rx_atomic_t *atomic, int change) {
254 MUTEX_ENTER(&rx_atomic_mutex);
255 atomic->var += change;
256 MUTEX_EXIT(&rx_atomic_mutex);
260 rx_atomic_dec(rx_atomic_t *atomic) {
261 MUTEX_ENTER(&rx_atomic_mutex);
263 MUTEX_EXIT(&rx_atomic_mutex);
267 rx_atomic_sub(rx_atomic_t *atomic, int change) {
268 MUTEX_ENTER(&rx_atomic_mutex);
269 atomic->var -= change;
270 MUTEX_ENTER(&rx_atomic_mutex);