/*
 * Copyright (c) 2010 Your Filesystem Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* Static initializer for an rx_atomic_t, e.g.
 *   rx_atomic_t counter = RX_ATOMIC_INIT(0);
 * Expands to a brace-enclosed initializer for the single `var` member. */
#define RX_ATOMIC_INIT(i) { (i) }
33 rx_atomic_set(rx_atomic_t *atomic, int val) {
38 rx_atomic_read(rx_atomic_t *atomic) {
43 rx_atomic_inc(rx_atomic_t *atomic) {
44 InterlockedIncrement(&atomic->var);
48 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
49 return InterlockedIncrement(&atomic->var);
53 rx_atomic_add(rx_atomic_t *atomic, int change) {
54 InterlockedExchangeAdd(&atomic->var, change);
58 rx_atomic_dec(rx_atomic_t *atomic) {
59 InterlockedDecrement(&atomic->var);
63 rx_atomic_sub(rx_atomic_t *atomic, int change) {
64 InterlockedExchangeAdd(&atomic->var, 0 - change);
67 #elif defined(AFS_DARWIN80_ENV) || defined(AFS_USR_DARWIN80_ENV)
69 #include <libkern/OSAtomic.h>
70 #if defined(KERNEL) && !defined(UKERNEL)
71 #define OSAtomicIncrement32 OSIncrementAtomic
72 #define OSAtomicAdd32 OSAddAtomic
73 #define OSAtomicDecrement32 OSDecrementAtomic
81 rx_atomic_set(rx_atomic_t *atomic, int val) {
86 rx_atomic_read(rx_atomic_t *atomic) {
91 rx_atomic_inc(rx_atomic_t *atomic) {
92 OSAtomicIncrement32(&atomic->var);
96 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
97 return OSAtomicIncrement32(&atomic->var);
101 rx_atomic_add(rx_atomic_t *atomic, int change) {
102 OSAtomicAdd32(change, &atomic->var);
106 rx_atomic_dec(rx_atomic_t *atomic) {
107 OSAtomicDecrement32(&atomic->var);
111 rx_atomic_sub(rx_atomic_t *atomic, int change) {
112 OSAtomicAdd32(0 - change, &atomic->var);
114 #elif defined(AFS_LINUX20_ENV) && defined(KERNEL)
115 #include <asm/atomic.h>
117 typedef atomic_t rx_atomic_t;
119 #define rx_atomic_set(X) atomic_set(X)
120 #define rx_atomic_read(X) atomic_read(X)
121 #define rx_atomic_inc(X) atomic_inc(X)
122 #define rx_atomic_inc_and_read(X) atomic_inc_return(X)
123 #define rx_atomic_add(X, V) atomic_add(V, X)
124 #define rx_atomic_dec(X) atomic_dec(X)
125 #define rx_atomic_sub(X, V) atomic_sub(V, X)
127 #elif defined(AFS_SUN58_ENV)
132 volatile unsigned int var;
136 rx_atomic_set(rx_atomic_t *atomic, int val) {
141 rx_atomic_read(rx_atomic_t *atomic) {
146 rx_atomic_inc(rx_atomic_t *atomic) {
147 atomic_inc_32(&atomic->var);
151 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
152 return atomic_inc_32_nv(&atomic->var);
156 rx_atomic_add(rx_atomic_t *atomic, int change) {
157 atomic_add_32(&atomic->var, change);
161 rx_atomic_dec(rx_atomic_t *atomic) {
162 atomic_dec_32(&atomic->var);
166 rx_atomic_sub(rx_atomic_t *atomic, int change) {
167 atomic_add_32(&atomic->var, 0 - change);
170 #elif defined(__GNUC__) && defined(HAVE_SYNC_FETCH_AND_ADD)
/* NOTE(review): the rx_atomic_t typedef for this section was lost in
 * extraction; reconstructed as a volatile int wrapper to match the
 * &atomic->var usage below — confirm against the original header. */
typedef struct {
    volatile int var;
} rx_atomic_t;

/* Store a value; plain (non-atomic) store — TODO(review): confirm. */
static inline void
rx_atomic_set(rx_atomic_t *atomic, int val) {
    atomic->var = val;
}

/* Read the current value (plain load). */
static inline int
rx_atomic_read(rx_atomic_t *atomic) {
    return atomic->var;
}

/* Atomically add one; the fetched value is deliberately discarded. */
static inline void
rx_atomic_inc(rx_atomic_t *atomic) {
    (void)__sync_fetch_and_add(&atomic->var, 1);
}

/* Atomically add one and return the NEW value (add_and_fetch). */
static inline int
rx_atomic_inc_and_read(rx_atomic_t *atomic) {
    return __sync_add_and_fetch(&atomic->var, 1);
}

/* Atomically add `change`. */
static inline void
rx_atomic_add(rx_atomic_t *atomic, int change) {
    (void)__sync_fetch_and_add(&atomic->var, change);
}

/* Atomically subtract one. */
static inline void
rx_atomic_dec(rx_atomic_t *atomic) {
    (void)__sync_fetch_and_sub(&atomic->var, 1);
}

/* Atomically subtract `change`. */
static inline void
rx_atomic_sub(rx_atomic_t *atomic, int change) {
    (void)__sync_fetch_and_sub(&atomic->var, change);
}
/* If we're on a platform where we have no idea how to do atomics,
 * then we fall back to using a single process wide mutex to protect
 * all atomic variables. This won't be the quickest thing ever.
 */
/* The single process-wide lock protecting every rx_atomic_t on platforms
 * with no native atomics; defined in the rx core. */
#ifdef RX_ENABLE_LOCKS
extern afs_kmutex_t rx_atomic_mutex;
#endif /* RX_ENABLE_LOCKS — NOTE(review): #endif reconstructed; it was lost
        * in extraction. */
227 rx_atomic_set(rx_atomic_t *atomic, int val) {
228 MUTEX_ENTER(&rx_atomic_mutex);
230 MUTEX_EXIT(&rx_atomic_mutex);
234 rx_atomic_read(rx_atomic_t *atomic) {
237 MUTEX_ENTER(&rx_atomic_mutex);
239 MUTEX_EXIT(&rx_atomic_mutex);
245 rx_atomic_inc(rx_atomic_t *atomic) {
246 MUTEX_ENTER(&rx_atomic_mutex);
248 MUTEX_EXIT(&rx_atomic_mutex);
252 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
254 MUTEX_ENTER(&rx_atomic_mutex);
256 retval = atomic->var;
257 MUTEX_EXIT(&rx_atomic_mutex);
262 rx_atomic_add(rx_atomic_t *atomic, int change) {
263 MUTEX_ENTER(&rx_atomic_mutex);
264 atomic->var += change;
265 MUTEX_EXIT(&rx_atomic_mutex);
269 rx_atomic_dec(rx_atomic_t *atomic) {
270 MUTEX_ENTER(&rx_atomic_mutex);
272 MUTEX_EXIT(&rx_atomic_mutex);
276 rx_atomic_sub(rx_atomic_t *atomic, int change) {
277 MUTEX_ENTER(&rx_atomic_mutex);
278 atomic->var -= change;
279 MUTEX_EXIT(&rx_atomic_mutex);