2 * Copyright (c) 2010 Your Filesystem Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
14 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
16 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
19 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
20 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
22 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25 #ifndef OPENAFS_RX_ATOMIC_H
26 #define OPENAFS_RX_ATOMIC_H 1
/* Static initialiser for an rx_atomic_t, e.g. rx_atomic_t a = RX_ATOMIC_INIT(0).
 * Every backend below stores the counter as the single (first) integer member
 * of a struct (or Linux's atomic_t), so a brace-enclosed value fits them all. */
28 #define RX_ATOMIC_INIT(i) { (i) }
/* Windows: implement the rx atomic operations with the Win32 Interlocked*
 * intrinsics, which are full-barrier atomic read-modify-write operations.
 * NOTE(review): some intervening lines (return types, a few bodies, closing
 * braces) are elided in this extract. */
/* Store val in the counter. */
36 rx_atomic_set(rx_atomic_t *atomic, int val) {
/* Read the current counter value. */
41 rx_atomic_read(rx_atomic_t *atomic) {
/* Atomically add 1; the intrinsic's return value is discarded. */
46 rx_atomic_inc(rx_atomic_t *atomic) {
47 InterlockedIncrement(&atomic->var);
/* InterlockedIncrement returns the incremented (new) value. */
51 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
52 return InterlockedIncrement(&atomic->var);
/* Atomically add change; the pre-add value returned by the intrinsic is
 * discarded. */
56 rx_atomic_add(rx_atomic_t *atomic, int change) {
57 InterlockedExchangeAdd(&atomic->var, change);
/* InterlockedExchangeAdd returns the value held *before* the addition, so add
 * change once more to hand back the post-add value, matching the other
 * platforms' add_and_read semantics. */
61 rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
62 return InterlockedExchangeAdd(&atomic->var, change) + change;
/* Atomically subtract 1; result discarded. */
66 rx_atomic_dec(rx_atomic_t *atomic) {
67 InterlockedDecrement(&atomic->var);
/* InterlockedDecrement returns the decremented (new) value. */
71 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
72 return InterlockedDecrement(&atomic->var);
/* Subtract by adding the negation; Win32 has no InterlockedSubtract. */
76 rx_atomic_sub(rx_atomic_t *atomic, int change) {
77 InterlockedExchangeAdd(&atomic->var, 0 - change);
80 #elif defined(AFS_DARWIN80_ENV) || defined(AFS_USR_DARWIN80_ENV)
82 #include <libkern/OSAtomic.h>
83 #if defined(KERNEL) && !defined(UKERNEL)
/* BUG FIX: the xnu kernel's OSIncrementAtomic/OSAddAtomic/OSDecrementAtomic
 * return the value the variable held *before* the operation, whereas the
 * user-space OSAtomicIncrement32/OSAtomicAdd32/OSAtomicDecrement32 names they
 * stand in for return the *new* value.  A bare name-for-name #define therefore
 * made the rx_atomic_*_and_read wrappers below return a stale value in KERNEL
 * builds.  Adjust the kernel results so both environments yield new-value
 * semantics.  (A is evaluated twice in OSAtomicAdd32 — callers only ever pass
 * a simple variable.) */
84 #define OSAtomicIncrement32(X) (OSIncrementAtomic(X) + 1)
85 #define OSAtomicAdd32(A, X) (OSAddAtomic((A), (X)) + (A))
86 #define OSAtomicDecrement32(X) (OSDecrementAtomic(X) - 1)
/* Darwin: wrap the libkern OSAtomic*32 primitives.  NOTE(review): some
 * intervening lines are elided in this extract. */
/* Store val in the counter. */
94 rx_atomic_set(rx_atomic_t *atomic, int val) {
/* Read the current counter value. */
99 rx_atomic_read(rx_atomic_t *atomic) {
/* Atomically add 1; result discarded. */
104 rx_atomic_inc(rx_atomic_t *atomic) {
105 OSAtomicIncrement32(&atomic->var);
/* User-space OSAtomicIncrement32 returns the new (incremented) value.
 * NOTE(review): in KERNEL builds this name is supplied by a macro above that
 * maps onto OSIncrementAtomic; verify the mapping preserves the new-value
 * return convention — OSIncrementAtomic itself returns the pre-increment
 * value. */
109 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
110 return OSAtomicIncrement32(&atomic->var);
/* Atomically add change (argument order: amount, address); result discarded. */
114 rx_atomic_add(rx_atomic_t *atomic, int change) {
115 OSAtomicAdd32(change, &atomic->var);
/* User-space OSAtomicAdd32 returns the new (post-add) value; same KERNEL
 * caveat as rx_atomic_inc_and_read above. */
119 rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
120 return OSAtomicAdd32(change, &atomic->var);
/* Atomically subtract 1; result discarded. */
124 rx_atomic_dec(rx_atomic_t *atomic) {
125 OSAtomicDecrement32(&atomic->var);
/* User-space OSAtomicDecrement32 returns the new (decremented) value; same
 * KERNEL caveat as rx_atomic_inc_and_read above. */
129 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
130 return OSAtomicDecrement32(&atomic->var);
/* Subtract by adding the negation. */
134 rx_atomic_sub(rx_atomic_t *atomic, int change) {
135 OSAtomicAdd32(0 - change, &atomic->var);
137 #elif defined(AFS_LINUX26_ENV) && defined(KERNEL)
/* Linux kernel: atomic_t already provides exactly the operations rx needs,
 * so map the rx names straight onto the kernel API.  atomic_*_return()
 * return the new (post-operation) value, matching the other backends'
 * *_and_read semantics.  Note the kernel's argument order is (delta, ptr). */
138 #include <asm/atomic.h>
140 typedef atomic_t rx_atomic_t;
142 #define rx_atomic_set(X, V) atomic_set(X, V)
143 #define rx_atomic_read(X) atomic_read(X)
144 #define rx_atomic_inc(X) atomic_inc(X)
145 #define rx_atomic_inc_and_read(X) atomic_inc_return(X)
146 #define rx_atomic_add(X, V) atomic_add(V, X)
/* BUG FIX: the macro body previously ended in a stray ';', which breaks any
 * use of rx_atomic_add_and_read() in expression context (e.g. as an operand
 * or in a conditional) and double-terminates it as a statement.  No other
 * macro in this block carries one. */
147 #define rx_atomic_add_and_read(X, V) atomic_add_return(V, X)
148 #define rx_atomic_dec(X) atomic_dec(X)
149 #define rx_atomic_dec_and_read(X) atomic_dec_return(X)
150 #define rx_atomic_sub(X, V) atomic_sub(V, X)
152 #elif defined(AFS_SUN510_ENV) || (defined(AFS_SUN5_ENV) && defined(KERNEL) && !defined(UKERNEL))
/* Solaris: use the atomic_ops(3C)/atomic_ops(9F) primitives.  NOTE(review):
 * some intervening lines are elided in this extract. */
154 # if defined(KERNEL) && !defined(UKERNEL)
155 # include <sys/atomic.h>
/* Solaris 10 introduced the inc/dec convenience forms; synthesise them from
 * atomic_add_32() on older releases.  The *_nv variants return the new
 * (post-operation) value. */
160 #ifndef AFS_SUN510_ENV
161 # define atomic_inc_32(X) atomic_add_32((X), 1)
162 # define atomic_inc_32_nv(X) atomic_add_32_nv((X), 1)
163 # define atomic_dec_32(X) atomic_add_32((X), -1)
164 # define atomic_dec_32_nv(X) atomic_add_32_nv((X), -1)
/* Counter member; the atomic_*_32() routines operate on a volatile 32-bit
 * unsigned integer. */
168 volatile unsigned int var;
/* Store val in the counter. */
172 rx_atomic_set(rx_atomic_t *atomic, int val) {
/* Read the current counter value. */
177 rx_atomic_read(rx_atomic_t *atomic) {
/* Atomically add 1; result discarded. */
182 rx_atomic_inc(rx_atomic_t *atomic) {
183 atomic_inc_32(&atomic->var);
/* atomic_inc_32_nv returns the new (incremented) value. */
187 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
188 return atomic_inc_32_nv(&atomic->var);
/* Atomically add change; result discarded. */
192 rx_atomic_add(rx_atomic_t *atomic, int change) {
193 atomic_add_32(&atomic->var, change);
/* atomic_add_32_nv returns the new (post-add) value. */
197 rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
198 return atomic_add_32_nv(&atomic->var, change);
/* Atomically subtract 1; result discarded. */
202 rx_atomic_dec(rx_atomic_t *atomic) {
203 atomic_dec_32(&atomic->var);
/* atomic_dec_32_nv returns the new (decremented) value. */
207 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
208 return atomic_dec_32_nv(&atomic->var);
/* Subtract by adding the negation. */
212 rx_atomic_sub(rx_atomic_t *atomic, int change) {
213 atomic_add_32(&atomic->var, 0 - change);
216 #elif defined(__GNUC__) && defined(HAVE_SYNC_FETCH_AND_ADD)
/* GCC-compatible compilers: use the legacy __sync_* full-barrier builtins
 * (availability detected by configure via HAVE_SYNC_FETCH_AND_ADD).
 * NOTE(review): some intervening lines are elided in this extract. */
/* Store val in the counter. */
223 rx_atomic_set(rx_atomic_t *atomic, int val) {
/* Read the current counter value. */
228 rx_atomic_read(rx_atomic_t *atomic) {
/* Atomically add 1; the old value returned by the builtin is discarded. */
233 rx_atomic_inc(rx_atomic_t *atomic) {
234 (void)__sync_fetch_and_add(&atomic->var, 1);
/* __sync_add_and_fetch returns the new (post-increment) value. */
238 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
239 return __sync_add_and_fetch(&atomic->var, 1);
/* Atomically add change; the old value returned by the builtin is discarded. */
243 rx_atomic_add(rx_atomic_t *atomic, int change) {
244 (void)__sync_fetch_and_add(&atomic->var, change);
/* Atomically add change and return the resulting (post-add) value. */
248 rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
/* BUG FIX: this previously used __sync_fetch_and_add, which returns the value
 * held *before* the addition.  Every other backend's rx_atomic_add_and_read
 * (Windows, Darwin, Linux, Solaris, mutex fallback) — and the sibling
 * rx_atomic_inc_and_read above — returns the value *after* the operation.
 * __sync_add_and_fetch returns the new value, restoring that contract. */
249 return __sync_add_and_fetch(&atomic->var, change);
/* Atomically subtract 1; the old value returned by the builtin is discarded. */
253 rx_atomic_dec(rx_atomic_t *atomic) {
254 (void)__sync_fetch_and_sub(&atomic->var, 1);
/* __sync_sub_and_fetch returns the new (post-decrement) value. */
258 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
259 return __sync_sub_and_fetch(&atomic->var, 1);
/* Atomically subtract change; the old value returned by the builtin is
 * discarded. */
263 rx_atomic_sub(rx_atomic_t *atomic, int change) {
264 (void)__sync_fetch_and_sub(&atomic->var, change);
269 /* If we're on a platform where we have no idea how to do atomics,
270 * then we fall back to using a single process wide mutex to protect
271 * all atomic variables. This won't be the quickest thing ever.
274 #ifdef RX_ENABLE_LOCKS
/* One global lock serialises every rx_atomic_t in the process.  NOTE(review):
 * the defining/initialising translation unit is not visible in this extract —
 * confirm rx_atomic_mutex is initialised before first use.  Several elided
 * lines (bodies, returns, closing braces) are also missing from this view. */
275 extern afs_kmutex_t rx_atomic_mutex;
/* Store val in the counter under the global lock. */
283 rx_atomic_set(rx_atomic_t *atomic, int val) {
284 MUTEX_ENTER(&rx_atomic_mutex);
286 MUTEX_EXIT(&rx_atomic_mutex);
/* Read the counter under the global lock so the read is consistent with
 * concurrent updates. */
290 rx_atomic_read(rx_atomic_t *atomic) {
293 MUTEX_ENTER(&rx_atomic_mutex);
295 MUTEX_EXIT(&rx_atomic_mutex);
/* Increment under the global lock. */
301 rx_atomic_inc(rx_atomic_t *atomic) {
302 MUTEX_ENTER(&rx_atomic_mutex);
304 MUTEX_EXIT(&rx_atomic_mutex);
/* Return the post-increment value; the update and the capture of retval both
 * happen inside a single critical section, making the pair atomic. */
308 rx_atomic_inc_and_read(rx_atomic_t *atomic) {
310 MUTEX_ENTER(&rx_atomic_mutex);
312 retval = atomic->var;
313 MUTEX_EXIT(&rx_atomic_mutex);
/* Add change under the global lock. */
318 rx_atomic_add(rx_atomic_t *atomic, int change) {
319 MUTEX_ENTER(&rx_atomic_mutex);
320 atomic->var += change;
321 MUTEX_EXIT(&rx_atomic_mutex);
/* Add change and return the post-add value inside one critical section. */
325 rx_atomic_add_and_read(rx_atomic_t *atomic, int change) {
328 MUTEX_ENTER(&rx_atomic_mutex);
329 atomic->var += change;
330 retval = atomic->var;
331 MUTEX_EXIT(&rx_atomic_mutex);
/* Decrement under the global lock. */
337 rx_atomic_dec(rx_atomic_t *atomic) {
338 MUTEX_ENTER(&rx_atomic_mutex);
340 MUTEX_EXIT(&rx_atomic_mutex);
/* Return the post-decrement value; update and read share one critical
 * section. */
344 rx_atomic_dec_and_read(rx_atomic_t *atomic) {
346 MUTEX_ENTER(&rx_atomic_mutex);
348 retval = atomic->var;
349 MUTEX_EXIT(&rx_atomic_mutex);
/* Subtract change under the global lock. */
355 rx_atomic_sub(rx_atomic_t *atomic, int change) {
356 MUTEX_ENTER(&rx_atomic_mutex);
357 atomic->var -= change;
358 MUTEX_EXIT(&rx_atomic_mutex);