/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */
#include <stdint.h>

#if defined __CONFIG_GENERIC_I386__ || defined __CONFIG_I386__
# warning this file is only good for 486 or better
#endif
typedef int8_t atomic8_t;
typedef uint8_t uatomic8_t;
typedef int_fast8_t atomic_fast8_t;
typedef uint_fast8_t uatomic_fast8_t;

typedef int16_t atomic16_t;
typedef uint16_t uatomic16_t;
typedef int_fast16_t atomic_fast16_t;
typedef uint_fast16_t uatomic_fast16_t;

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;
#ifdef UP
# define LOCK_PREFIX	/* nothing */
#else
# define LOCK_PREFIX "lock;"
#endif
#define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                    \
     __asm__ __volatile__ (LOCK_PREFIX "cmpxchgb %b2, %1"                    \
                           : "=a" (ret), "=m" (*mem)                         \
                           : "q" (newval), "m" (*mem), "0" (oldval));        \
     ret; })
#define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                    \
     __asm__ __volatile__ (LOCK_PREFIX "cmpxchgw %w2, %1"                    \
                           : "=a" (ret), "=m" (*mem)                         \
                           : "r" (newval), "m" (*mem), "0" (oldval));        \
     ret; })
#define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                    \
     __asm__ __volatile__ (LOCK_PREFIX "cmpxchgl %2, %1"                     \
                           : "=a" (ret), "=m" (*mem)                         \
                           : "r" (newval), "m" (*mem), "0" (oldval));        \
     ret; })
/* XXX We do not really need 64-bit compare-and-exchange.  At least
   not at the moment.  Using it would mean causing portability
   problems, since not many other 32-bit architectures support such an
   operation.  So don't define any code for now.  If it is really
   going to be used, the code below can be used on Intel Pentium and
   later, but NOT on i486.  */
#if 1
# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret = *(mem); abort (); ret = (newval); ret = (oldval); })
#else
# ifdef __PIC__
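/* With -fPIC, %ebx is reserved as the GOT pointer and may not be named
   as a fixed operand.  The low word of the new value is therefore
   passed in %edi or %esi ("DS") and swapped into %ebx around the
   cmpxchg8b, restoring %ebx before the asm ends.  */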
#  define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                    \
     __asm__ __volatile__ ("xchgl %2, %%ebx\n\t"                             \
                           LOCK_PREFIX "cmpxchg8b %1\n\t"                    \
                           "xchgl %2, %%ebx"                                 \
                           : "=A" (ret), "=m" (*mem)                         \
                           : "DS" (((unsigned long long int) (newval))       \
                                   & 0xffffffff),                            \
                             "c" (((unsigned long long int) (newval)) >> 32), \
                             "m" (*mem),                                     \
                             "a" (((unsigned long long int) (oldval))        \
                                  & 0xffffffff),                             \
                             "d" (((unsigned long long int) (oldval)) >> 32)); \
     ret; })
# else
#  define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \
  ({ __typeof (*mem) ret;                                                    \
     __asm__ __volatile__ (LOCK_PREFIX "cmpxchg8b %1"                        \
                           : "=A" (ret), "=m" (*mem)                         \
                           : "b" (((unsigned long long int) (newval))        \
                                  & 0xffffffff),                             \
                             "c" (((unsigned long long int) (newval)) >> 32), \
                             "m" (*mem),                                     \
                             "a" (((unsigned long long int) (oldval))        \
                                  & 0xffffffff),                             \
                             "d" (((unsigned long long int) (oldval)) >> 32)); \
     ret; })
# endif
#endif
/* Note that we need no lock prefix.  */
#define atomic_exchange_acq(mem, newvalue) \
  ({ __typeof (*mem) result;                                                 \
     if (sizeof (*mem) == 1)                                                 \
       __asm__ __volatile__ ("xchgb %b0, %1"                                 \
                             : "=q" (result), "=m" (*mem)                    \
                             : "0" (newvalue), "m" (*mem));                  \
     else if (sizeof (*mem) == 2)                                            \
       __asm__ __volatile__ ("xchgw %w0, %1"                                 \
                             : "=r" (result), "=m" (*mem)                    \
                             : "0" (newvalue), "m" (*mem));                  \
     else if (sizeof (*mem) == 4)                                            \
       __asm__ __volatile__ ("xchgl %0, %1"                                  \
                             : "=r" (result), "=m" (*mem)                    \
                             : "0" (newvalue), "m" (*mem));                  \
     else                                                                    \
       {                                                                     \
         result = 0;                                                         \
         abort ();                                                           \
       }                                                                     \
     result; })
#define atomic_exchange_and_add(mem, value) \
  ({ __typeof (*mem) __result;                                               \
     __typeof (value) __addval = (value);                                    \
     if (sizeof (*mem) == 1)                                                 \
       __asm__ __volatile__ (LOCK_PREFIX "xaddb %b0, %1"                     \
                             : "=q" (__result), "=m" (*mem)                  \
                             : "0" (__addval), "m" (*mem));                  \
     else if (sizeof (*mem) == 2)                                            \
       __asm__ __volatile__ (LOCK_PREFIX "xaddw %w0, %1"                     \
                             : "=r" (__result), "=m" (*mem)                  \
                             : "0" (__addval), "m" (*mem));                  \
     else if (sizeof (*mem) == 4)                                            \
       __asm__ __volatile__ (LOCK_PREFIX "xaddl %0, %1"                      \
                             : "=r" (__result), "=m" (*mem)                  \
                             : "0" (__addval), "m" (*mem));                  \
     else                                                                    \
       {                                                                     \
         __typeof (mem) __memp = (mem);                                      \
         __typeof (*mem) __tmpval;                                           \
         __result = *__memp;                                                 \
         do                                                                  \
           __tmpval = __result;                                              \
         while ((__result = __arch_compare_and_exchange_val_64_acq           \
                 (__memp, __result + __addval, __result)) != __tmpval);      \
       }                                                                     \
     __result; })
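/* Usage sketch (editorial addition): atomic_exchange_and_add returns
   the value the object held *before* the addition, so it can hand out
   unique ids.

     static atomic32_t next_id;

     static int32_t
     new_id (void)
     {
       return atomic_exchange_and_add (&next_id, 1);
     }
*/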
#define atomic_add(mem, value) \
  (void) ({ if (__builtin_constant_p (value) && (value) == 1)                \
              atomic_increment (mem);                                        \
            else if (__builtin_constant_p (value) && (value) == -1)          \
              atomic_decrement (mem);                                        \
            else if (sizeof (*mem) == 1)                                     \
              __asm__ __volatile__ (LOCK_PREFIX "addb %b1, %0"               \
                                    : "=m" (*mem)                            \
                                    : "iq" (value), "m" (*mem));             \
            else if (sizeof (*mem) == 2)                                     \
              __asm__ __volatile__ (LOCK_PREFIX "addw %w1, %0"               \
                                    : "=m" (*mem)                            \
                                    : "ir" (value), "m" (*mem));             \
            else if (sizeof (*mem) == 4)                                     \
              __asm__ __volatile__ (LOCK_PREFIX "addl %1, %0"                \
                                    : "=m" (*mem)                            \
                                    : "ir" (value), "m" (*mem));             \
            else                                                             \
              {                                                              \
                __typeof (value) __addval = (value);                         \
                __typeof (mem) __memp = (mem);                               \
                __typeof (*mem) __oldval = *__memp;                          \
                __typeof (*mem) __tmpval;                                    \
                do                                                           \
                  __tmpval = __oldval;                                       \
                while ((__oldval = __arch_compare_and_exchange_val_64_acq    \
                        (__memp, __oldval + __addval, __oldval)) != __tmpval); \
              }                                                              \
            })
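/* Usage note (editorial addition): unlike atomic_exchange_and_add,
   atomic_add yields no result; atomic_add (&stats_counter, 16) simply
   performs one locked add on the object (stats_counter being any 1-,
   2-, 4- or 8-byte integer variable).  */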
#define atomic_add_negative(mem, value) \
  ({ unsigned char __result;                                                 \
     if (sizeof (*mem) == 1)                                                 \
       __asm__ __volatile__ (LOCK_PREFIX "addb %b2, %0; sets %1"             \
                             : "=m" (*mem), "=qm" (__result)                 \
                             : "iq" (value), "m" (*mem));                    \
     else if (sizeof (*mem) == 2)                                            \
       __asm__ __volatile__ (LOCK_PREFIX "addw %w2, %0; sets %1"             \
                             : "=m" (*mem), "=qm" (__result)                 \
                             : "ir" (value), "m" (*mem));                    \
     else if (sizeof (*mem) == 4)                                            \
       __asm__ __volatile__ (LOCK_PREFIX "addl %2, %0; sets %1"              \
                             : "=m" (*mem), "=qm" (__result)                 \
                             : "ir" (value), "m" (*mem));                    \
     else                                                                    \
       abort ();                                                             \
     __result; })
#define atomic_add_zero(mem, value) \
  ({ unsigned char __result;                                                 \
     if (sizeof (*mem) == 1)                                                 \
       __asm__ __volatile__ (LOCK_PREFIX "addb %b2, %0; setz %1"             \
                             : "=m" (*mem), "=qm" (__result)                 \
                             : "iq" (value), "m" (*mem));                    \
     else if (sizeof (*mem) == 2)                                            \
       __asm__ __volatile__ (LOCK_PREFIX "addw %w2, %0; setz %1"             \
                             : "=m" (*mem), "=qm" (__result)                 \
                             : "ir" (value), "m" (*mem));                    \
     else if (sizeof (*mem) == 4)                                            \
       __asm__ __volatile__ (LOCK_PREFIX "addl %2, %0; setz %1"              \
                             : "=m" (*mem), "=qm" (__result)                 \
                             : "ir" (value), "m" (*mem));                    \
     else                                                                    \
       abort ();                                                             \
     __result; })
#define atomic_increment(mem) \
  (void) ({ if (sizeof (*mem) == 1)                                          \
              __asm__ __volatile__ (LOCK_PREFIX "incb %b0"                   \
                                    : "=m" (*mem)                            \
                                    : "m" (*mem));                           \
            else if (sizeof (*mem) == 2)                                     \
              __asm__ __volatile__ (LOCK_PREFIX "incw %w0"                   \
                                    : "=m" (*mem)                            \
                                    : "m" (*mem));                           \
            else if (sizeof (*mem) == 4)                                     \
              __asm__ __volatile__ (LOCK_PREFIX "incl %0"                    \
                                    : "=m" (*mem)                            \
                                    : "m" (*mem));                           \
            else                                                             \
              {                                                              \
                __typeof (mem) __memp = (mem);                               \
                __typeof (*mem) __oldval = *__memp;                          \
                __typeof (*mem) __tmpval;                                    \
                do                                                           \
                  __tmpval = __oldval;                                       \
                while ((__oldval = __arch_compare_and_exchange_val_64_acq    \
                        (__memp, __oldval + 1, __oldval)) != __tmpval);      \
              }                                                              \
            })
#define atomic_increment_and_test(mem) \
  ({ unsigned char __result;                                                 \
     if (sizeof (*mem) == 1)                                                 \
       __asm__ __volatile__ (LOCK_PREFIX "incb %b0; sete %1"                 \
                             : "=m" (*mem), "=qm" (__result)                 \
                             : "m" (*mem));                                  \
     else if (sizeof (*mem) == 2)                                            \
       __asm__ __volatile__ (LOCK_PREFIX "incw %w0; sete %1"                 \
                             : "=m" (*mem), "=qm" (__result)                 \
                             : "m" (*mem));                                  \
     else if (sizeof (*mem) == 4)                                            \
       __asm__ __volatile__ (LOCK_PREFIX "incl %0; sete %1"                  \
                             : "=m" (*mem), "=qm" (__result)                 \
                             : "m" (*mem));                                  \
     else                                                                    \
       abort ();                                                             \
     __result; })
#define atomic_decrement(mem) \
  (void) ({ if (sizeof (*mem) == 1)                                          \
              __asm__ __volatile__ (LOCK_PREFIX "decb %b0"                   \
                                    : "=m" (*mem)                            \
                                    : "m" (*mem));                           \
            else if (sizeof (*mem) == 2)                                     \
              __asm__ __volatile__ (LOCK_PREFIX "decw %w0"                   \
                                    : "=m" (*mem)                            \
                                    : "m" (*mem));                           \
            else if (sizeof (*mem) == 4)                                     \
              __asm__ __volatile__ (LOCK_PREFIX "decl %0"                    \
                                    : "=m" (*mem)                            \
                                    : "m" (*mem));                           \
            else                                                             \
              {                                                              \
                __typeof (mem) __memp = (mem);                               \
                __typeof (*mem) __oldval = *__memp;                          \
                __typeof (*mem) __tmpval;                                    \
                do                                                           \
                  __tmpval = __oldval;                                       \
                while ((__oldval = __arch_compare_and_exchange_val_64_acq    \
                        (__memp, __oldval - 1, __oldval)) != __tmpval);      \
              }                                                              \
            })
#define atomic_decrement_and_test(mem) \
  ({ unsigned char __result;                                                 \
     if (sizeof (*mem) == 1)                                                 \
       __asm__ __volatile__ (LOCK_PREFIX "decb %b0; sete %1"                 \
                             : "=m" (*mem), "=qm" (__result)                 \
                             : "m" (*mem));                                  \
     else if (sizeof (*mem) == 2)                                            \
       __asm__ __volatile__ (LOCK_PREFIX "decw %w0; sete %1"                 \
                             : "=m" (*mem), "=qm" (__result)                 \
                             : "m" (*mem));                                  \
     else if (sizeof (*mem) == 4)                                            \
       __asm__ __volatile__ (LOCK_PREFIX "decl %0; sete %1"                  \
                             : "=m" (*mem), "=qm" (__result)                 \
                             : "m" (*mem));                                  \
     else                                                                    \
       abort ();                                                             \
     __result; })
#define atomic_bit_set(mem, bit) \
  (void) ({ if (sizeof (*mem) == 1)                                          \
              __asm__ __volatile__ (LOCK_PREFIX "orb %b2, %0"                \
                                    : "=m" (*mem)                            \
                                    : "m" (*mem), "iq" (1 << (bit)));        \
            else if (sizeof (*mem) == 2)                                     \
              __asm__ __volatile__ (LOCK_PREFIX "orw %w2, %0"                \
                                    : "=m" (*mem)                            \
                                    : "m" (*mem), "ir" (1 << (bit)));        \
            else if (sizeof (*mem) == 4)                                     \
              __asm__ __volatile__ (LOCK_PREFIX "orl %2, %0"                 \
                                    : "=m" (*mem)                            \
                                    : "m" (*mem), "ir" (1 << (bit)));        \
            else                                                             \
              abort ();                                                      \
            })
#define atomic_bit_test_set(mem, bit) \
  ({ unsigned char __result;                                                 \
     if (sizeof (*mem) == 1)                                                 \
       __asm__ __volatile__ (LOCK_PREFIX "btsb %3, %1; setc %0"              \
                             : "=q" (__result), "=m" (*mem)                  \
                             : "m" (*mem), "ir" (bit));                      \
     else if (sizeof (*mem) == 2)                                            \
       __asm__ __volatile__ (LOCK_PREFIX "btsw %3, %1; setc %0"              \
                             : "=q" (__result), "=m" (*mem)                  \
                             : "m" (*mem), "ir" (bit));                      \
     else if (sizeof (*mem) == 4)                                            \
       __asm__ __volatile__ (LOCK_PREFIX "btsl %3, %1; setc %0"              \
                             : "=q" (__result), "=m" (*mem)                  \
                             : "m" (*mem), "ir" (bit));                      \
     else                                                                    \
       abort ();                                                             \
     __result; })
#define atomic_delay() __asm__ ("rep; nop")
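/* Usage note (editorial addition): "rep; nop" is the encoding of the
   PAUSE hint, which tells the processor it is inside a spin-wait loop;
   on CPUs that predate the hint it executes as a plain nop.

     while (atomic_bit_test_set (&some_flags, 0) != 0)
       atomic_delay ();
*/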