/* Low-level functions for atomic operations.  Mips version.
   Copyright (C) 2005 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#ifndef _MIPS_BITS_ATOMIC_H
#define _MIPS_BITS_ATOMIC_H 1

#include <inttypes.h>
#include <sgidefs.h>

typedef int32_t atomic32_t;
typedef uint32_t uatomic32_t;
typedef int_fast32_t atomic_fast32_t;
typedef uint_fast32_t uatomic_fast32_t;

typedef int64_t atomic64_t;
typedef uint64_t uatomic64_t;
typedef int_fast64_t atomic_fast64_t;
typedef uint_fast64_t uatomic_fast64_t;

typedef intptr_t atomicptr_t;
typedef uintptr_t uatomicptr_t;
typedef intmax_t atomic_max_t;
typedef uintmax_t uatomic_max_t;

#if _MIPS_SIM == _ABIO32
#define MIPS_PUSH_MIPS2 ".set mips2\n\t"
#else
#define MIPS_PUSH_MIPS2
#endif

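/* ll/sc and sync are MIPS II instructions, and O32 code may otherwise be
   assembled for MIPS I; the ".set mips2" above makes the assembler accept
   them without changing the ABI.  The other ABIs imply MIPS III or
   better, so no override is needed there.  */
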
/* See the comments in <sys/asm.h> about the use of the sync instruction.  */
#ifndef MIPS_SYNC
# define MIPS_SYNC	sync
#endif

/* Certain revisions of the R10000 Processor need an LL/SC Workaround
   enabled.  Revisions before 3.0 misbehave on atomic operations, and
   Revs 2.6 and lower deadlock after several seconds due to other errata.

   To quote the R10K Errata:
      Workaround: The basic idea is to inhibit the four instructions
      from simultaneously becoming active in R10000.  Padding all
      ll/sc sequences with nops or changing the looping branch in the
      routines to a branch likely (which is always predicted taken
      by R10000) will work.  The nops should go after the loop, and the
      number of them should be 28.  This number could be decremented for
      each additional instruction in the ll/sc loop such as the lock
      modifier(s) between the ll and sc, the looping branch and its
      delay slot.  For typical short routines with one ll/sc loop, any
      instructions after the loop could also count as a decrement.  The
      nop workaround pollutes the cache more but would be a few cycles
      faster if all the code is in the cache and the looping branch
      is predicted not taken.  */

#ifdef _MIPS_ARCH_R10000
#define R10K_BEQZ_INSN "beqzl"
#else
#define R10K_BEQZ_INSN "beqz"
#endif

#define MIPS_SYNC_STR_2(X) #X
#define MIPS_SYNC_STR_1(X) MIPS_SYNC_STR_2(X)
#define MIPS_SYNC_STR MIPS_SYNC_STR_1(MIPS_SYNC)

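/* The two-level expansion is what forces MIPS_SYNC to be macro-expanded
   (to "sync" by default) before being stringized; stringizing it in a
   single step would produce the literal text "MIPS_SYNC" instead.  */
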
/* Compare and exchange.  For all of the "xxx" routines, we expect a
   "__prev" and a "__cmp" variable to be provided by the enclosing scope,
   in which values are returned.  */

#define __arch_compare_and_exchange_xxx_8_int(mem, newval, oldval, rel, acq) \
  (abort (), __prev = __cmp = 0)

#define __arch_compare_and_exchange_xxx_16_int(mem, newval, oldval, rel, acq) \
  (abort (), __prev = __cmp = 0)

#define __arch_compare_and_exchange_xxx_32_int(mem, newval, oldval, rel, acq) \
  __asm__ __volatile__ (						      \
     ".set push\n\t"							      \
     MIPS_PUSH_MIPS2							      \
     rel "\n"								      \
     "1:\t"								      \
     "ll %0,%5\n\t"							      \
     "move %1,$0\n\t"							      \
     "bne %0,%3,2f\n\t"							      \
     "move %1,%4\n\t"							      \
     "sc %1,%2\n\t"							      \
     R10K_BEQZ_INSN" %1,1b\n"						      \
     acq "\n\t"								      \
     ".set pop\n"							      \
     "2:\n"								      \
	      : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem)		      \
	      : "r" (oldval), "r" (newval), "m" (*mem)			      \
	      : "memory")

#if _MIPS_SIM == _ABIO32
/* We can't do an atomic 64-bit operation in O32.  */
#define __arch_compare_and_exchange_xxx_64_int(mem, newval, oldval, rel, acq) \
  (abort (), __prev = __cmp = 0)
#else
#define __arch_compare_and_exchange_xxx_64_int(mem, newval, oldval, rel, acq) \
  __asm__ __volatile__ ("\n"						      \
     ".set push\n\t"							      \
     MIPS_PUSH_MIPS2							      \
     rel "\n"								      \
     "1:\t"								      \
     "lld %0,%5\n\t"							      \
     "move %1,$0\n\t"							      \
     "bne %0,%3,2f\n\t"							      \
     "move %1,%4\n\t"							      \
     "scd %1,%2\n\t"							      \
     R10K_BEQZ_INSN" %1,1b\n"						      \
     acq "\n\t"								      \
     ".set pop\n"							      \
     "2:\n"								      \
	      : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem)		      \
	      : "r" (oldval), "r" (newval), "m" (*mem)			      \
	      : "memory")
#endif

/* For all "bool" routines, we return FALSE if the exchange was successful.  */

#define __arch_compare_and_exchange_bool_8_int(mem, new, old, rel, acq)      \
({ __typeof (*mem) __prev; int __cmp;					      \
   __arch_compare_and_exchange_xxx_8_int(mem, new, old, rel, acq);	      \
   !__cmp; })

#define __arch_compare_and_exchange_bool_16_int(mem, new, old, rel, acq)     \
({ __typeof (*mem) __prev; int __cmp;					      \
   __arch_compare_and_exchange_xxx_16_int(mem, new, old, rel, acq);	      \
   !__cmp; })

#define __arch_compare_and_exchange_bool_32_int(mem, new, old, rel, acq)     \
({ __typeof (*mem) __prev; int __cmp;					      \
   __arch_compare_and_exchange_xxx_32_int(mem, new, old, rel, acq);	      \
   !__cmp; })

#define __arch_compare_and_exchange_bool_64_int(mem, new, old, rel, acq)     \
({ __typeof (*mem) __prev; int __cmp;					      \
   __arch_compare_and_exchange_xxx_64_int(mem, new, old, rel, acq);	      \
   !__cmp; })

/* For all "val" routines, return the old value whether or not the
   exchange was successful.  */

#define __arch_compare_and_exchange_val_8_int(mem, new, old, rel, acq)	      \
({ __typeof (*mem) __prev; int __cmp;					      \
   __arch_compare_and_exchange_xxx_8_int(mem, new, old, rel, acq);	      \
   (__typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_16_int(mem, new, old, rel, acq)      \
({ __typeof (*mem) __prev; int __cmp;					      \
   __arch_compare_and_exchange_xxx_16_int(mem, new, old, rel, acq);	      \
   (__typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_32_int(mem, new, old, rel, acq)      \
({ __typeof (*mem) __prev; int __cmp;					      \
   __arch_compare_and_exchange_xxx_32_int(mem, new, old, rel, acq);	      \
   (__typeof (*mem))__prev; })

#define __arch_compare_and_exchange_val_64_int(mem, new, old, rel, acq)      \
({ __typeof (*mem) __prev; int __cmp;					      \
   __arch_compare_and_exchange_xxx_64_int(mem, new, old, rel, acq);	      \
   (__typeof (*mem))__prev; })

/* Compare and exchange with "acquire" semantics, i.e. barrier after.  */

#define atomic_compare_and_exchange_bool_acq(mem, new, old)	\
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int,	\
			mem, new, old, "", MIPS_SYNC_STR)

#define atomic_compare_and_exchange_val_acq(mem, new, old)	\
  __atomic_val_bysize (__arch_compare_and_exchange_val, int,	\
		       mem, new, old, "", MIPS_SYNC_STR)

/* Compare and exchange with "release" semantics, i.e. barrier before.  */

#define atomic_compare_and_exchange_bool_rel(mem, new, old)	\
  __atomic_bool_bysize (__arch_compare_and_exchange_bool, int,	\
			mem, new, old, MIPS_SYNC_STR, "")

#define atomic_compare_and_exchange_val_rel(mem, new, old)	\
  __atomic_val_bysize (__arch_compare_and_exchange_val, int,	\
		       mem, new, old, MIPS_SYNC_STR, "")

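/* Usage sketch (illustrative only; `lock' is a hypothetical int word,
   0 = free, 1 = held).  The bool forms return FALSE once the exchange
   succeeded, and the val forms return the value seen before the attempt:

     while (atomic_compare_and_exchange_bool_acq (&lock, 1, 0))
       continue;    (spin until we move lock from 0 to 1)

     int seen = atomic_compare_and_exchange_val_acq (&lock, 1, 0);
     (seen == 0 exactly when this caller took the lock)

   Note the argument order: mem, new, old.  */
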
/* Atomic exchange (without compare).  */

#define __arch_exchange_xxx_8_int(mem, newval, rel, acq) \
  (abort (), (__typeof(*mem)) 0)

#define __arch_exchange_xxx_16_int(mem, newval, rel, acq) \
  (abort (), (__typeof(*mem)) 0)

#define __arch_exchange_xxx_32_int(mem, newval, rel, acq) \
({ __typeof (*mem) __prev; int __cmp;					      \
   __asm__ __volatile__ ("\n"						      \
     ".set push\n\t"							      \
     MIPS_PUSH_MIPS2							      \
     rel "\n"								      \
     "1:\t"								      \
     "ll %0,%4\n\t"							      \
     "move %1,%3\n\t"							      \
     "sc %1,%2\n\t"							      \
     R10K_BEQZ_INSN" %1,1b\n"						      \
     acq "\n\t"								      \
     ".set pop\n"							      \
	      : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem)		      \
	      : "r" (newval), "m" (*mem)				      \
	      : "memory");						      \
   __prev; })

#if _MIPS_SIM == _ABIO32
/* We can't do an atomic 64-bit operation in O32.  */
#define __arch_exchange_xxx_64_int(mem, newval, rel, acq) \
  (abort (), (__typeof(*mem)) 0)
#else
#define __arch_exchange_xxx_64_int(mem, newval, rel, acq) \
({ __typeof (*mem) __prev; int __cmp;					      \
   __asm__ __volatile__ ("\n"						      \
     ".set push\n\t"							      \
     MIPS_PUSH_MIPS2							      \
     rel "\n"								      \
     "1:\t"								      \
     "lld %0,%4\n\t"							      \
     "move %1,%3\n\t"							      \
     "scd %1,%2\n\t"							      \
     R10K_BEQZ_INSN" %1,1b\n"						      \
     acq "\n\t"								      \
     ".set pop\n"							      \
	      : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem)		      \
	      : "r" (newval), "m" (*mem)				      \
	      : "memory");						      \
   __prev; })
#endif

#define atomic_exchange_acq(mem, value) \
  __atomic_val_bysize (__arch_exchange_xxx, int, mem, value, "", MIPS_SYNC_STR)

#define atomic_exchange_rel(mem, value) \
  __atomic_val_bysize (__arch_exchange_xxx, int, mem, value, MIPS_SYNC_STR, "")

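/* Usage sketch (illustrative): the exchange macros evaluate to the value
   the word held before the store, so a simple test-and-set acquisition of
   a hypothetical `lock' word is

     if (atomic_exchange_acq (&lock, 1) == 0)
       ...we observed 0 and now own the lock...  */
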
/* Atomically add value and return the previous (unincremented) value.  */

#define __arch_exchange_and_add_8_int(mem, newval, rel, acq) \
  (abort (), (__typeof(*mem)) 0)

#define __arch_exchange_and_add_16_int(mem, newval, rel, acq) \
  (abort (), (__typeof(*mem)) 0)

#define __arch_exchange_and_add_32_int(mem, value, rel, acq) \
({ __typeof (*mem) __prev; int __cmp;					      \
   __asm__ __volatile__ ("\n"						      \
     ".set push\n\t"							      \
     MIPS_PUSH_MIPS2							      \
     rel "\n"								      \
     "1:\t"								      \
     "ll %0,%4\n\t"							      \
     "addu %1,%0,%3\n\t"						      \
     "sc %1,%2\n\t"							      \
     R10K_BEQZ_INSN" %1,1b\n"						      \
     acq "\n\t"								      \
     ".set pop\n"							      \
	      : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem)		      \
	      : "r" (value), "m" (*mem)					      \
	      : "memory");						      \
   __prev; })

#if _MIPS_SIM == _ABIO32
/* We can't do an atomic 64-bit operation in O32.  */
#define __arch_exchange_and_add_64_int(mem, value, rel, acq) \
  (abort (), (__typeof(*mem)) 0)
#else
#define __arch_exchange_and_add_64_int(mem, value, rel, acq) \
({ __typeof (*mem) __prev; int __cmp;					      \
   __asm__ __volatile__ (						      \
     ".set push\n\t"							      \
     MIPS_PUSH_MIPS2							      \
     rel "\n"								      \
     "1:\t"								      \
     "lld %0,%4\n\t"							      \
     "daddu %1,%0,%3\n\t"						      \
     "scd %1,%2\n\t"							      \
     R10K_BEQZ_INSN" %1,1b\n"						      \
     acq "\n\t"								      \
     ".set pop\n"							      \
	      : "=&r" (__prev), "=&r" (__cmp), "=m" (*mem)		      \
	      : "r" (value), "m" (*mem)					      \
	      : "memory");						      \
   __prev; })
#endif

/* ??? Barrier semantics for atomic_exchange_and_add appear to be
   undefined.  Use full barrier for now, as that's safe.  */
#define atomic_exchange_and_add(mem, value) \
  __atomic_val_bysize (__arch_exchange_and_add, int, mem, value,	      \
		       MIPS_SYNC_STR, MIPS_SYNC_STR)

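/* Usage sketch (illustrative): bumping a hypothetical shared `counter'
   and using the returned pre-increment value as a ticket:

     int my_ticket = atomic_exchange_and_add (&counter, 1);  */
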
/* TODO: More atomic operations could be implemented efficiently; only the
   basic requirements are done.  */

#define atomic_full_barrier() \
  __asm__ __volatile__ (".set push\n\t"					      \
			MIPS_PUSH_MIPS2					      \
			MIPS_SYNC_STR "\n\t"				      \
			".set pop" : : : "memory")

#endif /* bits/atomic.h */