/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cutils/atomic.h>
#include <pthread.h>
#include <sched.h>
#include <stdint.h>

/*
 * Quasi-atomic 64-bit operations, for platforms that lack the real thing.
 *
 * TODO: unify ARMv6/x86/sh implementations using the to-be-written
 * spin lock implementation.  We don't want to rely on mutex innards,
 * and it would be great if all platforms were running the same code.
 */
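
/*
 * All implementations below share one contract: dvmQuasiAtomicCas64()
 * returns 0 on success and nonzero on failure.  A typical caller retries
 * until the CAS succeeds, e.g. ("field" is just an illustrative int64_t):
 *
 *     int64_t old;
 *     do {
 *         old = dvmQuasiAtomicRead64(&field);
 *     } while (dvmQuasiAtomicCas64(old, old + 1, &field) != 0);
 */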

#if defined(HAVE_MACOSX_IPC)

#include <libkern/OSAtomic.h>

#if defined(__ppc__)        \
    || defined(__PPC__)     \
    || defined(__powerpc__) \
    || defined(__powerpc)   \
    || defined(__POWERPC__) \
    || defined(_M_PPC)      \
    || defined(__PPC)
#define NEED_QUASIATOMICS 1
#else

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    /* OSAtomicCompareAndSwap64Barrier() returns true on success, but this
     * function's convention is 0 on success, hence the == 0. */
    return OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue,
            (int64_t*)addr) == 0;
}

static inline int64_t dvmQuasiAtomicSwap64Body(int64_t value,
    volatile int64_t* addr)
{
    int64_t oldValue;
    do {
        oldValue = *addr;
        /* Retry while the CAS fails (nonzero means another thread
         * changed *addr between the read and the swap). */
    } while (dvmQuasiAtomicCas64(oldValue, value, addr));
    return oldValue;
}

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64Body(value, addr);
}

int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
    int64_t oldValue;
    ANDROID_MEMBAR_STORE();
    oldValue = dvmQuasiAtomicSwap64Body(value, addr);
    /* TUNING: barriers can be avoided on some architectures */
    ANDROID_MEMBAR_FULL();
    return oldValue;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    /* Adding 0 returns the current value with a full barrier. */
    return OSAtomicAdd64Barrier(0, (int64_t*)addr);
}
#endif

#elif defined(__i386__) || defined(__x86_64__)
#define NEED_QUASIATOMICS 1

#elif __arm__
#include <machine/cpu-features.h>

// Clang cannot process this inline assembly at the moment.
#if defined(__ARM_HAVE_LDREXD) && !defined(__clang__)
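/*
 * These use ARM's load-exclusive/store-exclusive pair: ldrexd reads the
 * 64-bit value and marks the address for exclusive access, and strexd
 * writes 0 to its status register only if nothing disturbed that access
 * in between (otherwise it writes 1 and the store is discarded).  Each
 * loop below retries until the read-modify-write was truly atomic.
 */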
static inline int64_t dvmQuasiAtomicSwap64Body(int64_t newvalue,
    volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        __asm__ __volatile__ ("@ dvmQuasiAtomicSwap64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "strexd     %1, %4, %H4, [%3]"
            : "=&r" (prev), "=&r" (status), "+m" (*addr)
            : "r" (addr), "r" (newvalue)
            : "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev;
}

int64_t dvmQuasiAtomicSwap64(int64_t newvalue, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64Body(newvalue, addr);
}

int64_t dvmQuasiAtomicSwap64Sync(int64_t newvalue, volatile int64_t* addr)
{
    int64_t prev;
    ANDROID_MEMBAR_STORE();
    prev = dvmQuasiAtomicSwap64Body(newvalue, addr);
    ANDROID_MEMBAR_FULL();
    return prev;
}

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        __asm__ __volatile__ ("@ dvmQuasiAtomicCas64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "mov        %1, #0\n"
            "teq        %0, %4\n"
            "teqeq      %H0, %H4\n"
            "strexdeq   %1, %5, %H5, [%3]"
            : "=&r" (prev), "=&r" (status), "+m" (*addr)
            : "r" (addr), "Ir" (oldvalue), "r" (newvalue)
            : "cc");
    } while (__builtin_expect(status != 0, 0));
    /* 0 on success: the loop only exits with status == 0, so on a value
     * mismatch the store never happened and prev != oldvalue. */
    return prev != oldvalue;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t value;
    __asm__ __volatile__ ("@ dvmQuasiAtomicRead64\n"
        "ldrexd     %0, %H0, [%1]"
        : "=&r" (value)
        : "r" (addr));
    return value;
}

#else

// On the device, we implement the 64-bit atomic operations through
// mutex locking.  Normally this is bad, because we would have to
// initialize each pthread_mutex_t before using it, which means doing
// an initialization check on every function call, and that's where
// really ugly things begin...
//
// BUT, as a special twist, we take advantage of the fact that in our
// pthread library, a mutex is simply a volatile word whose value is
// always initialized to 0.  In other words, simply declaring a static
// mutex object initializes it!
//
// Another twist is that we use a small array of mutexes to dispatch
// the contention locks from different memory addresses.
//

#define SWAP_LOCK_COUNT  32U
static pthread_mutex_t _swap_locks[SWAP_LOCK_COUNT];

#define SWAP_LOCK(addr) \
    &_swap_locks[((unsigned)(void*)(addr) >> 3U) % SWAP_LOCK_COUNT]
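
/*
 * For example (assuming 8-byte-aligned int64_t fields), an address like
 * 0x1008 maps to lock (0x1008 >> 3) % 32 == 513 % 32 == 1.  Dropping the
 * low three bits ignores alignment padding, and the modulo spreads
 * different addresses across the lock array while always mapping a given
 * address to the same mutex.
 */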

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    int64_t oldValue;
    pthread_mutex_t* lock = SWAP_LOCK(addr);

    pthread_mutex_lock(lock);

    oldValue = *addr;
    *addr = value;

    pthread_mutex_unlock(lock);
    return oldValue;
}

/* Same as dvmQuasiAtomicSwap64 - mutex handles barrier */
int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64(value, addr);
}

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int result;
    pthread_mutex_t* lock = SWAP_LOCK(addr);

    pthread_mutex_lock(lock);

    if (*addr == oldvalue) {
        *addr = newvalue;
        result = 0;
    } else {
        result = 1;
    }
    pthread_mutex_unlock(lock);
    return result;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t result;
    pthread_mutex_t* lock = SWAP_LOCK(addr);

    pthread_mutex_lock(lock);
    result = *addr;
    pthread_mutex_unlock(lock);
    return result;
}

#endif /*__ARM_HAVE_LDREXD*/

/*****************************************************************************/

#elif __sh__
#define NEED_QUASIATOMICS 1

#else
#error "Unsupported atomic operations for this platform"
#endif

#if NEED_QUASIATOMICS

/* Note that a spinlock is *not* a good idea in general
 * since they can introduce subtle issues.  For example,
 * a real-time thread trying to acquire a spinlock already
 * held by another thread will never yield, making the
 * CPU loop endlessly!
 *
 * However, this code is only used on the Linux simulator
 * so it's probably ok for us.
 *
 * The alternative is to use a pthread mutex, but
 * these must be initialized before being used, and
 * then you have the problem of lazily initializing
 * a mutex without any other synchronization primitive.
 *
 * TODO: these currently use sched_yield(), which is not guaranteed to
 * do anything at all.  We need to use dvmIterativeSleep or a wait /
 * notify mechanism if the initial attempt fails.
 */

/* global spinlock for all 64-bit quasiatomic operations */
static int32_t quasiatomic_spinlock = 0;
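
/*
 * Lock protocol used below: android_atomic_acquire_cas(0, 1, &lock)
 * returns 0 only when it atomically flips the word from 0 (free) to
 * 1 (held), with acquire semantics; android_atomic_release_store(0,
 * &lock) publishes all prior writes and frees the lock.
 */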

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int result;

    while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
        Sleep(0);
#else
        sched_yield();
#endif
    }

    if (*addr == oldvalue) {
        *addr = newvalue;
        result = 0;
    } else {
        result = 1;
    }

    android_atomic_release_store(0, &quasiatomic_spinlock);

    return result;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t result;

    while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
        Sleep(0);
#else
        sched_yield();
#endif
    }

    result = *addr;
    android_atomic_release_store(0, &quasiatomic_spinlock);

    return result;
}

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    int64_t result;

    while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
        Sleep(0);
#else
        sched_yield();
#endif
    }

    result = *addr;
    *addr = value;
    android_atomic_release_store(0, &quasiatomic_spinlock);

    return result;
}

/* Same as dvmQuasiAtomicSwap64 - syscall handles barrier */
int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64(value, addr);
}

#endif /*NEED_QUASIATOMICS*/