/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "Dalvik.h"

#include <cutils/atomic.h>
#include <sched.h>      /* for sched_yield() in the spinlock fallback */
/*
 * Quasi-atomic 64-bit operations, for platforms that lack the real thing.
 *
 * TODO: unify ARMv6/x86/sh implementations using the to-be-written
 * spin lock implementation.  We don't want to rely on mutex innards,
 * and it would be great if all platforms were running the same code.
 */
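/*
 * Usage sketch: every implementation below follows the convention that
 * dvmQuasiAtomicCas64() returns 0 on success and nonzero on failure, so
 * callers retry while the result is nonzero. A hypothetical 64-bit
 * atomic add built on these primitives would look like the following;
 * exampleAtomicAdd64 is illustrative only and is not compiled.
 */
#if 0
static int64_t exampleAtomicAdd64(int64_t delta, volatile int64_t* addr)
{
    int64_t old;
    do {
        old = dvmQuasiAtomicRead64(addr);
    } while (dvmQuasiAtomicCas64(old, old + delta, addr) != 0);
    return old + delta;
}
#endif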
#if defined(HAVE_MACOSX_IPC)

#include <libkern/OSAtomic.h>
#if defined(__ppc__)        \
    || defined(__PPC__)     \
    || defined(__powerpc__) \
    || defined(__powerpc)   \
    || defined(__POWERPC__) \
    || defined(_M_PPC)      \
    || defined(__PPC)
#define NEED_QUASIATOMICS 1
#else
int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    /* OSAtomicCompareAndSwap64Barrier() returns true on success, so
     * invert it to match the dvm convention of 0 on success. */
    return OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue,
            (int64_t*)addr) == 0;
}

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    int64_t oldValue;
    do {
        oldValue = *addr;
    } while (dvmQuasiAtomicCas64(oldValue, value, addr));
    return oldValue;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    /* adding zero yields the current value, with a full barrier */
    return OSAtomicAdd64Barrier(0, (volatile int64_t*)addr);
}
#endif
#elif defined(__i386__) || defined(__x86_64__)
#define NEED_QUASIATOMICS 1
#elif defined(__arm__)
#include <machine/cpu-features.h>
#ifdef __ARM_HAVE_LDREXD
int64_t dvmQuasiAtomicSwap64(int64_t newvalue, volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        /* ldrexd/strexd: retry until the exclusive store succeeds
         * (strexd writes 0 to status on success) */
        __asm__ __volatile__ ("@ dvmQuasiAtomicSwap64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "strexd     %1, %4, %H4, [%3]"
            : "=&r" (prev), "=&r" (status), "+m"(*addr)
            : "r" (addr), "r" (newvalue)
            : "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev;
}
int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        /* store newvalue only if both halves of the current value
         * match oldvalue; status stays 0 when no store was attempted */
        __asm__ __volatile__ ("@ dvmQuasiAtomicCas64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "mov        %1, #0\n"
            "teq        %0, %4\n"
            "teqeq      %H0, %H4\n"
            "strexdeq   %1, %5, %H5, [%3]"
            : "=&r" (prev), "=&r" (status), "+m"(*addr)
            : "r" (addr), "Ir" (oldvalue), "r" (newvalue)
            : "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev != oldvalue;
}
int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t value;
    /* ldrexd guarantees a single-copy atomic 64-bit load */
    __asm__ __volatile__ ("@ dvmQuasiAtomicRead64\n"
        "ldrexd     %0, %H0, [%1]"
        : "=&r" (value)
        : "r" (addr));
    return value;
}

#else
// On the device, we implement the 64-bit atomic operations through
// mutex locking. Normally this is bad, because a pthread_mutex_t must
// be initialized before it can be used, which would force an
// initialization check on every function call, and that's where
// really ugly things begin...
//
// BUT, as a special twist, we take advantage of the fact that in our
// pthread library, a mutex is simply a volatile word whose value is
// always initialized to 0. In other words, simply declaring a static
// mutex object initializes it!
//
// Another twist is that we use a small array of mutexes to dispatch
// the contention locks from different memory addresses; a worked
// example of the address hashing follows the SWAP_LOCK macro below.
#include <pthread.h>

#define SWAP_LOCK_COUNT  32U
static pthread_mutex_t  _swap_locks[SWAP_LOCK_COUNT];

/* hash the address into one of the lock stripes; the low 3 bits are
 * dropped because int64_t values are 8-byte aligned */
#define SWAP_LOCK(addr) \
   &_swap_locks[((unsigned)(void*)(addr) >> 3U) % SWAP_LOCK_COUNT]
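/*
 * Worked example of the dispatch (with the 32-stripe table above):
 * two adjacent 8-byte slots map to different locks, so independent
 * 64-bit fields rarely contend on the same mutex.
 *
 *   addr = 0x1000: (0x1000 >> 3) % 32 = 512 % 32 = stripe 0
 *   addr = 0x1008: (0x1008 >> 3) % 32 = 513 % 32 = stripe 1
 *   addr = 0x1100: (0x1100 >> 3) % 32 = 544 % 32 = stripe 0 (reused)
 */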
int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    int64_t oldValue;
    pthread_mutex_t* lock = SWAP_LOCK(addr);

    pthread_mutex_lock(lock);

    oldValue = *addr;
    *addr = value;

    pthread_mutex_unlock(lock);
    return oldValue;
}
int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int result;
    pthread_mutex_t* lock = SWAP_LOCK(addr);

    pthread_mutex_lock(lock);

    if (*addr == oldvalue) {
        *addr = newvalue;
        result = 0;
    } else {
        result = 1;
    }
    pthread_mutex_unlock(lock);
    return result;
}
int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t result;
    pthread_mutex_t* lock = SWAP_LOCK(addr);

    pthread_mutex_lock(lock);
    result = *addr;
    pthread_mutex_unlock(lock);
    return result;
}
#endif /*__ARM_HAVE_LDREXD*/
/*****************************************************************************/

#elif defined(__sh__)
#define NEED_QUASIATOMICS 1

#else
#error "Unsupported atomic operations for this platform"
#endif
#if NEED_QUASIATOMICS
/* Note that a spinlock is *not* a good idea in general
 * since spinlocks can introduce subtle issues. For example,
 * a real-time thread trying to acquire a spinlock already
 * held by another thread will never yield, making the
 * CPU loop endlessly!
 *
 * However, this code is only used on the Linux simulator
 * so it's probably ok for us.
 *
 * The alternative is to use a pthread mutex, but
 * these must be initialized before being used, and
 * then you have the problem of lazily initializing
 * a mutex without any other synchronization primitive.
 *
 * TODO: these currently use sched_yield(), which is not guaranteed to
 * do anything at all.  We need to use dvmIterativeSleep or a wait /
 * notify mechanism if the initial attempt fails.
 */
/* global spinlock for all 64-bit quasiatomic operations */
static int32_t quasiatomic_spinlock = 0;
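/*
 * Each function below inlines the same lock discipline: spin (with a
 * yield) until the acquire-CAS flips the lock word from 0 to 1, perform
 * the 64-bit access, then release by storing 0. As a sketch, with
 * hypothetical helpers quasi_lock()/quasi_unlock() (illustration only,
 * not compiled):
 */
#if 0
static void quasi_lock(void)
{
    while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
        sched_yield();   /* Sleep(0) on Win32 */
    }
}

static void quasi_unlock(void)
{
    android_atomic_release_store(0, &quasiatomic_spinlock);
}
#endif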
int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int result;

    while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
        Sleep(0);
#else
        sched_yield();
#endif
    }

    if (*addr == oldvalue) {
        *addr = newvalue;
        result = 0;
    } else {
        result = 1;
    }

    android_atomic_release_store(0, &quasiatomic_spinlock);

    return result;
}
int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t result;

    while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
        Sleep(0);
#else
        sched_yield();
#endif
    }

    result = *addr;
    android_atomic_release_store(0, &quasiatomic_spinlock);

    return result;
}
int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    int64_t result;

    while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
        Sleep(0);
#else
        sched_yield();
#endif
    }

    result = *addr;
    *addr = value;
    android_atomic_release_store(0, &quasiatomic_spinlock);

    return result;
}
#endif /*NEED_QUASIATOMICS*/