/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Dalvik.h"

#include <cutils/atomic.h>

#if defined(__arm__)
#include <machine/cpu-features.h>
#endif

/*****************************************************************************/

#if defined(HAVE_MACOSX_IPC)
#define NEED_MAC_QUASI_ATOMICS 1

#elif defined(__i386__) || defined(__x86_64__)
#define NEED_PTHREADS_QUASI_ATOMICS 1

#elif defined(__mips__)
#define NEED_PTHREADS_QUASI_ATOMICS 1

#elif defined(__arm__)

// TODO: Clang cannot process our inline assembly at the moment.
#if defined(__ARM_HAVE_LDREXD) && !defined(__clang__)
#define NEED_ARM_LDREXD_QUASI_ATOMICS 1
#else
#define NEED_PTHREADS_QUASI_ATOMICS 1
#endif

#elif defined(__sh__)
#define NEED_PTHREADS_QUASI_ATOMICS 1

#else
#error "Unsupported atomic operations for this platform"
#endif

/*****************************************************************************/

#if NEED_ARM_LDREXD_QUASI_ATOMICS

static inline int64_t dvmQuasiAtomicSwap64Body(int64_t newvalue,
                                               volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        __asm__ __volatile__ ("@ dvmQuasiAtomicSwap64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "strexd     %1, %4, %H4, [%3]"
            : "=&r" (prev), "=&r" (status), "+m"(*addr)
            : "r" (addr), "r" (newvalue)
            : "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev;
}
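
// The loop above is the classic load-linked/store-conditional pattern. A
// rough C-level sketch (illustrative only; the real code must keep the
// ldrexd/strexd pair adjacent so the exclusive monitor covers both):
//
//   do {
//       prev = *addr;                  // ldrexd: exclusive 64-bit load
//       status = try_store(newvalue);  // strexd: 0 iff the reservation held
//   } while (status != 0);             // retry if another core intervened
//
// (try_store is a hypothetical stand-in for the conditional store.)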

int64_t dvmQuasiAtomicSwap64(int64_t newvalue, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64Body(newvalue, addr);
}

int64_t dvmQuasiAtomicSwap64Sync(int64_t newvalue, volatile int64_t* addr)
{
    int64_t prev;
    ANDROID_MEMBAR_STORE();
    prev = dvmQuasiAtomicSwap64Body(newvalue, addr);
    ANDROID_MEMBAR_FULL();
    return prev;
}

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        __asm__ __volatile__ ("@ dvmQuasiAtomicCas64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "mov        %1, #0\n"
            "teq        %0, %4\n"
            "teqeq      %H0, %H4\n"
            "strexdeq   %1, %5, %H5, [%3]"
            : "=&r" (prev), "=&r" (status), "+m"(*addr)
            : "r" (addr), "Ir" (oldvalue), "r" (newvalue)
            : "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev != oldvalue;
}
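
// Return convention: 0 if the swap happened (prev matched oldvalue),
// non-zero otherwise. teq/teqeq compare both 32-bit halves and strexdeq
// only attempts the store when they match; "mov %1, #0" presets status so
// a skipped store reads as "done" and exits the retry loop.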

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t value;
    __asm__ __volatile__ ("@ dvmQuasiAtomicRead64\n"
        "ldrexd     %0, %H0, [%1]"
        : "=&r" (value)
        : "r" (addr));
    return value;
}

#endif

/*****************************************************************************/

#if NEED_MAC_QUASI_ATOMICS

#include <libkern/OSAtomic.h>

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    return OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue,
            (int64_t*)addr) == 0;
}
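
// OSAtomicCompareAndSwap64Barrier() returns true (non-zero) when the swap
// occurs, so comparing against 0 maps it onto this file's convention of
// returning 0 on success.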

static inline int64_t dvmQuasiAtomicSwap64Body(int64_t value,
                                               volatile int64_t* addr)
{
    int64_t oldValue;
    do {
        oldValue = *addr;
    } while (dvmQuasiAtomicCas64(oldValue, value, addr));
    return oldValue;
}

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64Body(value, addr);
}

int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
    int64_t oldValue;
    ANDROID_MEMBAR_STORE();
    oldValue = dvmQuasiAtomicSwap64Body(value, addr);
    /* TUNING: barriers can be avoided on some architectures */
    ANDROID_MEMBAR_FULL();
    return oldValue;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    /* Adding zero returns the current value with barrier semantics; the
     * cast is needed because OSAtomicAdd64Barrier() takes a non-const
     * pointer. */
    return OSAtomicAdd64Barrier(0, (volatile int64_t*)addr);
}

#endif

/*****************************************************************************/

#if NEED_PTHREADS_QUASI_ATOMICS

// In the absence of a better implementation, we implement the 64-bit atomic
// operations through mutex locking.
//
// Another twist is that we use a small array of mutexes, selected by memory
// address, so that operations on different addresses rarely contend for the
// same lock.

#include <pthread.h>

static const size_t kSwapLockCount = 32;
static pthread_mutex_t* gSwapLocks[kSwapLockCount];

void dvmQuasiAtomicsStartup() {
    for (size_t i = 0; i < kSwapLockCount; ++i) {
        pthread_mutex_t* m = new pthread_mutex_t;
        dvmInitMutex(m);
        gSwapLocks[i] = m;
    }
}

void dvmQuasiAtomicsShutdown() {
    for (size_t i = 0; i < kSwapLockCount; ++i) {
        pthread_mutex_t* m = gSwapLocks[i];
        gSwapLocks[i] = NULL;
        if (m != NULL) {
            dvmDestroyMutex(m);
        }
        delete m;
    }
}

static inline pthread_mutex_t* GetSwapLock(const volatile int64_t* addr) {
    // uintptr_t (rather than unsigned) keeps the cast valid on 64-bit
    // builds; the low three bits are dropped so that both words of an
    // int64_t select the same lock.
    return gSwapLocks[((uintptr_t)(addr) >> 3U) % kSwapLockCount];
}
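
// Worked example of the dispatch (illustrative figures only): with
// kSwapLockCount == 32, an int64_t at 0x1008 hashes to
// (0x1008 >> 3) % 32 == 1 and one at 0x1010 to index 2, so neighboring
// fields take different mutexes; addresses 256 bytes (32 * 8) apart alias
// to the same lock, an acceptable collision rate for this scheme.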

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    int64_t oldValue;
    pthread_mutex_t* lock = GetSwapLock(addr);

    pthread_mutex_lock(lock);
    oldValue = *addr;
    *addr = value;
    pthread_mutex_unlock(lock);
    return oldValue;
}

/* Same as dvmQuasiAtomicSwap64 - the mutex provides the barrier */
int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64(value, addr);
}

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int result;
    pthread_mutex_t* lock = GetSwapLock(addr);

    pthread_mutex_lock(lock);
    if (*addr == oldvalue) {
        *addr = newvalue;
        result = 0;
    } else {
        result = 1;
    }
    pthread_mutex_unlock(lock);
    return result;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t result;
    pthread_mutex_t* lock = GetSwapLock(addr);

    pthread_mutex_lock(lock);
    result = *addr;
    pthread_mutex_unlock(lock);
    return result;
}

#else

// The other implementations don't need any special setup.
void dvmQuasiAtomicsStartup() {}
void dvmQuasiAtomicsShutdown() {}

#endif /*NEED_PTHREADS_QUASI_ATOMICS*/
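
// Usage sketch (hypothetical caller, not part of the Dalvik sources): the
// 0-on-success convention of dvmQuasiAtomicCas64() supports the usual
// read/compare-and-swap retry loop, e.g. a 64-bit atomic add:
//
//   static int64_t quasiAtomicAdd64(volatile int64_t* addr, int64_t delta) {
//       int64_t old;
//       do {
//           old = dvmQuasiAtomicRead64(addr);
//       } while (dvmQuasiAtomicCas64(old, old + delta, addr) != 0);
//       return old + delta;
//   }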