/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <cutils/atomic.h>

#if defined(__arm__)
#include <machine/cpu-features.h>
#endif
/*****************************************************************************/
#if defined(HAVE_MACOSX_IPC)
#define NEED_MAC_QUASI_ATOMICS 1

#elif defined(__i386__) || defined(__x86_64__)
#define NEED_PTHREADS_QUASI_ATOMICS 1

#elif defined(__mips__)
#define NEED_PTHREADS_QUASI_ATOMICS 1

#elif defined(__arm__)

// TODO: Clang cannot process our inline assembly at the moment.
#if defined(__ARM_HAVE_LDREXD) && !defined(__clang__)
#define NEED_ARM_LDREXD_QUASI_ATOMICS 1
#else
#define NEED_PTHREADS_QUASI_ATOMICS 1
#endif

#else
#error "Unsupported atomic operations for this platform"
#endif
/*****************************************************************************/
#if NEED_ARM_LDREXD_QUASI_ATOMICS
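
// The routines below implement 64-bit atomics with ARM's exclusive
// load/store pair: ldrexd reads a doubleword and marks the address for
// exclusive access, and strexd writes back only if no other observer
// touched it in between, setting the status register to non-zero on
// failure. The %H operand modifier names the high register of a 64-bit
// register pair. Each loop retries until the exclusive store succeeds.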
static inline int64_t dvmQuasiAtomicSwap64Body(int64_t newvalue,
                                               volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        __asm__ __volatile__ ("@ dvmQuasiAtomicSwap64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "strexd     %1, %4, %H4, [%3]"
            : "=&r" (prev), "=&r" (status), "+m"(*addr)
            : "r" (addr), "r" (newvalue)
            : "cc");
        // A failed strexd is the rare case; hint the branch predictor.
    } while (__builtin_expect(status != 0, 0));
    return prev;
}
int64_t dvmQuasiAtomicSwap64(int64_t newvalue, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64Body(newvalue, addr);
}
int64_t dvmQuasiAtomicSwap64Sync(int64_t newvalue, volatile int64_t* addr)
{
    int64_t prev;
    ANDROID_MEMBAR_STORE();
    prev = dvmQuasiAtomicSwap64Body(newvalue, addr);
    ANDROID_MEMBAR_FULL();
    return prev;
}
int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        __asm__ __volatile__ ("@ dvmQuasiAtomicCas64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "mov        %1, #0\n"
            "teq        %0, %4\n"
            "teqeq      %H0, %H4\n"
            "strexdeq   %1, %5, %H5, [%3]"
            : "=&r" (prev), "=&r" (status), "+m"(*addr)
            : "r" (addr), "Ir" (oldvalue), "r" (newvalue)
            : "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev != oldvalue;
}
int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t value;
    __asm__ __volatile__ ("@ dvmQuasiAtomicRead64\n"
        "ldrexd     %0, %H0, [%1]"
        : "=&r" (value)
        : "r" (addr));
    return value;
}

#endif /*NEED_ARM_LDREXD_QUASI_ATOMICS*/
/*****************************************************************************/
#if NEED_MAC_QUASI_ATOMICS

#include <libkern/OSAtomic.h>
int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    return OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue,
            (int64_t*)addr) == 0;
}
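
// Note the inversion: OSAtomicCompareAndSwap64Barrier() returns true when
// the swap succeeds, while dvmQuasiAtomicCas64() follows the convention of
// the other backends in this file and returns 0 on success.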
static inline int64_t dvmQuasiAtomicSwap64Body(int64_t value,
                                               volatile int64_t* addr)
{
    int64_t oldValue;
    do {
        oldValue = *addr;
    } while (dvmQuasiAtomicCas64(oldValue, value, addr));
    return oldValue;
}
int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64Body(value, addr);
}
int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
    int64_t oldValue;
    ANDROID_MEMBAR_STORE();
    oldValue = dvmQuasiAtomicSwap64Body(value, addr);
    /* TUNING: barriers can be avoided on some architectures */
    ANDROID_MEMBAR_FULL();
    return oldValue;
}
int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    /* Adding zero yields the current value with a full barrier. */
    return OSAtomicAdd64Barrier(0, (volatile int64_t*)addr);
}

#endif /*NEED_MAC_QUASI_ATOMICS*/
/*****************************************************************************/
#if NEED_PTHREADS_QUASI_ATOMICS

#include <pthread.h>
#include <stdint.h>

// In the absence of a better implementation, we implement the 64-bit atomic
// operations through mutex locking.
//
// Another twist is that we use a small array of mutexes, selected by memory
// address, so that operations on different addresses rarely contend for the
// same lock.
static const size_t kSwapLockCount = 32;
static pthread_mutex_t* gSwapLocks[kSwapLockCount];
void dvmQuasiAtomicsStartup() {
    for (size_t i = 0; i < kSwapLockCount; ++i) {
        pthread_mutex_t* m = new pthread_mutex_t;
        pthread_mutex_init(m, NULL);
        gSwapLocks[i] = m;
    }
}
void dvmQuasiAtomicsShutdown() {
    for (size_t i = 0; i < kSwapLockCount; ++i) {
        pthread_mutex_t* m = gSwapLocks[i];
        gSwapLocks[i] = NULL;
        if (m != NULL) {
            pthread_mutex_destroy(m);
            delete m;
        }
    }
}
static inline pthread_mutex_t* GetSwapLock(const volatile int64_t* addr) {
    return gSwapLocks[((uintptr_t)addr >> 3U) % kSwapLockCount];
}
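
// The shift discards the three low bits, which are always zero for an
// 8-byte-aligned int64_t, so neighboring values map to different locks.
// For example (illustrative addresses): addr == 0x1008 gives
// ((0x1008 >> 3) % 32) == (0x201 % 32) == 1, while addr == 0x1010 gives
// ((0x1010 >> 3) % 32) == 2.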
int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    int64_t oldValue;
    pthread_mutex_t* lock = GetSwapLock(addr);

    pthread_mutex_lock(lock);
    oldValue = *addr;
    *addr = value;
    pthread_mutex_unlock(lock);

    return oldValue;
}
/* Same as dvmQuasiAtomicSwap64 - mutex handles barrier */
int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64(value, addr);
}
int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int result;
    pthread_mutex_t* lock = GetSwapLock(addr);

    pthread_mutex_lock(lock);
    if (*addr == oldvalue) {
        *addr = newvalue;
        result = 0;  /* success */
    } else {
        result = 1;  /* failure */
    }
    pthread_mutex_unlock(lock);

    return result;
}
int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t result;
    pthread_mutex_t* lock = GetSwapLock(addr);

    pthread_mutex_lock(lock);
    result = *addr;
    pthread_mutex_unlock(lock);

    return result;
}
#else

// The other implementations don't need any special setup.
void dvmQuasiAtomicsStartup() {}
void dvmQuasiAtomicsShutdown() {}

#endif /*NEED_PTHREADS_QUASI_ATOMICS*/
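
// Usage sketch: callers build richer read-modify-write operations out of
// dvmQuasiAtomicRead64() and dvmQuasiAtomicCas64() with a retry loop. The
// helper below is purely illustrative (its name is not part of Dalvik's
// API); it shows the canonical CAS loop, assuming the 0-on-success return
// convention implemented above.
static inline int64_t exampleQuasiAtomicAdd64(int64_t delta,
                                              volatile int64_t* addr)
{
    int64_t oldValue;
    do {
        oldValue = dvmQuasiAtomicRead64(addr);
        // Retry if another thread updated *addr between the read and the CAS.
    } while (dvmQuasiAtomicCas64(oldValue, oldValue + delta, addr) != 0);
    return oldValue;
}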