/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cutils/atomic.h>

#if defined(__arm__)
#include <machine/cpu-features.h>
#endif

/*****************************************************************************/

#if defined(HAVE_MACOSX_IPC)
#define NEED_MAC_QUASI_ATOMICS 1

#elif defined(__i386__) || defined(__x86_64__)
#define NEED_PTHREADS_QUASI_ATOMICS 1

#elif defined(__mips__)
#define NEED_PTHREADS_QUASI_ATOMICS 1

#elif defined(__arm__)

#if defined(__ARM_HAVE_LDREXD)
#define NEED_ARM_LDREXD_QUASI_ATOMICS 1
#else
#define NEED_PTHREADS_QUASI_ATOMICS 1
#endif /*__ARM_HAVE_LDREXD*/

#elif defined(__sh__)
#define NEED_PTHREADS_QUASI_ATOMICS 1

#else
#error "Unsupported atomic operations for this platform"
#endif

/*****************************************************************************/
#if NEED_ARM_LDREXD_QUASI_ATOMICS

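/*
 * The routines below rely on ARM's LDREXD/STREXD exclusive load/store
 * pair: LDREXD loads a 64-bit value and marks the address for exclusive
 * access, and STREXD stores back only if the exclusive reservation is
 * still held, writing non-zero to its status operand otherwise.  Each
 * loop therefore retries until the store succeeds.  In the operand
 * lists, %0 names the low register of a 64-bit register pair and %H0
 * its high counterpart.
 */
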
static inline int64_t dvmQuasiAtomicSwap64Body(int64_t newvalue,
                                               volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        __asm__ __volatile__ ("@ dvmQuasiAtomicSwap64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "strexd     %1, %4, %H4, [%3]"
            : "=&r" (prev), "=&r" (status), "+m"(*addr)
            : "r" (addr), "r" (newvalue)
            : "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev;
}

int64_t dvmQuasiAtomicSwap64(int64_t newvalue, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64Body(newvalue, addr);
}

int64_t dvmQuasiAtomicSwap64Sync(int64_t newvalue, volatile int64_t* addr)
{
    int64_t prev;
    ANDROID_MEMBAR_STORE();
    prev = dvmQuasiAtomicSwap64Body(newvalue, addr);
    ANDROID_MEMBAR_FULL();
    return prev;
}

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int64_t prev;
    int status;
    do {
        __asm__ __volatile__ ("@ dvmQuasiAtomicCas64\n"
            "ldrexd     %0, %H0, [%3]\n"
            "mov        %1, #0\n"
            "teq        %0, %4\n"
            "teqeq      %H0, %H4\n"
            "strexdeq   %1, %5, %H5, [%3]"
            : "=&r" (prev), "=&r" (status), "+m"(*addr)
            : "r" (addr), "Ir" (oldvalue), "r" (newvalue)
            : "cc");
    } while (__builtin_expect(status != 0, 0));
    return prev != oldvalue;  /* 0 on success */
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    int64_t value;
    __asm__ __volatile__ ("@ dvmQuasiAtomicRead64\n"
        "ldrexd     %0, %H0, [%1]"
        : "=&r" (value)
        : "r" (addr));
    return value;
}
#endif /*NEED_ARM_LDREXD_QUASI_ATOMICS*/

/*****************************************************************************/
#if NEED_MAC_QUASI_ATOMICS

#include <libkern/OSAtomic.h>

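/*
 * OSAtomicCompareAndSwap64Barrier() returns true (non-zero) when the swap
 * takes place, so comparing its result with 0 below preserves this file's
 * convention that dvmQuasiAtomicCas64() returns 0 on success.
 */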
int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    return OSAtomicCompareAndSwap64Barrier(oldvalue, newvalue,
            (int64_t*)addr) == 0;
}

static inline int64_t dvmQuasiAtomicSwap64Body(int64_t value,
                                               volatile int64_t* addr)
{
    int64_t oldValue;
    do {
        oldValue = *addr;
    } while (dvmQuasiAtomicCas64(oldValue, value, addr));
    return oldValue;
}

int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64Body(value, addr);
}

int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
    int64_t oldValue;
    ANDROID_MEMBAR_STORE();
    oldValue = dvmQuasiAtomicSwap64Body(value, addr);
    /* TUNING: barriers can be avoided on some architectures */
    ANDROID_MEMBAR_FULL();
    return oldValue;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    /* adding zero with a barrier performs an atomic 64-bit read */
    return OSAtomicAdd64Barrier(0, (volatile int64_t*)addr);
}
#endif /*NEED_MAC_QUASI_ATOMICS*/

/*****************************************************************************/
#if NEED_PTHREADS_QUASI_ATOMICS

// In the absence of a better implementation, we implement the 64-bit atomic
// operations through mutex locking.
//
// Another twist is that we use a small array of mutexes, selected by
// hashing the target address, so that operations on unrelated addresses
// do not all contend for one global lock (see the worked example after
// GetSwapLock() below).

#include <pthread.h>

static const size_t kSwapLockCount = 32;
static pthread_mutex_t* gSwapLocks[kSwapLockCount];

void dvmQuasiAtomicsStartup() {
    for (size_t i = 0; i < kSwapLockCount; ++i) {
        pthread_mutex_t* m = new pthread_mutex_t;
        pthread_mutex_init(m, NULL);
        gSwapLocks[i] = m;
    }
}

void dvmQuasiAtomicsShutdown() {
    for (size_t i = 0; i < kSwapLockCount; ++i) {
        pthread_mutex_t* m = gSwapLocks[i];
        gSwapLocks[i] = NULL;
        if (m != NULL) {
            pthread_mutex_destroy(m);
            delete m;
        }
    }
}

static inline pthread_mutex_t* GetSwapLock(const volatile int64_t* addr) {
    return gSwapLocks[((uintptr_t)(void*)(addr) >> 3U) % kSwapLockCount];
}

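/*
 * Example: address 0x1000 hashes to slot (0x1000 >> 3) % 32 == 0 and
 * 0x1008 to slot 1, so adjacent 64-bit fields take different locks,
 * while 0x1000 and 0x1100 both land in slot 0 and share one.
 */
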
int64_t dvmQuasiAtomicSwap64(int64_t value, volatile int64_t* addr)
{
    pthread_mutex_t* lock = GetSwapLock(addr);

    pthread_mutex_lock(lock);
    int64_t oldValue = *addr;
    *addr = value;
    pthread_mutex_unlock(lock);
    return oldValue;
}

/* Same as dvmQuasiAtomicSwap64 - the mutex provides the barrier */
int64_t dvmQuasiAtomicSwap64Sync(int64_t value, volatile int64_t* addr)
{
    return dvmQuasiAtomicSwap64(value, addr);
}

int dvmQuasiAtomicCas64(int64_t oldvalue, int64_t newvalue,
    volatile int64_t* addr)
{
    int result;
    pthread_mutex_t* lock = GetSwapLock(addr);

    pthread_mutex_lock(lock);
    if (*addr == oldvalue) {
        *addr = newvalue;
        result = 0;  /* success */
    } else {
        result = 1;  /* failure */
    }
    pthread_mutex_unlock(lock);
    return result;
}

int64_t dvmQuasiAtomicRead64(volatile const int64_t* addr)
{
    pthread_mutex_t* lock = GetSwapLock(addr);

    pthread_mutex_lock(lock);
    int64_t result = *addr;
    pthread_mutex_unlock(lock);
    return result;
}

#else

// The other implementations don't need any special setup.
void dvmQuasiAtomicsStartup() {}
void dvmQuasiAtomicsShutdown() {}

#endif /*NEED_PTHREADS_QUASI_ATOMICS*/
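
/*
 * Usage sketch: every implementation of dvmQuasiAtomicCas64() above
 * returns 0 on success, so callers compose 64-bit read-modify-write
 * operations as a retry loop.  The helper name here is illustrative
 * only:
 *
 *     static int64_t quasiAtomicAdd64(int64_t delta, volatile int64_t* addr)
 *     {
 *         int64_t old;
 *         do {
 *             old = dvmQuasiAtomicRead64(addr);
 *         } while (dvmQuasiAtomicCas64(old, old + delta, addr) != 0);
 *         return old;
 *     }
 */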