* these must be initialized before being used, and
* then you have the problem of lazily initializing
* a mutex without any other synchronization primitive.
+ *
+ * TODO: these currently use sched_yield(), which is not guaranteed to
+ * do anything at all. We need to use dvmIterativeSleep or a wait /
+ * notify mechanism if the initial attempt fails.
*/
/* global spinlock for all 64-bit quasiatomic operations */
volatile int64_t* addr) {
int result;
- while (android_atomic_cmpxchg(0, 1, &quasiatomic_spinlock)) {
+ while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
Sleep(0);
#else
result = 1;
}
- android_atomic_swap(0, &quasiatomic_spinlock);
+ android_atomic_release_store(0, &quasiatomic_spinlock);
return result;
}
int64_t android_quasiatomic_read_64(volatile int64_t* addr) {
int64_t result;
- while (android_atomic_cmpxchg(0, 1, &quasiatomic_spinlock)) {
+ while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
Sleep(0);
#else
}
result = *addr;
- android_atomic_swap(0, &quasiatomic_spinlock);
+ android_atomic_release_store(0, &quasiatomic_spinlock);
return result;
}
int64_t android_quasiatomic_swap_64(int64_t value, volatile int64_t* addr) {
int64_t result;
- while (android_atomic_cmpxchg(0, 1, &quasiatomic_spinlock)) {
+ while (android_atomic_acquire_cas(0, 1, &quasiatomic_spinlock)) {
#ifdef HAVE_WIN32_THREADS
Sleep(0);
#else
result = *addr;
*addr = value;
- android_atomic_swap(0, &quasiatomic_spinlock);
+ android_atomic_release_store(0, &quasiatomic_spinlock);
return result;
}
/*
* Full memory barrier. Ensures compiler ordering and SMP behavior.
*/
-#define MEM_BARRIER() android_membar_full()
+#define MEM_BARRIER() ANDROID_MEMBAR_FULL()
/*
* 32-bit atomic compare-and-swap macro. Performs a memory barrier
* before the swap (store-release).
*
- * If *_addr equals "_old", replace it with "_new" and return nonzero.
+ * If *_addr equals "_old", replace it with "_new" and return nonzero
+ * (i.e. the macro evaluates to true on success and false on failure).
*
* Underlying function is currently declared:
- * int android_atomic_cmpxchg(int32_t old, int32_t new, volatile int32_t* addr)
+ * int android_atomic_release_cas(int32_t old, int32_t new, volatile int32_t* addr)
+ *
+ * TODO: rename macro to ATOMIC_RELEASE_CAS
*/
#define ATOMIC_CMP_SWAP(_addr, _old, _new) \
- (android_atomic_cmpxchg((_old), (_new), (_addr)) == 0)
+ (android_atomic_release_cas((_old), (_new), (_addr)) == 0)
/*