/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <gtest/gtest.h>

#include <pthread.h>
#include <stdint.h>

// Fool stdatomic.h into not using <atomic>.
#undef _USING_LIBCXX
#include <stdatomic.h>
// All the standard *_LOCK_FREE macros must be non-zero on bionic's
// target architectures (0 = never lock free, 1 = sometimes, 2 = always).
TEST(stdatomic, LOCK_FREE) {
  ASSERT_TRUE(ATOMIC_BOOL_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_CHAR16_T_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_CHAR32_T_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_CHAR_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_INT_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_LLONG_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_LONG_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_POINTER_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_SHORT_LOCK_FREE);
  ASSERT_TRUE(ATOMIC_WCHAR_T_LOCK_FREE);
}
// Checks the three initialization mechanisms: ATOMIC_VAR_INIT for
// declarations, atomic_init for already-constructed objects, and
// ATOMIC_FLAG_INIT for atomic_flag.
TEST(stdatomic, init) {
  atomic_int v = ATOMIC_VAR_INIT(123);
  ASSERT_EQ(123, atomic_load(&v));

  // atomic_init is the only portable way to reinitialize an existing atomic.
  atomic_init(&v, 456);
  ASSERT_EQ(456, atomic_load(&v));

  atomic_flag f = ATOMIC_FLAG_INIT;
  // ATOMIC_FLAG_INIT leaves the flag clear, so the first test-and-set
  // observes false.
  ASSERT_FALSE(atomic_flag_test_and_set(&f));
}
// Smoke test: atomic_thread_fence must accept every memory_order value.
TEST(stdatomic, atomic_thread_fence) {
  atomic_thread_fence(memory_order_relaxed);
  atomic_thread_fence(memory_order_consume);
  atomic_thread_fence(memory_order_acquire);
  atomic_thread_fence(memory_order_release);
  atomic_thread_fence(memory_order_acq_rel);
  atomic_thread_fence(memory_order_seq_cst);
}
// Smoke test: atomic_signal_fence must accept every memory_order value.
TEST(stdatomic, atomic_signal_fence) {
  atomic_signal_fence(memory_order_relaxed);
  atomic_signal_fence(memory_order_consume);
  atomic_signal_fence(memory_order_acquire);
  atomic_signal_fence(memory_order_release);
  atomic_signal_fence(memory_order_acq_rel);
  atomic_signal_fence(memory_order_seq_cst);
}
// Checks atomic_is_lock_free on a small (char) and a large (intmax_t)
// atomic object.
TEST(stdatomic, atomic_is_lock_free) {
  atomic_char small;
  ASSERT_TRUE(atomic_is_lock_free(&small));
  atomic_intmax_t big;
  // atomic_intmax_t(size = 64) is not lock free on mips32.
#if defined(__mips__) && !defined(__LP64__)
  ASSERT_FALSE(atomic_is_lock_free(&big));
#else
  ASSERT_TRUE(atomic_is_lock_free(&big));
#endif
}
// Exercises atomic_flag: test_and_set returns the previous value,
// and clear resets the flag; both plain and _explicit forms.
TEST(stdatomic, atomic_flag) {
  atomic_flag f = ATOMIC_FLAG_INIT;
  ASSERT_FALSE(atomic_flag_test_and_set(&f));
  ASSERT_TRUE(atomic_flag_test_and_set(&f));

  atomic_flag_clear(&f);

  ASSERT_FALSE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));
  ASSERT_TRUE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));

  atomic_flag_clear_explicit(&f, memory_order_relaxed);
  ASSERT_FALSE(atomic_flag_test_and_set_explicit(&f, memory_order_relaxed));
}
// A store followed by a load must observe the stored value,
// for both the plain and _explicit forms.
TEST(stdatomic, atomic_store) {
  atomic_int i;
  atomic_store(&i, 123);
  ASSERT_EQ(123, atomic_load(&i));
  atomic_store_explicit(&i, 123, memory_order_relaxed);
  ASSERT_EQ(123, atomic_load_explicit(&i, memory_order_relaxed));
}
// atomic_exchange returns the previous value while installing the new one.
TEST(stdatomic, atomic_exchange) {
  atomic_int i;
  atomic_store(&i, 123);
  ASSERT_EQ(123, atomic_exchange(&i, 456));
  ASSERT_EQ(456, atomic_exchange_explicit(&i, 123, memory_order_relaxed));
}
// Exercises all four compare-exchange flavors. In each case a successful
// CAS leaves `expected` untouched, and a failed CAS writes the current
// value (456) back into `expected`.
//
// NOTE: atomic_compare_exchange_weak is allowed to fail spuriously even
// when the values match, so the weak variants are retried in a loop
// instead of asserted to succeed on the first attempt.
TEST(stdatomic, atomic_compare_exchange) {
  atomic_int i;
  int expected;

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_strong(&i, &expected, 456));
  ASSERT_FALSE(atomic_compare_exchange_strong(&i, &expected, 456));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  ASSERT_TRUE(atomic_compare_exchange_strong_explicit(&i, &expected, 456, memory_order_relaxed, memory_order_relaxed));
  ASSERT_FALSE(atomic_compare_exchange_strong_explicit(&i, &expected, 456, memory_order_relaxed, memory_order_relaxed));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  int iter_count = 0;
  do {
    ++iter_count;
    // Arbitrary bound on spurious weak-CAS failures; generous enough
    // to make a false test failure essentially impossible.
    ASSERT_LT(iter_count, 100);
    ASSERT_EQ(123, expected);  // A spurious failure must not modify expected.
  } while (!atomic_compare_exchange_weak(&i, &expected, 456));
  ASSERT_FALSE(atomic_compare_exchange_weak(&i, &expected, 456));
  ASSERT_EQ(456, expected);

  atomic_store(&i, 123);
  expected = 123;
  iter_count = 0;
  do {
    ++iter_count;
    ASSERT_LT(iter_count, 100);
    ASSERT_EQ(123, expected);
  } while (!atomic_compare_exchange_weak_explicit(&i, &expected, 456, memory_order_relaxed, memory_order_relaxed));
  ASSERT_FALSE(atomic_compare_exchange_weak_explicit(&i, &expected, 456, memory_order_relaxed, memory_order_relaxed));
  ASSERT_EQ(456, expected);
}
// atomic_fetch_add returns the value held *before* the addition.
TEST(stdatomic, atomic_fetch_add) {
  atomic_int i = ATOMIC_VAR_INIT(123);
  ASSERT_EQ(123, atomic_fetch_add(&i, 1));
  ASSERT_EQ(124, atomic_fetch_add_explicit(&i, 1, memory_order_relaxed));
  ASSERT_EQ(125, atomic_load(&i));
}
// atomic_fetch_sub returns the value held *before* the subtraction.
TEST(stdatomic, atomic_fetch_sub) {
  atomic_int i = ATOMIC_VAR_INIT(123);
  ASSERT_EQ(123, atomic_fetch_sub(&i, 1));
  ASSERT_EQ(122, atomic_fetch_sub_explicit(&i, 1, memory_order_relaxed));
  ASSERT_EQ(121, atomic_load(&i));
}
// atomic_fetch_or returns the value held *before* the bitwise OR.
TEST(stdatomic, atomic_fetch_or) {
  atomic_int i = ATOMIC_VAR_INIT(0x100);
  ASSERT_EQ(0x100, atomic_fetch_or(&i, 0x020));
  ASSERT_EQ(0x120, atomic_fetch_or_explicit(&i, 0x003, memory_order_relaxed));
  ASSERT_EQ(0x123, atomic_load(&i));
}
// atomic_fetch_xor returns the value held *before* the bitwise XOR.
TEST(stdatomic, atomic_fetch_xor) {
  atomic_int i = ATOMIC_VAR_INIT(0x100);
  ASSERT_EQ(0x100, atomic_fetch_xor(&i, 0x120));
  ASSERT_EQ(0x020, atomic_fetch_xor_explicit(&i, 0x103, memory_order_relaxed));
  ASSERT_EQ(0x123, atomic_load(&i));
}
// atomic_fetch_and returns the value held *before* the bitwise AND.
TEST(stdatomic, atomic_fetch_and) {
  atomic_int i = ATOMIC_VAR_INIT(0x123);
  ASSERT_EQ(0x123, atomic_fetch_and(&i, 0x00f));
  ASSERT_EQ(0x003, atomic_fetch_and_explicit(&i, 0x2, memory_order_relaxed));
  ASSERT_EQ(0x002, atomic_load(&i));
}
// And a rudimentary test of acquire-release memory ordering:

// Number of iterations performed by the writer; assumed even below
// (the writer advances by 2 per iteration and relies on finishing at
// exactly BIG + 1).
constexpr static uint_least32_t BIG = 10000000ul;

// Three counters deliberately separated by large padding arrays so each
// lands in a different cache line, increasing the chance of catching a
// compiler/hardware ordering or alignment bug.
struct three_atomics {
  atomic_uint_least32_t x;
  char a[123]; // Everything in different cache lines,
               // increase chance of compiler getting alignment wrong.
  atomic_uint_least32_t y;
  char b[4013]; // padding between y and z as well — TODO confirm the
                // original array size against upstream.
  atomic_uint_least32_t z;
};
// Very simple acquire/release memory ordering sanity check.
// Writer thread: repeatedly stores i (then i+1) into x and z with
// relaxed ordering, then into y with release ordering. The release
// store to y publishes the preceding x/z stores, so a reader that
// acquires a value of y must see x and z values at least that large.
static void* writer(void* arg) {
  three_atomics* a = reinterpret_cast<three_atomics*>(arg);
  for (uint_least32_t i = 0; i <= BIG; i += 2) {
    atomic_store_explicit(&a->x, i, memory_order_relaxed);
    atomic_store_explicit(&a->z, i, memory_order_relaxed);
    atomic_store_explicit(&a->y, i, memory_order_release);
    atomic_store_explicit(&a->x, i+1, memory_order_relaxed);
    atomic_store_explicit(&a->z, i+1, memory_order_relaxed);
    atomic_store_explicit(&a->y, i+1, memory_order_release);
  }
  return nullptr;
}
// Reader thread: acquires y, then reads z and x relaxed. Because the
// writer released y after storing x and z, observing a value of y
// guarantees x >= y and z >= y; any smaller value is an acquire-release
// ordering violation. Runs until it observes the writer's final value.
static void* reader(void* arg) {
  three_atomics* a = reinterpret_cast<three_atomics*>(arg);
  uint_least32_t xval = 0, yval = 0, zval = 0;
  size_t repeat = 0;
  size_t repeat_limit = 1000;
  while (yval != BIG + 1) {
    yval = atomic_load_explicit(&a->y, memory_order_acquire);
    zval = atomic_load_explicit(&a->z, memory_order_relaxed);
    xval = atomic_load_explicit(&a->x, memory_order_relaxed);
    // If we see a given value of y, the immediately preceding
    // stores to z and x, or later ones, should also be visible.
    if (zval < yval) {
      // Cant just ASSERT, since we are in a non-void function.
      ADD_FAILURE() << "acquire-release ordering violation: "
                    << zval << " < " << yval << ", " << xval << "\n";
      return nullptr; // Only report once.
    }
    if (xval < yval) {
      // Cant just ASSERT, since we are in a non-void function.
      ADD_FAILURE() << "acquire-release ordering violation: "
                    << xval << " < " << yval << ", " << zval << "\n";
      return nullptr; // Only report once.
    }
    // Count iterations (saturating at repeat_limit) so we can check
    // below that the reader actually ran concurrently with the writer.
    if (repeat < repeat_limit) ++repeat;
  }
  // The following assertion is not technically guaranteed to hold.
  // But if it fails to hold, this test was useless, and we have a
  // serious scheduling issue that we should probably know about.
  EXPECT_EQ(repeat, repeat_limit);
  return nullptr;
}
// Launches the writer and reader threads against a shared three_atomics
// and checks that both exit cleanly (a null result means the reader saw
// no ordering violation) and that all three counters end at BIG + 1.
TEST(stdatomic, ordering) {
  // Run a memory ordering sanity test.
  void* result;
  three_atomics a;
  atomic_init(&a.x, 0ul);
  atomic_init(&a.y, 0ul);
  atomic_init(&a.z, 0ul);
  pthread_t t1, t2;
  ASSERT_EQ(0, pthread_create(&t1, 0, reader, &a));
  ASSERT_EQ(0, pthread_create(&t2, 0, writer, &a));
  ASSERT_EQ(0, pthread_join(t1, &result));
  EXPECT_EQ(0, result);  // Reader returns non-null only on early failure.
  ASSERT_EQ(0, pthread_join(t2, &result));
  EXPECT_EQ(0, result);
  // Final values: the writer's last iteration stores BIG + 1 everywhere.
  EXPECT_EQ(atomic_load_explicit(&a.x, memory_order_consume), BIG + 1);
  EXPECT_EQ(atomic_load_explicit(&a.y, memory_order_seq_cst), BIG + 1);
  EXPECT_EQ(atomic_load(&a.z), BIG + 1);
}