/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_ATOMIC_H_
#define ART_RUNTIME_ATOMIC_H_

#ifdef __clang__
#define ART_HAVE_STDATOMIC 1
#endif

#include <stdint.h>
#if ART_HAVE_STDATOMIC
#include <atomic>
#endif
#include <limits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"

namespace art {

class Mutex;

// QuasiAtomic encapsulates two separate facilities that we are
// trying to move away from: "quasiatomic" 64 bit operations
// and custom memory fences. For the time being, they remain
// exposed. Clients should be converted to use class Atomic
// below whenever possible, and should eventually use C++11 atomics.
// The two facilities that do not have a good C++11 analog are
// ThreadFenceForConstructor and Atomic::*JavaData.
//
// NOTE: Two "quasiatomic" operations on the exact same memory address
// are guaranteed to operate atomically with respect to each other,
// but no guarantees are made about quasiatomic operations mixed with
// non-quasiatomic operations on the same address, nor about
// quasiatomic operations that are performed on partially-overlapping
// memory.
class QuasiAtomic {
#if defined(__mips__) && !defined(__LP64__)
  static constexpr bool kNeedSwapMutexes = true;
#else
  static constexpr bool kNeedSwapMutexes = false;
#endif

 public:
  static void Startup();

  static void Shutdown();

  // Reads the 64-bit value at "addr" without tearing.
  static int64_t Read64(volatile const int64_t* addr) {
    if (!kNeedSwapMutexes) {
      int64_t value;
#if defined(__LP64__)
      value = *addr;
#else
#if defined(__arm__)
#if defined(__ARM_FEATURE_LPAE)
      // With LPAE support (such as on Cortex-A15), ldrd is defined not to tear.
      __asm__ __volatile__("@ QuasiAtomic::Read64\n"
                           "ldrd %0, %H0, %1"
                           : "=r" (value)
                           : "m" (*addr));
#else
      // Exclusive loads are defined not to tear, so clearing the exclusive state isn't necessary.
      __asm__ __volatile__("@ QuasiAtomic::Read64\n"
                           "ldrexd %0, %H0, %1"
                           : "=r" (value)
                           : "Q" (*addr));
#endif
#elif defined(__i386__)
      __asm__ __volatile__(
          "movq %1, %0\n"
          : "=x" (value)
          : "m" (*addr));
#else
      LOG(FATAL) << "Unsupported architecture";
#endif
#endif  // defined(__LP64__)
      return value;
    } else {
      return SwapMutexRead64(addr);
    }
  }

  // Writes to the 64-bit value at "addr" without tearing.
  static void Write64(volatile int64_t* addr, int64_t value) {
    if (!kNeedSwapMutexes) {
#if defined(__LP64__)
      *addr = value;
#else
#if defined(__arm__)
#if defined(__ARM_FEATURE_LPAE)
      // If the ARM architecture has LPAE (such as on Cortex-A15), strd is defined not to tear.
      __asm__ __volatile__("@ QuasiAtomic::Write64\n"
                           "strd %1, %H1, %0"
                           : "=m" (*addr)
                           : "r" (value));
#else
      // The write is done as a swap so that the cache-line is in the exclusive state for the store.
      int64_t prev;
      int status;
      do {
        __asm__ __volatile__("@ QuasiAtomic::Write64\n"
                             "ldrexd %0, %H0, %2\n"
                             "strexd %1, %3, %H3, %2"
                             : "=&r" (prev), "=&r" (status), "+Q" (*addr)
                             : "r" (value)
                             : "cc");
      } while (UNLIKELY(status != 0));
#endif
#elif defined(__i386__)
      __asm__ __volatile__(
          "movq %1, %0"
          : "=m" (*addr)
          : "x" (value));
#else
      LOG(FATAL) << "Unsupported architecture";
#endif
#endif  // defined(__LP64__)
    } else {
      SwapMutexWrite64(addr, value);
    }
  }

  // Atomically compare the value at "addr" to "old_value"; if equal, replace it with "new_value"
  // and return true. Otherwise, don't swap, and return false.
  // This is fully ordered, i.e. it has C++11 memory_order_seq_cst
  // semantics (assuming all other accesses use a mutex if this one does).
  // This has "strong" semantics; if it fails then it is guaranteed that
  // at some point during the execution of Cas64, *addr was not equal to
  // old_value.
  static bool Cas64(int64_t old_value, int64_t new_value, volatile int64_t* addr) {
    if (!kNeedSwapMutexes) {
      return __sync_bool_compare_and_swap(addr, old_value, new_value);
    } else {
      return SwapMutexCas64(old_value, new_value, addr);
    }
  }

  // Does the architecture provide reasonable atomic long operations or do we fall back on mutexes?
  static bool LongAtomicsUseMutexes() {
    return kNeedSwapMutexes;
  }

  #if ART_HAVE_STDATOMIC

  static void ThreadFenceAcquire() {
    std::atomic_thread_fence(std::memory_order_acquire);
  }

  static void ThreadFenceRelease() {
    std::atomic_thread_fence(std::memory_order_release);
  }

  static void ThreadFenceForConstructor() {
  #if defined(__aarch64__)
    __asm__ __volatile__("dmb ishst" : : : "memory");
  #else
    std::atomic_thread_fence(std::memory_order_release);
  #endif
  }

  static void ThreadFenceSequentiallyConsistent() {
    std::atomic_thread_fence(std::memory_order_seq_cst);
  }

  #else

  static void ThreadFenceAcquire() {
  #if defined(__arm__) || defined(__aarch64__)
    __asm__ __volatile__("dmb ish" : : : "memory");
    // We could possibly use "dmb ishld" on aarch64, but we currently also use this fence on
    // volatile loads to enforce store atomicity, and "dmb ishld" is insufficient for that purpose.
  #elif defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__("" : : : "memory");
  #elif defined(__mips__)
    __asm__ __volatile__("sync" : : : "memory");
  #else
  #error Unexpected architecture
  #endif
  }

  static void ThreadFenceRelease() {
  #if defined(__arm__) || defined(__aarch64__)
    __asm__ __volatile__("dmb ish" : : : "memory");
    // "dmb ishst" doesn't order a load followed by a store.
  #elif defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__("" : : : "memory");
  #elif defined(__mips__)
    __asm__ __volatile__("sync" : : : "memory");
  #else
  #error Unexpected architecture
  #endif
  }

  // Fence at the end of a constructor with final fields or an allocation. We believe this only
  // has to order stores, and can thus be weaker than a release fence on aarch64.
  static void ThreadFenceForConstructor() {
  #if defined(__arm__) || defined(__aarch64__)
    __asm__ __volatile__("dmb ishst" : : : "memory");
  #elif defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__("" : : : "memory");
  #elif defined(__mips__)
    __asm__ __volatile__("sync" : : : "memory");
  #else
  #error Unexpected architecture
  #endif
  }

  static void ThreadFenceSequentiallyConsistent() {
  #if defined(__arm__) || defined(__aarch64__)
    __asm__ __volatile__("dmb ish" : : : "memory");
  #elif defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__("mfence" : : : "memory");
  #elif defined(__mips__)
    __asm__ __volatile__("sync" : : : "memory");
  #else
  #error Unexpected architecture
  #endif
  }
  #endif

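  // Illustrative use of ThreadFenceForConstructor (hypothetical caller code, not part of this
  // class): the fence sits between initializing an object's fields and publishing a pointer to
  // it, so that a reader who sees the pointer also sees the initialized fields (readers still
  // need at least an address dependency or acquire on their side). The names Widget, args and
  // gPublished are made up for the example.
  //
  //   Widget* obj = new Widget(args);             // Constructor stores the object's fields.
  //   QuasiAtomic::ThreadFenceForConstructor();   // Order those stores before the publication.
  //   gPublished = obj;                           // Publishing store.
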
 private:
  static Mutex* GetSwapMutex(const volatile int64_t* addr);
  static int64_t SwapMutexRead64(volatile const int64_t* addr);
  static void SwapMutexWrite64(volatile int64_t* addr, int64_t val);
  static bool SwapMutexCas64(int64_t old_value, int64_t new_value, volatile int64_t* addr);

  // We stripe across a bunch of different mutexes to reduce contention.
  static constexpr size_t kSwapMutexCount = 32;
  static std::vector<Mutex*>* gSwapMutexes;

  DISALLOW_COPY_AND_ASSIGN(QuasiAtomic);
};
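
// Illustrative usage sketch for QuasiAtomic (hypothetical caller code, not part of this header):
// a 64-bit field can be read, written, and updated without tearing even on 32-bit targets, with
// updates expressed as a Cas64 retry loop.
//
//   volatile int64_t counter = 0;                       // Hypothetical shared field.
//   int64_t old_value;
//   do {
//     old_value = QuasiAtomic::Read64(&counter);        // Non-tearing read.
//   } while (!QuasiAtomic::Cas64(old_value, old_value + 1, &counter));
//   QuasiAtomic::Write64(&counter, 0);                  // Non-tearing store (no ordering implied).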

#if ART_HAVE_STDATOMIC
template<typename T>
class Atomic : public std::atomic<T> {
 public:
  Atomic<T>() : std::atomic<T>() { }

  explicit Atomic<T>(T value) : std::atomic<T>(value) { }

  // Load from memory without ordering or synchronization constraints.
  T LoadRelaxed() const {
    return this->load(std::memory_order_relaxed);
  }

  // Word tearing allowed, but may race.
  // TODO: Optimize?
  // There has been some discussion of eventually disallowing word
  // tearing for Java data loads.
  T LoadJavaData() const {
    return this->load(std::memory_order_relaxed);
  }

  // Load from memory with a total ordering.
  // Corresponds exactly to a Java volatile load.
  T LoadSequentiallyConsistent() const {
    return this->load(std::memory_order_seq_cst);
  }

  // Store to memory without ordering or synchronization constraints.
  void StoreRelaxed(T desired) {
    this->store(desired, std::memory_order_relaxed);
  }

  // Word tearing allowed, but may race.
  void StoreJavaData(T desired) {
    this->store(desired, std::memory_order_relaxed);
  }

  // Store to memory with release ordering.
  void StoreRelease(T desired) {
    this->store(desired, std::memory_order_release);
  }

  // Store to memory with a total ordering.
  void StoreSequentiallyConsistent(T desired) {
    this->store(desired, std::memory_order_seq_cst);
  }

  // Atomically replace the value with desired value if it matches the expected value.
  // Participates in total ordering of atomic operations.
  bool CompareExchangeStrongSequentiallyConsistent(T expected_value, T desired_value) {
    return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_seq_cst);
  }

  // The same, except it may fail spuriously.
  bool CompareExchangeWeakSequentiallyConsistent(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_seq_cst);
  }

  // Atomically replace the value with desired value if it matches the expected value. Doesn't
  // imply ordering or synchronization constraints.
  bool CompareExchangeStrongRelaxed(T expected_value, T desired_value) {
    return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_relaxed);
  }

  // The same, except it may fail spuriously.
  bool CompareExchangeWeakRelaxed(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_relaxed);
  }

  // Atomically replace the value with desired value if it matches the expected value. Prior writes
  // made to other memory locations by the thread that did the release become visible in this
  // thread.
  bool CompareExchangeWeakAcquire(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_acquire);
  }

  // Atomically replace the value with desired value if it matches the expected value. Prior writes
  // to other memory locations become visible to the threads that do a consume or an acquire on the
  // same location.
  bool CompareExchangeWeakRelease(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_release);
  }

  T FetchAndAddSequentiallyConsistent(const T value) {
    return this->fetch_add(value, std::memory_order_seq_cst);  // Return old value.
  }

  T FetchAndSubSequentiallyConsistent(const T value) {
    return this->fetch_sub(value, std::memory_order_seq_cst);  // Return old value.
  }

  volatile T* Address() {
    return reinterpret_cast<T*>(this);
  }

  static T MaxValue() {
    return std::numeric_limits<T>::max();
  }
};
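
// Illustrative usage sketch (hypothetical caller code, not part of this header): the
// *SequentiallyConsistent accessors give Java-volatile-like behavior, and a lock-free update can
// pair a relaxed load with a weak compare-exchange retry loop.
//
//   Atomic<int32_t> flag(0);                              // Hypothetical shared flag.
//   flag.StoreSequentiallyConsistent(1);                  // Total-order store.
//   int32_t seen = flag.LoadSequentiallyConsistent();     // Total-order load.
//
//   Atomic<int32_t> counter(0);                           // Hypothetical shared counter.
//   int32_t expected;
//   do {
//     expected = counter.LoadRelaxed();
//   } while (!counter.CompareExchangeWeakSequentiallyConsistent(expected, expected + 1));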

#else

template<typename T> class Atomic;

// Helper class for Atomic to deal separately with size 8 and small
// objects. Should not be used directly.

template<int SZ, class T> struct AtomicHelper {
  friend class Atomic<T>;

 private:
  COMPILE_ASSERT(sizeof(T) <= 4, bad_atomic_helper_arg);

  static T LoadRelaxed(const volatile T* loc) {
    // sizeof(T) <= 4
    return *loc;
  }

  static void StoreRelaxed(volatile T* loc, T desired) {
    // sizeof(T) <= 4
    *loc = desired;
  }

  static bool CompareExchangeStrongSequentiallyConsistent(volatile T* loc,
                                                          T expected_value, T desired_value) {
    // sizeof(T) <= 4
    return __sync_bool_compare_and_swap(loc, expected_value, desired_value);
  }
};

template<class T> struct AtomicHelper<8, T> {
  friend class Atomic<T>;

 private:
  COMPILE_ASSERT(sizeof(T) == 8, bad_large_atomic_helper_arg);

  static T LoadRelaxed(const volatile T* loc) {
    // sizeof(T) == 8
    volatile const int64_t* loc_ptr = reinterpret_cast<volatile const int64_t*>(loc);
    return reinterpret_cast<T>(QuasiAtomic::Read64(loc_ptr));
  }

  static void StoreRelaxed(volatile T* loc, T desired) {
    // sizeof(T) == 8
    volatile int64_t* loc_ptr = reinterpret_cast<volatile int64_t*>(loc);
    QuasiAtomic::Write64(loc_ptr, reinterpret_cast<int64_t>(desired));
  }

  static bool CompareExchangeStrongSequentiallyConsistent(volatile T* loc,
                                                          T expected_value, T desired_value) {
    // sizeof(T) == 8
    volatile int64_t* loc_ptr = reinterpret_cast<volatile int64_t*>(loc);
    return QuasiAtomic::Cas64(reinterpret_cast<int64_t>(expected_value),
                              reinterpret_cast<int64_t>(desired_value), loc_ptr);
  }
};
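
// Dispatch note (comment only, not additional API): Atomic<T> below selects its helper as
// AtomicHelper<sizeof(T), T>, so 1-, 2- and 4-byte types use the primary template (plain
// loads/stores plus __sync_bool_compare_and_swap), while 8-byte types use the specialization
// above and route through QuasiAtomic, which may fall back to the swap mutexes on 32-bit MIPS.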

template<typename T>
class Atomic {
 private:
  COMPILE_ASSERT(sizeof(T) <= 4 || sizeof(T) == 8, bad_atomic_arg);

 public:
  Atomic<T>() : value_(0) { }

  explicit Atomic<T>(T value) : value_(value) { }

  // Load from memory without ordering or synchronization constraints.
  T LoadRelaxed() const {
    return AtomicHelper<sizeof(T), T>::LoadRelaxed(&value_);
  }

  // Word tearing allowed, but may race.
  T LoadJavaData() const {
    return value_;
  }

  // Load from memory with a total ordering.
  T LoadSequentiallyConsistent() const;

  // Store to memory without ordering or synchronization constraints.
  void StoreRelaxed(T desired) {
    AtomicHelper<sizeof(T), T>::StoreRelaxed(&value_, desired);
  }

  // Word tearing allowed, but may race.
  void StoreJavaData(T desired) {
    value_ = desired;
  }

  // Store to memory with release ordering.
  void StoreRelease(T desired);

  // Store to memory with a total ordering.
  void StoreSequentiallyConsistent(T desired);

  // Atomically replace the value with desired value if it matches the expected value.
  // Participates in total ordering of atomic operations.
  bool CompareExchangeStrongSequentiallyConsistent(T expected_value, T desired_value) {
    return AtomicHelper<sizeof(T), T>::
        CompareExchangeStrongSequentiallyConsistent(&value_, expected_value, desired_value);
  }

  // The same, but may fail spuriously.
  bool CompareExchangeWeakSequentiallyConsistent(T expected_value, T desired_value) {
    // TODO: Take advantage of the fact that it may fail spuriously.
    return AtomicHelper<sizeof(T), T>::
        CompareExchangeStrongSequentiallyConsistent(&value_, expected_value, desired_value);
  }

  // Atomically replace the value with desired value if it matches the expected value. Doesn't
  // imply ordering or synchronization constraints.
  bool CompareExchangeStrongRelaxed(T expected_value, T desired_value) {
    // TODO: make this relaxed.
    return CompareExchangeStrongSequentiallyConsistent(expected_value, desired_value);
  }

  // The same, but may fail spuriously.
  bool CompareExchangeWeakRelaxed(T expected_value, T desired_value) {
    // TODO: Take advantage of the fact that it may fail spuriously.
    // TODO: make this relaxed.
    return CompareExchangeStrongSequentiallyConsistent(expected_value, desired_value);
  }

  // Atomically replace the value with desired value if it matches the expected value. Prior
  // accesses made to other memory locations by the thread that did the release become visible
  // in this thread.
  bool CompareExchangeWeakAcquire(T expected_value, T desired_value) {
    // TODO: make this acquire.
    return CompareExchangeWeakSequentiallyConsistent(expected_value, desired_value);
  }

  // Atomically replace the value with desired value if it matches the expected value. Prior
  // accesses to other memory locations become visible to the threads that do a consume or an
  // acquire on the same location.
  bool CompareExchangeWeakRelease(T expected_value, T desired_value) {
    // TODO: make this release.
    return CompareExchangeWeakSequentiallyConsistent(expected_value, desired_value);
  }

  volatile T* Address() {
    return &value_;
  }

  T FetchAndAddSequentiallyConsistent(const T value) {
    if (sizeof(T) <= 4) {
      return __sync_fetch_and_add(&value_, value);  // Return old value.
    } else {
      T expected;
      do {
        expected = LoadRelaxed();
      } while (!CompareExchangeWeakSequentiallyConsistent(expected, expected + value));
      return expected;
    }
  }

  T FetchAndSubSequentiallyConsistent(const T value) {
    if (sizeof(T) <= 4) {
      return __sync_fetch_and_sub(&value_, value);  // Return old value.
    } else {
      return FetchAndAddSequentiallyConsistent(-value);
    }
  }

  T operator++() {  // Prefix operator.
    if (sizeof(T) <= 4) {
      return __sync_add_and_fetch(&value_, 1);  // Return new value.
    } else {
      return FetchAndAddSequentiallyConsistent(1) + 1;
    }
  }

  T operator++(int) {  // Postfix operator.
    return FetchAndAddSequentiallyConsistent(1);
  }

  T operator--() {  // Prefix operator.
    if (sizeof(T) <= 4) {
      return __sync_sub_and_fetch(&value_, 1);  // Return new value.
    } else {
      return FetchAndSubSequentiallyConsistent(1) - 1;
    }
  }

  T operator--(int) {  // Postfix operator.
    return FetchAndSubSequentiallyConsistent(1);
  }

  static T MaxValue() {
    return std::numeric_limits<T>::max();
  }

 private:
  volatile T value_;
};
#endif

typedef Atomic<int32_t> AtomicInteger;

COMPILE_ASSERT(sizeof(AtomicInteger) == sizeof(int32_t), weird_atomic_int_size);
COMPILE_ASSERT(alignof(AtomicInteger) == alignof(int32_t),
               atomic_int_alignment_differs_from_that_of_underlying_type);
COMPILE_ASSERT(sizeof(Atomic<long long>) == sizeof(long long), weird_atomic_long_long_size);
COMPILE_ASSERT(alignof(Atomic<long long>) == alignof(long long),
               atomic_long_long_alignment_differs_from_that_of_underlying_type);

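// Illustrative sketch (hypothetical caller code, not part of this header): AtomicInteger supports
// the explicit accessors above as well as increment/decrement operators and fetch-and-add, all
// intended to be sequentially consistent.
//
//   AtomicInteger counter(0);                                      // Hypothetical shared counter.
//   ++counter;                                                     // Increment; returns the new value (1).
//   int32_t old = counter.FetchAndAddSequentiallyConsistent(10);   // Returns the old value (1).
//   int32_t cur = counter.LoadSequentiallyConsistent();            // 11, absent other threads.
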
#if !ART_HAVE_STDATOMIC
template<typename T>
inline T Atomic<T>::LoadSequentiallyConsistent() const {
  T result = value_;
  if (sizeof(T) != 8 || !QuasiAtomic::LongAtomicsUseMutexes()) {
    QuasiAtomic::ThreadFenceAcquire();
    // We optimistically assume this suffices for store atomicity.
    // On ARMv8 we strengthen ThreadFenceAcquire to make that true.
  }
  return result;
}

template<typename T>
inline void Atomic<T>::StoreRelease(T desired) {
  if (sizeof(T) != 8 || !QuasiAtomic::LongAtomicsUseMutexes()) {
    QuasiAtomic::ThreadFenceRelease();
  }
  StoreRelaxed(desired);
}

template<typename T>
inline void Atomic<T>::StoreSequentiallyConsistent(T desired) {
  if (sizeof(T) != 8 || !QuasiAtomic::LongAtomicsUseMutexes()) {
    QuasiAtomic::ThreadFenceRelease();
  }
  StoreRelaxed(desired);
  if (sizeof(T) != 8 || !QuasiAtomic::LongAtomicsUseMutexes()) {
    QuasiAtomic::ThreadFenceSequentiallyConsistent();
  }
}

#endif
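
// Summary of the emulation above (comment only, not additional API): a sequentially consistent
// store is a release fence, a relaxed store, and then a full fence; a sequentially consistent
// load is a plain read followed by an acquire fence. When an 8-byte type falls back to the swap
// mutexes, the fences are skipped and the mutex-protected QuasiAtomic path is relied on for
// ordering.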

}  // namespace art

#endif  // ART_RUNTIME_ATOMIC_H_