/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_ATOMIC_H_
#define ART_RUNTIME_ATOMIC_H_

#include <stdint.h>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"

namespace art {

class Mutex;

template<typename T>
class Atomic {
 public:
  Atomic<T>() : value_(0) { }

  explicit Atomic<T>(T value) : value_(value) { }

  Atomic<T>& operator=(T desired) {
    Store(desired);
    return *this;
  }

  T Load() const {
    return value_;
  }

  operator T() const {
    return Load();
  }

  T FetchAndAdd(const T value) {
    return __sync_fetch_and_add(&value_, value);  // Return old value.
  }

  T FetchAndSub(const T value) {
    return __sync_fetch_and_sub(&value_, value);  // Return old value.
  }

  T operator++() {  // Prefix operator.
    return __sync_add_and_fetch(&value_, 1);  // Return new value.
  }

  T operator++(int) {  // Postfix operator.
    return __sync_fetch_and_add(&value_, 1);  // Return old value.
  }

  T operator--() {  // Prefix operator.
    return __sync_sub_and_fetch(&value_, 1);  // Return new value.
  }

  T operator--(int) {  // Postfix operator.
    return __sync_fetch_and_sub(&value_, 1);  // Return old value.
  }

  bool CompareAndSwap(T expected_value, T desired_value) {
    return __sync_bool_compare_and_swap(&value_, expected_value, desired_value);
  }

  volatile T* Address() {
    return &value_;
  }

 private:
  // Unsafe, non-atomic store backing the assignment operator.
  void Store(T desired) {
    value_ = desired;
  }

  volatile T value_;
};

typedef Atomic<int32_t> AtomicInteger;
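
// Illustrative usage (a sketch, not part of the API surface): AtomicInteger reads like a
// plain integer while providing atomic read-modify-write operations, e.g.
//
//   AtomicInteger counter(0);
//   counter++;                         // Atomic increment; returns the old value.
//   int32_t seen = counter.Load();
//   if (counter.CompareAndSwap(seen, seen + 1)) {
//     // The value was still 'seen' and has been atomically replaced with 'seen + 1'.
//   }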

// NOTE: Two "quasiatomic" operations on the exact same memory address
// are guaranteed to operate atomically with respect to each other,
// but no guarantees are made about quasiatomic operations mixed with
// non-quasiatomic operations on the same address, nor about
// quasiatomic operations that are performed on partially-overlapping
// memory.
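//
// For example, a QuasiAtomic::Read64 that races with a QuasiAtomic::Write64 on the same
// address never observes a torn value, but a Write64 racing with a plain 64-bit store to
// that address carries no such guarantee.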
class QuasiAtomic {
#if defined(__mips__) && !defined(__LP64__)
  static constexpr bool kNeedSwapMutexes = true;
#else
  static constexpr bool kNeedSwapMutexes = false;
#endif

 public:
  static void Startup();

  static void Shutdown();

  // Reads the 64-bit value at "addr" without tearing.
  static int64_t Read64(volatile const int64_t* addr) {
    if (!kNeedSwapMutexes) {
      int64_t value;
#if defined(__LP64__)
      value = *addr;
#else
#if defined(__arm__)
#if defined(__ARM_FEATURE_LPAE)
      // With LPAE support (such as Cortex-A15), ldrd is defined not to tear.
      __asm__ __volatile__("@ QuasiAtomic::Read64\n"
          "ldrd     %0, %H0, %1"
          : "=r" (value)
          : "m" (*addr));
#else
      // Exclusive loads are defined not to tear; clearing the exclusive state isn't necessary.
      __asm__ __volatile__("@ QuasiAtomic::Read64\n"
          "ldrexd   %0, %H0, %1"
          : "=r" (value)
          : "Q" (*addr));
#endif
#elif defined(__i386__)
      __asm__ __volatile__(
          "movq     %1, %0\n"
          : "=x" (value)
          : "m" (*addr));
#else
      LOG(FATAL) << "Unsupported architecture";
#endif
#endif  // defined(__LP64__)
      return value;
    } else {
      return SwapMutexRead64(addr);
    }
  }

  // Writes to the 64-bit value at "addr" without tearing.
  static void Write64(volatile int64_t* addr, int64_t value) {
    if (!kNeedSwapMutexes) {
#if defined(__LP64__)
      *addr = value;
#else
#if defined(__arm__)
#if defined(__ARM_FEATURE_LPAE)
      // If the ARM architecture has LPAE (such as Cortex-A15), strd is defined not to tear.
      __asm__ __volatile__("@ QuasiAtomic::Write64\n"
          "strd     %1, %H1, %0"
          : "=m" (*addr)
          : "r" (value));
#else
      // The write is done as a swap so that the cache-line is in the exclusive state for the store.
      int64_t prev;
      int status;
      do {
        __asm__ __volatile__("@ QuasiAtomic::Write64\n"
            "ldrexd   %0, %H0, %2\n"
            "strexd   %1, %3, %H3, %2"
            : "=&r" (prev), "=&r" (status), "+Q"(*addr)
            : "r" (value)
            : "cc");
      } while (UNLIKELY(status != 0));
#endif
#elif defined(__i386__)
      __asm__ __volatile__(
          "movq     %1, %0"
          : "=m" (*addr)
          : "x" (value));
#else
      LOG(FATAL) << "Unsupported architecture";
#endif
#endif  // defined(__LP64__)
    } else {
      SwapMutexWrite64(addr, value);
    }
  }

  // Atomically compare the value at "addr" to "old_value"; if they are equal, replace it with
  // "new_value" and return true. Otherwise, don't swap and return false.
  static bool Cas64(int64_t old_value, int64_t new_value, volatile int64_t* addr) {
    if (!kNeedSwapMutexes) {
      return __sync_bool_compare_and_swap(addr, old_value, new_value);
    } else {
      return SwapMutexCas64(old_value, new_value, addr);
    }
  }
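
  // Illustrative use of Cas64 (a sketch, not part of this class): a 64-bit atomic add can be
  // built from a read-retry loop, assuming the location is only accessed through QuasiAtomic:
  //
  //   int64_t old_value;
  //   do {
  //     old_value = QuasiAtomic::Read64(addr);
  //   } while (!QuasiAtomic::Cas64(old_value, old_value + delta, addr));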

  // Does the architecture provide reasonable atomic long operations or do we fall back on mutexes?
  static bool LongAtomicsUseMutexes() {
    return kNeedSwapMutexes;
  }

  static void MembarLoadStore() {
#if defined(__arm__) || defined(__aarch64__)
    __asm__ __volatile__("dmb ish" : : : "memory");
#elif defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__("" : : : "memory");
#elif defined(__mips__)
    __asm__ __volatile__("sync" : : : "memory");
#else
#error Unexpected architecture
#endif
  }

  static void MembarLoadLoad() {
#if defined(__arm__) || defined(__aarch64__)
    __asm__ __volatile__("dmb ish" : : : "memory");
#elif defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__("" : : : "memory");
#elif defined(__mips__)
    __asm__ __volatile__("sync" : : : "memory");
#else
#error Unexpected architecture
#endif
  }

  static void MembarStoreStore() {
#if defined(__arm__) || defined(__aarch64__)
    __asm__ __volatile__("dmb ishst" : : : "memory");
#elif defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__("" : : : "memory");
#elif defined(__mips__)
    __asm__ __volatile__("sync" : : : "memory");
#else
#error Unexpected architecture
#endif
  }

  static void MembarStoreLoad() {
#if defined(__arm__) || defined(__aarch64__)
    __asm__ __volatile__("dmb ish" : : : "memory");
#elif defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__("mfence" : : : "memory");
#elif defined(__mips__)
    __asm__ __volatile__("sync" : : : "memory");
#else
#error Unexpected architecture
#endif
  }
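
  // Illustrative pairing (a sketch based on the barrier names, not on specific callers): a
  // writer publishing data through a flag issues MembarStoreStore() between the data store and
  // the flag store, and the reader issues MembarLoadLoad() between loading the flag and loading
  // the data. MembarStoreLoad() is the strongest of the four and, as above, is the only one
  // that requires an mfence on x86.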

 private:
  static Mutex* GetSwapMutex(const volatile int64_t* addr);
  static int64_t SwapMutexRead64(volatile const int64_t* addr);
  static void SwapMutexWrite64(volatile int64_t* addr, int64_t val);
  static bool SwapMutexCas64(int64_t old_value, int64_t new_value, volatile int64_t* addr);

  // We stripe across a bunch of different mutexes to reduce contention.
  static constexpr size_t kSwapMutexCount = 32;
  static std::vector<Mutex*>* gSwapMutexes;
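
  // A minimal sketch of the striping idea (the real GetSwapMutex() lives in atomic.cc and may
  // differ): hash the address down to one of the kSwapMutexCount mutexes, e.g.
  //
  //   return (*gSwapMutexes)[(reinterpret_cast<uintptr_t>(addr) >> 3) % kSwapMutexCount];
  //
  // so that operations on distinct addresses usually contend on different locks.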

  DISALLOW_COPY_AND_ASSIGN(QuasiAtomic);
};

}  // namespace art

#endif  // ART_RUNTIME_ATOMIC_H_