/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_ATOMIC_H_
#define ART_RUNTIME_ATOMIC_H_

#include <stdint.h>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"

namespace art {

class Mutex;

template<typename T>
class Atomic {
 public:
  Atomic<T>() : value_(0) { }

  explicit Atomic<T>(T value) : value_(value) { }

  Atomic<T>& operator=(T desired) {
    Store(desired);
    return *this;
  }

  T Load() const {
    return value_;
  }

  operator T() const {
    return Load();
  }

  T FetchAndAdd(const T value) {
    return __sync_fetch_and_add(&value_, value);  // Return old value.
  }

  T FetchAndSub(const T value) {
    return __sync_fetch_and_sub(&value_, value);  // Return old value.
  }

  T operator++() {  // Prefix operator.
    return __sync_add_and_fetch(&value_, 1);  // Return new value.
  }

  T operator++(int) {  // Postfix operator.
    return __sync_fetch_and_add(&value_, 1);  // Return old value.
  }

  T operator--() {  // Prefix operator.
    return __sync_sub_and_fetch(&value_, 1);  // Return new value.
  }

  T operator--(int) {  // Postfix operator.
    return __sync_fetch_and_sub(&value_, 1);  // Return old value.
  }

  bool CompareAndSwap(T expected_value, T desired_value) {
    return __sync_bool_compare_and_swap(&value_, expected_value, desired_value);
  }

  volatile T* Address() {
    return &value_;
  }

 private:
  // Unsafe, non-atomic store used to implement operator=.
  void Store(T desired) {
    value_ = desired;
  }

  volatile T value_;
};

typedef Atomic<int32_t> AtomicInteger;

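// Illustrative sketch (not from the original header): typical use of AtomicInteger as a
// shared counter. The variable names below are hypothetical.
//
//   AtomicInteger ref_count(1);
//   ref_count++;                      // Atomic increment; returns the old value.
//   int32_t seen = ref_count.Load();
//   if (ref_count.CompareAndSwap(seen, seen - 1)) {
//     // The counter was atomically changed from "seen" to "seen - 1".
//   }
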
// NOTE: Two "quasiatomic" operations on the exact same memory address
// are guaranteed to operate atomically with respect to each other,
// but no guarantees are made about quasiatomic operations mixed with
// non-quasiatomic operations on the same address, nor about
// quasiatomic operations that are performed on partially-overlapping
// memory.
class QuasiAtomic {
#if defined(__mips__) && !defined(__LP64__)
  static constexpr bool kNeedSwapMutexes = true;
#else
  static constexpr bool kNeedSwapMutexes = false;
#endif

 public:
  static void Startup();

  static void Shutdown();

  // Reads the 64-bit value at "addr" without tearing.
  static int64_t Read64(volatile const int64_t* addr) {
    if (!kNeedSwapMutexes) {
      int64_t value;
#if defined(__LP64__)
      value = *addr;
#else
#if defined(__arm__)
#if defined(__ARM_FEATURE_LPAE)
      // With LPAE support (such as on Cortex-A15), ldrd is defined not to tear.
      __asm__ __volatile__("@ QuasiAtomic::Read64\n"
        "ldrd %0, %H0, %1"
        : "=r" (value)
        : "m" (*addr));
#else
      // Exclusive loads are defined not to tear, so clearing the exclusive state isn't necessary.
      __asm__ __volatile__("@ QuasiAtomic::Read64\n"
        "ldrexd %0, %H0, %1"
        : "=r" (value)
        : "Q" (*addr));
#endif
#elif defined(__i386__)
      __asm__ __volatile__(
          "movq %1, %0\n"
          : "=x" (value)
          : "m" (*addr));
#else
      LOG(FATAL) << "Unsupported architecture";
#endif
#endif  // defined(__LP64__)
      return value;
    } else {
      return SwapMutexRead64(addr);
    }
  }

  // Writes to the 64-bit value at "addr" without tearing.
  static void Write64(volatile int64_t* addr, int64_t value) {
    if (!kNeedSwapMutexes) {
#if defined(__LP64__)
      *addr = value;
#else
#if defined(__arm__)
#if defined(__ARM_FEATURE_LPAE)
      // If the ARM architecture has LPAE (such as Cortex-A15), strd is defined not to tear.
      __asm__ __volatile__("@ QuasiAtomic::Write64\n"
        "strd %1, %H1, %0"
        : "=m"(*addr)
        : "r" (value));
#else
      // The write is done as a swap so that the cache-line is in the exclusive state for the store.
      int64_t prev;
      int status;
      do {
        __asm__ __volatile__("@ QuasiAtomic::Write64\n"
          "ldrexd %0, %H0, %2\n"
          "strexd %1, %3, %H3, %2"
          : "=&r" (prev), "=&r" (status), "+Q"(*addr)
          : "r" (value)
          : "cc");
      } while (UNLIKELY(status != 0));
#endif
#elif defined(__i386__)
      __asm__ __volatile__(
          "movq %1, %0"
          : "=m" (*addr)
          : "x" (value));
#else
      LOG(FATAL) << "Unsupported architecture";
#endif
#endif  // defined(__LP64__)
    } else {
      SwapMutexWrite64(addr, value);
    }
  }

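  // Illustrative sketch (not from the original header): using Read64/Write64 so a 64-bit
  // field shared between threads is never read or written in two halves on 32-bit targets.
  // "bytes_allocated_" and "new_total" are hypothetical names.
  //
  //   volatile int64_t bytes_allocated_;
  //   ...
  //   QuasiAtomic::Write64(&bytes_allocated_, new_total);         // Store without tearing.
  //   int64_t snapshot = QuasiAtomic::Read64(&bytes_allocated_);  // Load without tearing.
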
  // Atomically compare the value at "addr" to "old_value"; if they are equal, replace it with
  // "new_value" and return true. Otherwise, don't swap, and return false.
  static bool Cas64(int64_t old_value, int64_t new_value, volatile int64_t* addr) {
    if (!kNeedSwapMutexes) {
      return __sync_bool_compare_and_swap(addr, old_value, new_value);
    } else {
      return SwapMutexCas64(old_value, new_value, addr);
    }
  }

  // Does the architecture provide reasonable atomic long operations or do we fall back on mutexes?
  static bool LongAtomicsUseMutexes() {
    return kNeedSwapMutexes;
  }

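  // Illustrative sketch (not from the original header): a compare-and-swap retry loop that
  // atomically adds "delta" to a 64-bit value using the primitives above. "Add64" and its
  // parameters are hypothetical.
  //
  //   void Add64(volatile int64_t* addr, int64_t delta) {
  //     int64_t old_value;
  //     do {
  //       old_value = QuasiAtomic::Read64(addr);
  //     } while (!QuasiAtomic::Cas64(old_value, old_value + delta, addr));
  //   }
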
  static void MembarLoadStore() {
  #if defined(__arm__) || defined(__aarch64__)
    __asm__ __volatile__("dmb ish" : : : "memory");
  #elif defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__("" : : : "memory");
  #elif defined(__mips__)
    __asm__ __volatile__("sync" : : : "memory");
  #else
  #error Unexpected architecture
  #endif
  }

  static void MembarLoadLoad() {
  #if defined(__arm__) || defined(__aarch64__)
    __asm__ __volatile__("dmb ish" : : : "memory");
  #elif defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__("" : : : "memory");
  #elif defined(__mips__)
    __asm__ __volatile__("sync" : : : "memory");
  #else
  #error Unexpected architecture
  #endif
  }

  static void MembarStoreStore() {
  #if defined(__arm__) || defined(__aarch64__)
    __asm__ __volatile__("dmb ishst" : : : "memory");
  #elif defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__("" : : : "memory");
  #elif defined(__mips__)
    __asm__ __volatile__("sync" : : : "memory");
  #else
  #error Unexpected architecture
  #endif
  }

  static void MembarStoreLoad() {
  #if defined(__arm__) || defined(__aarch64__)
    __asm__ __volatile__("dmb ish" : : : "memory");
  #elif defined(__i386__) || defined(__x86_64__)
    __asm__ __volatile__("mfence" : : : "memory");
  #elif defined(__mips__)
    __asm__ __volatile__("sync" : : : "memory");
  #else
  #error Unexpected architecture
  #endif
  }

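  // Illustrative sketch (not from the original header): a typical publish/consume pairing of
  // the barriers above. "payload_", "ready_", "ComputePayload" and "Use" are hypothetical.
  //
  //   // Publisher:
  //   payload_ = ComputePayload();
  //   QuasiAtomic::MembarStoreStore();  // Make the payload visible before the flag.
  //   ready_ = 1;
  //
  //   // Consumer:
  //   if (ready_ == 1) {
  //     QuasiAtomic::MembarLoadLoad();  // Read the flag before reading the payload.
  //     Use(payload_);
  //   }
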
 private:
  static Mutex* GetSwapMutex(const volatile int64_t* addr);
  static int64_t SwapMutexRead64(volatile const int64_t* addr);
  static void SwapMutexWrite64(volatile int64_t* addr, int64_t val);
  static bool SwapMutexCas64(int64_t old_value, int64_t new_value, volatile int64_t* addr);

  // We stripe across a bunch of different mutexes to reduce contention.
  static constexpr size_t kSwapMutexCount = 32;
  static std::vector<Mutex*>* gSwapMutexes;

  DISALLOW_COPY_AND_ASSIGN(QuasiAtomic);
};

}  // namespace art

#endif  // ART_RUNTIME_ATOMIC_H_