/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_OBJECT_READBARRIER_INL_H_
#define ART_RUNTIME_MIRROR_OBJECT_READBARRIER_INL_H_

#include "object.h"

#include "base/atomic.h"
#include "heap_poisoning.h"
#include "lock_word-inl.h"
#include "object_reference-inl.h"
#include "read_barrier.h"
#include "runtime.h"

namespace art {
namespace mirror {
32template<VerifyObjectFlags kVerifyFlags>
33inline LockWord Object::GetLockWord(bool as_volatile) {
34 if (as_volatile) {
Mathieu Chartier42c2e502018-06-19 12:30:56 -070035 return LockWord(GetField32Volatile<kVerifyFlags>(MonitorOffset()));
Andreas Gampec15a2f42017-04-21 12:09:39 -070036 }
Mathieu Chartier42c2e502018-06-19 12:30:56 -070037 return LockWord(GetField32<kVerifyFlags>(MonitorOffset()));
Andreas Gampec15a2f42017-04-21 12:09:39 -070038}
39
40template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
Mathieu Chartier42c2e502018-06-19 12:30:56 -070041inline bool Object::CasField32(MemberOffset field_offset,
42 int32_t old_value,
43 int32_t new_value,
44 CASMode mode,
45 std::memory_order memory_order) {
Andreas Gampec15a2f42017-04-21 12:09:39 -070046 if (kCheckTransaction) {
47 DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
48 }
49 if (kTransactionActive) {
50 Runtime::Current()->RecordWriteField32(this, field_offset, old_value, true);
51 }
52 if (kVerifyFlags & kVerifyThis) {
53 VerifyObject(this);
54 }
55 uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
56 AtomicInteger* atomic_addr = reinterpret_cast<AtomicInteger*>(raw_addr);
57
Mathieu Chartier42c2e502018-06-19 12:30:56 -070058 return atomic_addr->CompareAndSet(old_value, new_value, mode, memory_order);
Andreas Gampec15a2f42017-04-21 12:09:39 -070059}
60
Mathieu Chartier42c2e502018-06-19 12:30:56 -070061inline bool Object::CasLockWord(LockWord old_val,
62 LockWord new_val,
63 CASMode mode,
64 std::memory_order memory_order) {
Andreas Gampec15a2f42017-04-21 12:09:39 -070065 // Force use of non-transactional mode and do not check.
Mathieu Chartier42c2e502018-06-19 12:30:56 -070066 return CasField32<false, false>(MonitorOffset(),
67 old_val.GetValue(),
68 new_val.GetValue(),
69 mode,
70 memory_order);
Andreas Gampec15a2f42017-04-21 12:09:39 -070071}
72
// Returns the read barrier state bits of this object's lock word and produces
// a "fake address dependency" (always 0) in `*fake_address_dependency`.
// Callers can add the fake dependency into subsequent address computations so
// that the CPU orders those loads after this read without a full barrier.
// Only valid with the Baker read barrier; fatally aborts otherwise.
inline uint32_t Object::GetReadBarrierState(uintptr_t* fake_address_dependency) {
  if (!kUseBakerReadBarrier) {
    LOG(FATAL) << "Unreachable";
    UNREACHABLE();
  }
#if defined(__arm__)
  uintptr_t obj = reinterpret_cast<uintptr_t>(this);
  uintptr_t result;
  // The hard-coded `#4` displacement in the assembly below relies on the
  // monitor (lock word) field being at offset 4.
  DCHECK_EQ(OFFSETOF_MEMBER(Object, monitor_), 4U);
  // Use inline assembly to prevent the compiler from optimizing away the false dependency.
  __asm__ __volatile__(
      "ldr %[result], [%[obj], #4]\n\t"
      // This instruction is enough to "fool the compiler and the CPU" by having `fad` always be
      // null, without them being able to assume that fact.
      "eor %[fad], %[result], %[result]\n\t"
      : [result] "+r" (result), [fad] "=r" (*fake_address_dependency)
      : [obj] "r" (obj));
  DCHECK_EQ(*fake_address_dependency, 0U);
  LockWord lw(static_cast<uint32_t>(result));
  uint32_t rb_state = lw.ReadBarrierState();
  return rb_state;
#elif defined(__aarch64__)
  uintptr_t obj = reinterpret_cast<uintptr_t>(this);
  uintptr_t result;
  DCHECK_EQ(OFFSETOF_MEMBER(Object, monitor_), 4U);
  // Use inline assembly to prevent the compiler from optimizing away the false dependency.
  __asm__ __volatile__(
      // 32-bit load (%w) of the monitor word; `eor` below zeroes the dependency.
      "ldr %w[result], [%[obj], #4]\n\t"
      // This instruction is enough to "fool the compiler and the CPU" by having `fad` always be
      // null, without them being able to assume that fact.
      "eor %[fad], %[result], %[result]\n\t"
      : [result] "+r" (result), [fad] "=r" (*fake_address_dependency)
      : [obj] "r" (obj));
  DCHECK_EQ(*fake_address_dependency, 0U);
  LockWord lw(static_cast<uint32_t>(result));
  uint32_t rb_state = lw.ReadBarrierState();
  return rb_state;
#elif defined(__i386__) || defined(__x86_64__)
  LockWord lw = GetLockWord(false);
  // i386/x86_64 don't need fake address dependency. Use a compiler fence to avoid compiler
  // reordering.
  *fake_address_dependency = 0;
  std::atomic_signal_fence(std::memory_order_acquire);
  uint32_t rb_state = lw.ReadBarrierState();
  return rb_state;
#else
  // MIPS32/MIPS64: use a memory barrier to prevent load-load reordering.
  LockWord lw = GetLockWord(false);
  *fake_address_dependency = 0;
  std::atomic_thread_fence(std::memory_order_acquire);
  uint32_t rb_state = lw.ReadBarrierState();
  return rb_state;
#endif
}
127
128inline uint32_t Object::GetReadBarrierState() {
129 if (!kUseBakerReadBarrier) {
130 LOG(FATAL) << "Unreachable";
131 UNREACHABLE();
132 }
133 DCHECK(kUseBakerReadBarrier);
Andreas Gampe98ea9d92018-10-19 14:06:15 -0700134 LockWord lw(GetFieldPrimitive<uint32_t, /*kIsVolatile=*/false>(MonitorOffset()));
Andreas Gampec15a2f42017-04-21 12:09:39 -0700135 uint32_t rb_state = lw.ReadBarrierState();
136 DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
137 return rb_state;
138}
139
140inline uint32_t Object::GetReadBarrierStateAcquire() {
141 if (!kUseBakerReadBarrier) {
142 LOG(FATAL) << "Unreachable";
143 UNREACHABLE();
144 }
Mathieu Chartier99111282018-06-19 12:30:56 -0700145 LockWord lw(GetFieldAcquire<uint32_t>(MonitorOffset()));
Andreas Gampec15a2f42017-04-21 12:09:39 -0700146 uint32_t rb_state = lw.ReadBarrierState();
147 DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
148 return rb_state;
149}
150
Mathieu Chartier42c2e502018-06-19 12:30:56 -0700151template<std::memory_order kMemoryOrder>
Andreas Gampec15a2f42017-04-21 12:09:39 -0700152inline bool Object::AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state) {
153 if (!kUseBakerReadBarrier) {
154 LOG(FATAL) << "Unreachable";
155 UNREACHABLE();
156 }
157 DCHECK(ReadBarrier::IsValidReadBarrierState(expected_rb_state)) << expected_rb_state;
158 DCHECK(ReadBarrier::IsValidReadBarrierState(rb_state)) << rb_state;
159 LockWord expected_lw;
160 LockWord new_lw;
161 do {
162 LockWord lw = GetLockWord(false);
163 if (UNLIKELY(lw.ReadBarrierState() != expected_rb_state)) {
164 // Lost the race.
165 return false;
166 }
167 expected_lw = lw;
168 expected_lw.SetReadBarrierState(expected_rb_state);
169 new_lw = lw;
170 new_lw.SetReadBarrierState(rb_state);
Roland Levillain14e5a292018-06-28 12:00:56 +0100171 // ConcurrentCopying::ProcessMarkStackRef uses this with
172 // `kMemoryOrder` == `std::memory_order_release`.
173 // If `kMemoryOrder` == `std::memory_order_release`, use a CAS release so that when GC updates
174 // all the fields of an object and then changes the object from gray to black (non-gray), the
175 // field updates (stores) will be visible (won't be reordered after this CAS.)
Mathieu Chartier42c2e502018-06-19 12:30:56 -0700176 } while (!CasLockWord(expected_lw, new_lw, CASMode::kWeak, kMemoryOrder));
Andreas Gampec15a2f42017-04-21 12:09:39 -0700177 return true;
178}
179
180inline bool Object::AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit) {
181 LockWord expected_lw;
182 LockWord new_lw;
183 do {
184 LockWord lw = GetLockWord(false);
185 if (UNLIKELY(lw.MarkBitState() != expected_mark_bit)) {
186 // Lost the race.
187 return false;
188 }
189 expected_lw = lw;
190 new_lw = lw;
191 new_lw.SetMarkBitState(mark_bit);
Roland Levillain2ae376f2018-01-30 11:35:11 +0000192 // Since this is only set from the mutator, we can use the non-release CAS.
Mathieu Chartier42c2e502018-06-19 12:30:56 -0700193 } while (!CasLockWord(expected_lw, new_lw, CASMode::kWeak, std::memory_order_relaxed));
Andreas Gampec15a2f42017-04-21 12:09:39 -0700194 return true;
195}

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_OBJECT_READBARRIER_INL_H_