/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_INL_H_
#define ART_RUNTIME_THREAD_INL_H_

#include "thread.h"

#ifdef ART_TARGET_ANDROID
#include <bionic_tls.h>  // Access to our own TLS slot.
#endif

#include <pthread.h>

#include "base/casts.h"
#include "base/mutex-inl.h"
#include "gc/heap.h"
#include "jni_env_ext.h"
#include "obj_ptr.h"
#include "runtime.h"
#include "thread_pool.h"

namespace art {

// Quickly access the current thread from a JNIEnv.
static inline Thread* ThreadForEnv(JNIEnv* env) {
  JNIEnvExt* full_env(down_cast<JNIEnvExt*>(env));
  return full_env->self;
}
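
// Illustrative usage (not part of the original header): a JNI entry point can
// recover its Thread* from the JNIEnv it was handed, assuming the env really
// is the JNIEnvExt that ART created for this thread:
//
//   static void Native_doWork(JNIEnv* env, jclass) {  // hypothetical stub
//     Thread* self = ThreadForEnv(env);
//     // ... use |self| while the thread remains attached ...
//   }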

inline Thread* Thread::Current() {
  // We rely on Thread::Current returning null for a detached thread, so it's not obvious
  // that we can replace this with a direct %fs access on x86.
  if (!is_started_) {
    return nullptr;
  } else {
#ifdef ART_TARGET_ANDROID
    void* thread = __get_tls()[TLS_SLOT_ART_THREAD_SELF];
#else
    void* thread = pthread_getspecific(Thread::pthread_key_self_);
#endif
    return reinterpret_cast<Thread*>(thread);
  }
}
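
// A sketch of how the slot read above stays valid (the authoritative code is
// in Thread's attach/init path): on Android the thread stores itself into the
// reserved bionic slot when it attaches, so the lookup here is a couple of
// loads; the pthread key is the portable fallback for host builds.
//
//   __get_tls()[TLS_SLOT_ART_THREAD_SELF] = self;           // bionic path
//   pthread_setspecific(Thread::pthread_key_self_, self);   // portable path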

inline void Thread::AllowThreadSuspension() {
  DCHECK_EQ(Thread::Current(), this);
  if (UNLIKELY(TestAllFlags())) {
    CheckSuspend();
  }
  // Invalidate the current thread's object pointers (ObjPtr) to catch possible moving GC bugs due
  // to missing handles.
  PoisonObjectPointers();
}

inline void Thread::CheckSuspend() {
  DCHECK_EQ(Thread::Current(), this);
  for (;;) {
    if (ReadFlag(kCheckpointRequest)) {
      RunCheckpointFunction();
    } else if (ReadFlag(kSuspendRequest)) {
      FullSuspendCheck();
    } else if (ReadFlag(kEmptyCheckpointRequest)) {
      RunEmptyCheckpoint();
    } else {
      break;
    }
  }
}
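
// Typical polling pattern (illustrative only): long-running loops in runnable
// code call AllowThreadSuspension() periodically so that GC and the debugger
// never wait on this thread for long, e.g.
//
//   for (size_t i = 0; i < num_items; ++i) {  // |num_items| is hypothetical
//     self->AllowThreadSuspension();  // services suspend/checkpoint requests
//     ProcessItem(i);                 // hypothetical unit of work
//   }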

inline void Thread::CheckEmptyCheckpointFromWeakRefAccess(BaseMutex* cond_var_mutex) {
  Thread* self = Thread::Current();
  DCHECK_EQ(self, this);
  for (;;) {
    if (ReadFlag(kEmptyCheckpointRequest)) {
      RunEmptyCheckpoint();
      // Check that we hold only expected mutexes when accessing a weak ref.
      if (kIsDebugBuild) {
        for (int i = kLockLevelCount - 1; i >= 0; --i) {
          BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
          if (held_mutex != nullptr &&
              held_mutex != Locks::mutator_lock_ &&
              held_mutex != cond_var_mutex) {
            std::vector<BaseMutex*>& expected_mutexes = Locks::expected_mutexes_on_weak_ref_access_;
            CHECK(std::find(expected_mutexes.begin(), expected_mutexes.end(), held_mutex) !=
                  expected_mutexes.end())
                << "Holding unexpected mutex " << held_mutex->GetName()
                << " when accessing weak ref";
          }
        }
      }
    } else {
      break;
    }
  }
}

inline void Thread::CheckEmptyCheckpointFromMutex() {
  DCHECK_EQ(Thread::Current(), this);
  for (;;) {
    if (ReadFlag(kEmptyCheckpointRequest)) {
      RunEmptyCheckpoint();
    } else {
      break;
    }
  }
}

inline ThreadState Thread::SetState(ThreadState new_state) {
  // Should only be used to change between suspended states.
  // Cannot be used to change into or out of Runnable: changing to Runnable must
  // fail if old_state_and_flags.suspend_request is set, and changing from
  // Runnable might miss passing an active suspend barrier.
  DCHECK_NE(new_state, kRunnable);
  if (kIsDebugBuild && this != Thread::Current()) {
    std::string name;
    GetThreadName(name);
    LOG(FATAL) << "Thread \"" << name << "\"(" << this << " != Thread::Current()="
               << Thread::Current() << ") changing state to " << new_state;
  }
  union StateAndFlags old_state_and_flags;
  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  CHECK_NE(old_state_and_flags.as_struct.state, kRunnable);
  tls32_.state_and_flags.as_struct.state = new_state;
  return static_cast<ThreadState>(old_state_and_flags.as_struct.state);
}
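
// Layout sketch of the word manipulated above (thread.h has the authoritative
// definition): a single 32-bit value packs the 16-bit flags next to the 16-bit
// state so both can be read, written, or CASed together atomically:
//
//   union StateAndFlags {
//     struct {
//       volatile uint16_t flags;  // kSuspendRequest, kCheckpointRequest, ...
//       volatile uint16_t state;  // a ThreadState value
//     } as_struct;
//     AtomicInteger as_atomic_int;
//     volatile int32_t as_int;
//   };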

inline bool Thread::IsThreadSuspensionAllowable() const {
  if (tls32_.no_thread_suspension != 0) {
    return false;
  }
  for (int i = kLockLevelCount - 1; i >= 0; --i) {
    if (i != kMutatorLock && GetHeldMutex(static_cast<LockLevel>(i)) != nullptr) {
      return false;
    }
  }
  return true;
}

inline void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
  if (kIsDebugBuild) {
    if (gAborting == 0) {
      CHECK_EQ(0u, tls32_.no_thread_suspension) << tlsPtr_.last_no_thread_suspension_cause;
    }
    if (check_locks) {
      bool bad_mutexes_held = false;
      for (int i = kLockLevelCount - 1; i >= 0; --i) {
        // We expect no locks except the mutator_lock_ or thread list suspend thread lock.
        if (i != kMutatorLock) {
          BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
          if (held_mutex != nullptr) {
            LOG(ERROR) << "holding \"" << held_mutex->GetName()
                       << "\" at point where thread suspension is expected";
            bad_mutexes_held = true;
          }
        }
      }
      if (gAborting == 0) {
        CHECK(!bad_mutexes_held);
      }
    }
  }
}

inline void Thread::TransitionToSuspendedAndRunCheckpoints(ThreadState new_state) {
  DCHECK_NE(new_state, kRunnable);
  DCHECK_EQ(GetState(), kRunnable);
  union StateAndFlags old_state_and_flags;
  union StateAndFlags new_state_and_flags;
  while (true) {
    old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
    if (UNLIKELY((old_state_and_flags.as_struct.flags & kCheckpointRequest) != 0)) {
      RunCheckpointFunction();
      continue;
    }
    if (UNLIKELY((old_state_and_flags.as_struct.flags & kEmptyCheckpointRequest) != 0)) {
      RunEmptyCheckpoint();
      continue;
    }
    // Change the state but keep the current flags (kCheckpointRequest is clear).
    DCHECK_EQ((old_state_and_flags.as_struct.flags & kCheckpointRequest), 0);
    DCHECK_EQ((old_state_and_flags.as_struct.flags & kEmptyCheckpointRequest), 0);
    new_state_and_flags.as_struct.flags = old_state_and_flags.as_struct.flags;
    new_state_and_flags.as_struct.state = new_state;

    // CAS the value in, using release memory ordering.
    bool done =
        tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakRelease(old_state_and_flags.as_int,
                                                                        new_state_and_flags.as_int);
    if (LIKELY(done)) {
      break;
    }
  }
}

inline void Thread::PassActiveSuspendBarriers() {
  while (true) {
    uint16_t current_flags = tls32_.state_and_flags.as_struct.flags;
    if (LIKELY((current_flags &
                (kCheckpointRequest | kEmptyCheckpointRequest | kActiveSuspendBarrier)) == 0)) {
      break;
    } else if ((current_flags & kActiveSuspendBarrier) != 0) {
      PassActiveSuspendBarriers(this);
    } else {
      // Unreachable: checkpoint requests are serviced before the state change.
      LOG(FATAL) << "Thread transitioned into suspended without running the checkpoint";
    }
  }
}

inline void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
  AssertThreadSuspensionIsAllowable();
  PoisonObjectPointersIfDebug();
  DCHECK_EQ(this, Thread::Current());
  // Change to non-runnable state, thereby appearing suspended to the system.
  TransitionToSuspendedAndRunCheckpoints(new_state);
  // Mark the release of the share of the mutator_lock_.
  Locks::mutator_lock_->TransitionFromRunnableToSuspended(this);
  // Once suspended, check the active suspend barrier flag.
  PassActiveSuspendBarriers();
}
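
// How the transition pair is typically used (a sketch; the real RAII wrappers
// live in scoped_thread_state_change.h): a runnable thread brackets a blocking
// operation so that GC can treat it as suspended while it waits, e.g.
//
//   self->TransitionFromRunnableToSuspended(kWaitingForCheckPointsToRun);
//   DoBlockingWait();  // hypothetical call that may block for a long time
//   self->TransitionFromSuspendedToRunnable();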

inline ThreadState Thread::TransitionFromSuspendedToRunnable() {
  union StateAndFlags old_state_and_flags;
  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  int16_t old_state = old_state_and_flags.as_struct.state;
  DCHECK_NE(static_cast<ThreadState>(old_state), kRunnable);
  do {
    Locks::mutator_lock_->AssertNotHeld(this);  // Otherwise we starve GC.
    old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
    DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
    if (LIKELY(old_state_and_flags.as_struct.flags == 0)) {
      // Optimize for the return from native code case - this is the fast path.
      // Atomically change from suspended to runnable if no suspend request pending.
      union StateAndFlags new_state_and_flags;
      new_state_and_flags.as_int = old_state_and_flags.as_int;
      new_state_and_flags.as_struct.state = kRunnable;
      // CAS the value in, using acquire memory ordering.
      if (LIKELY(tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakAcquire(
                     old_state_and_flags.as_int,
                     new_state_and_flags.as_int))) {
        // Mark the acquisition of a share of the mutator_lock_.
        Locks::mutator_lock_->TransitionFromSuspendedToRunnable(this);
        break;
      }
    } else if ((old_state_and_flags.as_struct.flags & kActiveSuspendBarrier) != 0) {
      PassActiveSuspendBarriers(this);
    } else if ((old_state_and_flags.as_struct.flags &
                (kCheckpointRequest | kEmptyCheckpointRequest)) != 0) {
      // Unreachable: a suspended thread should not have a pending checkpoint.
      LOG(FATAL) << "Transitioning to runnable with checkpoint flag,"
                 << " flags=" << old_state_and_flags.as_struct.flags
                 << " state=" << old_state_and_flags.as_struct.state;
    } else if ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
      // Wait while our suspend count is non-zero.

      // We pass null to the MutexLock as we may be in a situation where the
      // runtime is shutting down. Guarding ourselves from that situation
      // would require taking the shutdown lock, which is undesirable here.
      Thread* thread_to_pass = nullptr;
      if (kIsDebugBuild && !IsDaemon()) {
        // We know we can make our debug locking checks on non-daemon threads,
        // so re-enable them on debug builds.
        thread_to_pass = this;
      }
      MutexLock mu(thread_to_pass, *Locks::thread_suspend_count_lock_);
      ScopedTransitioningToRunnable scoped_transitioning_to_runnable(this);
      old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
      DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
      while ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
        // Re-check when Thread::resume_cond_ is notified.
        Thread::resume_cond_->Wait(thread_to_pass);
        old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
        DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
      }
      DCHECK_EQ(GetSuspendCount(), 0);
    }
  } while (true);
  // Run the flip function, if set.
  Closure* flip_func = GetFlipFunction();
  if (flip_func != nullptr) {
    flip_func->Run(this);
  }
  return static_cast<ThreadState>(old_state);
}

inline void Thread::VerifyStack() {
  if (kVerifyStack) {
    if (Runtime::Current()->GetHeap()->IsObjectValidationEnabled()) {
      VerifyStackImpl();
    }
  }
}

inline size_t Thread::TlabSize() const {
  return tlsPtr_.thread_local_end - tlsPtr_.thread_local_pos;
}

inline mirror::Object* Thread::AllocTlab(size_t bytes) {
  DCHECK_GE(TlabSize(), bytes);
  ++tlsPtr_.thread_local_objects;
  mirror::Object* ret = reinterpret_cast<mirror::Object*>(tlsPtr_.thread_local_pos);
  tlsPtr_.thread_local_pos += bytes;
  return ret;
}
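
// Caller-side sketch (the real checks live in the allocator entry points):
// TLAB allocation is a lock-free pointer bump, so a caller must verify there
// is room and fall back to the shared heap path when there is not, e.g.
//
//   mirror::Object* obj;
//   if (LIKELY(self->TlabSize() >= byte_count)) {
//     obj = self->AllocTlab(byte_count);       // bumps thread_local_pos
//   } else {
//     obj = AllocFromSharedHeap(byte_count);   // hypothetical slow path
//   }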

inline bool Thread::PushOnThreadLocalAllocationStack(mirror::Object* obj) {
  DCHECK_LE(tlsPtr_.thread_local_alloc_stack_top, tlsPtr_.thread_local_alloc_stack_end);
  if (tlsPtr_.thread_local_alloc_stack_top < tlsPtr_.thread_local_alloc_stack_end) {
    // There's room.
    DCHECK_LE(reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_top) +
                  sizeof(StackReference<mirror::Object>),
              reinterpret_cast<uint8_t*>(tlsPtr_.thread_local_alloc_stack_end));
    DCHECK(tlsPtr_.thread_local_alloc_stack_top->AsMirrorPtr() == nullptr);
    tlsPtr_.thread_local_alloc_stack_top->Assign(obj);
    ++tlsPtr_.thread_local_alloc_stack_top;
    return true;
  }
  return false;
}
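
// A false return above means this thread's slice of the allocation stack is
// exhausted; roughly, the caller is expected to hand the object to the shared
// allocation stack (possibly triggering a GC) and retry, e.g.
//
//   if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(obj))) {
//     PushOntoSharedAllocationStackSlowPath(self, obj);  // hypothetical fallback
//   }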

inline void Thread::SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
                                                  StackReference<mirror::Object>* end) {
  DCHECK(Thread::Current() == this) << "Should be called by self";
  DCHECK(start != nullptr);
  DCHECK(end != nullptr);
  DCHECK_ALIGNED(start, sizeof(StackReference<mirror::Object>));
  DCHECK_ALIGNED(end, sizeof(StackReference<mirror::Object>));
  DCHECK_LT(start, end);
  tlsPtr_.thread_local_alloc_stack_end = end;
  tlsPtr_.thread_local_alloc_stack_top = start;
}

inline void Thread::RevokeThreadLocalAllocationStack() {
  if (kIsDebugBuild) {
    // Note: self is not necessarily equal to this thread since thread may be suspended.
    Thread* self = Thread::Current();
    DCHECK(this == self || IsSuspended() || GetState() == kWaitingPerformingGc)
        << GetState() << " thread " << this << " self " << self;
  }
  tlsPtr_.thread_local_alloc_stack_end = nullptr;
  tlsPtr_.thread_local_alloc_stack_top = nullptr;
}

inline void Thread::PoisonObjectPointersIfDebug() {
  if (kObjPtrPoisoning) {
    Thread::Current()->PoisonObjectPointers();
  }
}
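
// Why poisoning helps (a sketch; obj_ptr.h has the details): each ObjPtr<>
// carries a per-thread cookie alongside the raw pointer. PoisonObjectPointers()
// bumps the thread's cookie, so any ObjPtr created before a suspension point
// fails its validity check if used afterwards - exactly the window in which a
// moving GC could have relocated the object, e.g.
//
//   ObjPtr<mirror::Object> obj = ...;  // created while runnable
//   self->PoisonObjectPointers();      // simulates crossing a suspend point
//   obj->...                           // debug builds now abort on this use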

inline bool Thread::ModifySuspendCount(Thread* self,
                                       int delta,
                                       AtomicInteger* suspend_barrier,
                                       bool for_debugger) {
  if (delta > 0 && ((kUseReadBarrier && this != self) || suspend_barrier != nullptr)) {
    // When delta > 0 (requesting a suspend), ModifySuspendCountInternal() may fail either if
    // active_suspend_barriers is full or we are in the middle of a thread flip. Retry in a loop.
    while (true) {
      if (LIKELY(ModifySuspendCountInternal(self, delta, suspend_barrier, for_debugger))) {
        return true;
      } else {
        // Failure means the list of active_suspend_barriers is full or we are in the middle of a
        // thread flip. We should release the thread_suspend_count_lock_ (to avoid deadlock) and
        // wait until the target thread has executed Thread::PassActiveSuspendBarriers() or the
        // flip function. Note that we cannot simply wait for the thread to change to a suspended
        // state, because it might need to run a checkpoint function before the state change, or
        // it may resume from resume_cond_, which also needs thread_suspend_count_lock_.
        //
        // The list of active_suspend_barriers is very unlikely to be full, since more than
        // kMaxSuspendBarriers threads would need to execute SuspendAllInternal() simultaneously
        // while the target thread stays in kRunnable in the meantime.
        Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
        NanoSleep(100000);
        Locks::thread_suspend_count_lock_->ExclusiveLock(self);
      }
    }
  } else {
    return ModifySuspendCountInternal(self, delta, suspend_barrier, for_debugger);
  }
}
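
// Call-site sketch (simplified; the real callers live in the thread list
// code): suspension requests are made with thread_suspend_count_lock_ held,
// and a +1 delta is later paired with a -1 delta to resume the target:
//
//   MutexLock mu(self, *Locks::thread_suspend_count_lock_);
//   bool updated = target->ModifySuspendCount(self, +1, nullptr, false);  // suspend request
//   // ... later, under the same lock ...
//   updated = target->ModifySuspendCount(self, -1, nullptr, false);       // resume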

}  // namespace art

#endif  // ART_RUNTIME_THREAD_INL_H_