/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_H_
#define ART_RUNTIME_THREAD_H_

#include <bitset>
#include <deque>
#include <iosfwd>
#include <list>
#include <memory>
#include <setjmp.h>
#include <string>

#include "arch/context.h"
#include "arch/instruction_set.h"
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "globals.h"
#include "handle_scope.h"
#include "instrumentation.h"
#include "jvalue.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
#include "thread_state.h"

namespace art {

namespace gc {
namespace collector {
  class SemiSpace;
}  // namespace collector
}  // namespace gc

namespace mirror {
  class Array;
  class Class;
  class ClassLoader;
  class Object;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int32_t> IntArray;
  class StackTraceElement;
  class String;
  class Throwable;
}  // namespace mirror

namespace verifier {
class MethodVerifier;
}  // namespace verifier

class ArtMethod;
class BaseMutex;
class ClassLinker;
class Closure;
class Context;
struct DebugInvokeReq;
class DeoptimizationReturnValueRecord;
class DexFile;
class JavaVMExt;
struct JNIEnvExt;
class Monitor;
class Runtime;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
class SingleStepControl;
class StackedShadowFrameRecord;
class Thread;
class ThreadList;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadFlag {
  kSuspendRequest   = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
                          // safepoint handler.
  kCheckpointRequest = 2  // Request that the thread do some checkpoint work and then continue.
};

enum class StackedShadowFrameType {
  kShadowFrameUnderConstruction,
  kDeoptimizationShadowFrame
};

static constexpr size_t kNumRosAllocThreadLocalSizeBrackets = 34;

// Thread's stack layout for implicit stack overflow checks:
//
//   +---------------------+  <- highest address of stack memory
//   |                     |
//   .                     .  <- SP
//   |                     |
//   |                     |
//   +---------------------+  <- stack_end
//   |                     |
//   |  Gap                |
//   |                     |
//   +---------------------+  <- stack_begin
//   |                     |
//   |  Protected region   |
//   |                     |
//   +---------------------+  <- lowest address of stack memory
//
// The stack always grows down in memory. At the lowest address is a region of memory
// that is set mprotect(PROT_NONE). Any attempt to read/write to this region will
// result in a segmentation fault signal. At any point, the thread's SP will be somewhere
// between the stack_end and the highest address in stack memory. An implicit stack
// overflow check is a read of memory at a certain offset below the current SP (4K typically).
// If the thread's SP is below the stack_end address this will be a read into the protected
// region. If the SP is above the stack_end address, the thread is guaranteed to have
// at least 4K of space. Because stack overflow checks are only performed in generated code,
// if the thread makes a call out to a native function (through JNI), that native function
// might only have 4K of memory (if the SP is adjacent to stack_end).

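// As a rough illustration (a sketch only, not the exact instruction sequence any particular
// compiler backend emits), the implicit check behaves like a probe a fixed distance below SP:
//
//   (void)*(volatile uint8_t*)(current_sp - kStackOverflowImplicitCheckSize);
//
// When the stack is nearly exhausted the probe lands in the protected region, the resulting
// SIGSEGV is recognized by the runtime's fault handler, and a StackOverflowError is raised
// instead of the process crashing.
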
class Thread {
 public:
  // For implicit overflow checks we reserve an extra piece of memory at the bottom
  // of the stack (lowest memory).  The higher portion of the memory
  // is protected against reads and the lower is available for use while
  // throwing the StackOverflow exception.
  static constexpr size_t kStackOverflowProtectedSize = 4 * KB;
  static const size_t kStackOverflowImplicitCheckSize;

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                        bool create_peer);

  // Reset internal state of child thread after fork.
  void InitAfterFork();

  // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
  // high cost and so we favor passing self around when possible.
  // TODO: mark as PURE so the compiler may coalesce and remove?
  static Thread* Current();

  // On a runnable thread, check for pending thread suspension request and handle if pending.
  void AllowThreadSuspension() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Process pending thread suspension request and handle if pending.
  void CheckSuspend() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
                                   mirror::Object* thread_peer)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Translates a thread offset such as 172 into the name of the member or entry point it
  // refers to, e.g. pAllocArrayFromCode.
  template<size_t size_of_pointers>
  static void DumpThreadOffset(std::ostream& os, uint32_t offset);

  // Dumps a one-line summary of thread state (used for operator<<).
  void ShortDump(std::ostream& os) const;

  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
  void Dump(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DumpJavaStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThreadState GetState() const {
    DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
    DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
    return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
  }

  ThreadState SetState(ThreadState new_state);

  int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return tls32_.suspend_count;
  }

  int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return tls32_.debug_suspend_count;
  }

  bool IsSuspended() const {
    union StateAndFlags state_and_flags;
    state_and_flags.as_int = tls32_.state_and_flags.as_int;
    return state_and_flags.as_struct.state != kRunnable &&
        (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
  }

  void ModifySuspendCount(Thread* self, int delta, bool for_debugger)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  bool RequestCheckpoint(Closure* function)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  void SetFlipFunction(Closure* function);
  Closure* GetFlipFunction();

  // Called when thread detected that the thread_suspend_count_ was non-zero. Gives up share of
  // mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
  ThreadState TransitionFromSuspendedToRunnable()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Transition from runnable into a state where mutator privileges are denied. Releases share of
  // mutator lock.
  void TransitionFromRunnableToSuspended(ThreadState new_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;

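  // Note: these two transitions are rarely called directly; a ScopedObjectAccess (see
  // scoped_thread_state_change.h) typically performs TransitionFromSuspendedToRunnable in its
  // constructor and TransitionFromRunnableToSuspended in its destructor, so runtime entry points
  // normally just declare one on the stack, e.g. ScopedObjectAccess soa(env);
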
  // Once called, thread suspension will cause an assertion failure.
  const char* StartAssertNoThreadSuspension(const char* cause) {
    if (kIsDebugBuild) {
      CHECK(cause != nullptr);
      const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
      tls32_.no_thread_suspension++;
      tlsPtr_.last_no_thread_suspension_cause = cause;
      return previous_cause;
    } else {
      return nullptr;
    }
  }

  // End region where no thread suspension is expected.
  void EndAssertNoThreadSuspension(const char* old_cause) {
    if (kIsDebugBuild) {
      CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
      CHECK_GT(tls32_.no_thread_suspension, 0U);
      tls32_.no_thread_suspension--;
      tlsPtr_.last_no_thread_suspension_cause = old_cause;
    }
  }

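  // Typical pairing of the two calls above (a scoped RAII wrapper around them is also used
  // elsewhere in the runtime):
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting roots");
  //   ...  // code that must not suspend
  //   self->EndAssertNoThreadSuspension(old_cause);
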
  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;

  bool IsDaemon() const {
    return tls32_.daemon;
  }

  bool HoldsLock(mirror::Object*) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

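  // For reference: kNormThreadPriority (5) maps to the default nice value 0; priorities above 5
  // map to negative (more favourable) nice values and priorities below 5 to positive ones. The
  // exact table lives in the platform-specific implementation of SetNativePriority.
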
  uint32_t GetThreadId() const {
    return tls32_.thin_lock_thread_id;
  }

  pid_t GetTid() const {
    return tls32_.tid;
  }

  // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
  mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
  uint64_t GetCpuMicroTime() const;

  mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(tlsPtr_.jpeer == nullptr);
    return tlsPtr_.opeer;
  }

  bool HasPeer() const {
    return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
  }

  RuntimeStats* GetStats() {
    return &tls64_.stats;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    return tlsPtr_.exception != nullptr;
  }

  mirror::Throwable* GetException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.exception;
  }

  void AssertPendingException() const;
  void AssertPendingOOMException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void AssertNoPendingException() const;
  void AssertNoPendingExceptionForNewException(const char* msg) const;

  void SetException(mirror::Throwable* new_exception)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(new_exception != nullptr);
    // TODO: DCHECK(!IsExceptionPending());
    tlsPtr_.exception = new_exception;
  }

  void ClearException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    tlsPtr_.exception = nullptr;
  }

  // Find the catch block and perform a long jump to the appropriate exception handler.
  NO_RETURN void QuickDeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    if (tlsPtr_.long_jump_context != nullptr) {
      // Each QuickExceptionHandler gets a long jump context and uses
      // it for doing the long jump, after finding catch blocks/doing deoptimization.
      // Both finding catch blocks and deoptimization can trigger another
      // exception such as a result of class loading. So there can be nested
      // cases of exception handling and multiple contexts being used.
      // ReleaseLongJumpContext tries to save the context in tlsPtr_.long_jump_context
      // for reuse so there is no need to always allocate a new one each time when
      // getting a context. Since we only keep one context for reuse, delete the
      // existing one since the passed in context is yet to be used for longjump.
      delete tlsPtr_.long_jump_context;
    }
    tlsPtr_.long_jump_context = context;
  }

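  // Sketch of the intended use: exception delivery fetches a Context via GetLongJumpContext(),
  // hands it to a QuickExceptionHandler to unwind to the catch handler (or to deoptimize), and
  // any context that is not consumed by the long jump is handed back through
  // ReleaseLongJumpContext() so it can be reused by the next throw.
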
  // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
  // abort the runtime iff abort_on_error is true.
  ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns whether the given exception was thrown by the current Java method being executed
  // (Note that this includes native Java methods).
  bool IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetTopOfStack(ArtMethod** top_method) {
    tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
  }

  void SetTopOfShadowStack(ShadowFrame* top) {
    tlsPtr_.managed_stack.SetTopShadowFrame(top);
  }

  bool HasManagedStack() const {
    return (tlsPtr_.managed_stack.GetTopQuickFrame() != nullptr) ||
        (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
  }

  // If 'msg' is null, no detail message is set.
  void ThrowNewException(const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // If 'msg' is null, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 3, 4)))
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return tlsPtr_.jni_env;
  }

  // Convert a jobject into an Object*.
  mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* GetMonitorEnterObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.monitor_enter_object;
  }

  void SetMonitorEnterObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    tlsPtr_.monitor_enter_object = obj;
  }

  // Implements java.lang.Thread.interrupted.
  bool Interrupted() LOCKS_EXCLUDED(wait_mutex_);
  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() LOCKS_EXCLUDED(wait_mutex_);
  bool IsInterruptedLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return interrupted_;
  }
  void Interrupt(Thread* self) LOCKS_EXCLUDED(wait_mutex_);
  void SetInterruptedLocked(bool i) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    interrupted_ = i;
  }
  void Notify() LOCKS_EXCLUDED(wait_mutex_);

 private:
  void NotifyLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_);

 public:
  Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
    return wait_mutex_;
  }

  ConditionVariable* GetWaitConditionVariable() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return wait_cond_;
  }

  Monitor* GetWaitMonitor() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return wait_monitor_;
  }

  void SetWaitMonitor(Monitor* mon) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    wait_monitor_ = mon;
  }

  // Waiter link-list support.
  Thread* GetWaitNext() const {
    return tlsPtr_.wait_next;
  }

  void SetWaitNext(Thread* next) {
    tlsPtr_.wait_next = next;
  }

  jobject GetClassLoaderOverride() {
    return tlsPtr_.class_loader_override;
  }

  void SetClassLoaderOverride(jobject class_loader_override);

  // Create the internal representation of a stack trace, which is more time-
  // and space-efficient to compute than the StackTraceElement[].
  template<bool kTransactionActive>
  jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(
      const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
      jobjectArray output_array = nullptr, int* stack_depth = nullptr)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ALWAYS_INLINE void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

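  // For example, Thread::ExceptionOffset<8>().Int32Value() is the byte offset of
  // tlsPtr_.exception from the Thread* base on a 64-bit target; compiled code adds such an
  // offset to the thread register to reach thread-local state with a single load or store.
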
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThinLockIdOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadFlagsOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
  }

 private:
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
    size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
    size_t scale;
    size_t shrink;
    if (pointer_size == sizeof(void*)) {
      scale = 1;
      shrink = 1;
    } else if (pointer_size > sizeof(void*)) {
      scale = pointer_size / sizeof(void*);
      shrink = 1;
    } else {
      DCHECK_GT(sizeof(void*), pointer_size);
      scale = 1;
      shrink = sizeof(void*) / pointer_size;
    }
    return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
  }

 public:
  static uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
                                                size_t pointer_size) {
    DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
    if (pointer_size == 4) {
      return QuickEntryPointOffset<4>(quick_entrypoint_offset).Uint32Value();
    } else {
      return QuickEntryPointOffset<8>(quick_entrypoint_offset).Uint32Value();
    }
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> InterpreterEntryPointOffset(size_t interp_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, interpreter_entrypoints) + interp_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> SelfOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ExceptionOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> PeerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> CardTableOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadLocalPosOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_pos));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadLocalEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_end));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_objects));
  }

  // Size of stack less any space reserved for stack overflow.
  size_t GetStackSize() const {
    return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
  }

  uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const {
    if (implicit_overflow_check) {
      // The interpreter needs the extra overflow bytes that stack_end does
      // not include.
      return tlsPtr_.stack_end + GetStackOverflowReservedBytes(kRuntimeISA);
    } else {
      return tlsPtr_.stack_end;
    }
  }

  uint8_t* GetStackEnd() const {
    return tlsPtr_.stack_end;
  }

  // Set the stack end to the value used while handling a stack overflow.
  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Set the stack end back to the value used during regular execution.
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
  }

  // Install the protected region for implicit stack checks.
  void InstallImplicitProtection();

  bool IsHandlingStackOverflow() const {
    return tlsPtr_.stack_end == tlsPtr_.stack_begin;
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> StackEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> JniEnvOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopQuickFrameOffset());
  }

  const ManagedStack* GetManagedStack() const {
    return &tlsPtr_.managed_stack;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
  }

  ShadowFrame* PopShadowFrame() {
    return tlsPtr_.managed_stack.PopShadowFrame();
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopShadowFrameOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopShadowFrameOffset());
  }

  // Number of references allocated in JNI ShadowFrames on this thread.
  size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
  }

  // Number of references in handle scope on this thread.
  size_t NumHandleReferences();

  // Number of references allocated in handle scopes & JNI shadow frames on this thread.
  size_t NumStackReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return NumHandleReferences() + NumJniShadowFrameReferences();
  }

  // Is the given obj in this thread's stack indirect reference table?
  bool HandleScopeContains(jobject obj) const;

  void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  HandleScope* GetTopHandleScope() {
    return tlsPtr_.top_handle_scope;
  }

  void PushHandleScope(HandleScope* handle_scope) {
    DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
    tlsPtr_.top_handle_scope = handle_scope;
  }

  HandleScope* PopHandleScope() {
    HandleScope* handle_scope = tlsPtr_.top_handle_scope;
    DCHECK(handle_scope != nullptr);
    tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
    return handle_scope;
  }

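  // The Push/PopHandleScope pair above is normally driven by an RAII helper rather than called
  // directly; for example, a StackHandleScope declared on the native stack registers itself in
  // its constructor and unregisters itself in its destructor:
  //
  //   StackHandleScope<1> hs(soa.Self());
  //   Handle<mirror::Class> h_class(hs.NewHandle(klass));
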
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopHandleScopeOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                top_handle_scope));
  }

  DebugInvokeReq* GetInvokeReq() const {
    return tlsPtr_.debug_invoke_req;
  }

  SingleStepControl* GetSingleStepControl() const {
    return tlsPtr_.single_step_control;
  }

  // Indicates whether this thread is ready to invoke a method for debugging. This
  // is only true if the thread has been suspended by a debug event.
  bool IsReadyForDebugInvoke() const {
    return tls32_.ready_for_debug_invoke;
  }

  void SetReadyForDebugInvoke(bool ready) {
    tls32_.ready_for_debug_invoke = ready;
  }

  bool IsDebugMethodEntry() const {
    return tls32_.debug_method_entry_;
  }

  void SetDebugMethodEntry() {
    tls32_.debug_method_entry_ = true;
  }

  void ClearDebugMethodEntry() {
    tls32_.debug_method_entry_ = false;
  }

  // Activates single step control for debugging. The thread takes the
  // ownership of the given SingleStepControl*. It is deleted by a call
  // to DeactivateSingleStepControl or upon thread destruction.
  void ActivateSingleStepControl(SingleStepControl* ssc);

  // Deactivates single step control for debugging.
  void DeactivateSingleStepControl();

  // Sets debug invoke request for debugging. When the thread is resumed,
  // it executes the method described by this request then sends the reply
  // before suspending itself. The thread takes the ownership of the given
  // DebugInvokeReq*. It is deleted by a call to ClearDebugInvokeReq.
  void SetDebugInvokeReq(DebugInvokeReq* req);

  // Clears debug invoke request for debugging. When the thread completes
  // method invocation, it deletes its debug invoke request and suspends
  // itself.
  void ClearDebugInvokeReq();

  // Returns the fake exception used to activate deoptimization.
  static mirror::Throwable* GetDeoptimizationException() {
    return reinterpret_cast<mirror::Throwable*>(-1);
  }

  // Currently deoptimization invokes the verifier, which can trigger class loading
  // and execute Java code, so there might be nested deoptimizations happening.
  // We need to save the ongoing deoptimization shadow frames and return
  // values on stacks.
  void SetDeoptimizationReturnValue(const JValue& ret_val, bool is_reference) {
    tls64_.deoptimization_return_value.SetJ(ret_val.GetJ());
    tls32_.deoptimization_return_value_is_reference = is_reference;
  }
  bool IsDeoptimizationReturnValueReference() {
    return tls32_.deoptimization_return_value_is_reference;
  }
  void ClearDeoptimizationReturnValue() {
    tls64_.deoptimization_return_value.SetJ(0);
    tls32_.deoptimization_return_value_is_reference = false;
  }
  void PushAndClearDeoptimizationReturnValue();
  JValue PopDeoptimizationReturnValue();
  void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
  ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type);

  std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
    return tlsPtr_.instrumentation_stack;
  }

  std::vector<ArtMethod*>* GetStackTraceSample() const {
    return tlsPtr_.stack_trace_sample;
  }

  void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
    tlsPtr_.stack_trace_sample = sample;
  }

  uint64_t GetTraceClockBase() const {
    return tls64_.trace_clock_base;
  }

  void SetTraceClockBase(uint64_t clock_base) {
    tls64_.trace_clock_base = clock_base;
  }

  BaseMutex* GetHeldMutex(LockLevel level) const {
    return tlsPtr_.held_mutexes[level];
  }

  void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
    tlsPtr_.held_mutexes[level] = mutex;
  }

  void RunCheckpointFunction();

  bool ReadFlag(ThreadFlag flag) const {
    return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
  }

  bool TestAllFlags() const {
    return (tls32_.state_and_flags.as_struct.flags != 0);
  }

  void AtomicSetFlag(ThreadFlag flag) {
    tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flag);
  }

  void AtomicClearFlag(ThreadFlag flag) {
    tls32_.state_and_flags.as_atomic_int.FetchAndAndSequentiallyConsistent(-1 ^ flag);
  }

  void ResetQuickAllocEntryPointsForThread();

  // Returns the remaining space in the TLAB.
  size_t TlabSize() const;
  // Doesn't check that there is room.
  mirror::Object* AllocTlab(size_t bytes);
  void SetTlab(uint8_t* start, uint8_t* end);
  bool HasTlab() const;
  uint8_t* GetTlabStart() {
    return tlsPtr_.thread_local_start;
  }
  uint8_t* GetTlabPos() {
    return tlsPtr_.thread_local_pos;
  }

  // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
  // equal to a valid pointer.
  // TODO: does this need to be atomic? I don't think so.
  void RemoveSuspendTrigger() {
    tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
  }

  // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
  // The next time a suspend check is done, it will load from the value at this address
  // and trigger a SIGSEGV.
  void TriggerSuspend() {
    tlsPtr_.suspend_trigger = nullptr;
  }

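  // Together, RemoveSuspendTrigger and TriggerSuspend above support implicit suspend checks:
  // generated code periodically loads through tlsPtr_.suspend_trigger, which faults (SIGSEGV)
  // once the pointer has been nulled, and the fault handler turns that fault into a suspend
  // check for this thread.
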
895 // Push an object onto the allocation stack.
Mathieu Chartiercb535da2015-01-23 13:50:03 -0800896 bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
897 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Ian Rogersdd7624d2014-03-14 17:43:00 -0700898
899 // Set the thread local allocation pointers to the given pointers.
Mathieu Chartiercb535da2015-01-23 13:50:03 -0800900 void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
901 StackReference<mirror::Object>* end);
Ian Rogersdd7624d2014-03-14 17:43:00 -0700902
903 // Resets the thread local allocation pointers.
904 void RevokeThreadLocalAllocationStack();
905
906 size_t GetThreadLocalBytesAllocated() const {
Mathieu Chartier14cc9be2014-07-11 10:26:37 -0700907 return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
Ian Rogersdd7624d2014-03-14 17:43:00 -0700908 }
909
910 size_t GetThreadLocalObjectsAllocated() const {
911 return tlsPtr_.thread_local_objects;
912 }
913
Ian Rogersdd7624d2014-03-14 17:43:00 -0700914 void* GetRosAllocRun(size_t index) const {
915 return tlsPtr_.rosalloc_runs[index];
916 }
917
918 void SetRosAllocRun(size_t index, void* run) {
919 tlsPtr_.rosalloc_runs[index] = run;
920 }
921
Dave Allison648d7112014-07-25 16:15:27 -0700922 void ProtectStack();
923 bool UnprotectStack();
924
925 void NoteSignalBeingHandled() {
926 if (tls32_.handling_signal_) {
927 LOG(FATAL) << "Detected signal while processing a signal";
928 }
929 tls32_.handling_signal_ = true;
930 }
931
932 void NoteSignalHandlerDone() {
933 tls32_.handling_signal_ = false;
934 }
935
Dave Allison8ce6b902014-08-26 11:07:58 -0700936 jmp_buf* GetNestedSignalState() {
937 return tlsPtr_.nested_signal_state;
938 }
939
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800940 bool IsSuspendedAtSuspendCheck() const {
941 return tls32_.suspended_at_suspend_check;
942 }
943
Mathieu Chartierd0ad2ee2015-03-31 14:59:59 -0700944 void PushVerifier(verifier::MethodVerifier* verifier);
945 void PopVerifier(verifier::MethodVerifier* verifier);
Mathieu Chartier12d625f2015-03-13 11:33:37 -0700946
Jeff Hao848f70a2014-01-15 13:49:50 -0800947 void InitStringEntryPoints();
948
Ian Rogersdd7624d2014-03-14 17:43:00 -0700949 private:
Ian Rogers52673ff2012-06-27 23:25:34 -0700950 explicit Thread(bool daemon);
Ian Rogersb726dcb2012-09-05 08:57:23 -0700951 ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
952 Locks::thread_suspend_count_lock_);
Elliott Hughesc0f09332012-03-26 13:27:06 -0700953 void Destroy();
Carl Shapiro0e5d75d2011-07-06 18:28:37 -0700954
Ian Rogers365c1022012-06-22 15:05:28 -0700955 void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
Elliott Hughes5fe594f2011-09-08 12:33:17 -0700956
Sebastien Hertzd2fe10a2014-01-15 10:20:56 +0100957 template<bool kTransactionActive>
958 void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
959 jobject thread_name, jint thread_priority)
960 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
961
Ian Rogers62d6c772013-02-27 08:32:07 -0800962 // Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
963 // Dbg::Disconnected.
Ian Rogers474b6da2012-09-25 00:20:38 -0700964 ThreadState SetStateUnsafe(ThreadState new_state) {
965 ThreadState old_state = GetState();
Ian Rogersdd7624d2014-03-14 17:43:00 -0700966 tls32_.state_and_flags.as_struct.state = new_state;
Ian Rogersc747cff2012-08-31 18:20:08 -0700967 return old_state;
968 }
Ian Rogersc747cff2012-08-31 18:20:08 -0700969
Ian Rogers04d7aa92013-03-16 14:29:17 -0700970 void VerifyStackImpl() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
971
Ian Rogerscfaa4552012-11-26 21:00:08 -0800972 void DumpState(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700973 void DumpStack(std::ostream& os) const
Ian Rogersb726dcb2012-09-05 08:57:23 -0700974 LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
975 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Elliott Hughesd92bec42011-09-02 17:04:36 -0700976
Elliott Hughesaccd83d2011-10-17 14:25:58 -0700977 // Out-of-line conveniences for debugging in gdb.
Brian Carlstrom7934ac22013-07-26 10:54:15 -0700978 static Thread* CurrentFromGdb(); // Like Thread::Current.
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700979 // Like Thread::Dump(std::cerr).
Ian Rogersb726dcb2012-09-05 08:57:23 -0700980 void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Elliott Hughesaccd83d2011-10-17 14:25:58 -0700981
Elliott Hughes93e74e82011-09-13 11:07:03 -0700982 static void* CreateCallback(void* arg);
983
Ian Rogerscfaa4552012-11-26 21:00:08 -0800984 void HandleUncaughtExceptions(ScopedObjectAccess& soa)
985 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
986 void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Elliott Hughesaccd83d2011-10-17 14:25:58 -0700987
Andreas Gampe449357d2015-06-01 22:29:51 -0700988 // Initialize a thread.
989 //
990 // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. In case
991 // Init succeeds, this means the thread takes ownership of it. If Init fails, it is the caller's
992 // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
993 // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
994 // of false).
995 bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
996 EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
Ian Rogers5d76c432011-10-31 21:42:49 -0700997 void InitCardTable();
Ian Rogersb033c752011-07-20 12:22:35 -0700998 void InitCpu();
Alexei Zavjalov1efa0a92014-02-04 02:08:31 +0700999 void CleanupCpu();
Ian Rogers848871b2013-08-05 10:56:33 -07001000 void InitTlsEntryPoints();
Brian Carlstromcaabb1b2011-10-11 18:09:13 -07001001 void InitTid();
Brian Carlstromcaabb1b2011-10-11 18:09:13 -07001002 void InitPthreadKeySelf();
Ian Rogersf4d4da12014-11-11 16:10:33 -08001003 bool InitStackHwm();
Elliott Hughesbe759c62011-09-08 19:38:21 -07001004
Elliott Hughesd6a23bd2013-07-16 14:19:52 -07001005 void SetUpAlternateSignalStack();
1006 void TearDownAlternateSignalStack();
1007
Ian Rogers474b6da2012-09-25 00:20:38 -07001008 // 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
1009 // change from being Suspended to Runnable without a suspend request occurring.
Chris Dearman59cde532013-12-04 18:53:49 -08001010 union PACKED(4) StateAndFlags {
1011 StateAndFlags() {}
Ian Rogersdf1ce912012-11-27 17:07:11 -08001012 struct PACKED(4) {
Ian Rogers30e173f2012-09-26 14:35:03 -07001013 // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
1014 // ThreadFlags for bit field meanings.
1015 volatile uint16_t flags;
1016 // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
1017 // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
1018 // operation. If a thread is suspended and a suspend_request is present, a thread may not
1019 // change to Runnable as a GC or other operation is in progress.
Ian Rogers01ae5802012-09-28 16:14:01 -07001020 volatile uint16_t state;
Ian Rogers30e173f2012-09-26 14:35:03 -07001021 } as_struct;
Ian Rogersb8e087e2014-07-09 21:12:06 -07001022 AtomicInteger as_atomic_int;
Ian Rogers01ae5802012-09-28 16:14:01 -07001023 volatile int32_t as_int;
Chris Dearman59cde532013-12-04 18:53:49 -08001024
1025 private:
1026 // gcc does not correctly handle assignment to structs with volatile members.
1027 // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
1028 DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
Ian Rogers474b6da2012-09-25 00:20:38 -07001029 };
Andreas Gampe575e78c2014-11-03 23:41:03 -08001030 static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size");
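  // Illustrative sketch (an assumption, not the runtime's actual transition code): because
  // the state and the flags share one 32-bit word, becoming Runnable can check for a pending
  // suspend request and change the state in a single compare-and-swap:
  //
  //   union StateAndFlags old_sf, new_sf;
  //   old_sf.as_int = tls32_.state_and_flags.as_int;
  //   if ((old_sf.as_struct.flags & kSuspendRequest) == 0) {
  //     new_sf.as_int = old_sf.as_int;
  //     new_sf.as_struct.state = kRunnable;
  //     // On CAS failure, re-read the word and retry.
  //     tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakAcquire(old_sf.as_int,
  //                                                                     new_sf.as_int);
  //   }  // Otherwise a suspend request is pending and the thread must not become Runnable.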
Ian Rogers474b6da2012-09-25 00:20:38 -07001031
Ian Rogersdd7624d2014-03-14 17:43:00 -07001032 static void ThreadExitCallback(void* arg);
Elliott Hughes5d96a712012-06-28 12:24:27 -07001033
Dave Allison0aded082013-11-07 13:15:11 -08001034 // Maximum number of checkpoint functions.
1035 static constexpr uint32_t kMaxCheckpoints = 3;
1036
Ian Rogersdd7624d2014-03-14 17:43:00 -07001037 // Has Thread::Startup been called?
1038 static bool is_started_;
Mathieu Chartier858f1c52012-10-17 17:45:55 -07001039
Ian Rogersdd7624d2014-03-14 17:43:00 -07001040 // TLS key used to retrieve the Thread*.
1041 static pthread_key_t pthread_key_self_;
Ian Rogersa32a6fd2012-02-06 20:18:44 -08001042
Ian Rogersdd7624d2014-03-14 17:43:00 -07001043 // Used to notify threads that they should attempt to resume; they will suspend again if
1044 // their suspend count is > 0.
1045 static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
Dave Allisonb373e092014-02-20 16:06:36 -08001046
Ian Rogersdd7624d2014-03-14 17:43:00 -07001047 /***********************************************************************************************/
1048 // Thread local storage. Fields are grouped by size so that the 32-bit and 64-bit layouts can
1049 // be cross-checked despite pointer size differences. To encourage shorter encoding, more
1050 // frequently used values appear first if possible.
1051 /***********************************************************************************************/
Elliott Hughes6a607ad2012-07-13 20:40:00 -07001052
Zuo Wangf37a88b2014-07-10 04:26:41 -07001053 struct PACKED(4) tls_32bit_sized_values {
Ian Rogersdd7624d2014-03-14 17:43:00 -07001054 // We have no control over the size of 'bool', but want our boolean fields
1055 // to be 4-byte quantities.
1056 typedef uint32_t bool32_t;
Ian Rogers22f454c2012-09-08 11:06:29 -07001057
Ian Rogersdd7624d2014-03-14 17:43:00 -07001058 explicit tls_32bit_sized_values(bool is_daemon) :
1059 suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
1060 daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
Mingyao Yang1f2d3ba2015-05-18 12:12:50 -07001061 thread_exit_check_count(0), handling_signal_(false),
1062 deoptimization_return_value_is_reference(false), suspended_at_suspend_check(false),
Sebastien Hertz9d6bf692015-04-10 12:12:33 +02001063 ready_for_debug_invoke(false), debug_method_entry_(false) {
Ian Rogersdd7624d2014-03-14 17:43:00 -07001064 }
Dave Allisonb373e092014-02-20 16:06:36 -08001065
Ian Rogersdd7624d2014-03-14 17:43:00 -07001066 union StateAndFlags state_and_flags;
Andreas Gampe575e78c2014-11-03 23:41:03 -08001067 static_assert(sizeof(union StateAndFlags) == sizeof(int32_t),
1068 "Size of state_and_flags and int32 are different");
Dave Allisonb373e092014-02-20 16:06:36 -08001069
Ian Rogersdd7624d2014-03-14 17:43:00 -07001070 // A non-zero value is used to tell the current thread to enter a safe point
1071 // at the next poll.
1072 int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001073
Ian Rogersdd7624d2014-03-14 17:43:00 -07001074 // How much of 'suspend_count_' is by request of the debugger, used to set things right
1075 // when the debugger detaches. Must be <= suspend_count_.
1076 int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001077
Ian Rogersdd7624d2014-03-14 17:43:00 -07001078 // Thin lock thread id. This is a small integer used by the thin lock implementation.
1079 // This is not to be confused with the native thread's tid, nor is it the value returned
1080 // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
1081 // important difference between this id and the ids visible to managed code is that these
1082 // ones get reused (to ensure that they fit in the number of bits available).
1083 uint32_t thin_lock_thread_id;
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001084
Ian Rogersdd7624d2014-03-14 17:43:00 -07001085 // System thread id.
1086 uint32_t tid;
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001087
Ian Rogersdd7624d2014-03-14 17:43:00 -07001088 // Is the thread a daemon?
1089 const bool32_t daemon;
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001090
Ian Rogersdd7624d2014-03-14 17:43:00 -07001091 // A boolean telling us whether we're recursively throwing OOME.
1092 bool32_t throwing_OutOfMemoryError;
1093
1094 // A positive value implies we're in a region where thread suspension isn't expected.
1095 uint32_t no_thread_suspension;
1096
1097 // How many times has our pthread key's destructor been called?
1098 uint32_t thread_exit_check_count;
Sebastien Hertz9f102032014-05-23 08:59:42 +02001099
Dave Allison648d7112014-07-25 16:15:27 -07001100 // True if signal is being handled by this thread.
1101 bool32_t handling_signal_;
1102
Mingyao Yang1f2d3ba2015-05-18 12:12:50 -07001103 // True if the return value to be passed to the interpreter after deoptimization is a reference.
1104 // Used for GC purposes.
1105 bool32_t deoptimization_return_value_is_reference;
1106
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001107 // True if the thread is suspended in FullSuspendCheck(). This is
1108 // used to distinguish runnable threads that are suspended due to
1109 // a normal suspend check from other threads.
1110 bool32_t suspended_at_suspend_check;
Sebastien Hertz1558b572015-02-25 15:05:59 +01001111
1112 // True if the thread has been suspended by a debugger event. This is
1113 // used when invoking a method from the debugger, which is only allowed when
1114 // the thread is suspended by an event.
1115 bool32_t ready_for_debug_invoke;
Sebastien Hertz9d6bf692015-04-10 12:12:33 +02001116
1117 // True when the thread has just entered a method. This is used to detect method entry
1118 // events for the debugger.
1119 bool32_t debug_method_entry_;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001120 } tls32_;
1121
1122 struct PACKED(8) tls_64bit_sized_values {
1123 tls_64bit_sized_values() : trace_clock_base(0), deoptimization_return_value() {
1124 }
1125
1126 // The clock base used for tracing.
1127 uint64_t trace_clock_base;
1128
1129 // Return value used by deoptimization.
1130 JValue deoptimization_return_value;
1131
1132 RuntimeStats stats;
1133 } tls64_;
1134
1135 struct PACKED(4) tls_ptr_sized_values {
1136 tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
Andreas Gampe449357d2015-06-01 22:29:51 -07001137 managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), tmp_jni_env(nullptr),
1138 self(nullptr), opeer(nullptr), jpeer(nullptr), stack_begin(nullptr), stack_size(0),
Ian Rogersdd7624d2014-03-14 17:43:00 -07001139 stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001140 top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
Ian Rogersdd7624d2014-03-14 17:43:00 -07001141 instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
Mingyao Yang1f2d3ba2015-05-18 12:12:50 -07001142 stacked_shadow_frame_record(nullptr), deoptimization_return_value_stack(nullptr),
1143 name(nullptr), pthread_self(0),
1144 last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
Ian Rogersdd7624d2014-03-14 17:43:00 -07001145 thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
Ian Rogers7b078e82014-09-10 14:44:24 -07001146 thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr),
Mathieu Chartier12d625f2015-03-13 11:33:37 -07001147 nested_signal_state(nullptr), flip_function(nullptr), method_verifier(nullptr) {
1148 std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001149 }
1150
1151 // The biased card table, see CardTable for details.
Ian Rogers13735952014-10-08 12:43:28 -07001152 uint8_t* card_table;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001153
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001154 // The pending exception or null.
Ian Rogersdd7624d2014-03-14 17:43:00 -07001155 mirror::Throwable* exception;
1156
1157 // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
1158 // We leave extra space so there's room for the code that throws StackOverflowError.
Ian Rogers13735952014-10-08 12:43:28 -07001159 uint8_t* stack_end;
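    // Illustrative sketch (an assumption, not this header's code; GetCurrentStackPointer and
    // ThrowStackOverflowError are used here as indicative names): an explicit overflow check
    // compares the stack pointer against stack_end before committing a new frame, and the
    // throw path runs in the space reserved between stack_begin and stack_end:
    //
    //   if (UNLIKELY(GetCurrentStackPointer() < stack_end)) {
    //     ThrowStackOverflowError(self);
    //   }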
Ian Rogersdd7624d2014-03-14 17:43:00 -07001160
1161 // The top of the managed stack, often manipulated directly by compiler-generated code.
1162 ManagedStack managed_stack;
1163
1164 // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check. It is
1165 // normally set to the address of itself.
1166 uintptr_t* suspend_trigger;
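    // Illustrative sketch (an assumption, not the runtime's actual code): with implicit
    // suspend checks, code at safe points simply loads through this pointer; to request a
    // suspend, another thread stores 0 into suspend_trigger, so the next such load faults
    // and the fault handler turns the SEGV into a suspend check:
    //
    //   volatile uintptr_t* trigger = tlsPtr_.suspend_trigger;
    //   uintptr_t ignored = *trigger;  // Faults once the trigger has been cleared to 0.
    //   (void) ignored;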
1167
1168 // Every thread may have an associated JNI environment
1169 JNIEnvExt* jni_env;
1170
Andreas Gampe449357d2015-06-01 22:29:51 -07001171 // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
1172 // created thread.
1173 JNIEnvExt* tmp_jni_env;
1174
Ian Rogersdd7624d2014-03-14 17:43:00 -07001175 // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
1176 // is easy but getting the address of Thread::Current is hard. This field can be read off of
1177 // Thread::Current to give the address.
1178 Thread* self;
1179
1180 // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
1181 // start up, until the thread is registered and the local opeer_ is used.
1182 mirror::Object* opeer;
1183 jobject jpeer;
1184
1185 // The "lowest addressable byte" of the stack.
Ian Rogers13735952014-10-08 12:43:28 -07001186 uint8_t* stack_begin;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001187
1188 // Size of the stack.
1189 size_t stack_size;
1190
Ian Rogersdd7624d2014-03-14 17:43:00 -07001191 // Pointer to previous stack trace captured by sampling profiler.
Mathieu Chartiere401d142015-04-22 13:56:20 -07001192 std::vector<ArtMethod*>* stack_trace_sample;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001193
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001194 // The next thread in the wait set this thread is part of or null if not waiting.
Ian Rogersdd7624d2014-03-14 17:43:00 -07001195 Thread* wait_next;
1196
1197 // If we're blocked in MonitorEnter, this is the object we're trying to lock.
1198 mirror::Object* monitor_enter_object;
1199
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001200 // Top of linked list of handle scopes or null for none.
Mathieu Chartiereb8167a2014-05-07 15:43:14 -07001201 HandleScope* top_handle_scope;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001202
1203 // Needed to get the right ClassLoader in JNI_OnLoad, but also
1204 // useful for testing.
Ian Rogers68d8b422014-07-17 11:09:10 -07001205 jobject class_loader_override;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001206
1207 // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
1208 Context* long_jump_context;
1209
1210 // Additional stack used by method instrumentation to store method and return pc values.
1211 // Stored as a pointer since std::deque is not PACKED.
1212 std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;
1213
1214 // JDWP invoke-during-breakpoint support.
1215 DebugInvokeReq* debug_invoke_req;
1216
1217 // JDWP single-stepping support.
1218 SingleStepControl* single_step_control;
1219
Mingyao Yang1f2d3ba2015-05-18 12:12:50 -07001220 // For GC purposes, a shadow frame record stack that keeps track of:
1221 // 1) shadow frames under construction.
1222 // 2) deoptimization shadow frames.
1223 StackedShadowFrameRecord* stacked_shadow_frame_record;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001224
Mingyao Yang1f2d3ba2015-05-18 12:12:50 -07001225 // Deoptimization return value record stack.
1226 DeoptimizationReturnValueRecord* deoptimization_return_value_stack;
Andreas Gampe2a0d4ec2014-06-02 22:05:22 -07001227
Ian Rogersdd7624d2014-03-14 17:43:00 -07001228 // A cached copy of the java.lang.Thread's name.
1229 std::string* name;
1230
1231 // A cached pthread_t for the pthread underlying this Thread*.
1232 pthread_t pthread_self;
1233
Ian Rogersdd7624d2014-03-14 17:43:00 -07001234 // If no_thread_suspension_ is > 0, what is causing that assertion.
1235 const char* last_no_thread_suspension_cause;
1236
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001237 // Pending checkpoint function or null if non-pending. Installation is guarded by
Ian Rogersdd7624d2014-03-14 17:43:00 -07001238 // Locks::thread_suspend_count_lock_.
1239 Closure* checkpoint_functions[kMaxCheckpoints];
1240
1241 // Entrypoint function pointers.
1242 // TODO: move this to more of a global offset table model to avoid per-thread duplication.
1243 InterpreterEntryPoints interpreter_entrypoints;
1244 JniEntryPoints jni_entrypoints;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001245 QuickEntryPoints quick_entrypoints;
1246
1247 // Thread-local allocation pointer.
Ian Rogers13735952014-10-08 12:43:28 -07001248 uint8_t* thread_local_start;
1249 uint8_t* thread_local_pos;
1250 uint8_t* thread_local_end;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001251 size_t thread_local_objects;
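    // Illustrative sketch (an assumption, not the allocator's actual code): a thread-local
    // allocation is a bump of thread_local_pos, with the caller falling back to the shared
    // heap path when the request does not fit before thread_local_end:
    //
    //   uint8_t* result = thread_local_pos;
    //   if (UNLIKELY(result + size > thread_local_end)) {
    //     return nullptr;  // Slow path: refill the TLAB or allocate directly in the heap.
    //   }
    //   thread_local_pos += size;
    //   ++thread_local_objects;
    //   return reinterpret_cast<mirror::Object*>(result);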
1252
Mathieu Chartier0651d412014-04-29 14:37:57 -07001253 // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
Ian Rogerse63db272014-07-15 15:36:11 -07001254 void* rosalloc_runs[kNumRosAllocThreadLocalSizeBrackets];
Ian Rogersdd7624d2014-03-14 17:43:00 -07001255
1256 // Thread-local allocation stack data/routines.
Mathieu Chartiercb535da2015-01-23 13:50:03 -08001257 StackReference<mirror::Object>* thread_local_alloc_stack_top;
1258 StackReference<mirror::Object>* thread_local_alloc_stack_end;
Chao-ying Fu9e369312014-05-21 11:20:52 -07001259
1260 // Support for Mutex lock hierarchy bug detection.
1261 BaseMutex* held_mutexes[kLockLevelCount];
Dave Allison8ce6b902014-08-26 11:07:58 -07001262
1263 // Recorded thread state for nested signals.
1264 jmp_buf* nested_signal_state;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001265
1266 // The function used for thread flip.
1267 Closure* flip_function;
Mathieu Chartier12d625f2015-03-13 11:33:37 -07001268
1269 // Current method verifier, used for root marking.
1270 verifier::MethodVerifier* method_verifier;
Ian Rogersdd7624d2014-03-14 17:43:00 -07001271 } tlsPtr_;
1272
1273 // Guards the 'interrupted_' and 'wait_monitor_' members.
1274 Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1275
1276 // Condition variable waited upon during a wait.
1277 ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001278 // Pointer to the monitor lock we're currently waiting on or null if not waiting.
Ian Rogersdd7624d2014-03-14 17:43:00 -07001279 Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
1280
1281 // Thread "interrupted" status; stays raised until queried or thrown.
1282 bool interrupted_ GUARDED_BY(wait_mutex_);
1283
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001284 friend class Dbg; // For SetStateUnsafe.
Mathieu Chartier15d34022014-02-26 17:16:38 -08001285 friend class gc::collector::SemiSpace; // For getting stack traces.
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001286 friend class Runtime; // For CreatePeer.
Ian Rogers5cf98192014-05-29 21:31:50 -07001287 friend class QuickExceptionHandler; // For dumping the stack.
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001288 friend class ScopedThreadStateChange;
Mathieu Chartier119c6bd2014-05-09 14:11:47 -07001289 friend class StubTest; // For accessing entrypoints.
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001290 friend class ThreadList; // For ~Thread and Destroy.
1291
Andreas Gampe4352b452014-06-04 18:59:01 -07001292 friend class EntrypointsOrderTest; // To test the order of tls entries.
1293
Carl Shapiro0e5d75d2011-07-06 18:28:37 -07001294 DISALLOW_COPY_AND_ASSIGN(Thread);
1295};
Ian Rogersbdb03912011-09-14 00:55:44 -07001296
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001297class ScopedAssertNoThreadSuspension {
1298 public:
1299 ScopedAssertNoThreadSuspension(Thread* self, const char* cause)
1300 : self_(self), old_cause_(self->StartAssertNoThreadSuspension(cause)) {
1301 }
1302 ~ScopedAssertNoThreadSuspension() {
1303 self_->EndAssertNoThreadSuspension(old_cause_);
1304 }
1305 Thread* Self() {
1306 return self_;
1307 }
1308
1309 private:
1310 Thread* const self_;
Ian Rogersf2247512014-12-02 16:17:08 -08001311 const char* const old_cause_;
Mathieu Chartier2d5f39e2014-09-19 17:52:37 -07001312};
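// Illustrative usage sketch (the function below is hypothetical, not part of ART): the scoped
// helper brackets a region in which suspension would be a bug, so any operation inside it that
// could suspend the thread trips the assertion with the given cause string:
//
//   void VisitRawObjects(Thread* self) {
//     ScopedAssertNoThreadSuspension ants(self, "Visiting raw mirror::Object pointers");
//     // Raw Object* values stay valid here: the GC cannot suspend this thread in this scope.
//   }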
1313
Mingyao Yang1f2d3ba2015-05-18 12:12:50 -07001314class ScopedStackedShadowFramePusher {
1315 public:
1316 ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
1317 : self_(self), type_(type) {
1318 self_->PushStackedShadowFrame(sf, type);
1319 }
1320 ~ScopedStackedShadowFramePusher() {
1321 self_->PopStackedShadowFrame(type_);
1322 }
1323
1324 private:
1325 Thread* const self_;
1326 const StackedShadowFrameType type_;
1327
1328 DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
1329};
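// Illustrative usage sketch (an assumption; the frame-construction code is elided): the pusher
// keeps a shadow frame reachable by the GC for the duration of a scope, for example while a
// deoptimization shadow frame is still being populated:
//
//   {
//     ScopedStackedShadowFramePusher pusher(
//         self, new_frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
//     // Fill in new_frame's vregs; references written into it are now visible to the GC
//     // through this thread's stacked shadow frame record.
//   }  // The frame is popped automatically when the scope exits.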
1330
Elliott Hughes330304d2011-08-12 14:28:05 -07001331std::ostream& operator<<(std::ostream& os, const Thread& thread);
Sebastien Hertzf7958692015-06-09 14:09:14 +02001332std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& type);
Carl Shapiro0e5d75d2011-07-06 18:28:37 -07001333
Carl Shapiro0e5d75d2011-07-06 18:28:37 -07001334} // namespace art
1335
Brian Carlstromfc0e3212013-07-17 14:40:12 -07001336#endif // ART_RUNTIME_THREAD_H_