/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_H_
#define ART_RUNTIME_THREAD_H_

#include <bitset>
#include <deque>
#include <iosfwd>
#include <list>
#include <string>

#include "base/macros.h"
#include "base/mutex.h"
#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "gc/allocator/rosalloc.h"
#include "globals.h"
#include "handle_scope.h"
#include "jvalue.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
#include "thread_state.h"
#include "throw_location.h"
#include "UniquePtr.h"

namespace art {

namespace gc {
namespace collector {
class SemiSpace;
}  // namespace collector
}  // namespace gc

namespace mirror {
  class ArtMethod;
  class Array;
  class Class;
  class ClassLoader;
  class Object;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int32_t> IntArray;
  class StackTraceElement;
  class StaticStorageBase;
  class Throwable;
}  // namespace mirror
class BaseMutex;
class ClassLinker;
class Closure;
class Context;
struct DebugInvokeReq;
class DexFile;
class JavaVMExt;
struct JNIEnvExt;
class Monitor;
class Runtime;
class ScopedObjectAccess;
class ScopedObjectAccessUnchecked;
class ShadowFrame;
struct SingleStepControl;
class Thread;
class ThreadList;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadFlag {
  kSuspendRequest = 1,    // If set implies that suspend_count_ > 0 and the Thread should enter the
                          // safepoint handler.
  kCheckpointRequest = 2  // Request that the thread do some checkpoint work and then continue.
};

class Thread {
 public:
  // Space to throw a StackOverflowError in.
  // TODO: shrink reserved space, in particular for 64bit.
#if defined(__x86_64__)
  static constexpr size_t kStackOverflowReservedBytes = 24 * KB;
#elif defined(__aarch64__)
  // Worst-case, we would need about 2.6x the amount of x86_64 for many more registers.
  // But this one works rather well.
  static constexpr size_t kStackOverflowReservedBytes = 32 * KB;
#else
  static constexpr size_t kStackOverflowReservedBytes = 16 * KB;
#endif
  // How much of the reserved bytes is reserved for incoming signals.
  static constexpr size_t kStackOverflowSignalReservedBytes = 2 * KB;
  // How much of the reserved bytes we may temporarily use during stack overflow checks as an
  // optimization.
  static constexpr size_t kStackOverflowReservedUsableBytes =
      kStackOverflowReservedBytes - kStackOverflowSignalReservedBytes;

  // For implicit overflow checks we reserve an extra piece of memory at the bottom
  // of the stack (lowest memory). The higher portion of the memory
  // is protected against reads and the lower is available for use while
  // throwing the StackOverflow exception.
  static constexpr size_t kStackOverflowProtectedSize = 32 * KB;
  static constexpr size_t kStackOverflowImplicitCheckSize = kStackOverflowProtectedSize +
      kStackOverflowReservedBytes;
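  // Worked example (illustrative, not part of the original header): on a target that takes the
  // #else branch above, kStackOverflowReservedUsableBytes is 16 KB - 2 KB = 14 KB and
  // kStackOverflowImplicitCheckSize is 32 KB + 16 KB = 48 KB.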

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                        bool create_peer);

  // Reset internal state of child thread after fork.
  void InitAfterFork();

  static Thread* Current();

  static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts,
                                   mirror::Object* thread_peer)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, jobject thread)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Translates 172 to pAllocArrayFromCode and so on.
  template<size_t size_of_pointers>
  static void DumpThreadOffset(std::ostream& os, uint32_t offset);

  // Dumps a one-line summary of thread state (used for operator<<).
  void ShortDump(std::ostream& os) const;

  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
  void Dump(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DumpJavaStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThreadState GetState() const {
    DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
    DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
    return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
  }

  ThreadState SetState(ThreadState new_state);

  int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return tls32_.suspend_count;
  }

  int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return tls32_.debug_suspend_count;
  }

  bool IsSuspended() const {
    union StateAndFlags state_and_flags;
    state_and_flags.as_int = tls32_.state_and_flags.as_int;
    return state_and_flags.as_struct.state != kRunnable &&
        (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
  }

  void ModifySuspendCount(Thread* self, int delta, bool for_debugger)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  bool RequestCheckpoint(Closure* function)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share of
  // mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
  ThreadState TransitionFromSuspendedToRunnable()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Transition from runnable into a state where mutator privileges are denied. Releases share of
  // mutator lock.
  void TransitionFromRunnableToSuspended(ThreadState new_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;
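  // Illustrative sketch (not part of the original header): runtime code brackets blocking
  // operations with this pair so the GC can proceed while the thread waits, e.g.
  //   self->TransitionFromRunnableToSuspended(kWaitingForGcToComplete);
  //   ... block on a condition variable or perform a long-running native call ...
  //   self->TransitionFromSuspendedToRunnable();
  // Helpers such as ScopedThreadStateChange (a friend class below) perform this pairing
  // automatically.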

  // Once called thread suspension will cause an assertion failure.
  const char* StartAssertNoThreadSuspension(const char* cause) {
    if (kIsDebugBuild) {
      CHECK(cause != NULL);
      const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
      tls32_.no_thread_suspension++;
      tlsPtr_.last_no_thread_suspension_cause = cause;
      return previous_cause;
    } else {
      return nullptr;
    }
  }

  // End region where no thread suspension is expected.
  void EndAssertNoThreadSuspension(const char* old_cause) {
    if (kIsDebugBuild) {
      CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
      CHECK_GT(tls32_.no_thread_suspension, 0U);
      tls32_.no_thread_suspension--;
      tlsPtr_.last_no_thread_suspension_cause = old_cause;
    }
  }
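  // Illustrative sketch (not part of the original header): callers nest these assertions by
  // saving and restoring the previous cause, e.g.
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting thread roots");
  //   ... code that must not suspend ...
  //   self->EndAssertNoThreadSuspension(old_cause);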

  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;

  bool IsDaemon() const {
    return tls32_.daemon;
  }

  bool HoldsLock(mirror::Object*) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

  uint32_t GetThreadId() const {
    return tls32_.thin_lock_thread_id;
  }

  pid_t GetTid() const {
    return tls32_.tid;
  }

  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
  mirror::String* GetThreadName(const ScopedObjectAccessUnchecked& ts) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
  uint64_t GetCpuMicroTime() const;

  mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(tlsPtr_.jpeer == nullptr);
    return tlsPtr_.opeer;
  }

  bool HasPeer() const {
    return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
  }

  RuntimeStats* GetStats() {
    return &tls64_.stats;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    return tlsPtr_.exception != nullptr;
  }

  mirror::Throwable* GetException(ThrowLocation* throw_location) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (throw_location != nullptr) {
      *throw_location = tlsPtr_.throw_location;
    }
    return tlsPtr_.exception;
  }

  void AssertNoPendingException() const;
  void AssertNoPendingExceptionForNewException(const char* msg) const;

  void SetException(const ThrowLocation& throw_location, mirror::Throwable* new_exception)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(new_exception != NULL);
    // TODO: DCHECK(!IsExceptionPending());
    tlsPtr_.exception = new_exception;
    tlsPtr_.throw_location = throw_location;
  }

  void ClearException() {
    tlsPtr_.exception = nullptr;
    tlsPtr_.throw_location.Clear();
  }
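  // Illustrative sketch (not part of the original header): a common caller-side pattern is to
  // check for and consume a pending exception, e.g.
  //   if (self->IsExceptionPending()) {
  //     ThrowLocation throw_location;
  //     mirror::Throwable* exception = self->GetException(&throw_location);
  //     ... report or wrap 'exception' ...
  //     self->ClearException();
  //   }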

  // Find catch block and perform long jump to appropriate exception handler.
  void QuickDeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    DCHECK(tlsPtr_.long_jump_context == nullptr);
    tlsPtr_.long_jump_context = context;
  }

  mirror::ArtMethod* GetCurrentMethod(uint32_t* dex_pc) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetTopOfStack(mirror::ArtMethod** top_method, uintptr_t pc) {
    tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
    tlsPtr_.managed_stack.SetTopQuickFramePc(pc);
  }

  void SetTopOfShadowStack(ShadowFrame* top) {
    tlsPtr_.managed_stack.SetTopShadowFrame(top);
  }

  bool HasManagedStack() const {
    return (tlsPtr_.managed_stack.GetTopQuickFrame() != nullptr) ||
        (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
  }

  // If 'msg' is NULL, no detail message is set.
  void ThrowNewException(const ThrowLocation& throw_location,
                         const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const ThrowLocation& throw_location,
                                const char* exception_class_descriptor,
                                const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionF(const ThrowLocation& throw_location,
                          const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 4, 5)))
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionV(const ThrowLocation& throw_location,
                          const char* exception_class_descriptor, const char* fmt, va_list ap)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return tlsPtr_.jni_env;
  }

  // Convert a jobject into a Object*
  mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* GetMonitorEnterObject() const {
    return tlsPtr_.monitor_enter_object;
  }

  void SetMonitorEnterObject(mirror::Object* obj) {
    tlsPtr_.monitor_enter_object = obj;
  }

  // Implements java.lang.Thread.interrupted.
  bool Interrupted() LOCKS_EXCLUDED(wait_mutex_);
  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() LOCKS_EXCLUDED(wait_mutex_);
  bool IsInterruptedLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return interrupted_;
  }
  void Interrupt(Thread* self) LOCKS_EXCLUDED(wait_mutex_);
  void SetInterruptedLocked(bool i) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    interrupted_ = i;
  }
  void Notify() LOCKS_EXCLUDED(wait_mutex_);

 private:
  void NotifyLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_);

 public:
  Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
    return wait_mutex_;
  }

  ConditionVariable* GetWaitConditionVariable() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return wait_cond_;
  }

  Monitor* GetWaitMonitor() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return wait_monitor_;
  }

  void SetWaitMonitor(Monitor* mon) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    wait_monitor_ = mon;
  }

  // Waiter link-list support.
  Thread* GetWaitNext() const {
    return tlsPtr_.wait_next;
  }

  void SetWaitNext(Thread* next) {
    tlsPtr_.wait_next = next;
  }

  mirror::ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.class_loader_override;
  }

  void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Create the internal representation of a stack trace, that is more time
  // and space efficient to compute than the StackTraceElement[].
  template<bool kTransactionActive>
  jobject CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(const ScopedObjectAccess& soa,
      jobject internal, jobjectArray output_array = nullptr, int* stack_depth = nullptr)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
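  // Illustrative sketch (not part of the original header, assuming the usual ScopedObjectAccess
  // helper 'soa'): building a managed stack trace typically chains the two methods above, e.g.
  //   jobject internal = soa.Self()->CreateInternalStackTrace<false>(soa);
  //   jobjectArray trace = InternalStackTraceToStackTraceElementArray(soa, internal);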

  void VisitRoots(RootCallback* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ALWAYS_INLINE void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThinLockIdOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadFlagsOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
  }

 private:
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
    size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
    size_t scale;
    size_t shrink;
    if (pointer_size == sizeof(void*)) {
      scale = 1;
      shrink = 1;
    } else if (pointer_size > sizeof(void*)) {
      scale = pointer_size / sizeof(void*);
      shrink = 1;
    } else {
      DCHECK_GT(sizeof(void*), pointer_size);
      scale = 1;
      shrink = sizeof(void*) / pointer_size;
    }
    return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
  }
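  // Worked example (illustrative, not part of the original header): with a 32-bit compiler
  // (sizeof(void*) == 4) computing offsets for a 64-bit target (pointer_size == 8), scale is 2
  // and shrink is 1, so a field at tls_ptr_offset 8 (the third pointer slot of
  // tls_ptr_sized_values) maps to base + 16 in the target's Thread layout.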

 public:
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> InterpreterEntryPointOffset(size_t interp_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, interpreter_entrypoints) + interp_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> PortableEntryPointOffset(size_t port_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, portable_entrypoints) + port_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> SelfOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ExceptionOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> PeerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
  }


  template<size_t pointer_size>
  static ThreadOffset<pointer_size> CardTableOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
  }

  // Size of stack less any space reserved for stack overflow
  size_t GetStackSize() const {
    return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
  }

  byte* GetStackEnd() const {
    return tlsPtr_.stack_end;
  }

  // Set the stack end to the value to be used during a stack overflow.
  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Set the stack end to the value to be used during regular execution.
  void ResetDefaultStackEnd(bool implicit_overflow_check) {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    if (implicit_overflow_check) {
      // For implicit checks we also need to add in the protected region above the
      // overflow region.
      tlsPtr_.stack_end = tlsPtr_.stack_begin + kStackOverflowImplicitCheckSize;
    } else {
      tlsPtr_.stack_end = tlsPtr_.stack_begin + kStackOverflowReservedBytes;
    }
  }

  // Install the protected region for implicit stack checks.
  void InstallImplicitProtection(bool is_main_stack);

  bool IsHandlingStackOverflow() const {
    return tlsPtr_.stack_end == tlsPtr_.stack_begin;
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> StackEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> JniEnvOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopQuickFrameOffset());
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopOfManagedStackPcOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopQuickFramePcOffset());
  }

  const ManagedStack* GetManagedStack() const {
    return &tlsPtr_.managed_stack;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
  }

  ShadowFrame* PopShadowFrame() {
    return tlsPtr_.managed_stack.PopShadowFrame();
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopShadowFrameOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopShadowFrameOffset());
  }

  // Number of references allocated in JNI ShadowFrames on this thread.
  size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
  }

  // Number of references in handle scope on this thread.
  size_t NumHandleReferences();

  // Number of references allocated in handle scopes & JNI shadow frames on this thread.
  size_t NumStackReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return NumHandleReferences() + NumJniShadowFrameReferences();
  };

  // Is the given obj in this thread's stack indirect reference table?
  bool HandleScopeContains(jobject obj) const;

  void HandleScopeVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  HandleScope* GetTopHandleScope() {
    return tlsPtr_.top_handle_scope;
  }

  void PushHandleScope(HandleScope* handle_scope) {
    handle_scope->SetLink(tlsPtr_.top_handle_scope);
    tlsPtr_.top_handle_scope = handle_scope;
  }

  HandleScope* PopHandleScope() {
    HandleScope* handle_scope = tlsPtr_.top_handle_scope;
    DCHECK(handle_scope != nullptr);
    tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
    return handle_scope;
  }
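  // Illustrative sketch (not part of the original header): scopes are pushed and popped in LIFO
  // order around code that creates handles, typically via a stack-allocated helper, e.g.
  //   StackHandleScope<1> hs(self);                          // pushes itself on construction
  //   Handle<mirror::Class> klass(hs.NewHandle(raw_class));
  //   ... use 'klass'; the scope pops itself when 'hs' goes out of scope ...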

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopHandleScopeOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                top_handle_scope));
  }

  DebugInvokeReq* GetInvokeReq() const {
    return tlsPtr_.debug_invoke_req;
  }

  SingleStepControl* GetSingleStepControl() const {
    return tlsPtr_.single_step_control;
  }

  // Returns the fake exception used to activate deoptimization.
  static mirror::Throwable* GetDeoptimizationException() {
    return reinterpret_cast<mirror::Throwable*>(-1);
  }

  void SetDeoptimizationShadowFrame(ShadowFrame* sf);
  void SetDeoptimizationReturnValue(const JValue& ret_val);

  ShadowFrame* GetAndClearDeoptimizationShadowFrame(JValue* ret_val);

  bool HasDeoptimizationShadowFrame() const {
    return tlsPtr_.deoptimization_shadow_frame != nullptr;
  }

  std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
    return tlsPtr_.instrumentation_stack;
  }

  std::vector<mirror::ArtMethod*>* GetStackTraceSample() const {
    return tlsPtr_.stack_trace_sample;
  }

  void SetStackTraceSample(std::vector<mirror::ArtMethod*>* sample) {
    tlsPtr_.stack_trace_sample = sample;
  }

  uint64_t GetTraceClockBase() const {
    return tls64_.trace_clock_base;
  }

  void SetTraceClockBase(uint64_t clock_base) {
    tls64_.trace_clock_base = clock_base;
  }

  BaseMutex* GetHeldMutex(LockLevel level) const {
    return tlsPtr_.held_mutexes[level];
  }

  void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
    tlsPtr_.held_mutexes[level] = mutex;
  }

  void RunCheckpointFunction();

  bool ReadFlag(ThreadFlag flag) const {
    return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
  }

  bool TestAllFlags() const {
    return (tls32_.state_and_flags.as_struct.flags != 0);
  }

  void AtomicSetFlag(ThreadFlag flag);

  void AtomicClearFlag(ThreadFlag flag);

  void ResetQuickAllocEntryPointsForThread();

  // Returns the remaining space in the TLAB.
  size_t TlabSize() const;
  // Doesn't check that there is room.
  mirror::Object* AllocTlab(size_t bytes);
  void SetTlab(byte* start, byte* end);
  bool HasTlab() const;
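  // Illustrative sketch (not part of the original header): an allocation fast path can use the
  // TLAB accessors above roughly as follows (the real allocator entrypoints add further checks):
  //   if (self->TlabSize() >= byte_count) {
  //     mirror::Object* obj = self->AllocTlab(byte_count);  // bump-pointer; room checked above
  //     ... initialize 'obj' ...
  //   } else {
  //     ... take the slow path, which may install a fresh buffer via SetTlab() ...
  //   }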

  // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
  // equal to a valid pointer.
  // TODO: does this need to be atomic? I don't think so.
  void RemoveSuspendTrigger() {
    tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
  }

  // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
  // The next time a suspend check is done, it will load from the value at this address
  // and trigger a SIGSEGV.
  void TriggerSuspend() {
    tlsPtr_.suspend_trigger = nullptr;
  }


  // Push an object onto the allocation stack.
  bool PushOnThreadLocalAllocationStack(mirror::Object* obj);

  // Set the thread local allocation pointers to the given pointers.
  void SetThreadLocalAllocationStack(mirror::Object** start, mirror::Object** end);

  // Resets the thread local allocation pointers.
  void RevokeThreadLocalAllocationStack();

  size_t GetThreadLocalBytesAllocated() const {
    return tlsPtr_.thread_local_pos - tlsPtr_.thread_local_start;
  }

  size_t GetThreadLocalObjectsAllocated() const {
    return tlsPtr_.thread_local_objects;
  }

  void* GetRosAllocRun(size_t index) const {
    return tlsPtr_.rosalloc_runs[index];
  }

  void SetRosAllocRun(size_t index, void* run) {
    tlsPtr_.rosalloc_runs[index] = run;
  }

 private:
  explicit Thread(bool daemon);
  ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
                           Locks::thread_suspend_count_lock_);
  void Destroy();

  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);

  template<bool kTransactionActive>
  void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
                jobject thread_name, jint thread_priority)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
  // Dbg::Disconnected.
  ThreadState SetStateUnsafe(ThreadState new_state) {
    ThreadState old_state = GetState();
    tls32_.state_and_flags.as_struct.state = new_state;
    return old_state;
  }

  void VerifyStackImpl() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DumpState(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DumpStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  // Like Thread::Dump(std::cerr).
  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions(ScopedObjectAccess& soa)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
  void InitCardTable();
  void InitCpu();
  void CleanupCpu();
  void InitTlsEntryPoints();
  void InitTid();
  void InitPthreadKeySelf();
  void InitStackHwm();

  void SetUpAlternateSignalStack();
  void TearDownAlternateSignalStack();

  // 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
  // change from being Suspended to Runnable without a suspend request occurring.
  union PACKED(4) StateAndFlags {
    StateAndFlags() {}
    struct PACKED(4) {
      // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
      // ThreadFlags for bit field meanings.
      volatile uint16_t flags;
      // Holds the ThreadState. May be changed non-atomically between Suspended (ie not Runnable)
      // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
      // operation. If a thread is suspended and a suspend_request is present, a thread may not
      // change to Runnable as a GC or other operation is in progress.
      volatile uint16_t state;
    } as_struct;
    volatile int32_t as_int;

   private:
    // gcc does not handle struct with volatile member assignments correctly.
    // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
    DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
  };

  static void ThreadExitCallback(void* arg);

  // Maximum number of checkpoint functions.
  static constexpr uint32_t kMaxCheckpoints = 3;

  // Has Thread::Startup been called?
  static bool is_started_;

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Used to notify threads that they should attempt to resume, they will suspend again if
  // their suspend count is > 0.
  static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  /***********************************************************************************************/
  // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
  // pointer size differences. To encourage shorter encoding, more frequently used values appear
  // first if possible.
  /***********************************************************************************************/

  struct PACKED(4) tls_32bit_sized_values {
    // We have no control over the size of 'bool', but want our boolean fields
    // to be 4-byte quantities.
    typedef uint32_t bool32_t;

    explicit tls_32bit_sized_values(bool is_daemon) :
      suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
      daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
      thread_exit_check_count(0) {
    }

    union StateAndFlags state_and_flags;
    COMPILE_ASSERT(sizeof(union StateAndFlags) == sizeof(int32_t),
                   sizeof_state_and_flags_and_int32_are_different);

    // A non-zero value is used to tell the current thread to enter a safe point
    // at the next poll.
    int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // How much of 'suspend_count_' is by request of the debugger, used to set things right
    // when the debugger detaches. Must be <= suspend_count_.
    int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Thin lock thread id. This is a small integer used by the thin lock implementation.
    // This is not to be confused with the native thread's tid, nor is it the value returned
    // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
    // important difference between this id and the ids visible to managed code is that these
    // ones get reused (to ensure that they fit in the number of bits available).
    uint32_t thin_lock_thread_id;

    // System thread id.
    uint32_t tid;

    // Is the thread a daemon?
    const bool32_t daemon;

    // A boolean telling us whether we're recursively throwing OOME.
    bool32_t throwing_OutOfMemoryError;

    // A positive value implies we're in a region where thread suspension isn't expected.
    uint32_t no_thread_suspension;

    // How many times has our pthread key's destructor been called?
    uint32_t thread_exit_check_count;
  } tls32_;

  struct PACKED(8) tls_64bit_sized_values {
    tls_64bit_sized_values() : trace_clock_base(0), deoptimization_return_value() {
    }

    // The clock base used for tracing.
    uint64_t trace_clock_base;

    // Return value used by deoptimization.
    JValue deoptimization_return_value;

    RuntimeStats stats;
  } tls64_;

  struct PACKED(4) tls_ptr_sized_values {
    tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
      managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), self(nullptr), opeer(nullptr),
      jpeer(nullptr), stack_begin(nullptr), stack_size(0), throw_location(),
      stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
      top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
      instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
      deoptimization_shadow_frame(nullptr), name(nullptr), pthread_self(0),
      last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
      thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
      thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr) {
    }

    // The biased card table, see CardTable for details.
    byte* card_table;

    // The pending exception or NULL.
    mirror::Throwable* exception;

    // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
    // We leave extra space so there's room for the code that throws StackOverflowError.
    byte* stack_end;

    // The top of the managed stack often manipulated directly by compiler generated code.
    ManagedStack managed_stack;

    // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check. It is
    // normally set to the address of itself.
    uintptr_t* suspend_trigger;

    // Every thread may have an associated JNI environment.
    JNIEnvExt* jni_env;

    // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
    // is easy but getting the address of Thread::Current is hard. This field can be read off of
    // Thread::Current to give the address.
    Thread* self;

    // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
    // start up, until the thread is registered and the local opeer_ is used.
    mirror::Object* opeer;
    jobject jpeer;

    // The "lowest addressable byte" of the stack.
    byte* stack_begin;

    // Size of the stack.
    size_t stack_size;

    // The location the current exception was thrown from.
    ThrowLocation throw_location;

    // Pointer to previous stack trace captured by sampling profiler.
    std::vector<mirror::ArtMethod*>* stack_trace_sample;

    // The next thread in the wait set this thread is part of or NULL if not waiting.
    Thread* wait_next;

    // If we're blocked in MonitorEnter, this is the object we're trying to lock.
    mirror::Object* monitor_enter_object;

    // Top of linked list of handle scopes or nullptr for none.
    HandleScope* top_handle_scope;

    // Needed to get the right ClassLoader in JNI_OnLoad, but also
    // useful for testing.
    mirror::ClassLoader* class_loader_override;

    // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
    Context* long_jump_context;

    // Additional stack used by method instrumentation to store method and return pc values.
    // Stored as a pointer since std::deque is not PACKED.
    std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;

    // JDWP invoke-during-breakpoint support.
    DebugInvokeReq* debug_invoke_req;

    // JDWP single-stepping support.
    SingleStepControl* single_step_control;

    // Shadow frame stack that is used temporarily during the deoptimization of a method.
    ShadowFrame* deoptimization_shadow_frame;

    // A cached copy of the java.lang.Thread's name.
    std::string* name;

    // A cached pthread_t for the pthread underlying this Thread*.
    pthread_t pthread_self;

    // Support for Mutex lock hierarchy bug detection.
    BaseMutex* held_mutexes[kLockLevelCount];

    // If no_thread_suspension_ is > 0, what is causing that assertion.
    const char* last_no_thread_suspension_cause;

    // Pending checkpoint function or NULL if non-pending. Installation guarded by
    // Locks::thread_suspend_count_lock_.
    Closure* checkpoint_functions[kMaxCheckpoints];

    // Entrypoint function pointers.
    // TODO: move this to more of a global offset table model to avoid per-thread duplication.
    InterpreterEntryPoints interpreter_entrypoints;
    JniEntryPoints jni_entrypoints;
    PortableEntryPoints portable_entrypoints;
    QuickEntryPoints quick_entrypoints;

    // Thread-local allocation pointer.
    byte* thread_local_start;
    byte* thread_local_pos;
    byte* thread_local_end;
    size_t thread_local_objects;

    // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
    void* rosalloc_runs[gc::allocator::RosAlloc::kNumThreadLocalSizeBrackets];

    // Thread-local allocation stack data/routines.
    mirror::Object** thread_local_alloc_stack_top;
    mirror::Object** thread_local_alloc_stack_end;
  } tlsPtr_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Condition variable waited upon during a wait.
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on or NULL if not waiting.
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);

  // Thread "interrupted" status; stays raised until queried or thrown.
  bool interrupted_ GUARDED_BY(wait_mutex_);

  friend class Dbg;  // For SetStateUnsafe.
  friend class gc::collector::SemiSpace;  // For getting stack traces.
  friend class Runtime;  // For CreatePeer.
  friend class ScopedThreadStateChange;
  friend class SignalCatcher;  // For SetStateUnsafe.
  friend class StubTest;  // For accessing entrypoints.
  friend class ThreadList;  // For ~Thread and Destroy.

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const ThreadState& state);

}  // namespace art

#endif  // ART_RUNTIME_THREAD_H_