/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_H_
#define ART_RUNTIME_THREAD_H_

#include <bitset>
#include <deque>
#include <iosfwd>
#include <list>
#include <string>

#include "base/macros.h"
#include "base/mutex.h"
#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "globals.h"
#include "jvalue.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
#include "stack_indirect_reference_table.h"
#include "thread_state.h"
#include "throw_location.h"
#include "UniquePtr.h"

namespace art {

namespace gc {
namespace collector {
class SemiSpace;
}  // namespace collector
}  // namespace gc

namespace mirror {
  class ArtMethod;
  class Array;
  class Class;
  class ClassLoader;
  class Object;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int32_t> IntArray;
  class StackTraceElement;
  class StaticStorageBase;
  class Throwable;
}  // namespace mirror
class BaseMutex;
class ClassLinker;
class Closure;
class Context;
struct DebugInvokeReq;
class DexFile;
class JavaVMExt;
struct JNIEnvExt;
class Monitor;
class Runtime;
class ScopedObjectAccess;
class ScopedObjectAccessUnchecked;
class ShadowFrame;
struct SingleStepControl;
class Thread;
class ThreadList;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadFlag {
  kSuspendRequest   = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
                          // safepoint handler.
  kCheckpointRequest = 2  // Request that the thread do some checkpoint work and then continue.
};

class Thread {
 public:
  // Space to throw a StackOverflowError in.
  // TODO: shrink reserved space, in particular for 64bit.
#if defined(__x86_64__)
  static constexpr size_t kStackOverflowReservedBytes = 24 * KB;
#elif defined(__aarch64__)
  // Worst-case, we would need about 2.6x the amount of x86_64 for many more registers.
  // But this one works rather well.
  static constexpr size_t kStackOverflowReservedBytes = 32 * KB;
#else
  static constexpr size_t kStackOverflowReservedBytes = 16 * KB;
#endif
  // How much of the reserved bytes is reserved for incoming signals.
  static constexpr size_t kStackOverflowSignalReservedBytes = 2 * KB;
  // How much of the reserved bytes we may temporarily use during stack overflow checks as an
  // optimization.
  static constexpr size_t kStackOverflowReservedUsableBytes =
      kStackOverflowReservedBytes - kStackOverflowSignalReservedBytes;

  // For implicit overflow checks we reserve an extra piece of memory at the bottom
  // of the stack (lowest memory). The higher portion of the memory
  // is protected against reads and the lower is available for use while
  // throwing the StackOverflow exception.
  static constexpr size_t kStackOverflowProtectedSize = 32 * KB;
  static constexpr size_t kStackOverflowImplicitCheckSize = kStackOverflowProtectedSize +
      kStackOverflowReservedBytes;
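
  // Worked example (illustrative): with the default 16 KB reservation above, the implicit
  // check size is kStackOverflowProtectedSize + kStackOverflowReservedBytes = 32 KB + 16 KB =
  // 48 KB, and kStackOverflowReservedUsableBytes is 16 KB - 2 KB = 14 KB.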

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                        bool create_peer);

  // Reset internal state of child thread after fork.
  void InitAfterFork();

  static Thread* Current();

  static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts,
                                   mirror::Object* thread_peer)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, jobject thread)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Translates 172 to pAllocArrayFromCode and so on.
  template<size_t size_of_pointers>
  static void DumpThreadOffset(std::ostream& os, uint32_t offset);

  // Dumps a one-line summary of thread state (used for operator<<).
  void ShortDump(std::ostream& os) const;

  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
  void Dump(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DumpJavaStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThreadState GetState() const {
    DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
    DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
    return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
  }

  ThreadState SetState(ThreadState new_state);

  int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return tls32_.suspend_count;
  }

  int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return tls32_.debug_suspend_count;
  }

  bool IsSuspended() const {
    union StateAndFlags state_and_flags;
    state_and_flags.as_int = tls32_.state_and_flags.as_int;
    return state_and_flags.as_struct.state != kRunnable &&
        (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
  }

  void ModifySuspendCount(Thread* self, int delta, bool for_debugger)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  bool RequestCheckpoint(Closure* function)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share
  // of the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Transition from a non-runnable to the runnable state, acquiring a share of the mutator_lock_.
  ThreadState TransitionFromSuspendedToRunnable()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Transition from runnable into a state where mutator privileges are denied. Releases the
  // share of the mutator lock.
  void TransitionFromRunnableToSuspended(ThreadState new_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;
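
  // Illustrative usage sketch (not prescribed by this header): a thread about to block in
  // native code brackets the blocking region with these transitions, assuming 'self' is the
  // current Thread*. In most runtime code this pairing is done by ScopedThreadStateChange
  // rather than written out by hand.
  //
  //   self->TransitionFromRunnableToSuspended(kNative);
  //   // ... perform the blocking operation without holding the mutator lock ...
  //   self->TransitionFromSuspendedToRunnable();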

  // Once called, thread suspension will cause an assertion failure.
  const char* StartAssertNoThreadSuspension(const char* cause) {
    if (kIsDebugBuild) {
      CHECK(cause != NULL);
      const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
      tls32_.no_thread_suspension++;
      tlsPtr_.last_no_thread_suspension_cause = cause;
      return previous_cause;
    } else {
      return nullptr;
    }
  }

  // End region where no thread suspension is expected.
  void EndAssertNoThreadSuspension(const char* old_cause) {
    if (kIsDebugBuild) {
      CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
      CHECK_GT(tls32_.no_thread_suspension, 0U);
      tls32_.no_thread_suspension--;
      tlsPtr_.last_no_thread_suspension_cause = old_cause;
    }
  }
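
  // Illustrative usage sketch: callers pair the two calls and hand the returned previous cause
  // back when the region ends ('self' is the current Thread*; the cause string is arbitrary
  // and only used for diagnostics):
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting roots");
  //   // ... code that must not suspend ...
  //   self->EndAssertNoThreadSuspension(old_cause);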

  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;

  bool IsDaemon() const {
    return tls32_.daemon;
  }

  bool HoldsLock(mirror::Object*) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

  uint32_t GetThreadId() const {
    return tls32_.thin_lock_thread_id;
  }

  pid_t GetTid() const {
    return tls32_.tid;
  }

  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
  mirror::String* GetThreadName(const ScopedObjectAccessUnchecked& ts) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
  uint64_t GetCpuMicroTime() const;

  mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(tlsPtr_.jpeer == nullptr);
    return tlsPtr_.opeer;
  }

  bool HasPeer() const {
    return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
  }

  RuntimeStats* GetStats() {
    return &tls64_.stats;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    return tlsPtr_.exception != nullptr;
  }

  mirror::Throwable* GetException(ThrowLocation* throw_location) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (throw_location != nullptr) {
      *throw_location = tlsPtr_.throw_location;
    }
    return tlsPtr_.exception;
  }

  void AssertNoPendingException() const;
  void AssertNoPendingExceptionForNewException(const char* msg) const;

  void SetException(const ThrowLocation& throw_location, mirror::Throwable* new_exception)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(new_exception != NULL);
    // TODO: DCHECK(!IsExceptionPending());
    tlsPtr_.exception = new_exception;
    tlsPtr_.throw_location = throw_location;
  }

  void ClearException() {
    tlsPtr_.exception = nullptr;
    tlsPtr_.throw_location.Clear();
  }
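
  // Illustrative usage sketch of the pending-exception API above ('self' is the current
  // Thread*; 'exception' is an already-allocated mirror::Throwable*):
  //
  //   self->SetException(self->GetCurrentLocationForThrow(), exception);
  //   ...
  //   if (self->IsExceptionPending()) {
  //     ThrowLocation where;
  //     mirror::Throwable* pending = self->GetException(&where);
  //     // ... report or handle 'pending' ...
  //     self->ClearException();
  //   }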

  // Find the catch block and perform a long jump to the appropriate exception handler.
  void QuickDeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    DCHECK(tlsPtr_.long_jump_context == nullptr);
    tlsPtr_.long_jump_context = context;
  }
337
Brian Carlstromea46f952013-07-30 01:26:50 -0700338 mirror::ArtMethod* GetCurrentMethod(uint32_t* dex_pc) const
Ian Rogersb726dcb2012-09-05 08:57:23 -0700339 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Ian Rogers0399dde2012-06-06 17:09:28 -0700340
Ian Rogers62d6c772013-02-27 08:32:07 -0800341 ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
342
Andreas Gampebf6b92a2014-03-05 16:11:04 -0800343 void SetTopOfStack(mirror::ArtMethod** top_method, uintptr_t pc) {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700344 tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
345 tlsPtr_.managed_stack.SetTopQuickFramePc(pc);
Carl Shapiro0e5d75d2011-07-06 18:28:37 -0700346 }
347
Jeff Hao11ffc2d2013-02-01 11:52:17 -0800348 void SetTopOfShadowStack(ShadowFrame* top) {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700349 tlsPtr_.managed_stack.SetTopShadowFrame(top);
Jeff Hao11ffc2d2013-02-01 11:52:17 -0800350 }
351
Ian Rogers0399dde2012-06-06 17:09:28 -0700352 bool HasManagedStack() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700353 return (tlsPtr_.managed_stack.GetTopQuickFrame() != nullptr) ||
354 (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
Ian Rogersbdb03912011-09-14 00:55:44 -0700355 }
356
Elliott Hughesa4f94742012-05-29 16:28:38 -0700357 // If 'msg' is NULL, no detail message is set.
Ian Rogers62d6c772013-02-27 08:32:07 -0800358 void ThrowNewException(const ThrowLocation& throw_location,
359 const char* exception_class_descriptor, const char* msg)
Ian Rogersb726dcb2012-09-05 08:57:23 -0700360 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Elliott Hughes5cb5ad22011-10-02 12:13:39 -0700361
Elliott Hughesa4f94742012-05-29 16:28:38 -0700362 // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
363 // used as the new exception's cause.
Ian Rogers62d6c772013-02-27 08:32:07 -0800364 void ThrowNewWrappedException(const ThrowLocation& throw_location,
365 const char* exception_class_descriptor,
366 const char* msg)
Ian Rogersb726dcb2012-09-05 08:57:23 -0700367 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Elliott Hughesa4f94742012-05-29 16:28:38 -0700368
Ian Rogers62d6c772013-02-27 08:32:07 -0800369 void ThrowNewExceptionF(const ThrowLocation& throw_location,
370 const char* exception_class_descriptor, const char* fmt, ...)
371 __attribute__((format(printf, 4, 5)))
Ian Rogersb726dcb2012-09-05 08:57:23 -0700372 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Elliott Hughesa5b897e2011-08-16 11:33:06 -0700373
Ian Rogers62d6c772013-02-27 08:32:07 -0800374 void ThrowNewExceptionV(const ThrowLocation& throw_location,
375 const char* exception_class_descriptor, const char* fmt, va_list ap)
Ian Rogersb726dcb2012-09-05 08:57:23 -0700376 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Elliott Hughes4a2b4172011-09-20 17:08:25 -0700377
Elliott Hughes2ced6a52011-10-16 18:44:48 -0700378 // OutOfMemoryError is special, because we need to pre-allocate an instance.
Elliott Hughes8a8b9cb2012-04-13 18:29:22 -0700379 // Only the GC should call this.
Ian Rogers120f1c72012-09-28 17:17:10 -0700380 void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Elliott Hughes79082e32011-08-25 12:07:32 -0700381
Elliott Hughesbe759c62011-09-08 19:38:21 -0700382 static void Startup();
Elliott Hughes038a8062011-09-18 14:12:41 -0700383 static void FinishStartup();
Elliott Hughesc1674ed2011-08-25 18:09:09 -0700384 static void Shutdown();
Carl Shapirob5573532011-07-12 18:22:59 -0700385
Ian Rogersb033c752011-07-20 12:22:35 -0700386 // JNI methods
Elliott Hughes69f5bc62011-08-24 09:26:14 -0700387 JNIEnvExt* GetJniEnv() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700388 return tlsPtr_.jni_env;
Ian Rogersb033c752011-07-20 12:22:35 -0700389 }
390
  // Convert a jobject into an Object*.
  mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* GetMonitorEnterObject() const {
    return tlsPtr_.monitor_enter_object;
  }

  void SetMonitorEnterObject(mirror::Object* obj) {
    tlsPtr_.monitor_enter_object = obj;
  }

  // Implements java.lang.Thread.interrupted.
  bool Interrupted() LOCKS_EXCLUDED(wait_mutex_);
  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() LOCKS_EXCLUDED(wait_mutex_);
  bool IsInterruptedLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return interrupted_;
  }
  void Interrupt(Thread* self) LOCKS_EXCLUDED(wait_mutex_);
  void SetInterruptedLocked(bool i) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    interrupted_ = i;
  }
  void Notify() LOCKS_EXCLUDED(wait_mutex_);

 private:
  void NotifyLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_);

 public:
  Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
    return wait_mutex_;
  }

  ConditionVariable* GetWaitConditionVariable() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return wait_cond_;
  }

  Monitor* GetWaitMonitor() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return wait_monitor_;
  }

  void SetWaitMonitor(Monitor* mon) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    wait_monitor_ = mon;
  }

  // Waiter linked-list support.
  Thread* GetWaitNext() const {
    return tlsPtr_.wait_next;
  }

  void SetWaitNext(Thread* next) {
    tlsPtr_.wait_next = next;
  }

  mirror::ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.class_loader_override;
  }

  void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Create the internal representation of a stack trace, which is more time- and space-efficient
  // to compute than the StackTraceElement[].
  template<bool kTransactionActive>
  jobject CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(const ScopedObjectAccess& soa,
      jobject internal, jobjectArray output_array = nullptr, int* stack_depth = nullptr)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void VisitRoots(RootCallback* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ALWAYS_INLINE void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThinLockIdOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadFlagsOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
  }

 private:
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
    size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
    size_t scale;
    size_t shrink;
    if (pointer_size == sizeof(void*)) {
      scale = 1;
      shrink = 1;
    } else if (pointer_size > sizeof(void*)) {
      scale = pointer_size / sizeof(void*);
      shrink = 1;
    } else {
      DCHECK_GT(sizeof(void*), pointer_size);
      scale = 1;
      shrink = sizeof(void*) / pointer_size;
    }
    return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
  }
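
  // Worked example (illustrative): when the compiled target's pointer size differs from the
  // host's, offsets into tlsPtr_ are rescaled. Cross-compiling a 64-bit target from a 32-bit
  // host gives scale = 8 / 4 = 2, so an entry 12 bytes (three pointers) into tlsPtr_ on the
  // host maps to base + 12 * 2 = base + 24 in the target's Thread layout; the 32-bit-from-
  // 64-bit case divides by shrink = 2 instead.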
507
Ian Rogersdd7624d2014-03-14 17:43:00 -0700508 public:
509 template<size_t pointer_size>
510 static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
511 return ThreadOffsetFromTlsPtr<pointer_size>(
512 OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
Elliott Hughesbe759c62011-09-08 19:38:21 -0700513 }
514
Ian Rogersdd7624d2014-03-14 17:43:00 -0700515 template<size_t pointer_size>
516 static ThreadOffset<pointer_size> InterpreterEntryPointOffset(size_t interp_entrypoint_offset) {
517 return ThreadOffsetFromTlsPtr<pointer_size>(
518 OFFSETOF_MEMBER(tls_ptr_sized_values, interpreter_entrypoints) + interp_entrypoint_offset);
Elliott Hughesbe759c62011-09-08 19:38:21 -0700519 }
520
Ian Rogersdd7624d2014-03-14 17:43:00 -0700521 template<size_t pointer_size>
522 static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
523 return ThreadOffsetFromTlsPtr<pointer_size>(
524 OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
Elliott Hughesbe759c62011-09-08 19:38:21 -0700525 }
526
Ian Rogersdd7624d2014-03-14 17:43:00 -0700527 template<size_t pointer_size>
528 static ThreadOffset<pointer_size> PortableEntryPointOffset(size_t port_entrypoint_offset) {
529 return ThreadOffsetFromTlsPtr<pointer_size>(
530 OFFSETOF_MEMBER(tls_ptr_sized_values, portable_entrypoints) + port_entrypoint_offset);
531 }
532
533 template<size_t pointer_size>
534 static ThreadOffset<pointer_size> SelfOffset() {
535 return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
536 }
537
538 template<size_t pointer_size>
539 static ThreadOffset<pointer_size> ExceptionOffset() {
540 return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
541 }
542
543 template<size_t pointer_size>
544 static ThreadOffset<pointer_size> PeerOffset() {
545 return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
546 }
547
548
549 template<size_t pointer_size>
550 static ThreadOffset<pointer_size> CardTableOffset() {
551 return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
552 }
553
554 template<size_t pointer_size>
555 static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
556 return ThreadOffsetFromTlsPtr<pointer_size>(
557 OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
Dave Allisonb373e092014-02-20 16:06:36 -0800558 }
559
Ian Rogers932746a2011-09-22 18:57:50 -0700560 // Size of stack less any space reserved for stack overflow
jeffhaod7521322012-11-21 15:38:24 -0800561 size_t GetStackSize() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700562 return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
Ian Rogers932746a2011-09-22 18:57:50 -0700563 }
564
jeffhaod7521322012-11-21 15:38:24 -0800565 byte* GetStackEnd() const {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700566 return tlsPtr_.stack_end;
jeffhaod7521322012-11-21 15:38:24 -0800567 }
568
Ian Rogers932746a2011-09-22 18:57:50 -0700569 // Set the stack end to that to be used during a stack overflow
Ian Rogersb726dcb2012-09-05 08:57:23 -0700570 void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Ian Rogers932746a2011-09-22 18:57:50 -0700571
572 // Set the stack end to that to be used during regular execution
Dave Allisonf9439142014-03-27 15:10:22 -0700573 void ResetDefaultStackEnd(bool implicit_overflow_check) {
Ian Rogers932746a2011-09-22 18:57:50 -0700574 // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
575 // to throw a StackOverflowError.
Dave Allisonf9439142014-03-27 15:10:22 -0700576 if (implicit_overflow_check) {
577 // For implicit checks we also need to add in the protected region above the
578 // overflow region.
Ian Rogersdd7624d2014-03-14 17:43:00 -0700579 tlsPtr_.stack_end = tlsPtr_.stack_begin + kStackOverflowImplicitCheckSize;
Dave Allisonf9439142014-03-27 15:10:22 -0700580 } else {
Ian Rogersdd7624d2014-03-14 17:43:00 -0700581 tlsPtr_.stack_end = tlsPtr_.stack_begin + kStackOverflowReservedBytes;
Dave Allisonf9439142014-03-27 15:10:22 -0700582 }
Ian Rogers932746a2011-09-22 18:57:50 -0700583 }
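
  // Rough stack layout after ResetDefaultStackEnd (illustrative; lowest address first):
  //
  //   stack_begin ->  +-----------------------------------+
  //                   | protected region (implicit only)  |  kStackOverflowProtectedSize
  //                   | StackOverflowError throw space    |  kStackOverflowReservedBytes
  //   stack_end   ->  +-----------------------------------+
  //                   | ordinary frames ...                |
  //                   +-----------------------------------+  (highest address)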

  // Install the protected region for implicit stack checks.
  void InstallImplicitProtection(bool is_main_stack);

  bool IsHandlingStackOverflow() const {
    return tlsPtr_.stack_end == tlsPtr_.stack_begin;
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> StackEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> JniEnvOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopQuickFrameOffset());
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopOfManagedStackPcOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopQuickFramePcOffset());
  }

  const ManagedStack* GetManagedStack() const {
    return &tlsPtr_.managed_stack;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
  }

  ShadowFrame* PopShadowFrame() {
    return tlsPtr_.managed_stack.PopShadowFrame();
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopShadowFrameOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopShadowFrameOffset());
  }

  // Number of references allocated in JNI ShadowFrames on this thread.
  size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
  }

  // Number of references in SIRTs on this thread.
  size_t NumSirtReferences();

  // Number of references allocated in SIRTs & JNI shadow frames on this thread.
  size_t NumStackReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return NumSirtReferences() + NumJniShadowFrameReferences();
  };

  // Is the given obj in this thread's stack indirect reference table?
  bool SirtContains(jobject obj) const;

  void SirtVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void PushSirt(StackIndirectReferenceTable* sirt) {
    sirt->SetLink(tlsPtr_.top_sirt);
    tlsPtr_.top_sirt = sirt;
  }

  StackIndirectReferenceTable* PopSirt() {
    StackIndirectReferenceTable* sirt = tlsPtr_.top_sirt;
    DCHECK(sirt != NULL);
    tlsPtr_.top_sirt = tlsPtr_.top_sirt->GetLink();
    return sirt;
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopSirtOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, top_sirt));
  }

  DebugInvokeReq* GetInvokeReq() const {
    return tlsPtr_.debug_invoke_req;
  }

  SingleStepControl* GetSingleStepControl() const {
    return tlsPtr_.single_step_control;
  }

  // Returns the fake exception used to activate deoptimization.
  static mirror::Throwable* GetDeoptimizationException() {
    return reinterpret_cast<mirror::Throwable*>(-1);
  }

  void SetDeoptimizationShadowFrame(ShadowFrame* sf);
  void SetDeoptimizationReturnValue(const JValue& ret_val);

  ShadowFrame* GetAndClearDeoptimizationShadowFrame(JValue* ret_val);

  bool HasDeoptimizationShadowFrame() const {
    return tlsPtr_.deoptimization_shadow_frame != nullptr;
  }

  std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
    return tlsPtr_.instrumentation_stack;
  }

  std::vector<mirror::ArtMethod*>* GetStackTraceSample() const {
    return tlsPtr_.stack_trace_sample;
  }

  void SetStackTraceSample(std::vector<mirror::ArtMethod*>* sample) {
    tlsPtr_.stack_trace_sample = sample;
  }

  uint64_t GetTraceClockBase() const {
    return tls64_.trace_clock_base;
  }

  void SetTraceClockBase(uint64_t clock_base) {
    tls64_.trace_clock_base = clock_base;
  }

  BaseMutex* GetHeldMutex(LockLevel level) const {
    return tlsPtr_.held_mutexes[level];
  }

  void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
    tlsPtr_.held_mutexes[level] = mutex;
  }

  void RunCheckpointFunction();

  bool ReadFlag(ThreadFlag flag) const {
    return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
  }

  bool TestAllFlags() const {
    return (tls32_.state_and_flags.as_struct.flags != 0);
  }

  void AtomicSetFlag(ThreadFlag flag);

  void AtomicClearFlag(ThreadFlag flag);

  void ResetQuickAllocEntryPointsForThread();

  // Returns the remaining space in the TLAB.
  size_t TlabSize() const;
  // Doesn't check that there is room.
  mirror::Object* AllocTlab(size_t bytes);
  void SetTlab(byte* start, byte* end);
  bool HasTlab() const;

  // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
  // equal to a valid pointer.
  // TODO: does this need to be atomic? I don't think so.
  void RemoveSuspendTrigger() {
    tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
  }

  // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
  // The next time a suspend check is done, it will load from the value at this address
  // and trigger a SIGSEGV.
  void TriggerSuspend() {
    tlsPtr_.suspend_trigger = nullptr;
  }
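
  // Conceptually (illustrative, not the emitted assembly), compiled code polls the trigger by
  // dereferencing it at each suspend point, so clearing it to nullptr turns the next poll into
  // a SIGSEGV that the fault handler converts into a suspend check:
  //
  //   uintptr_t poll = *tlsPtr_.suspend_trigger;  // faults when the trigger is nullptr
  //   (void) poll;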

  // Push an object onto the allocation stack.
  bool PushOnThreadLocalAllocationStack(mirror::Object* obj);

  // Set the thread local allocation pointers to the given pointers.
  void SetThreadLocalAllocationStack(mirror::Object** start, mirror::Object** end);

  // Resets the thread local allocation pointers.
  void RevokeThreadLocalAllocationStack();

  size_t GetThreadLocalBytesAllocated() const {
    return tlsPtr_.thread_local_pos - tlsPtr_.thread_local_start;
  }

  size_t GetThreadLocalObjectsAllocated() const {
    return tlsPtr_.thread_local_objects;
  }

  // ROS alloc TLS.
  static constexpr size_t kRosAllocNumOfSizeBrackets = 34;

  void* GetRosAllocRun(size_t index) const {
    return tlsPtr_.rosalloc_runs[index];
  }

  void SetRosAllocRun(size_t index, void* run) {
    tlsPtr_.rosalloc_runs[index] = run;
  }

 private:
  explicit Thread(bool daemon);
  ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
                           Locks::thread_suspend_count_lock_);
  void Destroy();

  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);

  template<bool kTransactionActive>
  void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
                jobject thread_name, jint thread_priority)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
  // Dbg::Disconnected.
  ThreadState SetStateUnsafe(ThreadState new_state) {
    ThreadState old_state = GetState();
    tls32_.state_and_flags.as_struct.state = new_state;
    return old_state;
  }

  void VerifyStackImpl() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DumpState(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DumpStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  // Like Thread::Dump(std::cerr).
  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions(ScopedObjectAccess& soa)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
  void InitCardTable();
  void InitCpu();
  void CleanupCpu();
  void InitTlsEntryPoints();
  void InitTid();
  void InitPthreadKeySelf();
  void InitStackHwm();

  void SetUpAlternateSignalStack();
  void TearDownAlternateSignalStack();

  // 32 bits of atomically changed state and flags. Keeping it 32 bits allows an atomic CAS to
  // change from being Suspended to Runnable without a suspend request occurring.
  union PACKED(4) StateAndFlags {
    StateAndFlags() {}
    struct PACKED(4) {
      // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
      // ThreadFlags for bit field meanings.
      volatile uint16_t flags;
      // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
      // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
      // operation. If a thread is suspended and a suspend_request is present, a thread may not
      // change to Runnable as a GC or other operation is in progress.
      volatile uint16_t state;
    } as_struct;
    volatile int32_t as_int;

   private:
    // gcc does not handle struct with volatile member assignments correctly.
    // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
    DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
  };
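
  // Illustrative sketch of why the union is 32 bits: an update builds a new 32-bit image and
  // compare-and-swaps it over as_int, so the state and flag halves cannot be torn or lost
  // (the actual CAS primitive used lives in the runtime's atomics support, not in this header):
  //
  //   union StateAndFlags old_value, new_value;
  //   old_value.as_int = tls32_.state_and_flags.as_int;
  //   new_value.as_int = old_value.as_int;
  //   new_value.as_struct.flags |= kSuspendRequest;
  //   // ... CAS tls32_.state_and_flags.as_int from old_value.as_int to new_value.as_int ...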

  static void ThreadExitCallback(void* arg);

  // Maximum number of checkpoint functions.
  static constexpr uint32_t kMaxCheckpoints = 3;

  // Has Thread::Startup been called?
  static bool is_started_;

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Used to notify threads that they should attempt to resume; they will suspend again if
  // their suspend count is > 0.
  static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  /***********************************************************************************************/
  // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
  // pointer size differences. To encourage shorter encoding, more frequently used values appear
  // first if possible.
  /***********************************************************************************************/

  struct PACKED(4) tls_32bit_sized_values {
    // We have no control over the size of 'bool', but want our boolean fields
    // to be 4-byte quantities.
    typedef uint32_t bool32_t;

    explicit tls_32bit_sized_values(bool is_daemon) :
        suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
        daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
        thread_exit_check_count(0) {
    }

    union StateAndFlags state_and_flags;
    COMPILE_ASSERT(sizeof(union StateAndFlags) == sizeof(int32_t),
                   sizeof_state_and_flags_and_int32_are_different);

    // A non-zero value is used to tell the current thread to enter a safe point
    // at the next poll.
    int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // How much of 'suspend_count_' is by request of the debugger, used to set things right
    // when the debugger detaches. Must be <= suspend_count_.
    int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Thin lock thread id. This is a small integer used by the thin lock implementation.
    // This is not to be confused with the native thread's tid, nor is it the value returned
    // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
    // important difference between this id and the ids visible to managed code is that these
    // ones get reused (to ensure that they fit in the number of bits available).
    uint32_t thin_lock_thread_id;

    // System thread id.
    uint32_t tid;

    // Is the thread a daemon?
    const bool32_t daemon;

    // A boolean telling us whether we're recursively throwing OOME.
    bool32_t throwing_OutOfMemoryError;

    // A positive value implies we're in a region where thread suspension isn't expected.
    uint32_t no_thread_suspension;

    // How many times has our pthread key's destructor been called?
    uint32_t thread_exit_check_count;
  } tls32_;

  struct PACKED(8) tls_64bit_sized_values {
    tls_64bit_sized_values() : trace_clock_base(0), deoptimization_return_value() {
    }

    // The clock base used for tracing.
    uint64_t trace_clock_base;

    // Return value used by deoptimization.
    JValue deoptimization_return_value;

    RuntimeStats stats;
  } tls64_;

  struct PACKED(4) tls_ptr_sized_values {
    tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
        managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), self(nullptr), opeer(nullptr),
        jpeer(nullptr), stack_begin(nullptr), stack_size(0), throw_location(),
        stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
        top_sirt(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
        instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
        deoptimization_shadow_frame(nullptr), name(nullptr), pthread_self(0),
        last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
        thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
        thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr) {
    }

    // The biased card table, see CardTable for details.
    byte* card_table;

    // The pending exception or NULL.
    mirror::Throwable* exception;

    // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
    // We leave extra space so there's room for the code that throws StackOverflowError.
    byte* stack_end;

    // The top of the managed stack, often manipulated directly by compiler generated code.
    ManagedStack managed_stack;

    // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check. It is
    // normally set to the address of itself.
    uintptr_t* suspend_trigger;

    // Every thread may have an associated JNI environment.
    JNIEnvExt* jni_env;

    // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
    // is easy but getting the address of Thread::Current is hard. This field can be read off of
    // Thread::Current to give the address.
    Thread* self;

    // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
    // start up, until the thread is registered and the local opeer_ is used.
    mirror::Object* opeer;
    jobject jpeer;

    // The "lowest addressable byte" of the stack.
    byte* stack_begin;

    // Size of the stack.
    size_t stack_size;

    // The location the current exception was thrown from.
    ThrowLocation throw_location;

    // Pointer to the previous stack trace captured by the sampling profiler.
    std::vector<mirror::ArtMethod*>* stack_trace_sample;

    // The next thread in the wait set this thread is part of, or NULL if not waiting.
    Thread* wait_next;

    // If we're blocked in MonitorEnter, this is the object we're trying to lock.
    mirror::Object* monitor_enter_object;

    // Top of the linked list of stack indirect reference tables, or NULL for none.
    StackIndirectReferenceTable* top_sirt;

    // Needed to get the right ClassLoader in JNI_OnLoad, but also
    // useful for testing.
    mirror::ClassLoader* class_loader_override;

    // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
    Context* long_jump_context;

    // Additional stack used by method instrumentation to store method and return pc values.
    // Stored as a pointer since std::deque is not PACKED.
    std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;

    // JDWP invoke-during-breakpoint support.
    DebugInvokeReq* debug_invoke_req;

    // JDWP single-stepping support.
    SingleStepControl* single_step_control;

    // Shadow frame stack that is used temporarily during the deoptimization of a method.
    ShadowFrame* deoptimization_shadow_frame;

    // A cached copy of the java.lang.Thread's name.
    std::string* name;

    // A cached pthread_t for the pthread underlying this Thread*.
    pthread_t pthread_self;

    // Support for Mutex lock hierarchy bug detection.
    BaseMutex* held_mutexes[kLockLevelCount];

    // If no_thread_suspension_ is > 0, what is causing that assertion.
    const char* last_no_thread_suspension_cause;

    // Pending checkpoint functions, or NULL if none is pending. Installation is guarded by
    // Locks::thread_suspend_count_lock_.
    Closure* checkpoint_functions[kMaxCheckpoints];

    // Entrypoint function pointers.
    // TODO: move this to more of a global offset table model to avoid per-thread duplication.
    InterpreterEntryPoints interpreter_entrypoints;
    JniEntryPoints jni_entrypoints;
    PortableEntryPoints portable_entrypoints;
    QuickEntryPoints quick_entrypoints;

    // Thread-local allocation pointer.
    byte* thread_local_start;
    byte* thread_local_pos;
    byte* thread_local_end;
    size_t thread_local_objects;

    // Thread-local rosalloc runs. There are 34 size brackets in rosalloc
    // runs (RosAlloc::kNumOfSizeBrackets). We can't refer to the
    // RosAlloc class due to a header file circular dependency issue.
    // To compensate, we check that the two values match at RosAlloc
    // initialization time.
    void* rosalloc_runs[kRosAllocNumOfSizeBrackets];

    // Thread-local allocation stack data/routines.
    mirror::Object** thread_local_alloc_stack_top;
    mirror::Object** thread_local_alloc_stack_end;
  } tlsPtr_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Condition variable waited upon during a wait.
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on or NULL if not waiting.
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);

  // Thread "interrupted" status; stays raised until queried or thrown.
  bool interrupted_ GUARDED_BY(wait_mutex_);

  friend class Dbg;  // For SetStateUnsafe.
  friend class gc::collector::SemiSpace;  // For getting stack traces.
  friend class Runtime;  // For CreatePeer.
  friend class ScopedThreadStateChange;
  friend class SignalCatcher;  // For SetStateUnsafe.
  friend class ThreadList;  // For ~Thread and Destroy.

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const ThreadState& state);

}  // namespace art

#endif  // ART_RUNTIME_THREAD_H_