/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_H_
#define ART_RUNTIME_THREAD_H_

#include <bitset>
#include <deque>
#include <iosfwd>
#include <list>
#include <memory>
#include <string>

#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/portable/portable_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "globals.h"
#include "handle_scope.h"
#include "instruction_set.h"
#include "jvalue.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
#include "thread_state.h"
#include "throw_location.h"

namespace art {

namespace gc {
namespace collector {
  class SemiSpace;
}  // namespace collector
}  // namespace gc

namespace mirror {
  class ArtMethod;
  class Array;
  class Class;
  class ClassLoader;
  class Object;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int32_t> IntArray;
  class StackTraceElement;
  class Throwable;
}  // namespace mirror
class BaseMutex;
class ClassLinker;
class Closure;
class Context;
struct DebugInvokeReq;
class DexFile;
class JavaVMExt;
struct JNIEnvExt;
class Monitor;
class Runtime;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
struct SingleStepControl;
class Thread;
class ThreadList;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadFlag {
  kSuspendRequest   = 1,  // If set, implies that suspend_count_ > 0 and the Thread should enter
                          // the safepoint handler.
  kCheckpointRequest = 2  // Request that the thread do some checkpoint work and then continue.
};

static constexpr size_t kNumRosAllocThreadLocalSizeBrackets = 34;

class Thread {
 public:
  // How much of the reserved bytes is reserved for incoming signals.
  static constexpr size_t kStackOverflowSignalReservedBytes = 2 * KB;

  // For implicit overflow checks we reserve an extra piece of memory at the bottom
  // of the stack (lowest memory). The higher portion of the memory
  // is protected against reads and the lower is available for use while
  // throwing the StackOverflow exception.
  static constexpr size_t kStackOverflowProtectedSize = 16 * KB;
  static constexpr size_t kStackOverflowImplicitCheckSize = kStackOverflowProtectedSize +
      kRuntimeStackOverflowReservedBytes;

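  // A rough sketch of the resulting low-end stack layout (addresses grow upward
  // in this picture; this is a reading of the constants above, not a normative
  // ABI statement):
  //
  //   stack_begin + kStackOverflowImplicitCheckSize  <- stack_end with implicit checks
  //      ... room to build and throw StackOverflowError ...
  //   stack_begin + kStackOverflowProtectedSize      <- top of the protected region
  //      ... mprotect'ed guard pages ...
  //   stack_begin                                    <- lowest address of the stack
  //
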
  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                        bool create_peer);

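  // Minimal usage sketch (illustrative; the name is a placeholder and a null
  // thread group is typically treated as the default group):
  //
  //   Thread* self = Thread::Attach("native worker", false /* as_daemon */,
  //                                 nullptr /* thread_group */, true /* create_peer */);
  //
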
  // Reset internal state of child thread after fork.
  void InitAfterFork();

  static Thread* Current();

  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
                                   mirror::Object* thread_peer)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Translates 172 to pAllocArrayFromCode and so on.
  template<size_t size_of_pointers>
  static void DumpThreadOffset(std::ostream& os, uint32_t offset);

  // Dumps a one-line summary of thread state (used for operator<<).
  void ShortDump(std::ostream& os) const;

  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
  void Dump(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DumpJavaStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThreadState GetState() const {
    DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
    DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
    return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
  }

  ThreadState SetState(ThreadState new_state);

  int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return tls32_.suspend_count;
  }

  int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return tls32_.debug_suspend_count;
  }

  bool IsSuspended() const {
    union StateAndFlags state_and_flags;
    state_and_flags.as_int = tls32_.state_and_flags.as_int;
    return state_and_flags.as_struct.state != kRunnable &&
        (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
  }

  void ModifySuspendCount(Thread* self, int delta, bool for_debugger)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  bool RequestCheckpoint(Closure* function)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  // Called when this thread detects that thread_suspend_count_ is non-zero. Gives up its share
  // of mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Transition from non-runnable to runnable state, acquiring a share of mutator_lock_.
  ThreadState TransitionFromSuspendedToRunnable()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Transition from runnable into a state where mutator privileges are denied. Releases the
  // share of the mutator lock.
  void TransitionFromRunnableToSuspended(ThreadState new_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;

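  // Illustrative pairing of the two transitions (a sketch of the pattern that
  // scoped helpers such as ScopedObjectAccess follow, not their literal code):
  //
  //   ThreadState old_state = self->TransitionFromSuspendedToRunnable();
  //   ...  // touch managed heap objects while sharing mutator_lock_
  //   self->TransitionFromRunnableToSuspended(old_state);
  //
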
  // Once called, thread suspension will cause an assertion failure.
  const char* StartAssertNoThreadSuspension(const char* cause) {
    if (kIsDebugBuild) {
      CHECK(cause != NULL);
      const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
      tls32_.no_thread_suspension++;
      tlsPtr_.last_no_thread_suspension_cause = cause;
      return previous_cause;
    } else {
      return nullptr;
    }
  }

  // End region where no thread suspension is expected.
  void EndAssertNoThreadSuspension(const char* old_cause) {
    if (kIsDebugBuild) {
      CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
      CHECK_GT(tls32_.no_thread_suspension, 0U);
      tls32_.no_thread_suspension--;
      tlsPtr_.last_no_thread_suspension_cause = old_cause;
    }
  }

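  // Typical usage (a sketch; regions nest, which is why the previous cause is
  // saved and restored):
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting roots");
  //   ...  // code that must not suspend
  //   self->EndAssertNoThreadSuspension(old_cause);
  //
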
  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;

  bool IsDaemon() const {
    return tls32_.daemon;
  }

  bool HoldsLock(mirror::Object*) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

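  // A minimal sketch of that mapping (the nice values here are hypothetical
  // placeholders; the real table lives in the platform-specific thread code):
  //
  //   static const int kNiceValues[10] = {19, 16, 13, 10, 0, -2, -4, -6, -8, -10};
  //   setpriority(PRIO_PROCESS, GetTid(), kNiceValues[newPriority - 1]);
  //
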
  uint32_t GetThreadId() const {
    return tls32_.thin_lock_thread_id;
  }

  pid_t GetTid() const {
    return tls32_.tid;
  }

  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
  mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
  uint64_t GetCpuMicroTime() const;

  mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(tlsPtr_.jpeer == nullptr);
    return tlsPtr_.opeer;
  }

  bool HasPeer() const {
    return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
  }

  RuntimeStats* GetStats() {
    return &tls64_.stats;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    return tlsPtr_.exception != nullptr;
  }

  mirror::Throwable* GetException(ThrowLocation* throw_location) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (throw_location != nullptr) {
      *throw_location = tlsPtr_.throw_location;
    }
    return tlsPtr_.exception;
  }

  void AssertNoPendingException() const;
  void AssertNoPendingExceptionForNewException(const char* msg) const;

  void SetException(const ThrowLocation& throw_location, mirror::Throwable* new_exception)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(new_exception != NULL);
    // TODO: DCHECK(!IsExceptionPending());
    tlsPtr_.exception = new_exception;
    tlsPtr_.throw_location = throw_location;
  }

  void ClearException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    tlsPtr_.exception = nullptr;
    tlsPtr_.throw_location.Clear();
    SetExceptionReportedToInstrumentation(false);
  }

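  // Typical caller-side check (a sketch of the common runtime idiom):
  //
  //   if (self->IsExceptionPending()) {
  //     ThrowLocation throw_location;
  //     mirror::Throwable* exception = self->GetException(&throw_location);
  //     ...  // report or wrap it
  //     self->ClearException();
  //   }
  //
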
  // Find catch block and perform long jump to the appropriate exception handler.
  void QuickDeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    DCHECK(tlsPtr_.long_jump_context == nullptr);
    tlsPtr_.long_jump_context = context;
  }

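  // Usage sketch: the context is lazily allocated and cached on the thread, so
  // callers borrow it and hand it back rather than owning it:
  //
  //   Context* context = self->GetLongJumpContext();
  //   ...  // use it, e.g. to walk the stack
  //   self->ReleaseLongJumpContext(context);  // cache it for reuse
  //
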
  mirror::ArtMethod* GetCurrentMethod(uint32_t* dex_pc) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetTopOfStack(StackReference<mirror::ArtMethod>* top_method, uintptr_t pc) {
    tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
    tlsPtr_.managed_stack.SetTopQuickFramePc(pc);
  }

  void SetTopOfShadowStack(ShadowFrame* top) {
    tlsPtr_.managed_stack.SetTopShadowFrame(top);
  }

  bool HasManagedStack() const {
    return (tlsPtr_.managed_stack.GetTopQuickFrame() != nullptr) ||
        (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
  }

  // If 'msg' is NULL, no detail message is set.
  void ThrowNewException(const ThrowLocation& throw_location,
                         const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const ThrowLocation& throw_location,
                                const char* exception_class_descriptor,
                                const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionF(const ThrowLocation& throw_location,
                          const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 4, 5)))
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionV(const ThrowLocation& throw_location,
                          const char* exception_class_descriptor, const char* fmt, va_list ap)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

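  // Example of the formatted variant (illustrative; the descriptor is standard
  // JNI class-descriptor syntax):
  //
  //   self->ThrowNewExceptionF(throw_location, "Ljava/lang/IllegalStateException;",
  //                            "thread %s not attached", name);
  //
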
  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return tlsPtr_.jni_env;
  }

  // Convert a jobject into an Object*.
  mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* GetMonitorEnterObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.monitor_enter_object;
  }

  void SetMonitorEnterObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    tlsPtr_.monitor_enter_object = obj;
  }

  // Implements java.lang.Thread.interrupted.
  bool Interrupted() LOCKS_EXCLUDED(wait_mutex_);
  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() LOCKS_EXCLUDED(wait_mutex_);
  bool IsInterruptedLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return interrupted_;
  }
  void Interrupt(Thread* self) LOCKS_EXCLUDED(wait_mutex_);
  void SetInterruptedLocked(bool i) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    interrupted_ = i;
  }
  void Notify() LOCKS_EXCLUDED(wait_mutex_);

 private:
  void NotifyLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_);

 public:
  Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
    return wait_mutex_;
  }

  ConditionVariable* GetWaitConditionVariable() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return wait_cond_;
  }

  Monitor* GetWaitMonitor() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return wait_monitor_;
  }

  void SetWaitMonitor(Monitor* mon) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    wait_monitor_ = mon;
  }

  // Waiter linked-list support.
  Thread* GetWaitNext() const {
    return tlsPtr_.wait_next;
  }

  void SetWaitNext(Thread* next) {
    tlsPtr_.wait_next = next;
  }

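  // Sketch of consulting the wait machinery under wait_mutex_ (MutexLock is the
  // runtime's scoped-lock helper; illustrative only):
  //
  //   MutexLock mu(self, *thread->GetWaitMutex());
  //   if (thread->IsInterruptedLocked()) {
  //     thread->SetInterruptedLocked(false);  // consume the interrupt
  //   }
  //
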
  mirror::ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.class_loader_override;
  }

  void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Create the internal representation of a stack trace, which is more time-
  // and space-efficient to compute than the StackTraceElement[].
  template<bool kTransactionActive>
  jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(
      const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
      jobjectArray output_array = nullptr, int* stack_depth = nullptr)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

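  // The two calls compose (illustrative; this mirrors how Throwable stack
  // traces are typically materialized):
  //
  //   jobject internal = soa.Self()->CreateInternalStackTrace<false>(soa);
  //   jobjectArray trace = Thread::InternalStackTraceToStackTraceElementArray(soa, internal);
  //
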
  void VisitRoots(RootCallback* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ALWAYS_INLINE void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThinLockIdOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadFlagsOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
  }

 private:
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
    size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
    size_t scale;
    size_t shrink;
    if (pointer_size == sizeof(void*)) {
      scale = 1;
      shrink = 1;
    } else if (pointer_size > sizeof(void*)) {
      scale = pointer_size / sizeof(void*);
      shrink = 1;
    } else {
      DCHECK_GT(sizeof(void*), pointer_size);
      scale = 1;
      shrink = sizeof(void*) / pointer_size;
    }
    return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
  }

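  // Worked example of the scaling above (assumption: a 64-bit host compiling
  // for a 32-bit target): pointer_size == 4 and sizeof(void*) == 8, so
  // shrink == 2, and the third pointer-sized slot, at host offset 24, maps to
  // target offset base + 24 / 2 = base + 12.
  //
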
 public:
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> InterpreterEntryPointOffset(size_t interp_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, interpreter_entrypoints) + interp_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> PortableEntryPointOffset(size_t port_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, portable_entrypoints) + port_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> SelfOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ExceptionOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> PeerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> CardTableOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
  }

  // Size of the stack, less any space reserved for stack-overflow handling.
  size_t GetStackSize() const {
    return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
  }

  byte* GetStackEnd() const {
    return tlsPtr_.stack_end;
  }

  // Set the stack end to the value to be used while handling a stack overflow.
  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Set the stack end to the value to be used during regular execution.
  void ResetDefaultStackEnd(bool implicit_overflow_check) {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    if (implicit_overflow_check) {
      // For implicit checks we also need to add in the protected region above the
      // overflow region.
      tlsPtr_.stack_end = tlsPtr_.stack_begin + kStackOverflowImplicitCheckSize;
    } else {
      tlsPtr_.stack_end = tlsPtr_.stack_begin + kRuntimeStackOverflowReservedBytes;
    }
  }

  // Install the protected region for implicit stack checks.
  void InstallImplicitProtection(bool is_main_stack);

  bool IsHandlingStackOverflow() const {
    return tlsPtr_.stack_end == tlsPtr_.stack_begin;
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> StackEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> JniEnvOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopQuickFrameOffset());
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopOfManagedStackPcOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopQuickFramePcOffset());
  }

  const ManagedStack* GetManagedStack() const {
    return &tlsPtr_.managed_stack;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
  }

  ShadowFrame* PopShadowFrame() {
    return tlsPtr_.managed_stack.PopShadowFrame();
  }

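  // Interpreter-style usage sketch (illustrative): a frame is pushed for the
  // duration of a method and popped on the way out.
  //
  //   self->PushShadowFrame(shadow_frame);
  //   ...  // execute the method's bytecodes
  //   self->PopShadowFrame();
  //
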
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopShadowFrameOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopShadowFrameOffset());
  }

  // Number of references allocated in JNI ShadowFrames on this thread.
  size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
  }

  // Number of references in handle scopes on this thread.
  size_t NumHandleReferences();

  // Number of references allocated in handle scopes & JNI shadow frames on this thread.
  size_t NumStackReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return NumHandleReferences() + NumJniShadowFrameReferences();
  }

  // Is the given obj in this thread's stack indirect reference table?
  bool HandleScopeContains(jobject obj) const;

  void HandleScopeVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  HandleScope* GetTopHandleScope() {
    return tlsPtr_.top_handle_scope;
  }

  void PushHandleScope(HandleScope* handle_scope) {
    handle_scope->SetLink(tlsPtr_.top_handle_scope);
    tlsPtr_.top_handle_scope = handle_scope;
  }

  HandleScope* PopHandleScope() {
    HandleScope* handle_scope = tlsPtr_.top_handle_scope;
    DCHECK(handle_scope != nullptr);
    tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
    return handle_scope;
  }

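  // Scoped usage sketch (this is the discipline StackHandleScope-style helpers
  // enforce; pushes and pops must match in LIFO order):
  //
  //   self->PushHandleScope(&scope);
  //   ...  // references in 'scope' are now visible to the GC as roots
  //   DCHECK_EQ(self->PopHandleScope(), &scope);
  //
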
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopHandleScopeOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                top_handle_scope));
  }

  DebugInvokeReq* GetInvokeReq() const {
    return tlsPtr_.debug_invoke_req;
  }

  SingleStepControl* GetSingleStepControl() const {
    return tlsPtr_.single_step_control;
  }

  // Returns the fake exception used to activate deoptimization.
  static mirror::Throwable* GetDeoptimizationException() {
    return reinterpret_cast<mirror::Throwable*>(-1);
  }

  void SetDeoptimizationShadowFrame(ShadowFrame* sf);
  void SetDeoptimizationReturnValue(const JValue& ret_val);

  ShadowFrame* GetAndClearDeoptimizationShadowFrame(JValue* ret_val);

  bool HasDeoptimizationShadowFrame() const {
    return tlsPtr_.deoptimization_shadow_frame != nullptr;
  }

  void SetShadowFrameUnderConstruction(ShadowFrame* sf);
  void ClearShadowFrameUnderConstruction();

  bool HasShadowFrameUnderConstruction() const {
    return tlsPtr_.shadow_frame_under_construction != nullptr;
  }

  std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
    return tlsPtr_.instrumentation_stack;
  }

  std::vector<mirror::ArtMethod*>* GetStackTraceSample() const {
    return tlsPtr_.stack_trace_sample;
  }

  void SetStackTraceSample(std::vector<mirror::ArtMethod*>* sample) {
    tlsPtr_.stack_trace_sample = sample;
  }

  uint64_t GetTraceClockBase() const {
    return tls64_.trace_clock_base;
  }

  void SetTraceClockBase(uint64_t clock_base) {
    tls64_.trace_clock_base = clock_base;
  }

  BaseMutex* GetHeldMutex(LockLevel level) const {
    return tlsPtr_.held_mutexes[level];
  }

  void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
    tlsPtr_.held_mutexes[level] = mutex;
  }

  void RunCheckpointFunction();

  bool ReadFlag(ThreadFlag flag) const {
    return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
  }

  bool TestAllFlags() const {
    return (tls32_.state_and_flags.as_struct.flags != 0);
  }

  void AtomicSetFlag(ThreadFlag flag) {
    tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flag);
  }

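  // Note: for a 32-bit int, -1 ^ flag is ~flag, i.e. an all-ones mask with just
  // this flag's bit cleared, so the fetch-and below clears exactly that bit.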
  void AtomicClearFlag(ThreadFlag flag) {
    tls32_.state_and_flags.as_atomic_int.FetchAndAndSequentiallyConsistent(-1 ^ flag);
  }

  void ResetQuickAllocEntryPointsForThread();

  // Returns the remaining space in the TLAB.
  size_t TlabSize() const;
  // Doesn't check that there is room.
  mirror::Object* AllocTlab(size_t bytes);
  void SetTlab(byte* start, byte* end);
  bool HasTlab() const;

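  // Fast-path allocation sketch (illustrative; the real fast path lives in the
  // allocator entrypoints):
  //
  //   if (self->TlabSize() >= byte_count) {
  //     mirror::Object* obj = self->AllocTlab(byte_count);  // bump-pointer, no lock
  //     ...
  //   }
  //
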
  // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
  // equal to a valid pointer.
  // TODO: does this need to be atomic? I don't think so.
  void RemoveSuspendTrigger() {
    tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
  }

  // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
  // The next time a suspend check is done, it will load from the value at this address
  // and trigger a SIGSEGV.
  void TriggerSuspend() {
    tlsPtr_.suspend_trigger = nullptr;
  }

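  // The implicit suspend check compiled into managed code amounts to
  // (illustrative pseudo-C of the emitted load):
  //
  //   *tlsPtr_.suspend_trigger;  // a harmless self-referential load normally;
  //                              // faults with SIGSEGV once TriggerSuspend()
  //                              // has nulled the pointer
  //
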
  // Push an object onto the allocation stack.
  bool PushOnThreadLocalAllocationStack(mirror::Object* obj);

  // Set the thread local allocation pointers to the given pointers.
  void SetThreadLocalAllocationStack(mirror::Object** start, mirror::Object** end);

  // Resets the thread local allocation pointers.
  void RevokeThreadLocalAllocationStack();

  size_t GetThreadLocalBytesAllocated() const {
    return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
  }

  size_t GetThreadLocalObjectsAllocated() const {
    return tlsPtr_.thread_local_objects;
  }

  void* GetRosAllocRun(size_t index) const {
    return tlsPtr_.rosalloc_runs[index];
  }

  void SetRosAllocRun(size_t index, void* run) {
    tlsPtr_.rosalloc_runs[index] = run;
  }

  bool IsExceptionReportedToInstrumentation() const {
    return tls32_.is_exception_reported_to_instrumentation_;
  }

  void SetExceptionReportedToInstrumentation(bool reported) {
    tls32_.is_exception_reported_to_instrumentation_ = reported;
  }

 private:
  explicit Thread(bool daemon);
  ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
                           Locks::thread_suspend_count_lock_);
  void Destroy();

  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);

  template<bool kTransactionActive>
  void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
                jobject thread_name, jint thread_priority)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread
  // and Dbg::Disconnected.
  ThreadState SetStateUnsafe(ThreadState new_state) {
    ThreadState old_state = GetState();
    tls32_.state_and_flags.as_struct.state = new_state;
    return old_state;
  }

  void VerifyStackImpl() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DumpState(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DumpStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  // Like Thread::Dump(std::cerr).
  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions(ScopedObjectAccess& soa)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
  void InitCardTable();
  void InitCpu();
  void CleanupCpu();
  void InitTlsEntryPoints();
  void InitTid();
  void InitPthreadKeySelf();
  void InitStackHwm();

  void SetUpAlternateSignalStack();
  void TearDownAlternateSignalStack();

  // 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
  // change from being Suspended to Runnable without a suspend request occurring.
  union PACKED(4) StateAndFlags {
    StateAndFlags() {}
    struct PACKED(4) {
      // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
      // ThreadFlags for bit field meanings.
      volatile uint16_t flags;
      // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
      // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
      // operation. If a thread is suspended and a suspend_request is present, a thread may not
      // change to Runnable as a GC or other operation is in progress.
      volatile uint16_t state;
    } as_struct;
    AtomicInteger as_atomic_int;
    volatile int32_t as_int;

   private:
    // gcc does not handle struct with volatile member assignments correctly.
    // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
    DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
  };
  COMPILE_ASSERT(sizeof(StateAndFlags) == sizeof(int32_t), weird_state_and_flags_size);

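  // Illustrative CAS-based Suspended -> Runnable transition enabled by the
  // union layout above (a sketch, not the literal implementation):
  //
  //   union StateAndFlags old_sf;
  //   old_sf.as_int = tls32_.state_and_flags.as_int;
  //   if ((old_sf.as_struct.flags & kSuspendRequest) == 0) {
  //     union StateAndFlags new_sf;
  //     new_sf.as_int = old_sf.as_int;
  //     new_sf.as_struct.state = kRunnable;
  //     // One 32-bit CAS covers both the state and the flags.
  //     tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakSequentiallyConsistent(
  //         old_sf.as_int, new_sf.as_int);
  //   }
  //
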
  static void ThreadExitCallback(void* arg);

  // Maximum number of checkpoint functions.
  static constexpr uint32_t kMaxCheckpoints = 3;

  // Has Thread::Startup been called?
  static bool is_started_;

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Used to notify threads that they should attempt to resume; they will suspend again if
  // their suspend count is > 0.
  static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  /***********************************************************************************************/
  // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account
  // for pointer size differences. To encourage shorter encoding, more frequently used values
  // appear first if possible.
  /***********************************************************************************************/

  struct PACKED(4) tls_32bit_sized_values {
    // We have no control over the size of 'bool', but want our boolean fields
    // to be 4-byte quantities.
    typedef uint32_t bool32_t;

    explicit tls_32bit_sized_values(bool is_daemon) :
      suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
      daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
      thread_exit_check_count(0), is_exception_reported_to_instrumentation_(false) {
    }

    union StateAndFlags state_and_flags;
    COMPILE_ASSERT(sizeof(union StateAndFlags) == sizeof(int32_t),
                   sizeof_state_and_flags_and_int32_are_different);

    // A non-zero value is used to tell the current thread to enter a safe point
    // at the next poll.
    int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // How much of 'suspend_count_' is by request of the debugger, used to set things right
    // when the debugger detaches. Must be <= suspend_count_.
    int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Thin lock thread id. This is a small integer used by the thin lock implementation.
    // This is not to be confused with the native thread's tid, nor is it the value returned
    // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
    // important difference between this id and the ids visible to managed code is that these
    // ones get reused (to ensure that they fit in the number of bits available).
    uint32_t thin_lock_thread_id;

    // System thread id.
    uint32_t tid;

    // Is the thread a daemon?
    const bool32_t daemon;

    // A boolean telling us whether we're recursively throwing OOME.
    bool32_t throwing_OutOfMemoryError;

    // A positive value implies we're in a region where thread suspension isn't expected.
    uint32_t no_thread_suspension;

    // How many times has our pthread key's destructor been called?
    uint32_t thread_exit_check_count;

    // When true this field indicates that the exception associated with this thread has already
    // been reported to instrumentation.
    bool32_t is_exception_reported_to_instrumentation_;
  } tls32_;
952
953 struct PACKED(8) tls_64bit_sized_values {
954 tls_64bit_sized_values() : trace_clock_base(0), deoptimization_return_value() {
955 }
956
957 // The clock base used for tracing.
958 uint64_t trace_clock_base;
959
960 // Return value used by deoptimization.
961 JValue deoptimization_return_value;
962
963 RuntimeStats stats;
964 } tls64_;

  struct PACKED(4) tls_ptr_sized_values {
    tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
      managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), self(nullptr), opeer(nullptr),
      jpeer(nullptr), stack_begin(nullptr), stack_size(0), throw_location(),
      stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
      top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
      instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
      deoptimization_shadow_frame(nullptr), shadow_frame_under_construction(nullptr), name(nullptr),
      pthread_self(0), last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
      thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
      thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr) {
    }

    // The biased card table, see CardTable for details.
    byte* card_table;
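
    // Because the table is pre-biased by the heap base, a write barrier can
    // dirty a card straight from an object address; a sketch, where kCardShift
    // and kCardDirty are illustrative names:
    //
    //   card_table[reinterpret_cast<uintptr_t>(obj) >> kCardShift] = kCardDirty;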

    // The pending exception or NULL.
    mirror::Throwable* exception;

    // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
    // We leave extra space so there's room for the code that throws StackOverflowError.
    byte* stack_end;
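
    // This reduces an explicit stack overflow check to a single compare; a
    // rough sketch, where 'sp' stands for the current stack pointer:
    //
    //   if (UNLIKELY(sp < tlsPtr_.stack_end)) {
    //     ThrowStackOverflowError(self);  // The reserved space makes this throw safe.
    //   }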

    // The top of the managed stack, often manipulated directly by compiler-generated code.
    ManagedStack managed_stack;

    // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check. It is
    // normally set to the address of itself.
    uintptr_t* suspend_trigger;
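
    // With implicit suspend checks, compiled code polls by simply loading
    // through this pointer; once the field is cleared to 0 that load faults,
    // and the fault handler turns the SIGSEGV into a suspend check. Roughly:
    //
    //   *reinterpret_cast<volatile uintptr_t*>(tlsPtr_.suspend_trigger);  // Faults when cleared.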

    // Every thread may have an associated JNI environment.
    JNIEnvExt* jni_env;

    // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
    // is easy but getting the address of Thread::Current is hard. This field can be read off of
    // Thread::Current to give the address.
    Thread* self;
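
    // Materializing the Thread* itself is then a single load of this field
    // relative to the thread-local storage base; conceptually (the offset name
    // is illustrative):
    //
    //   Thread* self = *reinterpret_cast<Thread**>(tls_base + THREAD_SELF_OFFSET);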

    // Our managed peer (an instance of java.lang.Thread). The jobject version is used during
    // thread startup, until the thread is registered and the local opeer is used.
    mirror::Object* opeer;
    jobject jpeer;

    // The "lowest addressable byte" of the stack.
    byte* stack_begin;

    // Size of the stack.
    size_t stack_size;

    // The location the current exception was thrown from.
    ThrowLocation throw_location;

    // Pointer to the previous stack trace captured by the sampling profiler.
    std::vector<mirror::ArtMethod*>* stack_trace_sample;

    // The next thread in the wait set this thread is part of, or NULL if not waiting.
    Thread* wait_next;

    // If we're blocked in MonitorEnter, this is the object we're trying to lock.
    mirror::Object* monitor_enter_object;

    // Top of linked list of handle scopes or nullptr for none.
    HandleScope* top_handle_scope;

    // Needed to get the right ClassLoader in JNI_OnLoad, but also
    // useful for testing.
    mirror::ClassLoader* class_loader_override;

    // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
    Context* long_jump_context;

    // Additional stack used by method instrumentation to store method and return pc values.
    // Stored as a pointer since std::deque is not PACKED.
    std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;

    // JDWP invoke-during-breakpoint support.
    DebugInvokeReq* debug_invoke_req;

    // JDWP single-stepping support.
    SingleStepControl* single_step_control;

    // Shadow frame stack that is used temporarily during the deoptimization of a method.
    ShadowFrame* deoptimization_shadow_frame;

    // Shadow frame stack that is currently under construction but not yet on the stack.
    ShadowFrame* shadow_frame_under_construction;

    // A cached copy of the java.lang.Thread's name.
    std::string* name;

    // A cached pthread_t for the pthread underlying this Thread*.
    pthread_t pthread_self;

    // If tls32_.no_thread_suspension is > 0, what is causing that assertion.
    const char* last_no_thread_suspension_cause;

    // Pending checkpoint functions, or NULL if none is pending. Installation is guarded by
    // Locks::thread_suspend_count_lock_.
    Closure* checkpoint_functions[kMaxCheckpoints];
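
    // A requesting thread installs a closure here and raises the checkpoint
    // flag in state_and_flags; the target runs it at its next suspend check.
    // A sketch of the consuming side (illustrative only):
    //
    //   for (size_t i = 0; i < kMaxCheckpoints; ++i) {
    //     Closure* checkpoint = checkpoint_functions[i];
    //     if (checkpoint != nullptr) {
    //       checkpoint->Run(this);
    //     }
    //   }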

    // Entrypoint function pointers.
    // TODO: move this to more of a global offset table model to avoid per-thread duplication.
    InterpreterEntryPoints interpreter_entrypoints;
    JniEntryPoints jni_entrypoints;
    PortableEntryPoints portable_entrypoints;
    QuickEntryPoints quick_entrypoints;

    // Thread-local allocation pointers; thread_local_objects counts the objects
    // allocated from the current buffer.
    byte* thread_local_start;
    byte* thread_local_pos;
    byte* thread_local_end;
    size_t thread_local_objects;
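
    // Together these make the common allocation path a bump of
    // thread_local_pos; a sketch of the fast path (illustrative, object
    // initialization omitted):
    //
    //   byte* new_pos = thread_local_pos + alloc_size;
    //   if (LIKELY(new_pos <= thread_local_end)) {
    //     byte* result = thread_local_pos;
    //     thread_local_pos = new_pos;
    //     ++thread_local_objects;
    //     return result;
    //   }
    //   // Otherwise: fall back to the shared heap and refill the buffer.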

    // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
    void* rosalloc_runs[kNumRosAllocThreadLocalSizeBrackets];

    // Thread-local allocation stack data/routines.
    mirror::Object** thread_local_alloc_stack_top;
    mirror::Object** thread_local_alloc_stack_end;

    // Support for Mutex lock hierarchy bug detection.
    BaseMutex* held_mutexes[kLockLevelCount];
  } tlsPtr_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Condition variable waited upon during a wait.
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on, or NULL if not waiting.
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);

  // Thread "interrupted" status; stays raised until queried or thrown.
  bool interrupted_ GUARDED_BY(wait_mutex_);
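
  // Interrupting a thread then amounts to raising the flag under wait_mutex_
  // and waking any wait in progress; a sketch, not the actual method body:
  //
  //   MutexLock mu(self, *wait_mutex_);
  //   interrupted_ = true;
  //   if (wait_cond_ != nullptr) {
  //     wait_cond_->Broadcast(self);
  //   }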

  friend class Dbg;  // For SetStateUnsafe.
  friend class gc::collector::SemiSpace;  // For getting stack traces.
  friend class Runtime;  // For CreatePeer.
  friend class QuickExceptionHandler;  // For dumping the stack.
  friend class ScopedThreadStateChange;
  friend class SignalCatcher;  // For SetStateUnsafe.
  friend class StubTest;  // For accessing entrypoints.
  friend class ThreadList;  // For ~Thread and Destroy.

  friend class EntrypointsOrderTest;  // To test the order of tls entries.

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const ThreadState& state);

}  // namespace art

#endif  // ART_RUNTIME_THREAD_H_