/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "thread_list.h"

#include <backtrace/BacktraceMap.h>
#include <dirent.h>
#include <ScopedLocalRef.h>
#include <ScopedUtfChars.h>
#include <sys/types.h>
#include <unistd.h>

#include <sstream>

#include "android-base/stringprintf.h"

#include "base/histogram-inl.h"
#include "base/mutex-inl.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "debugger.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/reference_processor.h"
#include "jni_internal.h"
#include "lock_word.h"
#include "monitor.h"
#include "native_stack_dump.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "trace.h"
#include "well_known_classes.h"

#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#endif  // ART_USE_FUTEXES

namespace art {

using android::base::StringPrintf;

static constexpr uint64_t kLongThreadSuspendThreshold = MsToNs(5);
static constexpr uint64_t kThreadSuspendTimeoutMs = 30 * 1000;  // 30s.
// Use 0 since we want to yield to prevent blocking for an unpredictable amount of time.
static constexpr useconds_t kThreadSuspendInitialSleepUs = 0;
static constexpr useconds_t kThreadSuspendMaxYieldUs = 3000;
static constexpr useconds_t kThreadSuspendMaxSleepUs = 5000;

// Whether we should try to dump the native stack of unattached threads. See commit ed8b723 for
// some history.
// Turned off again. b/29248079
static constexpr bool kDumpUnattachedThreadNativeStackForSigQuit = false;

ThreadList::ThreadList()
    : suspend_all_count_(0),
      debug_suspend_all_count_(0),
      unregistering_count_(0),
      suspend_all_historam_("suspend all histogram", 16, 64),
      long_suspend_(false),
      empty_checkpoint_barrier_(new Barrier(0)) {
  CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
}

ThreadList::~ThreadList() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  // Detach the current thread if necessary. If we failed to start, there might not be any threads.
  // We need to detach the current thread here in case there's another thread waiting to join with
  // us.
  bool contains = false;
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    contains = Contains(self);
  }
  if (contains) {
    Runtime::Current()->DetachCurrentThread();
  }
  WaitForOtherNonDaemonThreadsToExit();
  // Disable GC and wait for GC to complete in case there are still daemon threads doing
  // allocations.
  gc::Heap* const heap = Runtime::Current()->GetHeap();
  heap->DisableGCForShutdown();
  // In case a GC is in progress, wait for it to finish.
  heap->WaitForGcToComplete(gc::kGcCauseBackground, Thread::Current());
  // TODO: there's an unaddressed race here where a thread may attach during shutdown, see
  // Thread::Init.
  SuspendAllDaemonThreadsForShutdown();
}

bool ThreadList::Contains(Thread* thread) {
  return find(list_.begin(), list_.end(), thread) != list_.end();
}

bool ThreadList::Contains(pid_t tid) {
  for (const auto& thread : list_) {
    if (thread->GetTid() == tid) {
      return true;
    }
  }
  return false;
}

pid_t ThreadList::GetLockOwner() {
  return Locks::thread_list_lock_->GetExclusiveOwnerTid();
}

void ThreadList::DumpNativeStacks(std::ostream& os) {
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
  for (const auto& thread : list_) {
    os << "DUMPING THREAD " << thread->GetTid() << "\n";
    DumpNativeStack(os, thread->GetTid(), map.get(), "\t");
    os << "\n";
  }
}

void ThreadList::DumpForSigQuit(std::ostream& os) {
  {
    ScopedObjectAccess soa(Thread::Current());
    // Only print if we have samples.
    if (suspend_all_historam_.SampleSize() > 0) {
      Histogram<uint64_t>::CumulativeData data;
      suspend_all_historam_.CreateHistogram(&data);
      suspend_all_historam_.PrintConfidenceIntervals(os, 0.99, data);  // Dump time to suspend.
    }
  }
  bool dump_native_stack = Runtime::Current()->GetDumpNativeStackOnSigQuit();
  Dump(os, dump_native_stack);
  DumpUnattachedThreads(os, dump_native_stack && kDumpUnattachedThreadNativeStackForSigQuit);
}

static void DumpUnattachedThread(std::ostream& os, pid_t tid, bool dump_native_stack)
    NO_THREAD_SAFETY_ANALYSIS {
  // TODO: No thread safety analysis as DumpState with a null thread won't access fields, should
  // refactor DumpState to avoid skipping analysis.
  Thread::DumpState(os, nullptr, tid);
  DumpKernelStack(os, tid, "  kernel: ", false);
  if (dump_native_stack) {
    DumpNativeStack(os, tid, nullptr, "  native: ");
  }
  os << "\n";
}

void ThreadList::DumpUnattachedThreads(std::ostream& os, bool dump_native_stack) {
  DIR* d = opendir("/proc/self/task");
  if (!d) {
    return;
  }

  Thread* self = Thread::Current();
  dirent* e;
  while ((e = readdir(d)) != nullptr) {
    char* end;
    pid_t tid = strtol(e->d_name, &end, 10);
    if (!*end) {
      bool contains;
      {
        MutexLock mu(self, *Locks::thread_list_lock_);
        contains = Contains(tid);
      }
      if (!contains) {
        DumpUnattachedThread(os, tid, dump_native_stack);
      }
    }
  }
  closedir(d);
}

// Dump checkpoint timeout in milliseconds. Larger amount on the target, since the device could be
// overloaded with ANR dumps.
static constexpr uint32_t kDumpWaitTimeout = kIsTargetBuild ? 100000 : 20000;

// A closure used by ThreadList::Dump.
class DumpCheckpoint FINAL : public Closure {
 public:
  DumpCheckpoint(std::ostream* os, bool dump_native_stack)
      : os_(os),
        barrier_(0),
        backtrace_map_(dump_native_stack ? BacktraceMap::Create(getpid()) : nullptr),
        dump_native_stack_(dump_native_stack) {}

  void Run(Thread* thread) OVERRIDE {
    // Note thread and self may not be equal if thread was already suspended at the point of the
    // request.
    Thread* self = Thread::Current();
    CHECK(self != nullptr);
    std::ostringstream local_os;
    {
      ScopedObjectAccess soa(self);
      thread->Dump(local_os, dump_native_stack_, backtrace_map_.get());
    }
    local_os << "\n";
    {
      // Use the logging lock to ensure serialization when writing to the common ostream.
      MutexLock mu(self, *Locks::logging_lock_);
      *os_ << local_os.str();
    }
    barrier_.Pass(self);
  }

  void WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint) {
    Thread* self = Thread::Current();
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    bool timed_out = barrier_.Increment(self, threads_running_checkpoint, kDumpWaitTimeout);
    if (timed_out) {
      // Avoid a recursive abort.
      LOG((kIsDebugBuild && (gAborting == 0)) ? ::android::base::FATAL : ::android::base::ERROR)
          << "Unexpected timeout during dump checkpoint.";
    }
  }

 private:
  // The common stream that will accumulate all the dumps.
  std::ostream* const os_;
  // The barrier to be passed through and for the requestor to wait upon.
  Barrier barrier_;
  // A backtrace map, so that all threads use a shared info and don't reacquire/parse separately.
  std::unique_ptr<BacktraceMap> backtrace_map_;
  // Whether we should dump the native stack.
  const bool dump_native_stack_;
};

void ThreadList::Dump(std::ostream& os, bool dump_native_stack) {
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    os << "DALVIK THREADS (" << list_.size() << "):\n";
  }
  if (self != nullptr) {
    DumpCheckpoint checkpoint(&os, dump_native_stack);
    size_t threads_running_checkpoint;
    {
      // Use SOA to prevent deadlocks if multiple threads are calling Dump() at the same time.
      ScopedObjectAccess soa(self);
      threads_running_checkpoint = RunCheckpoint(&checkpoint);
    }
    if (threads_running_checkpoint != 0) {
      checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
    }
  } else {
    DumpUnattachedThreads(os, dump_native_stack);
  }
}

void ThreadList::AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2) {
  MutexLock mu(self, *Locks::thread_list_lock_);
  MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
  for (const auto& thread : list_) {
    if (thread != ignore1 && thread != ignore2) {
      CHECK(thread->IsSuspended())
            << "\nUnsuspended thread: <<" << *thread << "\n"
            << "self: <<" << *Thread::Current();
    }
  }
}

#if HAVE_TIMED_RWLOCK
// Attempt to rectify locks so that we dump thread list with required locks before exiting.
NO_RETURN static void UnsafeLogFatalForThreadSuspendAllTimeout() {
  Runtime* runtime = Runtime::Current();
  std::ostringstream ss;
  ss << "Thread suspend timeout\n";
  Locks::mutator_lock_->Dump(ss);
  ss << "\n";
  runtime->GetThreadList()->Dump(ss);
  LOG(FATAL) << ss.str();
  exit(0);
}
#endif

// Unlike suspending all threads where we can wait to acquire the mutator_lock_, suspending an
// individual thread requires polling. delay_us is the requested sleep wait. If delay_us is 0 then
// we use sched_yield instead of calling usleep.
static void ThreadSuspendSleep(useconds_t delay_us) {
  if (delay_us == 0) {
    sched_yield();
  } else {
    usleep(delay_us);
  }
}
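
// Illustrative only (not part of the original file): the suspend paths below pair
// ThreadSuspendSleep() with an exponential backoff, spinning (yielding) first and sleeping only
// once the yield budget is exhausted. A minimal sketch of that pattern, using the constants
// defined above (|Done| is a hypothetical predicate):
//
//   useconds_t sleep_us = kThreadSuspendInitialSleepUs;  // 0, i.e. sched_yield() at first.
//   const uint64_t start = NanoTime();
//   while (!Done()) {
//     ThreadSuspendSleep(sleep_us);
//     if (sleep_us == 0 && NanoTime() - start > kThreadSuspendMaxYieldUs * 1000) {
//       sleep_us = kThreadSuspendMaxYieldUs / 2;  // Stop spinning, start sleeping.
//     }
//     sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);  // Cap the sleep time.
//   }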

size_t ThreadList::RunCheckpoint(Closure* checkpoint_function, Closure* callback) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);

  std::vector<Thread*> suspended_count_modified_threads;
  size_t count = 0;
  {
    // Call a checkpoint function for each thread; threads which are suspended get their
    // checkpoint manually called.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    count = list_.size();
    for (const auto& thread : list_) {
      if (thread != self) {
        while (true) {
          if (thread->RequestCheckpoint(checkpoint_function)) {
            // This thread will run its checkpoint some time in the near future.
            break;
          } else {
            // We are probably suspended, try to make sure that we stay suspended.
            // The thread switched back to runnable.
            if (thread->GetState() == kRunnable) {
              // Spurious fail, try again.
              continue;
            }
            thread->ModifySuspendCount(self, +1, nullptr, false);
            suspended_count_modified_threads.push_back(thread);
            break;
          }
        }
      }
    }
    // Run the callback to be called inside this critical section.
    if (callback != nullptr) {
      callback->Run(self);
    }
  }

  // Run the checkpoint on ourself while we wait for threads to suspend.
  checkpoint_function->Run(self);

  // Run the checkpoint on the suspended threads.
  for (const auto& thread : suspended_count_modified_threads) {
    if (!thread->IsSuspended()) {
      if (ATRACE_ENABLED()) {
        std::ostringstream oss;
        thread->ShortDump(oss);
        ATRACE_BEGIN((std::string("Waiting for suspension of thread ") + oss.str()).c_str());
      }
      // Busy wait until the thread is suspended.
      const uint64_t start_time = NanoTime();
      do {
        ThreadSuspendSleep(kThreadSuspendInitialSleepUs);
      } while (!thread->IsSuspended());
      const uint64_t total_delay = NanoTime() - start_time;
      // Shouldn't need to wait for longer than 1000 microseconds.
      constexpr uint64_t kLongWaitThreshold = MsToNs(1);
      ATRACE_END();
      if (UNLIKELY(total_delay > kLongWaitThreshold)) {
        LOG(WARNING) << "Long wait of " << PrettyDuration(total_delay) << " for "
            << *thread << " suspension!";
      }
    }
    // We know for sure that the thread is suspended at this point.
    checkpoint_function->Run(thread);
    {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }
  }

  {
    // Imitate ResumeAll: threads may be waiting on Thread::resume_cond_ since we raised their
    // suspend count. Now the suspend_count_ is lowered so we must do the broadcast.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  return count;
}
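
// Illustrative usage sketch (not part of the original file; |VisitThreadCheckpoint| and its body
// are hypothetical). A caller subclasses Closure, then asks every thread to run it:
//
//   class VisitThreadCheckpoint : public Closure {
//    public:
//     void Run(Thread* thread) OVERRIDE {
//       // Runs either on |thread| itself (if runnable) or on the requester (if suspended).
//     }
//   };
//   VisitThreadCheckpoint closure;
//   size_t count = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);
//
// |count| is the number of threads asked to run the closure; see DumpCheckpoint above for a real
// user that additionally waits on a Barrier until all of them have passed through.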

size_t ThreadList::RunEmptyCheckpoint(std::vector<uint32_t>& runnable_thread_ids) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);

  size_t count = 0;
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (Thread* thread : list_) {
      if (thread != self) {
        while (true) {
          if (thread->RequestEmptyCheckpoint()) {
            // This thread will run an empty checkpoint (decrement the empty checkpoint barrier)
            // some time in the near future.
            ++count;
            if (kIsDebugBuild) {
              runnable_thread_ids.push_back(thread->GetThreadId());
            }
            break;
          }
          if (thread->GetState() != kRunnable) {
            // It's been seen suspended; we are done because it must not be in the middle of a
            // mutator heap access.
            break;
          }
        }
      }
    }
  }

  // Wake up the threads blocking for weak ref access so that they will respond to the empty
  // checkpoint request. Otherwise we will hang as they are blocking in the kRunnable state.
  Runtime::Current()->GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint*/true);

  return count;
}
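
// Illustrative only (not part of the original file): the concurrent copying GC is the expected
// caller. A sketch, assuming the caller then waits on the empty checkpoint barrier created in the
// ThreadList constructor:
//
//   std::vector<uint32_t> runnable_ids;  // Filled only in debug builds, for diagnostics.
//   size_t count = thread_list->RunEmptyCheckpoint(runnable_ids);
//   // Wait until |count| runnable threads have each passed the barrier, which proves that every
//   // thread has crossed a point where it holds no stale view of the heap.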

// Request that a checkpoint function be run on all active (non-suspended)
// threads. Returns the number of successful requests.
size_t ThreadList::RunCheckpointOnRunnableThreads(Closure* checkpoint_function) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  CHECK_NE(self->GetState(), kRunnable);

  size_t count = 0;
  {
    // Call a checkpoint function for each non-suspended thread.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : list_) {
      if (thread != self) {
        if (thread->RequestCheckpoint(checkpoint_function)) {
          // This thread will run its checkpoint some time in the near future.
          count++;
        }
      }
    }
  }

  // Return the number of threads that will run the checkpoint function.
  return count;
}

// A checkpoint/suspend-all hybrid to switch thread roots from
// from-space to to-space refs. Used to synchronize threads at a point
// to mark the initiation of marking while maintaining the to-space
// invariant.
size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
                                   Closure* flip_callback,
                                   gc::collector::GarbageCollector* collector) {
  TimingLogger::ScopedTiming split("ThreadListFlip", collector->GetTimings());
  const uint64_t start_time = NanoTime();
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  CHECK_NE(self->GetState(), kRunnable);

  collector->GetHeap()->ThreadFlipBegin(self);  // Sync with JNI critical calls.

  SuspendAllInternal(self, self, nullptr);

  // Run the flip callback for the collector.
  Locks::mutator_lock_->ExclusiveLock(self);
  flip_callback->Run(self);
  Locks::mutator_lock_->ExclusiveUnlock(self);
  collector->RegisterPause(NanoTime() - start_time);

  // Resume runnable threads.
  size_t runnable_thread_count = 0;
  std::vector<Thread*> other_threads;
  {
    TimingLogger::ScopedTiming split2("ResumeRunnableThreads", collector->GetTimings());
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    --suspend_all_count_;
    for (const auto& thread : list_) {
      // Set the flip function for all threads because Thread::DumpState/DumpJavaStack() (invoked
      // by a checkpoint) may cause the flip function to be run for a runnable/suspended thread
      // before a runnable thread runs it for itself or we run it for a suspended thread below.
      thread->SetFlipFunction(thread_flip_visitor);
      if (thread == self) {
        continue;
      }
      // Resume early the threads that were runnable but are suspended just for this thread flip or
      // about to transition from non-runnable (e.g. kNative at the SOA entry in a JNI function) to
      // runnable (both cases waiting inside Thread::TransitionFromSuspendedToRunnable), or waiting
      // for the thread flip to end at the JNI critical section entry (kWaitingForGcThreadFlip).
      ThreadState state = thread->GetState();
      if ((state == kWaitingForGcThreadFlip || thread->IsTransitioningToRunnable()) &&
          thread->GetSuspendCount() == 1) {
        // The thread will resume right after the broadcast.
        thread->ModifySuspendCount(self, -1, nullptr, false);
        ++runnable_thread_count;
      } else {
        other_threads.push_back(thread);
      }
    }
    Thread::resume_cond_->Broadcast(self);
  }

  collector->GetHeap()->ThreadFlipEnd(self);

  // Run the closure on the other threads and let them resume.
  {
    TimingLogger::ScopedTiming split3("FlipOtherThreads", collector->GetTimings());
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    for (const auto& thread : other_threads) {
      Closure* flip_func = thread->GetFlipFunction();
      if (flip_func != nullptr) {
        flip_func->Run(thread);
      }
    }
    // Run it for self.
    Closure* flip_func = self->GetFlipFunction();
    if (flip_func != nullptr) {
      flip_func->Run(self);
    }
  }

  // Resume other threads.
  {
    TimingLogger::ScopedTiming split4("ResumeOtherThreads", collector->GetTimings());
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : other_threads) {
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }
    Thread::resume_cond_->Broadcast(self);
  }

  return runnable_thread_count + other_threads.size() + 1;  // +1 for self.
}
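
// Illustrative summary (not part of the original file) of the flip protocol implemented above:
//   1. ThreadFlipBegin() synchronizes with JNI critical sections, then SuspendAllInternal()
//      suspends every mutator thread.
//   2. |flip_callback| runs with the mutator lock held exclusively, switching the collector's
//      view from from-space to to-space.
//   3. Every thread has the flip closure installed; threads that were stopped only for the flip
//      are resumed immediately and run the closure themselves on wakeup.
//   4. The remaining (genuinely suspended) threads have the closure run on their behalf, then
//      are resumed.
// The return value counts every thread whose roots were flipped, including the caller.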

void ThreadList::SuspendAll(const char* cause, bool long_suspend) {
  Thread* self = Thread::Current();

  if (self != nullptr) {
    VLOG(threads) << *self << " SuspendAll for " << cause << " starting...";
  } else {
    VLOG(threads) << "Thread[null] SuspendAll for " << cause << " starting...";
  }
  {
    ScopedTrace trace("Suspending mutator threads");
    const uint64_t start_time = NanoTime();

    SuspendAllInternal(self, self);
    // All threads are known to have suspended (but a thread may still own the mutator lock).
    // Make sure this thread grabs exclusive access to the mutator lock and its protected data.
#if HAVE_TIMED_RWLOCK
    while (true) {
      if (Locks::mutator_lock_->ExclusiveLockWithTimeout(self, kThreadSuspendTimeoutMs, 0)) {
        break;
      } else if (!long_suspend_) {
        // Reading long_suspend without the mutator lock is slightly racy; in some rare cases this
        // could result in a thread suspend timeout.
        // Timeout if we wait more than kThreadSuspendTimeoutMs milliseconds.
        UnsafeLogFatalForThreadSuspendAllTimeout();
      }
    }
#else
    Locks::mutator_lock_->ExclusiveLock(self);
#endif

    long_suspend_ = long_suspend;

    const uint64_t end_time = NanoTime();
    const uint64_t suspend_time = end_time - start_time;
    suspend_all_historam_.AdjustAndAddValue(suspend_time);
    if (suspend_time > kLongThreadSuspendThreshold) {
      LOG(WARNING) << "Suspending all threads took: " << PrettyDuration(suspend_time);
    }

    if (kDebugLocking) {
      // Debug check that all threads are suspended.
      AssertThreadsAreSuspended(self, self);
    }
  }
  ATRACE_BEGIN((std::string("Mutator threads suspended for ") + cause).c_str());

  if (self != nullptr) {
    VLOG(threads) << *self << " SuspendAll complete";
  } else {
    VLOG(threads) << "Thread[null] SuspendAll complete";
  }
}
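
// Illustrative usage sketch (not part of the original file): SuspendAll() must always be paired
// with ResumeAll() on the same thread, which also ends the trace event begun above.
//
//   ThreadList* thread_list = Runtime::Current()->GetThreadList();
//   thread_list->SuspendAll("Example pause");  // Hypothetical cause string.
//   // All mutator threads are suspended and we hold the mutator lock exclusively, so
//   // runtime-wide state can be inspected or modified safely here.
//   thread_list->ResumeAll();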

// Ensures all threads running Java suspend and that those not running Java don't start.
// Debugger thread might be set to kRunnable for a short period of time after the
// SuspendAllInternal. This is safe because it will be set back to suspended state before
// the SuspendAll returns.
void ThreadList::SuspendAllInternal(Thread* self,
                                    Thread* ignore1,
                                    Thread* ignore2,
                                    bool debug_suspend) {
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  if (kDebugLocking && self != nullptr) {
    CHECK_NE(self->GetState(), kRunnable);
  }

  // First request that all threads suspend, then wait for them to suspend before
  // returning. This suspension scheme also relies on other behaviour:
  // 1. Threads cannot be deleted while they are suspended or have a suspend-
  //    request flag set (see Unregister() below).
  // 2. When threads are created, they are created in a suspended state (actually
  //    kNative) and will never begin executing Java code without first checking
  //    the suspend-request flag.

  // The atomic counter for number of threads that need to pass the barrier.
  AtomicInteger pending_threads;
  uint32_t num_ignored = 0;
  if (ignore1 != nullptr) {
    ++num_ignored;
  }
  if (ignore2 != nullptr && ignore1 != ignore2) {
    ++num_ignored;
  }
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    ++suspend_all_count_;
    if (debug_suspend) {
      ++debug_suspend_all_count_;
    }
    pending_threads.StoreRelaxed(list_.size() - num_ignored);
    // Increment everybody's suspend count (except those that should be ignored).
    for (const auto& thread : list_) {
      if (thread == ignore1 || thread == ignore2) {
        continue;
      }
      VLOG(threads) << "requesting thread suspend: " << *thread;
      thread->ModifySuspendCount(self, +1, &pending_threads, debug_suspend);

      // Must install the pending_threads counter first, then check thread->IsSuspend() and clear
      // the counter. Otherwise there's a race with Thread::TransitionFromRunnableToSuspended()
      // that can lead a thread to miss a call to PassActiveSuspendBarriers().
      if (thread->IsSuspended()) {
        // Only clear the counter for the current thread.
        thread->ClearSuspendBarrier(&pending_threads);
        pending_threads.FetchAndSubSequentiallyConsistent(1);
      }
    }
  }

  // Wait for the barrier to be passed by all runnable threads. This wait
  // is done with a timeout so that we can detect problems.
#if ART_USE_FUTEXES
  timespec wait_timeout;
  InitTimeSpec(false, CLOCK_MONOTONIC, kIsDebugBuild ? 50000 : 10000, 0, &wait_timeout);
#endif
  const uint64_t start_time = NanoTime();
  while (true) {
    int32_t cur_val = pending_threads.LoadRelaxed();
    if (LIKELY(cur_val > 0)) {
#if ART_USE_FUTEXES
      if (futex(pending_threads.Address(), FUTEX_WAIT, cur_val, &wait_timeout, nullptr, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          if (errno == ETIMEDOUT) {
            LOG(kIsDebugBuild ? ::android::base::FATAL : ::android::base::ERROR)
                << "Timed out waiting for threads to suspend, waited for "
                << PrettyDuration(NanoTime() - start_time);
          } else {
            PLOG(FATAL) << "futex wait failed for SuspendAllInternal()";
          }
        }
      }  // else re-check pending_threads in the next iteration (this may be a spurious wake-up).
#else
      // Spin wait. This is likely to be slow, but on most architectures ART_USE_FUTEXES is set.
      UNUSED(start_time);
#endif
    } else {
      CHECK_EQ(cur_val, 0);
      break;
    }
  }
}
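
// Illustrative note (not part of the original file) on the barrier above: each
// ModifySuspendCount(+1, &pending_threads, ...) installs |pending_threads| as the target thread's
// active suspend barrier. A runnable thread that acts on the suspend request decrements the
// counter (see Thread::PassActiveSuspendBarriers) and futex-wakes the requester once it reaches
// zero; threads already suspended are decremented directly by the loop above. The futex wait
// therefore returns only when every runnable thread has genuinely reached a suspended state.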

void ThreadList::ResumeAll() {
  Thread* self = Thread::Current();

  if (self != nullptr) {
    VLOG(threads) << *self << " ResumeAll starting";
  } else {
    VLOG(threads) << "Thread[null] ResumeAll starting";
  }

  ATRACE_END();

  ScopedTrace trace("Resuming mutator threads");

  if (kDebugLocking) {
    // Debug check that all threads are suspended.
    AssertThreadsAreSuspended(self, self);
  }

  long_suspend_ = false;

  Locks::mutator_lock_->ExclusiveUnlock(self);
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    --suspend_all_count_;
    // Decrement the suspend counts for all threads.
    for (const auto& thread : list_) {
      if (thread == self) {
        continue;
      }
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }

    // Broadcast a notification to all suspended threads, some or all of
    // which may choose to wake up. No need to wait for them.
    if (self != nullptr) {
      VLOG(threads) << *self << " ResumeAll waking others";
    } else {
      VLOG(threads) << "Thread[null] ResumeAll waking others";
    }
    Thread::resume_cond_->Broadcast(self);
  }

  if (self != nullptr) {
    VLOG(threads) << *self << " ResumeAll complete";
  } else {
    VLOG(threads) << "Thread[null] ResumeAll complete";
  }
}

void ThreadList::Resume(Thread* thread, bool for_debugger) {
  // This assumes there was an ATRACE_BEGIN when we suspended the thread.
  ATRACE_END();

  Thread* self = Thread::Current();
  DCHECK_NE(thread, self);
  VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") starting..."
      << (for_debugger ? " (debugger)" : "");

  {
    // To check Contains.
    MutexLock mu(self, *Locks::thread_list_lock_);
    // To check IsSuspended.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    DCHECK(thread->IsSuspended());
    if (!Contains(thread)) {
      // We only expect threads within the thread-list to have been suspended; otherwise we can't
      // stop such threads from deleting themselves.
      LOG(ERROR) << "Resume(" << reinterpret_cast<void*>(thread)
          << ") thread not within thread list";
      return;
    }
    thread->ModifySuspendCount(self, -1, nullptr, for_debugger);
  }

  {
    VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") waking others";
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") complete";
}

static void ThreadSuspendByPeerWarning(Thread* self,
                                       LogSeverity severity,
                                       const char* message,
                                       jobject peer) {
  JNIEnvExt* env = self->GetJniEnv();
  ScopedLocalRef<jstring>
      scoped_name_string(env, static_cast<jstring>(env->GetObjectField(
          peer, WellKnownClasses::java_lang_Thread_name)));
  ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
  if (scoped_name_chars.c_str() == nullptr) {
    LOG(severity) << message << ": " << peer;
    env->ExceptionClear();
  } else {
    LOG(severity) << message << ": " << peer << ":" << scoped_name_chars.c_str();
  }
}
Mathieu Chartierb56200b2015-10-29 10:41:51 -0700788Thread* ThreadList::SuspendThreadByPeer(jobject peer,
789 bool request_suspension,
790 bool debug_suspension,
791 bool* timed_out) {
Mathieu Chartier3a958aa2015-02-04 12:52:34 -0800792 const uint64_t start_time = NanoTime();
Mathieu Chartier99143862015-02-03 14:26:46 -0800793 useconds_t sleep_us = kThreadSuspendInitialSleepUs;
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700794 *timed_out = false;
Mathieu Chartier99143862015-02-03 14:26:46 -0800795 Thread* const self = Thread::Current();
Mathieu Chartier82a800d2014-12-15 15:59:49 -0800796 Thread* suspended_thread = nullptr;
Brian Carlstromba32de42014-08-27 23:43:46 -0700797 VLOG(threads) << "SuspendThreadByPeer starting";
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700798 while (true) {
799 Thread* thread;
800 {
Ian Rogersf3d874c2014-07-17 18:52:42 -0700801 // Note: this will transition to runnable and potentially suspend. We ensure only one thread
802 // is requesting another suspend, to avoid deadlock, by requiring this function be called
803 // holding Locks::thread_list_suspend_thread_lock_. Its important this thread suspend rather
804 // than request thread suspension, to avoid potential cycles in threads requesting each other
805 // suspend.
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700806 ScopedObjectAccess soa(self);
Andreas Gampe277ccbd2014-11-03 21:36:10 -0800807 MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700808 thread = Thread::FromManagedThread(soa, peer);
Brian Carlstromba32de42014-08-27 23:43:46 -0700809 if (thread == nullptr) {
Mathieu Chartier82a800d2014-12-15 15:59:49 -0800810 if (suspended_thread != nullptr) {
811 MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
812 // If we incremented the suspend count but the thread reset its peer, we need to
813 // re-decrement it since it is shutting down and may deadlock the runtime in
814 // ThreadList::WaitForOtherNonDaemonThreadsToExit.
Yu Lieac44242015-06-29 10:50:03 +0800815 suspended_thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
Mathieu Chartier82a800d2014-12-15 15:59:49 -0800816 }
Andreas Gampe3fec9ac2016-09-13 10:47:28 -0700817 ThreadSuspendByPeerWarning(self,
818 ::android::base::WARNING,
819 "No such thread for suspend",
820 peer);
Brian Carlstromba32de42014-08-27 23:43:46 -0700821 return nullptr;
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700822 }
Brian Carlstromba32de42014-08-27 23:43:46 -0700823 if (!Contains(thread)) {
Mathieu Chartier82a800d2014-12-15 15:59:49 -0800824 CHECK(suspended_thread == nullptr);
Brian Carlstromba32de42014-08-27 23:43:46 -0700825 VLOG(threads) << "SuspendThreadByPeer failed for unattached thread: "
826 << reinterpret_cast<void*>(thread);
827 return nullptr;
828 }
829 VLOG(threads) << "SuspendThreadByPeer found thread: " << *thread;
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700830 {
Andreas Gampe277ccbd2014-11-03 21:36:10 -0800831 MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700832 if (request_suspension) {
Ian Rogers4ad5cd32014-11-11 23:08:07 -0800833 if (self->GetSuspendCount() > 0) {
834 // We hold the suspend count lock but another thread is trying to suspend us. Its not
835 // safe to try to suspend another thread in case we get a cycle. Start the loop again
836 // which will allow this thread to be suspended.
837 continue;
838 }
Mathieu Chartier82a800d2014-12-15 15:59:49 -0800839 CHECK(suspended_thread == nullptr);
840 suspended_thread = thread;
Yu Lieac44242015-06-29 10:50:03 +0800841 suspended_thread->ModifySuspendCount(self, +1, nullptr, debug_suspension);
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700842 request_suspension = false;
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700843 } else {
844 // If the caller isn't requesting suspension, a suspension should have already occurred.
845 CHECK_GT(thread->GetSuspendCount(), 0);
846 }
847 // IsSuspended on the current thread will fail as the current thread is changed into
848 // Runnable above. As the suspend count is now raised if this is the current thread
849 // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
850 // to just explicitly handle the current thread in the callers to this code.
851 CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
852 // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
853 // count, or else we've waited and it has self suspended) or is the current thread, we're
854 // done.
855 if (thread->IsSuspended()) {
Brian Carlstromba32de42014-08-27 23:43:46 -0700856 VLOG(threads) << "SuspendThreadByPeer thread suspended: " << *thread;
Mathieu Chartierf0dc8b52014-12-17 10:13:30 -0800857 if (ATRACE_ENABLED()) {
858 std::string name;
859 thread->GetThreadName(name);
860 ATRACE_BEGIN(StringPrintf("SuspendThreadByPeer suspended %s for peer=%p", name.c_str(),
861 peer).c_str());
862 }
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700863 return thread;
864 }
Mathieu Chartier99143862015-02-03 14:26:46 -0800865 const uint64_t total_delay = NanoTime() - start_time;
866 if (total_delay >= MsToNs(kThreadSuspendTimeoutMs)) {
Andreas Gampe3fec9ac2016-09-13 10:47:28 -0700867 ThreadSuspendByPeerWarning(self,
868 ::android::base::FATAL,
Andreas Gamped6e54bb2016-09-26 14:07:57 -0700869 "Thread suspension timed out",
870 peer);
Mathieu Chartier82a800d2014-12-15 15:59:49 -0800871 if (suspended_thread != nullptr) {
872 CHECK_EQ(suspended_thread, thread);
Yu Lieac44242015-06-29 10:50:03 +0800873 suspended_thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700874 }
875 *timed_out = true;
Brian Carlstromba32de42014-08-27 23:43:46 -0700876 return nullptr;
Mathieu Chartier99143862015-02-03 14:26:46 -0800877 } else if (sleep_us == 0 &&
878 total_delay > static_cast<uint64_t>(kThreadSuspendMaxYieldUs) * 1000) {
879 // We have spun for kThreadSuspendMaxYieldUs time, switch to sleeps to prevent
880 // excessive CPU usage.
881 sleep_us = kThreadSuspendMaxYieldUs / 2;
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700882 }
883 }
884 // Release locks and come out of runnable state.
885 }
Mathieu Chartier99143862015-02-03 14:26:46 -0800886 VLOG(threads) << "SuspendThreadByPeer waiting to allow thread chance to suspend";
887 ThreadSuspendSleep(sleep_us);
888 // This may stay at 0 if sleep_us == 0, but this is WAI since we want to avoid using usleep at
889 // all if possible. This shouldn't be an issue since time to suspend should always be small.
890 sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700891 }
892}
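
// Illustrative usage sketch (not part of the original file; acquisition of |peer| is elided).
// The debugger and lock inflation are the expected callers:
//
//   bool timed_out = false;
//   Thread* target = thread_list->SuspendThreadByPeer(peer,
//                                                     /* request_suspension */ true,
//                                                     /* debug_suspension */ false,
//                                                     &timed_out);
//   if (target != nullptr) {
//     // |target| is suspended; inspect it, then release it.
//     thread_list->Resume(target, /* for_debugger */ false);
//   }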

static void ThreadSuspendByThreadIdWarning(LogSeverity severity,
                                           const char* message,
                                           uint32_t thread_id) {
  LOG(severity) << StringPrintf("%s: %d", message, thread_id);
}

Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id,
                                            bool debug_suspension,
                                            bool* timed_out) {
  const uint64_t start_time = NanoTime();
  useconds_t sleep_us = kThreadSuspendInitialSleepUs;
  *timed_out = false;
  Thread* suspended_thread = nullptr;
  Thread* const self = Thread::Current();
  CHECK_NE(thread_id, kInvalidThreadId);
  VLOG(threads) << "SuspendThreadByThreadId starting";
  while (true) {
    {
      // Note: this will transition to runnable and potentially suspend. We ensure only one thread
      // is requesting another suspend, to avoid deadlock, by requiring this function be called
      // holding Locks::thread_list_suspend_thread_lock_. It's important this thread suspend rather
      // than request thread suspension, to avoid potential cycles in threads requesting each other
      // suspend.
      ScopedObjectAccess soa(self);
      MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
      Thread* thread = nullptr;
      for (const auto& it : list_) {
        if (it->GetThreadId() == thread_id) {
          thread = it;
          break;
        }
      }
      if (thread == nullptr) {
        CHECK(suspended_thread == nullptr) << "Suspended thread " << suspended_thread
            << " no longer in thread list";
        // There's a race in inflating a lock and the owner giving up ownership and then dying.
        ThreadSuspendByThreadIdWarning(::android::base::WARNING,
                                       "No such thread id for suspend",
                                       thread_id);
        return nullptr;
      }
      VLOG(threads) << "SuspendThreadByThreadId found thread: " << *thread;
      DCHECK(Contains(thread));
      {
        MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
        if (suspended_thread == nullptr) {
          if (self->GetSuspendCount() > 0) {
            // We hold the suspend count lock but another thread is trying to suspend us. It's not
            // safe to try to suspend another thread in case we get a cycle. Start the loop again,
            // which will allow this thread to be suspended.
            continue;
          }
          thread->ModifySuspendCount(self, +1, nullptr, debug_suspension);
          suspended_thread = thread;
        } else {
          CHECK_EQ(suspended_thread, thread);
          // If the caller isn't requesting suspension, a suspension should have already occurred.
          CHECK_GT(thread->GetSuspendCount(), 0);
        }
        // IsSuspended on the current thread will fail as the current thread is changed into
        // Runnable above. As the suspend count is now raised if this is the current thread
        // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
        // to just explicitly handle the current thread in the callers to this code.
        CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
        // count, or else we've waited and it has self suspended) or is the current thread, we're
        // done.
        if (thread->IsSuspended()) {
          if (ATRACE_ENABLED()) {
            std::string name;
            thread->GetThreadName(name);
            ATRACE_BEGIN(StringPrintf("SuspendThreadByThreadId suspended %s id=%d",
                                      name.c_str(), thread_id).c_str());
          }
          VLOG(threads) << "SuspendThreadByThreadId thread suspended: " << *thread;
          return thread;
        }
        const uint64_t total_delay = NanoTime() - start_time;
        if (total_delay >= MsToNs(kThreadSuspendTimeoutMs)) {
          ThreadSuspendByThreadIdWarning(::android::base::WARNING,
                                         "Thread suspension timed out",
                                         thread_id);
          if (suspended_thread != nullptr) {
            thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
          }
          *timed_out = true;
          return nullptr;
        } else if (sleep_us == 0 &&
            total_delay > static_cast<uint64_t>(kThreadSuspendMaxYieldUs) * 1000) {
          // We have spun for kThreadSuspendMaxYieldUs time, switch to sleeps to prevent
          // excessive CPU usage.
          sleep_us = kThreadSuspendMaxYieldUs / 2;
        }
      }
      // Release locks and come out of runnable state.
    }
    VLOG(threads) << "SuspendThreadByThreadId waiting to allow thread chance to suspend";
    ThreadSuspendSleep(sleep_us);
    sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);
  }
}

Thread* ThreadList::FindThreadByThreadId(uint32_t thread_id) {
  for (const auto& thread : list_) {
    if (thread->GetThreadId() == thread_id) {
      return thread;
    }
  }
  return nullptr;
}

void ThreadList::SuspendAllForDebugger() {
  Thread* self = Thread::Current();
  Thread* debug_thread = Dbg::GetDebugThread();

  VLOG(threads) << *self << " SuspendAllForDebugger starting...";

  SuspendAllInternal(self, self, debug_thread, true);
  // Block on the mutator lock until all Runnable threads release their share of access, then
  // immediately unlock again.
#if HAVE_TIMED_RWLOCK
  // Timeout if we wait more than 30 seconds.
  if (!Locks::mutator_lock_->ExclusiveLockWithTimeout(self, 30 * 1000, 0)) {
    UnsafeLogFatalForThreadSuspendAllTimeout();
  } else {
    Locks::mutator_lock_->ExclusiveUnlock(self);
  }
#else
  Locks::mutator_lock_->ExclusiveLock(self);
  Locks::mutator_lock_->ExclusiveUnlock(self);
#endif
  // Disabled due to the following race condition:
  // Thread 1 calls SuspendAllForDebugger and gets preempted after pulsing the mutator lock.
  // Thread 2 calls SuspendAll and SetStateUnsafe (perhaps from Dbg::Disconnected).
  // Thread 1 then fails the assertion that all threads are suspended, because thread 2 is in a
  // runnable state (from SetStateUnsafe).
  // AssertThreadsAreSuspended(self, self, debug_thread);

  VLOG(threads) << *self << " SuspendAllForDebugger complete";
}
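
// The exclusive lock/unlock pair above is a "pulse" of the mutator lock: taking the lock
// exclusively cannot succeed while any Runnable thread still holds shared access, so by the
// time ExclusiveLock returns every mutator has either suspended or released the lock, and we
// can unlock again immediately. A minimal sketch of the same idiom on a generic shared mutex
// (illustrative only; the runtime uses its own ReaderWriterMutex, not std::shared_mutex):
//
//   std::shared_mutex mu;                            // readers = runnable threads
//   void WaitForReadersToDrain() {
//     std::unique_lock<std::shared_mutex> lock(mu);  // blocks until all readers are gone
//   }                                                // unlock at once; we only needed the barrier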

void ThreadList::SuspendSelfForDebugger() {
  Thread* const self = Thread::Current();
  self->SetReadyForDebugInvoke(true);

  // The debugger thread must not suspend itself due to debugger activity!
  Thread* debug_thread = Dbg::GetDebugThread();
  CHECK(self != debug_thread);
  CHECK_NE(self->GetState(), kRunnable);
  Locks::mutator_lock_->AssertNotHeld(self);

  // The debugger may have detached while we were executing an invoke request. In that case, we
  // must not suspend ourselves.
  DebugInvokeReq* pReq = self->GetInvokeReq();
  const bool skip_thread_suspension = (pReq != nullptr && !Dbg::IsDebuggerActive());
  if (!skip_thread_suspension) {
    // Collisions with other suspends aren't really interesting. We want
    // to ensure that we're the only one fiddling with the suspend count
    // though.
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    self->ModifySuspendCount(self, +1, nullptr, true);
    CHECK_GT(self->GetSuspendCount(), 0);

    VLOG(threads) << *self << " self-suspending (debugger)";
  } else {
    // We must no longer be subject to debugger suspension.
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    CHECK_EQ(self->GetDebugSuspendCount(), 0) << "Debugger detached without resuming us";

    VLOG(threads) << *self << " not self-suspending because debugger detached during invoke";
  }

  // If the debugger requested an invoke, we need to send the reply and clear the request.
  if (pReq != nullptr) {
    Dbg::FinishInvokeMethod(pReq);
    self->ClearDebugInvokeReq();
    pReq = nullptr;  // The object has been deleted; clear the pointer for safety.
  }

  // Tell JDWP that we've completed suspension. The JDWP thread can't
  // tell us to resume before we're fully asleep because we hold the
  // suspend count lock.
  Dbg::ClearWaitForEventThread();

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    while (self->GetSuspendCount() != 0) {
      Thread::resume_cond_->Wait(self);
      if (self->GetSuspendCount() != 0) {
        // The condition was signaled but we're still suspended. This
        // can happen when we suspend then resume all threads to
        // update instrumentation or compute monitor info. This can
        // also happen if the debugger lets go while a SIGQUIT thread
        // dump event is pending (assuming SignalCatcher was resumed for
        // just long enough to try to grab the thread-suspend lock).
        VLOG(jdwp) << *self << " still suspended after undo "
                   << "(suspend count=" << self->GetSuspendCount() << ", "
                   << "debug suspend count=" << self->GetDebugSuspendCount() << ")";
      }
    }
    CHECK_EQ(self->GetSuspendCount(), 0);
  }

  self->SetReadyForDebugInvoke(false);
  VLOG(threads) << *self << " self-reviving (debugger)";
}
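
// The sequence above is the target half of the debugger suspension handshake: the thread
// raises its own suspend count under thread_suspend_count_lock_, tells JDWP it is parked via
// Dbg::ClearWaitForEventThread(), then waits on Thread::resume_cond_ until the count drops
// back to zero. Because the count is re-checked after every wakeup, a broadcast from an
// unrelated suspend/resume cycle (e.g. an instrumentation update) simply puts the thread
// back to sleep instead of resuming it early.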

void ThreadList::ResumeAllForDebugger() {
  Thread* self = Thread::Current();
  Thread* debug_thread = Dbg::GetDebugThread();

  VLOG(threads) << *self << " ResumeAllForDebugger starting...";

  // Threads can't resume if we exclusively hold the mutator lock.
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);

  {
    MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
    {
      MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
      // Update global suspend all state for attaching threads.
      DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
      if (debug_suspend_all_count_ > 0) {
        --suspend_all_count_;
        --debug_suspend_all_count_;
      } else {
        // We've been asked to resume all threads without being asked to
        // suspend them all before. That may happen if a debugger tries
        // to resume some suspended threads (with suspend count == 1)
        // at once with a VirtualMachine.Resume command. Let's print a
        // warning.
        LOG(WARNING) << "Debugger attempted to resume all threads without "
                     << "having suspended them all before.";
      }
      // Decrement everybody's suspend count (except our own).
      for (const auto& thread : list_) {
        if (thread == self || thread == debug_thread) {
          continue;
        }
        if (thread->GetDebugSuspendCount() == 0) {
          // This thread may have been individually resumed with ThreadReference.Resume.
          continue;
        }
        VLOG(threads) << "requesting thread resume: " << *thread;
        thread->ModifySuspendCount(self, -1, nullptr, true);
      }
    }
  }

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << *self << " ResumeAllForDebugger complete";
}

void ThreadList::UndoDebuggerSuspensions() {
  Thread* self = Thread::Current();

  VLOG(threads) << *self << " UndoDebuggerSuspensions starting";

  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    suspend_all_count_ -= debug_suspend_all_count_;
    debug_suspend_all_count_ = 0;
    // Update running threads.
    for (const auto& thread : list_) {
      if (thread == self || thread->GetDebugSuspendCount() == 0) {
        continue;
      }
      thread->ModifySuspendCount(self, -thread->GetDebugSuspendCount(), nullptr, true);
    }
  }

  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << "UndoDebuggerSuspensions(" << *self << ") complete";
}

void ThreadList::WaitForOtherNonDaemonThreadsToExit() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  while (true) {
    {
      // No more threads can be born after we start to shut down.
      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
      CHECK(Runtime::Current()->IsShuttingDownLocked());
      CHECK_EQ(Runtime::Current()->NumberOfThreadsBeingBorn(), 0U);
    }
    MutexLock mu(self, *Locks::thread_list_lock_);
    // Also wait for any threads that are unregistering to finish. This is required so that no
    // threads access the thread list after it is deleted. TODO: This may not work for user daemon
    // threads since they could unregister at the wrong time.
    bool done = unregistering_count_ == 0;
    if (done) {
      for (const auto& thread : list_) {
        if (thread != self && !thread->IsDaemon()) {
          done = false;
          break;
        }
      }
    }
    if (done) {
      break;
    }
    // Wait for another thread to exit before re-checking.
    Locks::thread_exit_cond_->Wait(self);
  }
}
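
// WaitForOtherNonDaemonThreadsToExit follows the standard condition-variable pattern:
// re-evaluate the predicate (no unregistering threads, no live non-daemon peers) after every
// wakeup, since thread_exit_cond_ is broadcast once per detaching thread. A minimal standalone
// sketch of the same shape (illustrative, using the C++ standard library rather than the
// runtime's Mutex/ConditionVariable types):
//
//   std::mutex m;
//   std::condition_variable cv;
//   int non_daemons = 0;  // incremented on attach, decremented on exit, both under m
//   void WaitForNonDaemons() {
//     std::unique_lock<std::mutex> lock(m);
//     cv.wait(lock, [&] { return non_daemons == 0; });  // predicate re-checked on every wakeup
//   }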

void ThreadList::SuspendAllDaemonThreadsForShutdown() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  Thread* self = Thread::Current();
  size_t daemons_left = 0;
  {
    // Tell all the daemons it's time to suspend.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : list_) {
      // This is only run after all non-daemon threads have exited, so the remainder should all be
      // daemons.
      CHECK(thread->IsDaemon()) << *thread;
      if (thread != self) {
        thread->ModifySuspendCount(self, +1, nullptr, false);
        ++daemons_left;
      }
      // We are shutting down the runtime, so set the JNI functions of all the JNIEnvs to be
      // the sleep-forever ones.
      thread->GetJniEnv()->SetFunctionsToRuntimeShutdownFunctions();
    }
  }
  // If we have any daemons left, wait 200ms to ensure they are not stuck in a place where they
  // are about to access runtime state and are not in a runnable state. Examples: Monitor code
  // or waking up from a condition variable. TODO: Try to find a better way to wait for daemon
  // threads to be in a blocked state.
  if (daemons_left > 0) {
    static constexpr size_t kDaemonSleepTime = 200 * 1000;
    usleep(kDaemonSleepTime);
  }
  // Give the threads a chance to suspend, complaining if they're slow.
  bool have_complained = false;
  static constexpr size_t kTimeoutMicroseconds = 2000 * 1000;
  static constexpr size_t kSleepMicroseconds = 1000;
  for (size_t i = 0; i < kTimeoutMicroseconds / kSleepMicroseconds; ++i) {
    bool all_suspended = true;
    {
      MutexLock mu(self, *Locks::thread_list_lock_);
      for (const auto& thread : list_) {
        if (thread != self && thread->GetState() == kRunnable) {
          if (!have_complained) {
            LOG(WARNING) << "daemon thread not yet suspended: " << *thread;
            have_complained = true;
          }
          all_suspended = false;
        }
      }
    }
    if (all_suspended) {
      return;
    }
    usleep(kSleepMicroseconds);
  }
  LOG(WARNING) << "timed out suspending all daemon threads";
}
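
// With the constants above, the final loop polls thread states every kSleepMicroseconds (1ms)
// for at most kTimeoutMicroseconds / kSleepMicroseconds = 2000 iterations, i.e. roughly two
// seconds of wall-clock time (ignoring the cost of each scan), on top of the one-shot 200ms
// grace sleep taken when daemons_left > 0. Daemons still Runnable after that are only warned
// about; shutdown proceeds regardless.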

void ThreadList::Register(Thread* self) {
  DCHECK_EQ(self, Thread::Current());

  if (VLOG_IS_ON(threads)) {
    std::ostringstream oss;
    self->ShortDump(oss);  // We don't hold the mutator_lock_ yet and so cannot call Dump.
    LOG(INFO) << "ThreadList::Register() " << *self << "\n" << oss.str();
  }

  // Atomically add self to the thread list and make its thread_suspend_count_ reflect ongoing
  // SuspendAll requests.
  MutexLock mu(self, *Locks::thread_list_lock_);
  MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
  CHECK_GE(suspend_all_count_, debug_suspend_all_count_);
  // Modify the suspend count in increments of 1 to maintain invariants in ModifySuspendCount.
  // While this isn't particularly efficient, the suspend counts are most commonly 0 or 1.
  for (int delta = debug_suspend_all_count_; delta > 0; delta--) {
    self->ModifySuspendCount(self, +1, nullptr, true);
  }
  for (int delta = suspend_all_count_ - debug_suspend_all_count_; delta > 0; delta--) {
    self->ModifySuspendCount(self, +1, nullptr, false);
  }
  CHECK(!Contains(self));
  list_.push_back(self);
  if (kUseReadBarrier) {
    // Initialize according to the state of the CC collector.
    bool is_gc_marking =
        Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking();
    self->SetIsGcMarkingAndUpdateEntrypoints(is_gc_marking);
    bool weak_ref_access_enabled =
        Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsWeakRefAccessEnabled();
    self->SetWeakRefAccessEnabled(weak_ref_access_enabled);
  }
}
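
// Worked example for the catch-up loops above: if two SuspendAll operations are in effect and
// one of them is a debugger suspension (suspend_all_count_ == 2, debug_suspend_all_count_ == 1),
// a newly attaching thread applies one +1 with the debug flag set and one +1 without it,
// ending with suspend count 2 and debug suspend count 1, exactly as if it had been present
// for both suspensions.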

void ThreadList::Unregister(Thread* self) {
  DCHECK_EQ(self, Thread::Current());
  CHECK_NE(self->GetState(), kRunnable);
  Locks::mutator_lock_->AssertNotHeld(self);

  VLOG(threads) << "ThreadList::Unregister() " << *self;

  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    ++unregistering_count_;
  }

  // Any time-consuming destruction, plus anything that can call back into managed code or
  // suspend and so on, must happen at this point, and not in ~Thread. The self->Destroy is what
  // causes the threads to join. It is important to do this after incrementing
  // unregistering_count_ since we want the runtime to wait for the daemon threads to exit before
  // deleting the thread list.
  self->Destroy();

  // If tracing, remember the thread id and name before the thread exits.
  Trace::StoreExitingThreadInfo(self);

  uint32_t thin_lock_id = self->GetThreadId();
  while (true) {
    // Remove and delete the Thread* while holding the thread_list_lock_ and
    // thread_suspend_count_lock_ so that the unregistering thread cannot be suspended.
    // Note: deliberately not using MutexLock that could hold a stale self pointer.
    MutexLock mu(self, *Locks::thread_list_lock_);
    if (!Contains(self)) {
      std::string thread_name;
      self->GetThreadName(thread_name);
      std::ostringstream os;
      DumpNativeStack(os, GetTid(), nullptr, "  native: ", nullptr);
      LOG(ERROR) << "Request to unregister unattached thread " << thread_name << "\n" << os.str();
      break;
    } else {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
      if (!self->IsSuspended()) {
        list_.remove(self);
        break;
      }
    }
    // We failed to remove the thread due to a suspend request, loop and try again.
  }
  delete self;

  // Release the thread ID after the thread is finished and deleted to avoid cases where we can
  // temporarily have multiple threads with the same thread id. When this occurs, it causes
  // problems in FindThreadByThreadId / SuspendThreadByThreadId.
  ReleaseThreadId(nullptr, thin_lock_id);

  // Clear the TLS data, so that the underlying native thread is recognizably detached.
  // (It may wish to reattach later.)
#ifdef ART_TARGET_ANDROID
  __get_tls()[TLS_SLOT_ART_THREAD_SELF] = nullptr;
#else
  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self");
#endif

  // Signal that a thread just detached.
  MutexLock mu(nullptr, *Locks::thread_list_lock_);
  --unregistering_count_;
  Locks::thread_exit_cond_->Broadcast(nullptr);
}

void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
  for (const auto& thread : list_) {
    callback(thread, context);
  }
}

void ThreadList::VisitRootsForSuspendedThreads(RootVisitor* visitor) {
  Thread* const self = Thread::Current();
  std::vector<Thread*> threads_to_visit;

  // Tell threads to suspend and copy them into the list.
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (Thread* thread : list_) {
      thread->ModifySuspendCount(self, +1, nullptr, false);
      if (thread == self || thread->IsSuspended()) {
        threads_to_visit.push_back(thread);
      } else {
        thread->ModifySuspendCount(self, -1, nullptr, false);
      }
    }
  }

  // Visit roots without holding thread_list_lock_ and thread_suspend_count_lock_ to prevent lock
  // order violations.
  for (Thread* thread : threads_to_visit) {
    thread->VisitRoots(visitor);
  }

  // Restore suspend counts.
  {
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (Thread* thread : threads_to_visit) {
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }
  }
}
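
// VisitRootsForSuspendedThreads deliberately visits only threads that are already suspended
// (or self): the +1 pins a suspended thread so it cannot become runnable while its roots are
// scanned, and the immediate -1 skips threads that were caught running rather than waiting
// for them. This gives a best-effort visit without triggering a full suspend-all pause.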

void ThreadList::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) const {
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  for (const auto& thread : list_) {
    thread->VisitRoots(visitor, flags);
  }
}

uint32_t ThreadList::AllocThreadId(Thread* self) {
  MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
  for (size_t i = 0; i < allocated_ids_.size(); ++i) {
    if (!allocated_ids_[i]) {
      allocated_ids_.set(i);
      return i + 1;  // Zero is reserved to mean "invalid".
    }
  }
  LOG(FATAL) << "Out of internal thread ids";
  return 0;
}

void ThreadList::ReleaseThreadId(Thread* self, uint32_t id) {
  MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
  --id;  // Zero is reserved to mean "invalid".
  DCHECK(allocated_ids_[id]) << id;
  allocated_ids_.reset(id);
}

ScopedSuspendAll::ScopedSuspendAll(const char* cause, bool long_suspend) {
  Runtime::Current()->GetThreadList()->SuspendAll(cause, long_suspend);
}

ScopedSuspendAll::~ScopedSuspendAll() {
  Runtime::Current()->GetThreadList()->ResumeAll();
}
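
// ScopedSuspendAll is the RAII wrapper around the SuspendAll / ResumeAll pair. A typical use
// (hypothetical caller; any code that must observe all threads stopped):
//
//   {
//     ScopedSuspendAll ssa("Dump for bug report");  // long_suspend assumed defaulted in header
//     // ... walk the thread list, dump stacks, etc., with every other thread suspended ...
//   }                                               // threads resume when ssa is destroyed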

}  // namespace art