/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "thread_list.h"

#include <backtrace/BacktraceMap.h>
#include <dirent.h>
#include <ScopedLocalRef.h>
#include <ScopedUtfChars.h>
#include <sys/types.h>
#include <unistd.h>

#include <sstream>

#include "android-base/stringprintf.h"

#include "base/histogram-inl.h"
#include "base/mutex-inl.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/timing_logger.h"
#include "debugger.h"
#include "gc/collector/concurrent_copying.h"
#include "gc/reference_processor.h"
#include "jni_internal.h"
#include "lock_word.h"
#include "monitor.h"
#include "native_stack_dump.h"
#include "scoped_thread_state_change-inl.h"
#include "thread.h"
#include "trace.h"
#include "well_known_classes.h"

#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#endif  // ART_USE_FUTEXES

namespace art {

using android::base::StringPrintf;

static constexpr uint64_t kLongThreadSuspendThreshold = MsToNs(5);
// Use 0 since we want to yield to prevent blocking for an unpredictable amount of time.
static constexpr useconds_t kThreadSuspendInitialSleepUs = 0;
static constexpr useconds_t kThreadSuspendMaxYieldUs = 3000;
static constexpr useconds_t kThreadSuspendMaxSleepUs = 5000;
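
// Illustrative sketch (not part of this file; ElapsedUs() is a hypothetical helper) of the
// escalation these constants encode, mirroring the wait loops in SuspendThreadByPeer and
// SuspendThreadByThreadId below: yield first, switch to sleeping once we have spun for
// kThreadSuspendMaxYieldUs, then back off exponentially up to kThreadSuspendMaxSleepUs.
//
//   useconds_t sleep_us = kThreadSuspendInitialSleepUs;  // 0 => sched_yield().
//   while (!thread->IsSuspended()) {
//     ThreadSuspendSleep(sleep_us);  // Defined later in this file.
//     sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);  // 0 stays 0.
//     if (sleep_us == 0 && ElapsedUs() > kThreadSuspendMaxYieldUs) {
//       sleep_us = kThreadSuspendMaxYieldUs / 2;  // Stop yielding, start sleeping.
//     }
//   }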

// Whether we should try to dump the native stack of unattached threads. See commit ed8b723 for
// some history.
// Turned off again. b/29248079
static constexpr bool kDumpUnattachedThreadNativeStackForSigQuit = false;

ThreadList::ThreadList(uint64_t thread_suspend_timeout_ns)
    : suspend_all_count_(0),
      debug_suspend_all_count_(0),
      unregistering_count_(0),
      suspend_all_historam_("suspend all histogram", 16, 64),
      long_suspend_(false),
      thread_suspend_timeout_ns_(thread_suspend_timeout_ns),
      empty_checkpoint_barrier_(new Barrier(0)) {
  CHECK(Monitor::IsValidLockWord(LockWord::FromThinLockId(kMaxThreadId, 1, 0U)));
}

ThreadList::~ThreadList() {
  ScopedTrace trace(__PRETTY_FUNCTION__);
  // Detach the current thread if necessary. If we failed to start, there might not be any threads.
  // We need to detach the current thread here in case there's another thread waiting to join with
  // us.
  bool contains = false;
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    contains = Contains(self);
  }
  if (contains) {
    Runtime::Current()->DetachCurrentThread();
  }
  WaitForOtherNonDaemonThreadsToExit();
  // Disable GC and wait for GC to complete in case there are still daemon threads doing
  // allocations.
  gc::Heap* const heap = Runtime::Current()->GetHeap();
  heap->DisableGCForShutdown();
  // In case a GC is in progress, wait for it to finish.
  heap->WaitForGcToComplete(gc::kGcCauseBackground, Thread::Current());
  // TODO: there's an unaddressed race here where a thread may attach during shutdown, see
  //       Thread::Init.
  SuspendAllDaemonThreadsForShutdown();
}

bool ThreadList::Contains(Thread* thread) {
  return find(list_.begin(), list_.end(), thread) != list_.end();
}

bool ThreadList::Contains(pid_t tid) {
  for (const auto& thread : list_) {
    if (thread->GetTid() == tid) {
      return true;
    }
  }
  return false;
}

pid_t ThreadList::GetLockOwner() {
  return Locks::thread_list_lock_->GetExclusiveOwnerTid();
}

void ThreadList::DumpNativeStacks(std::ostream& os) {
  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
  for (const auto& thread : list_) {
    os << "DUMPING THREAD " << thread->GetTid() << "\n";
    DumpNativeStack(os, thread->GetTid(), map.get(), "\t");
    os << "\n";
  }
}

void ThreadList::DumpForSigQuit(std::ostream& os) {
  {
    ScopedObjectAccess soa(Thread::Current());
    // Only print if we have samples.
    if (suspend_all_historam_.SampleSize() > 0) {
      Histogram<uint64_t>::CumulativeData data;
      suspend_all_historam_.CreateHistogram(&data);
      suspend_all_historam_.PrintConfidenceIntervals(os, 0.99, data);  // Dump time to suspend.
    }
  }
  bool dump_native_stack = Runtime::Current()->GetDumpNativeStackOnSigQuit();
  Dump(os, dump_native_stack);
  DumpUnattachedThreads(os, dump_native_stack && kDumpUnattachedThreadNativeStackForSigQuit);
}
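
// Illustrative note (not part of this file): DumpForSigQuit is reached via the runtime's
// SignalCatcher thread when the process receives SIGQUIT, so a dump can be provoked in a test
// roughly like this:
//
//   #include <csignal>
//   #include <unistd.h>
//   kill(getpid(), SIGQUIT);  // SignalCatcher catches this; the dump lands in the ANR trace.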

static void DumpUnattachedThread(std::ostream& os, pid_t tid, bool dump_native_stack)
    NO_THREAD_SAFETY_ANALYSIS {
  // TODO: No thread safety analysis as DumpState with a null thread won't access fields, should
  // refactor DumpState to avoid skipping analysis.
  Thread::DumpState(os, nullptr, tid);
  DumpKernelStack(os, tid, "  kernel: ", false);
  if (dump_native_stack) {
    DumpNativeStack(os, tid, nullptr, "  native: ");
  }
  os << "\n";
}

void ThreadList::DumpUnattachedThreads(std::ostream& os, bool dump_native_stack) {
  DIR* d = opendir("/proc/self/task");
  if (!d) {
    return;
  }

  Thread* self = Thread::Current();
  dirent* e;
  while ((e = readdir(d)) != nullptr) {
    char* end;
    pid_t tid = strtol(e->d_name, &end, 10);
    if (!*end) {
      bool contains;
      {
        MutexLock mu(self, *Locks::thread_list_lock_);
        contains = Contains(tid);
      }
      if (!contains) {
        DumpUnattachedThread(os, tid, dump_native_stack);
      }
    }
  }
  closedir(d);
}

// Dump checkpoint timeout in milliseconds. Larger amount on the target, since the device could be
// overloaded with ANR dumps.
static constexpr uint32_t kDumpWaitTimeout = kIsTargetBuild ? 100000 : 20000;

// A closure used by Thread::Dump.
class DumpCheckpoint FINAL : public Closure {
 public:
  DumpCheckpoint(std::ostream* os, bool dump_native_stack)
      : os_(os),
        barrier_(0),
        backtrace_map_(dump_native_stack ? BacktraceMap::Create(getpid()) : nullptr),
        dump_native_stack_(dump_native_stack) {}

  void Run(Thread* thread) OVERRIDE {
    // Note thread and self may not be equal if thread was already suspended at the point of the
    // request.
    Thread* self = Thread::Current();
    CHECK(self != nullptr);
    std::ostringstream local_os;
    {
      ScopedObjectAccess soa(self);
      thread->Dump(local_os, dump_native_stack_, backtrace_map_.get());
    }
    local_os << "\n";
    {
      // Use the logging lock to ensure serialization when writing to the common ostream.
      MutexLock mu(self, *Locks::logging_lock_);
      *os_ << local_os.str();
    }
    barrier_.Pass(self);
  }

  void WaitForThreadsToRunThroughCheckpoint(size_t threads_running_checkpoint) {
    Thread* self = Thread::Current();
    ScopedThreadStateChange tsc(self, kWaitingForCheckPointsToRun);
    bool timed_out = barrier_.Increment(self, threads_running_checkpoint, kDumpWaitTimeout);
    if (timed_out) {
      // Avoid a recursive abort.
      LOG((kIsDebugBuild && (gAborting == 0)) ? ::android::base::FATAL : ::android::base::ERROR)
          << "Unexpected time out during dump checkpoint.";
    }
  }

 private:
  // The common stream that will accumulate all the dumps.
  std::ostream* const os_;
  // The barrier to be passed through and for the requestor to wait upon.
  Barrier barrier_;
  // A backtrace map, so that all threads use a shared info and don't reacquire/parse separately.
  std::unique_ptr<BacktraceMap> backtrace_map_;
  // Whether we should dump the native stack.
  const bool dump_native_stack_;
};

void ThreadList::Dump(std::ostream& os, bool dump_native_stack) {
  Thread* self = Thread::Current();
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    os << "DALVIK THREADS (" << list_.size() << "):\n";
  }
  if (self != nullptr) {
    DumpCheckpoint checkpoint(&os, dump_native_stack);
    size_t threads_running_checkpoint;
    {
      // Use SOA to prevent deadlocks if multiple threads are calling Dump() at the same time.
      ScopedObjectAccess soa(self);
      threads_running_checkpoint = RunCheckpoint(&checkpoint);
    }
    if (threads_running_checkpoint != 0) {
      checkpoint.WaitForThreadsToRunThroughCheckpoint(threads_running_checkpoint);
    }
  } else {
    DumpUnattachedThreads(os, dump_native_stack);
  }
}

void ThreadList::AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2) {
  MutexLock mu(self, *Locks::thread_list_lock_);
  MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
  for (const auto& thread : list_) {
    if (thread != ignore1 && thread != ignore2) {
      CHECK(thread->IsSuspended())
            << "\nUnsuspended thread: <<" << *thread << "\n"
            << "self: <<" << *Thread::Current();
    }
  }
}

#if HAVE_TIMED_RWLOCK
// Attempt to rectify locks so that we dump thread list with required locks before exiting.
NO_RETURN static void UnsafeLogFatalForThreadSuspendAllTimeout() {
  Runtime* runtime = Runtime::Current();
  std::ostringstream ss;
  ss << "Thread suspend timeout\n";
  Locks::mutator_lock_->Dump(ss);
  ss << "\n";
  runtime->GetThreadList()->Dump(ss);
  LOG(FATAL) << ss.str();
  exit(0);
}
#endif

// Unlike suspending all threads where we can wait to acquire the mutator_lock_, suspending an
// individual thread requires polling. delay_us is the requested sleep wait. If delay_us is 0 then
// we use sched_yield instead of calling usleep.
static void ThreadSuspendSleep(useconds_t delay_us) {
  if (delay_us == 0) {
    sched_yield();
  } else {
    usleep(delay_us);
  }
}

size_t ThreadList::RunCheckpoint(Closure* checkpoint_function, Closure* callback) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);

  std::vector<Thread*> suspended_count_modified_threads;
  size_t count = 0;
  {
    // Call a checkpoint function for each thread; threads which are suspended get their
    // checkpoint manually called.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    count = list_.size();
    for (const auto& thread : list_) {
      if (thread != self) {
        while (true) {
          if (thread->RequestCheckpoint(checkpoint_function)) {
            // This thread will run its checkpoint some time in the near future.
            break;
          } else {
            // The request failed, so the thread is probably suspended; try to make sure that it
            // stays suspended.
            if (thread->GetState() == kRunnable) {
              // The thread switched back to runnable: spurious failure, try again.
              continue;
            }
            thread->ModifySuspendCount(self, +1, nullptr, false);
            suspended_count_modified_threads.push_back(thread);
            break;
          }
        }
      }
    }
    // Run the callback to be called inside this critical section.
    if (callback != nullptr) {
      callback->Run(self);
    }
  }

  // Run the checkpoint on ourself while we wait for threads to suspend.
  checkpoint_function->Run(self);

  // Run the checkpoint on the suspended threads.
  for (const auto& thread : suspended_count_modified_threads) {
    if (!thread->IsSuspended()) {
      if (ATRACE_ENABLED()) {
        std::ostringstream oss;
        thread->ShortDump(oss);
        ATRACE_BEGIN((std::string("Waiting for suspension of thread ") + oss.str()).c_str());
      }
      // Busy wait until the thread is suspended.
      const uint64_t start_time = NanoTime();
      do {
        ThreadSuspendSleep(kThreadSuspendInitialSleepUs);
      } while (!thread->IsSuspended());
      const uint64_t total_delay = NanoTime() - start_time;
      // Shouldn't need to wait for longer than 1000 microseconds.
      constexpr uint64_t kLongWaitThreshold = MsToNs(1);
      ATRACE_END();
      if (UNLIKELY(total_delay > kLongWaitThreshold)) {
        LOG(WARNING) << "Long wait of " << PrettyDuration(total_delay) << " for "
            << *thread << " suspension!";
      }
    }
    // We know for sure that the thread is suspended at this point.
    checkpoint_function->Run(thread);
    {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }
  }

  {
    // Imitate ResumeAll: threads may be waiting on Thread::resume_cond_ since we raised their
    // suspend count. Now that the suspend count is lowered we must do the broadcast.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  return count;
}
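
// Illustrative sketch (hypothetical closure, not part of this file) of the contract RunCheckpoint
// expects from callers: Run() executes once per thread, either on the target thread itself at its
// next suspend check or on the requesting thread while the target is suspended, so it must be
// safe in both contexts.
//
//   class FlagStackCheckpoint : public Closure {
//    public:
//     void Run(Thread* thread) OVERRIDE {
//       // |thread| is either the requester or known to be suspended; only touch its
//       // thread-local state here.
//     }
//   };
//
//   FlagStackCheckpoint checkpoint;
//   size_t count = Runtime::Current()->GetThreadList()->RunCheckpoint(&checkpoint);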

size_t ThreadList::RunEmptyCheckpoint(std::vector<uint32_t>& runnable_thread_ids) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);

  size_t count = 0;
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (Thread* thread : list_) {
      if (thread != self) {
        while (true) {
          if (thread->RequestEmptyCheckpoint()) {
            // This thread will run an empty checkpoint (decrement the empty checkpoint barrier)
            // some time in the near future.
            ++count;
            if (kIsDebugBuild) {
              runnable_thread_ids.push_back(thread->GetThreadId());
            }
            break;
          }
          if (thread->GetState() != kRunnable) {
            // The thread was seen suspended; we are done because it must not be in the middle of
            // a mutator heap access.
            break;
          }
        }
      }
    }
  }

  // Wake up the threads blocking for weak ref access so that they will respond to the empty
  // checkpoint request. Otherwise we will hang as they are blocking in the kRunnable state.
  Runtime::Current()->GetHeap()->GetReferenceProcessor()->BroadcastForSlowPath(self);
  Runtime::Current()->BroadcastForNewSystemWeaks(/*broadcast_for_checkpoint*/true);

  return count;
}
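
// Illustrative sketch (not part of this file, and assuming the EmptyCheckpointBarrier() accessor
// from thread_list.h): a caller such as the concurrent copying collector issues the empty
// checkpoint and then waits on the barrier that each runnable thread decrements, which guarantees
// every thread has passed at least one suspend check:
//
//   std::vector<uint32_t> runnable_thread_ids;  // Only populated in debug builds.
//   size_t barrier_count = thread_list->RunEmptyCheckpoint(runnable_thread_ids);
//   if (barrier_count != 0) {
//     thread_list->EmptyCheckpointBarrier()->Increment(self, barrier_count);
//   }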

// Request that a checkpoint function be run on all active (non-suspended)
// threads. Returns the number of successful requests.
size_t ThreadList::RunCheckpointOnRunnableThreads(Closure* checkpoint_function) {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  CHECK_NE(self->GetState(), kRunnable);

  size_t count = 0;
  {
    // Call a checkpoint function for each non-suspended thread.
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : list_) {
      if (thread != self) {
        if (thread->RequestCheckpoint(checkpoint_function)) {
          // This thread will run its checkpoint some time in the near future.
          count++;
        }
      }
    }
  }

  // Return the number of threads that will run the checkpoint function.
  return count;
}

// A checkpoint/suspend-all hybrid to switch thread roots from
// from-space to to-space refs. Used to synchronize threads at a point
// to mark the initiation of marking while maintaining the to-space
// invariant.
size_t ThreadList::FlipThreadRoots(Closure* thread_flip_visitor,
                                   Closure* flip_callback,
                                   gc::collector::GarbageCollector* collector) {
  TimingLogger::ScopedTiming split("ThreadListFlip", collector->GetTimings());
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertNotHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  CHECK_NE(self->GetState(), kRunnable);

  collector->GetHeap()->ThreadFlipBegin(self);  // Sync with JNI critical calls.

  // ThreadFlipBegin happens before we suspend all the threads, so it does not count towards the
  // pause.
  const uint64_t suspend_start_time = NanoTime();
  SuspendAllInternal(self, self, nullptr);

  // Run the flip callback for the collector.
  Locks::mutator_lock_->ExclusiveLock(self);
  suspend_all_historam_.AdjustAndAddValue(NanoTime() - suspend_start_time);
  flip_callback->Run(self);
  Locks::mutator_lock_->ExclusiveUnlock(self);
  collector->RegisterPause(NanoTime() - suspend_start_time);

  // Resume runnable threads.
  size_t runnable_thread_count = 0;
  std::vector<Thread*> other_threads;
  {
    TimingLogger::ScopedTiming split2("ResumeRunnableThreads", collector->GetTimings());
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    --suspend_all_count_;
    for (const auto& thread : list_) {
      // Set the flip function for all threads because Thread::DumpState/DumpJavaStack() (invoked
      // by a checkpoint) may cause the flip function to be run for a runnable/suspended thread
      // before a runnable thread runs it for itself or we run it for a suspended thread below.
      thread->SetFlipFunction(thread_flip_visitor);
      if (thread == self) {
        continue;
      }
      // Resume early the threads that were runnable but are suspended just for this thread flip,
      // or are about to transition from non-runnable (eg. kNative at the SOA entry in a JNI
      // function) to runnable (both cases waiting inside Thread::TransitionFromSuspendedToRunnable),
      // or are waiting for the thread flip to end at the JNI critical section entry
      // (kWaitingForGcThreadFlip).
      ThreadState state = thread->GetState();
      if ((state == kWaitingForGcThreadFlip || thread->IsTransitioningToRunnable()) &&
          thread->GetSuspendCount() == 1) {
        // The thread will resume right after the broadcast.
        thread->ModifySuspendCount(self, -1, nullptr, false);
        ++runnable_thread_count;
      } else {
        other_threads.push_back(thread);
      }
    }
    Thread::resume_cond_->Broadcast(self);
  }

  collector->GetHeap()->ThreadFlipEnd(self);

  // Run the closure on the other threads and let them resume.
  {
    TimingLogger::ScopedTiming split3("FlipOtherThreads", collector->GetTimings());
    ReaderMutexLock mu(self, *Locks::mutator_lock_);
    for (const auto& thread : other_threads) {
      Closure* flip_func = thread->GetFlipFunction();
      if (flip_func != nullptr) {
        flip_func->Run(thread);
      }
    }
    // Run it for self.
    Closure* flip_func = self->GetFlipFunction();
    if (flip_func != nullptr) {
      flip_func->Run(self);
    }
  }

  // Resume other threads.
  {
    TimingLogger::ScopedTiming split4("ResumeOtherThreads", collector->GetTimings());
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    for (const auto& thread : other_threads) {
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }
    Thread::resume_cond_->Broadcast(self);
  }

  return runnable_thread_count + other_threads.size() + 1;  // +1 for self.
}
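
// Illustrative sketch (hypothetical names, not part of this file) of the closure pair the
// concurrent copying collector passes to FlipThreadRoots: thread_flip_visitor runs once per
// thread to switch that thread's roots to to-space, while flip_callback runs exactly once with
// all threads suspended to flip the global state.
//
//   class ThreadFlipVisitor : public Closure {
//     void Run(Thread* thread) OVERRIDE { /* Switch |thread|'s roots to to-space refs. */ }
//   };
//   class FlipCallback : public Closure {
//     void Run(Thread* self) OVERRIDE { /* Flip global GC state under exclusive mutator lock. */ }
//   };
//
//   ThreadFlipVisitor visitor;
//   FlipCallback callback;
//   thread_list->FlipThreadRoots(&visitor, &callback, concurrent_copying_collector);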

void ThreadList::SuspendAll(const char* cause, bool long_suspend) {
  Thread* self = Thread::Current();

  if (self != nullptr) {
    VLOG(threads) << *self << " SuspendAll for " << cause << " starting...";
  } else {
    VLOG(threads) << "Thread[null] SuspendAll for " << cause << " starting...";
  }
  {
    ScopedTrace trace("Suspending mutator threads");
    const uint64_t start_time = NanoTime();

    SuspendAllInternal(self, self);
    // All threads are known to have suspended (but a thread may still own the mutator lock).
    // Make sure this thread grabs exclusive access to the mutator lock and its protected data.
#if HAVE_TIMED_RWLOCK
    while (true) {
      if (Locks::mutator_lock_->ExclusiveLockWithTimeout(self,
                                                         NsToMs(thread_suspend_timeout_ns_),
                                                         0)) {
        break;
      } else if (!long_suspend_) {
        // Reading long_suspend without the mutator lock is slightly racy; in some rare cases this
        // could result in a thread suspend timeout.
        // Timeout if we wait more than thread_suspend_timeout_ns_ nanoseconds.
        UnsafeLogFatalForThreadSuspendAllTimeout();
      }
    }
#else
    Locks::mutator_lock_->ExclusiveLock(self);
#endif

    long_suspend_ = long_suspend;

    const uint64_t end_time = NanoTime();
    const uint64_t suspend_time = end_time - start_time;
    suspend_all_historam_.AdjustAndAddValue(suspend_time);
    if (suspend_time > kLongThreadSuspendThreshold) {
      LOG(WARNING) << "Suspending all threads took: " << PrettyDuration(suspend_time);
    }

    if (kDebugLocking) {
      // Debug check that all threads are suspended.
      AssertThreadsAreSuspended(self, self);
    }
  }
  ATRACE_BEGIN((std::string("Mutator threads suspended for ") + cause).c_str());

  if (self != nullptr) {
    VLOG(threads) << *self << " SuspendAll complete";
  } else {
    VLOG(threads) << "Thread[null] SuspendAll complete";
  }
}
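
// Illustrative sketch (not part of this file): most callers pair SuspendAll with ResumeAll via
// the ScopedSuspendAll RAII helper declared in thread_list.h, which keeps the two calls matched
// even on early returns:
//
//   {
//     ScopedSuspendAll ssa(__FUNCTION__);
//     // All mutator threads are suspended here; safe to mutate runtime-global state.
//   }  // ~ScopedSuspendAll() calls ResumeAll().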

// Ensures all threads running Java suspend and that those not running Java don't start.
// The debugger thread might be set to kRunnable for a short period of time after the
// SuspendAllInternal. This is safe because it will be set back to a suspended state before
// the SuspendAll returns.
void ThreadList::SuspendAllInternal(Thread* self,
                                    Thread* ignore1,
                                    Thread* ignore2,
                                    bool debug_suspend) {
  Locks::mutator_lock_->AssertNotExclusiveHeld(self);
  Locks::thread_list_lock_->AssertNotHeld(self);
  Locks::thread_suspend_count_lock_->AssertNotHeld(self);
  if (kDebugLocking && self != nullptr) {
    CHECK_NE(self->GetState(), kRunnable);
  }

  // First request that all threads suspend, then wait for them to suspend before
  // returning. This suspension scheme also relies on other behaviour:
  // 1. Threads cannot be deleted while they are suspended or have a suspend-
  //    request flag set - (see Unregister() below).
  // 2. When threads are created, they are created in a suspended state (actually
  //    kNative) and will never begin executing Java code without first checking
  //    the suspend-request flag.

  // The atomic counter for number of threads that need to pass the barrier.
  AtomicInteger pending_threads;
  uint32_t num_ignored = 0;
  if (ignore1 != nullptr) {
    ++num_ignored;
  }
  if (ignore2 != nullptr && ignore1 != ignore2) {
    ++num_ignored;
  }
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    ++suspend_all_count_;
    if (debug_suspend) {
      ++debug_suspend_all_count_;
    }
    pending_threads.StoreRelaxed(list_.size() - num_ignored);
    // Increment everybody's suspend count (except those that should be ignored).
    for (const auto& thread : list_) {
      if (thread == ignore1 || thread == ignore2) {
        continue;
      }
      VLOG(threads) << "requesting thread suspend: " << *thread;
      thread->ModifySuspendCount(self, +1, &pending_threads, debug_suspend);

      // Must install the pending_threads counter first, then check thread->IsSuspended() and
      // clear the counter. Otherwise there's a race with Thread::TransitionFromRunnableToSuspended()
      // that can lead a thread to miss a call to PassActiveSuspendBarriers().
      if (thread->IsSuspended()) {
        // Only clear the counter for the current thread.
        thread->ClearSuspendBarrier(&pending_threads);
        pending_threads.FetchAndSubSequentiallyConsistent(1);
      }
    }
  }

  // Wait for the barrier to be passed by all runnable threads. This wait
  // is done with a timeout so that we can detect problems.
#if ART_USE_FUTEXES
  timespec wait_timeout;
  InitTimeSpec(false, CLOCK_MONOTONIC, NsToMs(thread_suspend_timeout_ns_), 0, &wait_timeout);
#endif
  const uint64_t start_time = NanoTime();
  while (true) {
    int32_t cur_val = pending_threads.LoadRelaxed();
    if (LIKELY(cur_val > 0)) {
#if ART_USE_FUTEXES
      if (futex(pending_threads.Address(), FUTEX_WAIT, cur_val, &wait_timeout, nullptr, 0) != 0) {
        // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
        if ((errno != EAGAIN) && (errno != EINTR)) {
          if (errno == ETIMEDOUT) {
            LOG(kIsDebugBuild ? ::android::base::FATAL : ::android::base::ERROR)
                << "Timed out waiting for threads to suspend, waited for "
                << PrettyDuration(NanoTime() - start_time);
          } else {
            PLOG(FATAL) << "futex wait failed for SuspendAllInternal()";
          }
        }
      }  // else re-check pending_threads in the next iteration (this may be a spurious wake-up).
#else
      // Spin wait. This is likely to be slow, but on most architectures ART_USE_FUTEXES is set.
      UNUSED(start_time);
#endif
    } else {
      CHECK_EQ(cur_val, 0);
      break;
    }
  }
}
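
// Illustrative sketch (not part of this file; based on Thread::PassActiveSuspendBarriers(), which
// lives in thread.cc) of the other half of the handshake above: each thread that observes its
// suspend request conceptually does the equivalent of
//
//   int32_t remaining = pending_threads->FetchAndSubSequentiallyConsistent(1) - 1;
//   if (remaining == 0) {
//     futex(pending_threads->Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);  // Wake the waiter.
//   }
//
// so the futex wait in SuspendAllInternal returns once the last runnable thread has suspended.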

void ThreadList::ResumeAll() {
  Thread* self = Thread::Current();

  if (self != nullptr) {
    VLOG(threads) << *self << " ResumeAll starting";
  } else {
    VLOG(threads) << "Thread[null] ResumeAll starting";
  }

  ATRACE_END();

  ScopedTrace trace("Resuming mutator threads");

  if (kDebugLocking) {
    // Debug check that all threads are suspended.
    AssertThreadsAreSuspended(self, self);
  }

  long_suspend_ = false;

  Locks::mutator_lock_->ExclusiveUnlock(self);
  {
    MutexLock mu(self, *Locks::thread_list_lock_);
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    // Update global suspend all state for attaching threads.
    --suspend_all_count_;
    // Decrement the suspend counts for all threads.
    for (const auto& thread : list_) {
      if (thread == self) {
        continue;
      }
      thread->ModifySuspendCount(self, -1, nullptr, false);
    }

    // Broadcast a notification to all suspended threads, some or all of
    // which may choose to wake up. No need to wait for them.
    if (self != nullptr) {
      VLOG(threads) << *self << " ResumeAll waking others";
    } else {
      VLOG(threads) << "Thread[null] ResumeAll waking others";
    }
    Thread::resume_cond_->Broadcast(self);
  }

  if (self != nullptr) {
    VLOG(threads) << *self << " ResumeAll complete";
  } else {
    VLOG(threads) << "Thread[null] ResumeAll complete";
  }
}

void ThreadList::Resume(Thread* thread, bool for_debugger) {
  // This assumes there was an ATRACE_BEGIN when we suspended the thread.
  ATRACE_END();

  Thread* self = Thread::Current();
  DCHECK_NE(thread, self);
  VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") starting..."
      << (for_debugger ? " (debugger)" : "");

  {
    // To check Contains.
    MutexLock mu(self, *Locks::thread_list_lock_);
    // To check IsSuspended.
    MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
    DCHECK(thread->IsSuspended());
    if (!Contains(thread)) {
      // We only expect threads within the thread list to have been suspended; otherwise we can't
      // stop such threads from deleting themselves.
      LOG(ERROR) << "Resume(" << reinterpret_cast<void*>(thread)
                 << ") thread not within thread list";
      return;
    }
    thread->ModifySuspendCount(self, -1, nullptr, for_debugger);
  }

  {
    VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") waking others";
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    Thread::resume_cond_->Broadcast(self);
  }

  VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") complete";
}

static void ThreadSuspendByPeerWarning(Thread* self,
                                       LogSeverity severity,
                                       const char* message,
                                       jobject peer) {
  JNIEnvExt* env = self->GetJniEnv();
  ScopedLocalRef<jstring>
      scoped_name_string(env, static_cast<jstring>(env->GetObjectField(
          peer, WellKnownClasses::java_lang_Thread_name)));
  ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
  if (scoped_name_chars.c_str() == nullptr) {
    LOG(severity) << message << ": " << peer;
    env->ExceptionClear();
  } else {
    LOG(severity) << message << ": " << peer << ":" << scoped_name_chars.c_str();
  }
}

Thread* ThreadList::SuspendThreadByPeer(jobject peer,
                                        bool request_suspension,
                                        bool debug_suspension,
                                        bool* timed_out) {
  const uint64_t start_time = NanoTime();
  useconds_t sleep_us = kThreadSuspendInitialSleepUs;
  *timed_out = false;
  Thread* const self = Thread::Current();
  Thread* suspended_thread = nullptr;
  VLOG(threads) << "SuspendThreadByPeer starting";
  while (true) {
    Thread* thread;
    {
      // Note: this will transition to runnable and potentially suspend. We ensure only one thread
      // is requesting another suspend, to avoid deadlock, by requiring this function be called
      // holding Locks::thread_list_suspend_thread_lock_. It's important this thread suspend rather
      // than request thread suspension, to avoid potential cycles in threads requesting each other
      // suspend.
      ScopedObjectAccess soa(self);
      MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
      thread = Thread::FromManagedThread(soa, peer);
      if (thread == nullptr) {
        if (suspended_thread != nullptr) {
          MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
          // If we incremented the suspend count but the thread reset its peer, we need to
          // re-decrement it since it is shutting down and may deadlock the runtime in
          // ThreadList::WaitForOtherNonDaemonThreadsToExit.
          suspended_thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
        }
        ThreadSuspendByPeerWarning(self,
                                   ::android::base::WARNING,
                                   "No such thread for suspend",
                                   peer);
        return nullptr;
      }
      if (!Contains(thread)) {
        CHECK(suspended_thread == nullptr);
        VLOG(threads) << "SuspendThreadByPeer failed for unattached thread: "
            << reinterpret_cast<void*>(thread);
        return nullptr;
      }
      VLOG(threads) << "SuspendThreadByPeer found thread: " << *thread;
      {
        MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
        if (request_suspension) {
          if (self->GetSuspendCount() > 0) {
            // We hold the suspend count lock but another thread is trying to suspend us. It's not
            // safe to try to suspend another thread in case we get a cycle. Start the loop again,
            // which will allow this thread to be suspended.
            continue;
          }
          CHECK(suspended_thread == nullptr);
          suspended_thread = thread;
          suspended_thread->ModifySuspendCount(self, +1, nullptr, debug_suspension);
          request_suspension = false;
        } else {
          // If the caller isn't requesting suspension, a suspension should have already occurred.
          CHECK_GT(thread->GetSuspendCount(), 0);
        }
        // IsSuspended on the current thread will fail as the current thread is changed into
        // Runnable above. As the suspend count is now raised, if this is the current thread
        // it will self suspend on transition to Runnable, making it hard to work with. It's
        // simpler to just explicitly handle the current thread in the callers to this code.
        CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
        // count, or else we've waited and it has self suspended) or is the current thread, we're
        // done.
        if (thread->IsSuspended()) {
          VLOG(threads) << "SuspendThreadByPeer thread suspended: " << *thread;
          if (ATRACE_ENABLED()) {
            std::string name;
            thread->GetThreadName(name);
            ATRACE_BEGIN(StringPrintf("SuspendThreadByPeer suspended %s for peer=%p", name.c_str(),
                                      peer).c_str());
          }
          return thread;
        }
        const uint64_t total_delay = NanoTime() - start_time;
        if (total_delay >= thread_suspend_timeout_ns_) {
          ThreadSuspendByPeerWarning(self,
                                     ::android::base::FATAL,
                                     "Thread suspension timed out",
                                     peer);
          if (suspended_thread != nullptr) {
            CHECK_EQ(suspended_thread, thread);
            suspended_thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
          }
          *timed_out = true;
          return nullptr;
        } else if (sleep_us == 0 &&
                   total_delay > static_cast<uint64_t>(kThreadSuspendMaxYieldUs) * 1000) {
          // We have spun for kThreadSuspendMaxYieldUs time, switch to sleeps to prevent
          // excessive CPU usage.
          sleep_us = kThreadSuspendMaxYieldUs / 2;
        }
      }
      // Release locks and come out of runnable state.
    }
    VLOG(threads) << "SuspendThreadByPeer waiting to allow thread chance to suspend";
    ThreadSuspendSleep(sleep_us);
    // This may stay at 0 if sleep_us == 0, but this is WAI since we want to avoid using usleep at
    // all if possible. This shouldn't be an issue since time to suspend should always be small.
    sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);
  }
}
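
// Illustrative sketch (not part of this file) of how a caller such as the debugger uses
// SuspendThreadByPeer, pairing a successful suspension with Resume():
//
//   bool timed_out;
//   Thread* thread = thread_list->SuspendThreadByPeer(
//       peer, /* request_suspension */ true, /* debug_suspension */ true, &timed_out);
//   if (thread != nullptr) {
//     // |thread| stays suspended until we resume it.
//     thread_list->Resume(thread, /* for_debugger */ true);
//   }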
899
Mathieu Chartierb56200b2015-10-29 10:41:51 -0700900static void ThreadSuspendByThreadIdWarning(LogSeverity severity,
901 const char* message,
Ian Rogersc7dd2952014-10-21 23:31:19 -0700902 uint32_t thread_id) {
903 LOG(severity) << StringPrintf("%s: %d", message, thread_id);
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700904}
905
Mathieu Chartierb56200b2015-10-29 10:41:51 -0700906Thread* ThreadList::SuspendThreadByThreadId(uint32_t thread_id,
907 bool debug_suspension,
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700908 bool* timed_out) {
Mathieu Chartier3a958aa2015-02-04 12:52:34 -0800909 const uint64_t start_time = NanoTime();
Mathieu Chartier99143862015-02-03 14:26:46 -0800910 useconds_t sleep_us = kThreadSuspendInitialSleepUs;
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700911 *timed_out = false;
Mathieu Chartier5f51d4b2013-12-03 14:24:05 -0800912 Thread* suspended_thread = nullptr;
Mathieu Chartier99143862015-02-03 14:26:46 -0800913 Thread* const self = Thread::Current();
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700914 CHECK_NE(thread_id, kInvalidThreadId);
Brian Carlstromba32de42014-08-27 23:43:46 -0700915 VLOG(threads) << "SuspendThreadByThreadId starting";
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700916 while (true) {
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700917 {
Ian Rogersf3d874c2014-07-17 18:52:42 -0700918 // Note: this will transition to runnable and potentially suspend. We ensure only one thread
919 // is requesting another suspend, to avoid deadlock, by requiring this function be called
920 // holding Locks::thread_list_suspend_thread_lock_. Its important this thread suspend rather
921 // than request thread suspension, to avoid potential cycles in threads requesting each other
922 // suspend.
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700923 ScopedObjectAccess soa(self);
Andreas Gampe277ccbd2014-11-03 21:36:10 -0800924 MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
Ian Rogersf3d874c2014-07-17 18:52:42 -0700925 Thread* thread = nullptr;
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700926 for (const auto& it : list_) {
927 if (it->GetThreadId() == thread_id) {
928 thread = it;
929 break;
930 }
931 }
Mathieu Chartier5f51d4b2013-12-03 14:24:05 -0800932 if (thread == nullptr) {
933 CHECK(suspended_thread == nullptr) << "Suspended thread " << suspended_thread
934 << " no longer in thread list";
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700935 // There's a race in inflating a lock and the owner giving up ownership and then dying.
Andreas Gampe3fec9ac2016-09-13 10:47:28 -0700936 ThreadSuspendByThreadIdWarning(::android::base::WARNING,
937 "No such thread id for suspend",
938 thread_id);
Brian Carlstromba32de42014-08-27 23:43:46 -0700939 return nullptr;
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700940 }
Brian Carlstromba32de42014-08-27 23:43:46 -0700941 VLOG(threads) << "SuspendThreadByThreadId found thread: " << *thread;
942 DCHECK(Contains(thread));
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700943 {
Andreas Gampe277ccbd2014-11-03 21:36:10 -0800944 MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
Mathieu Chartier5f51d4b2013-12-03 14:24:05 -0800945 if (suspended_thread == nullptr) {
Ian Rogers4ad5cd32014-11-11 23:08:07 -0800946 if (self->GetSuspendCount() > 0) {
947 // We hold the suspend count lock but another thread is trying to suspend us. Its not
948 // safe to try to suspend another thread in case we get a cycle. Start the loop again
949 // which will allow this thread to be suspended.
950 continue;
951 }
Yu Lieac44242015-06-29 10:50:03 +0800952 thread->ModifySuspendCount(self, +1, nullptr, debug_suspension);
Mathieu Chartier5f51d4b2013-12-03 14:24:05 -0800953 suspended_thread = thread;
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700954 } else {
Mathieu Chartier5f51d4b2013-12-03 14:24:05 -0800955 CHECK_EQ(suspended_thread, thread);
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700956 // If the caller isn't requesting suspension, a suspension should have already occurred.
957 CHECK_GT(thread->GetSuspendCount(), 0);
958 }
959 // IsSuspended on the current thread will fail as the current thread is changed into
960 // Runnable above. As the suspend count is now raised if this is the current thread
961 // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
962 // to just explicitly handle the current thread in the callers to this code.
963 CHECK_NE(thread, self) << "Attempt to suspend the current thread for the debugger";
964 // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
965 // count, or else we've waited and it has self suspended) or is the current thread, we're
966 // done.
967 if (thread->IsSuspended()) {
Mathieu Chartierf0dc8b52014-12-17 10:13:30 -0800968 if (ATRACE_ENABLED()) {
969 std::string name;
970 thread->GetThreadName(name);
971 ATRACE_BEGIN(StringPrintf("SuspendThreadByThreadId suspended %s id=%d",
972 name.c_str(), thread_id).c_str());
973 }
Brian Carlstromba32de42014-08-27 23:43:46 -0700974 VLOG(threads) << "SuspendThreadByThreadId thread suspended: " << *thread;
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700975 return thread;
976 }
Mathieu Chartier99143862015-02-03 14:26:46 -0800977 const uint64_t total_delay = NanoTime() - start_time;
Mathieu Chartier3fceaf52017-01-22 13:33:40 -0800978 if (total_delay >= thread_suspend_timeout_ns_) {
Andreas Gampe3fec9ac2016-09-13 10:47:28 -0700979 ThreadSuspendByThreadIdWarning(::android::base::WARNING,
980 "Thread suspension timed out",
981 thread_id);
Mathieu Chartier5f51d4b2013-12-03 14:24:05 -0800982 if (suspended_thread != nullptr) {
Yu Lieac44242015-06-29 10:50:03 +0800983 thread->ModifySuspendCount(soa.Self(), -1, nullptr, debug_suspension);
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700984 }
985 *timed_out = true;
Brian Carlstromba32de42014-08-27 23:43:46 -0700986 return nullptr;
Mathieu Chartier99143862015-02-03 14:26:46 -0800987 } else if (sleep_us == 0 &&
988 total_delay > static_cast<uint64_t>(kThreadSuspendMaxYieldUs) * 1000) {
989 // We have spun for kThreadSuspendMaxYieldUs time, switch to sleeps to prevent
990 // excessive CPU usage.
991 sleep_us = kThreadSuspendMaxYieldUs / 2;
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700992 }
993 }
994 // Release locks and come out of runnable state.
995 }
Mathieu Chartier99143862015-02-03 14:26:46 -0800996 VLOG(threads) << "SuspendThreadByThreadId waiting to allow thread chance to suspend";
997 ThreadSuspendSleep(sleep_us);
998 sleep_us = std::min(sleep_us * 2, kThreadSuspendMaxSleepUs);
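    // Backoff sketch, assuming the constants defined at the top of this file: yield with
    // 0us sleeps until ~kThreadSuspendMaxYieldUs (3ms) of total delay has accrued, then
    // sleep 1500us, 3000us, 5000us, 5000us, ... capped at kThreadSuspendMaxSleepUs.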
Ian Rogersd9c4fc92013-10-01 19:45:43 -0700999 }
1000}
1001
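// Note: this walks list_; as with the other list_ traversals in this file, the caller is
// expected to hold Locks::thread_list_lock_ (a contract note, not enforced in this file).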
Mathieu Chartier61b3cd42016-04-18 11:43:29 -07001002Thread* ThreadList::FindThreadByThreadId(uint32_t thread_id) {
Ian Rogersd9c4fc92013-10-01 19:45:43 -07001003 for (const auto& thread : list_) {
Mathieu Chartier61b3cd42016-04-18 11:43:29 -07001004 if (thread->GetThreadId() == thread_id) {
Ian Rogersd9c4fc92013-10-01 19:45:43 -07001005 return thread;
1006 }
1007 }
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001008 return nullptr;
Ian Rogersd9c4fc92013-10-01 19:45:43 -07001009}
1010
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001011void ThreadList::SuspendAllForDebugger() {
1012 Thread* self = Thread::Current();
1013 Thread* debug_thread = Dbg::GetDebugThread();
1014
1015 VLOG(threads) << *self << " SuspendAllForDebugger starting...";
1016
Yu Lieac44242015-06-29 10:50:03 +08001017 SuspendAllInternal(self, self, debug_thread, true);
Ian Rogers66aee5c2012-08-15 17:17:47 -07001018 // Block on the mutator lock until all Runnable threads release their share of access, then
1019 // immediately unlock again.
1020#if HAVE_TIMED_RWLOCK
1021 // Timeout if we wait more than 30 seconds.
Ian Rogersc604d732012-10-14 16:09:54 -07001022 if (!Locks::mutator_lock_->ExclusiveLockWithTimeout(self, 30 * 1000, 0)) {
Sebastien Hertzbae182c2013-12-17 10:42:03 +01001023 UnsafeLogFatalForThreadSuspendAllTimeout();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001024 } else {
Ian Rogers81d425b2012-09-27 16:03:43 -07001025 Locks::mutator_lock_->ExclusiveUnlock(self);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001026 }
Ian Rogers66aee5c2012-08-15 17:17:47 -07001027#else
Ian Rogers81d425b2012-09-27 16:03:43 -07001028 Locks::mutator_lock_->ExclusiveLock(self);
1029 Locks::mutator_lock_->ExclusiveUnlock(self);
Ian Rogers66aee5c2012-08-15 17:17:47 -07001030#endif
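// The exclusive lock/unlock "pulse" above acts as a barrier: ExclusiveLock cannot return
// until every previously Runnable thread has released its share of the mutator lock and
// suspended (see the disabled assertion below for a known race in verifying this).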
Mathieu Chartier9450c6c2015-11-07 11:55:23 -08001031 // Disabled for the following race condition:
1032 // Thread 1 calls SuspendAllForDebugger, gets preempted after pulsing the mutator lock.
1033 // Thread 2 calls SuspendAll and SetStateUnsafe (perhaps from Dbg::Disconnected).
1034 // Thread 1 fails assertion that all threads are suspended due to thread 2 being in a runnable
1035 // state (from SetStateUnsafe).
1036 // AssertThreadsAreSuspended(self, self, debug_thread);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001037
Sebastien Hertzed2be172014-08-19 15:33:43 +02001038 VLOG(threads) << *self << " SuspendAllForDebugger complete";
Elliott Hughes01158d72011-09-19 19:47:10 -07001039}
1040
Elliott Hughes475fc232011-10-25 15:00:35 -07001041void ThreadList::SuspendSelfForDebugger() {
Sebastien Hertz1558b572015-02-25 15:05:59 +01001042 Thread* const self = Thread::Current();
1043 self->SetReadyForDebugInvoke(true);
Elliott Hughes01158d72011-09-19 19:47:10 -07001044
Elliott Hughes475fc232011-10-25 15:00:35 -07001045 // The debugger thread must not suspend itself due to debugger activity!
1046 Thread* debug_thread = Dbg::GetDebugThread();
Elliott Hughes475fc232011-10-25 15:00:35 -07001047 CHECK(self != debug_thread);
jeffhaoa77f0f62012-12-05 17:19:31 -08001048 CHECK_NE(self->GetState(), kRunnable);
1049 Locks::mutator_lock_->AssertNotHeld(self);
Elliott Hughes475fc232011-10-25 15:00:35 -07001050
Sebastien Hertzcbc50642015-06-01 17:33:12 +02001051 // The debugger may have detached while we were executing an invoke request. In that case, we
1052 // must not suspend ourselves.
1053 DebugInvokeReq* pReq = self->GetInvokeReq();
1054 const bool skip_thread_suspension = (pReq != nullptr && !Dbg::IsDebuggerActive());
1055 if (!skip_thread_suspension) {
jeffhaoa77f0f62012-12-05 17:19:31 -08001056 // Collisions with other suspends aren't really interesting. We want
1057 // to ensure that we're the only one fiddling with the suspend count
1058 // though.
1059 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
Yu Lieac44242015-06-29 10:50:03 +08001060 self->ModifySuspendCount(self, +1, nullptr, true);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001061 CHECK_GT(self->GetSuspendCount(), 0);
Sebastien Hertzcbc50642015-06-01 17:33:12 +02001062
1063 VLOG(threads) << *self << " self-suspending (debugger)";
1064 } else {
1065 // We must no longer be subject to debugger suspension.
1066 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1067 CHECK_EQ(self->GetDebugSuspendCount(), 0) << "Debugger detached without resuming us";
1068
1069 VLOG(threads) << *self << " not self-suspending because debugger detached during invoke";
jeffhaoa77f0f62012-12-05 17:19:31 -08001070 }
Elliott Hughes475fc232011-10-25 15:00:35 -07001071
Sebastien Hertzcbc50642015-06-01 17:33:12 +02001072 // If the debugger requested an invoke, we need to send the reply and clear the request.
Sebastien Hertz1558b572015-02-25 15:05:59 +01001073 if (pReq != nullptr) {
Sebastien Hertzcbc50642015-06-01 17:33:12 +02001074 Dbg::FinishInvokeMethod(pReq);
Sebastien Hertz1558b572015-02-25 15:05:59 +01001075 self->ClearDebugInvokeReq();
Sebastien Hertzcbc50642015-06-01 17:33:12 +02001076 pReq = nullptr; // The object has been deleted; clear the pointer for safety.
Sebastien Hertz21e729c2014-02-18 14:16:00 +01001077 }
1078
Elliott Hughes475fc232011-10-25 15:00:35 -07001079 // Tell JDWP that we've completed suspension. The JDWP thread can't
1080 // tell us to resume before we're fully asleep because we hold the
1081 // suspend count lock.
1082 Dbg::ClearWaitForEventThread();
1083
jeffhaoa77f0f62012-12-05 17:19:31 -08001084 {
1085 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001086 while (self->GetSuspendCount() != 0) {
jeffhaoa77f0f62012-12-05 17:19:31 -08001087 Thread::resume_cond_->Wait(self);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001088 if (self->GetSuspendCount() != 0) {
jeffhaoa77f0f62012-12-05 17:19:31 -08001089 // The condition was signaled but we're still suspended. This
Sebastien Hertzf272af42014-09-18 10:20:42 +02001090 // can happen when we suspend then resume all threads to
1091 // update instrumentation or compute monitor info. This can
1092 // also happen if the debugger lets go while a SIGQUIT thread
jeffhaoa77f0f62012-12-05 17:19:31 -08001093 // dump event is pending (assuming SignalCatcher was resumed for
1094 // just long enough to try to grab the thread-suspend lock).
Sebastien Hertzf272af42014-09-18 10:20:42 +02001095 VLOG(jdwp) << *self << " still suspended after undo "
1096 << "(suspend count=" << self->GetSuspendCount() << ", "
1097 << "debug suspend count=" << self->GetDebugSuspendCount() << ")";
jeffhaoa77f0f62012-12-05 17:19:31 -08001098 }
Elliott Hughes475fc232011-10-25 15:00:35 -07001099 }
Ian Rogersdd7624d2014-03-14 17:43:00 -07001100 CHECK_EQ(self->GetSuspendCount(), 0);
Elliott Hughes475fc232011-10-25 15:00:35 -07001101 }
jeffhaoa77f0f62012-12-05 17:19:31 -08001102
Sebastien Hertz1558b572015-02-25 15:05:59 +01001103 self->SetReadyForDebugInvoke(false);
Elliott Hughes1f729aa2012-03-02 13:55:41 -08001104 VLOG(threads) << *self << " self-reviving (debugger)";
Elliott Hughes475fc232011-10-25 15:00:35 -07001105}
1106
Sebastien Hertz253fa552014-10-14 17:27:15 +02001107void ThreadList::ResumeAllForDebugger() {
1108 Thread* self = Thread::Current();
1109 Thread* debug_thread = Dbg::GetDebugThread();
Sebastien Hertz253fa552014-10-14 17:27:15 +02001110
1111 VLOG(threads) << *self << " ResumeAllForDebugger starting...";
1112
1113 // Threads can't resume if we exclusively hold the mutator lock.
1114 Locks::mutator_lock_->AssertNotExclusiveHeld(self);
1115
1116 {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001117 MutexLock thread_list_mu(self, *Locks::thread_list_lock_);
Sebastien Hertz253fa552014-10-14 17:27:15 +02001118 {
Andreas Gampe277ccbd2014-11-03 21:36:10 -08001119 MutexLock suspend_count_mu(self, *Locks::thread_suspend_count_lock_);
Sebastien Hertz253fa552014-10-14 17:27:15 +02001120 // Update global suspend all state for attaching threads.
1121 DCHECK_GE(suspend_all_count_, debug_suspend_all_count_);
Sebastien Hertzf9d233d2015-01-09 14:51:41 +01001122 if (debug_suspend_all_count_ > 0) {
Sebastien Hertz253fa552014-10-14 17:27:15 +02001123 --suspend_all_count_;
1124 --debug_suspend_all_count_;
Sebastien Hertz253fa552014-10-14 17:27:15 +02001125 } else {
1126 // We've been asked to resume all threads without being asked to
Sebastien Hertzf9d233d2015-01-09 14:51:41 +01001127 // suspend them all before. That may happen if a debugger tries
1128 // to resume some suspended threads (with suspend count == 1)
1129 // at once with a VirtualMachine.Resume command. Let's print a
1130 // warning.
Sebastien Hertz253fa552014-10-14 17:27:15 +02001131 LOG(WARNING) << "Debugger attempted to resume all threads without "
1132 << "having suspended them all before.";
1133 }
Sebastien Hertzf9d233d2015-01-09 14:51:41 +01001134 // Decrement everybody's suspend count (except our own and the debug thread's).
1135 for (const auto& thread : list_) {
1136 if (thread == self || thread == debug_thread) {
1137 continue;
1138 }
1139 if (thread->GetDebugSuspendCount() == 0) {
1140 // This thread may have been individually resumed with ThreadReference.Resume.
1141 continue;
1142 }
1143 VLOG(threads) << "requesting thread resume: " << *thread;
Yu Lieac44242015-06-29 10:50:03 +08001144 thread->ModifySuspendCount(self, -1, nullptr, true);
Sebastien Hertzf9d233d2015-01-09 14:51:41 +01001145 }
Sebastien Hertz253fa552014-10-14 17:27:15 +02001146 }
1147 }
1148
Sebastien Hertzf9d233d2015-01-09 14:51:41 +01001149 {
Sebastien Hertz253fa552014-10-14 17:27:15 +02001150 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1151 Thread::resume_cond_->Broadcast(self);
1152 }
1153
1154 VLOG(threads) << *self << " ResumeAllForDebugger complete";
1155}
1156
Elliott Hughes234ab152011-10-26 14:02:26 -07001157void ThreadList::UndoDebuggerSuspensions() {
1158 Thread* self = Thread::Current();
1159
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08001160 VLOG(threads) << *self << " UndoDebuggerSuspensions starting";
Elliott Hughes234ab152011-10-26 14:02:26 -07001161
1162 {
Ian Rogers81d425b2012-09-27 16:03:43 -07001163 MutexLock mu(self, *Locks::thread_list_lock_);
1164 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001165 // Update global suspend all state for attaching threads.
1166 suspend_all_count_ -= debug_suspend_all_count_;
1167 debug_suspend_all_count_ = 0;
1168 // Update running threads.
Mathieu Chartier02e25112013-08-14 16:14:24 -07001169 for (const auto& thread : list_) {
Ian Rogersdd7624d2014-03-14 17:43:00 -07001170 if (thread == self || thread->GetDebugSuspendCount() == 0) {
Elliott Hughes234ab152011-10-26 14:02:26 -07001171 continue;
1172 }
Yu Lieac44242015-06-29 10:50:03 +08001173 thread->ModifySuspendCount(self, -thread->GetDebugSuspendCount(), nullptr, true);
Elliott Hughes234ab152011-10-26 14:02:26 -07001174 }
1175 }
1176
1177 {
Ian Rogers81d425b2012-09-27 16:03:43 -07001178 MutexLock mu(self, *Locks::thread_suspend_count_lock_);
Ian Rogersc604d732012-10-14 16:09:54 -07001179 Thread::resume_cond_->Broadcast(self);
Elliott Hughes234ab152011-10-26 14:02:26 -07001180 }
1181
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08001182 VLOG(threads) << "UndoDebuggerSuspensions(" << *self << ") complete";
Elliott Hughes234ab152011-10-26 14:02:26 -07001183}
1184
Elliott Hughese52e49b2012-04-02 16:05:44 -07001185void ThreadList::WaitForOtherNonDaemonThreadsToExit() {
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08001186 ScopedTrace trace(__PRETTY_FUNCTION__);
Ian Rogers81d425b2012-09-27 16:03:43 -07001187 Thread* self = Thread::Current();
1188 Locks::mutator_lock_->AssertNotHeld(self);
Mathieu Chartier91e56692015-03-03 13:51:04 -08001189 while (true) {
Ian Rogers120f1c72012-09-28 17:17:10 -07001190 {
1191 // No more threads can be born after we start to shut down.
1192 MutexLock mu(self, *Locks::runtime_shutdown_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001193 CHECK(Runtime::Current()->IsShuttingDownLocked());
Ian Rogers120f1c72012-09-28 17:17:10 -07001194 CHECK_EQ(Runtime::Current()->NumberOfThreadsBeingBorn(), 0U);
1195 }
Ian Rogers120f1c72012-09-28 17:17:10 -07001196 MutexLock mu(self, *Locks::thread_list_lock_);
Mathieu Chartier91e56692015-03-03 13:51:04 -08001197 // Also wait for any threads that are unregistering to finish. This is required so that no
1198 // threads access the thread list after it is deleted. TODO: This may not work for user daemon
1199 // threads since they could unregister at the wrong time.
1200 bool done = unregistering_count_ == 0;
1201 if (done) {
1202 for (const auto& thread : list_) {
1203 if (thread != self && !thread->IsDaemon()) {
1204 done = false;
1205 break;
1206 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001207 }
1208 }
Mathieu Chartier91e56692015-03-03 13:51:04 -08001209 if (done) {
1210 break;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001211 }
Mathieu Chartier91e56692015-03-03 13:51:04 -08001212 // Wait for another thread to exit before re-checking.
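    // (thread_exit_cond_ is broadcast at the end of Unregister(), below, after a thread has
    // been removed from list_ and unregistering_count_ has been decremented.)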
1213 Locks::thread_exit_cond_->Wait(self);
1214 }
Elliott Hughes038a8062011-09-18 14:12:41 -07001215}
1216
Mathieu Chartier4d87df62016-01-07 15:14:19 -08001217void ThreadList::SuspendAllDaemonThreadsForShutdown() {
Mathieu Chartier32ce2ad2016-03-04 14:58:03 -08001218 ScopedTrace trace(__PRETTY_FUNCTION__);
Ian Rogers81d425b2012-09-27 16:03:43 -07001219 Thread* self = Thread::Current();
Mathieu Chartier62597d12016-01-11 10:19:06 -08001220 size_t daemons_left = 0;
Nicolas Geoffrayaa45daa2016-06-20 15:58:32 +01001221 {
1222 // Tell all the daemons it's time to suspend.
1223 MutexLock mu(self, *Locks::thread_list_lock_);
Ian Rogers81d425b2012-09-27 16:03:43 -07001224 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
Mathieu Chartier02e25112013-08-14 16:14:24 -07001225 for (const auto& thread : list_) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001226 // This is only run after all non-daemon threads have exited, so the remainder should all be
1227 // daemons.
Ian Rogers7e762862012-10-22 15:45:08 -07001228 CHECK(thread->IsDaemon()) << *thread;
Ian Rogers81d425b2012-09-27 16:03:43 -07001229 if (thread != self) {
Yu Lieac44242015-06-29 10:50:03 +08001230 thread->ModifySuspendCount(self, +1, nullptr, false);
Mathieu Chartier62597d12016-01-11 10:19:06 -08001231 ++daemons_left;
Elliott Hughese52e49b2012-04-02 16:05:44 -07001232 }
Mathieu Chartier4d87df62016-01-07 15:14:19 -08001233 // We are shutting down the runtime, so set the JNI functions of all the JNIEnvs to be
1234 // the sleep-forever ones.
1235 thread->GetJniEnv()->SetFunctionsToRuntimeShutdownFunctions();
Elliott Hughes038a8062011-09-18 14:12:41 -07001236 }
1237 }
Mathieu Chartier62597d12016-01-11 10:19:06 -08001238 // If we have any daemons left, wait 200ms to give them a chance to move past points where
1239 // they are about to access runtime state while not yet in a runnable state. Examples:
1240 // Monitor code or waking up from a condition variable. TODO: Try and see if there is a
1241 // better way to wait for daemon threads to be in a blocked state.
1242 if (daemons_left > 0) {
1243 static constexpr size_t kDaemonSleepTime = 200 * 1000;
1244 usleep(kDaemonSleepTime);
1245 }
Elliott Hughes038a8062011-09-18 14:12:41 -07001246 // Give the threads a chance to suspend, complaining if they're slow.
1247 bool have_complained = false;
Mathieu Chartierba098ba2016-01-07 09:31:33 -08001248 static constexpr size_t kTimeoutMicroseconds = 2000 * 1000;
1249 static constexpr size_t kSleepMicroseconds = 1000;
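  // Together these poll for up to 2 seconds, re-checking thread states every 1ms.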
1250 for (size_t i = 0; i < kTimeoutMicroseconds / kSleepMicroseconds; ++i) {
Elliott Hughes038a8062011-09-18 14:12:41 -07001251 bool all_suspended = true;
Nicolas Geoffrayaa45daa2016-06-20 15:58:32 +01001252 {
1253 MutexLock mu(self, *Locks::thread_list_lock_);
1254 for (const auto& thread : list_) {
1255 if (thread != self && thread->GetState() == kRunnable) {
1256 if (!have_complained) {
1257 LOG(WARNING) << "daemon thread not yet suspended: " << *thread;
1258 have_complained = true;
1259 }
1260 all_suspended = false;
Elliott Hughes038a8062011-09-18 14:12:41 -07001261 }
Elliott Hughes038a8062011-09-18 14:12:41 -07001262 }
1263 }
1264 if (all_suspended) {
1265 return;
1266 }
Mathieu Chartierba098ba2016-01-07 09:31:33 -08001267 usleep(kSleepMicroseconds);
Elliott Hughes038a8062011-09-18 14:12:41 -07001268 }
Mathieu Chartierba098ba2016-01-07 09:31:33 -08001269 LOG(WARNING) << "timed out suspending all daemon threads";
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001270}
Mathieu Chartierfbc31082016-01-24 11:59:56 -08001271
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001272void ThreadList::Register(Thread* self) {
1273 DCHECK_EQ(self, Thread::Current());
1274
1275 if (VLOG_IS_ON(threads)) {
1276 std::ostringstream oss;
1277 self->ShortDump(oss); // We don't hold the mutator_lock_ yet and so cannot call Dump.
Ian Rogers5a9ba012014-05-19 13:28:52 -07001278 LOG(INFO) << "ThreadList::Register() " << *self << "\n" << oss.str();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001279 }
1280
1281 // Atomically add self to the thread list and make its thread_suspend_count_ reflect ongoing
1282 // SuspendAll requests.
Ian Rogers81d425b2012-09-27 16:03:43 -07001283 MutexLock mu(self, *Locks::thread_list_lock_);
1284 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001285 CHECK_GE(suspend_all_count_, debug_suspend_all_count_);
Ian Rogers2966e132014-04-02 08:34:36 -07001286 // Modify suspend count in increments of 1 to maintain invariants in ModifySuspendCount. While
1287 // this isn't particularly efficient, the suspend counts are most commonly 0 or 1.
1288 for (int delta = debug_suspend_all_count_; delta > 0; delta--) {
Yu Lieac44242015-06-29 10:50:03 +08001289 self->ModifySuspendCount(self, +1, nullptr, true);
Ian Rogersdd7624d2014-03-14 17:43:00 -07001290 }
Ian Rogers2966e132014-04-02 08:34:36 -07001291 for (int delta = suspend_all_count_ - debug_suspend_all_count_; delta > 0; delta--) {
Yu Lieac44242015-06-29 10:50:03 +08001292 self->ModifySuspendCount(self, +1, nullptr, false);
Ian Rogers01ae5802012-09-28 16:14:01 -07001293 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001294 CHECK(!Contains(self));
1295 list_.push_back(self);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001296 if (kUseReadBarrier) {
Hiroshi Yamauchi00370822015-08-18 14:47:25 -07001297 // Initialize according to the state of the CC collector.
1298 bool is_gc_marking =
1299 Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsMarking();
Mathieu Chartierfe814e82016-11-09 14:32:49 -08001300 self->SetIsGcMarkingAndUpdateEntrypoints(is_gc_marking);
Hiroshi Yamauchi0b713572015-06-16 18:29:23 -07001301 bool weak_ref_access_enabled =
1302 Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->IsWeakRefAccessEnabled();
1303 self->SetWeakRefAccessEnabled(weak_ref_access_enabled);
1304 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001305}
1306
1307void ThreadList::Unregister(Thread* self) {
1308 DCHECK_EQ(self, Thread::Current());
Ian Rogers68d8b422014-07-17 11:09:10 -07001309 CHECK_NE(self->GetState(), kRunnable);
1310 Locks::mutator_lock_->AssertNotHeld(self);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001311
1312 VLOG(threads) << "ThreadList::Unregister() " << *self;
1313
Mathieu Chartier91e56692015-03-03 13:51:04 -08001314 {
1315 MutexLock mu(self, *Locks::thread_list_lock_);
1316 ++unregistering_count_;
1317 }
1318
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001319 // Any time-consuming destruction, plus anything that can call back into managed code or
Mathieu Chartier91e56692015-03-03 13:51:04 -08001320 // suspend and so on, must happen at this point, and not in ~Thread. The self->Destroy is what
1321 // causes the threads to join. It is important to do this after incrementing unregistering_count_
1322 // since we want the runtime to wait for the daemon threads to exit before deleting the thread
1323 // list.
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001324 self->Destroy();
1325
Jeff Haoe094b872014-10-14 13:12:01 -07001326 // If tracing, remember thread id and name before thread exits.
1327 Trace::StoreExitingThreadInfo(self);
1328
Ian Rogersdd7624d2014-03-14 17:43:00 -07001329 uint32_t thin_lock_id = self->GetThreadId();
Mathieu Chartier91e56692015-03-03 13:51:04 -08001330 while (true) {
Ian Rogerscfaa4552012-11-26 21:00:08 -08001331 // Remove and delete the Thread* while holding the thread_list_lock_ and
1332 // thread_suspend_count_lock_ so that the unregistering thread cannot be suspended.
Ian Rogers0878d652013-04-18 17:38:35 -07001333 // Note: deliberately not using MutexLock that could hold a stale self pointer.
Mathieu Chartier91e56692015-03-03 13:51:04 -08001334 MutexLock mu(self, *Locks::thread_list_lock_);
Ian Rogersa2af5c72014-09-15 15:17:07 -07001335 if (!Contains(self)) {
Mathieu Chartier9db831a2015-02-24 17:20:30 -08001336 std::string thread_name;
1337 self->GetThreadName(thread_name);
Ian Rogersa2af5c72014-09-15 15:17:07 -07001338 std::ostringstream os;
Christopher Ferris6cff48f2014-01-26 21:36:13 -08001339 DumpNativeStack(os, GetTid(), nullptr, " native: ", nullptr);
Mathieu Chartier9db831a2015-02-24 17:20:30 -08001340 LOG(ERROR) << "Request to unregister unattached thread " << thread_name << "\n" << os.str();
Mathieu Chartier91e56692015-03-03 13:51:04 -08001341 break;
Ian Rogersa2af5c72014-09-15 15:17:07 -07001342 } else {
Mathieu Chartier91e56692015-03-03 13:51:04 -08001343 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
Ian Rogersa2af5c72014-09-15 15:17:07 -07001344 if (!self->IsSuspended()) {
1345 list_.remove(self);
Mathieu Chartier91e56692015-03-03 13:51:04 -08001346 break;
Ian Rogersa2af5c72014-09-15 15:17:07 -07001347 }
Ian Rogers68d8b422014-07-17 11:09:10 -07001348 }
Mathieu Chartier91e56692015-03-03 13:51:04 -08001349 // We failed to remove the thread due to a suspend request; loop and try again.
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001350 }
Mathieu Chartier91e56692015-03-03 13:51:04 -08001351 delete self;
1352
Mathieu Chartier5f51d4b2013-12-03 14:24:05 -08001353 // Release the thread ID after the thread is finished and deleted to avoid cases where we can
1354 // temporarily have multiple threads with the same thread id. When this occurs, it causes
1355 // problems in FindThreadByThreadId / SuspendThreadByThreadId.
1356 ReleaseThreadId(nullptr, thin_lock_id);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001357
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001358 // Clear the TLS data, so that the underlying native thread is recognizably detached.
1359 // (It may wish to reattach later.)
Bilyan Borisovbb661c02016-04-04 16:27:32 +01001360#ifdef ART_TARGET_ANDROID
Andreas Gampe4382f1e2015-08-05 01:08:53 +00001361 __get_tls()[TLS_SLOT_ART_THREAD_SELF] = nullptr;
1362#else
Mathieu Chartier2cebb242015-04-21 16:50:40 -07001363 CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, nullptr), "detach self");
Andreas Gampe4382f1e2015-08-05 01:08:53 +00001364#endif
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001365
1366 // Signal that a thread just detached.
Mathieu Chartier91e56692015-03-03 13:51:04 -08001367 MutexLock mu(nullptr, *Locks::thread_list_lock_);
1368 --unregistering_count_;
1369 Locks::thread_exit_cond_->Broadcast(nullptr);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001370}
1371
1372void ThreadList::ForEach(void (*callback)(Thread*, void*), void* context) {
Mathieu Chartier02e25112013-08-14 16:14:24 -07001373 for (const auto& thread : list_) {
1374 callback(thread, context);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001375 }
1376}
1377
Mathieu Chartierf8a86b92016-06-14 17:08:47 -07001378void ThreadList::VisitRootsForSuspendedThreads(RootVisitor* visitor) {
1379 Thread* const self = Thread::Current();
1380 std::vector<Thread*> threads_to_visit;
1381
1382 // Tell threads to suspend and copy them into list.
1383 {
1384 MutexLock mu(self, *Locks::thread_list_lock_);
1385 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1386 for (Thread* thread : list_) {
1387 thread->ModifySuspendCount(self, +1, nullptr, false);
1388 if (thread == self || thread->IsSuspended()) {
1389 threads_to_visit.push_back(thread);
1390 } else {
1391 thread->ModifySuspendCount(self, -1, nullptr, false);
1392 }
1393 }
1394 }
1395
1396 // Visit roots without holding thread_list_lock_ and thread_suspend_count_lock_ to prevent lock
1397 // order violations.
1398 for (Thread* thread : threads_to_visit) {
1399 thread->VisitRoots(visitor);
1400 }
1401
1402 // Restore suspend counts.
1403 {
1404 MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
1405 for (Thread* thread : threads_to_visit) {
1406 thread->ModifySuspendCount(self, -1, nullptr, false);
1407 }
1408 }
1409}
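// Design note: the transient suspend-count bump above pins each already-suspended thread
// (and self) so its roots can be visited without holding thread_list_lock_ or
// thread_suspend_count_lock_, avoiding lock-order violations with the visitor's own locks.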
1410
Andreas Gampe585da952016-12-02 14:52:29 -08001411void ThreadList::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) const {
Ian Rogers81d425b2012-09-27 16:03:43 -07001412 MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
Mathieu Chartier02e25112013-08-14 16:14:24 -07001413 for (const auto& thread : list_) {
Andreas Gampe585da952016-12-02 14:52:29 -08001414 thread->VisitRoots(visitor, flags);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001415 }
Elliott Hughes038a8062011-09-18 14:12:41 -07001416}
1417
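// Thread ids are 1-based indices into the allocated_ids_ bitmap; index zero is kept free
// so it can serve as an "invalid" sentinel, as the comments below note.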
Ian Rogerscfaa4552012-11-26 21:00:08 -08001418uint32_t ThreadList::AllocThreadId(Thread* self) {
Chao-ying Fu9e369312014-05-21 11:20:52 -07001419 MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
Elliott Hughes8daa0922011-09-11 13:46:25 -07001420 for (size_t i = 0; i < allocated_ids_.size(); ++i) {
1421 if (!allocated_ids_[i]) {
1422 allocated_ids_.set(i);
Brian Carlstrom7934ac22013-07-26 10:54:15 -07001423 return i + 1; // Zero is reserved to mean "invalid".
Elliott Hughes8daa0922011-09-11 13:46:25 -07001424 }
1425 }
1426 LOG(FATAL) << "Out of internal thread ids";
1427 return 0;
1428}
1429
Ian Rogerscfaa4552012-11-26 21:00:08 -08001430void ThreadList::ReleaseThreadId(Thread* self, uint32_t id) {
Chao-ying Fu9e369312014-05-21 11:20:52 -07001431 MutexLock mu(self, *Locks::allocated_thread_ids_lock_);
Brian Carlstrom7934ac22013-07-26 10:54:15 -07001432 --id; // Zero is reserved to mean "invalid".
Elliott Hughes8daa0922011-09-11 13:46:25 -07001433 DCHECK(allocated_ids_[id]) << id;
1434 allocated_ids_.reset(id);
1435}
1436
Mathieu Chartier4f55e222015-09-04 13:26:21 -07001437ScopedSuspendAll::ScopedSuspendAll(const char* cause, bool long_suspend) {
1438 Runtime::Current()->GetThreadList()->SuspendAll(cause, long_suspend);
1439}
1440
1441ScopedSuspendAll::~ScopedSuspendAll() {
1442 Runtime::Current()->GetThreadList()->ResumeAll();
1443}
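// A minimal usage sketch (illustrative only; the cause string and scope body are made up).
// All other threads stay suspended for the lifetime of the RAII object:
//
//   {
//     ScopedSuspendAll ssa("ExampleDumpEverything", /* long_suspend */ false);
//     // ... inspect or mutate state that requires all threads to be stopped ...
//   }  // ~ScopedSuspendAll resumes them.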
1444
Elliott Hughes8daa0922011-09-11 13:46:25 -07001445} // namespace art