blob: 1cec44c27e755fe072f51c14176f58edc884817b [file] [log] [blame]
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
#include "locks.h"

#include <errno.h>
#include <sys/time.h>

#include <algorithm>
#include <optional>

#include "android-base/logging.h"

#include "base/atomic.h"
#include "base/logging.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/value_object.h"
#include "mutex-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"
32
namespace art {

// Callback consulted by IsSafeToCallAbortRacy(); installed via
// SetClientCallback(). Atomic because it is read and written racily from
// multiple threads with no other synchronization.
static Atomic<Locks::ClientCallback*> safe_to_call_abort_callback(nullptr);

// Definitions of the static lock members declared in locks.h. All of them are
// null until Locks::Init() creates them, in strictly decreasing lock-level
// order (see Init() for the ordering).
Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::alloc_tracker_lock_ = nullptr;
Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
Mutex* Locks::custom_tls_lock_ = nullptr;
Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;
Mutex* Locks::jni_function_table_lock_ = nullptr;
Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
// Only created on x86/x86-64 (see Init()).
Mutex* Locks::modify_ldt_lock_ = nullptr;
MutatorMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
ReaderWriterMutex* Locks::verifier_deps_lock_ = nullptr;
ReaderWriterMutex* Locks::oat_file_manager_lock_ = nullptr;
Mutex* Locks::host_dlopen_handles_lock_ = nullptr;
Mutex* Locks::reference_processor_lock_ = nullptr;
Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::runtime_thread_pool_lock_ = nullptr;
Mutex* Locks::cha_lock_ = nullptr;
Mutex* Locks::jit_lock_ = nullptr;
Mutex* Locks::subtype_check_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
// Created separately by InitConditions(), after thread_list_lock_ exists.
ConditionVariable* Locks::thread_exit_cond_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
Mutex* Locks::user_code_suspension_lock_ = nullptr;
Uninterruptible Roles::uninterruptible_;
ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
Mutex* Locks::jni_weak_globals_lock_ = nullptr;
ReaderWriterMutex* Locks::dex_lock_ = nullptr;
Mutex* Locks::native_debug_interface_lock_ = nullptr;
// List of mutexes that may be held while accessing a weak ref, plus the guard
// word that serializes mutation of that list (see
// ScopedExpectedMutexesOnWeakRefAccessLock below).
std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
Atomic<const BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_guard_;
81
82// Wait for an amount of time that roughly increases in the argument i.
83// Spin for small arguments and yield/sleep for longer ones.
84static void BackOff(uint32_t i) {
85 static constexpr uint32_t kSpinMax = 10;
86 static constexpr uint32_t kYieldMax = 20;
87 if (i <= kSpinMax) {
88 // TODO: Esp. in very latency-sensitive cases, consider replacing this with an explicit
89 // test-and-test-and-set loop in the caller. Possibly skip entirely on a uniprocessor.
90 volatile uint32_t x = 0;
91 const uint32_t spin_count = 10 * i;
92 for (uint32_t spin = 0; spin < spin_count; ++spin) {
93 ++x; // Volatile; hence should not be optimized away.
94 }
95 // TODO: Consider adding x86 PAUSE and/or ARM YIELD here.
96 } else if (i <= kYieldMax) {
97 sched_yield();
98 } else {
99 NanoSleep(1000ull * (i - kYieldMax));
100 }
101}
102
103class Locks::ScopedExpectedMutexesOnWeakRefAccessLock final {
104 public:
105 explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
106 for (uint32_t i = 0;
107 !Locks::expected_mutexes_on_weak_ref_access_guard_.CompareAndSetWeakAcquire(nullptr,
108 mutex);
109 ++i) {
110 BackOff(i);
111 }
112 }
113
114 ~ScopedExpectedMutexesOnWeakRefAccessLock() {
115 DCHECK_EQ(Locks::expected_mutexes_on_weak_ref_access_guard_.load(std::memory_order_relaxed),
116 mutex_);
117 Locks::expected_mutexes_on_weak_ref_access_guard_.store(nullptr, std::memory_order_release);
118 }
119
120 private:
121 const BaseMutex* const mutex_;
122};
123
124void Locks::Init() {
125 if (logging_lock_ != nullptr) {
126 // Already initialized.
127 if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
128 DCHECK(modify_ldt_lock_ != nullptr);
129 } else {
130 DCHECK(modify_ldt_lock_ == nullptr);
131 }
132 DCHECK(abort_lock_ != nullptr);
133 DCHECK(alloc_tracker_lock_ != nullptr);
134 DCHECK(allocated_monitor_ids_lock_ != nullptr);
135 DCHECK(allocated_thread_ids_lock_ != nullptr);
136 DCHECK(breakpoint_lock_ != nullptr);
137 DCHECK(classlinker_classes_lock_ != nullptr);
138 DCHECK(custom_tls_lock_ != nullptr);
139 DCHECK(deoptimization_lock_ != nullptr);
140 DCHECK(heap_bitmap_lock_ != nullptr);
141 DCHECK(oat_file_manager_lock_ != nullptr);
142 DCHECK(verifier_deps_lock_ != nullptr);
143 DCHECK(host_dlopen_handles_lock_ != nullptr);
144 DCHECK(intern_table_lock_ != nullptr);
145 DCHECK(jni_function_table_lock_ != nullptr);
146 DCHECK(jni_libraries_lock_ != nullptr);
147 DCHECK(logging_lock_ != nullptr);
148 DCHECK(mutator_lock_ != nullptr);
149 DCHECK(profiler_lock_ != nullptr);
150 DCHECK(cha_lock_ != nullptr);
Nicolas Geoffray2a905b22019-06-06 09:04:07 +0100151 DCHECK(jit_lock_ != nullptr);
Andreas Gampe7cc45fd2018-11-21 16:03:08 -0800152 DCHECK(subtype_check_lock_ != nullptr);
153 DCHECK(thread_list_lock_ != nullptr);
154 DCHECK(thread_suspend_count_lock_ != nullptr);
155 DCHECK(trace_lock_ != nullptr);
156 DCHECK(unexpected_signal_lock_ != nullptr);
157 DCHECK(user_code_suspension_lock_ != nullptr);
158 DCHECK(dex_lock_ != nullptr);
159 DCHECK(native_debug_interface_lock_ != nullptr);
Mathieu Chartierada33d72018-12-17 13:17:30 -0800160 DCHECK(runtime_thread_pool_lock_ != nullptr);
Andreas Gampe7cc45fd2018-11-21 16:03:08 -0800161 } else {
162 // Create global locks in level order from highest lock level to lowest.
Alex Light66834462019-04-08 16:28:29 +0000163 LockLevel current_lock_level = kUserCodeSuspensionLock;
164 DCHECK(user_code_suspension_lock_ == nullptr);
165 user_code_suspension_lock_ = new Mutex("user code suspension lock", current_lock_level);
Andreas Gampe7cc45fd2018-11-21 16:03:08 -0800166
167 #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
168 if ((new_level) >= current_lock_level) { \
169 /* Do not use CHECKs or FATAL here, abort_lock_ is not setup yet. */ \
170 fprintf(stderr, "New local level %d is not less than current level %d\n", \
171 new_level, current_lock_level); \
172 exit(1); \
173 } \
174 current_lock_level = new_level;
175
Alex Light66834462019-04-08 16:28:29 +0000176 UPDATE_CURRENT_LOCK_LEVEL(kInstrumentEntrypointsLock);
177 DCHECK(instrument_entrypoints_lock_ == nullptr);
178 instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);
Andreas Gampe7cc45fd2018-11-21 16:03:08 -0800179
180 UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
181 DCHECK(mutator_lock_ == nullptr);
182 mutator_lock_ = new MutatorMutex("mutator lock", current_lock_level);
183
184 UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
185 DCHECK(heap_bitmap_lock_ == nullptr);
186 heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);
187
188 UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
189 DCHECK(trace_lock_ == nullptr);
190 trace_lock_ = new Mutex("trace lock", current_lock_level);
191
192 UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
193 DCHECK(runtime_shutdown_lock_ == nullptr);
194 runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);
195
Mathieu Chartierada33d72018-12-17 13:17:30 -0800196 UPDATE_CURRENT_LOCK_LEVEL(kRuntimeThreadPoolLock);
197 DCHECK(runtime_thread_pool_lock_ == nullptr);
198 runtime_thread_pool_lock_ = new Mutex("runtime thread pool lock", current_lock_level);
199
Andreas Gampe7cc45fd2018-11-21 16:03:08 -0800200 UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
201 DCHECK(profiler_lock_ == nullptr);
202 profiler_lock_ = new Mutex("profiler lock", current_lock_level);
203
204 UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
205 DCHECK(deoptimization_lock_ == nullptr);
206 deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);
207
208 UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
209 DCHECK(alloc_tracker_lock_ == nullptr);
210 alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);
211
212 UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
213 DCHECK(thread_list_lock_ == nullptr);
214 thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
215
216 UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
217 DCHECK(jni_libraries_lock_ == nullptr);
218 jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);
219
220 UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
221 DCHECK(breakpoint_lock_ == nullptr);
222 breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);
223
224 UPDATE_CURRENT_LOCK_LEVEL(kSubtypeCheckLock);
225 DCHECK(subtype_check_lock_ == nullptr);
226 subtype_check_lock_ = new Mutex("SubtypeCheck lock", current_lock_level);
227
228 UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
229 DCHECK(classlinker_classes_lock_ == nullptr);
230 classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
231 current_lock_level);
232
233 UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
234 DCHECK(allocated_monitor_ids_lock_ == nullptr);
235 allocated_monitor_ids_lock_ = new Mutex("allocated monitor ids lock", current_lock_level);
236
237 UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
238 DCHECK(allocated_thread_ids_lock_ == nullptr);
239 allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);
240
241 if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
242 UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
243 DCHECK(modify_ldt_lock_ == nullptr);
244 modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
245 }
246
247 UPDATE_CURRENT_LOCK_LEVEL(kDexLock);
248 DCHECK(dex_lock_ == nullptr);
249 dex_lock_ = new ReaderWriterMutex("ClassLinker dex lock", current_lock_level);
250
251 UPDATE_CURRENT_LOCK_LEVEL(kOatFileManagerLock);
252 DCHECK(oat_file_manager_lock_ == nullptr);
253 oat_file_manager_lock_ = new ReaderWriterMutex("OatFile manager lock", current_lock_level);
254
255 UPDATE_CURRENT_LOCK_LEVEL(kVerifierDepsLock);
256 DCHECK(verifier_deps_lock_ == nullptr);
257 verifier_deps_lock_ = new ReaderWriterMutex("verifier deps lock", current_lock_level);
258
259 UPDATE_CURRENT_LOCK_LEVEL(kHostDlOpenHandlesLock);
260 DCHECK(host_dlopen_handles_lock_ == nullptr);
261 host_dlopen_handles_lock_ = new Mutex("host dlopen handles lock", current_lock_level);
262
263 UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
264 DCHECK(intern_table_lock_ == nullptr);
265 intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);
266
267 UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
268 DCHECK(reference_processor_lock_ == nullptr);
269 reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);
270
271 UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
272 DCHECK(reference_queue_cleared_references_lock_ == nullptr);
273 reference_queue_cleared_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);
274
275 UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
276 DCHECK(reference_queue_weak_references_lock_ == nullptr);
277 reference_queue_weak_references_lock_ = new Mutex("ReferenceQueue cleared references lock", current_lock_level);
278
279 UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
280 DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
281 reference_queue_finalizer_references_lock_ = new Mutex("ReferenceQueue finalizer references lock", current_lock_level);
282
283 UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
284 DCHECK(reference_queue_phantom_references_lock_ == nullptr);
285 reference_queue_phantom_references_lock_ = new Mutex("ReferenceQueue phantom references lock", current_lock_level);
286
287 UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
288 DCHECK(reference_queue_soft_references_lock_ == nullptr);
289 reference_queue_soft_references_lock_ = new Mutex("ReferenceQueue soft references lock", current_lock_level);
290
291 UPDATE_CURRENT_LOCK_LEVEL(kJniGlobalsLock);
292 DCHECK(jni_globals_lock_ == nullptr);
293 jni_globals_lock_ =
294 new ReaderWriterMutex("JNI global reference table lock", current_lock_level);
295
296 UPDATE_CURRENT_LOCK_LEVEL(kJniWeakGlobalsLock);
297 DCHECK(jni_weak_globals_lock_ == nullptr);
298 jni_weak_globals_lock_ = new Mutex("JNI weak global reference table lock", current_lock_level);
299
300 UPDATE_CURRENT_LOCK_LEVEL(kJniFunctionTableLock);
301 DCHECK(jni_function_table_lock_ == nullptr);
302 jni_function_table_lock_ = new Mutex("JNI function table lock", current_lock_level);
303
304 UPDATE_CURRENT_LOCK_LEVEL(kCustomTlsLock);
305 DCHECK(custom_tls_lock_ == nullptr);
306 custom_tls_lock_ = new Mutex("Thread::custom_tls_ lock", current_lock_level);
307
Nicolas Geoffray2a905b22019-06-06 09:04:07 +0100308 UPDATE_CURRENT_LOCK_LEVEL(kJitCodeCacheLock);
309 DCHECK(jit_lock_ == nullptr);
310 jit_lock_ = new Mutex("Jit code cache", current_lock_level);
311
Andreas Gampe7cc45fd2018-11-21 16:03:08 -0800312 UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
313 DCHECK(cha_lock_ == nullptr);
314 cha_lock_ = new Mutex("CHA lock", current_lock_level);
315
316 UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
317 DCHECK(native_debug_interface_lock_ == nullptr);
318 native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
319
320 UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
321 DCHECK(abort_lock_ == nullptr);
322 abort_lock_ = new Mutex("abort lock", current_lock_level, true);
323
324 UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
325 DCHECK(thread_suspend_count_lock_ == nullptr);
326 thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);
327
328 UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
329 DCHECK(unexpected_signal_lock_ == nullptr);
330 unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
331
332 UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
333 DCHECK(logging_lock_ == nullptr);
334 logging_lock_ = new Mutex("logging lock", current_lock_level, true);
335
336 #undef UPDATE_CURRENT_LOCK_LEVEL
337
338 // List of mutexes that we may hold when accessing a weak ref.
339 AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock=*/ false);
340 AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock=*/ false);
341 AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock=*/ false);
342
343 InitConditions();
344 }
345}
346
347void Locks::InitConditions() {
348 thread_exit_cond_ = new ConditionVariable("thread exit condition variable", *thread_list_lock_);
349}
350
351void Locks::SetClientCallback(ClientCallback* safe_to_call_abort_cb) {
352 safe_to_call_abort_callback.store(safe_to_call_abort_cb, std::memory_order_release);
353}
354
355// Helper to allow checking shutdown while ignoring locking requirements.
356bool Locks::IsSafeToCallAbortRacy() {
357 Locks::ClientCallback* safe_to_call_abort_cb =
358 safe_to_call_abort_callback.load(std::memory_order_acquire);
359 return safe_to_call_abort_cb != nullptr && safe_to_call_abort_cb();
360}
361
362void Locks::AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
363 if (need_lock) {
364 ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
365 mutex->SetShouldRespondToEmptyCheckpointRequest(true);
366 expected_mutexes_on_weak_ref_access_.push_back(mutex);
367 } else {
368 mutex->SetShouldRespondToEmptyCheckpointRequest(true);
369 expected_mutexes_on_weak_ref_access_.push_back(mutex);
370 }
371}
372
373void Locks::RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
374 if (need_lock) {
375 ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
376 mutex->SetShouldRespondToEmptyCheckpointRequest(false);
377 std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
378 auto it = std::find(list.begin(), list.end(), mutex);
379 DCHECK(it != list.end());
380 list.erase(it);
381 } else {
382 mutex->SetShouldRespondToEmptyCheckpointRequest(false);
383 std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
384 auto it = std::find(list.begin(), list.end(), mutex);
385 DCHECK(it != list.end());
386 list.erase(it);
387 }
388}
389
390bool Locks::IsExpectedOnWeakRefAccess(BaseMutex* mutex) {
391 ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
392 std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
393 return std::find(list.begin(), list.end(), mutex) != list.end();
394}
395
396} // namespace art