/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "locks.h"

#include <errno.h>
#include <sys/time.h>

#include <algorithm>  // For std::find() used by the weak-ref access helpers below.

#include "android-base/logging.h"

#include "base/atomic.h"
#include "base/logging.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/value_object.h"
#include "mutex-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"

namespace art {

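// Client-supplied predicate, installed via SetClientCallback() and read racily by
// IsSafeToCallAbortRacy() below, that reports whether it is currently safe to call abort.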
static Atomic<Locks::ClientCallback*> safe_to_call_abort_callback(nullptr);

Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::alloc_tracker_lock_ = nullptr;
Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
Mutex* Locks::custom_tls_lock_ = nullptr;
Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;
Mutex* Locks::jni_function_table_lock_ = nullptr;
Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
MutatorMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
ReaderWriterMutex* Locks::verifier_deps_lock_ = nullptr;
ReaderWriterMutex* Locks::oat_file_manager_lock_ = nullptr;
Mutex* Locks::host_dlopen_handles_lock_ = nullptr;
Mutex* Locks::reference_processor_lock_ = nullptr;
Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::runtime_thread_pool_lock_ = nullptr;
Mutex* Locks::cha_lock_ = nullptr;
Mutex* Locks::jit_lock_ = nullptr;
Mutex* Locks::subtype_check_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
ConditionVariable* Locks::thread_exit_cond_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
Mutex* Locks::user_code_suspension_lock_ = nullptr;
Uninterruptible Roles::uninterruptible_;
ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
Mutex* Locks::jni_weak_globals_lock_ = nullptr;
ReaderWriterMutex* Locks::dex_lock_ = nullptr;
Mutex* Locks::native_debug_interface_lock_ = nullptr;
ReaderWriterMutex* Locks::jni_id_lock_ = nullptr;
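// Mutexes that may legitimately be held while accessing a weak reference, together with the
// atomic guard that serializes updates to the list (see the AddTo/RemoveFrom/IsExpected
// helpers at the bottom of this file).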
std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
Atomic<const BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_guard_;

// Wait for an amount of time that increases roughly with the argument i.
// Spin for small arguments and yield/sleep for larger ones.
static void BackOff(uint32_t i) {
  static constexpr uint32_t kSpinMax = 10;
  static constexpr uint32_t kYieldMax = 20;
  if (i <= kSpinMax) {
    // TODO: Esp. in very latency-sensitive cases, consider replacing this with an explicit
    // test-and-test-and-set loop in the caller. Possibly skip entirely on a uniprocessor.
    volatile uint32_t x = 0;
    const uint32_t spin_count = 10 * i;
    for (uint32_t spin = 0; spin < spin_count; ++spin) {
      ++x;  // Volatile; hence should not be optimized away.
    }
    // TODO: Consider adding x86 PAUSE and/or ARM YIELD here.
  } else if (i <= kYieldMax) {
    sched_yield();
  } else {
    NanoSleep(1000ull * (i - kYieldMax));
  }
}

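// RAII spin-lock guard over expected_mutexes_on_weak_ref_access_guard_: the constructor
// busy-waits (using BackOff) until it can CAS the guard from nullptr to |mutex|, and the
// destructor releases the guard by storing nullptr back with release semantics.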
class Locks::ScopedExpectedMutexesOnWeakRefAccessLock final {
 public:
  explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
    for (uint32_t i = 0;
         !Locks::expected_mutexes_on_weak_ref_access_guard_.CompareAndSetWeakAcquire(nullptr,
                                                                                     mutex);
         ++i) {
      BackOff(i);
    }
  }

  ~ScopedExpectedMutexesOnWeakRefAccessLock() {
    DCHECK_EQ(Locks::expected_mutexes_on_weak_ref_access_guard_.load(std::memory_order_relaxed),
              mutex_);
    Locks::expected_mutexes_on_weak_ref_access_guard_.store(nullptr, std::memory_order_release);
  }

 private:
  const BaseMutex* const mutex_;
};

void Locks::Init() {
  if (logging_lock_ != nullptr) {
    // Already initialized.
    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
      DCHECK(modify_ldt_lock_ != nullptr);
    } else {
      DCHECK(modify_ldt_lock_ == nullptr);
    }
    DCHECK(abort_lock_ != nullptr);
    DCHECK(alloc_tracker_lock_ != nullptr);
    DCHECK(allocated_monitor_ids_lock_ != nullptr);
    DCHECK(allocated_thread_ids_lock_ != nullptr);
    DCHECK(breakpoint_lock_ != nullptr);
    DCHECK(classlinker_classes_lock_ != nullptr);
    DCHECK(custom_tls_lock_ != nullptr);
    DCHECK(deoptimization_lock_ != nullptr);
    DCHECK(heap_bitmap_lock_ != nullptr);
    DCHECK(oat_file_manager_lock_ != nullptr);
    DCHECK(verifier_deps_lock_ != nullptr);
    DCHECK(host_dlopen_handles_lock_ != nullptr);
    DCHECK(intern_table_lock_ != nullptr);
    DCHECK(jni_function_table_lock_ != nullptr);
    DCHECK(jni_libraries_lock_ != nullptr);
    DCHECK(logging_lock_ != nullptr);
    DCHECK(mutator_lock_ != nullptr);
    DCHECK(profiler_lock_ != nullptr);
    DCHECK(cha_lock_ != nullptr);
    DCHECK(jit_lock_ != nullptr);
    DCHECK(subtype_check_lock_ != nullptr);
    DCHECK(thread_list_lock_ != nullptr);
    DCHECK(thread_suspend_count_lock_ != nullptr);
    DCHECK(trace_lock_ != nullptr);
    DCHECK(unexpected_signal_lock_ != nullptr);
    DCHECK(user_code_suspension_lock_ != nullptr);
    DCHECK(dex_lock_ != nullptr);
    DCHECK(native_debug_interface_lock_ != nullptr);
    DCHECK(jni_id_lock_ != nullptr);
    DCHECK(runtime_thread_pool_lock_ != nullptr);
  } else {
    // Create global locks in level order from highest lock level to lowest.
    LockLevel current_lock_level = kUserCodeSuspensionLock;
    DCHECK(user_code_suspension_lock_ == nullptr);
    user_code_suspension_lock_ = new Mutex("user code suspension lock", current_lock_level);

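    // Checks that global locks are created in strictly decreasing lock-level order, keeping the
    // lock hierarchy consistent. It deliberately uses fprintf/exit rather than CHECK/FATAL
    // because abort_lock_ has not been created yet at this point.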
    #define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
      if ((new_level) >= current_lock_level) { \
        /* Do not use CHECKs or FATAL here, abort_lock_ is not set up yet. */ \
        fprintf(stderr, "New lock level %d is not less than current level %d\n", \
                new_level, current_lock_level); \
        exit(1); \
      } \
      current_lock_level = new_level;

    UPDATE_CURRENT_LOCK_LEVEL(kInstrumentEntrypointsLock);
    DCHECK(instrument_entrypoints_lock_ == nullptr);
    instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
    DCHECK(mutator_lock_ == nullptr);
    mutator_lock_ = new MutatorMutex("mutator lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
    DCHECK(heap_bitmap_lock_ == nullptr);
    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
    DCHECK(trace_lock_ == nullptr);
    trace_lock_ = new Mutex("trace lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
    DCHECK(runtime_shutdown_lock_ == nullptr);
    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeThreadPoolLock);
    DCHECK(runtime_thread_pool_lock_ == nullptr);
    runtime_thread_pool_lock_ = new Mutex("runtime thread pool lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
    DCHECK(profiler_lock_ == nullptr);
    profiler_lock_ = new Mutex("profiler lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
    DCHECK(deoptimization_lock_ == nullptr);
    deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
    DCHECK(alloc_tracker_lock_ == nullptr);
    alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
    DCHECK(thread_list_lock_ == nullptr);
    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
    DCHECK(jni_libraries_lock_ == nullptr);
    jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
    DCHECK(breakpoint_lock_ == nullptr);
    breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kSubtypeCheckLock);
    DCHECK(subtype_check_lock_ == nullptr);
    subtype_check_lock_ = new Mutex("SubtypeCheck lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
    DCHECK(classlinker_classes_lock_ == nullptr);
    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
                                                      current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
    DCHECK(allocated_monitor_ids_lock_ == nullptr);
    allocated_monitor_ids_lock_ = new Mutex("allocated monitor ids lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
    DCHECK(allocated_thread_ids_lock_ == nullptr);
    allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);

    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
      DCHECK(modify_ldt_lock_ == nullptr);
      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
    }

    UPDATE_CURRENT_LOCK_LEVEL(kDexLock);
    DCHECK(dex_lock_ == nullptr);
    dex_lock_ = new ReaderWriterMutex("ClassLinker dex lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kOatFileManagerLock);
    DCHECK(oat_file_manager_lock_ == nullptr);
    oat_file_manager_lock_ = new ReaderWriterMutex("OatFile manager lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kVerifierDepsLock);
    DCHECK(verifier_deps_lock_ == nullptr);
    verifier_deps_lock_ = new ReaderWriterMutex("verifier deps lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kHostDlOpenHandlesLock);
    DCHECK(host_dlopen_handles_lock_ == nullptr);
    host_dlopen_handles_lock_ = new Mutex("host dlopen handles lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
    DCHECK(intern_table_lock_ == nullptr);
    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
    DCHECK(reference_processor_lock_ == nullptr);
    reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
    DCHECK(reference_queue_cleared_references_lock_ == nullptr);
    reference_queue_cleared_references_lock_ =
        new Mutex("ReferenceQueue cleared references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
    DCHECK(reference_queue_weak_references_lock_ == nullptr);
    reference_queue_weak_references_lock_ =
        new Mutex("ReferenceQueue weak references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
    DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
    reference_queue_finalizer_references_lock_ =
        new Mutex("ReferenceQueue finalizer references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
    DCHECK(reference_queue_phantom_references_lock_ == nullptr);
    reference_queue_phantom_references_lock_ =
        new Mutex("ReferenceQueue phantom references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
    DCHECK(reference_queue_soft_references_lock_ == nullptr);
    reference_queue_soft_references_lock_ =
        new Mutex("ReferenceQueue soft references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kJniGlobalsLock);
    DCHECK(jni_globals_lock_ == nullptr);
    jni_globals_lock_ =
        new ReaderWriterMutex("JNI global reference table lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kJniWeakGlobalsLock);
    DCHECK(jni_weak_globals_lock_ == nullptr);
    jni_weak_globals_lock_ = new Mutex("JNI weak global reference table lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kJniFunctionTableLock);
    DCHECK(jni_function_table_lock_ == nullptr);
    jni_function_table_lock_ = new Mutex("JNI function table lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kCustomTlsLock);
    DCHECK(custom_tls_lock_ == nullptr);
    custom_tls_lock_ = new Mutex("Thread::custom_tls_ lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kJitCodeCacheLock);
    DCHECK(jit_lock_ == nullptr);
    jit_lock_ = new Mutex("Jit code cache", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
    DCHECK(cha_lock_ == nullptr);
    cha_lock_ = new Mutex("CHA lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
    DCHECK(native_debug_interface_lock_ == nullptr);
    native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kJniIdLock);
    DCHECK(jni_id_lock_ == nullptr);
    jni_id_lock_ = new ReaderWriterMutex("JNI id map lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
    DCHECK(abort_lock_ == nullptr);
    abort_lock_ = new Mutex("abort lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
    DCHECK(thread_suspend_count_lock_ == nullptr);
    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
    DCHECK(unexpected_signal_lock_ == nullptr);
    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
    DCHECK(logging_lock_ == nullptr);
    logging_lock_ = new Mutex("logging lock", current_lock_level, true);

    #undef UPDATE_CURRENT_LOCK_LEVEL

    // List of mutexes that we may hold when accessing a weak ref.
    AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock=*/ false);
    AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock=*/ false);
    AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock=*/ false);

    InitConditions();
  }
}

void Locks::InitConditions() {
  thread_exit_cond_ = new ConditionVariable("thread exit condition variable", *thread_list_lock_);
}

void Locks::SetClientCallback(ClientCallback* safe_to_call_abort_cb) {
  safe_to_call_abort_callback.store(safe_to_call_abort_cb, std::memory_order_release);
}
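
// Illustrative sketch only (not part of this file): a client could register a callback along
// these lines so that IsSafeToCallAbortRacy() starts returning true once aborting is safe. The
// condition shown is hypothetical.
//
//   static bool SafeToCallAbort() {
//     Runtime* runtime = Runtime::Current();
//     return runtime != nullptr && runtime->IsStarted();
//   }
//   ...
//   Locks::SetClientCallback(SafeToCallAbort);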

// Helper to check, racily and without any locking requirements, whether it is currently safe to
// call abort (e.g. to rule out calls during startup or shutdown).
bool Locks::IsSafeToCallAbortRacy() {
  Locks::ClientCallback* safe_to_call_abort_cb =
      safe_to_call_abort_callback.load(std::memory_order_acquire);
  return safe_to_call_abort_cb != nullptr && safe_to_call_abort_cb();
}

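// For the two helpers below, |need_lock| selects whether ScopedExpectedMutexesOnWeakRefAccessLock
// is taken while the list is modified; Locks::Init() above passes /*need_lock=*/ false.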
void Locks::AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
  if (need_lock) {
    ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
    mutex->SetShouldRespondToEmptyCheckpointRequest(true);
    expected_mutexes_on_weak_ref_access_.push_back(mutex);
  } else {
    mutex->SetShouldRespondToEmptyCheckpointRequest(true);
    expected_mutexes_on_weak_ref_access_.push_back(mutex);
  }
}

void Locks::RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
  if (need_lock) {
    ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
    mutex->SetShouldRespondToEmptyCheckpointRequest(false);
    std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
    auto it = std::find(list.begin(), list.end(), mutex);
    DCHECK(it != list.end());
    list.erase(it);
  } else {
    mutex->SetShouldRespondToEmptyCheckpointRequest(false);
    std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
    auto it = std::find(list.begin(), list.end(), mutex);
    DCHECK(it != list.end());
    list.erase(it);
  }
}

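// Returns whether |mutex| is currently registered as one of the mutexes expected to be held on
// weak reference access; takes the spin guard to obtain a consistent view of the list.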
bool Locks::IsExpectedOnWeakRefAccess(BaseMutex* mutex) {
  ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
  std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
  return std::find(list.begin(), list.end(), mutex) != list.end();
}

}  // namespace art