/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "locks.h"

#include <errno.h>
#include <sys/time.h>

#include <algorithm>  // For std::find, used below.

#include "android-base/logging.h"

#include "base/atomic.h"
#include "base/logging.h"
#include "base/systrace.h"
#include "base/time_utils.h"
#include "base/value_object.h"
#include "mutex-inl.h"
#include "scoped_thread_state_change-inl.h"
#include "thread-inl.h"

namespace art {

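// Client-supplied predicate saying whether it is currently safe to call abort.
// Stored by Locks::SetClientCallback() and read racily by
// Locks::IsSafeToCallAbortRacy() below.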
static Atomic<Locks::ClientCallback*> safe_to_call_abort_callback(nullptr);

Mutex* Locks::abort_lock_ = nullptr;
Mutex* Locks::alloc_tracker_lock_ = nullptr;
Mutex* Locks::allocated_monitor_ids_lock_ = nullptr;
Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
Mutex* Locks::custom_tls_lock_ = nullptr;
Mutex* Locks::deoptimization_lock_ = nullptr;
ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
Mutex* Locks::instrument_entrypoints_lock_ = nullptr;
Mutex* Locks::intern_table_lock_ = nullptr;
Mutex* Locks::jni_function_table_lock_ = nullptr;
Mutex* Locks::jni_libraries_lock_ = nullptr;
Mutex* Locks::logging_lock_ = nullptr;
Mutex* Locks::modify_ldt_lock_ = nullptr;
MutatorMutex* Locks::mutator_lock_ = nullptr;
Mutex* Locks::profiler_lock_ = nullptr;
ReaderWriterMutex* Locks::verifier_deps_lock_ = nullptr;
ReaderWriterMutex* Locks::oat_file_manager_lock_ = nullptr;
Mutex* Locks::host_dlopen_handles_lock_ = nullptr;
Mutex* Locks::reference_processor_lock_ = nullptr;
Mutex* Locks::reference_queue_cleared_references_lock_ = nullptr;
Mutex* Locks::reference_queue_finalizer_references_lock_ = nullptr;
Mutex* Locks::reference_queue_phantom_references_lock_ = nullptr;
Mutex* Locks::reference_queue_soft_references_lock_ = nullptr;
Mutex* Locks::reference_queue_weak_references_lock_ = nullptr;
Mutex* Locks::runtime_shutdown_lock_ = nullptr;
Mutex* Locks::cha_lock_ = nullptr;
Mutex* Locks::subtype_check_lock_ = nullptr;
Mutex* Locks::thread_list_lock_ = nullptr;
ConditionVariable* Locks::thread_exit_cond_ = nullptr;
Mutex* Locks::thread_suspend_count_lock_ = nullptr;
Mutex* Locks::trace_lock_ = nullptr;
Mutex* Locks::unexpected_signal_lock_ = nullptr;
Mutex* Locks::user_code_suspension_lock_ = nullptr;
Uninterruptible Roles::uninterruptible_;
ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
Mutex* Locks::jni_weak_globals_lock_ = nullptr;
ReaderWriterMutex* Locks::dex_lock_ = nullptr;
Mutex* Locks::native_debug_interface_lock_ = nullptr;
std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
Atomic<const BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_guard_;

// Wait for an amount of time that roughly increases with the argument i.
// Spin for small arguments and yield/sleep for longer ones.
static void BackOff(uint32_t i) {
  static constexpr uint32_t kSpinMax = 10;
  static constexpr uint32_t kYieldMax = 20;
  if (i <= kSpinMax) {
    // TODO: Esp. in very latency-sensitive cases, consider replacing this with an explicit
    // test-and-test-and-set loop in the caller. Possibly skip entirely on a uniprocessor.
    volatile uint32_t x = 0;
    const uint32_t spin_count = 10 * i;
    for (uint32_t spin = 0; spin < spin_count; ++spin) {
      ++x;  // Volatile; hence should not be optimized away.
    }
    // TODO: Consider adding x86 PAUSE and/or ARM YIELD here.
  } else if (i <= kYieldMax) {
    sched_yield();
  } else {
    NanoSleep(1000ull * (i - kYieldMax));
  }
}

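// The intended use of BackOff() is a bounded compare-and-swap retry loop; the
// scoped guard below applies it to build a simple spin lock over
// expected_mutexes_on_weak_ref_access_guard_. A minimal sketch of the pattern
// (with illustrative names `guard` and `owner`):
//
//   Atomic<const BaseMutex*> guard(nullptr);
//   for (uint32_t i = 0; !guard.CompareAndSetWeakAcquire(nullptr, owner); ++i) {
//     BackOff(i);  // Spin briefly, then yield, then sleep.
//   }
//   // ... critical section ...
//   guard.store(nullptr, std::memory_order_release);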
class Locks::ScopedExpectedMutexesOnWeakRefAccessLock final {
 public:
  explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
    for (uint32_t i = 0;
         !Locks::expected_mutexes_on_weak_ref_access_guard_.CompareAndSetWeakAcquire(nullptr,
                                                                                     mutex);
         ++i) {
      BackOff(i);
    }
  }

  ~ScopedExpectedMutexesOnWeakRefAccessLock() {
    DCHECK_EQ(Locks::expected_mutexes_on_weak_ref_access_guard_.load(std::memory_order_relaxed),
              mutex_);
    Locks::expected_mutexes_on_weak_ref_access_guard_.store(nullptr, std::memory_order_release);
  }

 private:
  const BaseMutex* const mutex_;
};

void Locks::Init() {
  if (logging_lock_ != nullptr) {
    // Already initialized.
    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
      DCHECK(modify_ldt_lock_ != nullptr);
    } else {
      DCHECK(modify_ldt_lock_ == nullptr);
    }
    DCHECK(abort_lock_ != nullptr);
    DCHECK(alloc_tracker_lock_ != nullptr);
    DCHECK(allocated_monitor_ids_lock_ != nullptr);
    DCHECK(allocated_thread_ids_lock_ != nullptr);
    DCHECK(breakpoint_lock_ != nullptr);
    DCHECK(classlinker_classes_lock_ != nullptr);
    DCHECK(custom_tls_lock_ != nullptr);
    DCHECK(deoptimization_lock_ != nullptr);
    DCHECK(heap_bitmap_lock_ != nullptr);
    DCHECK(oat_file_manager_lock_ != nullptr);
    DCHECK(verifier_deps_lock_ != nullptr);
    DCHECK(host_dlopen_handles_lock_ != nullptr);
    DCHECK(intern_table_lock_ != nullptr);
    DCHECK(jni_function_table_lock_ != nullptr);
    DCHECK(jni_libraries_lock_ != nullptr);
    DCHECK(logging_lock_ != nullptr);
    DCHECK(mutator_lock_ != nullptr);
    DCHECK(profiler_lock_ != nullptr);
    DCHECK(cha_lock_ != nullptr);
    DCHECK(subtype_check_lock_ != nullptr);
    DCHECK(thread_list_lock_ != nullptr);
    DCHECK(thread_suspend_count_lock_ != nullptr);
    DCHECK(trace_lock_ != nullptr);
    DCHECK(unexpected_signal_lock_ != nullptr);
    DCHECK(user_code_suspension_lock_ != nullptr);
    DCHECK(dex_lock_ != nullptr);
    DCHECK(native_debug_interface_lock_ != nullptr);
  } else {
    // Create global locks in level order from highest lock level to lowest.
    LockLevel current_lock_level = kInstrumentEntrypointsLock;
    DCHECK(instrument_entrypoints_lock_ == nullptr);
    instrument_entrypoints_lock_ = new Mutex("instrument entrypoint lock", current_lock_level);

#define UPDATE_CURRENT_LOCK_LEVEL(new_level) \
  if ((new_level) >= current_lock_level) { \
    /* Do not use CHECKs or FATAL here, abort_lock_ is not set up yet. */ \
    fprintf(stderr, "New lock level %d is not less than current level %d\n", \
            new_level, current_lock_level); \
    exit(1); \
  } \
  current_lock_level = new_level;

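    // Each lock below must be created at a strictly lower level than its
    // predecessor; the macro above aborts startup on any violation. For
    // example, the first step goes from kInstrumentEntrypointsLock down to
    // kUserCodeSuspensionLock. A consistent acquisition order matching this
    // hierarchy is what rules out deadlocks between these global locks.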
    UPDATE_CURRENT_LOCK_LEVEL(kUserCodeSuspensionLock);
    DCHECK(user_code_suspension_lock_ == nullptr);
    user_code_suspension_lock_ = new Mutex("user code suspension lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kMutatorLock);
    DCHECK(mutator_lock_ == nullptr);
    mutator_lock_ = new MutatorMutex("mutator lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kHeapBitmapLock);
    DCHECK(heap_bitmap_lock_ == nullptr);
    heap_bitmap_lock_ = new ReaderWriterMutex("heap bitmap lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kTraceLock);
    DCHECK(trace_lock_ == nullptr);
    trace_lock_ = new Mutex("trace lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kRuntimeShutdownLock);
    DCHECK(runtime_shutdown_lock_ == nullptr);
    runtime_shutdown_lock_ = new Mutex("runtime shutdown lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kProfilerLock);
    DCHECK(profiler_lock_ == nullptr);
    profiler_lock_ = new Mutex("profiler lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kDeoptimizationLock);
    DCHECK(deoptimization_lock_ == nullptr);
    deoptimization_lock_ = new Mutex("Deoptimization lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocTrackerLock);
    DCHECK(alloc_tracker_lock_ == nullptr);
    alloc_tracker_lock_ = new Mutex("AllocTracker lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadListLock);
    DCHECK(thread_list_lock_ == nullptr);
    thread_list_lock_ = new Mutex("thread list lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
    DCHECK(jni_libraries_lock_ == nullptr);
    jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
    DCHECK(breakpoint_lock_ == nullptr);
    breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kSubtypeCheckLock);
    DCHECK(subtype_check_lock_ == nullptr);
    subtype_check_lock_ = new Mutex("SubtypeCheck lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kClassLinkerClassesLock);
    DCHECK(classlinker_classes_lock_ == nullptr);
    classlinker_classes_lock_ = new ReaderWriterMutex("ClassLinker classes lock",
                                                      current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kMonitorPoolLock);
    DCHECK(allocated_monitor_ids_lock_ == nullptr);
    allocated_monitor_ids_lock_ = new Mutex("allocated monitor ids lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAllocatedThreadIdsLock);
    DCHECK(allocated_thread_ids_lock_ == nullptr);
    allocated_thread_ids_lock_ = new Mutex("allocated thread ids lock", current_lock_level);

    if (kRuntimeISA == InstructionSet::kX86 || kRuntimeISA == InstructionSet::kX86_64) {
      UPDATE_CURRENT_LOCK_LEVEL(kModifyLdtLock);
      DCHECK(modify_ldt_lock_ == nullptr);
      modify_ldt_lock_ = new Mutex("modify_ldt lock", current_lock_level);
    }

    UPDATE_CURRENT_LOCK_LEVEL(kDexLock);
    DCHECK(dex_lock_ == nullptr);
    dex_lock_ = new ReaderWriterMutex("ClassLinker dex lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kOatFileManagerLock);
    DCHECK(oat_file_manager_lock_ == nullptr);
    oat_file_manager_lock_ = new ReaderWriterMutex("OatFile manager lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kVerifierDepsLock);
    DCHECK(verifier_deps_lock_ == nullptr);
    verifier_deps_lock_ = new ReaderWriterMutex("verifier deps lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kHostDlOpenHandlesLock);
    DCHECK(host_dlopen_handles_lock_ == nullptr);
    host_dlopen_handles_lock_ = new Mutex("host dlopen handles lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kInternTableLock);
    DCHECK(intern_table_lock_ == nullptr);
    intern_table_lock_ = new Mutex("InternTable lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceProcessorLock);
    DCHECK(reference_processor_lock_ == nullptr);
    reference_processor_lock_ = new Mutex("ReferenceProcessor lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueClearedReferencesLock);
    DCHECK(reference_queue_cleared_references_lock_ == nullptr);
    reference_queue_cleared_references_lock_ =
        new Mutex("ReferenceQueue cleared references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueWeakReferencesLock);
    DCHECK(reference_queue_weak_references_lock_ == nullptr);
    reference_queue_weak_references_lock_ =
        new Mutex("ReferenceQueue weak references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueFinalizerReferencesLock);
    DCHECK(reference_queue_finalizer_references_lock_ == nullptr);
    reference_queue_finalizer_references_lock_ =
        new Mutex("ReferenceQueue finalizer references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueuePhantomReferencesLock);
    DCHECK(reference_queue_phantom_references_lock_ == nullptr);
    reference_queue_phantom_references_lock_ =
        new Mutex("ReferenceQueue phantom references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kReferenceQueueSoftReferencesLock);
    DCHECK(reference_queue_soft_references_lock_ == nullptr);
    reference_queue_soft_references_lock_ =
        new Mutex("ReferenceQueue soft references lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kJniGlobalsLock);
    DCHECK(jni_globals_lock_ == nullptr);
    jni_globals_lock_ =
        new ReaderWriterMutex("JNI global reference table lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kJniWeakGlobalsLock);
    DCHECK(jni_weak_globals_lock_ == nullptr);
    jni_weak_globals_lock_ = new Mutex("JNI weak global reference table lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kJniFunctionTableLock);
    DCHECK(jni_function_table_lock_ == nullptr);
    jni_function_table_lock_ = new Mutex("JNI function table lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kCustomTlsLock);
    DCHECK(custom_tls_lock_ == nullptr);
    custom_tls_lock_ = new Mutex("Thread::custom_tls_ lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kCHALock);
    DCHECK(cha_lock_ == nullptr);
    cha_lock_ = new Mutex("CHA lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
    DCHECK(native_debug_interface_lock_ == nullptr);
    native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kAbortLock);
    DCHECK(abort_lock_ == nullptr);
    abort_lock_ = new Mutex("abort lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kThreadSuspendCountLock);
    DCHECK(thread_suspend_count_lock_ == nullptr);
    thread_suspend_count_lock_ = new Mutex("thread suspend count lock", current_lock_level);

    UPDATE_CURRENT_LOCK_LEVEL(kUnexpectedSignalLock);
    DCHECK(unexpected_signal_lock_ == nullptr);
    unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);

    UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
    DCHECK(logging_lock_ == nullptr);
    logging_lock_ = new Mutex("logging lock", current_lock_level, true);

#undef UPDATE_CURRENT_LOCK_LEVEL

    // List of mutexes that we may hold when accessing a weak ref.
    AddToExpectedMutexesOnWeakRefAccess(dex_lock_, /*need_lock=*/ false);
    AddToExpectedMutexesOnWeakRefAccess(classlinker_classes_lock_, /*need_lock=*/ false);
    AddToExpectedMutexesOnWeakRefAccess(jni_libraries_lock_, /*need_lock=*/ false);

    InitConditions();
  }
}

void Locks::InitConditions() {
  thread_exit_cond_ = new ConditionVariable("thread exit condition variable", *thread_list_lock_);
}

void Locks::SetClientCallback(ClientCallback* safe_to_call_abort_cb) {
  safe_to_call_abort_callback.store(safe_to_call_abort_cb, std::memory_order_release);
}

// Helper to allow checking shutdown while ignoring locking requirements.
bool Locks::IsSafeToCallAbortRacy() {
  Locks::ClientCallback* safe_to_call_abort_cb =
      safe_to_call_abort_callback.load(std::memory_order_acquire);
  return safe_to_call_abort_cb != nullptr && safe_to_call_abort_cb();
}

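// In the two functions below, the branches are deliberately identical apart
// from taking the scoped guard. Within this file, need_lock is false only for
// the calls made from Init(), which presumably run before any other thread can
// touch expected_mutexes_on_weak_ref_access_.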
void Locks::AddToExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
  if (need_lock) {
    ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
    mutex->SetShouldRespondToEmptyCheckpointRequest(true);
    expected_mutexes_on_weak_ref_access_.push_back(mutex);
  } else {
    mutex->SetShouldRespondToEmptyCheckpointRequest(true);
    expected_mutexes_on_weak_ref_access_.push_back(mutex);
  }
}

void Locks::RemoveFromExpectedMutexesOnWeakRefAccess(BaseMutex* mutex, bool need_lock) {
  if (need_lock) {
    ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
    mutex->SetShouldRespondToEmptyCheckpointRequest(false);
    std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
    auto it = std::find(list.begin(), list.end(), mutex);
    DCHECK(it != list.end());
    list.erase(it);
  } else {
    mutex->SetShouldRespondToEmptyCheckpointRequest(false);
    std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
    auto it = std::find(list.begin(), list.end(), mutex);
    DCHECK(it != list.end());
    list.erase(it);
  }
}

bool Locks::IsExpectedOnWeakRefAccess(BaseMutex* mutex) {
  ScopedExpectedMutexesOnWeakRefAccessLock mu(mutex);
  std::vector<BaseMutex*>& list = expected_mutexes_on_weak_ref_access_;
  return std::find(list.begin(), list.end(), mutex) != list.end();
}

}  // namespace art