/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_INL_H_
#define ART_RUNTIME_BASE_MUTEX_INL_H_

#include <inttypes.h>

#include "mutex.h"

#include "base/utils.h"
#include "base/value_object.h"
#include "thread.h"

#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#endif  // ART_USE_FUTEXES

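// Forwards a pthread call to CHECK_PTHREAD_CALL so that failures are reported
// tagged with this mutex's name_.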
#define CHECK_MUTEX_CALL(call, args) CHECK_PTHREAD_CALL(call, args, name_)

namespace art {

#if ART_USE_FUTEXES
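// Thin wrapper around the futex(2) system call; neither bionic nor glibc
// exposes a futex() library function, so we go through syscall(2) directly.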
static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout,
                        volatile int *uaddr2, int val3) {
  return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}
#endif  // ART_USE_FUTEXES
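
// Illustrative only (not part of the original file): a typical caller blocks on a
// 32-bit word and is woken when another thread changes it, e.g.
//
//   // Sleep while *addr still holds the expected value.
//   futex(addr, FUTEX_WAIT_PRIVATE, expected, nullptr, nullptr, 0);
//   // Wake up to kWakeAll waiters blocked on addr.
//   futex(addr, FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);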

// The following isn't strictly necessary, but we want updates on Atomic<pid_t> to be lock-free.
// TODO: Use std::atomic::is_always_lock_free after switching to C++17 atomics.
static_assert(sizeof(pid_t) <= sizeof(int32_t), "pid_t should fit in 32 bits");

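// Returns the tid of |self| when it is known, otherwise queries the kernel for
// the tid of the calling thread.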
static inline pid_t SafeGetTid(const Thread* self) {
  if (self != nullptr) {
    return self->GetTid();
  } else {
    return GetTid();
  }
}

static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALYSIS {
  // The check below enumerates the cases where we expect not to be able to sanity check locks
  // on a thread. Lock checking is disabled to avoid deadlock when checking shutdown lock.
  // TODO: tighten this check.
  if (kDebugLocking) {
    CHECK(!Locks::IsSafeToCallAbortRacy() ||
          // Used during thread creation to avoid races with runtime shutdown. Thread::Current not
          // yet established.
          level == kRuntimeShutdownLock ||
          // Thread Ids are allocated/released before threads are established.
          level == kAllocatedThreadIdsLock ||
          // Thread LDT's are initialized without Thread::Current established.
          level == kModifyLdtLock ||
          // Threads are unregistered while holding the thread list lock, during this process they
          // no longer exist and so we expect an unlock with no self.
          level == kThreadListLock ||
          // Ignore logging which may or may not have set up thread data structures.
          level == kLoggingLock ||
          // When transitioning from suspended to runnable, a daemon thread might be in
          // a situation where the runtime is shutting down. To not crash our debug locking
          // mechanism we just pass null Thread* to the MutexLock during that transition
          // (see Thread::TransitionFromSuspendedToRunnable).
          level == kThreadSuspendCountLock ||
          // Avoid recursive death.
          level == kAbortLock ||
          // Locks at the absolute top of the stack can be locked at any time.
          level == kTopLockLevel ||
          // The unexpected signal handler may be catching signals from any thread.
          level == kUnexpectedSignalLock) << level;
  }
}

inline void BaseMutex::RegisterAsLocked(Thread* self) {
  if (UNLIKELY(self == nullptr)) {
    CheckUnattachedThread(level_);
    return;
  }
  LockLevel level = level_;
  // It would be nice to avoid this condition checking in the non-debug case,
  // but that would make the various methods that check if a mutex is held not
  // work properly for thread wait locks. Since the vast majority of lock
  // acquisitions are not thread wait locks, this check should not be too
  // expensive.
  if (UNLIKELY(level == kThreadWaitLock) && self->GetHeldMutex(kThreadWaitLock) != nullptr) {
    level = kThreadWaitWakeLock;
  }
  if (kDebugLocking) {
    // Check if a bad Mutex of this level or lower is held.
    bool bad_mutexes_held = false;
    // Specifically allow a kTopLockLevel lock to be gained when the current thread holds the
    // mutator_lock_ exclusive. This is because suspending while holding locks at this level is
    // not allowed, and if we hold the mutator_lock_ exclusive we must eventually unsuspend,
    // so there are no deadlocks.
    if (level == kTopLockLevel &&
        Locks::mutator_lock_->IsSharedHeld(self) &&
        !Locks::mutator_lock_->IsExclusiveHeld(self)) {
      LOG(ERROR) << "Lock level violation: holding \"" << Locks::mutator_lock_->name_ << "\" "
                 << "(level " << kMutatorLock << " - " << static_cast<int>(kMutatorLock)
                 << ") non-exclusive while locking \"" << name_ << "\" "
                 << "(level " << level << " - " << static_cast<int>(level) << ") a top level "
                 << "mutex. This is not allowed.";
      bad_mutexes_held = true;
    } else if (this == Locks::mutator_lock_ && self->GetHeldMutex(kTopLockLevel) != nullptr) {
      LOG(ERROR) << "Lock level violation: locking mutator_lock_ while already holding a "
                 << "kTopLockLevel lock (" << self->GetHeldMutex(kTopLockLevel)->name_
                 << ") is not allowed.";
      bad_mutexes_held = true;
    }
    for (int i = level; i >= 0; --i) {
      LockLevel lock_level_i = static_cast<LockLevel>(i);
      BaseMutex* held_mutex = self->GetHeldMutex(lock_level_i);
      if (level == kTopLockLevel &&
          lock_level_i == kMutatorLock &&
          Locks::mutator_lock_->IsExclusiveHeld(self)) {
        // This is checked above.
        continue;
      } else if (UNLIKELY(held_mutex != nullptr) && lock_level_i != kAbortLock) {
        LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
                   << "(level " << lock_level_i << " - " << i
                   << ") while locking \"" << name_ << "\" "
                   << "(level " << level << " - " << static_cast<int>(level) << ")";
        if (lock_level_i > kAbortLock) {
          // Only abort in the check below if this is more than abort level lock.
          bad_mutexes_held = true;
        }
      }
    }
    if (gAborting == 0) {  // Avoid recursive aborts.
      CHECK(!bad_mutexes_held);
    }
  }
  // Don't record monitors as they are outside the scope of analysis. They may be inspected off of
  // the monitor list.
  if (level != kMonitorLock) {
    self->SetHeldMutex(level, this);
  }
}
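
// Illustrative sketch of the ordering rule the loop above enforces (not from the
// original source; lock names here are hypothetical). Locks must be acquired from
// higher level to lower level, so any held mutex at or below the new level is flagged:
//
//   Mutex thread_list("example thread list lock", kThreadListLock);
//   Mutex logging("example logging lock", kLoggingLock);  // kLoggingLock < kThreadListLock.
//   thread_list.ExclusiveLock(self);
//   logging.ExclusiveLock(self);    // OK: lower level than every held lock.
//   logging.ExclusiveUnlock(self);
//   thread_list.ExclusiveUnlock(self);
//   // Acquiring in the opposite order would trip the CHECK in debug builds.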

inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
  if (UNLIKELY(self == nullptr)) {
    CheckUnattachedThread(level_);
    return;
  }
  if (level_ != kMonitorLock) {
    auto level = level_;
    if (UNLIKELY(level == kThreadWaitLock) && self->GetHeldMutex(kThreadWaitWakeLock) == this) {
      level = kThreadWaitWakeLock;
    }
    if (kDebugLocking && gAborting == 0) {  // Avoid recursive aborts.
      if (level == kThreadWaitWakeLock) {
        CHECK(self->GetHeldMutex(kThreadWaitLock) != nullptr)
            << "Held " << kThreadWaitWakeLock << " without " << kThreadWaitLock;
      }
      CHECK(self->GetHeldMutex(level) == this) << "Unlocking on unacquired mutex: " << name_;
    }
    self->SetHeldMutex(level, nullptr);
  }
}

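// On futex builds, state_ encodes the reader/writer state in a single 32-bit word:
// 0 means unlocked, a positive value is the number of shared (reader) holders, and
// a negative value means exclusively (writer) held. See GetExclusiveOwnerTid below.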
inline void ReaderWriterMutex::SharedLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (LIKELY(cur_state >= 0)) {
      // Add as an extra reader.
      done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1);
    } else {
      HandleSharedLockContention(self, cur_state);
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_));
#endif
  DCHECK(GetExclusiveOwnerTid() == 0 || GetExclusiveOwnerTid() == -1);
  RegisterAsLocked(self);
  AssertSharedHeld(self);
}

inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  DCHECK(GetExclusiveOwnerTid() == 0 || GetExclusiveOwnerTid() == -1);
  AssertSharedHeld(self);
  RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    if (LIKELY(cur_state > 0)) {
      // Reduce state by 1 and impose lock release load/store ordering.
      // Note, the num_contenders_ load below mustn't reorder before the CompareAndSet.
      done = state_.CompareAndSetWeakSequentiallyConsistent(cur_state, cur_state - 1);
      if (done && (cur_state - 1) == 0) {  // Weak CAS may fail spuriously.
        if (num_contenders_.load(std::memory_order_seq_cst) > 0) {
          // Wake any exclusive waiters as there are now no readers.
          futex(state_.Address(), FUTEX_WAKE_PRIVATE, kWakeAll, nullptr, nullptr, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
  DCHECK(self == nullptr || self == Thread::Current());
  bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
  if (kDebugLocking) {
    // Sanity debug check that if we think it is locked we have it in our held mutexes.
    if (result && self != nullptr && level_ != kMonitorLock && !gAborting) {
      if (level_ == kThreadWaitLock && self->GetHeldMutex(kThreadWaitLock) != this) {
        CHECK_EQ(self->GetHeldMutex(kThreadWaitWakeLock), this);
      } else {
        CHECK_EQ(self->GetHeldMutex(level_), this);
      }
    }
  }
  return result;
}

inline pid_t Mutex::GetExclusiveOwnerTid() const {
  return exclusive_owner_.load(std::memory_order_relaxed);
}

inline void Mutex::AssertExclusiveHeld(const Thread* self) const {
  if (kDebugLocking && (gAborting == 0)) {
    CHECK(IsExclusiveHeld(self)) << *this;
  }
}

inline void Mutex::AssertHeld(const Thread* self) const {
  AssertExclusiveHeld(self);
}

inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
  DCHECK(self == nullptr || self == Thread::Current());
  bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
  if (kDebugLocking) {
    // Sanity check that if the pthread thinks we own the lock the Thread agrees.
    if (self != nullptr && result) {
      CHECK_EQ(self->GetHeldMutex(level_), this);
    }
  }
  return result;
}

inline pid_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
#if ART_USE_FUTEXES
  int32_t state = state_.load(std::memory_order_relaxed);
  if (state == 0) {
    return 0;  // No owner.
  } else if (state > 0) {
    return -1;  // Shared.
  } else {
    return exclusive_owner_.load(std::memory_order_relaxed);
  }
#else
  return exclusive_owner_.load(std::memory_order_relaxed);
#endif
}

inline void ReaderWriterMutex::AssertExclusiveHeld(const Thread* self) const {
  if (kDebugLocking && (gAborting == 0)) {
    CHECK(IsExclusiveHeld(self)) << *this;
  }
}

inline void ReaderWriterMutex::AssertWriterHeld(const Thread* self) const {
  AssertExclusiveHeld(self);
}

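// For the mutator lock, a thread's runnable state stands in for a shared hold on
// the lock word, so these transitions only update the bookkeeping used by the
// lock-level checks above; they do not touch the futex state itself.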
inline void MutatorMutex::TransitionFromRunnableToSuspended(Thread* self) {
  AssertSharedHeld(self);
  RegisterAsUnlocked(self);
}

inline void MutatorMutex::TransitionFromSuspendedToRunnable(Thread* self) {
  RegisterAsLocked(self);
  AssertSharedHeld(self);
}

inline ReaderMutexLock::ReaderMutexLock(Thread* self, ReaderWriterMutex& mu)
    : self_(self), mu_(mu) {
  mu_.SharedLock(self_);
}

inline ReaderMutexLock::~ReaderMutexLock() {
  mu_.SharedUnlock(self_);
}
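
// Usage sketch (illustrative, not from the original source): ReaderMutexLock is a
// scoped guard, so the shared lock is released when the guard goes out of scope.
//
//   {
//     ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
//     // ... read-only work under the shared lock ...
//   }  // ~ReaderMutexLock runs SharedUnlock here.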

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_INL_H_