blob: 44a84c834f210cf76f6a84430b1f42f5756f2e08 [file] [log] [blame]
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
Brian Carlstromfc0e3212013-07-17 14:40:12 -070017#ifndef ART_RUNTIME_BASE_MUTEX_INL_H_
18#define ART_RUNTIME_BASE_MUTEX_INL_H_
Ian Rogers693ff612013-02-01 10:56:12 -080019
Ian Rogers220228e2014-01-23 09:08:16 -080020#include <inttypes.h>
21
Ian Rogers693ff612013-02-01 10:56:12 -080022#include "mutex.h"
23
Ian Rogerscf7f1912014-10-22 22:06:39 -070024#include "base/value_object.h"
Ian Rogers693ff612013-02-01 10:56:12 -080025#include "thread.h"
Vladimir Marko80afd022015-05-19 18:08:00 +010026#include "utils.h"
Ian Rogers693ff612013-02-01 10:56:12 -080027
Ian Rogers693ff612013-02-01 10:56:12 -080028#if ART_USE_FUTEXES
29#include "linux/futex.h"
30#include "sys/syscall.h"
31#ifndef SYS_futex
32#define SYS_futex __NR_futex
33#endif
Chih-Hung Hsieh729c1cf2014-11-06 10:49:16 -080034#endif // ART_USE_FUTEXES
35
Ian Rogersd6d7c3b2014-11-06 14:26:29 -080036#define CHECK_MUTEX_CALL(call, args) CHECK_PTHREAD_CALL(call, args, name_)
37
Chih-Hung Hsieh729c1cf2014-11-06 10:49:16 -080038namespace art {
39
40#if ART_USE_FUTEXES
// Thin wrapper around the futex(2) system call. glibc/bionic expose no futex()
// entry point, so invoke it via syscall(2); the argument order mirrors the
// kernel interface (uaddr2/val3 are only used by the multi-address ops).
static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout,
                        volatile int *uaddr2, int val3) {
  return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}
45#endif // ART_USE_FUTEXES
46
Ian Rogers693ff612013-02-01 10:56:12 -080047static inline uint64_t SafeGetTid(const Thread* self) {
Mathieu Chartier2cebb242015-04-21 16:50:40 -070048 if (self != nullptr) {
Ian Rogers693ff612013-02-01 10:56:12 -080049 return static_cast<uint64_t>(self->GetTid());
50 } else {
51 return static_cast<uint64_t>(GetTid());
52 }
53}
54
// Validates that a lock operation performed with no attached Thread* is one of
// the lock levels for which that is expected (thread startup/teardown, logging,
// aborting). Annotated NO_THREAD_SAFETY_ANALYSIS because it inspects lock state
// without holding any lock itself.
static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALYSIS {
  // The check below enumerates the cases where we expect not to be able to sanity check locks
  // on a thread. Lock checking is disabled to avoid deadlock when checking shutdown lock.
  // TODO: tighten this check.
  if (kDebugLocking) {
    CHECK(!Locks::IsSafeToCallAbortRacy() ||
          // Used during thread creation to avoid races with runtime shutdown. Thread::Current not
          // yet established.
          level == kRuntimeShutdownLock ||
          // Thread Ids are allocated/released before threads are established.
          level == kAllocatedThreadIdsLock ||
          // Thread LDT's are initialized without Thread::Current established.
          level == kModifyLdtLock ||
          // Threads are unregistered while holding the thread list lock, during this process they
          // no longer exist and so we expect an unlock with no self.
          level == kThreadListLock ||
          // Ignore logging which may or may not have set up thread data structures.
          level == kLoggingLock ||
          // When transitioning from suspended to runnable, a daemon thread might be in
          // a situation where the runtime is shutting down. To not crash our debug locking
          // mechanism we just pass null Thread* to the MutexLock during that transition
          // (see Thread::TransitionFromSuspendedToRunnable).
          level == kThreadSuspendCountLock ||
          // Avoid recursive death.
          level == kAbortLock) << level;
  }
}
82
Ian Rogersb6c31ea2013-02-04 18:11:33 -080083inline void BaseMutex::RegisterAsLocked(Thread* self) {
Mathieu Chartier2cebb242015-04-21 16:50:40 -070084 if (UNLIKELY(self == nullptr)) {
Ian Rogersb6c31ea2013-02-04 18:11:33 -080085 CheckUnattachedThread(level_);
86 return;
87 }
88 if (kDebugLocking) {
89 // Check if a bad Mutex of this level or lower is held.
90 bool bad_mutexes_held = false;
91 for (int i = level_; i >= 0; --i) {
92 BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
Mathieu Chartier2cebb242015-04-21 16:50:40 -070093 if (UNLIKELY(held_mutex != nullptr)) {
Elliott Hughes0f827162013-02-26 12:12:58 -080094 LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
Ian Rogers62d6c772013-02-27 08:32:07 -080095 << "(level " << LockLevel(i) << " - " << i
96 << ") while locking \"" << name_ << "\" "
97 << "(level " << level_ << " - " << static_cast<int>(level_) << ")";
Ian Rogersb6c31ea2013-02-04 18:11:33 -080098 if (i > kAbortLock) {
99 // Only abort in the check below if this is more than abort level lock.
100 bad_mutexes_held = true;
101 }
102 }
103 }
Nicolas Geoffraydb978712014-12-09 13:33:38 +0000104 if (gAborting == 0) { // Avoid recursive aborts.
105 CHECK(!bad_mutexes_held);
106 }
Ian Rogersb6c31ea2013-02-04 18:11:33 -0800107 }
108 // Don't record monitors as they are outside the scope of analysis. They may be inspected off of
109 // the monitor list.
110 if (level_ != kMonitorLock) {
111 self->SetHeldMutex(level_, this);
112 }
113}
114
Ian Rogers693ff612013-02-01 10:56:12 -0800115inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700116 if (UNLIKELY(self == nullptr)) {
Ian Rogers693ff612013-02-01 10:56:12 -0800117 CheckUnattachedThread(level_);
118 return;
119 }
120 if (level_ != kMonitorLock) {
Nicolas Geoffraydb978712014-12-09 13:33:38 +0000121 if (kDebugLocking && gAborting == 0) { // Avoid recursive aborts.
Ian Rogers693ff612013-02-01 10:56:12 -0800122 CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_;
123 }
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700124 self->SetHeldMutex(level_, nullptr);
Ian Rogers693ff612013-02-01 10:56:12 -0800125 }
126}
127
// Acquires the mutex for shared (reader) access. On futex builds this is a
// weak-CAS loop over state_ (>= 0 means readers-only / free; the negative case
// indicates contention and is handled out of line); otherwise it defers to
// pthread_rwlock_rdlock.
inline void ReaderWriterMutex::SharedLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state >= 0)) {
      // Add as an extra reader. Acquire ordering so reads under the lock can't
      // hoist above the acquisition; weak CAS may fail spuriously, hence the loop.
      done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
    } else {
      // Contended (writer present): block in the slow path, then retry.
      HandleSharedLockContention(self, cur_state);
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_));
#endif
  // NOTE(review): compares a (presumably 64-bit) owner field against -1U
  // (32-bit all-ones) — verify exclusive_owner_'s width makes this intentional.
  DCHECK(exclusive_owner_ == 0U || exclusive_owner_ == -1U);
  RegisterAsLocked(self);
  AssertSharedHeld(self);
}
148
// Releases a shared (reader) hold. On futex builds, decrements state_ with a
// weak CAS; the last reader out wakes any pending writers/readers via futex.
inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  // NOTE(review): as in SharedLock, -1U here is 32-bit all-ones; confirm it
  // matches exclusive_owner_'s "shared" sentinel width.
  DCHECK(exclusive_owner_ == 0U || exclusive_owner_ == -1U);
  AssertSharedHeld(self);
  RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_.LoadRelaxed();
    if (LIKELY(cur_state > 0)) {
      // Reduce state by 1 and impose lock release load/store ordering.
      // Note, the relaxed loads below musn't reorder before the CompareExchange.
      // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
      // a status bit into the state on contention.
      done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, cur_state - 1);
      if (done && (cur_state - 1) == 0) {  // Weak CAS may fail spuriously.
        if (num_pending_writers_.LoadRelaxed() > 0 ||
            num_pending_readers_.LoadRelaxed() > 0) {
          // Wake any exclusive waiters as there are now no readers.
          futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
        }
      }
    } else {
      // state_ <= 0 means we weren't actually holding a shared lock.
      LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}
179
Hiroshi Yamauchi967a0ad2013-09-10 16:24:21 -0700180inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700181 DCHECK(self == nullptr || self == Thread::Current());
Hiroshi Yamauchi967a0ad2013-09-10 16:24:21 -0700182 bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
183 if (kDebugLocking) {
184 // Sanity debug check that if we think it is locked we have it in our held mutexes.
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700185 if (result && self != nullptr && level_ != kMonitorLock && !gAborting) {
Hiroshi Yamauchi967a0ad2013-09-10 16:24:21 -0700186 CHECK_EQ(self->GetHeldMutex(level_), this);
187 }
188 }
189 return result;
190}
191
// Returns the tid recorded as the exclusive owner. NOTE(review): this is a
// plain (unsynchronized) read of exclusive_owner_ — presumably only meaningful
// as a racy hint or when the caller holds the lock; confirm against callers.
inline uint64_t Mutex::GetExclusiveOwnerTid() const {
  return exclusive_owner_;
}
195
196inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700197 DCHECK(self == nullptr || self == Thread::Current());
Hiroshi Yamauchi967a0ad2013-09-10 16:24:21 -0700198 bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
199 if (kDebugLocking) {
200 // Sanity that if the pthread thinks we own the lock the Thread agrees.
Mathieu Chartier2cebb242015-04-21 16:50:40 -0700201 if (self != nullptr && result) {
Hiroshi Yamauchi967a0ad2013-09-10 16:24:21 -0700202 CHECK_EQ(self->GetHeldMutex(level_), this);
203 }
204 }
205 return result;
206}
207
// Returns the exclusive owner's tid, 0 when unowned, or -1 (all-ones) when
// held shared. NOTE(review): state_ and exclusive_owner_ are read with no
// mutual ordering, so the result is a racy snapshot — treat as a hint.
inline uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
#if ART_USE_FUTEXES
  int32_t state = state_.LoadRelaxed();
  if (state == 0) {
    return 0;  // No owner.
  } else if (state > 0) {
    return -1;  // Shared.
  } else {
    // state < 0: a writer holds the lock; report its recorded tid.
    return exclusive_owner_;
  }
#else
  return exclusive_owner_;
#endif
}
222
// Bookkeeping for a thread leaving the runnable state: drop the shared hold on
// the mutator lock from lock-level tracking. Asserts the hold first, since
// RegisterAsUnlocked clears the record it checks.
inline void MutatorMutex::TransitionFromRunnableToSuspended(Thread* self) {
  AssertSharedHeld(self);
  RegisterAsUnlocked(self);
}
227
// Bookkeeping for a thread becoming runnable: record a shared hold on the
// mutator lock, then assert it. Mirror image of
// TransitionFromRunnableToSuspended.
inline void MutatorMutex::TransitionFromSuspendedToRunnable(Thread* self) {
  RegisterAsLocked(self);
  AssertSharedHeld(self);
}
232
Ian Rogers693ff612013-02-01 10:56:12 -0800233} // namespace art
234
Brian Carlstromfc0e3212013-07-17 14:40:12 -0700235#endif // ART_RUNTIME_BASE_MUTEX_INL_H_