blob: f70db35f1cd2c9aa00677639b43aa6a076529418 [file] [log] [blame]
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
Brian Carlstromfc0e3212013-07-17 14:40:12 -070017#ifndef ART_RUNTIME_BASE_MUTEX_INL_H_
18#define ART_RUNTIME_BASE_MUTEX_INL_H_
Ian Rogers693ff612013-02-01 10:56:12 -080019
Ian Rogers220228e2014-01-23 09:08:16 -080020#include <inttypes.h>
21
Ian Rogers693ff612013-02-01 10:56:12 -080022#include "mutex.h"
23
Hiroshi Yamauchib3733082013-08-12 17:28:49 -070024#define ATRACE_TAG ATRACE_TAG_DALVIK
25
Hiroshi Yamauchib3733082013-08-12 17:28:49 -070026#include "cutils/trace.h"
Ian Rogers576ca0c2014-06-06 15:58:22 -070027
28#include "base/stringprintf.h"
Ian Rogers693ff612013-02-01 10:56:12 -080029#include "runtime.h"
30#include "thread.h"
31
32namespace art {
33
34#define CHECK_MUTEX_CALL(call, args) CHECK_PTHREAD_CALL(call, args, name_)
35
36#if ART_USE_FUTEXES
37#include "linux/futex.h"
38#include "sys/syscall.h"
39#ifndef SYS_futex
40#define SYS_futex __NR_futex
41#endif
// Thin wrapper around the raw futex(2) system call; libc provides no wrapper,
// so we go through syscall(2) with SYS_futex. Argument order mirrors the
// kernel interface (uaddr, op, val, timeout, uaddr2, val3).
static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout, volatile int *uaddr2, int val3) {
  return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}
45#endif // ART_USE_FUTEXES
46
47class ScopedContentionRecorder {
48 public:
Hiroshi Yamauchi1afde132013-08-06 17:09:30 -070049 ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
50 : mutex_(kLogLockContentions ? mutex : NULL),
51 blocked_tid_(kLogLockContentions ? blocked_tid : 0),
52 owner_tid_(kLogLockContentions ? owner_tid : 0),
53 start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
Ian Rogers1ff3c982014-08-12 02:30:58 -070054 if (ATRACE_ENABLED()) {
55 std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
56 mutex->GetName(), owner_tid);
57 ATRACE_BEGIN(msg.c_str());
58 }
Ian Rogers693ff612013-02-01 10:56:12 -080059 }
60
61 ~ScopedContentionRecorder() {
Jeff Hao08f2e7b2013-09-09 16:44:02 -070062 ATRACE_END();
Hiroshi Yamauchi1afde132013-08-06 17:09:30 -070063 if (kLogLockContentions) {
64 uint64_t end_nano_time = NanoTime();
65 mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
66 }
Ian Rogers693ff612013-02-01 10:56:12 -080067 }
68
69 private:
70 BaseMutex* const mutex_;
Ian Rogersc0fa3ad2013-02-05 00:11:55 -080071 const uint64_t blocked_tid_;
72 const uint64_t owner_tid_;
Hiroshi Yamauchi1afde132013-08-06 17:09:30 -070073 const uint64_t start_nano_time_;
Ian Rogers693ff612013-02-01 10:56:12 -080074};
75
76static inline uint64_t SafeGetTid(const Thread* self) {
77 if (self != NULL) {
78 return static_cast<uint64_t>(self->GetTid());
79 } else {
80 return static_cast<uint64_t>(GetTid());
81 }
82}
83
// Sanity check invoked when a lock is (un)locked by a thread with no attached
// Thread* (self == nullptr). Only the enumerated lock levels below are
// expected to be touched in that state; anything else fails the CHECK in
// debug-locking builds. No-op when kDebugLocking is false.
static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALYSIS {
  // The check below enumerates the cases where we expect not to be able to sanity check locks
  // on a thread. Lock checking is disabled to avoid deadlock when checking shutdown lock.
  // TODO: tighten this check.
  if (kDebugLocking) {
    Runtime* runtime = Runtime::Current();
    CHECK(runtime == nullptr || !runtime->IsStarted() || runtime->IsShuttingDownLocked() ||
          // Used during thread creation to avoid races with runtime shutdown. Thread::Current not
          // yet established.
          level == kRuntimeShutdownLock ||
          // Thread Ids are allocated/released before threads are established.
          level == kAllocatedThreadIdsLock ||
          // Thread LDT's are initialized without Thread::Current established.
          level == kModifyLdtLock ||
          // Threads are unregistered while holding the thread list lock, during this process they
          // no longer exist and so we expect an unlock with no self.
          level == kThreadListLock ||
          // Ignore logging which may or may not have set up thread data structures.
          level == kLoggingLock ||
          // Avoid recursive death.
          level == kAbortLock) << level;
  }
}
107
Ian Rogersb6c31ea2013-02-04 18:11:33 -0800108inline void BaseMutex::RegisterAsLocked(Thread* self) {
109 if (UNLIKELY(self == NULL)) {
110 CheckUnattachedThread(level_);
111 return;
112 }
113 if (kDebugLocking) {
114 // Check if a bad Mutex of this level or lower is held.
115 bool bad_mutexes_held = false;
116 for (int i = level_; i >= 0; --i) {
117 BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
118 if (UNLIKELY(held_mutex != NULL)) {
Elliott Hughes0f827162013-02-26 12:12:58 -0800119 LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
Ian Rogers62d6c772013-02-27 08:32:07 -0800120 << "(level " << LockLevel(i) << " - " << i
121 << ") while locking \"" << name_ << "\" "
122 << "(level " << level_ << " - " << static_cast<int>(level_) << ")";
Ian Rogersb6c31ea2013-02-04 18:11:33 -0800123 if (i > kAbortLock) {
124 // Only abort in the check below if this is more than abort level lock.
125 bad_mutexes_held = true;
126 }
127 }
128 }
129 CHECK(!bad_mutexes_held);
130 }
131 // Don't record monitors as they are outside the scope of analysis. They may be inspected off of
132 // the monitor list.
133 if (level_ != kMonitorLock) {
134 self->SetHeldMutex(level_, this);
135 }
136}
137
Ian Rogers693ff612013-02-01 10:56:12 -0800138inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
139 if (UNLIKELY(self == NULL)) {
140 CheckUnattachedThread(level_);
141 return;
142 }
143 if (level_ != kMonitorLock) {
144 if (kDebugLocking && !gAborting) {
145 CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_;
146 }
147 self->SetHeldMutex(level_, NULL);
148 }
149}
150
151inline void ReaderWriterMutex::SharedLock(Thread* self) {
152 DCHECK(self == NULL || self == Thread::Current());
153#if ART_USE_FUTEXES
154 bool done = false;
155 do {
Ian Rogersc7190692014-07-08 23:50:26 -0700156 int32_t cur_state = state_.LoadRelaxed();
Ian Rogersc0fa3ad2013-02-05 00:11:55 -0800157 if (LIKELY(cur_state >= 0)) {
Ian Rogers693ff612013-02-01 10:56:12 -0800158 // Add as an extra reader.
Ian Rogersc7190692014-07-08 23:50:26 -0700159 done = state_.CompareExchangeWeakAcquire(cur_state, cur_state + 1);
Ian Rogers693ff612013-02-01 10:56:12 -0800160 } else {
161 // Owner holds it exclusively, hang up.
162 ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
Ian Rogersc7190692014-07-08 23:50:26 -0700163 ++num_pending_readers_;
164 if (futex(state_.Address(), FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
Ian Rogers693ff612013-02-01 10:56:12 -0800165 if (errno != EAGAIN) {
166 PLOG(FATAL) << "futex wait failed for " << name_;
167 }
168 }
Ian Rogersc7190692014-07-08 23:50:26 -0700169 --num_pending_readers_;
Ian Rogers693ff612013-02-01 10:56:12 -0800170 }
Brian Carlstromdf629502013-07-17 22:39:56 -0700171 } while (!done);
Ian Rogers693ff612013-02-01 10:56:12 -0800172#else
173 CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_));
174#endif
Ian Rogersc5f17732014-06-05 20:48:42 -0700175 DCHECK(exclusive_owner_ == 0U || exclusive_owner_ == -1U);
Ian Rogers693ff612013-02-01 10:56:12 -0800176 RegisterAsLocked(self);
177 AssertSharedHeld(self);
178}
179
180inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
181 DCHECK(self == NULL || self == Thread::Current());
Ian Rogersc5f17732014-06-05 20:48:42 -0700182 DCHECK(exclusive_owner_ == 0U || exclusive_owner_ == -1U);
Ian Rogers693ff612013-02-01 10:56:12 -0800183 AssertSharedHeld(self);
184 RegisterAsUnlocked(self);
185#if ART_USE_FUTEXES
186 bool done = false;
187 do {
Ian Rogersc7190692014-07-08 23:50:26 -0700188 int32_t cur_state = state_.LoadRelaxed();
Ian Rogers693ff612013-02-01 10:56:12 -0800189 if (LIKELY(cur_state > 0)) {
Ian Rogersc7190692014-07-08 23:50:26 -0700190 // Reduce state by 1 and impose lock release load/store ordering.
191 // Note, the relaxed loads below musn't reorder before the CompareExchange.
192 // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
193 // a status bit into the state on contention.
194 done = state_.CompareExchangeWeakSequentiallyConsistent(cur_state, cur_state - 1);
195 if (done && (cur_state - 1) == 0) { // Weak CAS may fail spuriously.
196 if (num_pending_writers_.LoadRelaxed() > 0 ||
197 num_pending_readers_.LoadRelaxed() > 0) {
Ian Rogers693ff612013-02-01 10:56:12 -0800198 // Wake any exclusive waiters as there are now no readers.
Ian Rogersc7190692014-07-08 23:50:26 -0700199 futex(state_.Address(), FUTEX_WAKE, -1, NULL, NULL, 0);
Ian Rogers693ff612013-02-01 10:56:12 -0800200 }
201 }
202 } else {
203 LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
204 }
Brian Carlstromdf629502013-07-17 22:39:56 -0700205 } while (!done);
Ian Rogers693ff612013-02-01 10:56:12 -0800206#else
207 CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
208#endif
209}
210
Hiroshi Yamauchi967a0ad2013-09-10 16:24:21 -0700211inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
212 DCHECK(self == NULL || self == Thread::Current());
213 bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
214 if (kDebugLocking) {
215 // Sanity debug check that if we think it is locked we have it in our held mutexes.
216 if (result && self != NULL && level_ != kMonitorLock && !gAborting) {
217 CHECK_EQ(self->GetHeldMutex(level_), this);
218 }
219 }
220 return result;
221}
222
// Returns the tid recorded for the current exclusive owner. This is a plain
// unsynchronized read of exclusive_owner_ — presumably callers (assertions,
// contention logging) tolerate a racy value; confirm against Mutex's locking
// code, which is not visible in this header.
inline uint64_t Mutex::GetExclusiveOwnerTid() const {
  return exclusive_owner_;
}
226
227inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
228 DCHECK(self == NULL || self == Thread::Current());
229 bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
230 if (kDebugLocking) {
231 // Sanity that if the pthread thinks we own the lock the Thread agrees.
232 if (self != NULL && result) {
233 CHECK_EQ(self->GetHeldMutex(level_), this);
234 }
235 }
236 return result;
237}
238
239inline uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
240#if ART_USE_FUTEXES
Ian Rogersc7190692014-07-08 23:50:26 -0700241 int32_t state = state_.LoadRelaxed();
Hiroshi Yamauchi967a0ad2013-09-10 16:24:21 -0700242 if (state == 0) {
243 return 0; // No owner.
244 } else if (state > 0) {
245 return -1; // Shared.
246 } else {
247 return exclusive_owner_;
248 }
249#else
Ian Rogersc5f17732014-06-05 20:48:42 -0700250 return exclusive_owner_;
Hiroshi Yamauchi967a0ad2013-09-10 16:24:21 -0700251#endif
252}
253
Ian Rogers693ff612013-02-01 10:56:12 -0800254} // namespace art
255
Brian Carlstromfc0e3212013-07-17 14:40:12 -0700256#endif // ART_RUNTIME_BASE_MUTEX_INL_H_