/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_BASE_MUTEX_INL_H_
#define ART_RUNTIME_BASE_MUTEX_INL_H_

#include <inttypes.h>

#include "mutex.h"

#define ATRACE_TAG ATRACE_TAG_DALVIK

#include "cutils/atomic-inline.h"
#include "cutils/trace.h"
#include "runtime.h"
#include "thread.h"

namespace art {

#define CHECK_MUTEX_CALL(call, args) CHECK_PTHREAD_CALL(call, args, name_)

#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout, volatile int *uaddr2, int val3) {
  return syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);
}
#endif  // ART_USE_FUTEXES
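
// The futex() helper above is a thin wrapper over the raw futex(2) syscall, since the C library
// does not export a futex() function. The futex-based lock paths below use it in
// FUTEX_WAIT/FUTEX_WAKE pairs on a lock's state word, e.g. (as in SharedLock/SharedUnlock):
//   futex(&state_, FUTEX_WAIT, cur_state, NULL, NULL, 0);  // Block if state_ still equals cur_state.
//   futex(&state_, FUTEX_WAKE, -1, NULL, NULL, 0);         // Wake all threads waiting on state_.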

#if defined(__APPLE__)

// This works on Mac OS 10.6 but hasn't been tested on older releases.
struct __attribute__((__may_alias__)) darwin_pthread_mutex_t {
  long padding0;  // NOLINT(runtime/int) exact match to darwin type
  int padding1;
  uint32_t padding2;
  int16_t padding3;
  int16_t padding4;
  uint32_t padding5;
  pthread_t darwin_pthread_mutex_owner;
  // ...other stuff we don't care about.
};

struct __attribute__((__may_alias__)) darwin_pthread_rwlock_t {
  long padding0;  // NOLINT(runtime/int) exact match to darwin type
  pthread_mutex_t padding1;
  int padding2;
  pthread_cond_t padding3;
  pthread_cond_t padding4;
  int padding5;
  int padding6;
  pthread_t darwin_pthread_rwlock_owner;
  // ...other stuff we don't care about.
};

#endif  // __APPLE__

#if defined(__GLIBC__)

struct __attribute__((__may_alias__)) glibc_pthread_mutex_t {
  int32_t padding0[2];
  int owner;
  // ...other stuff we don't care about.
};

struct __attribute__((__may_alias__)) glibc_pthread_rwlock_t {
#ifdef __LP64__
  int32_t padding0[6];
#else
  int32_t padding0[7];
#endif
  int writer;
  // ...other stuff we don't care about.
};

#endif  // __GLIBC__

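// RAII contention logger: the constructor opens an ATRACE "Lock contention" slice (and, when
// kLogLockContentions is enabled, notes the start time); the destructor ends the slice and, if
// contention logging is on, records the blocked time against the mutex via RecordContention().
// The futex slow paths below just declare one on the stack before parking, e.g.:
//   ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));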
class ScopedContentionRecorder {
 public:
  ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
      : mutex_(kLogLockContentions ? mutex : NULL),
        blocked_tid_(kLogLockContentions ? blocked_tid : 0),
        owner_tid_(kLogLockContentions ? owner_tid : 0),
        start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
    std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
                                   mutex->GetName(), owner_tid);
    ATRACE_BEGIN(msg.c_str());
  }

  ~ScopedContentionRecorder() {
    ATRACE_END();
    if (kLogLockContentions) {
      uint64_t end_nano_time = NanoTime();
      mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
    }
  }

 private:
  BaseMutex* const mutex_;
  const uint64_t blocked_tid_;
  const uint64_t owner_tid_;
  const uint64_t start_nano_time_;
};

static inline uint64_t SafeGetTid(const Thread* self) {
  if (self != NULL) {
    return static_cast<uint64_t>(self->GetTid());
  } else {
    return static_cast<uint64_t>(GetTid());
  }
}

static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALYSIS {
  // The check below enumerates the cases where we expect not to be able to sanity check locks
  // on a thread. Lock checking is disabled to avoid deadlock when checking shutdown lock.
  // TODO: tighten this check.
  if (kDebugLocking) {
    Runtime* runtime = Runtime::Current();
    CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDownLocked() ||
          level == kDefaultMutexLevel || level == kRuntimeShutdownLock ||
          level == kThreadListLock || level == kLoggingLock || level == kAbortLock);
  }
}

inline void BaseMutex::RegisterAsLocked(Thread* self) {
  if (UNLIKELY(self == NULL)) {
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    // Check if a bad Mutex of this level or lower is held.
    bool bad_mutexes_held = false;
    for (int i = level_; i >= 0; --i) {
      BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
      if (UNLIKELY(held_mutex != NULL)) {
        LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
                   << "(level " << LockLevel(i) << " - " << i
                   << ") while locking \"" << name_ << "\" "
                   << "(level " << level_ << " - " << static_cast<int>(level_) << ")";
        if (i > kAbortLock) {
          // Only abort in the check below if this is more than abort level lock.
          bad_mutexes_held = true;
        }
      }
    }
    CHECK(!bad_mutexes_held);
  }
  // Don't record monitors as they are outside the scope of analysis. They may be inspected off of
  // the monitor list.
  if (level_ != kMonitorLock) {
    self->SetHeldMutex(level_, this);
  }
}
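
// The kDebugLocking check above enforces ART's leveled locking discipline: a mutex may only be
// acquired while every mutex the thread already holds has a strictly higher LockLevel (violations
// involving levels at or below kAbortLock are logged but not fatal). For example, locking a
// second mutex at the same level as one already held is reported as a lock level violation.
// Monitors (kMonitorLock) are deliberately excluded from this bookkeeping.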

inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
  if (UNLIKELY(self == NULL)) {
    CheckUnattachedThread(level_);
    return;
  }
  if (level_ != kMonitorLock) {
    if (kDebugLocking && !gAborting) {
      CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_;
    }
    self->SetHeldMutex(level_, NULL);
  }
}

inline void ReaderWriterMutex::SharedLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (LIKELY(cur_state >= 0)) {
      // Add as an extra reader.
      done = android_atomic_acquire_cas(cur_state, cur_state + 1, &state_) == 0;
    } else {
      // Owner holds it exclusively, hang up.
      ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
      android_atomic_inc(&num_pending_readers_);
      if (futex(&state_, FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
        if (errno != EAGAIN) {
          PLOG(FATAL) << "futex wait failed for " << name_;
        }
      }
      android_atomic_dec(&num_pending_readers_);
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_));
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
}
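
// On the futex path, state_ encodes the entire reader/writer state: 0 means unlocked, a positive
// value is the number of shared (reader) holders, and a negative value means a writer holds the
// lock exclusively (see ReaderWriterMutex::GetExclusiveOwnerTid() below). SharedLock() above
// therefore loops on an acquire CAS of state_ -> state_ + 1 and parks on the futex while a writer
// owns the lock; SharedUnlock() below reverses the increment with a release CAS and wakes pending
// writers once the last reader has left.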

inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertSharedHeld(self);
  RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (LIKELY(cur_state > 0)) {
      // Reduce state by 1.
      done = android_atomic_release_cas(cur_state, cur_state - 1, &state_) == 0;
      if (done && (cur_state - 1) == 0) {  // cas may fail due to noise?
        if (num_pending_writers_.LoadRelaxed() > 0 || num_pending_readers_ > 0) {
          // Wake any exclusive waiters as there are now no readers.
          futex(&state_, FUTEX_WAKE, -1, NULL, NULL, 0);
        }
      }
    } else {
      LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}

inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
  DCHECK(self == NULL || self == Thread::Current());
  bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
  if (kDebugLocking) {
    // Sanity debug check that if we think it is locked we have it in our held mutexes.
    if (result && self != NULL && level_ != kMonitorLock && !gAborting) {
      CHECK_EQ(self->GetHeldMutex(level_), this);
    }
  }
  return result;
}

inline uint64_t Mutex::GetExclusiveOwnerTid() const {
#if ART_USE_FUTEXES
  return exclusive_owner_;
#elif defined(__BIONIC__)
  return static_cast<uint64_t>((mutex_.value >> 16) & 0xffff);
#elif defined(__GLIBC__)
  return reinterpret_cast<const glibc_pthread_mutex_t*>(&mutex_)->owner;
#elif defined(__APPLE__)
  const darwin_pthread_mutex_t* dpmutex = reinterpret_cast<const darwin_pthread_mutex_t*>(&mutex_);
  pthread_t owner = dpmutex->darwin_pthread_mutex_owner;
  // 0 for unowned, -1 for PTHREAD_MTX_TID_SWITCHING
  // TODO: should we make darwin_pthread_mutex_owner volatile and recheck until not -1?
  if ((owner == (pthread_t)0) || (owner == (pthread_t)-1)) {
    return 0;
  }
  uint64_t tid;
  CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__);  // Requires Mac OS 10.6
  return tid;
#else
#error unsupported C library
#endif
}
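
// Note: on the non-futex builds there is no portable way to ask a pthread_mutex_t for its owner,
// so the code above reaches into library-private layouts: the owner bits packed into bionic's
// mutex_.value, or the __may_alias__ glibc/darwin structs defined at the top of this file. The
// result is best-effort and is used primarily for assertions and contention reporting.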

inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
  DCHECK(self == NULL || self == Thread::Current());
  bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
  if (kDebugLocking) {
    // Sanity that if the pthread thinks we own the lock the Thread agrees.
    if (self != NULL && result) {
      CHECK_EQ(self->GetHeldMutex(level_), this);
    }
  }
  return result;
}

inline uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
#if ART_USE_FUTEXES
  int32_t state = state_;
  if (state == 0) {
    return 0;  // No owner.
  } else if (state > 0) {
    return -1;  // Shared.
  } else {
    return exclusive_owner_;
  }
#else
#if defined(__BIONIC__)
  return rwlock_.writerThreadId;
#elif defined(__GLIBC__)
  return reinterpret_cast<const glibc_pthread_rwlock_t*>(&rwlock_)->writer;
#elif defined(__APPLE__)
  const darwin_pthread_rwlock_t*
      dprwlock = reinterpret_cast<const darwin_pthread_rwlock_t*>(&rwlock_);
  pthread_t owner = dprwlock->darwin_pthread_rwlock_owner;
  if (owner == (pthread_t)0) {
    return 0;
  }
  uint64_t tid;
  CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__);  // Requires Mac OS 10.6
  return tid;
#else
#error unsupported C library
#endif
#endif
}

}  // namespace art

#endif  // ART_RUNTIME_BASE_MUTEX_INL_H_