blob: 29b39817a86e93460700b3c3ab892141a1ffc4d0 [file] [log] [blame]
Ian Rogers693ff612013-02-01 10:56:12 -08001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
Brian Carlstromfc0e3212013-07-17 14:40:12 -070017#ifndef ART_RUNTIME_BASE_MUTEX_INL_H_
18#define ART_RUNTIME_BASE_MUTEX_INL_H_
Ian Rogers693ff612013-02-01 10:56:12 -080019
#include "mutex.h"

#define __STDC_FORMAT_MACROS 1  // Expose PRIu64 etc. from <inttypes.h> in C++.
#include <inttypes.h>

#define ATRACE_TAG ATRACE_TAG_DALVIK

#include "cutils/atomic-inline.h"
#include "cutils/trace.h"
#include "runtime.h"
#include "thread.h"
28
29namespace art {
30
31#define CHECK_MUTEX_CALL(call, args) CHECK_PTHREAD_CALL(call, args, name_)
32
33#if ART_USE_FUTEXES
34#include "linux/futex.h"
35#include "sys/syscall.h"
36#ifndef SYS_futex
37#define SYS_futex __NR_futex
38#endif
// Thin wrapper for the futex(2) system call, for which the C library provides
// no declaration; forwards all six arguments and returns the raw syscall result.
static inline int futex(volatile int *uaddr, int op, int val, const struct timespec *timeout,
                        volatile int *uaddr2, int val3) {
  const long rc = syscall(SYS_futex, uaddr, op, val, timeout, uaddr2, val3);  // NOLINT(runtime/int)
  return static_cast<int>(rc);
}
42#endif // ART_USE_FUTEXES
43
Hiroshi Yamauchi967a0ad2013-09-10 16:24:21 -070044#if defined(__APPLE__)
45
// Shadow of the leading layout of Darwin's opaque pthread_mutex_t, used via a
// reinterpret_cast (hence __may_alias__) to read the owner thread directly.
// This works on Mac OS 10.6 but hasn't been tested on older releases.
// NOTE(review): the padding fields mirror Darwin internals — re-verify the
// offsets if the target OS version changes.
struct __attribute__((__may_alias__)) darwin_pthread_mutex_t {
  long padding0;  // NOLINT(runtime/int) exact match to darwin type
  int padding1;
  uint32_t padding2;
  int16_t padding3;
  int16_t padding4;
  uint32_t padding5;
  pthread_t darwin_pthread_mutex_owner;
  // ...other stuff we don't care about.
};
57
// Shadow of the leading layout of Darwin's opaque pthread_rwlock_t, used via a
// reinterpret_cast (hence __may_alias__) to read the writer-owner thread.
// NOTE(review): offsets mirror Darwin internals — re-verify on OS updates.
struct __attribute__((__may_alias__)) darwin_pthread_rwlock_t {
  long padding0;  // NOLINT(runtime/int) exact match to darwin type
  pthread_mutex_t padding1;
  int padding2;
  pthread_cond_t padding3;
  pthread_cond_t padding4;
  int padding5;
  int padding6;
  pthread_t darwin_pthread_rwlock_owner;
  // ...other stuff we don't care about.
};
69
70#endif // __APPLE__
71
72#if defined(__GLIBC__)
73
// Shadow of the leading layout of glibc's pthread_mutex_t, used via a
// reinterpret_cast (hence __may_alias__) to read the owner tid field.
// NOTE(review): field offsets depend on the glibc version in use — confirm
// against the <pthread.h>/NPTL headers of the toolchain.
struct __attribute__((__may_alias__)) glibc_pthread_mutex_t {
  int32_t padding0[2];
  int owner;
  // ...other stuff we don't care about.
};
79
// Shadow of the leading layout of glibc's pthread_rwlock_t, used via a
// reinterpret_cast (hence __may_alias__) to read the writer tid field.
// The padding differs between 32-bit and 64-bit ABIs, hence the __LP64__ split.
// NOTE(review): offsets depend on the glibc version — confirm against the
// toolchain headers.
struct __attribute__((__may_alias__)) glibc_pthread_rwlock_t {
#ifdef __LP64__
  int32_t padding0[6];
#else
  int32_t padding0[7];
#endif
  int writer;
  // ...other stuff we don't care about.
};
89
90#endif // __GLIBC__
91
Ian Rogers693ff612013-02-01 10:56:12 -080092class ScopedContentionRecorder {
93 public:
Hiroshi Yamauchi1afde132013-08-06 17:09:30 -070094 ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
95 : mutex_(kLogLockContentions ? mutex : NULL),
96 blocked_tid_(kLogLockContentions ? blocked_tid : 0),
97 owner_tid_(kLogLockContentions ? owner_tid : 0),
98 start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
Jeff Hao08f2e7b2013-09-09 16:44:02 -070099 std::string msg = StringPrintf("Lock contention on %s (owner tid: %llu)",
100 mutex->GetName(), owner_tid);
101 ATRACE_BEGIN(msg.c_str());
Ian Rogers693ff612013-02-01 10:56:12 -0800102 }
103
104 ~ScopedContentionRecorder() {
Jeff Hao08f2e7b2013-09-09 16:44:02 -0700105 ATRACE_END();
Hiroshi Yamauchi1afde132013-08-06 17:09:30 -0700106 if (kLogLockContentions) {
107 uint64_t end_nano_time = NanoTime();
108 mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
109 }
Ian Rogers693ff612013-02-01 10:56:12 -0800110 }
111
112 private:
113 BaseMutex* const mutex_;
Ian Rogersc0fa3ad2013-02-05 00:11:55 -0800114 const uint64_t blocked_tid_;
115 const uint64_t owner_tid_;
Hiroshi Yamauchi1afde132013-08-06 17:09:30 -0700116 const uint64_t start_nano_time_;
Ian Rogers693ff612013-02-01 10:56:12 -0800117};
118
119static inline uint64_t SafeGetTid(const Thread* self) {
120 if (self != NULL) {
121 return static_cast<uint64_t>(self->GetTid());
122 } else {
123 return static_cast<uint64_t>(GetTid());
124 }
125}
126
// Debug-build sanity check invoked when a lock operation happens on a thread
// with no Thread object (self == NULL): only the whitelisted lock levels below
// are expected in that situation. Annotated NO_THREAD_SAFETY_ANALYSIS because
// it reads runtime state without the locks the analysis would demand.
static inline void CheckUnattachedThread(LockLevel level) NO_THREAD_SAFETY_ANALYSIS {
  // The check below enumerates the cases where we expect not to be able to sanity check locks
  // on a thread. Lock checking is disabled to avoid deadlock when checking shutdown lock.
  // TODO: tighten this check.
  if (kDebugLocking) {
    Runtime* runtime = Runtime::Current();
    // Tolerated when the runtime doesn't exist yet, hasn't started, or is
    // shutting down, or when the level is one of the always-allowed locks.
    CHECK(runtime == NULL || !runtime->IsStarted() || runtime->IsShuttingDownLocked() ||
          level == kDefaultMutexLevel || level == kRuntimeShutdownLock ||
          level == kThreadListLock || level == kLoggingLock || level == kAbortLock);
  }
}
138
Ian Rogersb6c31ea2013-02-04 18:11:33 -0800139inline void BaseMutex::RegisterAsLocked(Thread* self) {
140 if (UNLIKELY(self == NULL)) {
141 CheckUnattachedThread(level_);
142 return;
143 }
144 if (kDebugLocking) {
145 // Check if a bad Mutex of this level or lower is held.
146 bool bad_mutexes_held = false;
147 for (int i = level_; i >= 0; --i) {
148 BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
149 if (UNLIKELY(held_mutex != NULL)) {
Elliott Hughes0f827162013-02-26 12:12:58 -0800150 LOG(ERROR) << "Lock level violation: holding \"" << held_mutex->name_ << "\" "
Ian Rogers62d6c772013-02-27 08:32:07 -0800151 << "(level " << LockLevel(i) << " - " << i
152 << ") while locking \"" << name_ << "\" "
153 << "(level " << level_ << " - " << static_cast<int>(level_) << ")";
Ian Rogersb6c31ea2013-02-04 18:11:33 -0800154 if (i > kAbortLock) {
155 // Only abort in the check below if this is more than abort level lock.
156 bad_mutexes_held = true;
157 }
158 }
159 }
160 CHECK(!bad_mutexes_held);
161 }
162 // Don't record monitors as they are outside the scope of analysis. They may be inspected off of
163 // the monitor list.
164 if (level_ != kMonitorLock) {
165 self->SetHeldMutex(level_, this);
166 }
167}
168
Ian Rogers693ff612013-02-01 10:56:12 -0800169inline void BaseMutex::RegisterAsUnlocked(Thread* self) {
170 if (UNLIKELY(self == NULL)) {
171 CheckUnattachedThread(level_);
172 return;
173 }
174 if (level_ != kMonitorLock) {
175 if (kDebugLocking && !gAborting) {
176 CHECK(self->GetHeldMutex(level_) == this) << "Unlocking on unacquired mutex: " << name_;
177 }
178 self->SetHeldMutex(level_, NULL);
179 }
180}
181
// Acquires this lock in shared (reader) mode, blocking while it is held
// exclusively. On the futex path, state_ >= 0 counts shared holders and a
// negative value marks an exclusive holder.
inline void ReaderWriterMutex::SharedLock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (LIKELY(cur_state >= 0)) {
      // Add as an extra reader.
      done = android_atomic_acquire_cas(cur_state, cur_state + 1, &state_) == 0;
    } else {
      // Owner holds it exclusively, hang up.
      ScopedContentionRecorder scr(this, GetExclusiveOwnerTid(), SafeGetTid(self));
      android_atomic_inc(&num_pending_readers_);
      // Sleep until woken. EAGAIN means state_ no longer matched cur_state
      // before we slept; either way we re-read state_ and retry the loop.
      if (futex(&state_, FUTEX_WAIT, cur_state, NULL, NULL, 0) != 0) {
        if (errno != EAGAIN) {
          PLOG(FATAL) << "futex wait failed for " << name_;
        }
      }
      android_atomic_dec(&num_pending_readers_);
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_rdlock, (&rwlock_));
#endif
  RegisterAsLocked(self);
  AssertSharedHeld(self);
}
209
// Releases a shared (reader) hold. On the futex path the last reader out
// (state_ dropping to 0) wakes all waiters so a pending writer can proceed.
inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
  DCHECK(self == NULL || self == Thread::Current());
  AssertSharedHeld(self);
  RegisterAsUnlocked(self);
#if ART_USE_FUTEXES
  bool done = false;
  do {
    int32_t cur_state = state_;
    if (LIKELY(cur_state > 0)) {
      // Reduce state by 1.
      done = android_atomic_release_cas(cur_state, cur_state - 1, &state_) == 0;
      if (done && (cur_state - 1) == 0) {  // cas may fail due to noise?
        // We were the last reader; wake waiters only if any are registered.
        if (num_pending_writers_ > 0 || num_pending_readers_ > 0) {
          // Wake any exclusive waiters as there are now no readers.
          futex(&state_, FUTEX_WAKE, -1, NULL, NULL, 0);
        }
      }
    } else {
      // A non-positive state means no shared hold existed to release.
      LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_;
    }
  } while (!done);
#else
  CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_));
#endif
}
235
Hiroshi Yamauchi967a0ad2013-09-10 16:24:21 -0700236inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
237 DCHECK(self == NULL || self == Thread::Current());
238 bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
239 if (kDebugLocking) {
240 // Sanity debug check that if we think it is locked we have it in our held mutexes.
241 if (result && self != NULL && level_ != kMonitorLock && !gAborting) {
242 CHECK_EQ(self->GetHeldMutex(level_), this);
243 }
244 }
245 return result;
246}
247
// Returns the tid of the current exclusive holder, or 0 when unheld. Each
// platform branch digs the owner out of its native mutex representation.
inline uint64_t Mutex::GetExclusiveOwnerTid() const {
#if ART_USE_FUTEXES
  return exclusive_owner_;
#elif defined(__BIONIC__)
  // Bionic stores the owner tid in bits 16..31 of the mutex value word.
  return static_cast<uint64_t>((mutex_.value >> 16) & 0xffff);
#elif defined(__GLIBC__)
  // Aliases the opaque pthread_mutex_t via the shadow struct defined above.
  return reinterpret_cast<const glibc_pthread_mutex_t*>(&mutex_)->owner;
#elif defined(__APPLE__)
  const darwin_pthread_mutex_t* dpmutex = reinterpret_cast<const darwin_pthread_mutex_t*>(&mutex_);
  pthread_t owner = dpmutex->darwin_pthread_mutex_owner;
  // 0 for unowned, -1 for PTHREAD_MTX_TID_SWITCHING
  // TODO: should we make darwin_pthread_mutex_owner volatile and recheck until not -1?
  if ((owner == (pthread_t)0) || (owner == (pthread_t)-1)) {
    return 0;
  }
  uint64_t tid;
  CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__);  // Requires Mac OS 10.6
  return tid;
#else
#error unsupported C library
#endif
}
270
271inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
272 DCHECK(self == NULL || self == Thread::Current());
273 bool result = (GetExclusiveOwnerTid() == SafeGetTid(self));
274 if (kDebugLocking) {
275 // Sanity that if the pthread thinks we own the lock the Thread agrees.
276 if (self != NULL && result) {
277 CHECK_EQ(self->GetHeldMutex(level_), this);
278 }
279 }
280 return result;
281}
282
// Returns the exclusive owner's tid, 0 when unheld, or -1 (i.e. the maximum
// uint64_t, a sentinel) when the lock is held in shared mode.
inline uint64_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
#if ART_USE_FUTEXES
  int32_t state = state_;
  if (state == 0) {
    return 0;  // No owner.
  } else if (state > 0) {
    return -1;  // Shared.
  } else {
    return exclusive_owner_;
  }
#else
#if defined(__BIONIC__)
  return rwlock_.writerThreadId;
#elif defined(__GLIBC__)
  // Aliases the opaque pthread_rwlock_t via the shadow struct defined above.
  return reinterpret_cast<const glibc_pthread_rwlock_t*>(&rwlock_)->writer;
#elif defined(__APPLE__)
  const darwin_pthread_rwlock_t*
      dprwlock = reinterpret_cast<const darwin_pthread_rwlock_t*>(&rwlock_);
  pthread_t owner = dprwlock->darwin_pthread_rwlock_owner;
  if (owner == (pthread_t)0) {
    return 0;
  }
  uint64_t tid;
  CHECK_PTHREAD_CALL(pthread_threadid_np, (owner, &tid), __FUNCTION__);  // Requires Mac OS 10.6
  return tid;
#else
#error unsupported C library
#endif
#endif
}
313
Ian Rogers693ff612013-02-01 10:56:12 -0800314} // namespace art
315
Brian Carlstromfc0e3212013-07-17 14:40:12 -0700316#endif // ART_RUNTIME_BASE_MUTEX_INL_H_