Elliott Hughes | 8daa092 | 2011-09-11 13:46:25 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2011 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #include "mutex.h" |
| 18 | |
| 19 | #include <errno.h> |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 20 | #include <sys/time.h> |
Elliott Hughes | 8daa092 | 2011-09-11 13:46:25 -0700 | [diff] [blame] | 21 | |
Andreas Gampe | 46ee31b | 2016-12-14 10:11:49 -0800 | [diff] [blame] | 22 | #include "android-base/stringprintf.h" |
| 23 | |
David Sehr | c431b9d | 2018-03-02 12:01:51 -0800 | [diff] [blame] | 24 | #include "base/atomic.h" |
Elliott Hughes | 07ed66b | 2012-12-12 18:34:25 -0800 | [diff] [blame] | 25 | #include "base/logging.h" |
Mathieu Chartier | 32ce2ad | 2016-03-04 14:58:03 -0800 | [diff] [blame] | 26 | #include "base/systrace.h" |
Andreas Gampe | 8cf9cb3 | 2017-07-19 09:28:38 -0700 | [diff] [blame] | 27 | #include "base/time_utils.h" |
Ian Rogers | cf7f191 | 2014-10-22 22:06:39 -0700 | [diff] [blame] | 28 | #include "base/value_object.h" |
Ian Rogers | 693ff61 | 2013-02-01 10:56:12 -0800 | [diff] [blame] | 29 | #include "mutex-inl.h" |
Mathieu Chartier | 0795f23 | 2016-09-27 18:43:30 -0700 | [diff] [blame] | 30 | #include "scoped_thread_state_change-inl.h" |
Ian Rogers | 04d7aa9 | 2013-03-16 14:29:17 -0700 | [diff] [blame] | 31 | #include "thread-inl.h" |
Elliott Hughes | 8daa092 | 2011-09-11 13:46:25 -0700 | [diff] [blame] | 32 | |
| 33 | namespace art { |
| 34 | |
Andreas Gampe | 46ee31b | 2016-12-14 10:11:49 -0800 | [diff] [blame] | 35 | using android::base::StringPrintf; |
| 36 | |
// Process-global registry of every BaseMutex ever created. Populated only
// when contention logging (kLogLockContentions) is compiled in; consumed by
// BaseMutex::DumpAll().
struct AllMutexData {
  // A guard for all_mutexes that's not a mutex (Mutexes must CAS to acquire and busy wait).
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes, guarded by all_mutexes_guard. Lazily allocated (and
  // deliberately leaked) by the first BaseMutex constructor.
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(nullptr) {}
};
// Only element 0 is ever dereferenced here (via gAllMutexData->...);
// kAllMutexDataSize presumably sizes padding against false sharing — confirm
// against its declaration in the header.
static struct AllMutexData gAllMutexData[kAllMutexDataSize];
| 45 | |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 46 | #if ART_USE_FUTEXES |
// Computes *result_ts = lhs - rhs, normalizing tv_nsec back into the
// nanosecond range after the subtraction. Returns true iff the resulting
// interval is negative, i.e. lhs was earlier than rhs.
static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) {
  static constexpr int32_t kOneSecInNanos = 1000 * 1000 * 1000;
  result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec;
  result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
  if (result_ts->tv_nsec < 0) {
    // Borrow one second from the seconds field.
    --result_ts->tv_sec;
    result_ts->tv_nsec += kOneSecInNanos;
  } else if (result_ts->tv_nsec > kOneSecInNanos) {
    // Carry a whole second into the seconds field.
    ++result_ts->tv_sec;
    result_ts->tv_nsec -= kOneSecInNanos;
  }
  return result_ts->tv_sec < 0;
}
| 60 | #endif |
| 61 | |
Hans Boehm | ae915a0 | 2017-12-12 11:05:32 -0800 | [diff] [blame] | 62 | // Wait for an amount of time that roughly increases in the argument i. |
| 63 | // Spin for small arguments and yield/sleep for longer ones. |
| 64 | static void BackOff(uint32_t i) { |
| 65 | static constexpr uint32_t kSpinMax = 10; |
| 66 | static constexpr uint32_t kYieldMax = 20; |
| 67 | if (i <= kSpinMax) { |
| 68 | // TODO: Esp. in very latency-sensitive cases, consider replacing this with an explicit |
| 69 | // test-and-test-and-set loop in the caller. Possibly skip entirely on a uniprocessor. |
| 70 | volatile uint32_t x = 0; |
| 71 | const uint32_t spin_count = 10 * i; |
| 72 | for (uint32_t spin = 0; spin < spin_count; ++spin) { |
| 73 | ++x; // Volatile; hence should not be optimized away. |
| 74 | } |
| 75 | // TODO: Consider adding x86 PAUSE and/or ARM YIELD here. |
| 76 | } else if (i <= kYieldMax) { |
| 77 | sched_yield(); |
| 78 | } else { |
| 79 | NanoSleep(1000ull * (i - kYieldMax)); |
| 80 | } |
| 81 | } |
| 82 | |
// RAII spin-lock over gAllMutexData->all_mutexes_guard. Acquires by CAS-ing
// the guard from nullptr to the given mutex pointer, calling BackOff()
// between failed attempts; releases by storing nullptr back. This cannot be
// a Mutex because it protects Mutex registration itself.
class ScopedAllMutexesLock final {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    // Spin with progressively longer back-off until our marker is installed.
    for (uint32_t i = 0;
         !gAllMutexData->all_mutexes_guard.CompareAndSetWeakAcquire(nullptr, mutex);
         ++i) {
      BackOff(i);
    }
  }

  ~ScopedAllMutexesLock() {
    // We must still be the holder recorded at construction.
    DCHECK_EQ(gAllMutexData->all_mutexes_guard.load(std::memory_order_relaxed), mutex_);
    gAllMutexData->all_mutexes_guard.store(nullptr, std::memory_order_release);
  }

 private:
  const BaseMutex* const mutex_;
};
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 101 | |
// Scoped class that generates events at the beginning and end of lock contention.
// On destruction it feeds the measured blocked time into
// BaseMutex::RecordContention, and brackets the blocked interval with
// ATraceBegin/ATraceEnd for systrace.
class ScopedContentionRecorder final : public ValueObject {
 public:
  ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
      // When contention logging is compiled out, store nothing (avoids the
      // NanoTime() call on every contended acquisition).
      : mutex_(kLogLockContentions ? mutex : nullptr),
        blocked_tid_(kLogLockContentions ? blocked_tid : 0),
        owner_tid_(kLogLockContentions ? owner_tid : 0),
        start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
    if (ATraceEnabled()) {
      std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
                                     mutex->GetName(), owner_tid);
      ATraceBegin(msg.c_str());
    }
  }

  ~ScopedContentionRecorder() {
    // NOTE(review): ATraceEnd() runs even when ATraceEnabled() was false at
    // construction — presumably harmless; confirm against the systrace API.
    ATraceEnd();
    if (kLogLockContentions) {
      uint64_t end_nano_time = NanoTime();
      mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
    }
  }

 private:
  BaseMutex* const mutex_;
  const uint64_t blocked_tid_;
  const uint64_t owner_tid_;
  const uint64_t start_nano_time_;
};
| 131 | |
// Constructs the base lock with its debug name and lock-ordering level, and
// (when contention logging is compiled in) registers it in the global set
// consumed by DumpAll().
BaseMutex::BaseMutex(const char* name, LockLevel level)
    : name_(name),
      level_(level),
      should_respond_to_empty_checkpoint_request_(false) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == nullptr) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}
| 147 | |
// Unregisters this lock from the global contention-logging set; mirrors the
// registration performed in the constructor.
BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}
| 154 | |
| 155 | void BaseMutex::DumpAll(std::ostream& os) { |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 156 | if (kLogLockContentions) { |
| 157 | os << "Mutex logging:\n"; |
| 158 | ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1)); |
Ian Rogers | d9c4fc9 | 2013-10-01 19:45:43 -0700 | [diff] [blame] | 159 | std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes; |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 160 | if (all_mutexes == nullptr) { |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 161 | // No mutexes have been created yet during at startup. |
| 162 | return; |
| 163 | } |
Ian Rogers | d9c4fc9 | 2013-10-01 19:45:43 -0700 | [diff] [blame] | 164 | os << "(Contended)\n"; |
Andreas Gampe | c55bb39 | 2018-09-21 00:02:02 +0000 | [diff] [blame] | 165 | for (const BaseMutex* mutex : *all_mutexes) { |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 166 | if (mutex->HasEverContended()) { |
| 167 | mutex->Dump(os); |
| 168 | os << "\n"; |
| 169 | } |
| 170 | } |
| 171 | os << "(Never contented)\n"; |
Andreas Gampe | c55bb39 | 2018-09-21 00:02:02 +0000 | [diff] [blame] | 172 | for (const BaseMutex* mutex : *all_mutexes) { |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 173 | if (!mutex->HasEverContended()) { |
| 174 | mutex->Dump(os); |
| 175 | os << "\n"; |
| 176 | } |
| 177 | } |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 178 | } |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 179 | } |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 180 | |
// Debug-build check that it is safe for |self| to block (wait) on this lock:
// this lock must itself be held (or be the monitor lock), and no other lock
// may be held, with a narrow carve-out for user_code_suspension_lock_.
void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == nullptr) {
    // Threads not attached to the runtime get a simpler, level-only check.
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    // Scan all other lock levels for locks this thread still holds.
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        // We allow the thread to wait even while user_code_suspension_lock_ is
        // held: that just means that the GC or some other internal process is
        // suspending the thread while it is trying to suspend some other
        // thread. That is fine so long as the current thread is not being
        // suspended by a SuspendReason::kForUserCode (which needs the
        // user_code_suspension_lock_ to clear). The carve-out is needed
        // because user_code_suspension_lock_ is the way untrusted code
        // interacts with suspension: one holds the lock to prevent
        // user-code-suspension from occurring. Since that is only initiated
        // from user-supplied native code this is safe.
        if (held_mutex == Locks::user_code_suspension_lock_) {
          // No thread safety analysis is fine since we have both the user_code_suspension_lock_
          // from the line above and the ThreadSuspendCountLock since it is our level_. We use this
          // lambda to avoid having to annotate the whole function as NO_THREAD_SAFETY_ANALYSIS.
          auto is_suspending_for_user_code = [self]() NO_THREAD_SAFETY_ANALYSIS {
            return self->GetUserCodeSuspendCount() != 0;
          };
          if (is_suspending_for_user_code()) {
            LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                       << "(level " << LockLevel(i) << ") while performing wait on "
                       << "\"" << name_ << "\" (level " << level_ << ") "
                       << "with SuspendReason::kForUserCode pending suspensions";
            bad_mutexes_held = true;
          }
        } else if (held_mutex != nullptr) {
          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                     << "(level " << LockLevel(i) << ") while performing wait on "
                     << "\"" << name_ << "\" (level " << level_ << ")";
          bad_mutexes_held = true;
        }
      }
    }
    if (gAborting == 0) {  // Avoid recursive aborts.
      CHECK(!bad_mutexes_held) << this;
    }
  }
}
| 227 | |
Ian Rogers | 37f3c96 | 2014-07-17 11:25:30 -0700 | [diff] [blame] | 228 | void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) { |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 229 | if (kLogLockContentions) { |
| 230 | // Atomically add value to wait_time. |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 231 | wait_time.fetch_add(value, std::memory_order_seq_cst); |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 232 | } |
| 233 | } |
| 234 | |
Brian Carlstrom | 0de7985 | 2013-07-25 22:29:58 -0700 | [diff] [blame] | 235 | void BaseMutex::RecordContention(uint64_t blocked_tid, |
| 236 | uint64_t owner_tid, |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 237 | uint64_t nano_time_blocked) { |
| 238 | if (kLogLockContentions) { |
Ian Rogers | 3e5cf30 | 2014-05-20 16:40:37 -0700 | [diff] [blame] | 239 | ContentionLogData* data = contention_log_data_; |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 240 | ++(data->contention_count); |
| 241 | data->AddToWaitTime(nano_time_blocked); |
| 242 | ContentionLogEntry* log = data->contention_log; |
| 243 | // This code is intentionally racy as it is only used for diagnostics. |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 244 | int32_t slot = data->cur_content_log_entry.load(std::memory_order_relaxed); |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 245 | if (log[slot].blocked_tid == blocked_tid && |
| 246 | log[slot].owner_tid == blocked_tid) { |
| 247 | ++log[slot].count; |
| 248 | } else { |
| 249 | uint32_t new_slot; |
| 250 | do { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 251 | slot = data->cur_content_log_entry.load(std::memory_order_relaxed); |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 252 | new_slot = (slot + 1) % kContentionLogSize; |
Orion Hodson | 4557b38 | 2018-01-03 11:47:54 +0000 | [diff] [blame] | 253 | } while (!data->cur_content_log_entry.CompareAndSetWeakRelaxed(slot, new_slot)); |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 254 | log[new_slot].blocked_tid = blocked_tid; |
| 255 | log[new_slot].owner_tid = owner_tid; |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 256 | log[new_slot].count.store(1, std::memory_order_relaxed); |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 257 | } |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 258 | } |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 259 | } |
| 260 | |
// Writes a human-readable summary of this lock's recorded contention to
// |os|: total contention count, total and average contender wait time, and
// the thread ids that most often appear as blocked / as owner in the
// contention ring buffer. Intentionally racy (relaxed loads): diagnostics only.
void BaseMutex::DumpContention(std::ostream& os) const {
  if (kLogLockContentions) {
    const ContentionLogData* data = contention_log_data_;
    const ContentionLogEntry* log = data->contention_log;
    uint64_t wait_time = data->wait_time.load(std::memory_order_relaxed);
    uint32_t contention_count = data->contention_count.load(std::memory_order_relaxed);
    if (contention_count == 0) {
      os << "never contended";
    } else {
      os << "contended " << contention_count
         << " total wait of contender " << PrettyDuration(wait_time)
         << " average " << PrettyDuration(wait_time / contention_count);
      // Histograms: how often each tid appears blocked, and as the owner.
      SafeMap<uint64_t, size_t> most_common_blocker;
      SafeMap<uint64_t, size_t> most_common_blocked;
      for (size_t i = 0; i < kContentionLogSize; ++i) {
        uint64_t blocked_tid = log[i].blocked_tid;
        uint64_t owner_tid = log[i].owner_tid;
        uint32_t count = log[i].count.load(std::memory_order_relaxed);
        if (count > 0) {
          auto it = most_common_blocked.find(blocked_tid);
          if (it != most_common_blocked.end()) {
            most_common_blocked.Overwrite(blocked_tid, it->second + count);
          } else {
            most_common_blocked.Put(blocked_tid, count);
          }
          it = most_common_blocker.find(owner_tid);
          if (it != most_common_blocker.end()) {
            most_common_blocker.Overwrite(owner_tid, it->second + count);
          } else {
            most_common_blocker.Put(owner_tid, count);
          }
        }
      }
      // Report the tid that was blocked most often.
      uint64_t max_tid = 0;
      size_t max_tid_count = 0;
      for (const auto& pair : most_common_blocked) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows most blocked tid=" << max_tid;
      }
      // Report the tid that most often held the lock while others waited.
      max_tid = 0;
      max_tid_count = 0;
      for (const auto& pair : most_common_blocker) {
        if (pair.second > max_tid_count) {
          max_tid = pair.first;
          max_tid_count = pair.second;
        }
      }
      if (max_tid != 0) {
        os << " sample shows tid=" << max_tid << " owning during this time";
      }
    }
  }
}
| 319 | |
| 320 | |
// Constructs an unowned mutex. With ART_USE_FUTEXES the lock word (state_)
// and contender count are zero-initialized by their declarations and merely
// verified here; otherwise a pthread mutex is initialized.
Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), exclusive_owner_(0), recursion_count_(0), recursive_(recursive) {
#if ART_USE_FUTEXES
  DCHECK_EQ(0, state_.load(std::memory_order_relaxed));
  DCHECK_EQ(0, num_contenders_.load(std::memory_order_relaxed));
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
#endif
}
| 330 | |
// Helper to allow checking shutdown while locking for thread safety.
// Holds runtime_shutdown_lock_ so the otherwise-racy check is stable for
// the duration of the call.
static bool IsSafeToCallAbortSafe() {
  MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
  return Locks::IsSafeToCallAbortRacy();
}
| 336 | |
// Destroys the mutex, diagnosing misuse: still locked, a stale owner tid on
// an unlocked mutex, or waiters still queued. Each condition is fatal only
// when it is safe to abort (uses the racy check since we may be mid-shutdown).
Mutex::~Mutex() {
  bool safe_to_call_abort = Locks::IsSafeToCallAbortRacy();
#if ART_USE_FUTEXES
  if (state_.load(std::memory_order_relaxed) != 0) {
    // Lock word non-zero: somebody still holds us.
    LOG(safe_to_call_abort ? FATAL : WARNING)
        << "destroying mutex with owner: " << GetExclusiveOwnerTid();
  } else {
    if (GetExclusiveOwnerTid() != 0) {
      LOG(safe_to_call_abort ? FATAL : WARNING)
          << "unexpectedly found an owner on unlocked mutex " << name_;
    }
    if (num_contenders_.load(std::memory_order_seq_cst) != 0) {
      LOG(safe_to_call_abort ? FATAL : WARNING)
          << "unexpectedly found a contender on mutex " << name_;
    }
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    PLOG(safe_to_call_abort ? FATAL : WARNING)
        << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}
| 364 | |
// Acquires the mutex for |self|, blocking until available. Re-entry is
// permitted only on recursive mutexes (it just bumps recursion_count_).
// Futex path: CAS the lock word 0->1; on failure record contention and
// futex-wait until woken, then retry.
void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.load(std::memory_order_relaxed);
      if (LIKELY(cur_state == 0)) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        num_contenders_++;
        // Honor pending empty-checkpoint requests before sleeping.
        if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
          self->CheckEmptyCheckpointFromMutex();
        }
        if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, 1, nullptr, nullptr, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        num_contenders_--;
      }
    } while (!done);
    DCHECK_EQ(state_.load(std::memory_order_relaxed), 1);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    // We now hold the lock; record ownership for debugging and lock-level checks.
    DCHECK_EQ(GetExclusiveOwnerTid(), 0);
    exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}
| 410 | |
// Attempts to acquire the mutex without blocking. Returns true on success
// (including re-entry on a recursive mutex already held by |self|), false
// if another thread holds it.
bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    bool done = false;
    do {
      int32_t cur_state = state_.load(std::memory_order_relaxed);
      if (cur_state == 0) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        // Held by someone: fail immediately rather than wait.
        return false;
      }
    } while (!done);
    DCHECK_EQ(state_.load(std::memory_order_relaxed), 1);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    // We now hold the lock; record ownership for debugging and lock-level checks.
    DCHECK_EQ(GetExclusiveOwnerTid(), 0);
    exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}
| 451 | |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 452 | void Mutex::ExclusiveUnlock(Thread* self) { |
Mathieu Chartier | eb0a179 | 2014-12-15 17:23:45 -0800 | [diff] [blame] | 453 | if (kIsDebugBuild && self != nullptr && self != Thread::Current()) { |
| 454 | std::string name1 = "<null>"; |
| 455 | std::string name2 = "<null>"; |
| 456 | if (self != nullptr) { |
| 457 | self->GetThreadName(name1); |
| 458 | } |
| 459 | if (Thread::Current() != nullptr) { |
| 460 | Thread::Current()->GetThreadName(name2); |
| 461 | } |
Mathieu Chartier | 4c10110 | 2015-01-27 17:14:16 -0800 | [diff] [blame] | 462 | LOG(FATAL) << GetName() << " level=" << level_ << " self=" << name1 |
| 463 | << " Thread::Current()=" << name2; |
Mathieu Chartier | eb0a179 | 2014-12-15 17:23:45 -0800 | [diff] [blame] | 464 | } |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 465 | AssertHeld(self); |
Hans Boehm | 0882af2 | 2017-08-31 15:21:57 -0700 | [diff] [blame] | 466 | DCHECK_NE(GetExclusiveOwnerTid(), 0); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 467 | recursion_count_--; |
| 468 | if (!recursive_ || recursion_count_ == 0) { |
Ian Rogers | 25fd14b | 2012-09-05 10:56:38 -0700 | [diff] [blame] | 469 | if (kDebugLocking) { |
| 470 | CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: " |
| 471 | << name_ << " " << recursion_count_; |
| 472 | } |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 473 | RegisterAsUnlocked(self); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 474 | #if ART_USE_FUTEXES |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 475 | bool done = false; |
| 476 | do { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 477 | int32_t cur_state = state_.load(std::memory_order_relaxed); |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 478 | if (LIKELY(cur_state == 1)) { |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 479 | // We're no longer the owner. |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 480 | exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 481 | // Change state to 0 and impose load/store ordering appropriate for lock release. |
Orion Hodson | 4557b38 | 2018-01-03 11:47:54 +0000 | [diff] [blame] | 482 | // Note, the relaxed loads below mustn't reorder before the CompareAndSet. |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 483 | // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing |
| 484 | // a status bit into the state on contention. |
Orion Hodson | 4557b38 | 2018-01-03 11:47:54 +0000 | [diff] [blame] | 485 | done = state_.CompareAndSetWeakSequentiallyConsistent(cur_state, 0 /* new state */); |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 486 | if (LIKELY(done)) { // Spurious fail? |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 487 | // Wake a contender. |
Hyangseok Chae | 240a564 | 2018-07-25 16:45:08 +0900 | [diff] [blame] | 488 | if (UNLIKELY(num_contenders_.load(std::memory_order_seq_cst) > 0)) { |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 489 | futex(state_.Address(), FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0); |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 490 | } |
| 491 | } |
| 492 | } else { |
| 493 | // Logging acquires the logging lock, avoid infinite recursion in that case. |
| 494 | if (this != Locks::logging_lock_) { |
| 495 | LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_; |
| 496 | } else { |
Andreas Gampe | 3fec9ac | 2016-09-13 10:47:28 -0700 | [diff] [blame] | 497 | LogHelper::LogLineLowStack(__FILE__, |
| 498 | __LINE__, |
| 499 | ::android::base::FATAL_WITHOUT_ABORT, |
| 500 | StringPrintf("Unexpected state_ %d in unlock for %s", |
| 501 | cur_state, name_).c_str()); |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 502 | _exit(1); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 503 | } |
| 504 | } |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 505 | } while (!done); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 506 | #else |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 507 | exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 508 | CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_)); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 509 | #endif |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 510 | } |
Elliott Hughes | 8daa092 | 2011-09-11 13:46:25 -0700 | [diff] [blame] | 511 | } |
| 512 | |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 513 | void Mutex::Dump(std::ostream& os) const { |
| 514 | os << (recursive_ ? "recursive " : "non-recursive ") |
| 515 | << name_ |
| 516 | << " level=" << static_cast<int>(level_) |
| 517 | << " rec=" << recursion_count_ |
| 518 | << " owner=" << GetExclusiveOwnerTid() << " "; |
| 519 | DumpContention(os); |
Ian Rogers | 01ae580 | 2012-09-28 16:14:01 -0700 | [diff] [blame] | 520 | } |
| 521 | |
// Stream-insertion operator; delegates to Mutex::Dump.
std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}
| 526 | |
Hiroshi Yamauchi | a222404 | 2017-02-08 16:35:45 -0800 | [diff] [blame] | 527 | void Mutex::WakeupToRespondToEmptyCheckpoint() { |
| 528 | #if ART_USE_FUTEXES |
| 529 | // Wake up all the waiters so they will respond to the emtpy checkpoint. |
| 530 | DCHECK(should_respond_to_empty_checkpoint_request_); |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 531 | if (UNLIKELY(num_contenders_.load(std::memory_order_relaxed) > 0)) { |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 532 | futex(state_.Address(), FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0); |
Hiroshi Yamauchi | a222404 | 2017-02-08 16:35:45 -0800 | [diff] [blame] | 533 | } |
| 534 | #else |
| 535 | LOG(FATAL) << "Non futex case isn't supported."; |
| 536 | #endif |
| 537 | } |
| 538 | |
Brian Carlstrom | 02c8cc6 | 2013-07-18 15:54:44 -0700 | [diff] [blame] | 539 | ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level) |
| 540 | : BaseMutex(name, level) |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 541 | #if ART_USE_FUTEXES |
Hans Boehm | 467b692 | 2019-04-22 16:15:53 -0700 | [diff] [blame^] | 542 | , state_(0), exclusive_owner_(0), num_contenders_(0) |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 543 | #endif |
Igor Murashkin | 5573c37 | 2017-11-16 13:34:30 -0800 | [diff] [blame] | 544 | { |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 545 | #if !ART_USE_FUTEXES |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 546 | CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr)); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 547 | #endif |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 548 | } |
| 549 | |
ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  // The lock must be fully released with no waiters at destruction time.
  CHECK_EQ(state_.load(std::memory_order_relaxed), 0);
  CHECK_EQ(GetExclusiveOwnerTid(), 0);
  CHECK_EQ(num_contenders_.load(std::memory_order_relaxed), 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    PLOG(is_safe_to_call_abort ? FATAL : WARNING) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}
| 566 | |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 567 | void ReaderWriterMutex::ExclusiveLock(Thread* self) { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 568 | DCHECK(self == nullptr || self == Thread::Current()); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 569 | AssertNotExclusiveHeld(self); |
| 570 | #if ART_USE_FUTEXES |
| 571 | bool done = false; |
| 572 | do { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 573 | int32_t cur_state = state_.load(std::memory_order_relaxed); |
Hiroshi Yamauchi | 967a0ad | 2013-09-10 16:24:21 -0700 | [diff] [blame] | 574 | if (LIKELY(cur_state == 0)) { |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 575 | // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition. |
Orion Hodson | 4557b38 | 2018-01-03 11:47:54 +0000 | [diff] [blame] | 576 | done = state_.CompareAndSetWeakAcquire(0 /* cur_state*/, -1 /* new state */); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 577 | } else { |
| 578 | // Failed to acquire, hang up. |
Hiroshi Yamauchi | b373308 | 2013-08-12 17:28:49 -0700 | [diff] [blame] | 579 | ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid()); |
Hans Boehm | 467b692 | 2019-04-22 16:15:53 -0700 | [diff] [blame^] | 580 | num_contenders_.fetch_add(1); |
Hiroshi Yamauchi | a222404 | 2017-02-08 16:35:45 -0800 | [diff] [blame] | 581 | if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) { |
| 582 | self->CheckEmptyCheckpointFromMutex(); |
| 583 | } |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 584 | if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) { |
Brian Carlstrom | 0de7985 | 2013-07-25 22:29:58 -0700 | [diff] [blame] | 585 | // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning. |
| 586 | // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock. |
| 587 | if ((errno != EAGAIN) && (errno != EINTR)) { |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 588 | PLOG(FATAL) << "futex wait failed for " << name_; |
| 589 | } |
| 590 | } |
Hans Boehm | 467b692 | 2019-04-22 16:15:53 -0700 | [diff] [blame^] | 591 | num_contenders_.fetch_sub(1); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 592 | } |
Brian Carlstrom | df62950 | 2013-07-17 22:39:56 -0700 | [diff] [blame] | 593 | } while (!done); |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 594 | DCHECK_EQ(state_.load(std::memory_order_relaxed), -1); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 595 | #else |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 596 | CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_)); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 597 | #endif |
Hans Boehm | 0882af2 | 2017-08-31 15:21:57 -0700 | [diff] [blame] | 598 | DCHECK_EQ(GetExclusiveOwnerTid(), 0); |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 599 | exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 600 | RegisterAsLocked(self); |
| 601 | AssertExclusiveHeld(self); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 602 | } |
| 603 | |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 604 | void ReaderWriterMutex::ExclusiveUnlock(Thread* self) { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 605 | DCHECK(self == nullptr || self == Thread::Current()); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 606 | AssertExclusiveHeld(self); |
| 607 | RegisterAsUnlocked(self); |
Hans Boehm | 0882af2 | 2017-08-31 15:21:57 -0700 | [diff] [blame] | 608 | DCHECK_NE(GetExclusiveOwnerTid(), 0); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 609 | #if ART_USE_FUTEXES |
| 610 | bool done = false; |
| 611 | do { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 612 | int32_t cur_state = state_.load(std::memory_order_relaxed); |
Hiroshi Yamauchi | 967a0ad | 2013-09-10 16:24:21 -0700 | [diff] [blame] | 613 | if (LIKELY(cur_state == -1)) { |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 614 | // We're no longer the owner. |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 615 | exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 616 | // Change state from -1 to 0 and impose load/store ordering appropriate for lock release. |
Hans Boehm | 467b692 | 2019-04-22 16:15:53 -0700 | [diff] [blame^] | 617 | // Note, the num_contenders_ load below musn't reorder before the CompareAndSet. |
Orion Hodson | 4557b38 | 2018-01-03 11:47:54 +0000 | [diff] [blame] | 618 | done = state_.CompareAndSetWeakSequentiallyConsistent(-1 /* cur_state*/, 0 /* new state */); |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 619 | if (LIKELY(done)) { // Weak CAS may fail spuriously. |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 620 | // Wake any waiters. |
Hans Boehm | 467b692 | 2019-04-22 16:15:53 -0700 | [diff] [blame^] | 621 | if (UNLIKELY(num_contenders_.load(std::memory_order_seq_cst) > 0)) { |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 622 | futex(state_.Address(), FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 623 | } |
| 624 | } |
| 625 | } else { |
| 626 | LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_; |
| 627 | } |
Brian Carlstrom | df62950 | 2013-07-17 22:39:56 -0700 | [diff] [blame] | 628 | } while (!done); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 629 | #else |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 630 | exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 631 | CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_)); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 632 | #endif |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 633 | } |
| 634 | |
Ian Rogers | 66aee5c | 2012-08-15 17:17:47 -0700 | [diff] [blame] | 635 | #if HAVE_TIMED_RWLOCK |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 636 | bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 637 | DCHECK(self == nullptr || self == Thread::Current()); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 638 | #if ART_USE_FUTEXES |
| 639 | bool done = false; |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 640 | timespec end_abs_ts; |
tony.ys_liu | 071e48e | 2015-01-14 18:28:03 +0800 | [diff] [blame] | 641 | InitTimeSpec(true, CLOCK_MONOTONIC, ms, ns, &end_abs_ts); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 642 | do { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 643 | int32_t cur_state = state_.load(std::memory_order_relaxed); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 644 | if (cur_state == 0) { |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 645 | // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition. |
Orion Hodson | 4557b38 | 2018-01-03 11:47:54 +0000 | [diff] [blame] | 646 | done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, -1 /* new state */); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 647 | } else { |
| 648 | // Failed to acquire, hang up. |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 649 | timespec now_abs_ts; |
tony.ys_liu | 071e48e | 2015-01-14 18:28:03 +0800 | [diff] [blame] | 650 | InitTimeSpec(true, CLOCK_MONOTONIC, 0, 0, &now_abs_ts); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 651 | timespec rel_ts; |
| 652 | if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) { |
| 653 | return false; // Timed out. |
| 654 | } |
Hiroshi Yamauchi | b373308 | 2013-08-12 17:28:49 -0700 | [diff] [blame] | 655 | ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid()); |
Hans Boehm | 467b692 | 2019-04-22 16:15:53 -0700 | [diff] [blame^] | 656 | num_contenders_.fetch_add(1); |
Hiroshi Yamauchi | a222404 | 2017-02-08 16:35:45 -0800 | [diff] [blame] | 657 | if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) { |
| 658 | self->CheckEmptyCheckpointFromMutex(); |
| 659 | } |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 660 | if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, &rel_ts, nullptr, 0) != 0) { |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 661 | if (errno == ETIMEDOUT) { |
Hans Boehm | 467b692 | 2019-04-22 16:15:53 -0700 | [diff] [blame^] | 662 | num_contenders_.fetch_sub(1); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 663 | return false; // Timed out. |
Brian Carlstrom | 0de7985 | 2013-07-25 22:29:58 -0700 | [diff] [blame] | 664 | } else if ((errno != EAGAIN) && (errno != EINTR)) { |
| 665 | // EAGAIN and EINTR both indicate a spurious failure, |
| 666 | // recompute the relative time out from now and try again. |
| 667 | // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts; |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 668 | PLOG(FATAL) << "timed futex wait failed for " << name_; |
| 669 | } |
| 670 | } |
Hans Boehm | 467b692 | 2019-04-22 16:15:53 -0700 | [diff] [blame^] | 671 | num_contenders_.fetch_sub(1); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 672 | } |
Brian Carlstrom | df62950 | 2013-07-17 22:39:56 -0700 | [diff] [blame] | 673 | } while (!done); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 674 | #else |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 675 | timespec ts; |
Brian Carlstrom | bcc2926 | 2012-11-02 11:36:03 -0700 | [diff] [blame] | 676 | InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 677 | int result = pthread_rwlock_timedwrlock(&rwlock_, &ts); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 678 | if (result == ETIMEDOUT) { |
| 679 | return false; |
| 680 | } |
| 681 | if (result != 0) { |
| 682 | errno = result; |
Ian Rogers | a5acfd3 | 2012-08-15 11:50:10 -0700 | [diff] [blame] | 683 | PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_; |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 684 | } |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 685 | #endif |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 686 | exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 687 | RegisterAsLocked(self); |
| 688 | AssertSharedHeld(self); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 689 | return true; |
| 690 | } |
Ian Rogers | 66aee5c | 2012-08-15 17:17:47 -0700 | [diff] [blame] | 691 | #endif |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 692 | |
Ian Rogers | 51d212e | 2014-10-23 17:48:20 -0700 | [diff] [blame] | 693 | #if ART_USE_FUTEXES |
Ian Rogers | cf7f191 | 2014-10-22 22:06:39 -0700 | [diff] [blame] | 694 | void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_state) { |
| 695 | // Owner holds it exclusively, hang up. |
Roland Levillain | cd72dc9 | 2018-02-27 19:15:31 +0000 | [diff] [blame] | 696 | ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid()); |
Hans Boehm | 467b692 | 2019-04-22 16:15:53 -0700 | [diff] [blame^] | 697 | num_contenders_.fetch_add(1); |
Hiroshi Yamauchi | a222404 | 2017-02-08 16:35:45 -0800 | [diff] [blame] | 698 | if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) { |
| 699 | self->CheckEmptyCheckpointFromMutex(); |
| 700 | } |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 701 | if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) { |
Daniel Colascione | 6f4d102 | 2016-11-21 14:35:42 -0800 | [diff] [blame] | 702 | if (errno != EAGAIN && errno != EINTR) { |
Ian Rogers | cf7f191 | 2014-10-22 22:06:39 -0700 | [diff] [blame] | 703 | PLOG(FATAL) << "futex wait failed for " << name_; |
| 704 | } |
| 705 | } |
Hans Boehm | 467b692 | 2019-04-22 16:15:53 -0700 | [diff] [blame^] | 706 | num_contenders_.fetch_sub(1); |
Ian Rogers | cf7f191 | 2014-10-22 22:06:39 -0700 | [diff] [blame] | 707 | } |
Ian Rogers | 51d212e | 2014-10-23 17:48:20 -0700 | [diff] [blame] | 708 | #endif |
Ian Rogers | cf7f191 | 2014-10-22 22:06:39 -0700 | [diff] [blame] | 709 | |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 710 | bool ReaderWriterMutex::SharedTryLock(Thread* self) { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 711 | DCHECK(self == nullptr || self == Thread::Current()); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 712 | #if ART_USE_FUTEXES |
| 713 | bool done = false; |
| 714 | do { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 715 | int32_t cur_state = state_.load(std::memory_order_relaxed); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 716 | if (cur_state >= 0) { |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 717 | // Add as an extra reader and impose load/store ordering appropriate for lock acquisition. |
Orion Hodson | 4557b38 | 2018-01-03 11:47:54 +0000 | [diff] [blame] | 718 | done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 719 | } else { |
| 720 | // Owner holds it exclusively. |
| 721 | return false; |
| 722 | } |
Brian Carlstrom | df62950 | 2013-07-17 22:39:56 -0700 | [diff] [blame] | 723 | } while (!done); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 724 | #else |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 725 | int result = pthread_rwlock_tryrdlock(&rwlock_); |
| 726 | if (result == EBUSY) { |
| 727 | return false; |
| 728 | } |
| 729 | if (result != 0) { |
| 730 | errno = result; |
| 731 | PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_; |
| 732 | } |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 733 | #endif |
| 734 | RegisterAsLocked(self); |
| 735 | AssertSharedHeld(self); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 736 | return true; |
| 737 | } |
| 738 | |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 739 | bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 740 | DCHECK(self == nullptr || self == Thread::Current()); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 741 | bool result; |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 742 | if (UNLIKELY(self == nullptr)) { // Handle unattached threads. |
Ian Rogers | 01ae580 | 2012-09-28 16:14:01 -0700 | [diff] [blame] | 743 | result = IsExclusiveHeld(self); // TODO: a better best effort here. |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 744 | } else { |
| 745 | result = (self->GetHeldMutex(level_) == this); |
| 746 | } |
| 747 | return result; |
| 748 | } |
| 749 | |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 750 | void ReaderWriterMutex::Dump(std::ostream& os) const { |
| 751 | os << name_ |
| 752 | << " level=" << static_cast<int>(level_) |
Mathieu Chartier | 5869a2c | 2014-10-08 14:26:23 -0700 | [diff] [blame] | 753 | << " owner=" << GetExclusiveOwnerTid() |
| 754 | #if ART_USE_FUTEXES |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 755 | << " state=" << state_.load(std::memory_order_seq_cst) |
Hans Boehm | 467b692 | 2019-04-22 16:15:53 -0700 | [diff] [blame^] | 756 | << " num_contenders=" << num_contenders_.load(std::memory_order_seq_cst) |
Mathieu Chartier | 5869a2c | 2014-10-08 14:26:23 -0700 | [diff] [blame] | 757 | #endif |
| 758 | << " "; |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 759 | DumpContention(os); |
Ian Rogers | 01ae580 | 2012-09-28 16:14:01 -0700 | [diff] [blame] | 760 | } |
| 761 | |
// Stream-insertion operator; delegates to ReaderWriterMutex::Dump.
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}
| 766 | |
Yu Li | eac4424 | 2015-06-29 10:50:03 +0800 | [diff] [blame] | 767 | std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu) { |
| 768 | mu.Dump(os); |
| 769 | return os; |
| 770 | } |
| 771 | |
Hiroshi Yamauchi | a222404 | 2017-02-08 16:35:45 -0800 | [diff] [blame] | 772 | void ReaderWriterMutex::WakeupToRespondToEmptyCheckpoint() { |
| 773 | #if ART_USE_FUTEXES |
| 774 | // Wake up all the waiters so they will respond to the emtpy checkpoint. |
| 775 | DCHECK(should_respond_to_empty_checkpoint_request_); |
Hans Boehm | 467b692 | 2019-04-22 16:15:53 -0700 | [diff] [blame^] | 776 | if (UNLIKELY(num_contenders_.load(std::memory_order_relaxed) > 0)) { |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 777 | futex(state_.Address(), FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0); |
Hiroshi Yamauchi | a222404 | 2017-02-08 16:35:45 -0800 | [diff] [blame] | 778 | } |
| 779 | #else |
| 780 | LOG(FATAL) << "Non futex case isn't supported."; |
| 781 | #endif |
| 782 | } |
| 783 | |
Ian Rogers | 23055dc | 2013-04-18 16:29:16 -0700 | [diff] [blame] | 784 | ConditionVariable::ConditionVariable(const char* name, Mutex& guard) |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 785 | : name_(name), guard_(guard) { |
| 786 | #if ART_USE_FUTEXES |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 787 | DCHECK_EQ(0, sequence_.load(std::memory_order_relaxed)); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 788 | num_waiters_ = 0; |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 789 | #else |
Narayan Kamath | 51b7102 | 2014-03-04 11:57:09 +0000 | [diff] [blame] | 790 | pthread_condattr_t cond_attrs; |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 791 | CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs)); |
Narayan Kamath | 51b7102 | 2014-03-04 11:57:09 +0000 | [diff] [blame] | 792 | #if !defined(__APPLE__) |
| 793 | // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock. |
Ian Rogers | 51d212e | 2014-10-23 17:48:20 -0700 | [diff] [blame] | 794 | CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC)); |
Narayan Kamath | 51b7102 | 2014-03-04 11:57:09 +0000 | [diff] [blame] | 795 | #endif |
| 796 | CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs)); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 797 | #endif |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 798 | } |
| 799 | |
ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  // Destroying a condition variable with threads still waiting is a bug;
  // only abort if it is safe to do so (e.g. not during runtime shutdown).
  if (num_waiters_!= 0) {
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    LOG(is_safe_to_call_abort ? FATAL : WARNING)
        << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    PLOG(is_safe_to_call_abort ? FATAL : WARNING) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}
| 819 | |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 820 | void ConditionVariable::Broadcast(Thread* self) { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 821 | DCHECK(self == nullptr || self == Thread::Current()); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 822 | // TODO: enable below, there's a race in thread creation that causes false failures currently. |
| 823 | // guard_.AssertExclusiveHeld(self); |
Mathieu Chartier | e46cd75 | 2012-10-31 16:56:18 -0700 | [diff] [blame] | 824 | DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self)); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 825 | #if ART_USE_FUTEXES |
Charles Munger | bcd16ee | 2018-10-22 13:03:23 -0700 | [diff] [blame] | 826 | RequeueWaiters(std::numeric_limits<int32_t>::max()); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 827 | #else |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 828 | CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_)); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 829 | #endif |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 830 | } |
| 831 | |
Charles Munger | bcd16ee | 2018-10-22 13:03:23 -0700 | [diff] [blame] | 832 | #if ART_USE_FUTEXES |
| 833 | void ConditionVariable::RequeueWaiters(int32_t count) { |
| 834 | if (num_waiters_ > 0) { |
| 835 | sequence_++; // Indicate a signal occurred. |
| 836 | // Move waiters from the condition variable's futex to the guard's futex, |
| 837 | // so that they will be woken up when the mutex is released. |
| 838 | bool done = futex(sequence_.Address(), |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 839 | FUTEX_REQUEUE_PRIVATE, |
Charles Munger | bcd16ee | 2018-10-22 13:03:23 -0700 | [diff] [blame] | 840 | /* Threads to wake */ 0, |
| 841 | /* Threads to requeue*/ reinterpret_cast<const timespec*>(count), |
| 842 | guard_.state_.Address(), |
| 843 | 0) != -1; |
| 844 | if (!done && errno != EAGAIN && errno != EINTR) { |
| 845 | PLOG(FATAL) << "futex requeue failed for " << name_; |
| 846 | } |
| 847 | } |
| 848 | } |
| 849 | #endif |
| 850 | |
| 851 | |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 852 | void ConditionVariable::Signal(Thread* self) { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 853 | DCHECK(self == nullptr || self == Thread::Current()); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 854 | guard_.AssertExclusiveHeld(self); |
| 855 | #if ART_USE_FUTEXES |
Charles Munger | bcd16ee | 2018-10-22 13:03:23 -0700 | [diff] [blame] | 856 | RequeueWaiters(1); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 857 | #else |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 858 | CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_)); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 859 | #endif |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 860 | } |
| 861 | |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 862 | void ConditionVariable::Wait(Thread* self) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 863 | guard_.CheckSafeToWait(self); |
| 864 | WaitHoldingLocks(self); |
| 865 | } |
| 866 | |
// Core wait: atomically releases guard_, blocks until a Signal/Broadcast (or
// spurious wakeup), then reacquires guard_. Skips the safe-to-wait check, for
// callers that legitimately hold other locks. The statement ordering in the
// futex path is load-bearing: the contender count and sequence snapshot must
// be taken before guard_ is released.
void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  // Save the recursion depth so it can be restored after the wait; the lock
  // is logically released in full while we sleep.
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  // Snapshot sequence_ before unlocking: FUTEX_WAIT below only blocks if the
  // value is still cur_sequence, so a Signal between unlock and wait makes
  // the wait return immediately instead of being missed.
  int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, nullptr, nullptr, 0) != 0) {
    // Futex failed, check it is an expected error.
    // EAGAIN == EWOULDBLK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  if (self != nullptr) {
    JNIEnvExt* const env = self->GetJniEnv();
    if (UNLIKELY(env != nullptr && env->IsRuntimeDeleted())) {
      CHECK(self->IsDaemon());
      // If the runtime has been deleted, then we cannot proceed. Just sleep forever. This may
      // occur for user daemon threads that get a spurious wakeup. This occurs for test 132 with
      // --host and --gdb.
      // After we wake up, the runtime may have been shutdown, which means that this condition may
      // have been deleted. It is not safe to retry the wait.
      SleepForever();
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.load(std::memory_order_relaxed), 0);
  guard_.num_contenders_--;
#else
  // pthread path: pthread_cond_wait handles the atomic unlock/block itself;
  // we only need to clear and later restore our owner bookkeeping, since the
  // raw mutex is released on our behalf while we sleep.
  pid_t old_owner = guard_.GetExclusiveOwnerTid();
  guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
  guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed);
#endif
  guard_.recursion_count_ = old_recursion_count;
}
| 913 | |
Ian Rogers | 7b078e8 | 2014-09-10 14:44:24 -0700 | [diff] [blame] | 914 | bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 915 | DCHECK(self == nullptr || self == Thread::Current()); |
Ian Rogers | 7b078e8 | 2014-09-10 14:44:24 -0700 | [diff] [blame] | 916 | bool timed_out = false; |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 917 | guard_.AssertExclusiveHeld(self); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 918 | guard_.CheckSafeToWait(self); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 919 | unsigned int old_recursion_count = guard_.recursion_count_; |
| 920 | #if ART_USE_FUTEXES |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 921 | timespec rel_ts; |
Ian Rogers | 5bd97c4 | 2012-11-27 02:38:26 -0800 | [diff] [blame] | 922 | InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 923 | num_waiters_++; |
Ian Rogers | d45f201 | 2012-11-28 11:46:23 -0800 | [diff] [blame] | 924 | // Ensure the Mutex is contended so that requeued threads are awoken. |
Ian Rogers | b122a4b | 2013-11-19 18:00:50 -0800 | [diff] [blame] | 925 | guard_.num_contenders_++; |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 926 | guard_.recursion_count_ = 1; |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 927 | int32_t cur_sequence = sequence_.load(std::memory_order_relaxed); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 928 | guard_.ExclusiveUnlock(self); |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 929 | if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, &rel_ts, nullptr, 0) != 0) { |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 930 | if (errno == ETIMEDOUT) { |
Ian Rogers | d45f201 | 2012-11-28 11:46:23 -0800 | [diff] [blame] | 931 | // Timed out we're done. |
Ian Rogers | 7b078e8 | 2014-09-10 14:44:24 -0700 | [diff] [blame] | 932 | timed_out = true; |
Brian Carlstrom | 0de7985 | 2013-07-25 22:29:58 -0700 | [diff] [blame] | 933 | } else if ((errno == EAGAIN) || (errno == EINTR)) { |
Ian Rogers | d45f201 | 2012-11-28 11:46:23 -0800 | [diff] [blame] | 934 | // A signal or ConditionVariable::Signal/Broadcast has come in. |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 935 | } else { |
| 936 | PLOG(FATAL) << "timed futex wait failed for " << name_; |
| 937 | } |
| 938 | } |
| 939 | guard_.ExclusiveLock(self); |
Ian Rogers | d45f201 | 2012-11-28 11:46:23 -0800 | [diff] [blame] | 940 | CHECK_GE(num_waiters_, 0); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 941 | num_waiters_--; |
Ian Rogers | d45f201 | 2012-11-28 11:46:23 -0800 | [diff] [blame] | 942 | // We awoke and so no longer require awakes from the guard_'s unlock. |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 943 | CHECK_GE(guard_.num_contenders_.load(std::memory_order_relaxed), 0); |
Ian Rogers | b122a4b | 2013-11-19 18:00:50 -0800 | [diff] [blame] | 944 | guard_.num_contenders_--; |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 945 | #else |
Narayan Kamath | 51b7102 | 2014-03-04 11:57:09 +0000 | [diff] [blame] | 946 | #if !defined(__APPLE__) |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 947 | int clock = CLOCK_MONOTONIC; |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 948 | #else |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 949 | int clock = CLOCK_REALTIME; |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 950 | #endif |
Hans Boehm | 0882af2 | 2017-08-31 15:21:57 -0700 | [diff] [blame] | 951 | pid_t old_owner = guard_.GetExclusiveOwnerTid(); |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 952 | guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 953 | guard_.recursion_count_ = 0; |
| 954 | timespec ts; |
Brian Carlstrom | bcc2926 | 2012-11-02 11:36:03 -0700 | [diff] [blame] | 955 | InitTimeSpec(true, clock, ms, ns, &ts); |
Josh Gao | 2d899c4 | 2018-10-17 16:03:42 -0700 | [diff] [blame] | 956 | int rc; |
| 957 | while ((rc = pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts)) == EINTR) { |
| 958 | continue; |
| 959 | } |
| 960 | |
Ian Rogers | 7b078e8 | 2014-09-10 14:44:24 -0700 | [diff] [blame] | 961 | if (rc == ETIMEDOUT) { |
| 962 | timed_out = true; |
| 963 | } else if (rc != 0) { |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 964 | errno = rc; |
| 965 | PLOG(FATAL) << "TimedWait failed for " << name_; |
| 966 | } |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 967 | guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 968 | #endif |
| 969 | guard_.recursion_count_ = old_recursion_count; |
Ian Rogers | 7b078e8 | 2014-09-10 14:44:24 -0700 | [diff] [blame] | 970 | return timed_out; |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 971 | } |
| 972 | |
Elliott Hughes | e62934d | 2012-04-09 11:24:29 -0700 | [diff] [blame] | 973 | } // namespace art |