Elliott Hughes | 8daa092 | 2011-09-11 13:46:25 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2011 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #include "mutex.h" |
| 18 | |
| 19 | #include <errno.h> |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 20 | #include <sys/time.h> |
Elliott Hughes | 8daa092 | 2011-09-11 13:46:25 -0700 | [diff] [blame] | 21 | |
Andreas Gampe | 46ee31b | 2016-12-14 10:11:49 -0800 | [diff] [blame] | 22 | #include "android-base/stringprintf.h" |
| 23 | |
David Sehr | c431b9d | 2018-03-02 12:01:51 -0800 | [diff] [blame] | 24 | #include "base/atomic.h" |
Elliott Hughes | 07ed66b | 2012-12-12 18:34:25 -0800 | [diff] [blame] | 25 | #include "base/logging.h" |
Mathieu Chartier | 32ce2ad | 2016-03-04 14:58:03 -0800 | [diff] [blame] | 26 | #include "base/systrace.h" |
Andreas Gampe | 8cf9cb3 | 2017-07-19 09:28:38 -0700 | [diff] [blame] | 27 | #include "base/time_utils.h" |
Ian Rogers | cf7f191 | 2014-10-22 22:06:39 -0700 | [diff] [blame] | 28 | #include "base/value_object.h" |
Ian Rogers | 693ff61 | 2013-02-01 10:56:12 -0800 | [diff] [blame] | 29 | #include "mutex-inl.h" |
Mathieu Chartier | 0795f23 | 2016-09-27 18:43:30 -0700 | [diff] [blame] | 30 | #include "scoped_thread_state_change-inl.h" |
Ian Rogers | 04d7aa9 | 2013-03-16 14:29:17 -0700 | [diff] [blame] | 31 | #include "thread-inl.h" |
Elliott Hughes | 8daa092 | 2011-09-11 13:46:25 -0700 | [diff] [blame] | 32 | |
| 33 | namespace art { |
| 34 | |
Andreas Gampe | 46ee31b | 2016-12-14 10:11:49 -0800 | [diff] [blame] | 35 | using android::base::StringPrintf; |
| 36 | |
// Registry of every mutex ever created, used by BaseMutex::DumpAll() in
// contention-logging builds.
struct AllMutexData {
  // A guard for all_mutexes that's not a mutex (Mutexes must CAS to acquire and busy wait).
  // Holds the BaseMutex* of the current guard owner, or nullptr when unowned.
  Atomic<const BaseMutex*> all_mutexes_guard;
  // All created mutexes guarded by all_mutexes_guard. Lazily allocated and
  // intentionally leaked (see BaseMutex::BaseMutex).
  std::set<BaseMutex*>* all_mutexes;
  AllMutexData() : all_mutexes(nullptr) {}
};
static struct AllMutexData gAllMutexData[kAllMutexDataSize];
| 45 | |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 46 | #if ART_USE_FUTEXES |
| 47 | static bool ComputeRelativeTimeSpec(timespec* result_ts, const timespec& lhs, const timespec& rhs) { |
Brian Carlstrom | fb6996f | 2013-07-18 18:21:14 -0700 | [diff] [blame] | 48 | const int32_t one_sec = 1000 * 1000 * 1000; // one second in nanoseconds. |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 49 | result_ts->tv_sec = lhs.tv_sec - rhs.tv_sec; |
| 50 | result_ts->tv_nsec = lhs.tv_nsec - rhs.tv_nsec; |
| 51 | if (result_ts->tv_nsec < 0) { |
| 52 | result_ts->tv_sec--; |
| 53 | result_ts->tv_nsec += one_sec; |
| 54 | } else if (result_ts->tv_nsec > one_sec) { |
| 55 | result_ts->tv_sec++; |
| 56 | result_ts->tv_nsec -= one_sec; |
| 57 | } |
| 58 | return result_ts->tv_sec < 0; |
| 59 | } |
| 60 | #endif |
| 61 | |
Hans Boehm | ae915a0 | 2017-12-12 11:05:32 -0800 | [diff] [blame] | 62 | // Wait for an amount of time that roughly increases in the argument i. |
| 63 | // Spin for small arguments and yield/sleep for longer ones. |
| 64 | static void BackOff(uint32_t i) { |
| 65 | static constexpr uint32_t kSpinMax = 10; |
| 66 | static constexpr uint32_t kYieldMax = 20; |
| 67 | if (i <= kSpinMax) { |
| 68 | // TODO: Esp. in very latency-sensitive cases, consider replacing this with an explicit |
| 69 | // test-and-test-and-set loop in the caller. Possibly skip entirely on a uniprocessor. |
| 70 | volatile uint32_t x = 0; |
| 71 | const uint32_t spin_count = 10 * i; |
| 72 | for (uint32_t spin = 0; spin < spin_count; ++spin) { |
| 73 | ++x; // Volatile; hence should not be optimized away. |
| 74 | } |
| 75 | // TODO: Consider adding x86 PAUSE and/or ARM YIELD here. |
| 76 | } else if (i <= kYieldMax) { |
| 77 | sched_yield(); |
| 78 | } else { |
| 79 | NanoSleep(1000ull * (i - kYieldMax)); |
| 80 | } |
| 81 | } |
| 82 | |
// RAII acquisition of the gAllMutexData spin guard. The guard cannot itself be
// a Mutex (all Mutexes register through it), so acquisition busy-waits with
// BackOff until a CAS installs |mutex| as the owner token.
class ScopedAllMutexesLock final {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    // Spin until the guard goes from nullptr to |mutex|; acquire ordering pairs
    // with the release store in the destructor.
    for (uint32_t i = 0;
         !gAllMutexData->all_mutexes_guard.CompareAndSetWeakAcquire(nullptr, mutex);
         ++i) {
      BackOff(i);
    }
  }

  ~ScopedAllMutexesLock() {
    // We must still be the owner recorded at construction; release the guard.
    DCHECK_EQ(gAllMutexData->all_mutexes_guard.load(std::memory_order_relaxed), mutex_);
    gAllMutexData->all_mutexes_guard.store(nullptr, std::memory_order_release);
  }

 private:
  // Owner token used to verify balanced acquire/release in the destructor.
  const BaseMutex* const mutex_;
};
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 101 | |
// Scoped class that generates events at the beginning and end of lock contention.
class ScopedContentionRecorder final : public ValueObject {
 public:
  ScopedContentionRecorder(BaseMutex* mutex, uint64_t blocked_tid, uint64_t owner_tid)
      // When contention logging is compiled out, store null/zero so the
      // destructor's bookkeeping branch has nothing to record.
      : mutex_(kLogLockContentions ? mutex : nullptr),
        blocked_tid_(kLogLockContentions ? blocked_tid : 0),
        owner_tid_(kLogLockContentions ? owner_tid : 0),
        start_nano_time_(kLogLockContentions ? NanoTime() : 0) {
    // Systrace events are emitted independently of kLogLockContentions.
    if (ATraceEnabled()) {
      std::string msg = StringPrintf("Lock contention on %s (owner tid: %" PRIu64 ")",
                                     mutex->GetName(), owner_tid);
      ATraceBegin(msg.c_str());
    }
  }

  ~ScopedContentionRecorder() {
    // NOTE(review): ATraceEnd() is unconditional while ATraceBegin() above is
    // guarded by ATraceEnabled(); presumably benign when tracing is off — confirm.
    ATraceEnd();
    if (kLogLockContentions) {
      uint64_t end_nano_time = NanoTime();
      mutex_->RecordContention(blocked_tid_, owner_tid_, end_nano_time - start_nano_time_);
    }
  }

 private:
  BaseMutex* const mutex_;          // Contended mutex; null if logging disabled.
  const uint64_t blocked_tid_;      // Tid of the thread that had to wait.
  const uint64_t owner_tid_;        // Tid of the holder when blocking began.
  const uint64_t start_nano_time_;  // NanoTime() when blocking began.
};
| 131 | |
// Registers the new mutex in the global all_mutexes set (contention-logging
// builds only). |name| and |level| are stored for debugging and lock-level checks.
BaseMutex::BaseMutex(const char* name, LockLevel level)
    : name_(name),
      level_(level),
      should_respond_to_empty_checkpoint_request_(false) {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    std::set<BaseMutex*>** all_mutexes_ptr = &gAllMutexData->all_mutexes;
    if (*all_mutexes_ptr == nullptr) {
      // We leak the global set of all mutexes to avoid ordering issues in global variable
      // construction/destruction.
      *all_mutexes_ptr = new std::set<BaseMutex*>();
    }
    (*all_mutexes_ptr)->insert(this);
  }
}
| 147 | |
// Unregisters this mutex from the global all_mutexes set (contention-logging
// builds only); the set itself is intentionally never freed.
BaseMutex::~BaseMutex() {
  if (kLogLockContentions) {
    ScopedAllMutexesLock mu(this);
    gAllMutexData->all_mutexes->erase(this);
  }
}
| 154 | |
| 155 | void BaseMutex::DumpAll(std::ostream& os) { |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 156 | if (kLogLockContentions) { |
| 157 | os << "Mutex logging:\n"; |
| 158 | ScopedAllMutexesLock mu(reinterpret_cast<const BaseMutex*>(-1)); |
Ian Rogers | d9c4fc9 | 2013-10-01 19:45:43 -0700 | [diff] [blame] | 159 | std::set<BaseMutex*>* all_mutexes = gAllMutexData->all_mutexes; |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 160 | if (all_mutexes == nullptr) { |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 161 | // No mutexes have been created yet during at startup. |
| 162 | return; |
| 163 | } |
Ian Rogers | d9c4fc9 | 2013-10-01 19:45:43 -0700 | [diff] [blame] | 164 | os << "(Contended)\n"; |
Andreas Gampe | c55bb39 | 2018-09-21 00:02:02 +0000 | [diff] [blame] | 165 | for (const BaseMutex* mutex : *all_mutexes) { |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 166 | if (mutex->HasEverContended()) { |
| 167 | mutex->Dump(os); |
| 168 | os << "\n"; |
| 169 | } |
| 170 | } |
| 171 | os << "(Never contented)\n"; |
Andreas Gampe | c55bb39 | 2018-09-21 00:02:02 +0000 | [diff] [blame] | 172 | for (const BaseMutex* mutex : *all_mutexes) { |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 173 | if (!mutex->HasEverContended()) { |
| 174 | mutex->Dump(os); |
| 175 | os << "\n"; |
| 176 | } |
| 177 | } |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 178 | } |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 179 | } |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 180 | |
// Debug-mode check that it is safe for |self| to block waiting on this mutex:
// the mutex (or the monitor lock) must already be held, and no other lock may
// be held that could deadlock with thread suspension.
void BaseMutex::CheckSafeToWait(Thread* self) {
  if (self == nullptr) {
    // Threads not attached to the runtime get a separate, simpler check.
    CheckUnattachedThread(level_);
    return;
  }
  if (kDebugLocking) {
    CHECK(self->GetHeldMutex(level_) == this || level_ == kMonitorLock)
        << "Waiting on unacquired mutex: " << name_;
    bool bad_mutexes_held = false;
    // Scan all other lock levels for mutexes this thread still holds.
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      if (i != level_) {
        BaseMutex* held_mutex = self->GetHeldMutex(static_cast<LockLevel>(i));
        // We allow the thread to wait even while the user_code_suspension_lock_ is held. This
        // just means that gc or some other internal process is suspending the thread while it is
        // trying to suspend some other thread. So long as the current thread is not being
        // suspended by a SuspendReason::kForUserCode (which needs the user_code_suspension_lock_
        // to clear) this is fine. This is needed due to user_code_suspension_lock_ being the way
        // untrusted code interacts with suspension. One holds the lock to prevent
        // user-code-suspension from occurring. Since this is only initiated from user-supplied
        // native-code this is safe.
        if (held_mutex == Locks::user_code_suspension_lock_) {
          // No thread safety analysis is fine since we have both the user_code_suspension_lock_
          // from the line above and the ThreadSuspendCountLock since it is our level_. We use this
          // lambda to avoid having to annotate the whole function as NO_THREAD_SAFETY_ANALYSIS.
          auto is_suspending_for_user_code = [self]() NO_THREAD_SAFETY_ANALYSIS {
            return self->GetUserCodeSuspendCount() != 0;
          };
          if (is_suspending_for_user_code()) {
            LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                       << "(level " << LockLevel(i) << ") while performing wait on "
                       << "\"" << name_ << "\" (level " << level_ << ") "
                       << "with SuspendReason::kForUserCode pending suspensions";
            bad_mutexes_held = true;
          }
        } else if (held_mutex != nullptr) {
          // Any other held mutex makes the wait unsafe.
          LOG(ERROR) << "Holding \"" << held_mutex->name_ << "\" "
                     << "(level " << LockLevel(i) << ") while performing wait on "
                     << "\"" << name_ << "\" (level " << level_ << ")";
          bad_mutexes_held = true;
        }
      }
    }
    if (gAborting == 0) {  // Avoid recursive aborts.
      CHECK(!bad_mutexes_held) << this;
    }
  }
}
| 227 | |
// Accumulates |value| (nanoseconds blocked) into the total contention wait time.
void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
  if (kLogLockContentions) {
    // Atomically add value to wait_time.
    wait_time.fetch_add(value, std::memory_order_seq_cst);
  }
}
| 234 | |
Brian Carlstrom | 0de7985 | 2013-07-25 22:29:58 -0700 | [diff] [blame] | 235 | void BaseMutex::RecordContention(uint64_t blocked_tid, |
| 236 | uint64_t owner_tid, |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 237 | uint64_t nano_time_blocked) { |
| 238 | if (kLogLockContentions) { |
Ian Rogers | 3e5cf30 | 2014-05-20 16:40:37 -0700 | [diff] [blame] | 239 | ContentionLogData* data = contention_log_data_; |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 240 | ++(data->contention_count); |
| 241 | data->AddToWaitTime(nano_time_blocked); |
| 242 | ContentionLogEntry* log = data->contention_log; |
| 243 | // This code is intentionally racy as it is only used for diagnostics. |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 244 | int32_t slot = data->cur_content_log_entry.load(std::memory_order_relaxed); |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 245 | if (log[slot].blocked_tid == blocked_tid && |
| 246 | log[slot].owner_tid == blocked_tid) { |
| 247 | ++log[slot].count; |
| 248 | } else { |
| 249 | uint32_t new_slot; |
| 250 | do { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 251 | slot = data->cur_content_log_entry.load(std::memory_order_relaxed); |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 252 | new_slot = (slot + 1) % kContentionLogSize; |
Orion Hodson | 4557b38 | 2018-01-03 11:47:54 +0000 | [diff] [blame] | 253 | } while (!data->cur_content_log_entry.CompareAndSetWeakRelaxed(slot, new_slot)); |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 254 | log[new_slot].blocked_tid = blocked_tid; |
| 255 | log[new_slot].owner_tid = owner_tid; |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 256 | log[new_slot].count.store(1, std::memory_order_relaxed); |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 257 | } |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 258 | } |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 259 | } |
| 260 | |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 261 | void BaseMutex::DumpContention(std::ostream& os) const { |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 262 | if (kLogLockContentions) { |
Ian Rogers | 3e5cf30 | 2014-05-20 16:40:37 -0700 | [diff] [blame] | 263 | const ContentionLogData* data = contention_log_data_; |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 264 | const ContentionLogEntry* log = data->contention_log; |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 265 | uint64_t wait_time = data->wait_time.load(std::memory_order_relaxed); |
| 266 | uint32_t contention_count = data->contention_count.load(std::memory_order_relaxed); |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 267 | if (contention_count == 0) { |
| 268 | os << "never contended"; |
| 269 | } else { |
| 270 | os << "contended " << contention_count |
Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 271 | << " total wait of contender " << PrettyDuration(wait_time) |
| 272 | << " average " << PrettyDuration(wait_time / contention_count); |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 273 | SafeMap<uint64_t, size_t> most_common_blocker; |
| 274 | SafeMap<uint64_t, size_t> most_common_blocked; |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 275 | for (size_t i = 0; i < kContentionLogSize; ++i) { |
| 276 | uint64_t blocked_tid = log[i].blocked_tid; |
| 277 | uint64_t owner_tid = log[i].owner_tid; |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 278 | uint32_t count = log[i].count.load(std::memory_order_relaxed); |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 279 | if (count > 0) { |
Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 280 | auto it = most_common_blocked.find(blocked_tid); |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 281 | if (it != most_common_blocked.end()) { |
| 282 | most_common_blocked.Overwrite(blocked_tid, it->second + count); |
| 283 | } else { |
| 284 | most_common_blocked.Put(blocked_tid, count); |
| 285 | } |
| 286 | it = most_common_blocker.find(owner_tid); |
| 287 | if (it != most_common_blocker.end()) { |
| 288 | most_common_blocker.Overwrite(owner_tid, it->second + count); |
| 289 | } else { |
| 290 | most_common_blocker.Put(owner_tid, count); |
| 291 | } |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 292 | } |
| 293 | } |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 294 | uint64_t max_tid = 0; |
| 295 | size_t max_tid_count = 0; |
Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 296 | for (const auto& pair : most_common_blocked) { |
| 297 | if (pair.second > max_tid_count) { |
| 298 | max_tid = pair.first; |
| 299 | max_tid_count = pair.second; |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 300 | } |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 301 | } |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 302 | if (max_tid != 0) { |
| 303 | os << " sample shows most blocked tid=" << max_tid; |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 304 | } |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 305 | max_tid = 0; |
| 306 | max_tid_count = 0; |
Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 307 | for (const auto& pair : most_common_blocker) { |
| 308 | if (pair.second > max_tid_count) { |
| 309 | max_tid = pair.first; |
| 310 | max_tid_count = pair.second; |
Hiroshi Yamauchi | 1afde13 | 2013-08-06 17:09:30 -0700 | [diff] [blame] | 311 | } |
| 312 | } |
| 313 | if (max_tid != 0) { |
| 314 | os << " sample shows tid=" << max_tid << " owning during this time"; |
| 315 | } |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 316 | } |
| 317 | } |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 318 | } |
| 319 | |
| 320 | |
// Constructs an unlocked mutex at the given lock |level|. |recursive| permits
// the owning thread to acquire the lock more than once.
Mutex::Mutex(const char* name, LockLevel level, bool recursive)
    : BaseMutex(name, level), exclusive_owner_(0), recursion_count_(0), recursive_(recursive) {
#if ART_USE_FUTEXES
  // Futex-backed state must start unlocked with no waiters.
  DCHECK_EQ(0, state_.load(std::memory_order_relaxed));
  DCHECK_EQ(0, num_contenders_.load(std::memory_order_relaxed));
#else
  CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
#endif
}
| 330 | |
// Helper to allow checking shutdown while locking for thread safety.
// Takes runtime_shutdown_lock_ before consulting the racy abort check.
static bool IsSafeToCallAbortSafe() {
  MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_);
  return Locks::IsSafeToCallAbortRacy();
}
// Destroys the mutex, sanity-checking that it is unlocked, ownerless, and has
// no waiters. During shutdown suspended daemon threads may still hold locks,
// so violations are only fatal when it is safe to abort.
Mutex::~Mutex() {
  bool safe_to_call_abort = Locks::IsSafeToCallAbortRacy();
#if ART_USE_FUTEXES
  if (state_.load(std::memory_order_relaxed) != 0) {
    LOG(safe_to_call_abort ? FATAL : WARNING)
        << "destroying mutex with owner: " << GetExclusiveOwnerTid();
  } else {
    if (GetExclusiveOwnerTid() != 0) {
      LOG(safe_to_call_abort ? FATAL : WARNING)
          << "unexpectedly found an owner on unlocked mutex " << name_;
    }
    if (num_contenders_.load(std::memory_order_seq_cst) != 0) {
      LOG(safe_to_call_abort ? FATAL : WARNING)
          << "unexpectedly found a contender on mutex " << name_;
    }
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_mutex_destroy(&mutex_);
  if (rc != 0) {
    errno = rc;
    PLOG(safe_to_call_abort ? FATAL : WARNING)
        << "pthread_mutex_destroy failed for " << name_;
  }
#endif
}
| 364 | |
// Acquires the mutex for |self|, blocking until it is available. Recursive
// mutexes may be re-entered by the holder; non-recursive mutexes assert
// against self-deadlock under kDebugLocking.
void Mutex::ExclusiveLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    // state_: 0 == unlocked, 1 == locked. Loop until we win the CAS from 0 to 1.
    bool done = false;
    do {
      int32_t cur_state = state_.load(std::memory_order_relaxed);
      if (LIKELY(cur_state == 0)) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        // Failed to acquire, hang up.
        ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid());
        num_contenders_++;
        // Honor pending empty-checkpoint requests before sleeping.
        if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) {
          self->CheckEmptyCheckpointFromMutex();
        }
        // Sleep in the kernel until state_ is no longer 1 (or a spurious wakeup).
        if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, 1, nullptr, nullptr, 0) != 0) {
          // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning.
          // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock.
          if ((errno != EAGAIN) && (errno != EINTR)) {
            PLOG(FATAL) << "futex wait failed for " << name_;
          }
        }
        num_contenders_--;
      }
    } while (!done);
    DCHECK_EQ(state_.load(std::memory_order_relaxed), 1);
#else
    CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
#endif
    // We now hold the lock: record ownership.
    DCHECK_EQ(GetExclusiveOwnerTid(), 0);
    exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
}
| 410 | |
// Attempts to acquire the mutex for |self| without blocking. Returns true on
// success (including re-entry on a recursive mutex already held by |self|),
// false if another thread holds the lock.
bool Mutex::ExclusiveTryLock(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  if (kDebugLocking && !recursive_) {
    AssertNotHeld(self);
  }
  if (!recursive_ || !IsExclusiveHeld(self)) {
#if ART_USE_FUTEXES
    // Unlike ExclusiveLock, bail out immediately if the lock is already held.
    bool done = false;
    do {
      int32_t cur_state = state_.load(std::memory_order_relaxed);
      if (cur_state == 0) {
        // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
        done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, 1 /* new state */);
      } else {
        return false;
      }
    } while (!done);
    DCHECK_EQ(state_.load(std::memory_order_relaxed), 1);
#else
    int result = pthread_mutex_trylock(&mutex_);
    if (result == EBUSY) {
      return false;
    }
    if (result != 0) {
      errno = result;
      PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_;
    }
#endif
    // We now hold the lock: record ownership.
    DCHECK_EQ(GetExclusiveOwnerTid(), 0);
    exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
    RegisterAsLocked(self);
  }
  recursion_count_++;
  if (kDebugLocking) {
    CHECK(recursion_count_ == 1 || recursive_) << "Unexpected recursion count on mutex: "
        << name_ << " " << recursion_count_;
    AssertHeld(self);
  }
  return true;
}
| 451 | |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 452 | void Mutex::ExclusiveUnlock(Thread* self) { |
Mathieu Chartier | eb0a179 | 2014-12-15 17:23:45 -0800 | [diff] [blame] | 453 | if (kIsDebugBuild && self != nullptr && self != Thread::Current()) { |
| 454 | std::string name1 = "<null>"; |
| 455 | std::string name2 = "<null>"; |
| 456 | if (self != nullptr) { |
| 457 | self->GetThreadName(name1); |
| 458 | } |
| 459 | if (Thread::Current() != nullptr) { |
| 460 | Thread::Current()->GetThreadName(name2); |
| 461 | } |
Mathieu Chartier | 4c10110 | 2015-01-27 17:14:16 -0800 | [diff] [blame] | 462 | LOG(FATAL) << GetName() << " level=" << level_ << " self=" << name1 |
| 463 | << " Thread::Current()=" << name2; |
Mathieu Chartier | eb0a179 | 2014-12-15 17:23:45 -0800 | [diff] [blame] | 464 | } |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 465 | AssertHeld(self); |
Hans Boehm | 0882af2 | 2017-08-31 15:21:57 -0700 | [diff] [blame] | 466 | DCHECK_NE(GetExclusiveOwnerTid(), 0); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 467 | recursion_count_--; |
| 468 | if (!recursive_ || recursion_count_ == 0) { |
Ian Rogers | 25fd14b | 2012-09-05 10:56:38 -0700 | [diff] [blame] | 469 | if (kDebugLocking) { |
| 470 | CHECK(recursion_count_ == 0 || recursive_) << "Unexpected recursion count on mutex: " |
| 471 | << name_ << " " << recursion_count_; |
| 472 | } |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 473 | RegisterAsUnlocked(self); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 474 | #if ART_USE_FUTEXES |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 475 | bool done = false; |
| 476 | do { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 477 | int32_t cur_state = state_.load(std::memory_order_relaxed); |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 478 | if (LIKELY(cur_state == 1)) { |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 479 | // We're no longer the owner. |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 480 | exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 481 | // Change state to 0 and impose load/store ordering appropriate for lock release. |
Orion Hodson | 4557b38 | 2018-01-03 11:47:54 +0000 | [diff] [blame] | 482 | // Note, the relaxed loads below mustn't reorder before the CompareAndSet. |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 483 | // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing |
| 484 | // a status bit into the state on contention. |
Orion Hodson | 4557b38 | 2018-01-03 11:47:54 +0000 | [diff] [blame] | 485 | done = state_.CompareAndSetWeakSequentiallyConsistent(cur_state, 0 /* new state */); |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 486 | if (LIKELY(done)) { // Spurious fail? |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 487 | // Wake a contender. |
Hyangseok Chae | 240a564 | 2018-07-25 16:45:08 +0900 | [diff] [blame] | 488 | if (UNLIKELY(num_contenders_.load(std::memory_order_seq_cst) > 0)) { |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 489 | futex(state_.Address(), FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0); |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 490 | } |
| 491 | } |
| 492 | } else { |
| 493 | // Logging acquires the logging lock, avoid infinite recursion in that case. |
| 494 | if (this != Locks::logging_lock_) { |
| 495 | LOG(FATAL) << "Unexpected state_ in unlock " << cur_state << " for " << name_; |
| 496 | } else { |
Andreas Gampe | 3fec9ac | 2016-09-13 10:47:28 -0700 | [diff] [blame] | 497 | LogHelper::LogLineLowStack(__FILE__, |
| 498 | __LINE__, |
| 499 | ::android::base::FATAL_WITHOUT_ABORT, |
| 500 | StringPrintf("Unexpected state_ %d in unlock for %s", |
| 501 | cur_state, name_).c_str()); |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 502 | _exit(1); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 503 | } |
| 504 | } |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 505 | } while (!done); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 506 | #else |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 507 | exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 508 | CHECK_MUTEX_CALL(pthread_mutex_unlock, (&mutex_)); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 509 | #endif |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 510 | } |
Elliott Hughes | 8daa092 | 2011-09-11 13:46:25 -0700 | [diff] [blame] | 511 | } |
| 512 | |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 513 | void Mutex::Dump(std::ostream& os) const { |
| 514 | os << (recursive_ ? "recursive " : "non-recursive ") |
| 515 | << name_ |
| 516 | << " level=" << static_cast<int>(level_) |
| 517 | << " rec=" << recursion_count_ |
| 518 | << " owner=" << GetExclusiveOwnerTid() << " "; |
| 519 | DumpContention(os); |
Ian Rogers | 01ae580 | 2012-09-28 16:14:01 -0700 | [diff] [blame] | 520 | } |
| 521 | |
// Stream insertion: delegates to Mutex::Dump for the diagnostic summary.
std::ostream& operator<<(std::ostream& os, const Mutex& mu) {
  mu.Dump(os);
  return os;
}
| 526 | |
Hiroshi Yamauchi | a222404 | 2017-02-08 16:35:45 -0800 | [diff] [blame] | 527 | void Mutex::WakeupToRespondToEmptyCheckpoint() { |
| 528 | #if ART_USE_FUTEXES |
| 529 | // Wake up all the waiters so they will respond to the emtpy checkpoint. |
| 530 | DCHECK(should_respond_to_empty_checkpoint_request_); |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 531 | if (UNLIKELY(num_contenders_.load(std::memory_order_relaxed) > 0)) { |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 532 | futex(state_.Address(), FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0); |
Hiroshi Yamauchi | a222404 | 2017-02-08 16:35:45 -0800 | [diff] [blame] | 533 | } |
| 534 | #else |
| 535 | LOG(FATAL) << "Non futex case isn't supported."; |
| 536 | #endif |
| 537 | } |
| 538 | |
Brian Carlstrom | 02c8cc6 | 2013-07-18 15:54:44 -0700 | [diff] [blame] | 539 | ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level) |
| 540 | : BaseMutex(name, level) |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 541 | #if ART_USE_FUTEXES |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 542 | , state_(0), num_pending_readers_(0), num_pending_writers_(0) |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 543 | #endif |
Igor Murashkin | 5573c37 | 2017-11-16 13:34:30 -0800 | [diff] [blame] | 544 | { |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 545 | #if !ART_USE_FUTEXES |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 546 | CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr)); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 547 | #endif |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 548 | exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 549 | } |
| 550 | |
// Destroys the lock, verifying (futex build) that it is unlocked, unowned, and has no
// pending readers or writers.
ReaderWriterMutex::~ReaderWriterMutex() {
#if ART_USE_FUTEXES
  CHECK_EQ(state_.load(std::memory_order_relaxed), 0);
  CHECK_EQ(GetExclusiveOwnerTid(), 0);
  CHECK_EQ(num_pending_readers_.load(std::memory_order_relaxed), 0);
  CHECK_EQ(num_pending_writers_.load(std::memory_order_relaxed), 0);
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using locks.
  int rc = pthread_rwlock_destroy(&rwlock_);
  if (rc != 0) {
    errno = rc;
    // Only abort when it is safe to do so; otherwise just warn.
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    PLOG(is_safe_to_call_abort ? FATAL : WARNING) << "pthread_rwlock_destroy failed for " << name_;
  }
#endif
}
| 568 | |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 569 | void ReaderWriterMutex::ExclusiveLock(Thread* self) { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 570 | DCHECK(self == nullptr || self == Thread::Current()); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 571 | AssertNotExclusiveHeld(self); |
| 572 | #if ART_USE_FUTEXES |
| 573 | bool done = false; |
| 574 | do { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 575 | int32_t cur_state = state_.load(std::memory_order_relaxed); |
Hiroshi Yamauchi | 967a0ad | 2013-09-10 16:24:21 -0700 | [diff] [blame] | 576 | if (LIKELY(cur_state == 0)) { |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 577 | // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition. |
Orion Hodson | 4557b38 | 2018-01-03 11:47:54 +0000 | [diff] [blame] | 578 | done = state_.CompareAndSetWeakAcquire(0 /* cur_state*/, -1 /* new state */); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 579 | } else { |
| 580 | // Failed to acquire, hang up. |
Hiroshi Yamauchi | b373308 | 2013-08-12 17:28:49 -0700 | [diff] [blame] | 581 | ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid()); |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 582 | ++num_pending_writers_; |
Hiroshi Yamauchi | a222404 | 2017-02-08 16:35:45 -0800 | [diff] [blame] | 583 | if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) { |
| 584 | self->CheckEmptyCheckpointFromMutex(); |
| 585 | } |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 586 | if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) { |
Brian Carlstrom | 0de7985 | 2013-07-25 22:29:58 -0700 | [diff] [blame] | 587 | // EAGAIN and EINTR both indicate a spurious failure, try again from the beginning. |
| 588 | // We don't use TEMP_FAILURE_RETRY so we can intentionally retry to acquire the lock. |
| 589 | if ((errno != EAGAIN) && (errno != EINTR)) { |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 590 | PLOG(FATAL) << "futex wait failed for " << name_; |
| 591 | } |
| 592 | } |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 593 | --num_pending_writers_; |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 594 | } |
Brian Carlstrom | df62950 | 2013-07-17 22:39:56 -0700 | [diff] [blame] | 595 | } while (!done); |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 596 | DCHECK_EQ(state_.load(std::memory_order_relaxed), -1); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 597 | #else |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 598 | CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_)); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 599 | #endif |
Hans Boehm | 0882af2 | 2017-08-31 15:21:57 -0700 | [diff] [blame] | 600 | DCHECK_EQ(GetExclusiveOwnerTid(), 0); |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 601 | exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 602 | RegisterAsLocked(self); |
| 603 | AssertExclusiveHeld(self); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 604 | } |
| 605 | |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 606 | void ReaderWriterMutex::ExclusiveUnlock(Thread* self) { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 607 | DCHECK(self == nullptr || self == Thread::Current()); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 608 | AssertExclusiveHeld(self); |
| 609 | RegisterAsUnlocked(self); |
Hans Boehm | 0882af2 | 2017-08-31 15:21:57 -0700 | [diff] [blame] | 610 | DCHECK_NE(GetExclusiveOwnerTid(), 0); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 611 | #if ART_USE_FUTEXES |
| 612 | bool done = false; |
| 613 | do { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 614 | int32_t cur_state = state_.load(std::memory_order_relaxed); |
Hiroshi Yamauchi | 967a0ad | 2013-09-10 16:24:21 -0700 | [diff] [blame] | 615 | if (LIKELY(cur_state == -1)) { |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 616 | // We're no longer the owner. |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 617 | exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 618 | // Change state from -1 to 0 and impose load/store ordering appropriate for lock release. |
Orion Hodson | 4557b38 | 2018-01-03 11:47:54 +0000 | [diff] [blame] | 619 | // Note, the relaxed loads below musn't reorder before the CompareAndSet. |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 620 | // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing |
| 621 | // a status bit into the state on contention. |
Orion Hodson | 4557b38 | 2018-01-03 11:47:54 +0000 | [diff] [blame] | 622 | done = state_.CompareAndSetWeakSequentiallyConsistent(-1 /* cur_state*/, 0 /* new state */); |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 623 | if (LIKELY(done)) { // Weak CAS may fail spuriously. |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 624 | // Wake any waiters. |
Hyangseok Chae | 240a564 | 2018-07-25 16:45:08 +0900 | [diff] [blame] | 625 | if (UNLIKELY(num_pending_readers_.load(std::memory_order_seq_cst) > 0 || |
| 626 | num_pending_writers_.load(std::memory_order_seq_cst) > 0)) { |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 627 | futex(state_.Address(), FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 628 | } |
| 629 | } |
| 630 | } else { |
| 631 | LOG(FATAL) << "Unexpected state_:" << cur_state << " for " << name_; |
| 632 | } |
Brian Carlstrom | df62950 | 2013-07-17 22:39:56 -0700 | [diff] [blame] | 633 | } while (!done); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 634 | #else |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 635 | exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 636 | CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_)); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 637 | #endif |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 638 | } |
| 639 | |
Ian Rogers | 66aee5c | 2012-08-15 17:17:47 -0700 | [diff] [blame] | 640 | #if HAVE_TIMED_RWLOCK |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 641 | bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32_t ns) { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 642 | DCHECK(self == nullptr || self == Thread::Current()); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 643 | #if ART_USE_FUTEXES |
| 644 | bool done = false; |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 645 | timespec end_abs_ts; |
tony.ys_liu | 071e48e | 2015-01-14 18:28:03 +0800 | [diff] [blame] | 646 | InitTimeSpec(true, CLOCK_MONOTONIC, ms, ns, &end_abs_ts); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 647 | do { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 648 | int32_t cur_state = state_.load(std::memory_order_relaxed); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 649 | if (cur_state == 0) { |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 650 | // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition. |
Orion Hodson | 4557b38 | 2018-01-03 11:47:54 +0000 | [diff] [blame] | 651 | done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, -1 /* new state */); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 652 | } else { |
| 653 | // Failed to acquire, hang up. |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 654 | timespec now_abs_ts; |
tony.ys_liu | 071e48e | 2015-01-14 18:28:03 +0800 | [diff] [blame] | 655 | InitTimeSpec(true, CLOCK_MONOTONIC, 0, 0, &now_abs_ts); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 656 | timespec rel_ts; |
| 657 | if (ComputeRelativeTimeSpec(&rel_ts, end_abs_ts, now_abs_ts)) { |
| 658 | return false; // Timed out. |
| 659 | } |
Hiroshi Yamauchi | b373308 | 2013-08-12 17:28:49 -0700 | [diff] [blame] | 660 | ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid()); |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 661 | ++num_pending_writers_; |
Hiroshi Yamauchi | a222404 | 2017-02-08 16:35:45 -0800 | [diff] [blame] | 662 | if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) { |
| 663 | self->CheckEmptyCheckpointFromMutex(); |
| 664 | } |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 665 | if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, &rel_ts, nullptr, 0) != 0) { |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 666 | if (errno == ETIMEDOUT) { |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 667 | --num_pending_writers_; |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 668 | return false; // Timed out. |
Brian Carlstrom | 0de7985 | 2013-07-25 22:29:58 -0700 | [diff] [blame] | 669 | } else if ((errno != EAGAIN) && (errno != EINTR)) { |
| 670 | // EAGAIN and EINTR both indicate a spurious failure, |
| 671 | // recompute the relative time out from now and try again. |
| 672 | // We don't use TEMP_FAILURE_RETRY so we can recompute rel_ts; |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 673 | PLOG(FATAL) << "timed futex wait failed for " << name_; |
| 674 | } |
| 675 | } |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 676 | --num_pending_writers_; |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 677 | } |
Brian Carlstrom | df62950 | 2013-07-17 22:39:56 -0700 | [diff] [blame] | 678 | } while (!done); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 679 | #else |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 680 | timespec ts; |
Brian Carlstrom | bcc2926 | 2012-11-02 11:36:03 -0700 | [diff] [blame] | 681 | InitTimeSpec(true, CLOCK_REALTIME, ms, ns, &ts); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 682 | int result = pthread_rwlock_timedwrlock(&rwlock_, &ts); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 683 | if (result == ETIMEDOUT) { |
| 684 | return false; |
| 685 | } |
| 686 | if (result != 0) { |
| 687 | errno = result; |
Ian Rogers | a5acfd3 | 2012-08-15 11:50:10 -0700 | [diff] [blame] | 688 | PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_; |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 689 | } |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 690 | #endif |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 691 | exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 692 | RegisterAsLocked(self); |
| 693 | AssertSharedHeld(self); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 694 | return true; |
| 695 | } |
Ian Rogers | 66aee5c | 2012-08-15 17:17:47 -0700 | [diff] [blame] | 696 | #endif |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 697 | |
Ian Rogers | 51d212e | 2014-10-23 17:48:20 -0700 | [diff] [blame] | 698 | #if ART_USE_FUTEXES |
Ian Rogers | cf7f191 | 2014-10-22 22:06:39 -0700 | [diff] [blame] | 699 | void ReaderWriterMutex::HandleSharedLockContention(Thread* self, int32_t cur_state) { |
| 700 | // Owner holds it exclusively, hang up. |
Roland Levillain | cd72dc9 | 2018-02-27 19:15:31 +0000 | [diff] [blame] | 701 | ScopedContentionRecorder scr(this, SafeGetTid(self), GetExclusiveOwnerTid()); |
Ian Rogers | cf7f191 | 2014-10-22 22:06:39 -0700 | [diff] [blame] | 702 | ++num_pending_readers_; |
Hiroshi Yamauchi | a222404 | 2017-02-08 16:35:45 -0800 | [diff] [blame] | 703 | if (UNLIKELY(should_respond_to_empty_checkpoint_request_)) { |
| 704 | self->CheckEmptyCheckpointFromMutex(); |
| 705 | } |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 706 | if (futex(state_.Address(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) { |
Daniel Colascione | 6f4d102 | 2016-11-21 14:35:42 -0800 | [diff] [blame] | 707 | if (errno != EAGAIN && errno != EINTR) { |
Ian Rogers | cf7f191 | 2014-10-22 22:06:39 -0700 | [diff] [blame] | 708 | PLOG(FATAL) << "futex wait failed for " << name_; |
| 709 | } |
| 710 | } |
| 711 | --num_pending_readers_; |
| 712 | } |
Ian Rogers | 51d212e | 2014-10-23 17:48:20 -0700 | [diff] [blame] | 713 | #endif |
Ian Rogers | cf7f191 | 2014-10-22 22:06:39 -0700 | [diff] [blame] | 714 | |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 715 | bool ReaderWriterMutex::SharedTryLock(Thread* self) { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 716 | DCHECK(self == nullptr || self == Thread::Current()); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 717 | #if ART_USE_FUTEXES |
| 718 | bool done = false; |
| 719 | do { |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 720 | int32_t cur_state = state_.load(std::memory_order_relaxed); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 721 | if (cur_state >= 0) { |
Ian Rogers | c719069 | 2014-07-08 23:50:26 -0700 | [diff] [blame] | 722 | // Add as an extra reader and impose load/store ordering appropriate for lock acquisition. |
Orion Hodson | 4557b38 | 2018-01-03 11:47:54 +0000 | [diff] [blame] | 723 | done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 724 | } else { |
| 725 | // Owner holds it exclusively. |
| 726 | return false; |
| 727 | } |
Brian Carlstrom | df62950 | 2013-07-17 22:39:56 -0700 | [diff] [blame] | 728 | } while (!done); |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 729 | #else |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 730 | int result = pthread_rwlock_tryrdlock(&rwlock_); |
| 731 | if (result == EBUSY) { |
| 732 | return false; |
| 733 | } |
| 734 | if (result != 0) { |
| 735 | errno = result; |
| 736 | PLOG(FATAL) << "pthread_mutex_trylock failed for " << name_; |
| 737 | } |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 738 | #endif |
| 739 | RegisterAsLocked(self); |
| 740 | AssertSharedHeld(self); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 741 | return true; |
| 742 | } |
| 743 | |
Ian Rogers | 81d425b | 2012-09-27 16:03:43 -0700 | [diff] [blame] | 744 | bool ReaderWriterMutex::IsSharedHeld(const Thread* self) const { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 745 | DCHECK(self == nullptr || self == Thread::Current()); |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 746 | bool result; |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 747 | if (UNLIKELY(self == nullptr)) { // Handle unattached threads. |
Ian Rogers | 01ae580 | 2012-09-28 16:14:01 -0700 | [diff] [blame] | 748 | result = IsExclusiveHeld(self); // TODO: a better best effort here. |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 749 | } else { |
| 750 | result = (self->GetHeldMutex(level_) == this); |
| 751 | } |
| 752 | return result; |
| 753 | } |
| 754 | |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 755 | void ReaderWriterMutex::Dump(std::ostream& os) const { |
| 756 | os << name_ |
| 757 | << " level=" << static_cast<int>(level_) |
Mathieu Chartier | 5869a2c | 2014-10-08 14:26:23 -0700 | [diff] [blame] | 758 | << " owner=" << GetExclusiveOwnerTid() |
| 759 | #if ART_USE_FUTEXES |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 760 | << " state=" << state_.load(std::memory_order_seq_cst) |
| 761 | << " num_pending_writers=" << num_pending_writers_.load(std::memory_order_seq_cst) |
| 762 | << " num_pending_readers=" << num_pending_readers_.load(std::memory_order_seq_cst) |
Mathieu Chartier | 5869a2c | 2014-10-08 14:26:23 -0700 | [diff] [blame] | 763 | #endif |
| 764 | << " "; |
Ian Rogers | 56edc43 | 2013-01-18 16:51:51 -0800 | [diff] [blame] | 765 | DumpContention(os); |
Ian Rogers | 01ae580 | 2012-09-28 16:14:01 -0700 | [diff] [blame] | 766 | } |
| 767 | |
// Stream insertion: delegates to ReaderWriterMutex::Dump for the diagnostic summary.
std::ostream& operator<<(std::ostream& os, const ReaderWriterMutex& mu) {
  mu.Dump(os);
  return os;
}
| 772 | |
Yu Li | eac4424 | 2015-06-29 10:50:03 +0800 | [diff] [blame] | 773 | std::ostream& operator<<(std::ostream& os, const MutatorMutex& mu) { |
| 774 | mu.Dump(os); |
| 775 | return os; |
| 776 | } |
| 777 | |
Hiroshi Yamauchi | a222404 | 2017-02-08 16:35:45 -0800 | [diff] [blame] | 778 | void ReaderWriterMutex::WakeupToRespondToEmptyCheckpoint() { |
| 779 | #if ART_USE_FUTEXES |
| 780 | // Wake up all the waiters so they will respond to the emtpy checkpoint. |
| 781 | DCHECK(should_respond_to_empty_checkpoint_request_); |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 782 | if (UNLIKELY(num_pending_readers_.load(std::memory_order_relaxed) > 0 || |
| 783 | num_pending_writers_.load(std::memory_order_relaxed) > 0)) { |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 784 | futex(state_.Address(), FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0); |
Hiroshi Yamauchi | a222404 | 2017-02-08 16:35:45 -0800 | [diff] [blame] | 785 | } |
| 786 | #else |
| 787 | LOG(FATAL) << "Non futex case isn't supported."; |
| 788 | #endif |
| 789 | } |
| 790 | |
Ian Rogers | 23055dc | 2013-04-18 16:29:16 -0700 | [diff] [blame] | 791 | ConditionVariable::ConditionVariable(const char* name, Mutex& guard) |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 792 | : name_(name), guard_(guard) { |
| 793 | #if ART_USE_FUTEXES |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 794 | DCHECK_EQ(0, sequence_.load(std::memory_order_relaxed)); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 795 | num_waiters_ = 0; |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 796 | #else |
Narayan Kamath | 51b7102 | 2014-03-04 11:57:09 +0000 | [diff] [blame] | 797 | pthread_condattr_t cond_attrs; |
Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 798 | CHECK_MUTEX_CALL(pthread_condattr_init, (&cond_attrs)); |
Narayan Kamath | 51b7102 | 2014-03-04 11:57:09 +0000 | [diff] [blame] | 799 | #if !defined(__APPLE__) |
| 800 | // Apple doesn't have CLOCK_MONOTONIC or pthread_condattr_setclock. |
Ian Rogers | 51d212e | 2014-10-23 17:48:20 -0700 | [diff] [blame] | 801 | CHECK_MUTEX_CALL(pthread_condattr_setclock, (&cond_attrs, CLOCK_MONOTONIC)); |
Narayan Kamath | 51b7102 | 2014-03-04 11:57:09 +0000 | [diff] [blame] | 802 | #endif |
| 803 | CHECK_MUTEX_CALL(pthread_cond_init, (&cond_, &cond_attrs)); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 804 | #endif |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 805 | } |
| 806 | |
// Destroys the condition variable. Destruction with live waiters is reported; it is fatal
// only when aborting is currently safe (e.g. not during runtime shutdown).
ConditionVariable::~ConditionVariable() {
#if ART_USE_FUTEXES
  if (num_waiters_!= 0) {
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    LOG(is_safe_to_call_abort ? FATAL : WARNING)
        << "ConditionVariable::~ConditionVariable for " << name_
        << " called with " << num_waiters_ << " waiters.";
  }
#else
  // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread
  // may still be using condition variables.
  int rc = pthread_cond_destroy(&cond_);
  if (rc != 0) {
    errno = rc;
    bool is_safe_to_call_abort = IsSafeToCallAbortSafe();
    PLOG(is_safe_to_call_abort ? FATAL : WARNING) << "pthread_cond_destroy failed for " << name_;
  }
#endif
}
| 826 | |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 827 | void ConditionVariable::Broadcast(Thread* self) { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 828 | DCHECK(self == nullptr || self == Thread::Current()); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 829 | // TODO: enable below, there's a race in thread creation that causes false failures currently. |
| 830 | // guard_.AssertExclusiveHeld(self); |
Mathieu Chartier | e46cd75 | 2012-10-31 16:56:18 -0700 | [diff] [blame] | 831 | DCHECK_EQ(guard_.GetExclusiveOwnerTid(), SafeGetTid(self)); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 832 | #if ART_USE_FUTEXES |
Charles Munger | bcd16ee | 2018-10-22 13:03:23 -0700 | [diff] [blame] | 833 | RequeueWaiters(std::numeric_limits<int32_t>::max()); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 834 | #else |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 835 | CHECK_MUTEX_CALL(pthread_cond_broadcast, (&cond_)); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 836 | #endif |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 837 | } |
| 838 | |
Charles Munger | bcd16ee | 2018-10-22 13:03:23 -0700 | [diff] [blame] | 839 | #if ART_USE_FUTEXES |
| 840 | void ConditionVariable::RequeueWaiters(int32_t count) { |
| 841 | if (num_waiters_ > 0) { |
| 842 | sequence_++; // Indicate a signal occurred. |
| 843 | // Move waiters from the condition variable's futex to the guard's futex, |
| 844 | // so that they will be woken up when the mutex is released. |
| 845 | bool done = futex(sequence_.Address(), |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 846 | FUTEX_REQUEUE_PRIVATE, |
Charles Munger | bcd16ee | 2018-10-22 13:03:23 -0700 | [diff] [blame] | 847 | /* Threads to wake */ 0, |
| 848 | /* Threads to requeue*/ reinterpret_cast<const timespec*>(count), |
| 849 | guard_.state_.Address(), |
| 850 | 0) != -1; |
| 851 | if (!done && errno != EAGAIN && errno != EINTR) { |
| 852 | PLOG(FATAL) << "futex requeue failed for " << name_; |
| 853 | } |
| 854 | } |
| 855 | } |
| 856 | #endif |
| 857 | |
| 858 | |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 859 | void ConditionVariable::Signal(Thread* self) { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 860 | DCHECK(self == nullptr || self == Thread::Current()); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 861 | guard_.AssertExclusiveHeld(self); |
| 862 | #if ART_USE_FUTEXES |
Charles Munger | bcd16ee | 2018-10-22 13:03:23 -0700 | [diff] [blame] | 863 | RequeueWaiters(1); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 864 | #else |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 865 | CHECK_MUTEX_CALL(pthread_cond_signal, (&cond_)); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 866 | #endif |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 867 | } |
| 868 | |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 869 | void ConditionVariable::Wait(Thread* self) { |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 870 | guard_.CheckSafeToWait(self); |
| 871 | WaitHoldingLocks(self); |
| 872 | } |
| 873 | |
// Blocks until signalled, skipping the lock-level safety check. The guard
// mutex is released while blocked and re-acquired before returning; the
// guard's recursion count is saved and restored around the wait.
void ConditionVariable::WaitHoldingLocks(Thread* self) {
  DCHECK(self == nullptr || self == Thread::Current());
  guard_.AssertExclusiveHeld(self);
  unsigned int old_recursion_count = guard_.recursion_count_;
#if ART_USE_FUTEXES
  num_waiters_++;
  // Ensure the Mutex is contended so that requeued threads are awoken.
  guard_.num_contenders_++;
  guard_.recursion_count_ = 1;
  // Snapshot the sequence BEFORE releasing the guard: if a signal lands
  // between the unlock and the futex call, sequence_ changes and FUTEX_WAIT
  // returns EAGAIN instead of missing the wakeup.
  int32_t cur_sequence = sequence_.load(std::memory_order_relaxed);
  guard_.ExclusiveUnlock(self);
  if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, nullptr, nullptr, 0) != 0) {
    // Futex failed, check it is an expected error.
    // EAGAIN == EWOULDBLK, so we let the caller try again.
    // EINTR implies a signal was sent to this thread.
    if ((errno != EINTR) && (errno != EAGAIN)) {
      PLOG(FATAL) << "futex wait failed for " << name_;
    }
  }
  if (self != nullptr) {
    JNIEnvExt* const env = self->GetJniEnv();
    if (UNLIKELY(env != nullptr && env->IsRuntimeDeleted())) {
      CHECK(self->IsDaemon());
      // If the runtime has been deleted, then we cannot proceed. Just sleep forever. This may
      // occur for user daemon threads that get a spurious wakeup. This occurs for test 132 with
      // --host and --gdb.
      // After we wake up, the runtime may have been shutdown, which means that this condition may
      // have been deleted. It is not safe to retry the wait.
      SleepForever();
    }
  }
  guard_.ExclusiveLock(self);
  CHECK_GE(num_waiters_, 0);
  num_waiters_--;
  // We awoke and so no longer require awakes from the guard_'s unlock.
  CHECK_GE(guard_.num_contenders_.load(std::memory_order_relaxed), 0);
  guard_.num_contenders_--;
#else
  // pthread_cond_wait atomically releases guard_.mutex_, so clear the recorded
  // owner/recursion state first and restore it after re-acquisition.
  pid_t old_owner = guard_.GetExclusiveOwnerTid();
  guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed);
  guard_.recursion_count_ = 0;
  CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_));
  guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed);
#endif
  guard_.recursion_count_ = old_recursion_count;
}
| 920 | |
Ian Rogers | 7b078e8 | 2014-09-10 14:44:24 -0700 | [diff] [blame] | 921 | bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) { |
Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 922 | DCHECK(self == nullptr || self == Thread::Current()); |
Ian Rogers | 7b078e8 | 2014-09-10 14:44:24 -0700 | [diff] [blame] | 923 | bool timed_out = false; |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 924 | guard_.AssertExclusiveHeld(self); |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 925 | guard_.CheckSafeToWait(self); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 926 | unsigned int old_recursion_count = guard_.recursion_count_; |
| 927 | #if ART_USE_FUTEXES |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 928 | timespec rel_ts; |
Ian Rogers | 5bd97c4 | 2012-11-27 02:38:26 -0800 | [diff] [blame] | 929 | InitTimeSpec(false, CLOCK_REALTIME, ms, ns, &rel_ts); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 930 | num_waiters_++; |
Ian Rogers | d45f201 | 2012-11-28 11:46:23 -0800 | [diff] [blame] | 931 | // Ensure the Mutex is contended so that requeued threads are awoken. |
Ian Rogers | b122a4b | 2013-11-19 18:00:50 -0800 | [diff] [blame] | 932 | guard_.num_contenders_++; |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 933 | guard_.recursion_count_ = 1; |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 934 | int32_t cur_sequence = sequence_.load(std::memory_order_relaxed); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 935 | guard_.ExclusiveUnlock(self); |
Charles Munger | 7530bae | 2018-10-29 20:03:51 -0700 | [diff] [blame] | 936 | if (futex(sequence_.Address(), FUTEX_WAIT_PRIVATE, cur_sequence, &rel_ts, nullptr, 0) != 0) { |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 937 | if (errno == ETIMEDOUT) { |
Ian Rogers | d45f201 | 2012-11-28 11:46:23 -0800 | [diff] [blame] | 938 | // Timed out we're done. |
Ian Rogers | 7b078e8 | 2014-09-10 14:44:24 -0700 | [diff] [blame] | 939 | timed_out = true; |
Brian Carlstrom | 0de7985 | 2013-07-25 22:29:58 -0700 | [diff] [blame] | 940 | } else if ((errno == EAGAIN) || (errno == EINTR)) { |
Ian Rogers | d45f201 | 2012-11-28 11:46:23 -0800 | [diff] [blame] | 941 | // A signal or ConditionVariable::Signal/Broadcast has come in. |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 942 | } else { |
| 943 | PLOG(FATAL) << "timed futex wait failed for " << name_; |
| 944 | } |
| 945 | } |
| 946 | guard_.ExclusiveLock(self); |
Ian Rogers | d45f201 | 2012-11-28 11:46:23 -0800 | [diff] [blame] | 947 | CHECK_GE(num_waiters_, 0); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 948 | num_waiters_--; |
Ian Rogers | d45f201 | 2012-11-28 11:46:23 -0800 | [diff] [blame] | 949 | // We awoke and so no longer require awakes from the guard_'s unlock. |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 950 | CHECK_GE(guard_.num_contenders_.load(std::memory_order_relaxed), 0); |
Ian Rogers | b122a4b | 2013-11-19 18:00:50 -0800 | [diff] [blame] | 951 | guard_.num_contenders_--; |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 952 | #else |
Narayan Kamath | 51b7102 | 2014-03-04 11:57:09 +0000 | [diff] [blame] | 953 | #if !defined(__APPLE__) |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 954 | int clock = CLOCK_MONOTONIC; |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 955 | #else |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 956 | int clock = CLOCK_REALTIME; |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 957 | #endif |
Hans Boehm | 0882af2 | 2017-08-31 15:21:57 -0700 | [diff] [blame] | 958 | pid_t old_owner = guard_.GetExclusiveOwnerTid(); |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 959 | guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 960 | guard_.recursion_count_ = 0; |
| 961 | timespec ts; |
Brian Carlstrom | bcc2926 | 2012-11-02 11:36:03 -0700 | [diff] [blame] | 962 | InitTimeSpec(true, clock, ms, ns, &ts); |
Josh Gao | 2d899c4 | 2018-10-17 16:03:42 -0700 | [diff] [blame] | 963 | int rc; |
| 964 | while ((rc = pthread_cond_timedwait(&cond_, &guard_.mutex_, &ts)) == EINTR) { |
| 965 | continue; |
| 966 | } |
| 967 | |
Ian Rogers | 7b078e8 | 2014-09-10 14:44:24 -0700 | [diff] [blame] | 968 | if (rc == ETIMEDOUT) { |
| 969 | timed_out = true; |
| 970 | } else if (rc != 0) { |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 971 | errno = rc; |
| 972 | PLOG(FATAL) << "TimedWait failed for " << name_; |
| 973 | } |
Orion Hodson | 88591fe | 2018-03-06 13:35:43 +0000 | [diff] [blame] | 974 | guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed); |
Ian Rogers | c604d73 | 2012-10-14 16:09:54 -0700 | [diff] [blame] | 975 | #endif |
| 976 | guard_.recursion_count_ = old_recursion_count; |
Ian Rogers | 7b078e8 | 2014-09-10 14:44:24 -0700 | [diff] [blame] | 977 | return timed_out; |
Elliott Hughes | 5f79133 | 2011-09-15 17:45:30 -0700 | [diff] [blame] | 978 | } |
| 979 | |
Elliott Hughes | e62934d | 2012-04-09 11:24:29 -0700 | [diff] [blame] | 980 | } // namespace art |