/*
 * Copyright (C) 2010 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <stdatomic.h>

#include "pthread_internal.h"
#include "private/bionic_futex.h"
#include "private/bionic_time_conversions.h"

/* Technical note:
 *
 * Possible states of a read/write lock:
 *
 *  - no readers and no writer (unlocked)
 *  - one or more readers sharing the lock at the same time (read-locked)
 *  - one writer holding the lock (write-locked)
 *
 * Additionally:
 *  - trying to get the write-lock while there are any readers blocks
 *  - trying to get the read-lock while there is a writer blocks
 *  - a single thread can acquire the lock multiple times in read mode
 *
 *  - POSIX states that behavior is undefined (may deadlock) if a thread tries
 *    to acquire the lock
 *      - in write mode while already holding the lock (whether in read or write mode)
 *      - in read mode while already holding the lock in write mode.
 *  - This implementation will return EDEADLK in the "write after write" and "read after
 *    write" cases and will deadlock in the "write after read" case.
 *
 * TODO: As it stands now, pending_readers and pending_writers could be merged into a
 * single waiters variable. Keeping them separate adds a bit of clarity and keeps
 * the door open for a writer-biased implementation.
 */
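
/* A minimal usage sketch of the states described above (illustrative only; `lock` is a
 * hypothetical caller-side variable, and most error handling is omitted):
 *
 *   pthread_rwlock_t lock;
 *   pthread_rwlock_init(&lock, NULL);  // unlocked
 *
 *   pthread_rwlock_rdlock(&lock);      // read-locked, one reader
 *   pthread_rwlock_rdlock(&lock);      // still read-locked, two readers (a thread may
 *                                      //   acquire the lock multiple times in read mode)
 *   pthread_rwlock_unlock(&lock);
 *   pthread_rwlock_unlock(&lock);      // unlocked again
 *
 *   pthread_rwlock_wrlock(&lock);      // write-locked, exclusive
 *   pthread_rwlock_wrlock(&lock);      // returns EDEADLK ("write after write")
 *   pthread_rwlock_unlock(&lock);
 *   pthread_rwlock_destroy(&lock);
 */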
#define RWLOCKATTR_DEFAULT     0
#define RWLOCKATTR_SHARED_MASK 0x0010

int pthread_rwlockattr_init(pthread_rwlockattr_t* attr) {
  *attr = PTHREAD_PROCESS_PRIVATE;
  return 0;
}

int pthread_rwlockattr_destroy(pthread_rwlockattr_t* attr) {
  *attr = -1;
  return 0;
}

int pthread_rwlockattr_setpshared(pthread_rwlockattr_t* attr, int pshared) {
  switch (pshared) {
    case PTHREAD_PROCESS_PRIVATE:
    case PTHREAD_PROCESS_SHARED:
      *attr = pshared;
      return 0;
    default:
      return EINVAL;
  }
}

int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* attr, int* pshared) {
  *pshared = *attr;
  return 0;
}

struct pthread_rwlock_internal_t {
  atomic_int state;  // 0 = unlocked, -1 = a writer holds the lock, +n = n readers hold the lock.
  atomic_int writer_thread_id;
  atomic_uint pending_readers;
  atomic_uint pending_writers;
  int32_t attr;

  bool process_shared() const {
    return attr == PTHREAD_PROCESS_SHARED;
  }

#if defined(__LP64__)
  char __reserved[36];
#else
  char __reserved[20];
#endif
};
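
// For illustration, one possible sequence of state values for an initially unlocked lock,
// with two readers followed by one writer:
//
//   state ==  0   unlocked
//   state ==  1   after the first rdlock
//   state ==  2   after a second rdlock
//   state ==  1   after one unlock
//   state ==  0   after the last unlock
//   state == -1   after a wrlock; writer_thread_id records the owner's tid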

static inline pthread_rwlock_internal_t* __get_internal_rwlock(pthread_rwlock_t* rwlock_interface) {
  static_assert(sizeof(pthread_rwlock_t) == sizeof(pthread_rwlock_internal_t),
                "pthread_rwlock_t should actually be pthread_rwlock_internal_t in implementation.");
  return reinterpret_cast<pthread_rwlock_internal_t*>(rwlock_interface);
}

int pthread_rwlock_init(pthread_rwlock_t* rwlock_interface, const pthread_rwlockattr_t* attr) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  if (__predict_true(attr == NULL)) {
    rwlock->attr = 0;
  } else {
    switch (*attr) {
      case PTHREAD_PROCESS_SHARED:
      case PTHREAD_PROCESS_PRIVATE:
        rwlock->attr = *attr;
        break;
      default:
        return EINVAL;
    }
  }

  atomic_init(&rwlock->state, 0);
  atomic_init(&rwlock->writer_thread_id, 0);
  atomic_init(&rwlock->pending_readers, 0);
  atomic_init(&rwlock->pending_writers, 0);

  return 0;
}

int pthread_rwlock_destroy(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  if (atomic_load_explicit(&rwlock->state, memory_order_relaxed) != 0) {
    return EBUSY;
  }
  return 0;
}

static int __pthread_rwlock_timedrdlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {

  if (__predict_false(__get_thread()->tid ==
      atomic_load_explicit(&rwlock->writer_thread_id, memory_order_relaxed))) {
    return EDEADLK;
  }

  while (true) {
    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__predict_true(old_state >= 0)) {
      if (atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, old_state + 1,
                                                memory_order_acquire, memory_order_relaxed)) {
        return 0;
      }
    } else {
      timespec ts;
      timespec* rel_timeout = NULL;

      if (abs_timeout_or_null != NULL) {
        rel_timeout = &ts;
        if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, CLOCK_REALTIME)) {
          return ETIMEDOUT;
        }
      }

      // To avoid losing wake ups, the pending_readers increment should be observed before
      // futex_wait by all threads. A seq_cst fence instead of a seq_cst operation is used here,
      // because only a seq_cst fence can ensure sequential consistency for the non-atomic
      // operations in futex_wait.
      atomic_fetch_add_explicit(&rwlock->pending_readers, 1, memory_order_relaxed);

      atomic_thread_fence(memory_order_seq_cst);

      int ret = __futex_wait_ex(&rwlock->state, rwlock->process_shared(), old_state,
                                rel_timeout);

      atomic_fetch_sub_explicit(&rwlock->pending_readers, 1, memory_order_relaxed);

      if (ret == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
    }
  }
}

static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {

  if (__predict_false(__get_thread()->tid ==
      atomic_load_explicit(&rwlock->writer_thread_id, memory_order_relaxed))) {
    return EDEADLK;
  }

  while (true) {
    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__predict_true(old_state == 0)) {
      if (atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, -1,
                                                memory_order_acquire, memory_order_relaxed)) {
        // writer_thread_id is protected by the rwlock and can only be modified by the thread
        // that holds the write lock. Other threads may read it for EDEADLK error checking, so
        // a relaxed atomic operation is safe enough for it.
        atomic_store_explicit(&rwlock->writer_thread_id, __get_thread()->tid, memory_order_relaxed);
        return 0;
      }
    } else {
      timespec ts;
      timespec* rel_timeout = NULL;

      if (abs_timeout_or_null != NULL) {
        rel_timeout = &ts;
        if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, CLOCK_REALTIME)) {
          return ETIMEDOUT;
        }
      }

      // To avoid losing wake ups, the pending_writers increment should be observed before
      // futex_wait by all threads. A seq_cst fence instead of a seq_cst operation is used here,
      // because only a seq_cst fence can ensure sequential consistency for the non-atomic
      // operations in futex_wait.
      atomic_fetch_add_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);

      atomic_thread_fence(memory_order_seq_cst);

      int ret = __futex_wait_ex(&rwlock->state, rwlock->process_shared(), old_state,
                                rel_timeout);

      atomic_fetch_sub_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);

      if (ret == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
    }
  }
}

int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedrdlock(rwlock, NULL);
}

int pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock_interface, const timespec* abs_timeout) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedrdlock(rwlock, abs_timeout);
}
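
/* Caller-side sketch (illustrative; `lock` is a hypothetical, already-initialized rwlock):
 * abs_timeout is an absolute CLOCK_REALTIME time, as the conversion through
 * timespec_from_absolute_timespec(..., CLOCK_REALTIME) above shows. To wait at most
 * roughly 100ms for a read lock, a caller might write:
 *
 *   timespec ts;
 *   clock_gettime(CLOCK_REALTIME, &ts);
 *   ts.tv_nsec += 100 * 1000 * 1000;  // 100ms from now.
 *   if (ts.tv_nsec >= 1000000000) {
 *     ts.tv_sec += 1;
 *     ts.tv_nsec -= 1000000000;
 *   }
 *   int rc = pthread_rwlock_timedrdlock(&lock, &ts);  // 0 on success, ETIMEDOUT on timeout.
 */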

int pthread_rwlock_tryrdlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);

  while (old_state >= 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state,
                             old_state + 1, memory_order_acquire, memory_order_relaxed)) {
  }
  return (old_state >= 0) ? 0 : EBUSY;
}

int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedwrlock(rwlock, NULL);
}

int pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock_interface, const timespec* abs_timeout) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedwrlock(rwlock, abs_timeout);
}

int pthread_rwlock_trywrlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);

  while (old_state == 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, -1,
                             memory_order_acquire, memory_order_relaxed)) {
  }
  if (old_state == 0) {
    atomic_store_explicit(&rwlock->writer_thread_id, __get_thread()->tid, memory_order_relaxed);
    return 0;
  }
  return EBUSY;
}
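
/* Illustrative sketch: the try variants never block; EBUSY simply reports that the lock is
 * held in an incompatible mode (readers make trywrlock fail, a writer makes both fail).
 *
 *   if (pthread_rwlock_trywrlock(&lock) == EBUSY) {
 *     // Readers or another writer hold the lock; do other work instead of blocking.
 *   }
 */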

int pthread_rwlock_unlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
  if (__predict_false(old_state == 0)) {
    return EPERM;
  } else if (old_state == -1) {
    if (atomic_load_explicit(&rwlock->writer_thread_id, memory_order_relaxed) != __get_thread()->tid) {
      return EPERM;
    }
    // We're no longer the owner.
    atomic_store_explicit(&rwlock->writer_thread_id, 0, memory_order_relaxed);
    // Change state from -1 to 0.
    atomic_store_explicit(&rwlock->state, 0, memory_order_release);

  } else { // old_state > 0
    // Reduce state by 1.
    while (old_state > 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state,
                              old_state - 1, memory_order_release, memory_order_relaxed)) {
    }

    if (old_state <= 0) {
      return EPERM;
    } else if (old_state > 1) {
      return 0;
    }
    // old_state == 1, so we are the last reader unlocking and have to wake up waiters.
  }

  // If there are waiters, wake them up.
  // To avoid losing wake ups, the update of state should be observed before reading
  // pending_readers/pending_writers by all threads. Using read locking as an example:
  //    read locking thread                   unlocking thread
  //      pending_readers++;                    state = 0;
  //      seq_cst fence                         seq_cst fence
  //      read state for futex_wait             read pending_readers for futex_wake
  //
  // So when locking and unlocking threads run in parallel, we can never get into a
  // situation where the locking thread reads state as negative and needs to wait, while
  // the unlocking thread reads pending_readers as zero and doesn't need to wake up waiters.
  atomic_thread_fence(memory_order_seq_cst);
  if (__predict_false(atomic_load_explicit(&rwlock->pending_readers, memory_order_relaxed) > 0 ||
                      atomic_load_explicit(&rwlock->pending_writers, memory_order_relaxed) > 0)) {
    __futex_wake_ex(&rwlock->state, rwlock->process_shared(), INT_MAX);
  }
  return 0;
}