/*
 * Copyright (C) 2010 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <stdatomic.h>

#include "pthread_internal.h"
#include "private/bionic_futex.h"
#include "private/bionic_time_conversions.h"

/* Technical note:
 *
 * Possible states of a read/write lock:
 *
 *  - no readers and no writer (unlocked)
 *  - one or more readers sharing the lock at the same time (read-locked)
 *  - one writer holding the lock (write-locked)
 *
 * Additionally:
 *  - trying to get the write-lock while there are any readers blocks
 *  - trying to get the read-lock while there is a writer blocks
 *  - a single thread can acquire the lock multiple times in read mode
 *
 *  - POSIX states that behavior is undefined (may deadlock) if a thread tries
 *    to acquire the lock
 *      - in write mode while already holding the lock (whether in read or write mode)
 *      - in read mode while already holding the lock in write mode.
 *  - This implementation returns EDEADLK in the "write after write" and "read after
 *    write" cases and deadlocks in the "write after read" case (see the sketch below).
 *
 * TODO: As it stands now, pending_readers and pending_writers could be merged into
 * a single waiters variable. Keeping them separate adds a bit of clarity and keeps
 * the door open for a writer-biased implementation.
 */

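/* Illustrative sketch of those cases (not part of the original source), assuming a
 * hypothetical single thread performing every call:
 *
 *   pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
 *   pthread_rwlock_wrlock(&lock);   // returns 0; lock is write-locked
 *   pthread_rwlock_wrlock(&lock);   // returns EDEADLK ("write after write")
 *   pthread_rwlock_rdlock(&lock);   // returns EDEADLK ("read after write")
 *   pthread_rwlock_unlock(&lock);   // returns 0; lock is unlocked again
 *   pthread_rwlock_rdlock(&lock);   // returns 0; lock is read-locked
 *   pthread_rwlock_wrlock(&lock);   // never returns ("write after read" deadlock)
 */
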
#define RWLOCKATTR_DEFAULT 0
#define RWLOCKATTR_SHARED_MASK 0x0010

int pthread_rwlockattr_init(pthread_rwlockattr_t* attr) {
  *attr = PTHREAD_PROCESS_PRIVATE;
  return 0;
}

int pthread_rwlockattr_destroy(pthread_rwlockattr_t* attr) {
  *attr = -1;
  return 0;
}

int pthread_rwlockattr_setpshared(pthread_rwlockattr_t* attr, int pshared) {
  switch (pshared) {
    case PTHREAD_PROCESS_PRIVATE:
    case PTHREAD_PROCESS_SHARED:
      *attr = pshared;
      return 0;
    default:
      return EINVAL;
  }
}

int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* attr, int* pshared) {
  *pshared = *attr;
  return 0;
}

struct pthread_rwlock_internal_t {
  atomic_int state;  // 0 means unlocked, -1 means write-locked, +n means locked by n readers.
  atomic_int writer_thread_id;
  atomic_uint pending_readers;
  atomic_uint pending_writers;
  int32_t attr;

  bool process_shared() const {
    return attr == PTHREAD_PROCESS_SHARED;
  }

#if defined(__LP64__)
  char __reserved[36];
#else
  char __reserved[20];
#endif
};

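// Illustrative state transitions (a sketch, not part of the original source):
//
//   state ==  0    unlocked
//   state == +3    after three successful rdlock CAS increments
//   state == +2    after one of those readers calls unlock
//   state == -1    after a writer's successful CAS from 0 to -1
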
static_assert(sizeof(pthread_rwlock_t) == sizeof(pthread_rwlock_internal_t),
              "pthread_rwlock_t should actually be pthread_rwlock_internal_t in implementation.");

// For binary compatibility with old versions of pthread_rwlock_t, we can't require
// stricter than 4-byte alignment.
static_assert(alignof(pthread_rwlock_t) == 4,
              "pthread_rwlock_t should fulfill the alignment requirement of pthread_rwlock_internal_t.");

static inline pthread_rwlock_internal_t* __get_internal_rwlock(pthread_rwlock_t* rwlock_interface) {
  return reinterpret_cast<pthread_rwlock_internal_t*>(rwlock_interface);
}

int pthread_rwlock_init(pthread_rwlock_t* rwlock_interface, const pthread_rwlockattr_t* attr) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  if (__predict_true(attr == NULL)) {
    rwlock->attr = 0;
  } else {
    switch (*attr) {
      case PTHREAD_PROCESS_SHARED:
      case PTHREAD_PROCESS_PRIVATE:
        rwlock->attr = *attr;
        break;
      default:
        return EINVAL;
    }
  }

  atomic_init(&rwlock->state, 0);
  atomic_init(&rwlock->writer_thread_id, 0);
  atomic_init(&rwlock->pending_readers, 0);
  atomic_init(&rwlock->pending_writers, 0);

  return 0;
}

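// Illustrative usage sketch (not part of the original source): initializing a rwlock
// suitable for memory shared between processes; error handling omitted.
//
//   pthread_rwlockattr_t attr;
//   pthread_rwlockattr_init(&attr);       // attr = PTHREAD_PROCESS_PRIVATE
//   pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
//   pthread_rwlock_t lock;
//   pthread_rwlock_init(&lock, &attr);    // stores PTHREAD_PROCESS_SHARED in rwlock->attr
//   pthread_rwlockattr_destroy(&attr);
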
int pthread_rwlock_destroy(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  if (atomic_load_explicit(&rwlock->state, memory_order_relaxed) != 0) {
    return EBUSY;
  }
  return 0;
}

static int __pthread_rwlock_timedrdlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {

  if (__predict_false(__get_thread()->tid == atomic_load_explicit(&rwlock->writer_thread_id,
                                                                  memory_order_relaxed))) {
    return EDEADLK;
  }

  while (true) {
    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__predict_true(old_state >= 0)) {
      if (atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, old_state + 1,
                                                memory_order_acquire, memory_order_relaxed)) {
        return 0;
      }
    } else {
      timespec ts;
      timespec* rel_timeout = NULL;

      if (abs_timeout_or_null != NULL) {
        rel_timeout = &ts;
        if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, CLOCK_REALTIME)) {
          return ETIMEDOUT;
        }
      }

      // To avoid losing wake-ups, the increment of pending_readers must be observed by all
      // threads before the futex_wait call. A seq_cst fence is used instead of a seq_cst
      // operation, because only a fence guarantees sequential consistency with respect to
      // the non-atomic operations performed inside futex_wait.
      atomic_fetch_add_explicit(&rwlock->pending_readers, 1, memory_order_relaxed);

      atomic_thread_fence(memory_order_seq_cst);

      int ret = __futex_wait_ex(&rwlock->state, rwlock->process_shared(), old_state,
                                rel_timeout);

      atomic_fetch_sub_explicit(&rwlock->pending_readers, 1, memory_order_relaxed);

      if (ret == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
    }
  }
}

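// Both slow paths (read and write) follow the same waiter-side futex idiom, distilled
// here as a sketch (waiter_count stands in for pending_readers or pending_writers; this
// is not literal code from the file):
//
//   atomic_fetch_add_explicit(&waiter_count, 1, memory_order_relaxed);  // announce waiter
//   atomic_thread_fence(memory_order_seq_cst);          // pairs with the fence in unlock
//   __futex_wait_ex(&state, shared, old_state, timeout);  // kernel re-checks state == old_state
//   atomic_fetch_sub_explicit(&waiter_count, 1, memory_order_relaxed);  // retract waiter
//
// If state changed between the relaxed load and the futex_wait, the wait returns
// immediately and the outer while loop retries the compare-exchange.
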
static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {

  if (__predict_false(__get_thread()->tid == atomic_load_explicit(&rwlock->writer_thread_id,
                                                                  memory_order_relaxed))) {
    return EDEADLK;
  }

  while (true) {
    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__predict_true(old_state == 0)) {
      if (atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, -1,
                                                memory_order_acquire, memory_order_relaxed)) {
        // writer_thread_id is protected by the rwlock and can only be modified by the
        // thread that owns the write lock. Other threads may read it for EDEADLK error
        // checking, so a relaxed atomic operation is sufficient.
        atomic_store_explicit(&rwlock->writer_thread_id, __get_thread()->tid, memory_order_relaxed);
        return 0;
      }
    } else {
      timespec ts;
      timespec* rel_timeout = NULL;

      if (abs_timeout_or_null != NULL) {
        rel_timeout = &ts;
        if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, CLOCK_REALTIME)) {
          return ETIMEDOUT;
        }
      }

      // To avoid losing wake-ups, the increment of pending_writers must be observed by all
      // threads before the futex_wait call. A seq_cst fence is used instead of a seq_cst
      // operation, because only a fence guarantees sequential consistency with respect to
      // the non-atomic operations performed inside futex_wait.
      atomic_fetch_add_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);

      atomic_thread_fence(memory_order_seq_cst);

      int ret = __futex_wait_ex(&rwlock->state, rwlock->process_shared(), old_state,
                                rel_timeout);

      atomic_fetch_sub_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);

      if (ret == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
    }
  }
}

int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedrdlock(rwlock, NULL);
}

int pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock_interface, const timespec* abs_timeout) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedrdlock(rwlock, abs_timeout);
}

int pthread_rwlock_tryrdlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);

  while (old_state >= 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state,
                             old_state + 1, memory_order_acquire, memory_order_relaxed)) {
  }
  return (old_state >= 0) ? 0 : EBUSY;
}

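// Illustrative behavior of the try* variants, including trywrlock below (a sketch, not
// part of the original source): they never block on the futex, failing fast with EBUSY
// instead. Note that unlike the blocking paths, they perform no EDEADLK owner check.
//
//   pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
//   pthread_rwlock_wrlock(&lock);      // state: 0 -> -1
//   pthread_rwlock_tryrdlock(&lock);   // returns EBUSY (state < 0)
//   pthread_rwlock_trywrlock(&lock);   // returns EBUSY (state != 0)
//   pthread_rwlock_unlock(&lock);      // state: -1 -> 0
//   pthread_rwlock_tryrdlock(&lock);   // returns 0 (state: 0 -> +1)
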
int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedwrlock(rwlock, NULL);
}

int pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock_interface, const timespec* abs_timeout) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedwrlock(rwlock, abs_timeout);
}

int pthread_rwlock_trywrlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);

  while (old_state == 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, -1,
                             memory_order_acquire, memory_order_relaxed)) {
  }
  if (old_state == 0) {
    atomic_store_explicit(&rwlock->writer_thread_id, __get_thread()->tid, memory_order_relaxed);
    return 0;
  }
  return EBUSY;
}

int pthread_rwlock_unlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
  if (__predict_false(old_state == 0)) {
    return EPERM;
  } else if (old_state == -1) {
    if (atomic_load_explicit(&rwlock->writer_thread_id, memory_order_relaxed) != __get_thread()->tid) {
      return EPERM;
    }
    // We're no longer the owner.
    atomic_store_explicit(&rwlock->writer_thread_id, 0, memory_order_relaxed);
    // Change state from -1 to 0.
    atomic_store_explicit(&rwlock->state, 0, memory_order_release);

  } else { // old_state > 0
    // Reduce state by 1.
    while (old_state > 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state,
                              old_state - 1, memory_order_release, memory_order_relaxed)) {
    }

    if (old_state <= 0) {
      return EPERM;
    } else if (old_state > 1) {
      return 0;
    }
    // old_state == 1, so this is the last reader unlocking; it has to wake up waiters.
  }

  // If there are waiters, wake them up.
  // To avoid losing wake-ups, the update of state must be observed by all threads before
  // pending_readers/pending_writers are read. Using read locking as an example:
  //    read locking thread                unlocking thread
  //      pending_readers++;                 state = 0;
  //      seq_cst fence                      seq_cst fence
  //      read state for futex_wait          read pending_readers for futex_wake
  //
  // So when locking and unlocking threads run in parallel, we can never end up in a
  // situation where the locking thread reads state as negative and needs to wait, while
  // the unlocking thread reads pending_readers as zero and doesn't wake up waiters.
  atomic_thread_fence(memory_order_seq_cst);
  if (__predict_false(atomic_load_explicit(&rwlock->pending_readers, memory_order_relaxed) > 0 ||
                      atomic_load_explicit(&rwlock->pending_writers, memory_order_relaxed) > 0)) {
    __futex_wake_ex(&rwlock->state, rwlock->process_shared(), INT_MAX);
  }
  return 0;
}
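
/* Illustrative end-to-end usage sketch (not part of the original source): one writer
 * and many readers protecting a shared counter. Error handling is omitted, and
 * reader/writer/g_shared_value are hypothetical names.
 *
 *   static pthread_rwlock_t g_lock = PTHREAD_RWLOCK_INITIALIZER;
 *   static int g_shared_value = 0;
 *
 *   void* reader(void*) {
 *     pthread_rwlock_rdlock(&g_lock);   // state: +n -> +n+1; readers share the lock
 *     int v = g_shared_value;           // safe: writers are excluded while state > 0
 *     pthread_rwlock_unlock(&g_lock);   // the last reader (state 1 -> 0) wakes waiters
 *     return reinterpret_cast<void*>(static_cast<intptr_t>(v));
 *   }
 *
 *   void* writer(void*) {
 *     pthread_rwlock_wrlock(&g_lock);   // blocks until state == 0, then sets it to -1
 *     ++g_shared_value;                 // exclusive access
 *     pthread_rwlock_unlock(&g_lock);   // state: -1 -> 0; wakes pending readers/writers
 *     return NULL;
 *   }
 */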