/*
 * Copyright (C) 2010 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <errno.h>
#include <stdatomic.h>

#include "pthread_internal.h"
#include "private/bionic_futex.h"
#include "private/bionic_time_conversions.h"

/* Technical note:
 *
 * Possible states of a read/write lock:
 *
 *  - no readers and no writer (unlocked)
 *  - one or more readers sharing the lock at the same time (read-locked)
 *  - one writer holding the lock (write-locked)
 *
 * Additionally:
 *  - trying to get the write-lock while there are any readers blocks
 *  - trying to get the read-lock while there is a writer blocks
 *  - a single thread can acquire the lock multiple times in read mode
 *
 *  - POSIX states that behavior is undefined (may deadlock) if a thread tries
 *    to acquire the lock
 *      - in write mode while already holding the lock (whether in read or write mode)
 *      - in read mode while already holding the lock in write mode.
 *  - This implementation returns EDEADLK in the "write after write" and "read after
 *    write" cases and will deadlock in the "write after read" case.
 *
 * TODO: As it stands now, pending_readers and pending_writers could be merged into
 * a single waiters variable. Keeping them separate adds a bit of clarity and keeps
 * the door open for a writer-biased implementation.
 *
 */
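
/* A minimal usage sketch (illustrative only; lock, shared_value, reader and writer
 * are hypothetical names, not part of this file):
 *
 *   static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
 *   static int shared_value;
 *
 *   void* reader(void* arg) {
 *     pthread_rwlock_rdlock(&lock);   // Shared: many readers may hold the lock at once.
 *     int copy = shared_value;
 *     pthread_rwlock_unlock(&lock);
 *     return NULL;
 *   }
 *
 *   void* writer(void* arg) {
 *     pthread_rwlock_wrlock(&lock);   // Exclusive: blocks until no readers or writer remain.
 *     ++shared_value;
 *     pthread_rwlock_unlock(&lock);
 *     return NULL;
 *   }
 */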

#define RWLOCKATTR_DEFAULT     0
#define RWLOCKATTR_SHARED_MASK 0x0010

int pthread_rwlockattr_init(pthread_rwlockattr_t* attr) {
  *attr = PTHREAD_PROCESS_PRIVATE;
  return 0;
}

int pthread_rwlockattr_destroy(pthread_rwlockattr_t* attr) {
  *attr = -1;
  return 0;
}

int pthread_rwlockattr_setpshared(pthread_rwlockattr_t* attr, int pshared) {
  switch (pshared) {
    case PTHREAD_PROCESS_PRIVATE:
    case PTHREAD_PROCESS_SHARED:
      *attr = pshared;
      return 0;
    default:
      return EINVAL;
  }
}

int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* attr, int* pshared) {
  *pshared = *attr;
  return 0;
}

struct pthread_rwlock_internal_t {
  atomic_int state;  // 0=unlock, -1=writer lock, +n=reader lock
  atomic_int writer_thread_id;
  atomic_uint pending_readers;
  atomic_uint pending_writers;
  int32_t attr;

  bool process_shared() const {
    return attr == PTHREAD_PROCESS_SHARED;
  }

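  // Reserved bytes pad this structure to the size of the public pthread_rwlock_t;
  // the static_assert in __get_internal_rwlock() below checks that the sizes match.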
#if defined(__LP64__)
  char __reserved[36];
#else
  char __reserved[20];
#endif
};

static inline pthread_rwlock_internal_t* __get_internal_rwlock(pthread_rwlock_t* rwlock_interface) {
  static_assert(sizeof(pthread_rwlock_t) == sizeof(pthread_rwlock_internal_t),
                "pthread_rwlock_t should actually be pthread_rwlock_internal_t in implementation.");
  return reinterpret_cast<pthread_rwlock_internal_t*>(rwlock_interface);
}

int pthread_rwlock_init(pthread_rwlock_t* rwlock_interface, const pthread_rwlockattr_t* attr) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  if (__predict_true(attr == NULL)) {
    rwlock->attr = 0;
  } else {
    switch (*attr) {
      case PTHREAD_PROCESS_SHARED:
      case PTHREAD_PROCESS_PRIVATE:
        rwlock->attr = *attr;
        break;
      default:
        return EINVAL;
    }
  }

  atomic_init(&rwlock->state, 0);
  atomic_init(&rwlock->writer_thread_id, 0);
  atomic_init(&rwlock->pending_readers, 0);
  atomic_init(&rwlock->pending_writers, 0);

  return 0;
}

int pthread_rwlock_destroy(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  if (atomic_load_explicit(&rwlock->state, memory_order_relaxed) != 0) {
    return EBUSY;
  }
  return 0;
}

static int __pthread_rwlock_timedrdlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {

  if (__predict_false(__get_thread()->tid == atomic_load_explicit(&rwlock->writer_thread_id,
                                                                  memory_order_relaxed))) {
    return EDEADLK;
  }

  while (true) {
    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__predict_true(old_state >= 0)) {
      if (atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, old_state + 1,
                                                memory_order_acquire, memory_order_relaxed)) {
        return 0;
      }
    } else {
      timespec ts;
      timespec* rel_timeout = NULL;

      if (abs_timeout_or_null != NULL) {
        rel_timeout = &ts;
        if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, CLOCK_REALTIME)) {
          return ETIMEDOUT;
        }
      }

      // To avoid losing wake-ups, the pending_readers increment should be observed before
      // futex_wait by all threads. A seq_cst fence instead of a seq_cst operation is used
      // here, because only a seq_cst fence can ensure sequential consistency for the
      // non-atomic operations in futex_wait.
      atomic_fetch_add_explicit(&rwlock->pending_readers, 1, memory_order_relaxed);

      atomic_thread_fence(memory_order_seq_cst);

      int ret = __futex_wait_ex(&rwlock->state, rwlock->process_shared(), old_state,
                                rel_timeout);

      atomic_fetch_sub_explicit(&rwlock->pending_readers, 1, memory_order_relaxed);

      if (ret == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
    }
  }
}

static int __pthread_rwlock_timedwrlock(pthread_rwlock_internal_t* rwlock,
                                        const timespec* abs_timeout_or_null) {

  if (__predict_false(__get_thread()->tid == atomic_load_explicit(&rwlock->writer_thread_id,
                                                                  memory_order_relaxed))) {
    return EDEADLK;
  }

  while (true) {
    int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
    if (__predict_true(old_state == 0)) {
      if (atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, -1,
                                                memory_order_acquire, memory_order_relaxed)) {
        // writer_thread_id is protected by the rwlock and can only be modified by the
        // thread that owns the write lock. Other threads may read it for EDEADLK error
        // checking, so a relaxed atomic operation is safe enough for it.
        atomic_store_explicit(&rwlock->writer_thread_id, __get_thread()->tid, memory_order_relaxed);
        return 0;
      }
    } else {
      timespec ts;
      timespec* rel_timeout = NULL;

      if (abs_timeout_or_null != NULL) {
        rel_timeout = &ts;
        if (!timespec_from_absolute_timespec(*rel_timeout, *abs_timeout_or_null, CLOCK_REALTIME)) {
          return ETIMEDOUT;
        }
      }

      // To avoid losing wake-ups, the pending_writers increment should be observed before
      // futex_wait by all threads. A seq_cst fence instead of a seq_cst operation is used
      // here, because only a seq_cst fence can ensure sequential consistency for the
      // non-atomic operations in futex_wait.
      atomic_fetch_add_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);

      atomic_thread_fence(memory_order_seq_cst);

      int ret = __futex_wait_ex(&rwlock->state, rwlock->process_shared(), old_state,
                                rel_timeout);

      atomic_fetch_sub_explicit(&rwlock->pending_writers, 1, memory_order_relaxed);

      if (ret == -ETIMEDOUT) {
        return ETIMEDOUT;
      }
    }
  }
}

int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedrdlock(rwlock, NULL);
}

int pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock_interface, const timespec* abs_timeout) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedrdlock(rwlock, abs_timeout);
}

int pthread_rwlock_tryrdlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);

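  // On failure, atomic_compare_exchange_weak_explicit reloads old_state (and may also fail
  // spuriously), so the loop retries until the reader count is incremented or a writer appears.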
  while (old_state >= 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state,
                              old_state + 1, memory_order_acquire, memory_order_relaxed)) {
  }
  return (old_state >= 0) ? 0 : EBUSY;
}

int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedwrlock(rwlock, NULL);
}

int pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock_interface, const timespec* abs_timeout) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  return __pthread_rwlock_timedwrlock(rwlock, abs_timeout);
}

int pthread_rwlock_trywrlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);

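  // As in pthread_rwlock_tryrdlock above, retry the CAS on spurious failure while the
  // lock still looks free.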
  while (old_state == 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state, -1,
                              memory_order_acquire, memory_order_relaxed)) {
  }
  if (old_state == 0) {
    atomic_store_explicit(&rwlock->writer_thread_id, __get_thread()->tid, memory_order_relaxed);
    return 0;
  }
  return EBUSY;
}

int pthread_rwlock_unlock(pthread_rwlock_t* rwlock_interface) {
  pthread_rwlock_internal_t* rwlock = __get_internal_rwlock(rwlock_interface);

  int old_state = atomic_load_explicit(&rwlock->state, memory_order_relaxed);
  if (__predict_false(old_state == 0)) {
    return EPERM;
  } else if (old_state == -1) {
    if (atomic_load_explicit(&rwlock->writer_thread_id, memory_order_relaxed) != __get_thread()->tid) {
      return EPERM;
    }
    // We're no longer the owner.
    atomic_store_explicit(&rwlock->writer_thread_id, 0, memory_order_relaxed);
    // Change state from -1 to 0.
    atomic_store_explicit(&rwlock->state, 0, memory_order_release);

  } else { // old_state > 0
    // Reduce state by 1.
    while (old_state > 0 && !atomic_compare_exchange_weak_explicit(&rwlock->state, &old_state,
                               old_state - 1, memory_order_release, memory_order_relaxed)) {
    }

    if (old_state <= 0) {
      return EPERM;
    } else if (old_state > 1) {
      return 0;
    }
    // old_state == 1 means we were the last reader holding the lock, so we have to wake up
    // any waiters.
  }

  // If there are waiters, wake them up.
  // To avoid losing wake-ups, the update of state should be observed before reading
  // pending_readers/pending_writers by all threads. Using read locking as an example:
  //
  //    read locking thread                  unlocking thread
  //    pending_readers++;                   state = 0;
  //    seq_cst fence                        seq_cst fence
  //    read state for futex_wait            read pending_readers for futex_wake
  //
  // So when locking and unlocking threads run in parallel, we can never end up in a
  // situation where the locking thread reads state as negative and needs to wait, while
  // the unlocking thread reads pending_readers as zero and doesn't need to wake up waiters.
  atomic_thread_fence(memory_order_seq_cst);
  if (__predict_false(atomic_load_explicit(&rwlock->pending_readers, memory_order_relaxed) > 0 ||
                      atomic_load_explicit(&rwlock->pending_writers, memory_order_relaxed) > 0)) {
    __futex_wake_ex(&rwlock->state, rwlock->process_shared(), INT_MAX);
  }
  return 0;
}