/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <pthread.h>

#include <errno.h>
#include <limits.h>
#include <stdatomic.h>
#include <sys/cdefs.h>
#include <sys/mman.h>
#include <unistd.h>

#include "pthread_internal.h"

#include "private/bionic_constants.h"
#include "private/bionic_futex.h"
#include "private/bionic_systrace.h"
#include "private/bionic_time_conversions.h"
#include "private/bionic_tls.h"

/* A mutex is implemented as a 32-bit integer holding the following fields:
 *
 * bits:   name     description
 * 31-16   tid      owner thread's tid (recursive and errorcheck only)
 * 15-14   type     mutex type
 * 13      shared   process-shared flag
 * 12-2    counter  counter of recursive mutexes
 * 1-0     state    lock state (0, 1 or 2)
 */
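
/* Worked example (illustrative only, not used by the implementation): with the
 * layout above, a non-shared recursive mutex owned by tid 42, locked once and
 * uncontended, is encoded as
 *
 *   (42 << 16) | (1 << 14) | (0 << 13) | (0 << 2) | 1 == 0x2a4001
 *
 * i.e. owner tid 42, type 1 (recursive), shared 0, counter 0, state 1.
 */
static_assert(((42 << 16) | (1 << 14) | (0 << 13) | (0 << 2) | 1) == 0x2a4001,
              "illustrative mutex value decomposition");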

/* Convenience macro, creates a mask of 'bits' bits that starts from
 * the 'shift'-th least significant bit in a 32-bit word.
 *
 * Examples: FIELD_MASK(0,4)  -> 0xf
 *           FIELD_MASK(16,9) -> 0x1ff0000
 */
#define  FIELD_MASK(shift,bits)           (((1 << (bits))-1) << (shift))

/* This one is used to create a bit pattern from a given field value */
#define  FIELD_TO_BITS(val,shift,bits)    (((val) & ((1 << (bits))-1)) << (shift))

/* And this one does the opposite, i.e. extracts a field's value from a bit pattern */
#define  FIELD_FROM_BITS(val,shift,bits)  (((val) >> (shift)) & ((1 << (bits))-1))
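
// Sanity checks for the field helpers above (illustrative only; these mirror the
// examples given in the comments and are not required by the implementation).
static_assert(FIELD_MASK(0, 4) == 0xf, "FIELD_MASK example from the comment above");
static_assert(FIELD_MASK(16, 9) == 0x1ff0000, "FIELD_MASK example from the comment above");
static_assert(FIELD_TO_BITS(3, 14, 2) == 0xc000, "FIELD_TO_BITS places a value at bits 15-14");
static_assert(FIELD_FROM_BITS(0xc000, 14, 2) == 3, "FIELD_FROM_BITS extracts the same value back");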

/* Mutex state:
 *
 * 0 for unlocked
 * 1 for locked, no waiters
 * 2 for locked, maybe waiters
 */
#define  MUTEX_STATE_SHIFT      0
#define  MUTEX_STATE_LEN        2

#define  MUTEX_STATE_MASK           FIELD_MASK(MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_FROM_BITS(v)   FIELD_FROM_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_TO_BITS(v)     FIELD_TO_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)

#define  MUTEX_STATE_UNLOCKED            0   /* must be 0 to match __PTHREAD_MUTEX_INIT_VALUE */
#define  MUTEX_STATE_LOCKED_UNCONTENDED  1   /* must be 1 due to atomic dec in unlock operation */
#define  MUTEX_STATE_LOCKED_CONTENDED    2   /* must be 1 + LOCKED_UNCONTENDED due to atomic dec */

#define  MUTEX_STATE_BITS_UNLOCKED            MUTEX_STATE_TO_BITS(MUTEX_STATE_UNLOCKED)
#define  MUTEX_STATE_BITS_LOCKED_UNCONTENDED  MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_UNCONTENDED)
#define  MUTEX_STATE_BITS_LOCKED_CONTENDED    MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_CONTENDED)

/* Returns true iff the mutex is locked with no waiters. */
#define  MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(v)  (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_UNCONTENDED)

/* Returns true iff the mutex is locked with maybe waiters. */
#define  MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(v)    (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_CONTENDED)

/* Used to flip from LOCKED_UNCONTENDED to LOCKED_CONTENDED. */
#define  MUTEX_STATE_BITS_FLIP_CONTENTION(v)  ((v) ^ (MUTEX_STATE_BITS_LOCKED_CONTENDED ^ MUTEX_STATE_BITS_LOCKED_UNCONTENDED))
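
// Illustrative check (not required by the implementation): because the two locked
// states differ only in their state bits, XOR-ing with their difference toggles
// between them.
static_assert(MUTEX_STATE_BITS_FLIP_CONTENTION(MUTEX_STATE_BITS_LOCKED_UNCONTENDED) ==
                  MUTEX_STATE_BITS_LOCKED_CONTENDED,
              "flipping an uncontended lock marks it contended");
static_assert(MUTEX_STATE_BITS_FLIP_CONTENTION(MUTEX_STATE_BITS_LOCKED_CONTENDED) ==
                  MUTEX_STATE_BITS_LOCKED_UNCONTENDED,
              "flipping a contended lock marks it uncontended");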

/* Mutex counter:
 *
 * We need to check for overflow before incrementing, and we also need to
 * detect when the counter is 0.
 */
#define  MUTEX_COUNTER_SHIFT  2
#define  MUTEX_COUNTER_LEN    11
#define  MUTEX_COUNTER_MASK   FIELD_MASK(MUTEX_COUNTER_SHIFT, MUTEX_COUNTER_LEN)

#define  MUTEX_COUNTER_BITS_WILL_OVERFLOW(v)  (((v) & MUTEX_COUNTER_MASK) == MUTEX_COUNTER_MASK)
#define  MUTEX_COUNTER_BITS_IS_ZERO(v)        (((v) & MUTEX_COUNTER_MASK) == 0)

/* Used to increment the counter directly after overflow has been checked. */
#define  MUTEX_COUNTER_BITS_ONE  FIELD_TO_BITS(1, MUTEX_COUNTER_SHIFT, MUTEX_COUNTER_LEN)
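
// Illustrative checks of the counter helpers (not required by the implementation):
// the counter occupies bits 12-2, so "plus one" in counter units is 1 << 2, a fully
// saturated counter is about to overflow, and an all-zero value has a zero counter.
static_assert(MUTEX_COUNTER_BITS_ONE == (1 << MUTEX_COUNTER_SHIFT), "counter increment unit");
static_assert(MUTEX_COUNTER_BITS_WILL_OVERFLOW(MUTEX_COUNTER_MASK), "saturated counter overflows next");
static_assert(MUTEX_COUNTER_BITS_IS_ZERO(0), "an all-zero value has a zero counter");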

/* Mutex shared bit flag
 *
 * This flag is set to indicate that the mutex is shared among processes.
 * This changes the futex opcode we use for futex wait/wake operations
 * (non-shared operations are much faster).
 */
#define  MUTEX_SHARED_SHIFT  13
#define  MUTEX_SHARED_MASK   FIELD_MASK(MUTEX_SHARED_SHIFT, 1)

/* Mutex type:
 *
 * We support normal, recursive and errorcheck mutexes.
 *
 * The constants defined here *cannot* be changed because they must match
 * the C library ABI which defines the following initialization values in
 * <pthread.h>:
 *
 *   __PTHREAD_MUTEX_INIT_VALUE
 *   __PTHREAD_RECURSIVE_MUTEX_VALUE
 *   __PTHREAD_ERRORCHECK_MUTEX_INIT_VALUE
 */
#define  MUTEX_TYPE_SHIFT  14
#define  MUTEX_TYPE_LEN    2
#define  MUTEX_TYPE_MASK   FIELD_MASK(MUTEX_TYPE_SHIFT, MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_NORMAL      0  /* Must be 0 to match __PTHREAD_MUTEX_INIT_VALUE */
#define  MUTEX_TYPE_RECURSIVE   1
#define  MUTEX_TYPE_ERRORCHECK  2

#define  MUTEX_TYPE_TO_BITS(t)  FIELD_TO_BITS(t, MUTEX_TYPE_SHIFT, MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_BITS_NORMAL      MUTEX_TYPE_TO_BITS(MUTEX_TYPE_NORMAL)
#define  MUTEX_TYPE_BITS_RECURSIVE   MUTEX_TYPE_TO_BITS(MUTEX_TYPE_RECURSIVE)
#define  MUTEX_TYPE_BITS_ERRORCHECK  MUTEX_TYPE_TO_BITS(MUTEX_TYPE_ERRORCHECK)

/* Mutex owner field:
 *
 * This is only used for recursive and errorcheck mutexes. It holds the
 * tid of the owning thread. We use 16 bits to represent the tid here,
 * so the highest tid is 65535. There is a test that checks /proc/sys/kernel/pid_max
 * to make sure it will not exceed our limit.
 */
#define  MUTEX_OWNER_SHIFT  16
#define  MUTEX_OWNER_LEN    16

#define  MUTEX_OWNER_FROM_BITS(v)  FIELD_FROM_BITS(v, MUTEX_OWNER_SHIFT, MUTEX_OWNER_LEN)
#define  MUTEX_OWNER_TO_BITS(v)    FIELD_TO_BITS(v, MUTEX_OWNER_SHIFT, MUTEX_OWNER_LEN)

/* Convenience macros.
 *
 * These are used to form or modify the bit pattern of a given mutex value.
 */



/* A mutex attribute holds the following fields:
 *
 * bits:   name     description
 * 0-3     type     type of mutex
 * 4       shared   process-shared flag
 */
#define  MUTEXATTR_TYPE_MASK   0x000f
#define  MUTEXATTR_SHARED_MASK 0x0010
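
// Illustrative caller-side sketch (not part of this file): building an attribute
// for a process-shared recursive mutex with the functions implemented below. The
// attribute word ends up holding the type in bits 0-3 and the shared flag in bit 4.
//
//   pthread_mutexattr_t attr;
//   pthread_mutexattr_init(&attr);                                // PTHREAD_MUTEX_DEFAULT
//   pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);    // type field, bits 0-3
//   pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);  // shared flag, bit 4
//   pthread_mutex_t m;
//   pthread_mutex_init(&m, &attr);
//   pthread_mutexattr_destroy(&attr);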


int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    *attr = PTHREAD_MUTEX_DEFAULT;
    return 0;
}

int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
    *attr = -1;
    return 0;
}

int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type_p)
{
    int type = (*attr & MUTEXATTR_TYPE_MASK);

    if (type < PTHREAD_MUTEX_NORMAL || type > PTHREAD_MUTEX_ERRORCHECK) {
        return EINVAL;
    }

    *type_p = type;
    return 0;
}

int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
    if (type < PTHREAD_MUTEX_NORMAL || type > PTHREAD_MUTEX_ERRORCHECK) {
        return EINVAL;
    }

    *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
    return 0;
}

/* process-shared mutexes are not fully supported at the moment */

int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
    switch (pshared) {
    case PTHREAD_PROCESS_PRIVATE:
        *attr &= ~MUTEXATTR_SHARED_MASK;
        return 0;

    case PTHREAD_PROCESS_SHARED:
        /* our current implementation of pthread actually supports shared
         * mutexes but won't clean up if a process dies with the mutex held.
         * Nevertheless, it's better than nothing. Shared mutexes are used
         * by surfaceflinger and audioflinger.
         */
        *attr |= MUTEXATTR_SHARED_MASK;
        return 0;
    }
    return EINVAL;
}

int pthread_mutexattr_getpshared(const pthread_mutexattr_t* attr, int* pshared) {
  *pshared = (*attr & MUTEXATTR_SHARED_MASK) ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
  return 0;
}

static inline atomic_int* MUTEX_TO_ATOMIC_POINTER(pthread_mutex_t* mutex) {
  static_assert(sizeof(atomic_int) == sizeof(mutex->value),
                "mutex->value should actually be atomic_int in implementation.");

  // We prefer casting mutex->value to atomic_int rather than declaring it as
  // atomic_int directly, because the latter would pollute pthread.h and cause
  // an error when compiling libcxx.
  return reinterpret_cast<atomic_int*>(&mutex->value);
}

int pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attr) {
  atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);

  if (__predict_true(attr == NULL)) {
    atomic_init(mutex_value_ptr, MUTEX_TYPE_BITS_NORMAL);
    return 0;
  }

  int value = 0;
  if ((*attr & MUTEXATTR_SHARED_MASK) != 0) {
    value |= MUTEX_SHARED_MASK;
  }

  switch (*attr & MUTEXATTR_TYPE_MASK) {
  case PTHREAD_MUTEX_NORMAL:
    value |= MUTEX_TYPE_BITS_NORMAL;
    break;
  case PTHREAD_MUTEX_RECURSIVE:
    value |= MUTEX_TYPE_BITS_RECURSIVE;
    break;
  case PTHREAD_MUTEX_ERRORCHECK:
    value |= MUTEX_TYPE_BITS_ERRORCHECK;
    break;
  default:
    return EINVAL;
  }

  atomic_init(mutex_value_ptr, value);
  return 0;
}


/*
 * Lock a mutex of type NORMAL.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 */
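/* Illustrative state-transition sketch (not part of the original comments; it only
 * restates the protocol implemented by _normal_mutex_lock/_normal_mutex_unlock):
 *
 *   lock,   no contention:  0 -> 1            (compare-exchange, acquire)
 *   lock,   contended:      1 or 2 -> 2, then futex wait until the exchange sees 0
 *   unlock, no waiters:     1 -> 0            (exchange, release)
 *   unlock, with waiters:   2 -> 0, then futex wake one waiter
 */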
static inline void _normal_mutex_lock(atomic_int* mutex_value_ptr, int shared) {
  /* convenience shortcuts */
  const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
  const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;

  // The common case is an unlocked mutex, so we begin by trying to
  // change the lock's state from unlocked to locked_uncontended.
  // If the exchange succeeds, an acquire fence is required to make
  // all memory accesses made by other threads visible to the current thread.
  int mvalue = unlocked;
  if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue,
                                                             locked_uncontended,
                                                             memory_order_acquire,
                                                             memory_order_relaxed))) {
    return;
  }

  ScopedTrace trace("Contending for pthread mutex");

  // We want to go to sleep until the mutex is available, which requires
  // promoting it to locked_contended. We need to swap in the new state
  // value and then wait until somebody wakes us up.
  // An atomic_exchange is used to compete with other threads for the lock.
  // If it returns unlocked, we have acquired the lock; otherwise another
  // thread still holds the lock and we should wait again.
  // If the lock is acquired, an acquire fence is needed to make all memory
  // accesses made by other threads visible to the current thread.
  const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
  while (atomic_exchange_explicit(mutex_value_ptr, locked_contended,
                                  memory_order_acquire) != unlocked) {
    __futex_wait_ex(mutex_value_ptr, shared, locked_contended, NULL);
  }
}

/*
 * Release a mutex of type NORMAL. The caller is responsible for determining
 * that we are in fact the owner of this lock.
 */
static inline void _normal_mutex_unlock(atomic_int* mutex_value_ptr, int shared) {
  const int unlocked         = shared | MUTEX_STATE_BITS_UNLOCKED;
  const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

  // We use an atomic_exchange to release the lock. If the locked_contended state
  // is returned, some threads are waiting for the lock and we need to wake up
  // one of them.
  // A release fence is required to make previous stores visible to the next
  // lock owner thread.
  if (atomic_exchange_explicit(mutex_value_ptr, unlocked,
                               memory_order_release) == locked_contended) {
    // Wake up one waiting thread. We don't know which thread will be
    // woken or when it'll start executing -- futexes make no guarantees
    // here. There may not even be a thread waiting.
    //
    // The newly-woken thread will replace the unlocked state we just set above
    // with locked_contended state, which means that when it eventually releases
    // the mutex it will also call FUTEX_WAKE. This results in one extra wake
    // call whenever a lock is contended, but lets us avoid forgetting anyone
    // without requiring us to track the number of sleepers.
    //
    // It's possible for another thread to sneak in and grab the lock between
    // the exchange above and the wake call below. If the new thread is "slow"
    // and holds the lock for a while, we'll wake up a sleeper, which will swap
    // in locked_uncontended state and then go back to sleep since the lock is
    // still held. If the new thread is "fast", running to completion before
    // we call wake, the thread we eventually wake will find an unlocked mutex
    // and will execute. Either way we have correct behavior and nobody is
    // orphaned on the wait queue.
    __futex_wake_ex(mutex_value_ptr, shared, 1);
  }
}

/* This common inlined function is used to increment the counter of an
 * errorcheck or recursive mutex.
 *
 * For errorcheck mutexes, it will return EDEADLK.
 * If the counter overflows, it will return EAGAIN.
 * Otherwise, it atomically increments the counter and returns 0.
 *
 * mtype is the current mutex type.
 * mvalue is the current mutex value (already loaded).
 * mutex_value_ptr points to the mutex value.
 */
static inline __always_inline
int _recursive_increment(atomic_int* mutex_value_ptr, int mvalue, int mtype) {
  if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
    // Trying to re-lock a mutex we already acquired.
    return EDEADLK;
  }

  // Detect recursive lock overflow and return EAGAIN.
  // This is safe because only the owner thread can modify the
  // counter bits in the mutex value.
  if (MUTEX_COUNTER_BITS_WILL_OVERFLOW(mvalue)) {
    return EAGAIN;
  }

  // We own the mutex, but other threads are able to change the lower bits
  // (e.g. promoting it to "contended"), so we need to use a compare_exchange
  // loop to update the counter. The counter will not overflow in the loop,
  // as only the owner thread can change it.
  // The mutex is still locked, so we don't need a release fence.
  while (!atomic_compare_exchange_weak_explicit(mutex_value_ptr, &mvalue,
                                                mvalue + MUTEX_COUNTER_BITS_ONE,
                                                memory_order_relaxed,
                                                memory_order_relaxed)) { }
  return 0;
}

int pthread_mutex_lock(pthread_mutex_t* mutex) {
  atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);

  int mvalue, mtype, tid, shared;

  mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
  mtype = (mvalue & MUTEX_TYPE_MASK);
  shared = (mvalue & MUTEX_SHARED_MASK);

  // Handle common case first.
  if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
    _normal_mutex_lock(mutex_value_ptr, shared);
    return 0;
  }

  // Do we already own this recursive or error-check mutex?
  tid = __get_thread()->tid;
  if (tid == MUTEX_OWNER_FROM_BITS(mvalue))
    return _recursive_increment(mutex_value_ptr, mvalue, mtype);

  // Add in shared state to avoid extra 'or' operations below.
  mtype |= shared;

  // First, if the mutex is unlocked, try to quickly acquire it.
  // In the optimistic case where this works, set the state to locked_uncontended.
  if (mvalue == mtype) {
    int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    // If the exchange succeeds, an acquire fence is required to make
    // all memory accesses made by other threads visible to the current thread.
    if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue,
                         newval, memory_order_acquire, memory_order_relaxed))) {
      return 0;
    }
  }

  ScopedTrace trace("Contending for pthread mutex");

  while (true) {
    if (mvalue == mtype) {
      // If the mutex is unlocked, its value should be 'mtype' and
      // we try to acquire it by setting its owner and state atomically.
      // NOTE: We set the state to locked_contended since we _know_ there
      // is contention when we are in this loop. This ensures all waiters
      // will be unlocked.

      int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
      // If the exchange succeeds, an acquire fence is required to make
      // all memory accesses made by other threads visible to the current thread.
      if (__predict_true(atomic_compare_exchange_weak_explicit(mutex_value_ptr,
                                                               &mvalue, newval,
                                                               memory_order_acquire,
                                                               memory_order_relaxed))) {
        return 0;
      }
      continue;
    } else if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
      // The mutex is already locked by another thread. If the state is locked_uncontended,
      // we should set it to locked_contended before going to sleep. This makes
      // sure waiters will be woken up eventually.

      int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
      if (__predict_false(!atomic_compare_exchange_weak_explicit(mutex_value_ptr,
                                                                 &mvalue, newval,
                                                                 memory_order_relaxed,
                                                                 memory_order_relaxed))) {
        continue;
      }
      mvalue = newval;
    }

    // We are in locked_contended state, sleep until someone wakes us up.
    __futex_wait_ex(mutex_value_ptr, shared, mvalue, NULL);
    mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
  }
}

int pthread_mutex_unlock(pthread_mutex_t* mutex) {
  atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);

  int mvalue, mtype, tid, shared;

  mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
  mtype = (mvalue & MUTEX_TYPE_MASK);
  shared = (mvalue & MUTEX_SHARED_MASK);

  // Handle common case first.
  if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
    _normal_mutex_unlock(mutex_value_ptr, shared);
    return 0;
  }

  // Do we already own this recursive or error-check mutex?
  tid = __get_thread()->tid;
  if (tid != MUTEX_OWNER_FROM_BITS(mvalue))
    return EPERM;

  // If the counter is > 0, we can simply decrement it atomically.
  // Since other threads can mutate the lower state bits (and only the
  // lower state bits), use a compare_exchange loop to do it.
  if (!MUTEX_COUNTER_BITS_IS_ZERO(mvalue)) {
    // We still own the mutex, so a release fence is not needed.
    while (!atomic_compare_exchange_weak_explicit(mutex_value_ptr, &mvalue,
                                                  mvalue - MUTEX_COUNTER_BITS_ONE,
                                                  memory_order_relaxed,
                                                  memory_order_relaxed)) { }
    return 0;
  }

  // The counter is 0, so we are going to unlock the mutex by resetting its
  // state to unlocked. We need to perform an atomic_exchange in order to read
  // the current state, which will be locked_contended if there may be waiters
  // to wake.
  // A release fence is required to make previous stores visible to the next
  // lock owner thread.
  mvalue = atomic_exchange_explicit(mutex_value_ptr,
                                    mtype | shared | MUTEX_STATE_BITS_UNLOCKED,
                                    memory_order_release);
  if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
    __futex_wake_ex(mutex_value_ptr, shared, 1);
  }

  return 0;
}

int pthread_mutex_trylock(pthread_mutex_t* mutex) {
  atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);

  int mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
  int mtype = (mvalue & MUTEX_TYPE_MASK);
  int shared = (mvalue & MUTEX_SHARED_MASK);

  // Handle common case first.
  if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
    mvalue = shared | MUTEX_STATE_BITS_UNLOCKED;
    // If the exchange succeeds, an acquire fence is required to make
    // all memory accesses made by other threads visible to the current thread.
    if (atomic_compare_exchange_strong_explicit(mutex_value_ptr,
                                                &mvalue,
                                                shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED,
                                                memory_order_acquire,
                                                memory_order_relaxed)) {
      return 0;
    }
    return EBUSY;
  }

  // Do we already own this recursive or error-check mutex?
  pid_t tid = __get_thread()->tid;
  if (tid == MUTEX_OWNER_FROM_BITS(mvalue)) {
    if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
      return EBUSY;
    }
    return _recursive_increment(mutex_value_ptr, mvalue, mtype);
  }

  // Same as pthread_mutex_lock, except that we don't want to wait, and
  // the only operation that can succeed is a single compare_exchange to acquire the
  // lock if it is released / not owned by anyone. No need for a complex loop.
  // If the exchange succeeds, an acquire fence is required to make
  // all memory accesses made by other threads visible to the current thread.
  mtype |= shared | MUTEX_STATE_BITS_UNLOCKED;
  mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;

  if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr,
                                                             &mtype, mvalue,
                                                             memory_order_acquire,
                                                             memory_order_relaxed))) {
    return 0;
  }
  return EBUSY;
}

static int __pthread_mutex_timedlock(pthread_mutex_t* mutex, const timespec* abs_ts, clockid_t clock) {
  atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);

  timespec ts;

  int mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
  int mtype = (mvalue & MUTEX_TYPE_MASK);
  int shared = (mvalue & MUTEX_SHARED_MASK);

  // Handle common case first.
  if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
    const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
    const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    const int locked_contended   = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // If the exchange succeeds, an acquire fence is required to make
    // all memory accesses made by other threads visible to the current thread.
    mvalue = unlocked;
    if (atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue, locked_uncontended,
                                                memory_order_acquire, memory_order_relaxed)) {
      return 0;
    }

    ScopedTrace trace("Contending for timed pthread mutex");

    // Same as pthread_mutex_lock, except that we can only wait for a specified
    // time interval. If the lock is acquired, an acquire fence is needed to make
    // all memory accesses made by other threads visible to the current thread.
    while (atomic_exchange_explicit(mutex_value_ptr, locked_contended,
                                    memory_order_acquire) != unlocked) {
      if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
        return ETIMEDOUT;
      }
      __futex_wait_ex(mutex_value_ptr, shared, locked_contended, &ts);
    }

    return 0;
  }

  // Do we already own this recursive or error-check mutex?
  pid_t tid = __get_thread()->tid;
  if (tid == MUTEX_OWNER_FROM_BITS(mvalue)) {
    return _recursive_increment(mutex_value_ptr, mvalue, mtype);
  }

  mtype |= shared;

  // First try a quick lock.
  if (mvalue == mtype) {
    int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
    // If the exchange succeeds, an acquire fence is required to make
    // all memory accesses made by other threads visible to the current thread.
    if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr,
                                                               &mvalue, newval,
                                                               memory_order_acquire,
                                                               memory_order_relaxed))) {
      return 0;
    }
  }

  ScopedTrace trace("Contending for timed pthread mutex");

  // The following implements the same loop as pthread_mutex_lock,
  // but adds checks to ensure that the operation never exceeds the
  // absolute expiration time.
  while (true) {
    if (mvalue == mtype) { // Unlocked.
      int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
      // An acquire fence is needed for a successful exchange.
      if (!atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue, newval,
                                                   memory_order_acquire,
                                                   memory_order_relaxed)) {
        goto check_time;
      }

      return 0;
    } else if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
      // The value is locked. If the state is locked_uncontended, we need to switch
      // it to locked_contended before sleeping, so we can get woken up later.
      int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
      if (!atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue, newval,
                                                   memory_order_relaxed,
                                                   memory_order_relaxed)) {
        goto check_time;
      }
      mvalue = newval;
    }

    if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
      return ETIMEDOUT;
    }

    if (__futex_wait_ex(mutex_value_ptr, shared, mvalue, &ts) == -ETIMEDOUT) {
      return ETIMEDOUT;
    }

check_time:
    if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
      return ETIMEDOUT;
    }
    // After futex_wait or the potentially costly timespec_from_absolute_timespec,
    // we'd better read mvalue again in case it has changed.
    mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
  }
}

#if !defined(__LP64__)
extern "C" int pthread_mutex_lock_timeout_np(pthread_mutex_t* mutex, unsigned ms) {
  timespec abs_timeout;
  clock_gettime(CLOCK_MONOTONIC, &abs_timeout);
  abs_timeout.tv_sec += ms / 1000;
  abs_timeout.tv_nsec += (ms % 1000) * 1000000;
  if (abs_timeout.tv_nsec >= NS_PER_S) {
    abs_timeout.tv_sec++;
    abs_timeout.tv_nsec -= NS_PER_S;
  }

  int error = __pthread_mutex_timedlock(mutex, &abs_timeout, CLOCK_MONOTONIC);
  if (error == ETIMEDOUT) {
    error = EBUSY;
  }
  return error;
}
#endif

int pthread_mutex_timedlock(pthread_mutex_t* mutex, const timespec* abs_timeout) {
  return __pthread_mutex_timedlock(mutex, abs_timeout, CLOCK_REALTIME);
}
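
// Illustrative caller-side sketch (not part of this file): the timeout passed to
// pthread_mutex_timedlock() is an absolute CLOCK_REALTIME deadline, not a relative
// interval.
//
//   timespec deadline;
//   clock_gettime(CLOCK_REALTIME, &deadline);
//   deadline.tv_sec += 1;  // give up after roughly one second
//   int result = pthread_mutex_timedlock(&m, &deadline);  // 0 on success, ETIMEDOUT on timeout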

int pthread_mutex_destroy(pthread_mutex_t* mutex) {
  // Use trylock to ensure that the mutex is valid and not already locked.
  int error = pthread_mutex_trylock(mutex);
  if (error != 0) {
    return error;
  }

  atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
  atomic_store_explicit(mutex_value_ptr, 0xdead10cc, memory_order_relaxed);
  return 0;
}