/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <pthread.h>

#include <errno.h>
#include <limits.h>
#include <stdatomic.h>
#include <sys/cdefs.h>
#include <sys/mman.h>
#include <unistd.h>

#include "pthread_internal.h"

#include "private/bionic_constants.h"
#include "private/bionic_futex.h"
#include "private/bionic_systrace.h"
#include "private/bionic_time_conversions.h"
#include "private/bionic_tls.h"

/* a mutex is implemented as a 32-bit integer holding the following fields
 *
 * bits:     name     description
 * 31-16     tid      owner thread's tid (recursive and errorcheck only)
 * 15-14     type     mutex type
 * 13        shared   process-shared flag
 * 12-2      counter  counter of recursive locks held by the owner
 * 1-0       state    lock state (0, 1 or 2)
 */

/* Convenience macro, creates a mask of 'bits' bits that starts from
 * the 'shift'-th least significant bit in a 32-bit word.
 *
 * Examples: FIELD_MASK(0,4)  -> 0xf
 *           FIELD_MASK(16,9) -> 0x1ff0000
 */
#define  FIELD_MASK(shift,bits)           (((1 << (bits))-1) << (shift))

/* This one is used to create a bit pattern from a given field value */
#define  FIELD_TO_BITS(val,shift,bits)    (((val) & ((1 << (bits))-1)) << (shift))

/* And this one does the opposite, i.e. extracts a field's value from a bit pattern */
#define  FIELD_FROM_BITS(val,shift,bits)  (((val) >> (shift)) & ((1 << (bits))-1))

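/* Illustrative sketch only (never used by the implementation): with the layout
 * documented above, a hypothetical value 0x400d decomposes as
 *
 *   FIELD_FROM_BITS(0x400d, 14, 2)  == 1   // type (recursive)
 *   FIELD_FROM_BITS(0x400d, 13, 1)  == 0   // shared (process-private)
 *   FIELD_FROM_BITS(0x400d, 2, 11)  == 3   // counter
 *   FIELD_FROM_BITS(0x400d, 0, 2)   == 1   // state (locked, no waiters)
 */
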
/* Mutex state:
 *
 * 0 for unlocked
 * 1 for locked, no waiters
 * 2 for locked, maybe waiters
 */
#define  MUTEX_STATE_SHIFT      0
#define  MUTEX_STATE_LEN        2

#define  MUTEX_STATE_MASK           FIELD_MASK(MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_FROM_BITS(v)   FIELD_FROM_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)
#define  MUTEX_STATE_TO_BITS(v)     FIELD_TO_BITS(v, MUTEX_STATE_SHIFT, MUTEX_STATE_LEN)

#define  MUTEX_STATE_UNLOCKED            0   /* must be 0 to match __PTHREAD_MUTEX_INIT_VALUE */
#define  MUTEX_STATE_LOCKED_UNCONTENDED  1   /* must be 1 due to atomic dec in unlock operation */
#define  MUTEX_STATE_LOCKED_CONTENDED    2   /* must be 1 + LOCKED_UNCONTENDED due to atomic dec */

#define  MUTEX_STATE_BITS_UNLOCKED            MUTEX_STATE_TO_BITS(MUTEX_STATE_UNLOCKED)
#define  MUTEX_STATE_BITS_LOCKED_UNCONTENDED  MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_UNCONTENDED)
#define  MUTEX_STATE_BITS_LOCKED_CONTENDED    MUTEX_STATE_TO_BITS(MUTEX_STATE_LOCKED_CONTENDED)

/* return true iff the mutex is locked with no waiters */
#define  MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(v)  (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_UNCONTENDED)

/* return true iff the mutex is locked with maybe waiters */
#define  MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(v)    (((v) & MUTEX_STATE_MASK) == MUTEX_STATE_BITS_LOCKED_CONTENDED)

/* used to flip from LOCKED_UNCONTENDED to LOCKED_CONTENDED */
#define  MUTEX_STATE_BITS_FLIP_CONTENTION(v)        ((v) ^ (MUTEX_STATE_BITS_LOCKED_CONTENDED ^ MUTEX_STATE_BITS_LOCKED_UNCONTENDED))

/* Mutex counter:
 *
 * We need to check for overflow before incrementing, and we also need to
 * detect when the counter is 0.
 */
#define  MUTEX_COUNTER_SHIFT  2
#define  MUTEX_COUNTER_LEN    11
#define  MUTEX_COUNTER_MASK   FIELD_MASK(MUTEX_COUNTER_SHIFT, MUTEX_COUNTER_LEN)

#define  MUTEX_COUNTER_BITS_WILL_OVERFLOW(v)  (((v) & MUTEX_COUNTER_MASK) == MUTEX_COUNTER_MASK)
#define  MUTEX_COUNTER_BITS_IS_ZERO(v)        (((v) & MUTEX_COUNTER_MASK) == 0)

/* Used to increment the counter directly after overflow has been checked */
#define  MUTEX_COUNTER_BITS_ONE  FIELD_TO_BITS(1, MUTEX_COUNTER_SHIFT, MUTEX_COUNTER_LEN)

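/* Worked example (illustration only): the counter occupies bits 12-2, so
 * MUTEX_COUNTER_BITS_ONE expands to 1 << 2 == 4, and incrementing the counter is a
 * plain add of that constant to the whole mutex word. That is exactly what
 * _recursive_increment() below does with mvalue + MUTEX_COUNTER_BITS_ONE. */
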
/* Mutex shared bit flag
 *
 * This flag is set to indicate that the mutex is shared among processes.
 * This changes the futex opcode we use for futex wait/wake operations
 * (non-shared operations are much faster).
 */
#define  MUTEX_SHARED_SHIFT  13
#define  MUTEX_SHARED_MASK   FIELD_MASK(MUTEX_SHARED_SHIFT, 1)

/* Mutex type:
 *
 * We support normal, recursive and errorcheck mutexes.
 *
 * The constants defined here *cannot* be changed because they must match
 * the C library ABI which defines the following initialization values in
 * <pthread.h>:
 *
 *   __PTHREAD_MUTEX_INIT_VALUE
 *   __PTHREAD_RECURSIVE_MUTEX_VALUE
 *   __PTHREAD_ERRORCHECK_MUTEX_INIT_VALUE
 */
#define  MUTEX_TYPE_SHIFT  14
#define  MUTEX_TYPE_LEN    2
#define  MUTEX_TYPE_MASK   FIELD_MASK(MUTEX_TYPE_SHIFT, MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_NORMAL      0  /* Must be 0 to match __PTHREAD_MUTEX_INIT_VALUE */
#define  MUTEX_TYPE_RECURSIVE   1
#define  MUTEX_TYPE_ERRORCHECK  2

#define  MUTEX_TYPE_TO_BITS(t)  FIELD_TO_BITS(t, MUTEX_TYPE_SHIFT, MUTEX_TYPE_LEN)

#define  MUTEX_TYPE_BITS_NORMAL      MUTEX_TYPE_TO_BITS(MUTEX_TYPE_NORMAL)
#define  MUTEX_TYPE_BITS_RECURSIVE   MUTEX_TYPE_TO_BITS(MUTEX_TYPE_RECURSIVE)
#define  MUTEX_TYPE_BITS_ERRORCHECK  MUTEX_TYPE_TO_BITS(MUTEX_TYPE_ERRORCHECK)

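/* Worked example (illustration only, not an ABI statement): with MUTEX_TYPE_SHIFT == 14
 * the macros above expand to
 *
 *   MUTEX_TYPE_BITS_NORMAL     == 0 << 14 == 0x0000
 *   MUTEX_TYPE_BITS_RECURSIVE  == 1 << 14 == 0x4000
 *   MUTEX_TYPE_BITS_ERRORCHECK == 2 << 14 == 0x8000
 *
 * which is what the <pthread.h> initialization values listed above have to encode. */
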
/* Mutex owner field:
 *
 * This is only used for recursive and errorcheck mutexes. It holds the
 * tid of the owning thread. Note that this works because the Linux
 * kernel _only_ uses 16-bit values for tids.
 *
 * More specifically, it will wrap back to 10000 once it exceeds 32768 for
 * application processes. You can check this by running the following inside
 * an adb shell session:
 *
    OLDPID=$$;
    while true; do
      NEWPID=$(sh -c 'echo $$')
      if [ "$NEWPID" -gt 32768 ]; then
        echo "AARGH: new PID $NEWPID is too high!"
        exit 1
      fi
      if [ "$NEWPID" -lt "$OLDPID" ]; then
        echo "****** Wrapping from PID $OLDPID to $NEWPID. *******"
      else
        echo -n "$NEWPID!"
      fi
      OLDPID=$NEWPID
    done

 * Note that you can run the same example on a desktop Linux system; the
 * wrapping will also happen at 32768, but will go back to 300 instead.
 */
#define  MUTEX_OWNER_SHIFT  16
#define  MUTEX_OWNER_LEN    16

#define  MUTEX_OWNER_FROM_BITS(v)  FIELD_FROM_BITS(v, MUTEX_OWNER_SHIFT, MUTEX_OWNER_LEN)
#define  MUTEX_OWNER_TO_BITS(v)    FIELD_TO_BITS(v, MUTEX_OWNER_SHIFT, MUTEX_OWNER_LEN)

/* Convenience macros.
 *
 * These are used to form or modify the bit pattern of a given mutex value.
 */


/* a mutex attribute holds the following fields
 *
 * bits:     name     description
 * 0-3       type     type of mutex
 * 4         shared   process-shared flag
 */
#define  MUTEXATTR_TYPE_MASK    0x000f
#define  MUTEXATTR_SHARED_MASK  0x0010


int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    *attr = PTHREAD_MUTEX_DEFAULT;
    return 0;
}

int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
    *attr = -1;
    return 0;
}

int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type_p)
{
    int type = (*attr & MUTEXATTR_TYPE_MASK);

    if (type < PTHREAD_MUTEX_NORMAL || type > PTHREAD_MUTEX_ERRORCHECK) {
        return EINVAL;
    }

    *type_p = type;
    return 0;
}

int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
    if (type < PTHREAD_MUTEX_NORMAL || type > PTHREAD_MUTEX_ERRORCHECK) {
        return EINVAL;
    }

    *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
    return 0;
}

/* process-shared mutexes are only partially supported; see the note in setpshared below */

int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
{
    switch (pshared) {
    case PTHREAD_PROCESS_PRIVATE:
        *attr &= ~MUTEXATTR_SHARED_MASK;
        return 0;

    case PTHREAD_PROCESS_SHARED:
        /* our current implementation of pthread actually supports shared
         * mutexes but won't clean up if a process dies with the mutex held.
         * Nevertheless, it's better than nothing. Shared mutexes are used
         * by surfaceflinger and audioflinger.
         */
        *attr |= MUTEXATTR_SHARED_MASK;
        return 0;
    }
    return EINVAL;
}

int pthread_mutexattr_getpshared(const pthread_mutexattr_t* attr, int* pshared) {
    *pshared = (*attr & MUTEXATTR_SHARED_MASK) ? PTHREAD_PROCESS_SHARED : PTHREAD_PROCESS_PRIVATE;
    return 0;
}

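/* Usage sketch (caller side, not part of this file): creating a process-private
 * recursive mutex through the attribute API implemented above. Error handling is
 * omitted for brevity.
 *
 *   pthread_mutexattr_t attr;
 *   pthread_mutexattr_init(&attr);
 *   pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *   pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
 *
 *   pthread_mutex_t m;
 *   pthread_mutex_init(&m, &attr);
 *   pthread_mutexattr_destroy(&attr);
 *   ...
 *   pthread_mutex_destroy(&m);
 */
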
static inline atomic_int* MUTEX_TO_ATOMIC_POINTER(pthread_mutex_t* mutex) {
    static_assert(sizeof(atomic_int) == sizeof(mutex->value),
                  "mutex->value should actually be atomic_int in implementation.");

    // We prefer casting to atomic_int instead of declaring mutex->value to be atomic_int
    // directly, because the latter would pollute pthread.h and cause an error when
    // compiling libcxx.
    return reinterpret_cast<atomic_int*>(&mutex->value);
}

int pthread_mutex_init(pthread_mutex_t* mutex, const pthread_mutexattr_t* attr) {
    atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);

    if (__predict_true(attr == NULL)) {
        atomic_init(mutex_value_ptr, MUTEX_TYPE_BITS_NORMAL);
        return 0;
    }

    int value = 0;
    if ((*attr & MUTEXATTR_SHARED_MASK) != 0) {
        value |= MUTEX_SHARED_MASK;
    }

    switch (*attr & MUTEXATTR_TYPE_MASK) {
    case PTHREAD_MUTEX_NORMAL:
        value |= MUTEX_TYPE_BITS_NORMAL;
        break;
    case PTHREAD_MUTEX_RECURSIVE:
        value |= MUTEX_TYPE_BITS_RECURSIVE;
        break;
    case PTHREAD_MUTEX_ERRORCHECK:
        value |= MUTEX_TYPE_BITS_ERRORCHECK;
        break;
    default:
        return EINVAL;
    }

    atomic_init(mutex_value_ptr, value);
    return 0;
}


/*
 * Lock a mutex of type NORMAL.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 */
static inline void _normal_mutex_lock(atomic_int* mutex_value_ptr, int shared) {
    /* convenience shortcuts */
    const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
    const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;

    // The common case is an unlocked mutex, so we begin by trying to
    // change the lock's state from unlocked to locked_uncontended.
    // If the exchange succeeds, an acquire fence is required to make
    // all memory accesses made by other threads visible to the current CPU.
    int mvalue = unlocked;
    if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue,
                                                               locked_uncontended,
                                                               memory_order_acquire,
                                                               memory_order_relaxed))) {
        return;
    }

    ScopedTrace trace("Contending for pthread mutex");

    // We want to go to sleep until the mutex is available, which requires
    // promoting it to locked_contended. We need to swap in the new state
    // value and then wait until somebody wakes us up.
    // An atomic_exchange is used to compete with other threads for the lock.
    // If it returns unlocked, we have acquired the lock; otherwise another
    // thread still holds the lock and we should wait again.
    // If the lock is acquired, an acquire fence is needed to make all memory
    // accesses made by other threads visible to the current CPU.
    const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;
    while (atomic_exchange_explicit(mutex_value_ptr, locked_contended,
                                    memory_order_acquire) != unlocked) {

        __futex_wait_ex(mutex_value_ptr, shared, locked_contended, NULL);
    }
}

/*
 * Release a mutex of type NORMAL. The caller is responsible for determining
 * that we are in fact the owner of this lock.
 */
static inline void _normal_mutex_unlock(atomic_int* mutex_value_ptr, int shared) {
    const int unlocked         = shared | MUTEX_STATE_BITS_UNLOCKED;
    const int locked_contended = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

    // We use an atomic_exchange to release the lock. If the locked_contended
    // state is returned, some thread is waiting for the lock and we need to
    // wake one of them up.
    // A release fence is required to make previous stores visible to the next
    // lock owner thread.
    if (atomic_exchange_explicit(mutex_value_ptr, unlocked,
                                 memory_order_release) == locked_contended) {
        // Wake up one waiting thread. We don't know which thread will be
        // woken or when it'll start executing -- futexes make no guarantees
        // here. There may not even be a thread waiting.
        //
        // The newly-woken thread will replace the unlocked state we just set above
        // with the locked_contended state, which means that when it eventually releases
        // the mutex it will also call FUTEX_WAKE. This results in one extra wake
        // call whenever a lock is contended, but lets us avoid forgetting anyone
        // without requiring us to track the number of sleepers.
        //
        // It's possible for another thread to sneak in and grab the lock between
        // the exchange above and the wake call below. If the new thread is "slow"
        // and holds the lock for a while, we'll wake up a sleeper, which will swap
        // in the locked_contended state and then go back to sleep since the lock is
        // still held. If the new thread is "fast", running to completion before
        // we call wake, the thread we eventually wake will find an unlocked mutex
        // and will execute. Either way we have correct behavior and nobody is
        // orphaned on the wait queue.
        __futex_wake_ex(mutex_value_ptr, shared, 1);
    }
}

/* This common inlined function is used to increment the counter of an
 * errorcheck or recursive mutex.
 *
 * For errorcheck mutexes, it will return EDEADLK.
 * If the counter overflows, it will return EAGAIN.
 * Otherwise, it atomically increments the counter and returns 0.
 *
 * mtype is the current mutex type.
 * mvalue is the current mutex value (already loaded).
 * mutex_value_ptr points to the mutex value.
 */
static inline __always_inline
int _recursive_increment(atomic_int* mutex_value_ptr, int mvalue, int mtype) {
    if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
        // Trying to re-lock a mutex we already acquired.
        return EDEADLK;
    }

    // Detect recursive lock overflow and return EAGAIN.
    // This is safe because only the owner thread can modify the
    // counter bits in the mutex value.
    if (MUTEX_COUNTER_BITS_WILL_OVERFLOW(mvalue)) {
        return EAGAIN;
    }

    // We own the mutex, but other threads are able to change the lower bits
    // (e.g. promoting it to "contended"), so we need to use a compare_exchange
    // loop to update the counter. The counter will not overflow in the loop,
    // as only the owner thread can change it.
    // The mutex is still locked, so we don't need a release fence.
    while (!atomic_compare_exchange_weak_explicit(mutex_value_ptr, &mvalue,
                                                  mvalue + MUTEX_COUNTER_BITS_ONE,
                                                  memory_order_relaxed,
                                                  memory_order_relaxed)) { }
    return 0;
}

int pthread_mutex_lock(pthread_mutex_t* mutex) {
    atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);

    int mvalue, mtype, tid, shared;

    mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
    mtype  = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        _normal_mutex_lock(mutex_value_ptr, shared);
        return 0;
    }

    // Do we already own this recursive or error-check mutex?
    tid = __get_thread()->tid;
    if (tid == MUTEX_OWNER_FROM_BITS(mvalue))
        return _recursive_increment(mutex_value_ptr, mvalue, mtype);

    // Add in shared state to avoid extra 'or' operations below.
    mtype |= shared;

    // First, if the mutex is unlocked, try to quickly acquire it.
    // In the optimistic case where this works, set the state to locked_uncontended.
    if (mvalue == mtype) {
        int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
        // If the exchange succeeds, an acquire fence is required to make
        // all memory accesses made by other threads visible to the current CPU.
        if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue,
                                                                   newval, memory_order_acquire,
                                                                   memory_order_relaxed))) {
            return 0;
        }
    }

    ScopedTrace trace("Contending for pthread mutex");

    while (true) {
        if (mvalue == mtype) {
            // If the mutex is unlocked, its value should be 'mtype' and
            // we try to acquire it by setting its owner and state atomically.
            // NOTE: We set the state to locked_contended since we _know_ there
            // is contention when we are in this loop. This ensures all waiters
            // will eventually be woken up.

            int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
            // If the exchange succeeds, an acquire fence is required to make
            // all memory accesses made by other threads visible to the current CPU.
            if (__predict_true(atomic_compare_exchange_weak_explicit(mutex_value_ptr,
                                                                     &mvalue, newval,
                                                                     memory_order_acquire,
                                                                     memory_order_relaxed))) {
                return 0;
            }
            continue;
        } else if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
            // The mutex is already locked by another thread. If the state is
            // locked_uncontended, we should set it to locked_contended before going
            // to sleep. This makes sure waiters will be woken up eventually.

            int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
            if (__predict_false(!atomic_compare_exchange_weak_explicit(mutex_value_ptr,
                                                                       &mvalue, newval,
                                                                       memory_order_relaxed,
                                                                       memory_order_relaxed))) {
                continue;
            }
            mvalue = newval;
        }

        // We are in the locked_contended state; sleep until someone wakes us up.
        __futex_wait_ex(mutex_value_ptr, shared, mvalue, NULL);
        mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
    }
}

int pthread_mutex_unlock(pthread_mutex_t* mutex) {
    atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);

    int mvalue, mtype, tid, shared;

    mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
    mtype  = (mvalue & MUTEX_TYPE_MASK);
    shared = (mvalue & MUTEX_SHARED_MASK);

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        _normal_mutex_unlock(mutex_value_ptr, shared);
        return 0;
    }

    // Do we already own this recursive or error-check mutex?
    tid = __get_thread()->tid;
    if (tid != MUTEX_OWNER_FROM_BITS(mvalue))
        return EPERM;

    // If the counter is > 0, we can simply decrement it atomically.
    // Since other threads can mutate the lower state bits (and only the
    // lower state bits), use a compare_exchange loop to do it.
    if (!MUTEX_COUNTER_BITS_IS_ZERO(mvalue)) {
        // We still own the mutex, so a release fence is not needed.
        while (!atomic_compare_exchange_weak_explicit(mutex_value_ptr, &mvalue,
                                                      mvalue - MUTEX_COUNTER_BITS_ONE,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed)) { }
        return 0;
    }

    // The counter is 0, so we're going to unlock the mutex by resetting its
    // state to unlocked. We need to perform an atomic_exchange in order to read
    // the current state, which will be locked_contended if there may be waiters
    // to wake up.
    // A release fence is required to make previous stores visible to the next
    // lock owner thread.
    mvalue = atomic_exchange_explicit(mutex_value_ptr,
                                      mtype | shared | MUTEX_STATE_BITS_UNLOCKED,
                                      memory_order_release);
    if (MUTEX_STATE_BITS_IS_LOCKED_CONTENDED(mvalue)) {
        __futex_wake_ex(mutex_value_ptr, shared, 1);
    }

    return 0;
}

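/* Usage sketch (caller side, not part of this file): a statically initialized
 * mutex is of type NORMAL, so plain lock/unlock like this exercises the
 * _normal_mutex_lock/_normal_mutex_unlock fast paths above. The names g_lock and
 * g_counter are hypothetical.
 *
 *   static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
 *   static int g_counter = 0;
 *
 *   void increment() {
 *     pthread_mutex_lock(&g_lock);
 *     ++g_counter;
 *     pthread_mutex_unlock(&g_lock);
 *   }
 */
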
int pthread_mutex_trylock(pthread_mutex_t* mutex) {
    atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);

    int mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
    int mtype  = (mvalue & MUTEX_TYPE_MASK);
    int shared = (mvalue & MUTEX_SHARED_MASK);

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        mvalue = shared | MUTEX_STATE_BITS_UNLOCKED;
        // If the exchange succeeds, an acquire fence is required to make
        // all memory accesses made by other threads visible to the current CPU.
        if (atomic_compare_exchange_strong_explicit(mutex_value_ptr,
                                                    &mvalue,
                                                    shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED,
                                                    memory_order_acquire,
                                                    memory_order_relaxed)) {
            return 0;
        }
        return EBUSY;
    }

    // Do we already own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if (tid == MUTEX_OWNER_FROM_BITS(mvalue)) {
        if (mtype == MUTEX_TYPE_BITS_ERRORCHECK) {
            return EBUSY;
        }
        return _recursive_increment(mutex_value_ptr, mvalue, mtype);
    }

    // Same as pthread_mutex_lock, except that we don't want to wait, and
    // the only operation that can succeed is a single compare_exchange to acquire the
    // lock if it is released / not owned by anyone. No need for a complex loop.
    // If the exchange succeeds, an acquire fence is required to make
    // all memory accesses made by other threads visible to the current CPU.
    mtype |= shared | MUTEX_STATE_BITS_UNLOCKED;
    mvalue = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;

    if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr,
                                                               &mtype, mvalue,
                                                               memory_order_acquire,
                                                               memory_order_relaxed))) {
        return 0;
    }
    return EBUSY;
}

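/* Usage sketch (caller side, not part of this file): pthread_mutex_trylock never
 * blocks, so callers typically fall back to other work on EBUSY. g_lock is the
 * hypothetical mutex from the sketch above.
 *
 *   if (pthread_mutex_trylock(&g_lock) == 0) {
 *     // ... touch the protected state ...
 *     pthread_mutex_unlock(&g_lock);
 *   } else {
 *     // EBUSY: the lock is held elsewhere; do something that doesn't need it.
 *   }
 */
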
static int __pthread_mutex_timedlock(pthread_mutex_t* mutex, const timespec* abs_ts, clockid_t clock) {
    atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);

    timespec ts;

    int mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
    int mtype  = (mvalue & MUTEX_TYPE_MASK);
    int shared = (mvalue & MUTEX_SHARED_MASK);

    // Handle common case first.
    if (__predict_true(mtype == MUTEX_TYPE_BITS_NORMAL)) {
        const int unlocked           = shared | MUTEX_STATE_BITS_UNLOCKED;
        const int locked_uncontended = shared | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
        const int locked_contended   = shared | MUTEX_STATE_BITS_LOCKED_CONTENDED;

        // If the exchange succeeds, an acquire fence is required to make
        // all memory accesses made by other threads visible to the current CPU.
        mvalue = unlocked;
        if (atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue, locked_uncontended,
                                                    memory_order_acquire, memory_order_relaxed)) {
            return 0;
        }

        ScopedTrace trace("Contending for timed pthread mutex");

        // Same as pthread_mutex_lock, except that we can only wait for a specified
        // time interval. If the lock is acquired, an acquire fence is needed to make
        // all memory accesses made by other threads visible to the current CPU.
        while (atomic_exchange_explicit(mutex_value_ptr, locked_contended,
                                        memory_order_acquire) != unlocked) {
            if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
                return ETIMEDOUT;
            }
            __futex_wait_ex(mutex_value_ptr, shared, locked_contended, &ts);
        }

        return 0;
    }

    // Do we already own this recursive or error-check mutex?
    pid_t tid = __get_thread()->tid;
    if (tid == MUTEX_OWNER_FROM_BITS(mvalue)) {
        return _recursive_increment(mutex_value_ptr, mvalue, mtype);
    }

    mtype |= shared;

    // First try a quick lock.
    if (mvalue == mtype) {
        int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_UNCONTENDED;
        // If the exchange succeeds, an acquire fence is required to make
        // all memory accesses made by other threads visible to the current CPU.
        if (__predict_true(atomic_compare_exchange_strong_explicit(mutex_value_ptr,
                                                                   &mvalue, newval,
                                                                   memory_order_acquire,
                                                                   memory_order_relaxed))) {
            return 0;
        }
    }

    ScopedTrace trace("Contending for timed pthread mutex");

    // The following implements the same loop as pthread_mutex_lock,
    // but adds checks to ensure that the operation never exceeds the
    // absolute expiration time.
    while (true) {
        if (mvalue == mtype) { // Unlocked.
            int newval = MUTEX_OWNER_TO_BITS(tid) | mtype | MUTEX_STATE_BITS_LOCKED_CONTENDED;
            // An acquire fence is needed for a successful exchange.
            if (!atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue, newval,
                                                         memory_order_acquire,
                                                         memory_order_relaxed)) {
                goto check_time;
            }

            return 0;
        } else if (MUTEX_STATE_BITS_IS_LOCKED_UNCONTENDED(mvalue)) {
            // The value is locked. If the state is locked_uncontended, we need to switch
            // it to locked_contended before sleeping, so we can get woken up later.
            int newval = MUTEX_STATE_BITS_FLIP_CONTENTION(mvalue);
            if (!atomic_compare_exchange_strong_explicit(mutex_value_ptr, &mvalue, newval,
                                                         memory_order_relaxed,
                                                         memory_order_relaxed)) {
                goto check_time;
            }
            mvalue = newval;
        }

        if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
            return ETIMEDOUT;
        }

        if (__futex_wait_ex(mutex_value_ptr, shared, mvalue, &ts) == -ETIMEDOUT) {
            return ETIMEDOUT;
        }

check_time:
        if (!timespec_from_absolute_timespec(ts, *abs_ts, clock)) {
            return ETIMEDOUT;
        }
        // After futex_wait or the time-costly timespec_from_absolute_timespec,
        // mvalue may have changed, so read it again before the next iteration.
        mvalue = atomic_load_explicit(mutex_value_ptr, memory_order_relaxed);
    }
}

#if !defined(__LP64__)
extern "C" int pthread_mutex_lock_timeout_np(pthread_mutex_t* mutex, unsigned ms) {
    timespec abs_timeout;
    clock_gettime(CLOCK_MONOTONIC, &abs_timeout);
    abs_timeout.tv_sec  += ms / 1000;
    abs_timeout.tv_nsec += (ms % 1000) * 1000000;
    if (abs_timeout.tv_nsec >= NS_PER_S) {
        abs_timeout.tv_sec++;
        abs_timeout.tv_nsec -= NS_PER_S;
    }

    int error = __pthread_mutex_timedlock(mutex, &abs_timeout, CLOCK_MONOTONIC);
    if (error == ETIMEDOUT) {
        error = EBUSY;
    }
    return error;
}
#endif

int pthread_mutex_timedlock(pthread_mutex_t* mutex, const timespec* abs_timeout) {
    return __pthread_mutex_timedlock(mutex, abs_timeout, CLOCK_REALTIME);
}

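/* Usage sketch (caller side, not part of this file): pthread_mutex_timedlock takes
 * an absolute CLOCK_REALTIME deadline, built the same way the legacy _np wrapper
 * above builds its CLOCK_MONOTONIC deadline. The two-second timeout and g_lock are
 * hypothetical.
 *
 *   timespec deadline;
 *   clock_gettime(CLOCK_REALTIME, &deadline);
 *   deadline.tv_sec += 2;
 *   int error = pthread_mutex_timedlock(&g_lock, &deadline);
 *   if (error == ETIMEDOUT) {
 *     // The lock could not be acquired within two seconds.
 *   }
 */
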
int pthread_mutex_destroy(pthread_mutex_t* mutex) {
    // Use trylock to ensure that the mutex is valid and not already locked.
    int error = pthread_mutex_trylock(mutex);
    if (error != 0) {
        return error;
    }

    atomic_int* mutex_value_ptr = MUTEX_TO_ATOMIC_POINTER(mutex);
    atomic_store_explicit(mutex_value_ptr, 0xdead10cc, memory_order_relaxed);
    return 0;
}