blob: 6a6386833ed3eab40432677f1c2b3d79c1cec50c [file] [log] [blame]
/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
28#include <sys/types.h>
29#include <unistd.h>
30#include <signal.h>
31#include <stdint.h>
32#include <stdio.h>
33#include <stdlib.h>
34#include <errno.h>
35#include <sys/atomics.h>
36#include <bionic_tls.h>
37#include <sys/mman.h>
38#include <pthread.h>
39#include <time.h>
40#include "pthread_internal.h"
41#include "thread_private.h"
42#include <limits.h>
43#include <memory.h>
44#include <assert.h>
45#include <malloc.h>
46
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -080047#define __likely(cond) __builtin_expect(!!(cond), 1)
48#define __unlikely(cond) __builtin_expect(!!(cond), 0)
49
The Android Open Source Project1dc9e472009-03-03 19:28:35 -080050extern int __pthread_clone(int (*fn)(void*), void *child_stack, int flags, void *arg);
51extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode);
52extern void _exit_thread(int retCode);
53extern int __set_errno(int);
54
55void _thread_created_hook(pid_t thread_id) __attribute__((noinline));
56
57#define PTHREAD_ATTR_FLAG_DETACHED 0x00000001
58#define PTHREAD_ATTR_FLAG_USER_STACK 0x00000002
59
60#define DEFAULT_STACKSIZE (1024 * 1024)
61#define STACKBASE 0x10000000
62
63static uint8_t * gStackBase = (uint8_t *)STACKBASE;
64
65static pthread_mutex_t mmap_lock = PTHREAD_MUTEX_INITIALIZER;
66
67
68static const pthread_attr_t gDefaultPthreadAttr = {
69 .flags = 0,
70 .stack_base = NULL,
71 .stack_size = DEFAULT_STACKSIZE,
72 .guard_size = PAGE_SIZE,
73 .sched_policy = SCHED_NORMAL,
74 .sched_priority = 0
75};
76
77#define INIT_THREADS 1
78
79static pthread_internal_t* gThreadList = NULL;
80static pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER;
81static pthread_mutex_t gDebuggerNotificationLock = PTHREAD_MUTEX_INITIALIZER;
82
83
84/* we simply malloc/free the internal pthread_internal_t structures. we may
85 * want to use a different allocation scheme in the future, but this one should
86 * be largely enough
87 */
88static pthread_internal_t*
89_pthread_internal_alloc(void)
90{
91 pthread_internal_t* thread;
92
93 thread = calloc( sizeof(*thread), 1 );
94 if (thread)
95 thread->intern = 1;
96
97 return thread;
98}
99
100static void
101_pthread_internal_free( pthread_internal_t* thread )
102{
103 if (thread && thread->intern) {
104 thread->intern = 0; /* just in case */
105 free (thread);
106 }
107}
108
109
110static void
111_pthread_internal_remove_locked( pthread_internal_t* thread )
112{
113 thread->next->pref = thread->pref;
114 thread->pref[0] = thread->next;
115}
116
117static void
118_pthread_internal_remove( pthread_internal_t* thread )
119{
120 pthread_mutex_lock(&gThreadListLock);
121 _pthread_internal_remove_locked(thread);
122 pthread_mutex_unlock(&gThreadListLock);
123}
124
125static void
126_pthread_internal_add( pthread_internal_t* thread )
127{
128 pthread_mutex_lock(&gThreadListLock);
129 thread->pref = &gThreadList;
130 thread->next = thread->pref[0];
131 if (thread->next)
132 thread->next->pref = &thread->next;
133 thread->pref[0] = thread;
134 pthread_mutex_unlock(&gThreadListLock);
135}
136
137pthread_internal_t*
138__get_thread(void)
139{
140 void** tls = (void**)__get_tls();
141
142 return (pthread_internal_t*) tls[TLS_SLOT_THREAD_ID];
143}
144
145
146void*
147__get_stack_base(int *p_stack_size)
148{
149 pthread_internal_t* thread = __get_thread();
150
151 *p_stack_size = thread->attr.stack_size;
152 return thread->attr.stack_base;
153}
154
155
156void __init_tls(void** tls, void* thread)
157{
158 int nn;
159
160 ((pthread_internal_t*)thread)->tls = tls;
161
162 // slot 0 must point to the tls area, this is required by the implementation
163 // of the x86 Linux kernel thread-local-storage
164 tls[TLS_SLOT_SELF] = (void*)tls;
165 tls[TLS_SLOT_THREAD_ID] = thread;
166 for (nn = TLS_SLOT_ERRNO; nn < BIONIC_TLS_SLOTS; nn++)
167 tls[nn] = 0;
168
169 __set_tls( (void*)tls );
170}
171
172
173/*
174 * This trampoline is called from the assembly clone() function
175 */
176void __thread_entry(int (*func)(void*), void *arg, void **tls)
177{
178 int retValue;
179 pthread_internal_t * thrInfo;
180
181 // Wait for our creating thread to release us. This lets it have time to
182 // notify gdb about this thread before it starts doing anything.
183 pthread_mutex_t * start_mutex = (pthread_mutex_t *)&tls[TLS_SLOT_SELF];
184 pthread_mutex_lock(start_mutex);
185 pthread_mutex_destroy(start_mutex);
186
187 thrInfo = (pthread_internal_t *) tls[TLS_SLOT_THREAD_ID];
188
189 __init_tls( tls, thrInfo );
190
191 pthread_exit( (void*)func(arg) );
192}
193
194void _init_thread(pthread_internal_t * thread, pid_t kernel_id, pthread_attr_t * attr, void * stack_base)
195{
196 if (attr == NULL) {
197 thread->attr = gDefaultPthreadAttr;
198 } else {
199 thread->attr = *attr;
200 }
201 thread->attr.stack_base = stack_base;
202 thread->kernel_id = kernel_id;
203
204 // set the scheduling policy/priority of the thread
205 if (thread->attr.sched_policy != SCHED_NORMAL) {
206 struct sched_param param;
207 param.sched_priority = thread->attr.sched_priority;
208 sched_setscheduler(kernel_id, thread->attr.sched_policy, &param);
209 }
210
211 pthread_cond_init(&thread->join_cond, NULL);
212 thread->join_count = 0;
213
214 thread->cleanup_stack = NULL;
215
216 _pthread_internal_add(thread);
217}
218
219
220/* XXX stacks not reclaimed if thread spawn fails */
221/* XXX stacks address spaces should be reused if available again */
222
223static void *mkstack(size_t size, size_t guard_size)
224{
225 void * stack;
226
227 pthread_mutex_lock(&mmap_lock);
228
229 stack = mmap((void *)gStackBase, size,
230 PROT_READ | PROT_WRITE,
231 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
232 -1, 0);
233
234 if(stack == MAP_FAILED) {
235 stack = NULL;
236 goto done;
237 }
238
239 if(mprotect(stack, guard_size, PROT_NONE)){
240 munmap(stack, size);
241 stack = NULL;
242 goto done;
243 }
244
245done:
246 pthread_mutex_unlock(&mmap_lock);
247 return stack;
248}
249
250/*
251 * Create a new thread. The thread's stack is layed out like so:
252 *
253 * +---------------------------+
254 * | pthread_internal_t |
255 * +---------------------------+
256 * | |
257 * | TLS area |
258 * | |
259 * +---------------------------+
260 * | |
261 * . .
262 * . stack area .
263 * . .
264 * | |
265 * +---------------------------+
266 * | guard page |
267 * +---------------------------+
268 *
269 * note that TLS[0] must be a pointer to itself, this is required
270 * by the thread-local storage implementation of the x86 Linux
271 * kernel, where the TLS pointer is read by reading fs:[0]
272 */
273int pthread_create(pthread_t *thread_out, pthread_attr_t const * attr,
274 void *(*start_routine)(void *), void * arg)
275{
276 char* stack;
277 void** tls;
278 int tid;
279 pthread_mutex_t * start_mutex;
280 pthread_internal_t * thread;
281 int madestack = 0;
282 int old_errno = errno;
283
284 /* this will inform the rest of the C library that at least one thread
285 * was created. this will enforce certain functions to acquire/release
286 * locks (e.g. atexit()) to protect shared global structures.
287 *
288 * this works because pthread_create() is not called by the C library
289 * initialization routine that sets up the main thread's data structures.
290 */
291 __isthreaded = 1;
292
293 thread = _pthread_internal_alloc();
294 if (thread == NULL)
295 return ENOMEM;
296
297 if (attr == NULL) {
298 attr = &gDefaultPthreadAttr;
299 }
300
301 // make sure the stack is PAGE_SIZE aligned
302 size_t stackSize = (attr->stack_size +
303 (PAGE_SIZE-1)) & ~(PAGE_SIZE-1);
304
305 if (!attr->stack_base) {
306 stack = mkstack(stackSize, attr->guard_size);
307 if(stack == NULL) {
308 _pthread_internal_free(thread);
309 return ENOMEM;
310 }
311 madestack = 1;
312 } else {
313 stack = attr->stack_base;
314 }
315
316 // Make room for TLS
317 tls = (void**)(stack + stackSize - BIONIC_TLS_SLOTS*sizeof(void*));
318
319 // Create a mutex for the thread in TLS_SLOT_SELF to wait on once it starts so we can keep
320 // it from doing anything until after we notify the debugger about it
321 start_mutex = (pthread_mutex_t *) &tls[TLS_SLOT_SELF];
322 pthread_mutex_init(start_mutex, NULL);
323 pthread_mutex_lock(start_mutex);
324
325 tls[TLS_SLOT_THREAD_ID] = thread;
326
327 tid = __pthread_clone((int(*)(void*))start_routine, tls,
328 CLONE_FILES | CLONE_FS | CLONE_VM | CLONE_SIGHAND
329 | CLONE_THREAD | CLONE_SYSVSEM | CLONE_DETACHED,
330 arg);
331
332 if(tid < 0) {
333 int result;
334 if (madestack)
335 munmap(stack, stackSize);
336 _pthread_internal_free(thread);
337 result = errno;
338 errno = old_errno;
339 return result;
340 }
341
342 _init_thread(thread, tid, (pthread_attr_t*)attr, stack);
343
344 if (!madestack)
345 thread->attr.flags |= PTHREAD_ATTR_FLAG_USER_STACK;
346
347 // Notify any debuggers about the new thread
348 pthread_mutex_lock(&gDebuggerNotificationLock);
349 _thread_created_hook(tid);
350 pthread_mutex_unlock(&gDebuggerNotificationLock);
351
352 // Let the thread do it's thing
353 pthread_mutex_unlock(start_mutex);
354
355 *thread_out = (pthread_t)thread;
356 return 0;
357}
358
359
360int pthread_attr_init(pthread_attr_t * attr)
361{
362 *attr = gDefaultPthreadAttr;
363 return 0;
364}
365
366int pthread_attr_destroy(pthread_attr_t * attr)
367{
368 memset(attr, 0x42, sizeof(pthread_attr_t));
369 return 0;
370}
371
372int pthread_attr_setdetachstate(pthread_attr_t * attr, int state)
373{
374 if (state == PTHREAD_CREATE_DETACHED) {
375 attr->flags |= PTHREAD_ATTR_FLAG_DETACHED;
376 } else if (state == PTHREAD_CREATE_JOINABLE) {
377 attr->flags &= ~PTHREAD_ATTR_FLAG_DETACHED;
378 } else {
379 return EINVAL;
380 }
381 return 0;
382}
383
384int pthread_attr_getdetachstate(pthread_attr_t const * attr, int * state)
385{
386 *state = (attr->flags & PTHREAD_ATTR_FLAG_DETACHED)
387 ? PTHREAD_CREATE_DETACHED
388 : PTHREAD_CREATE_JOINABLE;
389 return 0;
390}
391
392int pthread_attr_setschedpolicy(pthread_attr_t * attr, int policy)
393{
394 attr->sched_policy = policy;
395 return 0;
396}
397
398int pthread_attr_getschedpolicy(pthread_attr_t const * attr, int * policy)
399{
400 *policy = attr->sched_policy;
401 return 0;
402}
403
404int pthread_attr_setschedparam(pthread_attr_t * attr, struct sched_param const * param)
405{
406 attr->sched_priority = param->sched_priority;
407 return 0;
408}
409
410int pthread_attr_getschedparam(pthread_attr_t const * attr, struct sched_param * param)
411{
412 param->sched_priority = attr->sched_priority;
413 return 0;
414}
415
416int pthread_attr_setstacksize(pthread_attr_t * attr, size_t stack_size)
417{
418 if ((stack_size & (PAGE_SIZE - 1) || stack_size < PTHREAD_STACK_MIN)) {
419 return EINVAL;
420 }
421 attr->stack_size = stack_size;
422 return 0;
423}
424
425int pthread_attr_getstacksize(pthread_attr_t const * attr, size_t * stack_size)
426{
427 *stack_size = attr->stack_size;
428 return 0;
429}
430
431int pthread_attr_setstackaddr(pthread_attr_t * attr, void * stack_addr)
432{
433#if 1
434 // It's not clear if this is setting the top or bottom of the stack, so don't handle it for now.
435 return ENOSYS;
436#else
437 if ((uint32_t)stack_addr & (PAGE_SIZE - 1)) {
438 return EINVAL;
439 }
440 attr->stack_base = stack_addr;
441 return 0;
442#endif
443}
444
445int pthread_attr_getstackaddr(pthread_attr_t const * attr, void ** stack_addr)
446{
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -0700447 *stack_addr = (char*)attr->stack_base + attr->stack_size;
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800448 return 0;
449}
450
451int pthread_attr_setstack(pthread_attr_t * attr, void * stack_base, size_t stack_size)
452{
453 if ((stack_size & (PAGE_SIZE - 1) || stack_size < PTHREAD_STACK_MIN)) {
454 return EINVAL;
455 }
456 if ((uint32_t)stack_base & (PAGE_SIZE - 1)) {
457 return EINVAL;
458 }
459 attr->stack_base = stack_base;
460 attr->stack_size = stack_size;
461 return 0;
462}
463
464int pthread_attr_getstack(pthread_attr_t const * attr, void ** stack_base, size_t * stack_size)
465{
466 *stack_base = attr->stack_base;
467 *stack_size = attr->stack_size;
468 return 0;
469}
470
471int pthread_attr_setguardsize(pthread_attr_t * attr, size_t guard_size)
472{
473 if (guard_size & (PAGE_SIZE - 1) || guard_size < PAGE_SIZE) {
474 return EINVAL;
475 }
476
477 attr->guard_size = guard_size;
478 return 0;
479}
480
481int pthread_attr_getguardsize(pthread_attr_t const * attr, size_t * guard_size)
482{
483 *guard_size = attr->guard_size;
484 return 0;
485}
486
487int pthread_getattr_np(pthread_t thid, pthread_attr_t * attr)
488{
489 pthread_internal_t * thread = (pthread_internal_t *)thid;
490 *attr = thread->attr;
491 return 0;
492}
493
int pthread_attr_setscope(pthread_attr_t *attr, int scope)
{
    switch (scope) {
    case PTHREAD_SCOPE_SYSTEM:
        return 0;           /* the only scope we implement */
    case PTHREAD_SCOPE_PROCESS:
        return ENOTSUP;     /* recognized, but unsupported */
    default:
        return EINVAL;
    }
}
503
504int pthread_attr_getscope(pthread_attr_t const *attr)
505{
506 return PTHREAD_SCOPE_SYSTEM;
507}
508
509
510/* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support C++ exceptions
511 * and thread cancelation
512 */
513
514void __pthread_cleanup_push( __pthread_cleanup_t* c,
515 __pthread_cleanup_func_t routine,
516 void* arg )
517{
518 pthread_internal_t* thread = __get_thread();
519
520 c->__cleanup_routine = routine;
521 c->__cleanup_arg = arg;
522 c->__cleanup_prev = thread->cleanup_stack;
523 thread->cleanup_stack = c;
524}
525
526void __pthread_cleanup_pop( __pthread_cleanup_t* c, int execute )
527{
528 pthread_internal_t* thread = __get_thread();
529
530 thread->cleanup_stack = c->__cleanup_prev;
531 if (execute)
532 c->__cleanup_routine(c->__cleanup_arg);
533}
534
535/* used by pthread_exit() to clean all TLS keys of the current thread */
536static void pthread_key_clean_all(void);
537
538void pthread_exit(void * retval)
539{
540 pthread_internal_t* thread = __get_thread();
541 void* stack_base = thread->attr.stack_base;
542 int stack_size = thread->attr.stack_size;
543 int user_stack = (thread->attr.flags & PTHREAD_ATTR_FLAG_USER_STACK) != 0;
544
545 // call the cleanup handlers first
546 while (thread->cleanup_stack) {
547 __pthread_cleanup_t* c = thread->cleanup_stack;
548 thread->cleanup_stack = c->__cleanup_prev;
549 c->__cleanup_routine(c->__cleanup_arg);
550 }
551
552 // call the TLS destructors, it is important to do that before removing this
553 // thread from the global list. this will ensure that if someone else deletes
554 // a TLS key, the corresponding value will be set to NULL in this thread's TLS
555 // space (see pthread_key_delete)
556 pthread_key_clean_all();
557
558 // if the thread is detached, destroy the pthread_internal_t
559 // otherwise, keep it in memory and signal any joiners
560 if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
561 _pthread_internal_remove(thread);
562 _pthread_internal_free(thread);
563 } else {
564 /* the join_count field is used to store the number of threads waiting for
565 * the termination of this thread with pthread_join(),
566 *
567 * if it is positive we need to signal the waiters, and we do not touch
568 * the count (it will be decremented by the waiters, the last one will
569 * also remove/free the thread structure
570 *
571 * if it is zero, we set the count value to -1 to indicate that the
572 * thread is in 'zombie' state: it has stopped executing, and its stack
573 * is gone (as well as its TLS area). when another thread calls pthread_join()
574 * on it, it will immediately free the thread and return.
575 */
576 pthread_mutex_lock(&gThreadListLock);
577 thread->return_value = retval;
578 if (thread->join_count > 0) {
579 pthread_cond_broadcast(&thread->join_cond);
580 } else {
581 thread->join_count = -1; /* zombie thread */
582 }
583 pthread_mutex_unlock(&gThreadListLock);
584 }
585
586 // destroy the thread stack
587 if (user_stack)
588 _exit_thread((int)retval);
589 else
590 _exit_with_stack_teardown(stack_base, stack_size, (int)retval);
591}
592
593int pthread_join(pthread_t thid, void ** ret_val)
594{
595 pthread_internal_t* thread = (pthread_internal_t*)thid;
596 int count;
597
598 // check that the thread still exists and is not detached
599 pthread_mutex_lock(&gThreadListLock);
600
601 for (thread = gThreadList; thread != NULL; thread = thread->next)
602 if (thread == (pthread_internal_t*)thid)
603 break;
604
605 if (!thread) {
606 pthread_mutex_unlock(&gThreadListLock);
607 return ESRCH;
608 }
609
610 if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
611 pthread_mutex_unlock(&gThreadListLock);
612 return EINVAL;
613 }
614
615 /* wait for thread death when needed
616 *
617 * if the 'join_count' is negative, this is a 'zombie' thread that
618 * is already dead and without stack/TLS
619 *
620 * otherwise, we need to increment 'join-count' and wait to be signaled
621 */
622 count = thread->join_count;
623 if (count >= 0) {
624 thread->join_count += 1;
625 pthread_cond_wait( &thread->join_cond, &gThreadListLock );
626 count = --thread->join_count;
627 }
628 if (ret_val)
629 *ret_val = thread->return_value;
630
631 /* remove thread descriptor when we're the last joiner or when the
632 * thread was already a zombie.
633 */
634 if (count <= 0) {
635 _pthread_internal_remove_locked(thread);
636 _pthread_internal_free(thread);
637 }
638 pthread_mutex_unlock(&gThreadListLock);
639 return 0;
640}
641
642int pthread_detach( pthread_t thid )
643{
644 pthread_internal_t* thread;
645 int result = 0;
646 int flags;
647
648 pthread_mutex_lock(&gThreadListLock);
649 for (thread = gThreadList; thread != NULL; thread = thread->next)
650 if (thread == (pthread_internal_t*)thid)
651 goto FoundIt;
652
653 result = ESRCH;
654 goto Exit;
655
656FoundIt:
657 do {
658 flags = thread->attr.flags;
659
660 if ( flags & PTHREAD_ATTR_FLAG_DETACHED ) {
661 /* thread is not joinable ! */
662 result = EINVAL;
663 goto Exit;
664 }
665 }
666 while ( __atomic_cmpxchg( flags, flags | PTHREAD_ATTR_FLAG_DETACHED,
667 (volatile int*)&thread->attr.flags ) != 0 );
668Exit:
669 pthread_mutex_unlock(&gThreadListLock);
670 return result;
671}
672
673pthread_t pthread_self(void)
674{
675 return (pthread_t)__get_thread();
676}
677
int pthread_equal(pthread_t one, pthread_t two)
{
    /* descriptor addresses identify threads uniquely */
    return (one == two) ? 1 : 0;
}
682
683int pthread_getschedparam(pthread_t thid, int * policy,
684 struct sched_param * param)
685{
686 int old_errno = errno;
687
688 pthread_internal_t * thread = (pthread_internal_t *)thid;
689 int err = sched_getparam(thread->kernel_id, param);
690 if (!err) {
691 *policy = sched_getscheduler(thread->kernel_id);
692 } else {
693 err = errno;
694 errno = old_errno;
695 }
696 return err;
697}
698
699int pthread_setschedparam(pthread_t thid, int policy,
700 struct sched_param const * param)
701{
702 pthread_internal_t * thread = (pthread_internal_t *)thid;
703 int old_errno = errno;
704 int ret;
705
706 ret = sched_setscheduler(thread->kernel_id, policy, param);
707 if (ret < 0) {
708 ret = errno;
709 errno = old_errno;
710 }
711 return ret;
712}
713
714
/* futex primitives; the *_private variants use FUTEX_PRIVATE_FLAG-style
 * process-local waits, the plain ones work across processes */
int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout);
int __futex_wake(volatile void *ftx, int count);

int __futex_wait_private(volatile void *ftx, int val, const struct timespec *timeout);
int __futex_wake_private(volatile void *ftx, int count);

/* mutex lock states:
 *   0: unlocked
 *   1: locked, no waiters
 *   2: locked, maybe waiters
 *
 * a mutex is implemented as a 32-bit integer holding the following fields:
 *
 *   bits:   name     description
 *   31-16   tid      owner thread's kernel id (recursive and errorcheck only)
 *   15-14   type     mutex type
 *   13      sharing  sharing flag
 *   12-2    counter  counter of recursive mutexes
 *   1-0     state    lock state (0, 1 or 2)
 */

#define  MUTEX_OWNER(m)    (((m)->value >> 16) & 0xffff)
#define  MUTEX_COUNTER(m)  (((m)->value >> 2) & 0xfff)

#define  MUTEX_TYPE_MASK        0xc000
#define  MUTEX_TYPE_NORMAL      0x0000
#define  MUTEX_TYPE_RECURSIVE   0x4000
#define  MUTEX_TYPE_ERRORCHECK  0x8000

#define  MUTEX_COUNTER_SHIFT  2
#define  MUTEX_COUNTER_MASK   0x1ffc

#define  MUTEX_SHARING_MASK   0x2000
#define  MUTEX_IS_SHARED(m)   (((m)->value & MUTEX_SHARING_MASK) != 0)

/* A mutex attribute stores the following in its fields:
 *
 *   bits:   name     description
 *   0-3     type     type of mutex (NORMAL/RECURSIVE/ERRORCHECK)
 *   4       sharing  1 if shared, or 0 otherwise.
 */

#define  MUTEXATTR_TYPE_MASK     0x0007
#define  MUTEXATTR_SHARING_MASK  0x0010
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800762
763int pthread_mutexattr_init(pthread_mutexattr_t *attr)
764{
765 if (attr) {
766 *attr = PTHREAD_MUTEX_DEFAULT;
767 return 0;
768 } else {
769 return EINVAL;
770 }
771}
772
773int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
774{
775 if (attr) {
776 *attr = -1;
777 return 0;
778 } else {
779 return EINVAL;
780 }
781}
782
783int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
784{
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800785 if (attr) {
786 int atype = (*attr & MUTEXATTR_TYPE_MASK);
787 if (atype >= PTHREAD_MUTEX_NORMAL && atype <= PTHREAD_MUTEX_ERRORCHECK) {
788 *type = atype;
789 return 0;
790 }
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800791 }
792 return EINVAL;
793}
794
795int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
796{
797 if (attr && type >= PTHREAD_MUTEX_NORMAL &&
798 type <= PTHREAD_MUTEX_ERRORCHECK ) {
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800799 *attr = (*attr & ~MUTEXATTR_TYPE_MASK) | type;
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800800 return 0;
801 }
802 return EINVAL;
803}
804
805/* process-shared mutexes are not supported at the moment */
806
807int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared)
808{
809 if (!attr)
810 return EINVAL;
811
Mathias Agopianb7681162009-07-13 22:00:33 -0700812 switch (pshared) {
813 case PTHREAD_PROCESS_PRIVATE:
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800814 *attr &= ~MUTEXATTR_SHARING_MASK;
815 return 0;
816
Mathias Agopianb7681162009-07-13 22:00:33 -0700817 case PTHREAD_PROCESS_SHARED:
818 /* our current implementation of pthread actually supports shared
819 * mutexes but won't cleanup if a process dies with the mutex held.
820 * Nevertheless, it's better than nothing. Shared mutexes are used
821 * by surfaceflinger and audioflinger.
822 */
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800823 *attr |= MUTEXATTR_SHARING_MASK;
Mathias Agopianb7681162009-07-13 22:00:33 -0700824 return 0;
825 }
826
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800827 return EINVAL;
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800828}
829
830int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared)
831{
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800832 if (!attr || !pshared)
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800833 return EINVAL;
834
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800835 *pshared = (*attr & MUTEXATTR_SHARING_MASK) ? PTHREAD_PROCESS_SHARED
836 : PTHREAD_PROCESS_PRIVATE;
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800837 return 0;
838}
839
840int pthread_mutex_init(pthread_mutex_t *mutex,
841 const pthread_mutexattr_t *attr)
842{
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800843 int value = 0;
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800844
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800845 if (__unlikely(mutex == NULL))
846 return EINVAL;
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800847
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800848 if (__likely(attr == NULL)) {
849 mutex->value = MUTEX_TYPE_NORMAL;
850 return 0;
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800851 }
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800852
853 if ((*attr & MUTEXATTR_SHARING_MASK) != 0)
854 value |= MUTEX_SHARING_MASK;
855
856 switch (*attr & MUTEXATTR_TYPE_MASK) {
857 case PTHREAD_MUTEX_NORMAL:
858 value |= MUTEX_TYPE_NORMAL;
859 break;
860 case PTHREAD_MUTEX_RECURSIVE:
861 value |= MUTEX_TYPE_RECURSIVE;
862 break;
863 case PTHREAD_MUTEX_ERRORCHECK:
864 value |= MUTEX_TYPE_ERRORCHECK;
865 break;
866 default:
867 return EINVAL;
868 }
869 mutex->value = value;
870 return 0;
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800871}
872
873int pthread_mutex_destroy(pthread_mutex_t *mutex)
874{
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800875 if (__unlikely(mutex == NULL))
876 return EINVAL;
877
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800878 mutex->value = 0xdead10cc;
879 return 0;
880}
881
882
883/*
884 * Lock a non-recursive mutex.
885 *
886 * As noted above, there are three states:
887 * 0 (unlocked, no contention)
888 * 1 (locked, no contention)
889 * 2 (locked, contention)
890 *
891 * Non-recursive mutexes don't use the thread-id or counter fields, and the
892 * "type" value is zero, so the only bits that will be set are the ones in
893 * the lock state field.
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800894 *
895 * This routine is used for both shared and private mutexes.
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800896 */
897static __inline__ void
898_normal_lock(pthread_mutex_t* mutex)
899{
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800900 if (__likely(!MUTEX_IS_SHARED(mutex))) {
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800901 /*
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800902 * The common case is an unlocked mutex, so we begin by trying to
903 * change the lock's state from 0 to 1. __atomic_cmpxchg() returns 0
904 * if it made the swap successfully. If the result is nonzero, this
905 * lock is already held by another thread.
906 */
907 if (__atomic_cmpxchg(0, 1, &mutex->value) != 0) {
908 /*
909 * We want to go to sleep until the mutex is available, which
910 * requires promoting it to state 2. We need to swap in the new
911 * state value and then wait until somebody wakes us up.
912 *
913 * __atomic_swap() returns the previous value. We swap 2 in and
914 * see if we got zero back; if so, we have acquired the lock. If
915 * not, another thread still holds the lock and we wait again.
916 *
917 * The second argument to the __futex_wait() call is compared
918 * against the current value. If it doesn't match, __futex_wait()
919 * returns immediately (otherwise, it sleeps for a time specified
920 * by the third argument; 0 means sleep forever). This ensures
921 * that the mutex is in state 2 when we go to sleep on it, which
922 * guarantees a wake-up call.
923 */
924 while (__atomic_swap(2, &mutex->value ) != 0)
925 __futex_wait_private(&mutex->value, 2, 0);
926 }
927 } else {
928 /* Same algorithm, with the sharing bit flag set */
929 const int sharing = MUTEX_SHARING_MASK;
930 if (__atomic_cmpxchg(sharing|0, sharing|1, &mutex->value) != 0) {
931 while (__atomic_swap(sharing|2, &mutex->value ) != (sharing|0))
932 __futex_wait(&mutex->value, sharing|2, 0);
933 }
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800934 }
935}
936
937/*
938 * Release a non-recursive mutex. The caller is responsible for determining
939 * that we are in fact the owner of this lock.
940 */
941static __inline__ void
942_normal_unlock(pthread_mutex_t* mutex)
943{
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800944 if (__likely(!MUTEX_IS_SHARED(mutex))) {
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800945 /*
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800946 * The mutex value will be 1 or (rarely) 2. We use an atomic decrement
947 * to release the lock. __atomic_dec() returns the previous value;
948 * if it wasn't 1 we have to do some additional work.
949 */
950 if (__atomic_dec(&mutex->value) != 1) {
951 /*
952 * Start by releasing the lock. The decrement changed it from
953 * "contended lock" to "uncontended lock", which means we still
954 * hold it, and anybody who tries to sneak in will push it back
955 * to state 2.
956 *
957 * Once we set it to zero the lock is up for grabs. We follow
958 * this with a __futex_wake() to ensure that one of the waiting
959 * threads has a chance to grab it.
960 *
961 * This doesn't cause a race with the swap/wait pair in
962 * _normal_lock(), because the __futex_wait() call there will
963 * return immediately if the mutex value isn't 2.
964 */
965 mutex->value = 0;
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800966
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -0800967 /*
968 * Wake up one waiting thread. We don't know which thread will be
969 * woken or when it'll start executing -- futexes make no guarantees
970 * here. There may not even be a thread waiting.
971 *
972 * The newly-woken thread will replace the 0 we just set above
973 * with 2, which means that when it eventually releases the mutex
974 * it will also call FUTEX_WAKE. This results in one extra wake
975 * call whenever a lock is contended, but lets us avoid forgetting
976 * anyone without requiring us to track the number of sleepers.
977 *
978 * It's possible for another thread to sneak in and grab the lock
979 * between the zero assignment above and the wake call below. If
980 * the new thread is "slow" and holds the lock for a while, we'll
981 * wake up a sleeper, which will swap in a 2 and then go back to
982 * sleep since the lock is still held. If the new thread is "fast",
983 * running to completion before we call wake, the thread we
984 * eventually wake will find an unlocked mutex and will execute.
985 * Either way we have correct behavior and nobody is orphaned on
986 * the wait queue.
987 */
988 __futex_wake_private(&mutex->value, 1);
989 }
990 } else {
991 /* Same algorithm with sharing bit flag set */
992 const int sharing = MUTEX_SHARING_MASK;
993 if (__atomic_dec(&mutex->value) != (sharing|1)) {
994 mutex->value = sharing;
995 __futex_wake(&mutex->value, 1);
996 }
The Android Open Source Project1dc9e472009-03-03 19:28:35 -0800997 }
998}
999
1000static pthread_mutex_t __recursive_lock = PTHREAD_MUTEX_INITIALIZER;
1001
1002static void
1003_recursive_lock(void)
1004{
1005 _normal_lock( &__recursive_lock);
1006}
1007
1008static void
1009_recursive_unlock(void)
1010{
1011 _normal_unlock( &__recursive_lock );
1012}
1013
/* Lock 'mutex', blocking until it is acquired.
 *
 * Returns 0 on success, EINVAL for a NULL mutex, or EDEADLK when an
 * errorcheck mutex is re-locked by its current owner. Normal mutexes
 * are dispatched to the fast futex-based _normal_lock(); recursive and
 * errorcheck mutexes go through the slower path below, which serializes
 * all state changes with the global __recursive_lock.
 */
int pthread_mutex_lock(pthread_mutex_t *mutex)
{
    int mtype, tid, new_lock_type, sharing;

    if (__unlikely(mutex == NULL))
        return EINVAL;

    /* get mutex type */
    mtype = (mutex->value & MUTEX_TYPE_MASK);

    /* Handle normal mutexes quickly */
    if ( __likely(mtype == MUTEX_TYPE_NORMAL) ) {
        _normal_lock(mutex);
        return 0;
    }

    /* This is a recursive or error check mutex.
     * Check that we don't already own it.
     */
    tid = __get_thread()->kernel_id;
    if ( tid == MUTEX_OWNER(mutex) )
    {
        int oldv, counter;

        if (mtype == MUTEX_TYPE_ERRORCHECK) {
            /* trying to re-lock a mutex we already acquired */
            return EDEADLK;
        }
        /*
         * We own the mutex, but other threads are able to change
         * the contents (e.g. promoting it to "contended"), so we
         * need to hold the global lock.
         */
        _recursive_lock();
        oldv = mutex->value;
        /* bump the recursion counter; the addition is masked so only
         * the counter bit-field is affected */
        counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
        mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
        _recursive_unlock();

        return 0;
    }

    /* We don't own it, try to lock it.
     * If the new lock is available immediately, we grab it in
     * the "uncontended" state.
     */
    new_lock_type = 1;
    sharing = (mutex->value & MUTEX_SHARING_MASK);

    mtype |= sharing;  /* restore sharing bit flag */

    /* here, mtype corresponds to the uncontended value for the mutex,
     * i.e. something like:
     *
     *    <tid=0><type=?><sharing=?><counter=0><state=0>
     */

    for (;;) {
        int oldv;

        /* inspect/update the mutex word under the global lock */
        _recursive_lock();
        oldv = mutex->value;
        if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
            mutex->value = ((tid << 16) | mtype | new_lock_type);
        } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
            oldv ^= 3;
            mutex->value = oldv;
        }
        _recursive_unlock();

        /* acquisition succeeded only if we observed the uncontended value */
        if (oldv == mtype)
            break;

        /*
         * The lock was held, possibly contended by others.  From
         * now on, if we manage to acquire the lock, we have to
         * assume that others are still contending for it so that
         * we'll wake them when we unlock it.
         */
        new_lock_type = 2;

        /* sleep until the mutex word changes from 'oldv'; a private
         * futex is used unless the mutex is process-shared */
        if (sharing) {
            __futex_wait(&mutex->value, oldv, 0);
        } else {
            __futex_wait_private(&mutex->value, oldv, 0);
        }
    }
    return 0;
}
1103
1104
/* Unlock 'mutex'.
 *
 * Returns 0 on success, EINVAL for a NULL mutex, or EPERM when the
 * calling thread does not own a recursive/errorcheck mutex. For those
 * mutex types, a non-zero recursion counter is decremented instead of
 * releasing the lock; on the final unlock, one waiter is woken if the
 * mutex was in the contended state (low bits == 2).
 */
int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
    int mtype, tid, sharing, oldv;

    if (__unlikely(mutex == NULL))
        return EINVAL;

    mtype = (mutex->value & MUTEX_TYPE_MASK);

    /* normal mutexes take the fast path */
    if (__likely(mtype == MUTEX_TYPE_NORMAL)) {
        _normal_unlock(mutex);
        return 0;
    }

    tid = __get_thread()->kernel_id;
    sharing = (mutex->value & MUTEX_SHARING_MASK);

    mtype |= sharing; /* restore sharing bit flag */

    /* ensure that we own the mutex */
    if (__unlikely(tid != MUTEX_OWNER(mutex)))
        return EPERM;

    /* decrement or unlock it */
    _recursive_lock();
    oldv = mutex->value;
    if (oldv & MUTEX_COUNTER_MASK) {
        /* decrement non-0 counter */
        mutex->value = oldv - (1 << MUTEX_COUNTER_SHIFT);
        /* clear oldv so the wake test below is skipped: the mutex
         * is still held by this thread */
        oldv = 0;
    } else {
        /* counter was 0, revert to uncontended value */
        mutex->value = mtype;
    }
    _recursive_unlock();

    /* if the mutex was contended, wake one waiting thread */
    if ((oldv & 3) == 2) {
        if (sharing) {
            __futex_wake(&mutex->value, 1);
        } else {
            __futex_wake_private(&mutex->value, 1);
        }
    }
    return 0;
}
1151
1152
/* Try to lock 'mutex' without blocking.
 *
 * Returns 0 if the lock was acquired (or its recursion counter bumped),
 * EINVAL for a NULL mutex, EBUSY if another thread holds it, and
 * EDEADLK when an errorcheck mutex is already owned by the caller.
 */
int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
    int mtype, sharing, tid, oldv;

    if (__unlikely(mutex == NULL))
        return EINVAL;

    mtype = (mutex->value & MUTEX_TYPE_MASK);

    /* handle normal mutex first */
    if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
    {
        /* NOTE: this 'sharing' intentionally shadows the outer variable,
         * which is only assigned on the recursive/errorcheck path below */
        int sharing = (mutex->value & MUTEX_SHARING_MASK);

        /* single atomic attempt: uncontended (0) -> locked (1) */
        if (__atomic_cmpxchg(sharing|0, sharing|1, &mutex->value) == 0)
            return 0;

        return EBUSY;
    }

    /* recursive or errorcheck mutex, do we already own it ? */
    tid = __get_thread()->kernel_id;
    sharing = mutex->value & MUTEX_SHARING_MASK;

    if ( tid == MUTEX_OWNER(mutex) )
    {
        int counter;

        if (mtype == MUTEX_TYPE_ERRORCHECK) {
            /* already locked by ourselves */
            return EDEADLK;
        }

        /* recursive re-lock: bump the counter under the global lock */
        _recursive_lock();
        oldv = mutex->value;
        counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
        mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
        _recursive_unlock();
        return 0;
    }

    /* we don't own it, so try to get it */
    mtype |= sharing;

    /* try to lock it: only succeeds if the word still holds the
     * uncontended value 'mtype' */
    _recursive_lock();
    oldv = mutex->value;
    if (oldv == mtype) /* uncontended released lock => state 1 */
        mutex->value = ((tid << 16) | mtype | 1);
    _recursive_unlock();

    if (oldv != mtype)
        return EBUSY;

    return 0;
}
1209
1210
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001211/* initialize 'ts' with the difference between 'abstime' and the current time
1212 * according to 'clock'. Returns -1 if abstime already expired, or 0 otherwise.
1213 */
static int
__timespec_to_absolute(struct timespec* ts, const struct timespec* abstime, clockid_t clock)
{
    /* Compute the remaining time before 'abstime' on 'clock' and store
     * it into 'ts'. Returns -1 when the deadline already passed, 0
     * otherwise.
     */
    struct timespec now;

    clock_gettime(clock, &now);

    ts->tv_sec  = abstime->tv_sec  - now.tv_sec;
    ts->tv_nsec = abstime->tv_nsec - now.tv_nsec;
    if (ts->tv_nsec < 0) {
        /* borrow one second to normalize the nanosecond field */
        ts->tv_nsec += 1000000000;
        ts->tv_sec  -= 1;
    }
    return (ts->tv_sec < 0 || ts->tv_nsec < 0) ? -1 : 0;
}
1229
1230/* initialize 'abstime' to the current time according to 'clock' plus 'msecs'
1231 * milliseconds.
1232 */
static void
__timespec_to_relative_msec(struct timespec* abstime, unsigned msecs, clockid_t clock)
{
    /* Set '*abstime' to now-on-'clock' plus 'msecs' milliseconds,
     * keeping tv_nsec normalized into [0, 1e9).
     */
    unsigned sec  = msecs / 1000;
    unsigned nsec = (msecs % 1000) * 1000000;

    clock_gettime(clock, abstime);
    abstime->tv_sec  += sec;
    abstime->tv_nsec += nsec;
    if (abstime->tv_nsec >= 1000000000) {
        abstime->tv_nsec -= 1000000000;
        abstime->tv_sec  += 1;
    }
}
1244
1245int pthread_mutex_lock_timeout_np(pthread_mutex_t *mutex, unsigned msecs)
1246{
1247 clockid_t clock = CLOCK_MONOTONIC;
1248 struct timespec abstime;
1249 struct timespec ts;
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001250 int mtype, tid, oldv, sharing, new_lock_type;
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001251
1252 /* compute absolute expiration time */
1253 __timespec_to_relative_msec(&abstime, msecs, clock);
1254
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001255 if (__unlikely(mutex == NULL))
1256 return EINVAL;
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001257
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001258
1259 /* handle normal mutexes first */
1260 mtype = (mutex->value & MUTEX_TYPE_MASK);
1261
1262 if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
1263 {
1264 if (__likely(!MUTEX_IS_SHARED(mutex))) {
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001265 /* fast path for unconteded lock */
1266 if (__atomic_cmpxchg(0, 1, &mutex->value) == 0)
1267 return 0;
1268
1269 /* loop while needed */
1270 while (__atomic_swap(2, &mutex->value) != 0) {
1271 if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
1272 return EBUSY;
1273
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001274 __futex_wait_private(&mutex->value, 2, &ts);
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001275 }
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001276 } else /* sharing */ {
1277 const int sharing = MUTEX_SHARING_MASK;
1278 if (__atomic_cmpxchg(sharing|0, sharing|1, &mutex->value) == 0)
1279 return 0;
1280
1281 /* loop while needed */
1282 while (__atomic_swap(sharing|2, &mutex->value) != (sharing|0)) {
1283 if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
1284 return EBUSY;
1285
1286 __futex_wait(&mutex->value, sharing|2, &ts);
1287 }
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001288 }
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001289 return 0;
1290 }
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001291
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001292 /* recursive or errorcheck - do we own the mutex ? */
1293 tid = __get_thread()->kernel_id;
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001294
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001295 if ( tid == MUTEX_OWNER(mutex) )
1296 {
1297 int counter;
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001298
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001299 if (mtype == MUTEX_TYPE_ERRORCHECK) {
1300 /* already locked by ourselves */
1301 return EDEADLK;
1302 }
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001303
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001304 _recursive_lock();
1305 oldv = mutex->value;
1306 counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
1307 mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
1308 _recursive_unlock();
1309 return 0;
1310 }
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001311
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001312 /* we don't own it, try to lock it */
1313 new_lock_type = 1;
1314 sharing = (mutex->value & MUTEX_SHARING_MASK);
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001315
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001316 mtype |= sharing;
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001317
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001318 for (;;) {
1319 struct timespec ts;
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001320
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001321 _recursive_lock();
1322 oldv = mutex->value;
1323 if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
1324 mutex->value = ((tid << 16) | mtype | new_lock_type);
1325 } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
1326 oldv ^= 3;
1327 mutex->value = oldv;
1328 }
1329 _recursive_unlock();
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001330
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001331 if (oldv == mtype)
1332 break;
1333
1334 /*
1335 * The lock was held, possibly contended by others. From
1336 * now on, if we manage to acquire the lock, we have to
1337 * assume that others are still contending for it so that
1338 * we'll wake them when we unlock it.
1339 */
1340 new_lock_type = 2;
1341
1342 if (__timespec_to_absolute(&ts, &abstime, clock) < 0)
1343 return EBUSY;
1344
1345 if (sharing) {
1346 __futex_wait(&mutex->value, oldv, &ts);
1347 } else {
1348 __futex_wait_private(&mutex->value, oldv, &ts);
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001349 }
1350 }
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001351 return 0;
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001352}
1353
1354
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001355int
1356pthread_condattr_init(pthread_condattr_t *attr)
1357{
1358 *attr = PTHREAD_PROCESS_PRIVATE;
1359 return 0;
1360}
1361
1362int
1363pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
1364{
1365 if (attr == NULL)
1366 return EINVAL;
1367
1368 if (pshared != PTHREAD_PROCESS_PRIVATE &&
1369 pshared != PTHREAD_PROCESS_SHARED)
1370 return EINVAL;
1371
1372 *attr = pshared;
1373 return 0;
1374}
1375
1376int
1377pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared)
1378{
1379 if (attr == NULL || pshared == NULL)
1380 return EINVAL;
1381
1382 *pshared = *attr;
1383 return 0;
1384}
1385
1386int
1387pthread_condattr_destroy(pthread_condattr_t *attr)
1388{
1389 *attr = 0xdeada11d;
1390 return 0;
1391}
1392
The Android Open Source Project1dc9e472009-03-03 19:28:35 -08001393/* XXX *technically* there is a race condition that could allow
1394 * XXX a signal to be missed. If thread A is preempted in _wait()
1395 * XXX after unlocking the mutex and before waiting, and if other
1396 * XXX threads call signal or broadcast UINT_MAX times (exactly),
1397 * XXX before thread A is scheduled again and calls futex_wait(),
1398 * XXX then the signal will be lost.
1399 */
1400
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001401/* Condition variables:
1402 * bits name description
1403 * 0 sharing 1 if process-shared, 0 if private
 *    1-31  counter   counter, decremented on each signal/broadcast
1405 */
1406
1407#define COND_SHARING_MASK 0x0001
1408#define COND_COUNTER_INCREMENT 0x0002
1409#define COND_COUNTER_MASK (~COND_SHARING_MASK)
1410
1411#define COND_IS_SHARED(cond) (((cond)->value & COND_SHARING_MASK) != 0)
1412
The Android Open Source Project1dc9e472009-03-03 19:28:35 -08001413int pthread_cond_init(pthread_cond_t *cond,
1414 const pthread_condattr_t *attr)
1415{
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001416 if (cond == NULL)
1417 return EINVAL;
1418
The Android Open Source Project1dc9e472009-03-03 19:28:35 -08001419 cond->value = 0;
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001420
1421 if (attr != NULL && *attr == PTHREAD_PROCESS_SHARED)
1422 cond->value |= COND_SHARING_MASK;
1423
The Android Open Source Project1dc9e472009-03-03 19:28:35 -08001424 return 0;
1425}
1426
1427int pthread_cond_destroy(pthread_cond_t *cond)
1428{
1429 cond->value = 0xdeadc04d;
1430 return 0;
1431}
1432
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001433/* This function is used by pthread_cond_broadcast and
1434 * pthread_cond_signal to 'pulse' the condition variable.
1435 *
1436 * This means atomically decrementing the counter value
1437 * while leaving the other bits untouched.
1438 */
static void
__pthread_cond_pulse(pthread_cond_t *cond)
{
    /* Atomically subtract one COND_COUNTER_INCREMENT from the counter
     * bits of cond->value while preserving the non-counter (sharing)
     * bits, retrying the compare-and-swap until it succeeds. Waiters
     * blocked in __futex_wait on the old value will then wake.
     *
     * NOTE(review): 'flags' is sampled once before the loop; this
     * assumes the sharing bit never changes while the condvar is in
     * use -- confirm no caller mutates it concurrently.
     */
    long flags = (cond->value & ~COND_COUNTER_MASK);

    for (;;) {
        long oldval = cond->value;
        long newval = ((oldval - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK) | flags;
        /* __atomic_cmpxchg returns 0 on a successful swap */
        if (__atomic_cmpxchg(oldval, newval, &cond->value) == 0)
            break;
    }
}
1451
The Android Open Source Project1dc9e472009-03-03 19:28:35 -08001452int pthread_cond_broadcast(pthread_cond_t *cond)
1453{
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001454 if (cond == NULL)
1455 return EINVAL;
1456
1457 __pthread_cond_pulse(cond);
1458
1459 if (COND_IS_SHARED(cond)) {
1460 __futex_wake(&cond->value, INT_MAX);
1461 } else {
1462 __futex_wake_private(&cond->value, INT_MAX);
1463 }
1464
The Android Open Source Project1dc9e472009-03-03 19:28:35 -08001465 return 0;
1466}
1467
1468int pthread_cond_signal(pthread_cond_t *cond)
1469{
David 'Digit' Turnerba9c6f02010-03-10 16:44:08 -08001470 if (cond == NULL)
1471 return EINVAL;
1472
1473 __pthread_cond_pulse(cond);
1474
1475 if (COND_IS_SHARED(cond)) {
1476 __futex_wake(&cond->value, 1);
1477 } else {
1478 __futex_wake_private(&cond->value, 1);
1479 }
The Android Open Source Project1dc9e472009-03-03 19:28:35 -08001480 return 0;
1481}
1482
1483int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
1484{
1485 return pthread_cond_timedwait(cond, mutex, NULL);
1486}
1487
/* Core condvar wait: release 'mutex', sleep until cond->value changes
 * from the snapshot taken below or 'reltime' elapses (NULL = forever),
 * then re-acquire 'mutex'. Returns ETIMEDOUT on timeout, 0 otherwise
 * (including spurious wakeups, which callers must tolerate).
 */
int __pthread_cond_timedwait_relative(pthread_cond_t *cond,
                                      pthread_mutex_t * mutex,
                                      const struct timespec *reltime)
{
    int  status;
    /* snapshot the condvar word BEFORE unlocking: a signal delivered
     * between the unlock and the futex call changes cond->value, which
     * makes __futex_wait return immediately instead of missing it */
    int  oldvalue = cond->value;

    pthread_mutex_unlock(mutex);
    if (COND_IS_SHARED(cond)) {
        status = __futex_wait(&cond->value, oldvalue, reltime);
    } else {
        status = __futex_wait_private(&cond->value, oldvalue, reltime);
    }
    pthread_mutex_lock(mutex);

    /* only a timeout is reported; every other futex result is a wake */
    if (status == (-ETIMEDOUT)) return ETIMEDOUT;
    return 0;
}
1506
1507int __pthread_cond_timedwait(pthread_cond_t *cond,
1508 pthread_mutex_t * mutex,
1509 const struct timespec *abstime,
1510 clockid_t clock)
1511{
1512 struct timespec ts;
1513 struct timespec * tsp;
1514
1515 if (abstime != NULL) {
David 'Digit' Turner3f56b7f2009-09-22 12:40:22 -07001516 if (__timespec_to_absolute(&ts, abstime, clock) < 0)
The Android Open Source Project1dc9e472009-03-03 19:28:35 -08001517 return ETIMEDOUT;
The Android Open Source Project1dc9e472009-03-03 19:28:35 -08001518 tsp = &ts;
1519 } else {
1520 tsp = NULL;
1521 }
1522
1523 return __pthread_cond_timedwait_relative(cond, mutex, tsp);
1524}
1525
1526int pthread_cond_timedwait(pthread_cond_t *cond,
1527 pthread_mutex_t * mutex,
1528 const struct timespec *abstime)
1529{
1530 return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_REALTIME);
1531}
1532
1533
Mathias Agopiana2f5e212009-07-13 15:00:46 -07001534/* this one exists only for backward binary compatibility */
The Android Open Source Project1dc9e472009-03-03 19:28:35 -08001535int pthread_cond_timedwait_monotonic(pthread_cond_t *cond,
1536 pthread_mutex_t * mutex,
1537 const struct timespec *abstime)
1538{
1539 return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
1540}
1541
Mathias Agopiana2f5e212009-07-13 15:00:46 -07001542int pthread_cond_timedwait_monotonic_np(pthread_cond_t *cond,
1543 pthread_mutex_t * mutex,
1544 const struct timespec *abstime)
1545{
1546 return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
1547}
1548
1549int pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
1550 pthread_mutex_t * mutex,
1551 const struct timespec *reltime)
1552{
1553 return __pthread_cond_timedwait_relative(cond, mutex, reltime);
1554}
1555
The Android Open Source Project1dc9e472009-03-03 19:28:35 -08001556int pthread_cond_timeout_np(pthread_cond_t *cond,
1557 pthread_mutex_t * mutex,
1558 unsigned msecs)
1559{
The Android Open Source Project1dc9e472009-03-03 19:28:35 -08001560 struct timespec ts;
The Android Open Source Project1dc9e472009-03-03 19:28:35 -08001561
1562 ts.tv_sec = msecs / 1000;
1563 ts.tv_nsec = (msecs % 1000) * 1000000;
1564
Matthieu CASTETa4e67f42008-12-27 00:04:10 +01001565 return __pthread_cond_timedwait_relative(cond, mutex, &ts);
The Android Open Source Project1dc9e472009-03-03 19:28:35 -08001566}
1567
1568
1569
1570/* A technical note regarding our thread-local-storage (TLS) implementation:
1571 *
1572 * There can be up to TLSMAP_SIZE independent TLS keys in a given process,
1573 * though the first TLSMAP_START keys are reserved for Bionic to hold
1574 * special thread-specific variables like errno or a pointer to
1575 * the current thread's descriptor.
1576 *
1577 * while stored in the TLS area, these entries cannot be accessed through
1578 * pthread_getspecific() / pthread_setspecific() and pthread_key_delete()
1579 *
1580 * also, some entries in the key table are pre-allocated (see tlsmap_lock)
1581 * to greatly simplify and speedup some OpenGL-related operations. though the
 * initial value will be NULL on all threads.
1583 *
1584 * you can use pthread_getspecific()/setspecific() on these, and in theory
1585 * you could also call pthread_key_delete() as well, though this would
1586 * probably break some apps.
1587 *
1588 * The 'tlsmap_t' type defined below implements a shared global map of
1589 * currently created/allocated TLS keys and the destructors associated
1590 * with them. You should use tlsmap_lock/unlock to access it to avoid
1591 * any race condition.
1592 *
1593 * the global TLS map simply contains a bitmap of allocated keys, and
1594 * an array of destructors.
1595 *
1596 * each thread has a TLS area that is a simple array of TLSMAP_SIZE void*
1597 * pointers. the TLS area of the main thread is stack-allocated in
1598 * __libc_init_common, while the TLS area of other threads is placed at
1599 * the top of their stack in pthread_create.
1600 *
1601 * when pthread_key_create() is called, it finds the first free key in the
1602 * bitmap, then set it to 1, saving the destructor altogether
1603 *
1604 * when pthread_key_delete() is called. it will erase the key's bitmap bit
1605 * and its destructor, and will also clear the key data in the TLS area of
 * all created threads. As mandated by POSIX, it is the responsibility of
1607 * the caller of pthread_key_delete() to properly reclaim the objects that
1608 * were pointed to by these data fields (either before or after the call).
1609 *
1610 */
1611
1612/* TLS Map implementation
1613 */
1614
1615#define TLSMAP_START (TLS_SLOT_MAX_WELL_KNOWN+1)
1616#define TLSMAP_SIZE BIONIC_TLS_SLOTS
1617#define TLSMAP_BITS 32
1618#define TLSMAP_WORDS ((TLSMAP_SIZE+TLSMAP_BITS-1)/TLSMAP_BITS)
1619#define TLSMAP_WORD(m,k) (m)->map[(k)/TLSMAP_BITS]
1620#define TLSMAP_MASK(k) (1U << ((k)&(TLSMAP_BITS-1)))
1621
1622/* this macro is used to quickly check that a key belongs to a reasonable range */
1623#define TLSMAP_VALIDATE_KEY(key) \
1624 ((key) >= TLSMAP_START && (key) < TLSMAP_SIZE)
1625
1626/* the type of tls key destructor functions */
1627typedef void (*tls_dtor_t)(void*);
1628
/* Process-wide bookkeeping for TLS keys: which keys are allocated and
 * their destructors. All access goes through tlsmap_lock/tlsmap_unlock.
 */
typedef struct {
    int init; /* see comment in tlsmap_lock() */
    uint32_t map[TLSMAP_WORDS]; /* bitmap of allocated keys */
    tls_dtor_t dtors[TLSMAP_SIZE]; /* key destructors */
} tlsmap_t;
1634
1635static pthread_mutex_t _tlsmap_lock = PTHREAD_MUTEX_INITIALIZER;
1636static tlsmap_t _tlsmap;
1637
1638/* lock the global TLS map lock and return a handle to it */
1639static __inline__ tlsmap_t* tlsmap_lock(void)
1640{
1641 tlsmap_t* m = &_tlsmap;
1642
1643 pthread_mutex_lock(&_tlsmap_lock);
1644 /* we need to initialize the first entry of the 'map' array
1645 * with the value TLS_DEFAULT_ALLOC_MAP. doing it statically
1646 * when declaring _tlsmap is a bit awkward and is going to
1647 * produce warnings, so do it the first time we use the map
1648 * instead
1649 */
1650 if (__unlikely(!m->init)) {
1651 TLSMAP_WORD(m,0) = TLS_DEFAULT_ALLOC_MAP;
1652 m->init = 1;
1653 }
1654 return m;
1655}
1656
1657/* unlock the global TLS map */
1658static __inline__ void tlsmap_unlock(tlsmap_t* m)
1659{
1660 pthread_mutex_unlock(&_tlsmap_lock);
1661 (void)m; /* a good compiler is a happy compiler */
1662}
1663
/* test to see whether a key is allocated */
1665static __inline__ int tlsmap_test(tlsmap_t* m, int key)
1666{
1667 return (TLSMAP_WORD(m,key) & TLSMAP_MASK(key)) != 0;
1668}
1669
1670/* set the destructor and bit flag on a newly allocated key */
1671static __inline__ void tlsmap_set(tlsmap_t* m, int key, tls_dtor_t dtor)
1672{
1673 TLSMAP_WORD(m,key) |= TLSMAP_MASK(key);
1674 m->dtors[key] = dtor;
1675}
1676
1677/* clear the destructor and bit flag on an existing key */
1678static __inline__ void tlsmap_clear(tlsmap_t* m, int key)
1679{
1680 TLSMAP_WORD(m,key) &= ~TLSMAP_MASK(key);
1681 m->dtors[key] = NULL;
1682}
1683
1684/* allocate a new TLS key, return -1 if no room left */
1685static int tlsmap_alloc(tlsmap_t* m, tls_dtor_t dtor)
1686{
1687 int key;
1688
1689 for ( key = TLSMAP_START; key < TLSMAP_SIZE; key++ ) {
1690 if ( !tlsmap_test(m, key) ) {
1691 tlsmap_set(m, key, dtor);
1692 return key;
1693 }
1694 }
1695 return -1;
1696}
1697
1698
1699int pthread_key_create(pthread_key_t *key, void (*destructor_function)(void *))
1700{
1701 uint32_t err = ENOMEM;
1702 tlsmap_t* map = tlsmap_lock();
1703 int k = tlsmap_alloc(map, destructor_function);
1704
1705 if (k >= 0) {
1706 *key = k;
1707 err = 0;
1708 }
1709 tlsmap_unlock(map);
1710 return err;
1711}
1712
1713
1714/* This deletes a pthread_key_t. note that the standard mandates that this does
1715 * not call the destructor of non-NULL key values. Instead, it is the
 * responsibility of the caller to properly dispose of the corresponding data
1717 * and resources, using any mean it finds suitable.
1718 *
1719 * On the other hand, this function will clear the corresponding key data
1720 * values in all known threads. this prevents later (invalid) calls to
1721 * pthread_getspecific() to receive invalid/stale values.
1722 */
/* Delete TLS key 'key'.
 *
 * Per POSIX, destructors are NOT invoked; instead the key's slot is
 * cleared in every known thread so later pthread_getspecific() calls
 * return NULL rather than stale data. Returns 0 on success, EINVAL for
 * an out-of-range or unallocated key.
 *
 * Lock order: the TLS map lock is taken first, then gThreadListLock.
 */
int pthread_key_delete(pthread_key_t key)
{
    uint32_t err;
    pthread_internal_t* thr;
    tlsmap_t* map;

    if (!TLSMAP_VALIDATE_KEY(key)) {
        return EINVAL;
    }

    map = tlsmap_lock();

    if (!tlsmap_test(map, key)) {
        err = EINVAL;
        goto err1;
    }

    /* clear value in all threads */
    pthread_mutex_lock(&gThreadListLock);
    for ( thr = gThreadList; thr != NULL; thr = thr->next ) {
        /* avoid zombie threads with a negative 'join_count'. these are really
         * already dead and don't have a TLS area anymore.
         *
         * similarly, it is possible to have thr->tls == NULL for threads that
         * were just recently created through pthread_create() but whose
         * startup trampoline (__thread_entry) hasn't been run yet by the
         * scheduler. so check for this too.
         */
        if (thr->join_count < 0 || !thr->tls)
            continue;

        thr->tls[key] = NULL;
    }
    /* free the key while both locks are still held, so no thread can
     * re-allocate it and see a half-cleared state */
    tlsmap_clear(map, key);

    pthread_mutex_unlock(&gThreadListLock);
    err = 0;

err1:
    tlsmap_unlock(map);
    return err;
}
1765
1766
1767int pthread_setspecific(pthread_key_t key, const void *ptr)
1768{
1769 int err = EINVAL;
1770 tlsmap_t* map;
1771
1772 if (TLSMAP_VALIDATE_KEY(key)) {
1773 /* check that we're trying to set data for an allocated key */
1774 map = tlsmap_lock();
1775 if (tlsmap_test(map, key)) {
1776 ((uint32_t *)__get_tls())[key] = (uint32_t)ptr;
1777 err = 0;
1778 }
1779 tlsmap_unlock(map);
1780 }
1781 return err;
1782}
1783
1784void * pthread_getspecific(pthread_key_t key)
1785{
1786 if (!TLSMAP_VALIDATE_KEY(key)) {
1787 return NULL;
1788 }
1789
1790 /* for performance reason, we do not lock/unlock the global TLS map
1791 * to check that the key is properly allocated. if the key was not
1792 * allocated, the value read from the TLS should always be NULL
1793 * due to pthread_key_delete() clearing the values for all threads.
1794 */
1795 return (void *)(((unsigned *)__get_tls())[key]);
1796}
1797
1798/* Posix mandates that this be defined in <limits.h> but we don't have
1799 * it just yet.
1800 */
1801#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
1802# define PTHREAD_DESTRUCTOR_ITERATIONS 4
1803#endif
1804
1805/* this function is called from pthread_exit() to remove all TLS key data
1806 * from this thread's TLS area. this must call the destructor of all keys
1807 * that have a non-NULL data value (and a non-NULL destructor).
1808 *
1809 * because destructors can do funky things like deleting/creating other
1810 * keys, we need to implement this in a loop
1811 */
/* Run TLS key destructors for the exiting thread (called from
 * pthread_exit()). Destructors may create/delete keys or set values,
 * so we iterate up to PTHREAD_DESTRUCTOR_ITERATIONS rounds and drop
 * the map lock around each destructor call.
 */
static void pthread_key_clean_all(void)
{
    tlsmap_t*    map;
    void**       tls = (void**)__get_tls();
    int          rounds = PTHREAD_DESTRUCTOR_ITERATIONS;

    map = tlsmap_lock();

    for (rounds = PTHREAD_DESTRUCTOR_ITERATIONS; rounds > 0; rounds--)
    {
        int  kk, count = 0;  /* count = destructors invoked this round */

        for (kk = TLSMAP_START; kk < TLSMAP_SIZE; kk++) {
            if ( tlsmap_test(map, kk) )
            {
                void*       data = tls[kk];
                tls_dtor_t  dtor = map->dtors[kk];

                if (data != NULL && dtor != NULL)
                {
                    /* we need to clear the key data now, this will prevent the
                     * destructor (or a later one) from seeing the old value if
                     * it calls pthread_getspecific() for some odd reason
                     *
                     * we do not do this if 'dtor == NULL' just in case another
                     * destructor function might be responsible for manually
                     * releasing the corresponding data.
                     */
                    tls[kk] = NULL;

                    /* because the destructor is free to call pthread_key_create
                     * and/or pthread_key_delete, we need to temporarily unlock
                     * the TLS map
                     */
                    tlsmap_unlock(map);
                    (*dtor)(data);
                    map = tlsmap_lock();

                    count += 1;
                }
            }
        }

        /* if we didn't call any destructor, there is no need to check the
         * TLS data again
         */
        if (count == 0)
            break;
    }
    tlsmap_unlock(map);
}
1863
1864// man says this should be in <linux/unistd.h>, but it isn't
1865extern int tkill(int tid, int sig);
1866
1867int pthread_kill(pthread_t tid, int sig)
1868{
1869 int ret;
1870 int old_errno = errno;
1871 pthread_internal_t * thread = (pthread_internal_t *)tid;
1872
1873 ret = tkill(thread->kernel_id, sig);
1874 if (ret < 0) {
1875 ret = errno;
1876 errno = old_errno;
1877 }
1878
1879 return ret;
1880}
1881
1882extern int __rt_sigprocmask(int, const sigset_t *, sigset_t *, size_t);
1883
1884int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
1885{
David 'Digit' Turner8f8b5312010-03-01 11:30:40 -08001886 /* pthread_sigmask must return the error code, but the syscall
1887 * will set errno instead and return 0/-1
1888 */
1889 int ret, old_errno = errno;
1890
1891 ret = __rt_sigprocmask(how, set, oset, _NSIG / 8);
1892 if (ret < 0)
1893 ret = errno;
1894
1895 errno = old_errno;
1896 return ret;
The Android Open Source Project1dc9e472009-03-03 19:28:35 -08001897}
1898
1899
1900int pthread_getcpuclockid(pthread_t tid, clockid_t *clockid)
1901{
1902 const int CLOCK_IDTYPE_BITS = 3;
1903 pthread_internal_t* thread = (pthread_internal_t*)tid;
1904
1905 if (!thread)
1906 return ESRCH;
1907
1908 *clockid = CLOCK_THREAD_CPUTIME_ID | (thread->kernel_id << CLOCK_IDTYPE_BITS);
1909 return 0;
1910}
1911
1912
1913/* NOTE: this implementation doesn't support a init function that throws a C++ exception
1914 * or calls fork()
1915 */
/* Run 'init_routine' exactly once per process for this once_control.
 *
 * Uses double-checked locking around a private static mutex: the first
 * unlocked read avoids the lock on the common already-initialized path,
 * and the re-check under the lock guarantees a single invocation.
 *
 * NOTE(review): the unlocked read of *once_control has no explicit
 * memory barrier; this presumably relies on the ARM memory behavior of
 * the era -- confirm ordering guarantees before reusing this pattern.
 * Also, as the header comment says, 'init_routine' must not throw a
 * C++ exception or call fork().
 */
int pthread_once( pthread_once_t* once_control, void (*init_routine)(void) )
{
    static pthread_mutex_t once_lock = PTHREAD_MUTEX_INITIALIZER;

    if (*once_control == PTHREAD_ONCE_INIT) {
        _normal_lock( &once_lock );
        /* re-check: another thread may have initialized while we
         * were waiting for the lock */
        if (*once_control == PTHREAD_ONCE_INIT) {
            (*init_routine)();
            *once_control = ~PTHREAD_ONCE_INIT;
        }
        _normal_unlock( &once_lock );
    }
    return 0;
}