/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/types.h>
#include <unistd.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <sys/atomics.h>
#include <sys/tls.h>
#include <sys/mman.h>
#include <pthread.h>
#include <time.h>
#include "pthread_internal.h"
#include "thread_private.h"
#include <limits.h>
#include <memory.h>
#include <assert.h>
#include <malloc.h>

extern int  __pthread_clone(int (*fn)(void*), void *child_stack, int flags, void *arg);
extern void _exit_with_stack_teardown(void * stackBase, int stackSize, int retCode);
extern void _exit_thread(int retCode);
extern int  __set_errno(int);

void _thread_created_hook(pid_t thread_id) __attribute__((noinline));

#define PTHREAD_ATTR_FLAG_DETACHED      0x00000001
#define PTHREAD_ATTR_FLAG_USER_STACK    0x00000002

#define DEFAULT_STACKSIZE (1024 * 1024)
#define STACKBASE 0x10000000

static uint8_t * gStackBase = (uint8_t *)STACKBASE;

static pthread_mutex_t mmap_lock = PTHREAD_MUTEX_INITIALIZER;


static const pthread_attr_t gDefaultPthreadAttr = {
    .flags = 0,
    .stack_base = NULL,
    .stack_size = DEFAULT_STACKSIZE,
    .guard_size = PAGE_SIZE,
    .sched_policy = SCHED_NORMAL,
    .sched_priority = 0
};

#define INIT_THREADS 1

static pthread_internal_t* gThreadList = NULL;
static pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t gDebuggerNotificationLock = PTHREAD_MUTEX_INITIALIZER;

/* we simply malloc/free the internal pthread_internal_t structures. we may
 * want to use a different allocation scheme in the future, but this one
 * should be good enough for now.
 */
static pthread_internal_t*
_pthread_internal_alloc(void)
{
    pthread_internal_t*  thread;

    thread = calloc( 1, sizeof(*thread) );   /* calloc(nmemb, size) */
    if (thread)
        thread->intern = 1;

    return thread;
}

static void
_pthread_internal_free( pthread_internal_t* thread )
{
    if (thread && thread->intern) {
        thread->intern = 0;  /* just in case */
        free(thread);
    }
}


static void
_pthread_internal_remove_locked( pthread_internal_t* thread )
{
    /* 'next' is NULL when removing the last entry of the list */
    if (thread->next)
        thread->next->pref = thread->pref;
    thread->pref[0] = thread->next;
}

static void
_pthread_internal_remove( pthread_internal_t* thread )
{
    pthread_mutex_lock(&gThreadListLock);
    _pthread_internal_remove_locked(thread);
    pthread_mutex_unlock(&gThreadListLock);
}

static void
_pthread_internal_add( pthread_internal_t* thread )
{
    pthread_mutex_lock(&gThreadListLock);
    thread->pref = &gThreadList;
    thread->next = thread->pref[0];
    if (thread->next)
        thread->next->pref = &thread->next;
    thread->pref[0] = thread;
    pthread_mutex_unlock(&gThreadListLock);
}

pthread_internal_t*
__get_thread(void)
{
    void**  tls = (void**)__get_tls();

    return (pthread_internal_t*) tls[TLS_SLOT_THREAD_ID];
}


void*
__get_stack_base(int  *p_stack_size)
{
    pthread_internal_t*  thread = __get_thread();

    *p_stack_size = thread->attr.stack_size;
    return thread->attr.stack_base;
}


void  __init_tls(void**  tls, void*  thread)
{
    int  nn;

    ((pthread_internal_t*)thread)->tls = tls;

    // slot 0 must point to the tls area, this is required by the implementation
    // of the x86 Linux kernel thread-local-storage
    tls[TLS_SLOT_SELF]      = (void*)tls;
    tls[TLS_SLOT_THREAD_ID] = thread;
    for (nn = TLS_SLOT_ERRNO; nn < BIONIC_TLS_SLOTS; nn++)
        tls[nn] = 0;

    __set_tls( (void*)tls );
}


/*
 * This trampoline is called from the assembly clone() function
 */
void __thread_entry(int (*func)(void*), void *arg, void **tls)
{
    pthread_internal_t * thrInfo;

    // Wait for our creating thread to release us. This lets it have time to
    // notify gdb about this thread before it starts doing anything.
    pthread_mutex_t * start_mutex = (pthread_mutex_t *)&tls[TLS_SLOT_SELF];
    pthread_mutex_lock(start_mutex);
    pthread_mutex_destroy(start_mutex);

    thrInfo = (pthread_internal_t *) tls[TLS_SLOT_THREAD_ID];

    __init_tls( tls, thrInfo );

    pthread_exit( (void*)func(arg) );
}

void _init_thread(pthread_internal_t * thread, pid_t kernel_id, pthread_attr_t * attr, void * stack_base)
{
    if (attr == NULL) {
        thread->attr = gDefaultPthreadAttr;
    } else {
        thread->attr = *attr;
    }
    thread->attr.stack_base = stack_base;
    thread->kernel_id       = kernel_id;

    // set the scheduling policy/priority of the thread
    if (thread->attr.sched_policy != SCHED_NORMAL) {
        struct sched_param param;
        param.sched_priority = thread->attr.sched_priority;
        sched_setscheduler(kernel_id, thread->attr.sched_policy, &param);
    }

    pthread_cond_init(&thread->join_cond, NULL);
    thread->join_count = 0;

    thread->cleanup_stack = NULL;

    _pthread_internal_add(thread);
}


/* XXX stacks not reclaimed if thread spawn fails */
/* XXX stacks address spaces should be reused if available again */

static void *mkstack(size_t size, size_t guard_size)
{
    void * stack;

    pthread_mutex_lock(&mmap_lock);

    stack = mmap((void *)gStackBase, size,
                 PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
                 -1, 0);

    if(stack == MAP_FAILED) {
        stack = NULL;
        goto done;
    }

    if(mprotect(stack, guard_size, PROT_NONE)){
        munmap(stack, size);
        stack = NULL;
        goto done;
    }

done:
    pthread_mutex_unlock(&mmap_lock);
    return stack;
}

/*
 * Create a new thread. The thread's stack is laid out like so:
 *
 * +---------------------------+
 * |     pthread_internal_t    |
 * +---------------------------+
 * |                           |
 * |          TLS area         |
 * |                           |
 * +---------------------------+
 * |                           |
 * .                           .
 * .         stack area        .
 * .                           .
 * |                           |
 * +---------------------------+
 * |         guard page        |
 * +---------------------------+
 *
 * note that TLS[0] must be a pointer to itself, this is required
 * by the thread-local storage implementation of the x86 Linux
 * kernel, where the TLS pointer is read by reading fs:[0]
 */
int pthread_create(pthread_t *thread_out, pthread_attr_t const * attr,
                   void *(*start_routine)(void *), void * arg)
{
    char*   stack;
    void**  tls;
    int     tid;
    pthread_mutex_t * start_mutex;
    pthread_internal_t * thread;
    int     madestack = 0;
    int     old_errno = errno;

    /* this will inform the rest of the C library that at least one thread
     * was created. this will enforce certain functions to acquire/release
     * locks (e.g. atexit()) to protect shared global structures.
     *
     * this works because pthread_create() is not called by the C library
     * initialization routine that sets up the main thread's data structures.
     */
    __isthreaded = 1;

    thread = _pthread_internal_alloc();
    if (thread == NULL)
        return ENOMEM;

    if (attr == NULL) {
        attr = &gDefaultPthreadAttr;
    }

    // make sure the stack is PAGE_SIZE aligned
    size_t stackSize = (attr->stack_size +
                        (PAGE_SIZE-1)) & ~(PAGE_SIZE-1);

    if (!attr->stack_base) {
        stack = mkstack(stackSize, attr->guard_size);
        if(stack == NULL) {
            _pthread_internal_free(thread);
            return ENOMEM;
        }
        madestack = 1;
    } else {
        stack = attr->stack_base;
    }

    // Make room for TLS
    tls = (void**)(stack + stackSize - BIONIC_TLS_SLOTS*sizeof(void*));

    // Create a mutex for the thread in TLS_SLOT_SELF to wait on once it starts so we can keep
    // it from doing anything until after we notify the debugger about it
    start_mutex = (pthread_mutex_t *) &tls[TLS_SLOT_SELF];
    pthread_mutex_init(start_mutex, NULL);
    pthread_mutex_lock(start_mutex);

    tls[TLS_SLOT_THREAD_ID] = thread;

    tid = __pthread_clone((int(*)(void*))start_routine, tls,
                CLONE_FILES | CLONE_FS | CLONE_VM | CLONE_SIGHAND
                | CLONE_THREAD | CLONE_SYSVSEM | CLONE_DETACHED,
                arg);

    if(tid < 0) {
        int  result;
        if (madestack)
            munmap(stack, stackSize);
        _pthread_internal_free(thread);
        result = errno;
        errno = old_errno;
        return result;
    }

    _init_thread(thread, tid, (pthread_attr_t*)attr, stack);

    if (!madestack)
        thread->attr.flags |= PTHREAD_ATTR_FLAG_USER_STACK;

    // Notify any debuggers about the new thread
    pthread_mutex_lock(&gDebuggerNotificationLock);
    _thread_created_hook(tid);
    pthread_mutex_unlock(&gDebuggerNotificationLock);

    // Let the thread do its thing
    pthread_mutex_unlock(start_mutex);

    *thread_out = (pthread_t)thread;
    return 0;
}
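
/* Illustrative usage sketch (added for clarity, not part of the original
 * file): how an application would drive pthread_create()/pthread_join()
 * above. The worker function and its argument are hypothetical.
 */
#if 0
static void* worker(void* arg)
{
    /* the return value travels back through pthread_join() */
    return (void*)((int)arg + 1);
}

static int example(void)
{
    pthread_t  t;
    void*      result;
    int        err = pthread_create(&t, NULL, worker, (void*)41);
    if (err != 0)                     /* pthread_create() returns an errno value */
        return err;
    err = pthread_join(t, &result);   /* blocks until worker returns */
    if (err == 0)
        printf("worker returned %d\n", (int)result);
    return err;
}
#endif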


int pthread_attr_init(pthread_attr_t * attr)
{
    *attr = gDefaultPthreadAttr;
    return 0;
}

int pthread_attr_destroy(pthread_attr_t * attr)
{
    memset(attr, 0x42, sizeof(pthread_attr_t));
    return 0;
}

int pthread_attr_setdetachstate(pthread_attr_t * attr, int state)
{
    if (state == PTHREAD_CREATE_DETACHED) {
        attr->flags |= PTHREAD_ATTR_FLAG_DETACHED;
    } else if (state == PTHREAD_CREATE_JOINABLE) {
        attr->flags &= ~PTHREAD_ATTR_FLAG_DETACHED;
    } else {
        return EINVAL;
    }
    return 0;
}

int pthread_attr_getdetachstate(pthread_attr_t const * attr, int * state)
{
    *state = (attr->flags & PTHREAD_ATTR_FLAG_DETACHED)
           ? PTHREAD_CREATE_DETACHED
           : PTHREAD_CREATE_JOINABLE;
    return 0;
}

int pthread_attr_setschedpolicy(pthread_attr_t * attr, int policy)
{
    attr->sched_policy = policy;
    return 0;
}

int pthread_attr_getschedpolicy(pthread_attr_t const * attr, int * policy)
{
    *policy = attr->sched_policy;
    return 0;
}

int pthread_attr_setschedparam(pthread_attr_t * attr, struct sched_param const * param)
{
    attr->sched_priority = param->sched_priority;
    return 0;
}

int pthread_attr_getschedparam(pthread_attr_t const * attr, struct sched_param * param)
{
    param->sched_priority = attr->sched_priority;
    return 0;
}

int pthread_attr_setstacksize(pthread_attr_t * attr, size_t stack_size)
{
    if ((stack_size & (PAGE_SIZE - 1)) || stack_size < PTHREAD_STACK_MIN) {
        return EINVAL;
    }
    attr->stack_size = stack_size;
    return 0;
}

int pthread_attr_getstacksize(pthread_attr_t const * attr, size_t * stack_size)
{
    *stack_size = attr->stack_size;
    return 0;
}

int pthread_attr_setstackaddr(pthread_attr_t * attr, void * stack_addr)
{
#if 1
    // It's not clear if this is setting the top or bottom of the stack, so don't handle it for now.
    return ENOSYS;
#else
    if ((uint32_t)stack_addr & (PAGE_SIZE - 1)) {
        return EINVAL;
    }
    attr->stack_base = stack_addr;
    return 0;
#endif
}

int pthread_attr_getstackaddr(pthread_attr_t const * attr, void ** stack_addr)
{
    *stack_addr = (char*)attr->stack_base + attr->stack_size;
    return 0;
}

int pthread_attr_setstack(pthread_attr_t * attr, void * stack_base, size_t stack_size)
{
    if ((stack_size & (PAGE_SIZE - 1)) || stack_size < PTHREAD_STACK_MIN) {
        return EINVAL;
    }
    if ((uint32_t)stack_base & (PAGE_SIZE - 1)) {
        return EINVAL;
    }
    attr->stack_base = stack_base;
    attr->stack_size = stack_size;
    return 0;
}

int pthread_attr_getstack(pthread_attr_t const * attr, void ** stack_base, size_t * stack_size)
{
    *stack_base = attr->stack_base;
    *stack_size = attr->stack_size;
    return 0;
}

int pthread_attr_setguardsize(pthread_attr_t * attr, size_t guard_size)
{
    if (guard_size & (PAGE_SIZE - 1) || guard_size < PAGE_SIZE) {
        return EINVAL;
    }

    attr->guard_size = guard_size;
    return 0;
}

int pthread_attr_getguardsize(pthread_attr_t const * attr, size_t * guard_size)
{
    *guard_size = attr->guard_size;
    return 0;
}

int pthread_getattr_np(pthread_t thid, pthread_attr_t * attr)
{
    pthread_internal_t * thread = (pthread_internal_t *)thid;
    *attr = thread->attr;
    return 0;
}

/* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support C++ exceptions
 *         and thread cancellation
 */

void __pthread_cleanup_push( __pthread_cleanup_t*      c,
                             __pthread_cleanup_func_t  routine,
                             void*                     arg )
{
    pthread_internal_t*  thread = __get_thread();

    c->__cleanup_routine  = routine;
    c->__cleanup_arg      = arg;
    c->__cleanup_prev     = thread->cleanup_stack;
    thread->cleanup_stack = c;
}

void __pthread_cleanup_pop( __pthread_cleanup_t*  c, int  execute )
{
    pthread_internal_t*  thread = __get_thread();

    thread->cleanup_stack = c->__cleanup_prev;
    if (execute)
        c->__cleanup_routine(c->__cleanup_arg);
}
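
/* Illustrative usage sketch (added for clarity, not part of the original
 * file): the standard pthread_cleanup_push()/pthread_cleanup_pop() macros
 * expand to calls to the two helpers above. The lock-protecting pattern
 * shown here is a hypothetical example.
 */
#if 0
static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_on_exit(void* arg)
{
    pthread_mutex_unlock((pthread_mutex_t*)arg);
}

static void critical_section(void)
{
    pthread_mutex_lock(&demo_lock);
    pthread_cleanup_push(unlock_on_exit, &demo_lock);
    /* ... if pthread_exit() runs here, unlock_on_exit() still fires ... */
    pthread_cleanup_pop(1);   /* non-zero: run the handler now */
}
#endif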

/* used by pthread_exit() to clean all TLS keys of the current thread */
static void pthread_key_clean_all(void);

void pthread_exit(void * retval)
{
    pthread_internal_t*  thread     = __get_thread();
    void*                stack_base = thread->attr.stack_base;
    int                  stack_size = thread->attr.stack_size;
    int                  user_stack = (thread->attr.flags & PTHREAD_ATTR_FLAG_USER_STACK) != 0;

    // call the cleanup handlers first
    while (thread->cleanup_stack) {
        __pthread_cleanup_t*  c = thread->cleanup_stack;
        thread->cleanup_stack   = c->__cleanup_prev;
        c->__cleanup_routine(c->__cleanup_arg);
    }

    // call the TLS destructors, it is important to do that before removing this
    // thread from the global list. this will ensure that if someone else deletes
    // a TLS key, the corresponding value will be set to NULL in this thread's TLS
    // space (see pthread_key_delete)
    pthread_key_clean_all();

    // if the thread is detached, destroy the pthread_internal_t
    // otherwise, keep it in memory and signal any joiners
    if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
        _pthread_internal_remove(thread);
        _pthread_internal_free(thread);
    } else {
        /* the join_count field is used to store the number of threads waiting for
         * the termination of this thread with pthread_join().
         *
         * if it is positive we need to signal the waiters, and we do not touch
         * the count (it will be decremented by the waiters; the last one will
         * also remove/free the thread structure).
         *
         * if it is zero, we set the count value to -1 to indicate that the
         * thread is in 'zombie' state: it has stopped executing, and its stack
         * is gone (as well as its TLS area). when another thread calls pthread_join()
         * on it, it will immediately free the thread and return.
         */
        pthread_mutex_lock(&gThreadListLock);
        thread->return_value = retval;
        if (thread->join_count > 0) {
            pthread_cond_broadcast(&thread->join_cond);
        } else {
            thread->join_count = -1;  /* zombie thread */
        }
        pthread_mutex_unlock(&gThreadListLock);
    }

    // destroy the thread stack
    if (user_stack)
        _exit_thread((int)retval);
    else
        _exit_with_stack_teardown(stack_base, stack_size, (int)retval);
}

int pthread_join(pthread_t thid, void ** ret_val)
{
    pthread_internal_t*  thread = (pthread_internal_t*)thid;
    int                  count;

    // check that the thread still exists and is not detached
    pthread_mutex_lock(&gThreadListLock);

    for (thread = gThreadList; thread != NULL; thread = thread->next)
        if (thread == (pthread_internal_t*)thid)
            break;

    if (!thread) {
        pthread_mutex_unlock(&gThreadListLock);
        return ESRCH;
    }

    if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
        pthread_mutex_unlock(&gThreadListLock);
        return EINVAL;
    }

    /* wait for thread death when needed
     *
     * if the 'join_count' is negative, this is a 'zombie' thread that
     * is already dead and without stack/TLS
     *
     * otherwise, we need to increment 'join_count' and wait to be signaled
     */
    count = thread->join_count;
    if (count >= 0) {
        thread->join_count += 1;
        pthread_cond_wait( &thread->join_cond, &gThreadListLock );
        count = --thread->join_count;
    }
    if (ret_val)
        *ret_val = thread->return_value;

    /* remove thread descriptor when we're the last joiner or when the
     * thread was already a zombie.
     */
    if (count <= 0) {
        _pthread_internal_remove_locked(thread);
        _pthread_internal_free(thread);
    }
    pthread_mutex_unlock(&gThreadListLock);
    return 0;
}

int pthread_detach( pthread_t thid )
{
    pthread_internal_t*  thread;
    int                  result = 0;
    int                  flags;

    pthread_mutex_lock(&gThreadListLock);
    for (thread = gThreadList; thread != NULL; thread = thread->next)
        if (thread == (pthread_internal_t*)thid)
            goto FoundIt;

    result = ESRCH;
    goto Exit;

FoundIt:
    do {
        flags = thread->attr.flags;

        if ( flags & PTHREAD_ATTR_FLAG_DETACHED ) {
            /* thread is not joinable! */
            result = EINVAL;
            goto Exit;
        }
    }
    while ( __atomic_cmpxchg( flags, flags | PTHREAD_ATTR_FLAG_DETACHED,
                              (volatile int*)&thread->attr.flags ) != 0 );
Exit:
    pthread_mutex_unlock(&gThreadListLock);
    return result;
}

pthread_t pthread_self(void)
{
    return (pthread_t)__get_thread();
}

int pthread_equal(pthread_t one, pthread_t two)
{
    return (one == two ? 1 : 0);
}

int pthread_getschedparam(pthread_t thid, int * policy,
                          struct sched_param * param)
{
    int  old_errno = errno;

    pthread_internal_t * thread = (pthread_internal_t *)thid;
    int err = sched_getparam(thread->kernel_id, param);
    if (!err) {
        *policy = sched_getscheduler(thread->kernel_id);
    } else {
        err = errno;
        errno = old_errno;
    }
    return err;
}

int pthread_setschedparam(pthread_t thid, int policy,
                          struct sched_param const * param)
{
    pthread_internal_t * thread = (pthread_internal_t *)thid;
    int                  old_errno = errno;
    int                  ret;

    ret = sched_setscheduler(thread->kernel_id, policy, param);
    if (ret < 0) {
        ret = errno;
        errno = old_errno;
    }
    return ret;
}


int __futex_wait(volatile void *ftx, int val, const struct timespec *timeout);
int __futex_wake(volatile void *ftx, int count);

// mutex lock states
//
// 0: unlocked
// 1: locked, no waiters
// 2: locked, maybe waiters

/* a mutex is implemented as a 32-bit integer holding the following fields
 *
 * bits:    name     description
 * 31-16    tid      owner thread's kernel id (recursive and errorcheck only)
 * 15-14    type     mutex type
 * 13-2     counter  counter of recursive mutexes
 * 1-0      state    lock state (0, 1 or 2)
 */


#define  MUTEX_OWNER(m)    (((m)->value >> 16) & 0xffff)
#define  MUTEX_COUNTER(m)  (((m)->value >> 2) & 0xfff)

#define  MUTEX_TYPE_MASK        0xc000
#define  MUTEX_TYPE_NORMAL      0x0000
#define  MUTEX_TYPE_RECURSIVE   0x4000
#define  MUTEX_TYPE_ERRORCHECK  0x8000

#define  MUTEX_COUNTER_SHIFT  2
#define  MUTEX_COUNTER_MASK   0x3ffc
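
/* Worked example (added for illustration, not in the original file): a
 * recursive mutex owned by kernel thread 42, re-locked once, with no
 * waiters (state 1), packs into the 32-bit word as
 *
 *    (42 << 16) | MUTEX_TYPE_RECURSIVE | (1 << MUTEX_COUNTER_SHIFT) | 1
 *  =  0x2a0000  |        0x4000        |          0x0004            | 1
 *  =  0x2a4005
 *
 * so MUTEX_OWNER() yields 42 and MUTEX_COUNTER() yields 1 for that value.
 */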




int pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
    if (attr) {
        *attr = PTHREAD_MUTEX_DEFAULT;
        return 0;
    } else {
        return EINVAL;
    }
}

int pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{
    if (attr) {
        *attr = -1;
        return 0;
    } else {
        return EINVAL;
    }
}

int pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *type)
{
    if (attr && *attr >= PTHREAD_MUTEX_NORMAL &&
                *attr <= PTHREAD_MUTEX_ERRORCHECK ) {
        *type = *attr;
        return 0;
    }
    return EINVAL;
}

int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
    if (attr && type >= PTHREAD_MUTEX_NORMAL &&
                type <= PTHREAD_MUTEX_ERRORCHECK ) {
        *attr = type;
        return 0;
    }
    return EINVAL;
}

int pthread_mutex_init(pthread_mutex_t *mutex,
                       const pthread_mutexattr_t *attr)
{
    if ( mutex ) {
        if (attr == NULL) {
            mutex->value = MUTEX_TYPE_NORMAL;
            return 0;
        }
        switch ( *attr ) {
        case PTHREAD_MUTEX_NORMAL:
            mutex->value = MUTEX_TYPE_NORMAL;
            return 0;

        case PTHREAD_MUTEX_RECURSIVE:
            mutex->value = MUTEX_TYPE_RECURSIVE;
            return 0;

        case PTHREAD_MUTEX_ERRORCHECK:
            mutex->value = MUTEX_TYPE_ERRORCHECK;
            return 0;
        }
    }
    return EINVAL;
}

int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
    mutex->value = 0xdead10cc;
    return 0;
}


/*
 * Lock a non-recursive mutex.
 *
 * As noted above, there are three states:
 *   0 (unlocked, no contention)
 *   1 (locked, no contention)
 *   2 (locked, contention)
 *
 * Non-recursive mutexes don't use the thread-id or counter fields, and the
 * "type" value is zero, so the only bits that will be set are the ones in
 * the lock state field.
 */
static __inline__ void
_normal_lock(pthread_mutex_t*  mutex)
{
    /*
     * The common case is an unlocked mutex, so we begin by trying to
     * change the lock's state from 0 to 1.  __atomic_cmpxchg() returns 0
     * if it made the swap successfully.  If the result is nonzero, this
     * lock is already held by another thread.
     */
    if (__atomic_cmpxchg(0, 1, &mutex->value ) != 0) {
        /*
         * We want to go to sleep until the mutex is available, which
         * requires promoting it to state 2.  We need to swap in the new
         * state value and then wait until somebody wakes us up.
         *
         * __atomic_swap() returns the previous value.  We swap 2 in and
         * see if we got zero back; if so, we have acquired the lock.  If
         * not, another thread still holds the lock and we wait again.
         *
         * The second argument to the __futex_wait() call is compared
         * against the current value.  If it doesn't match, __futex_wait()
         * returns immediately (otherwise, it sleeps for a time specified
         * by the third argument; 0 means sleep forever).  This ensures
         * that the mutex is in state 2 when we go to sleep on it, which
         * guarantees a wake-up call.
         */
        while (__atomic_swap(2, &mutex->value ) != 0)
            __futex_wait(&mutex->value, 2, 0);
    }
}

/*
 * Release a non-recursive mutex.  The caller is responsible for determining
 * that we are in fact the owner of this lock.
 */
static __inline__ void
_normal_unlock(pthread_mutex_t*  mutex)
{
    /*
     * The mutex value will be 1 or (rarely) 2.  We use an atomic decrement
     * to release the lock.  __atomic_dec() returns the previous value;
     * if it wasn't 1 we have to do some additional work.
     */
    if (__atomic_dec(&mutex->value) != 1) {
        /*
         * Start by releasing the lock.  The decrement changed it from
         * "contended lock" to "uncontended lock", which means we still
         * hold it, and anybody who tries to sneak in will push it back
         * to state 2.
         *
         * Once we set it to zero the lock is up for grabs.  We follow
         * this with a __futex_wake() to ensure that one of the waiting
         * threads has a chance to grab it.
         *
         * This doesn't cause a race with the swap/wait pair in
         * _normal_lock(), because the __futex_wait() call there will
         * return immediately if the mutex value isn't 2.
         */
        mutex->value = 0;

        /*
         * Wake up one waiting thread.  We don't know which thread will be
         * woken or when it'll start executing -- futexes make no guarantees
         * here.  There may not even be a thread waiting.
         *
         * The newly-woken thread will replace the 0 we just set above
         * with 2, which means that when it eventually releases the mutex
         * it will also call FUTEX_WAKE.  This results in one extra wake
         * call whenever a lock is contended, but lets us avoid forgetting
         * anyone without requiring us to track the number of sleepers.
         *
         * It's possible for another thread to sneak in and grab the lock
         * between the zero assignment above and the wake call below.  If
         * the new thread is "slow" and holds the lock for a while, we'll
         * wake up a sleeper, which will swap in a 2 and then go back to
         * sleep since the lock is still held.  If the new thread is "fast",
         * running to completion before we call wake, the thread we
         * eventually wake will find an unlocked mutex and will execute.
         * Either way we have correct behavior and nobody is orphaned on
         * the wait queue.
         */
        __futex_wake(&mutex->value, 1);
    }
}
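
/* Illustrative timeline (added for clarity, not in the original file): two
 * threads A and B contending for a normal mutex, showing the value
 * transitions driven by _normal_lock()/_normal_unlock() above:
 *
 *   value  event
 *     0    A: cmpxchg(0 -> 1) succeeds; A owns the lock
 *     1    B: cmpxchg fails; B swaps in 2 (old value 1) and futex-waits on 2
 *     2    A: __atomic_dec() returns 2, so A zeroes the value ...
 *     0    ... and calls __futex_wake(); B retries its swap
 *     2    B: swap(2) returned 0, so B now owns the (possibly contended) lock
 */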

static pthread_mutex_t  __recursive_lock = PTHREAD_MUTEX_INITIALIZER;

static void
_recursive_lock(void)
{
    _normal_lock( &__recursive_lock );
}

static void
_recursive_unlock(void)
{
    _normal_unlock( &__recursive_lock );
}

#define  __likely(cond)    __builtin_expect(!!(cond), 1)
#define  __unlikely(cond)  __builtin_expect(!!(cond), 0)

int pthread_mutex_lock(pthread_mutex_t *mutex)
{
    if (__likely(mutex != NULL))
    {
        int  mtype = (mutex->value & MUTEX_TYPE_MASK);

        if ( __likely(mtype == MUTEX_TYPE_NORMAL) ) {
            _normal_lock(mutex);
        }
        else
        {
            int  tid = __get_thread()->kernel_id;

            if ( tid == MUTEX_OWNER(mutex) )
            {
                int  oldv, counter;

                if (mtype == MUTEX_TYPE_ERRORCHECK) {
                    /* trying to re-lock a mutex we already acquired */
                    return EDEADLK;
                }
                /*
                 * We own the mutex, but other threads are able to change
                 * the contents (e.g. promoting it to "contended"), so we
                 * need to hold the global lock.
                 */
                _recursive_lock();
                oldv         = mutex->value;
                counter      = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
                mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
                _recursive_unlock();
            }
            else
            {
                /*
                 * If the new lock is available immediately, we grab it in
                 * the "uncontended" state.
                 */
                int new_lock_type = 1;

                for (;;) {
                    int  oldv;

                    _recursive_lock();
                    oldv = mutex->value;
                    if (oldv == mtype) { /* uncontended released lock => 1 or 2 */
                        mutex->value = ((tid << 16) | mtype | new_lock_type);
                    } else if ((oldv & 3) == 1) { /* locked state 1 => state 2 */
                        oldv ^= 3;
                        mutex->value = oldv;
                    }
                    _recursive_unlock();

                    if (oldv == mtype)
                        break;

                    /*
                     * The lock was held, possibly contended by others.  From
                     * now on, if we manage to acquire the lock, we have to
                     * assume that others are still contending for it so that
                     * we'll wake them when we unlock it.
                     */
                    new_lock_type = 2;

                    __futex_wait( &mutex->value, oldv, 0 );
                }
            }
        }
        return 0;
    }
    return EINVAL;
}


int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
    if (__likely(mutex != NULL))
    {
        int  mtype = (mutex->value & MUTEX_TYPE_MASK);

        if (__likely(mtype == MUTEX_TYPE_NORMAL)) {
            _normal_unlock(mutex);
        }
        else
        {
            int  tid = __get_thread()->kernel_id;

            if ( tid == MUTEX_OWNER(mutex) )
            {
                int  oldv;

                _recursive_lock();
                oldv = mutex->value;
                if (oldv & MUTEX_COUNTER_MASK) {
                    mutex->value = oldv - (1 << MUTEX_COUNTER_SHIFT);
                    oldv = 0;
                } else {
                    mutex->value = mtype;
                }
                _recursive_unlock();

                if ((oldv & 3) == 2)
                    __futex_wake( &mutex->value, 1 );
            }
            else {
                /* trying to unlock a lock we do not own */
                return EPERM;
            }
        }
        return 0;
    }
    return EINVAL;
}


int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
    if (__likely(mutex != NULL))
    {
        int  mtype = (mutex->value & MUTEX_TYPE_MASK);

        if ( __likely(mtype == MUTEX_TYPE_NORMAL) )
        {
            if (__atomic_cmpxchg(0, 1, &mutex->value) == 0)
                return 0;

            return EBUSY;
        }
        else
        {
            int  tid = __get_thread()->kernel_id;
            int  oldv;

            if ( tid == MUTEX_OWNER(mutex) )
            {
                int  counter;

                if (mtype == MUTEX_TYPE_ERRORCHECK) {
                    /* already locked by ourselves */
                    return EDEADLK;
                }

                _recursive_lock();
                oldv = mutex->value;
                counter = (oldv + (1 << MUTEX_COUNTER_SHIFT)) & MUTEX_COUNTER_MASK;
                mutex->value = (oldv & ~MUTEX_COUNTER_MASK) | counter;
                _recursive_unlock();
                return 0;
            }

            /* try to lock it */
            _recursive_lock();
            oldv = mutex->value;
            if (oldv == mtype)  /* uncontended released lock => state 1 */
                mutex->value = ((tid << 16) | mtype | 1);
            _recursive_unlock();

            if (oldv != mtype)
                return EBUSY;

            return 0;
        }
    }
    return EINVAL;
}


/* XXX *technically* there is a race condition that could allow
 * XXX a signal to be missed.  If thread A is preempted in _wait()
 * XXX after unlocking the mutex and before waiting, and if other
 * XXX threads call signal or broadcast UINT_MAX times (exactly),
 * XXX before thread A is scheduled again and calls futex_wait(),
 * XXX then the signal will be lost.
 */

int pthread_cond_init(pthread_cond_t *cond,
                      const pthread_condattr_t *attr)
{
    cond->value = 0;
    return 0;
}

int pthread_cond_destroy(pthread_cond_t *cond)
{
    cond->value = 0xdeadc04d;
    return 0;
}

int pthread_cond_broadcast(pthread_cond_t *cond)
{
    __atomic_dec(&cond->value);
    __futex_wake(&cond->value, INT_MAX);
    return 0;
}

int pthread_cond_signal(pthread_cond_t *cond)
{
    __atomic_dec(&cond->value);
    __futex_wake(&cond->value, 1);
    return 0;
}

int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
{
    return pthread_cond_timedwait(cond, mutex, NULL);
}

int pthread_cond_timedwait(pthread_cond_t *cond,
                           pthread_mutex_t * mutex,
                           const struct timespec *abstime)
{
    int oldvalue;
    struct timespec ts;
    struct timespec * tsp;
    int status;

    if (abstime != NULL) {
        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec  = abstime->tv_sec - ts.tv_sec;
        ts.tv_nsec = abstime->tv_nsec - ts.tv_nsec;
        if (ts.tv_nsec < 0) {
            ts.tv_sec--;
            ts.tv_nsec += 1000000000;
        }
        if((ts.tv_nsec < 0) || (ts.tv_sec < 0)) {
            return ETIMEDOUT;
        }
        tsp = &ts;
    } else {
        tsp = NULL;
    }

    oldvalue = cond->value;

    pthread_mutex_unlock(mutex);
    status = __futex_wait(&cond->value, oldvalue, tsp);
    pthread_mutex_lock(mutex);

    if(status == (-ETIMEDOUT)) return ETIMEDOUT;

    return 0;
}


int pthread_cond_timedwait_monotonic(pthread_cond_t *cond,
                                     pthread_mutex_t * mutex,
                                     const struct timespec *abstime)
{
    int oldvalue;
    struct timespec ts;
    struct timespec * tsp;
    int status;

    if (abstime != NULL) {
        clock_gettime(CLOCK_MONOTONIC, &ts);
        ts.tv_sec  = abstime->tv_sec - ts.tv_sec;
        ts.tv_nsec = abstime->tv_nsec - ts.tv_nsec;
        if (ts.tv_nsec < 0) {
            ts.tv_sec--;
            ts.tv_nsec += 1000000000;
        }
        if((ts.tv_nsec < 0) || (ts.tv_sec < 0)) {
            return ETIMEDOUT;
        }
        tsp = &ts;
    } else {
        tsp = NULL;
    }

    oldvalue = cond->value;

    pthread_mutex_unlock(mutex);
    status = __futex_wait(&cond->value, oldvalue, tsp);
    pthread_mutex_lock(mutex);

    if(status == (-ETIMEDOUT)) return ETIMEDOUT;

    return 0;
}

int pthread_cond_timeout_np(pthread_cond_t *cond,
                            pthread_mutex_t * mutex,
                            unsigned msecs)
{
    int oldvalue;
    struct timespec ts;
    int status;

    ts.tv_sec  = msecs / 1000;
    ts.tv_nsec = (msecs % 1000) * 1000000;

    oldvalue = cond->value;

    pthread_mutex_unlock(mutex);
    status = __futex_wait(&cond->value, oldvalue, &ts);
    pthread_mutex_lock(mutex);

    if(status == (-ETIMEDOUT)) return ETIMEDOUT;

    return 0;
}
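
/* Illustrative usage sketch (added for clarity, not part of the original
 * file): the canonical predicate loop for the condition variables above.
 * Re-checking the predicate in a while loop is what makes the wake
 * semantics (and any spurious wakeup) safe.  The queue variables are
 * hypothetical.
 */
#if 0
static pthread_mutex_t q_lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  q_ready = PTHREAD_COND_INITIALIZER;
static int             q_count = 0;

static void consumer_wait(void)
{
    pthread_mutex_lock(&q_lock);
    while (q_count == 0)                    /* predicate re-checked after each wakeup */
        pthread_cond_wait(&q_ready, &q_lock);
    q_count--;                              /* consume one item */
    pthread_mutex_unlock(&q_lock);
}

static void producer_post(void)
{
    pthread_mutex_lock(&q_lock);
    q_count++;
    pthread_cond_signal(&q_ready);          /* wake one waiting consumer */
    pthread_mutex_unlock(&q_lock);
}
#endif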



/* A technical note regarding our thread-local-storage (TLS) implementation:
 *
 * There can be up to TLSMAP_SIZE independent TLS keys in a given process,
 * though the first TLSMAP_START keys are reserved for Bionic to hold
 * special thread-specific variables like errno or a pointer to
 * the current thread's descriptor.
 *
 * while stored in the TLS area, these entries cannot be accessed through
 * pthread_getspecific() / pthread_setspecific() and pthread_key_delete()
 *
 * also, some entries in the key table are pre-allocated (see tlsmap_lock)
 * to greatly simplify and speed up some OpenGL-related operations, though
 * the initial value will be NULL on all threads.
 *
 * you can use pthread_getspecific()/setspecific() on these, and in theory
 * you could also call pthread_key_delete() as well, though this would
 * probably break some apps.
 *
 * The 'tlsmap_t' type defined below implements a shared global map of
 * currently created/allocated TLS keys and the destructors associated
 * with them. You should use tlsmap_lock/unlock to access it to avoid
 * any race condition.
 *
 * the global TLS map simply contains a bitmap of allocated keys, and
 * an array of destructors.
 *
 * each thread has a TLS area that is a simple array of TLSMAP_SIZE void*
 * pointers. the TLS area of the main thread is stack-allocated in
 * __libc_init_common, while the TLS area of other threads is placed at
 * the top of their stack in pthread_create.
 *
 * when pthread_key_create() is called, it finds the first free key in the
 * bitmap, sets it to 1, and records the key's destructor.
 *
 * when pthread_key_delete() is called, it will erase the key's bitmap bit
 * and its destructor, and will also clear the key data in the TLS area of
 * all created threads. As mandated by POSIX, it is the responsibility of
 * the caller of pthread_key_delete() to properly reclaim the objects that
 * were pointed to by these data fields (either before or after the call).
 *
 */

/* TLS Map implementation
 */

#define TLSMAP_START      (TLS_SLOT_MAX_WELL_KNOWN+1)
#define TLSMAP_SIZE       BIONIC_TLS_SLOTS
#define TLSMAP_BITS       32
#define TLSMAP_WORDS      ((TLSMAP_SIZE+TLSMAP_BITS-1)/TLSMAP_BITS)
#define TLSMAP_WORD(m,k)  (m)->map[(k)/TLSMAP_BITS]
#define TLSMAP_MASK(k)    (1U << ((k)&(TLSMAP_BITS-1)))

/* this macro is used to quickly check that a key belongs to a reasonable range */
#define TLSMAP_VALIDATE_KEY(key)  \
    ((key) >= TLSMAP_START && (key) < TLSMAP_SIZE)

/* the type of tls key destructor functions */
typedef void (*tls_dtor_t)(void*);

typedef struct {
    int         init;                /* see comment in tlsmap_lock() */
    uint32_t    map[TLSMAP_WORDS];   /* bitmap of allocated keys */
    tls_dtor_t  dtors[TLSMAP_SIZE];  /* key destructors */
} tlsmap_t;

static pthread_mutex_t  _tlsmap_lock = PTHREAD_MUTEX_INITIALIZER;
static tlsmap_t         _tlsmap;

/* lock the global TLS map lock and return a handle to it */
static __inline__ tlsmap_t* tlsmap_lock(void)
{
    tlsmap_t*  m = &_tlsmap;

    pthread_mutex_lock(&_tlsmap_lock);
    /* we need to initialize the first entry of the 'map' array
     * with the value TLS_DEFAULT_ALLOC_MAP. doing it statically
     * when declaring _tlsmap is a bit awkward and is going to
     * produce warnings, so do it the first time we use the map
     * instead
     */
    if (__unlikely(!m->init)) {
        TLSMAP_WORD(m,0) = TLS_DEFAULT_ALLOC_MAP;
        m->init          = 1;
    }
    return m;
}

/* unlock the global TLS map */
static __inline__ void tlsmap_unlock(tlsmap_t* m)
{
    pthread_mutex_unlock(&_tlsmap_lock);
    (void)m;  /* a good compiler is a happy compiler */
}

/* test to see whether a key is allocated */
static __inline__ int tlsmap_test(tlsmap_t* m, int key)
{
    return (TLSMAP_WORD(m,key) & TLSMAP_MASK(key)) != 0;
}

/* set the destructor and bit flag on a newly allocated key */
static __inline__ void tlsmap_set(tlsmap_t* m, int key, tls_dtor_t dtor)
{
    TLSMAP_WORD(m,key) |= TLSMAP_MASK(key);
    m->dtors[key]       = dtor;
}

/* clear the destructor and bit flag on an existing key */
static __inline__ void tlsmap_clear(tlsmap_t* m, int key)
{
    TLSMAP_WORD(m,key) &= ~TLSMAP_MASK(key);
    m->dtors[key]       = NULL;
}

/* allocate a new TLS key, return -1 if no room left */
static int tlsmap_alloc(tlsmap_t* m, tls_dtor_t dtor)
{
    int  key;

    for ( key = TLSMAP_START; key < TLSMAP_SIZE; key++ ) {
        if ( !tlsmap_test(m, key) ) {
            tlsmap_set(m, key, dtor);
            return key;
        }
    }
    return -1;
}


int pthread_key_create(pthread_key_t *key, void (*destructor_function)(void *))
{
    uint32_t   err = ENOMEM;
    tlsmap_t*  map = tlsmap_lock();
    int        k   = tlsmap_alloc(map, destructor_function);

    if (k >= 0) {
        *key = k;
        err  = 0;
    }
    tlsmap_unlock(map);
    return err;
}


/* This deletes a pthread_key_t. note that the standard mandates that this does
 * not call the destructor of non-NULL key values. Instead, it is the
 * responsibility of the caller to properly dispose of the corresponding data
 * and resources, using any means it finds suitable.
 *
 * On the other hand, this function will clear the corresponding key data
 * values in all known threads. this prevents later (invalid) calls to
 * pthread_getspecific() from receiving invalid/stale values.
 */
int pthread_key_delete(pthread_key_t key)
{
    uint32_t             err;
    pthread_internal_t*  thr;
    tlsmap_t*            map;

    if (!TLSMAP_VALIDATE_KEY(key)) {
        return EINVAL;
    }

    map = tlsmap_lock();

    if (!tlsmap_test(map, key)) {
        err = EINVAL;
        goto err1;
    }

    /* clear value in all threads */
    pthread_mutex_lock(&gThreadListLock);
    for ( thr = gThreadList; thr != NULL; thr = thr->next ) {
        /* avoid zombie threads with a negative 'join_count'. these are really
         * already dead and don't have a TLS area anymore.
         *
         * similarly, it is possible to have thr->tls == NULL for threads that
         * were just recently created through pthread_create() but whose
         * startup trampoline (__thread_entry) hasn't been run yet by the
         * scheduler. so check for this too.
         */
        if (thr->join_count < 0 || !thr->tls)
            continue;

        thr->tls[key] = NULL;
    }
    tlsmap_clear(map, key);

    pthread_mutex_unlock(&gThreadListLock);
    err = 0;

err1:
    tlsmap_unlock(map);
    return err;
}


int pthread_setspecific(pthread_key_t key, const void *ptr)
{
    int        err = EINVAL;
    tlsmap_t*  map;

    if (TLSMAP_VALIDATE_KEY(key)) {
        /* check that we're trying to set data for an allocated key */
        map = tlsmap_lock();
        if (tlsmap_test(map, key)) {
            ((uint32_t *)__get_tls())[key] = (uint32_t)ptr;
            err = 0;
        }
        tlsmap_unlock(map);
    }
    return err;
}

void * pthread_getspecific(pthread_key_t key)
{
    if (!TLSMAP_VALIDATE_KEY(key)) {
        return NULL;
    }

    /* for performance reasons, we do not lock/unlock the global TLS map
     * to check that the key is properly allocated. if the key was not
     * allocated, the value read from the TLS should always be NULL
     * due to pthread_key_delete() clearing the values for all threads.
     */
    return (void *)(((unsigned *)__get_tls())[key]);
}
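
/* Illustrative usage sketch (added for clarity, not part of the original
 * file): a per-thread buffer built on the key API above.  The destructor
 * registered with pthread_key_create() runs from pthread_key_clean_all()
 * at thread exit.  The names and the 128-byte size are hypothetical.
 */
#if 0
static pthread_key_t   buf_key;
static pthread_once_t  buf_once = PTHREAD_ONCE_INIT;

static void buf_key_init(void)
{
    /* free() runs once per exiting thread that stored a non-NULL value */
    pthread_key_create(&buf_key, free);
}

static char* get_thread_buffer(void)
{
    char* buf;
    pthread_once(&buf_once, buf_key_init);
    buf = pthread_getspecific(buf_key);
    if (buf == NULL) {
        buf = malloc(128);
        pthread_setspecific(buf_key, buf);
    }
    return buf;
}
#endif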

/* POSIX mandates that this be defined in <limits.h> but we don't have
 * it just yet.
 */
#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
#  define PTHREAD_DESTRUCTOR_ITERATIONS  4
#endif

/* this function is called from pthread_exit() to remove all TLS key data
 * from this thread's TLS area. this must call the destructor of all keys
 * that have a non-NULL data value (and a non-NULL destructor).
 *
 * because destructors can do funky things like deleting/creating other
 * keys, we need to implement this in a loop
 */
static void pthread_key_clean_all(void)
{
    tlsmap_t*  map;
    void**     tls = (void**)__get_tls();
    int        rounds;

    map = tlsmap_lock();

    for (rounds = PTHREAD_DESTRUCTOR_ITERATIONS; rounds > 0; rounds--)
    {
        int  kk, count = 0;

        for (kk = TLSMAP_START; kk < TLSMAP_SIZE; kk++) {
            if ( tlsmap_test(map, kk) )
            {
                void*       data = tls[kk];
                tls_dtor_t  dtor = map->dtors[kk];

                if (data != NULL && dtor != NULL)
                {
                    /* we need to clear the key data now, this will prevent the
                     * destructor (or a later one) from seeing the old value if
                     * it calls pthread_getspecific() for some odd reason
                     *
                     * we do not do this if 'dtor == NULL' just in case another
                     * destructor function might be responsible for manually
                     * releasing the corresponding data.
                     */
                    tls[kk] = NULL;

                    /* because the destructor is free to call pthread_key_create
                     * and/or pthread_key_delete, we need to temporarily unlock
                     * the TLS map
                     */
                    tlsmap_unlock(map);
                    (*dtor)(data);
                    map = tlsmap_lock();

                    count += 1;
                }
            }
        }

        /* if we didn't call any destructor, there is no need to check the
         * TLS data again
         */
        if (count == 0)
            break;
    }
    tlsmap_unlock(map);
}

// man says this should be in <linux/unistd.h>, but it isn't
extern int tkill(int tid, int sig);

int pthread_kill(pthread_t tid, int sig)
{
    int  ret;
    int  old_errno = errno;
    pthread_internal_t * thread = (pthread_internal_t *)tid;

    ret = tkill(thread->kernel_id, sig);
    if (ret < 0) {
        ret = errno;
        errno = old_errno;
    }

    return ret;
}

extern int __rt_sigprocmask(int, const sigset_t *, sigset_t *, size_t);

int pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
{
    return __rt_sigprocmask(how, set, oset, _NSIG / 8);
}


int pthread_getcpuclockid(pthread_t tid, clockid_t *clockid)
{
    const int            CLOCK_IDTYPE_BITS = 3;
    pthread_internal_t*  thread = (pthread_internal_t*)tid;

    if (!thread)
        return ESRCH;

    *clockid = CLOCK_THREAD_CPUTIME_ID | (thread->kernel_id << CLOCK_IDTYPE_BITS);
    return 0;
}
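
/* Illustrative usage sketch (added for clarity, not part of the original
 * file): reading a thread's CPU time through the clock id returned above.
 * This assumes the kernel accepts such per-thread clock ids in
 * clock_gettime(); the helper name is hypothetical.
 */
#if 0
static void print_thread_cpu_time(pthread_t t)
{
    clockid_t        id;
    struct timespec  ts;

    if (pthread_getcpuclockid(t, &id) == 0 &&
        clock_gettime(id, &ts) == 0) {
        printf("cpu time: %ld.%09ld s\n", (long)ts.tv_sec, (long)ts.tv_nsec);
    }
}
#endif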


/* NOTE: this implementation doesn't support an init function that throws a C++ exception
 *       or calls fork()
 */
int pthread_once( pthread_once_t* once_control, void (*init_routine)(void) )
{
    static pthread_mutex_t  once_lock = PTHREAD_MUTEX_INITIALIZER;

    if (*once_control == PTHREAD_ONCE_INIT) {
        _normal_lock( &once_lock );
        if (*once_control == PTHREAD_ONCE_INIT) {
            (*init_routine)();
            *once_control = ~PTHREAD_ONCE_INIT;
        }
        _normal_unlock( &once_lock );
    }
    return 0;
}