Fix the pthread_setname_np test.

Fix the pthread_setname_np test to take into account that emulator kernels are
so old that they don't support setting the name of other threads.
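For context, the pthread_setname_np.cpp change below reports the open(2) failure for the
target thread's comm file, so on an emulator kernel that cannot set another thread's name
the call fails with the corresponding errno value (ENOENT if /proc/self/task/<tid>/comm is
missing). A sketch of the kind of accommodation the test needs (illustrative only;
other_thread and the assertion macro are placeholders, not the actual test code):

    int result = pthread_setname_np(other_thread, "short name");
    if (result == ENOENT) {
      // Kernel too old to expose the other thread's comm file (e.g. the
      // emulator kernel); skip rather than fail.
    } else {
      ASSERT_EQ(0, result);
    }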
The CLONE_DETACHED flag has been obsolete since 2.5 kernels.
Rename kernel_id to tid.
Fix the signature of __pthread_clone.
Clean up the clone and pthread_setname_np implementations slightly.

Change-Id: I16c2ff8845b67530544bbda9aa6618058603066d
diff --git a/libc/bionic/bionic_clone.c b/libc/bionic/bionic_clone.c
index 187b60d..8412057 100644
--- a/libc/bionic/bionic_clone.c
+++ b/libc/bionic/bionic_clone.c
@@ -58,9 +58,8 @@
int *parent_tidptr = NULL;
void *new_tls = NULL;
int *child_tidptr = NULL;
- int ret;
- /* extract optional parameters - they are cummulative */
+ /* extract optional parameters - they are cumulative. */
va_start(args, arg);
if (flags & (CLONE_PARENT_SETTID|CLONE_SETTLS|CLONE_CHILD_SETTID)) {
parent_tidptr = va_arg(args, int*);
@@ -73,6 +72,5 @@
}
va_end(args);
- ret = __bionic_clone(flags, child_stack, parent_tidptr, new_tls, child_tidptr, fn, arg);
- return ret;
+ return __bionic_clone(flags, child_stack, parent_tidptr, new_tls, child_tidptr, fn, arg);
}
diff --git a/libc/bionic/fork.c b/libc/bionic/fork.c
index 2d5a10a..d83c535 100644
--- a/libc/bionic/fork.c
+++ b/libc/bionic/fork.c
@@ -9,7 +9,7 @@
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
+ * the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
@@ -19,7 +19,7 @@
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
@@ -49,8 +49,8 @@
__timer_table_start_stop(0);
__bionic_atfork_run_parent();
} else {
- /* Adjusting the kernel id after a fork */
- (void)__pthread_settid(pthread_self(), gettid());
+ // Fix the tid in the pthread_internal_t struct after a fork.
+ __pthread_settid(pthread_self(), gettid());
/*
* Newly created process must update cpu accounting.
diff --git a/libc/bionic/libc_init_common.cpp b/libc/bionic/libc_init_common.cpp
index 266d6fa..6038056 100644
--- a/libc/bionic/libc_init_common.cpp
+++ b/libc/bionic/libc_init_common.cpp
@@ -73,14 +73,15 @@
unsigned stack_size = 128 * 1024;
unsigned stack_bottom = stack_top - stack_size;
+ static void* tls[BIONIC_TLS_SLOTS];
static pthread_internal_t thread;
+ thread.tid = gettid();
+ thread.tls = tls;
pthread_attr_init(&thread.attr);
pthread_attr_setstack(&thread.attr, (void*) stack_bottom, stack_size);
- _init_thread(&thread, gettid(), false);
-
- static void* tls_area[BIONIC_TLS_SLOTS];
- __init_tls(tls_area, &thread);
- tls_area[TLS_SLOT_BIONIC_PREINIT] = &args;
+ _init_thread(&thread, false);
+ __init_tls(&thread);
+ tls[TLS_SLOT_BIONIC_PREINIT] = &args;
}
void __libc_init_common(KernelArgumentBlock& args) {
diff --git a/libc/bionic/pthread-rwlocks.c b/libc/bionic/pthread-rwlocks.c
index ca3e95c..deee577 100644
--- a/libc/bionic/pthread-rwlocks.c
+++ b/libc/bionic/pthread-rwlocks.c
@@ -42,7 +42,7 @@
* - trying to get the read-lock while there is a writer blocks
* - a single thread can acquire the lock multiple times in the same mode
*
- * - Posix states that behaviour is undefined it a thread tries to acquire
+ * Posix states that behavior is undefined if a thread tries to acquire
* the lock in two distinct modes (e.g. write after read, or read after write).
*
* - This implementation tries to avoid writer starvation by making the readers
@@ -61,12 +61,6 @@
extern pthread_internal_t* __get_thread(void);
-/* Return a global kernel ID for the current thread */
-static int __get_thread_id(void)
-{
- return __get_thread()->kernel_id;
-}
-
int pthread_rwlockattr_init(pthread_rwlockattr_t *attr)
{
if (!attr)
@@ -150,8 +144,6 @@
int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
- int ret;
-
if (rwlock == NULL)
return EINVAL;
@@ -164,7 +156,7 @@
}
/* Returns TRUE iff we can acquire a read lock. */
-static __inline__ int read_precondition(pthread_rwlock_t *rwlock, int thread_id)
+static __inline__ int read_precondition(pthread_rwlock_t* rwlock, int tid)
{
/* We can't have the lock if any writer is waiting for it (writer bias).
* This tries to avoid starvation when there are multiple readers racing.
@@ -174,7 +166,7 @@
/* We can have the lock if there is no writer, or if we write-own it */
/* The second test avoids a self-dead lock in case of buggy code. */
- if (rwlock->writerThreadId == 0 || rwlock->writerThreadId == thread_id)
+ if (rwlock->writerThreadId == 0 || rwlock->writerThreadId == tid)
return 1;
/* Otherwise, we can't have it */
@@ -182,14 +174,14 @@
}
/* returns TRUE iff we can acquire a write lock. */
-static __inline__ int write_precondition(pthread_rwlock_t *rwlock, int thread_id)
+static __inline__ int write_precondition(pthread_rwlock_t* rwlock, int tid)
{
/* We can get the lock if nobody has it */
if (rwlock->numLocks == 0)
return 1;
/* Or if we already own it */
- if (rwlock->writerThreadId == thread_id)
+ if (rwlock->writerThreadId == tid)
return 1;
/* Otherwise, not */
@@ -220,7 +212,7 @@
return EINVAL;
pthread_mutex_lock(&rwlock->lock);
- if (__unlikely(!read_precondition(rwlock, __get_thread_id())))
+ if (__unlikely(!read_precondition(rwlock, __get_thread()->tid)))
ret = EBUSY;
else
rwlock->numLocks ++;
@@ -231,18 +223,18 @@
int pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout)
{
- int thread_id, ret = 0;
+ int ret = 0;
if (rwlock == NULL)
return EINVAL;
pthread_mutex_lock(&rwlock->lock);
- thread_id = __get_thread_id();
- if (__unlikely(!read_precondition(rwlock, thread_id))) {
+ int tid = __get_thread()->tid;
+ if (__unlikely(!read_precondition(rwlock, tid))) {
rwlock->pendingReaders += 1;
do {
ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
- } while (ret == 0 && !read_precondition(rwlock, thread_id));
+ } while (ret == 0 && !read_precondition(rwlock, tid));
rwlock->pendingReaders -= 1;
if (ret != 0)
goto EXIT;
@@ -261,18 +253,18 @@
int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
- int thread_id, ret = 0;
+ int ret = 0;
if (rwlock == NULL)
return EINVAL;
pthread_mutex_lock(&rwlock->lock);
- thread_id = __get_thread_id();
- if (__unlikely(!write_precondition(rwlock, thread_id))) {
+ int tid = __get_thread()->tid;
+ if (__unlikely(!write_precondition(rwlock, tid))) {
ret = EBUSY;
} else {
rwlock->numLocks ++;
- rwlock->writerThreadId = thread_id;
+ rwlock->writerThreadId = tid;
}
pthread_mutex_unlock(&rwlock->lock);
return ret;
@@ -280,14 +272,14 @@
int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout)
{
- int thread_id, ret = 0;
+ int ret = 0;
if (rwlock == NULL)
return EINVAL;
pthread_mutex_lock(&rwlock->lock);
- thread_id = __get_thread_id();
- if (__unlikely(!write_precondition(rwlock, thread_id))) {
+ int tid = __get_thread()->tid;
+ if (__unlikely(!write_precondition(rwlock, tid))) {
/* If we can't read yet, wait until the rwlock is unlocked
* and try again. Increment pendingReaders to get the
* cond broadcast when that happens.
@@ -295,13 +287,13 @@
rwlock->pendingWriters += 1;
do {
ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
- } while (ret == 0 && !write_precondition(rwlock, thread_id));
+ } while (ret == 0 && !write_precondition(rwlock, tid));
rwlock->pendingWriters -= 1;
if (ret != 0)
goto EXIT;
}
rwlock->numLocks ++;
- rwlock->writerThreadId = thread_id;
+ rwlock->writerThreadId = tid;
EXIT:
pthread_mutex_unlock(&rwlock->lock);
return ret;
@@ -332,7 +324,7 @@
* must be ourselves.
*/
else {
- if (rwlock->writerThreadId != __get_thread_id()) {
+ if (rwlock->writerThreadId != __get_thread()->tid) {
ret = EPERM;
goto EXIT;
}
diff --git a/libc/bionic/pthread.c b/libc/bionic/pthread.c
index 59fb8b3..5848215 100644
--- a/libc/bionic/pthread.c
+++ b/libc/bionic/pthread.c
@@ -313,9 +313,9 @@
int old_errno = errno;
pthread_internal_t * thread = (pthread_internal_t *)thid;
- int err = sched_getparam(thread->kernel_id, param);
+ int err = sched_getparam(thread->tid, param);
if (!err) {
- *policy = sched_getscheduler(thread->kernel_id);
+ *policy = sched_getscheduler(thread->tid);
} else {
err = errno;
errno = old_errno;
@@ -330,7 +330,7 @@
int old_errno = errno;
int ret;
- ret = sched_setscheduler(thread->kernel_id, policy, param);
+ ret = sched_setscheduler(thread->tid, policy, param);
if (ret < 0) {
ret = errno;
errno = old_errno;
@@ -342,7 +342,7 @@
/* a mutex is implemented as a 32-bit integer holding the following fields
*
* bits: name description
- * 31-16 tid owner thread's kernel id (recursive and errorcheck only)
+ * 31-16 tid owner thread's tid (recursive and errorcheck only)
* 15-14 type mutex type
* 13 shared process-shared flag
* 12-2 counter counter of recursive mutexes
@@ -452,8 +452,8 @@
/* Mutex owner field:
*
* This is only used for recursive and errorcheck mutexes. It holds the
- * kernel TID of the owning thread. Note that this works because the Linux
- * kernel _only_ uses 16-bit values for thread ids.
+ * tid of the owning thread. Note that this works because the Linux
+ * kernel _only_ uses 16-bit values for tids.
*
* More specifically, it will wrap to 10000 when it reaches over 32768 for
* application processes. You can check this by running the following inside
@@ -783,7 +783,7 @@
}
/* Do we already own this recursive or error-check mutex ? */
- tid = __get_thread()->kernel_id;
+ tid = __get_thread()->tid;
if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
return _recursive_increment(mutex, mvalue, mtype);
@@ -877,7 +877,7 @@
}
/* Do we already own this recursive or error-check mutex ? */
- tid = __get_thread()->kernel_id;
+ tid = __get_thread()->tid;
if ( tid != MUTEX_OWNER_FROM_BITS(mvalue) )
return EPERM;
@@ -951,7 +951,7 @@
}
/* Do we already own this recursive or error-check mutex ? */
- tid = __get_thread()->kernel_id;
+ tid = __get_thread()->tid;
if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
return _recursive_increment(mutex, mvalue, mtype);
@@ -1060,7 +1060,7 @@
}
/* Do we already own this recursive or error-check mutex ? */
- tid = __get_thread()->kernel_id;
+ tid = __get_thread()->tid;
if ( tid == MUTEX_OWNER_FROM_BITS(mvalue) )
return _recursive_increment(mutex, mvalue, mtype);
@@ -1379,7 +1379,7 @@
int old_errno = errno;
pthread_internal_t * thread = (pthread_internal_t *)tid;
- ret = tgkill(getpid(), thread->kernel_id, sig);
+ ret = tgkill(getpid(), thread->tid, sig);
if (ret < 0) {
ret = errno;
errno = old_errno;
@@ -1397,7 +1397,7 @@
if (!thread)
return ESRCH;
- *clockid = CLOCK_THREAD_CPUTIME_ID | (thread->kernel_id << CLOCK_IDTYPE_BITS);
+ *clockid = CLOCK_THREAD_CPUTIME_ID | (thread->tid << CLOCK_IDTYPE_BITS);
return 0;
}
@@ -1474,25 +1474,18 @@
return 0;
}
-/* Return the kernel thread ID for a pthread.
- * This is only defined for implementations where pthread <-> kernel is 1:1, which this is.
- * Not the same as pthread_getthreadid_np, which is commonly defined to be opaque.
- * Internal, not an NDK API.
- */
-
-pid_t __pthread_gettid(pthread_t thid)
-{
- pthread_internal_t* thread = (pthread_internal_t*)thid;
- return thread->kernel_id;
+pid_t __pthread_gettid(pthread_t thid) {
+ pthread_internal_t* thread = (pthread_internal_t*) thid;
+ return thread->tid;
}
-int __pthread_settid(pthread_t thid, pid_t tid)
-{
- if (thid == 0)
- return EINVAL;
+int __pthread_settid(pthread_t thid, pid_t tid) {
+ if (thid == 0) {
+ return EINVAL;
+ }
- pthread_internal_t* thread = (pthread_internal_t*)thid;
- thread->kernel_id = tid;
+ pthread_internal_t* thread = (pthread_internal_t*) thid;
+ thread->tid = tid;
- return 0;
+ return 0;
}
diff --git a/libc/bionic/pthread_create.cpp b/libc/bionic/pthread_create.cpp
index 6e4fe45..368c46d 100644
--- a/libc/bionic/pthread_create.cpp
+++ b/libc/bionic/pthread_create.cpp
@@ -41,7 +41,7 @@
#include "private/ErrnoRestorer.h"
#include "private/ScopedPthreadMutexLocker.h"
-extern "C" int __pthread_clone(int (*fn)(void*), void* child_stack, int flags, void* arg);
+extern "C" int __pthread_clone(void* (*fn)(void*), void* child_stack, int flags, void* arg);
#ifdef __i386__
#define ATTRIBUTES __attribute__((noinline)) __attribute__((fastcall))
@@ -57,25 +57,23 @@
static pthread_mutex_t gDebuggerNotificationLock = PTHREAD_MUTEX_INITIALIZER;
-void __init_tls(void** tls, void* thread) {
- ((pthread_internal_t*) thread)->tls = tls;
-
+void __init_tls(pthread_internal_t* thread) {
// Zero-initialize all the slots.
for (size_t i = 0; i < BIONIC_TLS_SLOTS; ++i) {
- tls[i] = NULL;
+ thread->tls[i] = NULL;
}
// Slot 0 must point to itself. The x86 Linux kernel reads the TLS from %fs:0.
- tls[TLS_SLOT_SELF] = tls;
- tls[TLS_SLOT_THREAD_ID] = thread;
+ thread->tls[TLS_SLOT_SELF] = thread->tls;
+ thread->tls[TLS_SLOT_THREAD_ID] = thread;
// GCC looks in the TLS for the stack guard on x86, so copy it there from our global.
- tls[TLS_SLOT_STACK_GUARD] = (void*) __stack_chk_guard;
+ thread->tls[TLS_SLOT_STACK_GUARD] = (void*) __stack_chk_guard;
- __set_tls((void*) tls);
+ __set_tls(thread->tls);
}
// This trampoline is called from the assembly _pthread_clone() function.
-extern "C" void __thread_entry(int (*func)(void*), void *arg, void **tls) {
+extern "C" void __thread_entry(void* (*func)(void*), void* arg, void** tls) {
// Wait for our creating thread to release us. This lets it have time to
// notify gdb about this thread before we start doing anything.
// This also provides the memory barrier needed to ensure that all memory
@@ -85,27 +83,26 @@
pthread_mutex_destroy(start_mutex);
pthread_internal_t* thread = (pthread_internal_t*) tls[TLS_SLOT_THREAD_ID];
- __init_tls(tls, thread);
+ thread->tls = tls;
+ __init_tls(thread);
if ((thread->internal_flags & kPthreadInitFailed) != 0) {
pthread_exit(NULL);
}
- int result = func(arg);
- pthread_exit((void*) result);
+ void* result = func(arg);
+ pthread_exit(result);
}
__LIBC_ABI_PRIVATE__
-int _init_thread(pthread_internal_t* thread, pid_t kernel_id, bool add_to_thread_list) {
+int _init_thread(pthread_internal_t* thread, bool add_to_thread_list) {
int error = 0;
- thread->kernel_id = kernel_id;
-
// Set the scheduling policy/priority of the thread.
if (thread->attr.sched_policy != SCHED_NORMAL) {
struct sched_param param;
param.sched_priority = thread->attr.sched_priority;
- if (sched_setscheduler(kernel_id, thread->attr.sched_policy, &param) == -1) {
+ if (sched_setscheduler(thread->tid, thread->attr.sched_policy, &param) == -1) {
// For backwards compatibility reasons, we just warn about failures here.
// error = errno;
const char* msg = "pthread_create sched_setscheduler call failed: %s\n";
@@ -198,9 +195,9 @@
tls[TLS_SLOT_THREAD_ID] = thread;
- int flags = CLONE_FILES | CLONE_FS | CLONE_VM | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM | CLONE_DETACHED;
- int tid = __pthread_clone((int(*)(void*))start_routine, tls, flags, arg);
+ int flags = CLONE_FILES | CLONE_FS | CLONE_VM | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM;
+ int tid = __pthread_clone(start_routine, tls, flags, arg);
if (tid < 0) {
int clone_errno = errno;
if ((thread->attr.flags & PTHREAD_ATTR_FLAG_USER_STACK) == 0) {
@@ -210,7 +207,9 @@
return clone_errno;
}
- int init_errno = _init_thread(thread, tid, true);
+ thread->tid = tid;
+
+ int init_errno = _init_thread(thread, true);
if (init_errno != 0) {
// Mark the thread detached and let its __thread_entry run to
// completion. (It'll just exit immediately, cleaning up its resources.)
@@ -222,7 +221,7 @@
// Notify any debuggers about the new thread.
{
ScopedPthreadMutexLocker debugger_locker(&gDebuggerNotificationLock);
- _thread_created_hook(tid);
+ _thread_created_hook(thread->tid);
}
// Publish the pthread_t and let the thread run.
diff --git a/libc/bionic/pthread_internal.h b/libc/bionic/pthread_internal.h
index a17c37d..9122a74 100644
--- a/libc/bionic/pthread_internal.h
+++ b/libc/bionic/pthread_internal.h
@@ -38,7 +38,7 @@
struct pthread_internal_t* next;
struct pthread_internal_t* prev;
pthread_attr_t attr;
- pid_t kernel_id;
+ pid_t tid;
bool allocated_on_heap;
pthread_cond_t join_cond;
int join_count;
@@ -55,7 +55,8 @@
char dlerror_buffer[__BIONIC_DLERROR_BUFFER_SIZE];
} pthread_internal_t;
-int _init_thread(pthread_internal_t* thread, pid_t kernel_id, bool add_to_thread_list);
+int _init_thread(pthread_internal_t* thread, bool add_to_thread_list);
+void __init_tls(pthread_internal_t* thread);
void _pthread_internal_add( pthread_internal_t* thread );
pthread_internal_t* __get_thread(void);
diff --git a/libc/bionic/pthread_setname_np.cpp b/libc/bionic/pthread_setname_np.cpp
index 88b86ec..6162aea 100644
--- a/libc/bionic/pthread_setname_np.cpp
+++ b/libc/bionic/pthread_setname_np.cpp
@@ -40,7 +40,7 @@
// This value is not exported by kernel headers.
#define MAX_TASK_COMM_LEN 16
-#define TASK_COMM_FMT "/proc/self/task/%u/comm"
+#define TASK_COMM_FMT "/proc/self/task/%d/comm"
int pthread_setname_np(pthread_t thread, const char* thread_name) {
ErrnoRestorer errno_restorer;
@@ -56,14 +56,14 @@
// Changing our own name is an easy special case.
if (thread == pthread_self()) {
- return prctl(PR_SET_NAME, (unsigned long)thread_name, 0, 0, 0) ? errno : 0;
+ return prctl(PR_SET_NAME, thread_name) ? errno : 0;
}
// Have to change another thread's name.
pthread_internal_t* t = reinterpret_cast<pthread_internal_t*>(thread);
char comm_name[sizeof(TASK_COMM_FMT) + 8];
- snprintf(comm_name, sizeof(comm_name), TASK_COMM_FMT, (unsigned int) t->kernel_id);
- int fd = open(comm_name, O_RDWR);
+ snprintf(comm_name, sizeof(comm_name), TASK_COMM_FMT, t->tid);
+ int fd = open(comm_name, O_WRONLY);
if (fd == -1) {
return errno;
}