<pthread.h> fixes and pthread cleanup.

<pthread.h> was missing nonnull attributes, noreturn on pthread_exit,
and had incorrect cv qualifiers for several standard functions.

I've also marked the non-standard stuff (where I count glibc rather
than POSIX as "standard") so we can revisit this cruft for LP64 and
try to ensure we're compatible with glibc.

I've also broken out the pthread_cond* functions into a new file.

I've made the remaining pthread files (plus ptrace) part of the bionic code
and fixed all the warnings.

I've added a few more smoke tests for chunks of untested pthread functionality.

We no longer need the libc_static_common_src_files hack for any of the
pthread implementation because we long since stripped out the rest of
the armv5 support, and this hack was just to ensure that __get_tls in libc.a
went via the kernel if necessary.

This patch also finishes the job of breaking up the pthread.c monolith, and
adds a handful of new tests.

Change-Id: Idc0ae7f5d8aa65989598acd4c01a874fe21582c7
diff --git a/libc/bionic/pthread-atfork.c b/libc/bionic/pthread-atfork.c
deleted file mode 100644
index 42420dc..0000000
--- a/libc/bionic/pthread-atfork.c
+++ /dev/null
@@ -1,126 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *  * Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- *  * Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in
- *    the documentation and/or other materials provided with the
- *    distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
- * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
- * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
- * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
- * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
- * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
- * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
- * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-#include <stdlib.h>
-#include <errno.h>
-#include <pthread.h>
-#include <sys/queue.h>
-
-static pthread_mutex_t handler_mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
-
-struct atfork_t
-{
-    CIRCLEQ_ENTRY(atfork_t) entries;
-
-    void (*prepare)(void);
-    void (*child)(void);
-    void (*parent)(void);
-};
-static CIRCLEQ_HEAD(atfork_head_t, atfork_t) atfork_head = \
-    CIRCLEQ_HEAD_INITIALIZER(atfork_head);
-
-void __bionic_atfork_run_prepare()
-{
-    struct atfork_t *cursor;
-
-    /* We will lock this here, and unlock it in the parent and child functions.
-     * This ensures that nobody can modify the handler array between the calls
-     * to the prepare and parent/child handlers.
-     *
-     * TODO: If a handler mucks with the list, it could cause problems.  Right
-     *       now it's ok because all they can do is add new items to the end
-     *       of the list, but if/when we implement cleanup in dlclose() things
-     *       will get more interesting...
-     */
-    pthread_mutex_lock(&handler_mutex);
-
-    /* Call pthread_atfork() prepare handlers.  Posix states that the prepare
-     * handlers should be called in the reverse order of the parent/child
-     * handlers, so we iterate backwards.
-     */
-    for (cursor = atfork_head.cqh_last;
-         cursor != (void*)&atfork_head;
-         cursor = cursor->entries.cqe_prev) {
-        if (cursor->prepare != NULL) {
-            cursor->prepare();
-        }
-    }
-}
-
-void __bionic_atfork_run_child()
-{
-    struct atfork_t *cursor;
-    pthread_mutexattr_t  attr;
-
-    /* Call pthread_atfork() child handlers */
-    for (cursor = atfork_head.cqh_first;
-         cursor != (void*)&atfork_head;
-         cursor = cursor->entries.cqe_next) {
-        if (cursor->child != NULL) {
-            cursor->child();
-        }
-    }
-
-    pthread_mutexattr_init(&attr);
-    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
-    pthread_mutex_init(&handler_mutex, &attr);
-}
-
-void __bionic_atfork_run_parent()
-{
-    struct atfork_t *cursor;
-
-    /* Call pthread_atfork() parent handlers */
-    for (cursor = atfork_head.cqh_first;
-         cursor != (void*)&atfork_head;
-         cursor = cursor->entries.cqe_next) {
-        if (cursor->parent != NULL) {
-            cursor->parent();
-        }
-    }
-
-    pthread_mutex_unlock(&handler_mutex);
-}
-
-int pthread_atfork(void (*prepare)(void), void (*parent)(void), void(*child)(void))
-{
-    struct atfork_t *entry = malloc(sizeof(struct atfork_t));
-
-    if (entry == NULL) {
-        return ENOMEM;
-    }
-
-    entry->prepare = prepare;
-    entry->parent = parent;
-    entry->child = child;
-
-    pthread_mutex_lock(&handler_mutex);
-    CIRCLEQ_INSERT_TAIL(&atfork_head, entry, entries);
-    pthread_mutex_unlock(&handler_mutex);
-
-    return 0;
-}
diff --git a/libc/bionic/pthread_atfork.cpp b/libc/bionic/pthread_atfork.cpp
new file mode 100644
index 0000000..5bf63fb
--- /dev/null
+++ b/libc/bionic/pthread_atfork.cpp
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <pthread.h>
+
+static pthread_mutex_t gAtForkListMutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER;
+
+struct atfork_t {
+  atfork_t* next;
+  atfork_t* prev;
+
+  void (*prepare)(void);
+  void (*child)(void);
+  void (*parent)(void);
+};
+
+struct atfork_list_t {
+  atfork_t* first;
+  atfork_t* last;
+};
+
+static atfork_list_t gAtForkList = { NULL, NULL };
+
+void __bionic_atfork_run_prepare() {
+  // We will lock this here, and unlock it in the parent and child functions.
+  // This ensures that nobody can modify the handler array between the calls
+  // to the prepare and parent/child handlers.
+  //
+  // TODO: If a handler mucks with the list, it could cause problems.  Right
+  //       now it's ok because all they can do is add new items to the end
+  //       of the list, but if/when we implement cleanup in dlclose() things
+  //       will get more interesting...
+  pthread_mutex_lock(&gAtForkListMutex);
+
+  // Call pthread_atfork() prepare handlers. POSIX states that the prepare
+  // handlers should be called in the reverse order of the parent/child
+  // handlers, so we iterate backwards.
+  for (atfork_t* it = gAtForkList.last; it != NULL; it = it->prev) {
+    if (it->prepare != NULL) {
+      it->prepare();
+    }
+  }
+}
+
+void __bionic_atfork_run_child() {
+  for (atfork_t* it = gAtForkList.first; it != NULL; it = it->next) {
+    if (it->child != NULL) {
+      it->child();
+    }
+  }
+
+  pthread_mutexattr_t attr;
+  pthread_mutexattr_init(&attr);
+  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+  pthread_mutex_init(&gAtForkListMutex, &attr);
+}
+
+void __bionic_atfork_run_parent() {
+  for (atfork_t* it = gAtForkList.first; it != NULL; it = it->next) {
+    if (it->parent != NULL) {
+      it->parent();
+    }
+  }
+
+  pthread_mutex_unlock(&gAtForkListMutex);
+}
+
+int pthread_atfork(void (*prepare)(void), void (*parent)(void), void(*child)(void)) {
+  atfork_t* entry = reinterpret_cast<atfork_t*>(malloc(sizeof(atfork_t)));
+  if (entry == NULL) {
+    return ENOMEM;
+  }
+
+  entry->prepare = prepare;
+  entry->parent = parent;
+  entry->child = child;
+
+  pthread_mutex_lock(&gAtForkListMutex);
+
+  // Append 'entry' to the list.
+  entry->next = NULL;
+  entry->prev = gAtForkList.last;
+  if (entry->prev != NULL) {
+    entry->prev->next = entry;
+  }
+  if (gAtForkList.first == NULL) {
+    gAtForkList.first = entry;
+  }
+  gAtForkList.last = entry;
+
+  pthread_mutex_unlock(&gAtForkListMutex);
+
+  return 0;
+}
diff --git a/libc/bionic/pthread_attr.cpp b/libc/bionic/pthread_attr.cpp
index dfb740d..fdf2965 100644
--- a/libc/bionic/pthread_attr.cpp
+++ b/libc/bionic/pthread_attr.cpp
@@ -56,7 +56,7 @@
   return 0;
 }
 
-int pthread_attr_getdetachstate(pthread_attr_t const* attr, int* state) {
+int pthread_attr_getdetachstate(const pthread_attr_t* attr, int* state) {
   *state = (attr->flags & PTHREAD_ATTR_FLAG_DETACHED) ? PTHREAD_CREATE_DETACHED : PTHREAD_CREATE_JOINABLE;
   return 0;
 }
@@ -66,17 +66,17 @@
   return 0;
 }
 
-int pthread_attr_getschedpolicy(pthread_attr_t const* attr, int* policy) {
+int pthread_attr_getschedpolicy(const pthread_attr_t* attr, int* policy) {
   *policy = attr->sched_policy;
   return 0;
 }
 
-int pthread_attr_setschedparam(pthread_attr_t * attr, struct sched_param const* param) {
+int pthread_attr_setschedparam(pthread_attr_t* attr, const sched_param* param) {
   attr->sched_priority = param->sched_priority;
   return 0;
 }
 
-int pthread_attr_getschedparam(pthread_attr_t const* attr, struct sched_param* param) {
+int pthread_attr_getschedparam(const pthread_attr_t* attr, sched_param* param) {
   param->sched_priority = attr->sched_priority;
   return 0;
 }
@@ -89,7 +89,7 @@
   return 0;
 }
 
-int pthread_attr_getstacksize(pthread_attr_t const* attr, size_t* stack_size) {
+int pthread_attr_getstacksize(const pthread_attr_t* attr, size_t* stack_size) {
   *stack_size = attr->stack_size;
   return 0;
 }
@@ -100,7 +100,7 @@
   return ENOSYS;
 }
 
-int pthread_attr_getstackaddr(pthread_attr_t const* attr, void** stack_addr) {
+int pthread_attr_getstackaddr(const pthread_attr_t* attr, void** stack_addr) {
   // This was removed from POSIX.1-2008.
   // Needed for ABI compatibility with the NDK.
   *stack_addr = (char*)attr->stack_base + attr->stack_size;
@@ -119,7 +119,7 @@
   return 0;
 }
 
-int pthread_attr_getstack(pthread_attr_t const* attr, void** stack_base, size_t* stack_size) {
+int pthread_attr_getstack(const pthread_attr_t* attr, void** stack_base, size_t* stack_size) {
   *stack_base = attr->stack_base;
   *stack_size = attr->stack_size;
   return 0;
@@ -130,7 +130,7 @@
   return 0;
 }
 
-int pthread_attr_getguardsize(pthread_attr_t const* attr, size_t* guard_size) {
+int pthread_attr_getguardsize(const pthread_attr_t* attr, size_t* guard_size) {
   *guard_size = attr->guard_size;
   return 0;
 }
@@ -141,7 +141,7 @@
   return 0;
 }
 
-int pthread_attr_setscope(pthread_attr_t* , int scope) {
+int pthread_attr_setscope(pthread_attr_t*, int scope) {
   if (scope == PTHREAD_SCOPE_SYSTEM) {
     return 0;
   }
@@ -151,6 +151,7 @@
   return EINVAL;
 }
 
-int pthread_attr_getscope(pthread_attr_t const*) {
-  return PTHREAD_SCOPE_SYSTEM;
+int pthread_attr_getscope(const pthread_attr_t*, int* scope) {
+  *scope = PTHREAD_SCOPE_SYSTEM;
+  return 0;
 }
diff --git a/libc/bionic/pthread_cond.cpp b/libc/bionic/pthread_cond.cpp
new file mode 100644
index 0000000..abd453e
--- /dev/null
+++ b/libc/bionic/pthread_cond.cpp
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <pthread.h>
+
+#include <errno.h>
+#include <limits.h>
+#include <sys/atomics.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "pthread_internal.h"
+
+#include "private/bionic_atomic_inline.h"
+#include "private/bionic_futex.h"
+#include "private/bionic_pthread.h"
+#include "private/bionic_time_conversions.h"
+#include "private/bionic_tls.h"
+#include "private/thread_private.h"
+
+int pthread_condattr_init(pthread_condattr_t* attr) {
+  if (attr == NULL) {
+    return EINVAL;
+  }
+  *attr = PTHREAD_PROCESS_PRIVATE;
+  return 0;
+}
+
+int pthread_condattr_getpshared(const pthread_condattr_t* attr, int* pshared) {
+  if (attr == NULL || pshared == NULL) {
+    return EINVAL;
+  }
+  *pshared = *attr;
+  return 0;
+}
+
+int pthread_condattr_setpshared(pthread_condattr_t* attr, int pshared) {
+  if (attr == NULL) {
+    return EINVAL;
+  }
+  if (pshared != PTHREAD_PROCESS_SHARED && pshared != PTHREAD_PROCESS_PRIVATE) {
+    return EINVAL;
+  }
+  *attr = pshared;
+  return 0;
+}
+
+int pthread_condattr_destroy(pthread_condattr_t* attr) {
+  if (attr == NULL) {
+    return EINVAL;
+  }
+  *attr = 0xdeada11d;
+  return 0;
+}
+
+// We use one bit in condition variable values as the 'shared' flag
+// The rest is a counter.
+#define COND_SHARED_MASK        0x0001
+#define COND_COUNTER_INCREMENT  0x0002
+#define COND_COUNTER_MASK       (~COND_SHARED_MASK)
+
+#define COND_IS_SHARED(c)  (((c)->value & COND_SHARED_MASK) != 0)
+
+// XXX *technically* there is a race condition that could allow
+// XXX a signal to be missed.  If thread A is preempted in _wait()
+// XXX after unlocking the mutex and before waiting, and if other
+// XXX threads call signal or broadcast UINT_MAX/2 times (exactly),
+// XXX before thread A is scheduled again and calls futex_wait(),
+// XXX then the signal will be lost.
+
+int pthread_cond_init(pthread_cond_t* cond, const pthread_condattr_t* attr) {
+  if (cond == NULL) {
+    return EINVAL;
+  }
+
+  cond->value = 0;
+
+  if (attr != NULL && *attr == PTHREAD_PROCESS_SHARED) {
+    cond->value |= COND_SHARED_MASK;
+  }
+
+  return 0;
+}
+
+int pthread_cond_destroy(pthread_cond_t* cond) {
+  if (cond == NULL) {
+    return EINVAL;
+  }
+
+  cond->value = 0xdeadc04d;
+  return 0;
+}
+
+// This function is used by pthread_cond_broadcast and
+// pthread_cond_signal to atomically decrement the counter
+// then wake up 'counter' threads.
+static int __pthread_cond_pulse(pthread_cond_t* cond, int counter) {
+  if (__predict_false(cond == NULL)) {
+    return EINVAL;
+  }
+
+  long flags = (cond->value & ~COND_COUNTER_MASK);
+  while (true) {
+    long old_value = cond->value;
+    long new_value = ((old_value - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK) | flags;
+    if (__bionic_cmpxchg(old_value, new_value, &cond->value) == 0) {
+      break;
+    }
+  }
+
+  // Ensure that all memory accesses previously made by this thread are
+  // visible to the woken thread(s).  On the other side, the "wait"
+  // code will issue any necessary barriers when locking the mutex.
+  //
+  // This may not strictly be necessary -- if the caller follows
+  // recommended practice and holds the mutex before signaling the cond
+  // var, the mutex ops will provide correct semantics.  If they don't
+  // hold the mutex, they're subject to race conditions anyway.
+  ANDROID_MEMBAR_FULL();
+
+  __futex_wake_ex(&cond->value, COND_IS_SHARED(cond), counter);
+  return 0;
+}
+
+__LIBC_HIDDEN__
+int __pthread_cond_timedwait_relative(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* reltime) {
+  int old_value = cond->value;
+
+  pthread_mutex_unlock(mutex);
+  int status = __futex_wait_ex(&cond->value, COND_IS_SHARED(cond), old_value, reltime);
+  pthread_mutex_lock(mutex);
+
+  if (status == (-ETIMEDOUT)) {
+    return ETIMEDOUT;
+  }
+  return 0;
+}
+
+__LIBC_HIDDEN__
+int __pthread_cond_timedwait(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* abstime, clockid_t clock) {
+  timespec ts;
+  timespec* tsp;
+
+  if (abstime != NULL) {
+    if (__timespec_to_absolute(&ts, abstime, clock) < 0) {
+      return ETIMEDOUT;
+    }
+    tsp = &ts;
+  } else {
+    tsp = NULL;
+  }
+
+  return __pthread_cond_timedwait_relative(cond, mutex, tsp);
+}
+
+int pthread_cond_broadcast(pthread_cond_t* cond) {
+  return __pthread_cond_pulse(cond, INT_MAX);
+}
+
+int pthread_cond_signal(pthread_cond_t* cond) {
+  return __pthread_cond_pulse(cond, 1);
+}
+
+int pthread_cond_wait(pthread_cond_t* cond, pthread_mutex_t* mutex) {
+  return __pthread_cond_timedwait(cond, mutex, NULL, CLOCK_REALTIME);
+}
+
+int pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t * mutex, const timespec *abstime) {
+  return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_REALTIME);
+}
+
+// TODO: this exists only for backward binary compatibility.
+int pthread_cond_timedwait_monotonic(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* abstime) {
+  return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
+}
+
+int pthread_cond_timedwait_monotonic_np(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* abstime) {
+  return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
+}
+
+int pthread_cond_timedwait_relative_np(pthread_cond_t* cond, pthread_mutex_t* mutex, const timespec* reltime) {
+  return __pthread_cond_timedwait_relative(cond, mutex, reltime);
+}
+
+int pthread_cond_timeout_np(pthread_cond_t* cond, pthread_mutex_t* mutex, unsigned ms) {
+  timespec ts;
+  timespec_from_ms(ts, ms);
+  return __pthread_cond_timedwait_relative(cond, mutex, &ts);
+}
diff --git a/libc/bionic/pthread_create.cpp b/libc/bionic/pthread_create.cpp
index f0ee222..2153310 100644
--- a/libc/bionic/pthread_create.cpp
+++ b/libc/bionic/pthread_create.cpp
@@ -82,13 +82,12 @@
   }
 }
 
-__LIBC_ABI_PRIVATE__
 int _init_thread(pthread_internal_t* thread, bool add_to_thread_list) {
   int error = 0;
 
   // Set the scheduling policy/priority of the thread.
   if (thread->attr.sched_policy != SCHED_NORMAL) {
-    struct sched_param param;
+    sched_param param;
     param.sched_priority = thread->attr.sched_priority;
     if (sched_setscheduler(thread->tid, thread->attr.sched_policy, &param) == -1) {
 #if __LP64__
diff --git a/libc/bionic/pthread_exit.cpp b/libc/bionic/pthread_exit.cpp
new file mode 100644
index 0000000..aa9bd38
--- /dev/null
+++ b/libc/bionic/pthread_exit.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <pthread.h>
+
+#include <stdlib.h>
+#include <sys/mman.h>
+
+#include "pthread_internal.h"
+
+extern "C" void _exit_with_stack_teardown(void*, size_t, int);
+extern "C" void __exit(int);
+
+/* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support C++ exceptions
+ *         and thread cancelation
+ */
+
+void __pthread_cleanup_push(__pthread_cleanup_t* c, __pthread_cleanup_func_t routine, void* arg) {
+  pthread_internal_t* thread = __get_thread();
+  c->__cleanup_routine = routine;
+  c->__cleanup_arg = arg;
+  c->__cleanup_prev = thread->cleanup_stack;
+  thread->cleanup_stack = c;
+}
+
+void __pthread_cleanup_pop(__pthread_cleanup_t* c, int execute) {
+  pthread_internal_t* thread = __get_thread();
+  thread->cleanup_stack = c->__cleanup_prev;
+  if (execute) {
+    c->__cleanup_routine(c->__cleanup_arg);
+  }
+}
+
+void pthread_exit(void* retval) {
+  pthread_internal_t* thread = __get_thread();
+
+  // Call the cleanup handlers first.
+  while (thread->cleanup_stack) {
+    __pthread_cleanup_t* c = thread->cleanup_stack;
+    thread->cleanup_stack = c->__cleanup_prev;
+    c->__cleanup_routine(c->__cleanup_arg);
+  }
+
+  // Call the TLS destructors. It is important to do that before removing this
+  // thread from the global list. This will ensure that if someone else deletes
+  // a TLS key, the corresponding value will be set to NULL in this thread's TLS
+  // space (see pthread_key_delete).
+  pthread_key_clean_all();
+
+  if (thread->alternate_signal_stack != NULL) {
+    // Tell the kernel to stop using the alternate signal stack.
+    stack_t ss;
+    ss.ss_sp = NULL;
+    ss.ss_flags = SS_DISABLE;
+    sigaltstack(&ss, NULL);
+
+    // Free it.
+    munmap(thread->alternate_signal_stack, SIGSTKSZ);
+    thread->alternate_signal_stack = NULL;
+  }
+
+  // Keep track of what we need to know about the stack before we lose the pthread_internal_t.
+  void* stack_base = thread->attr.stack_base;
+  size_t stack_size = thread->attr.stack_size;
+  bool user_allocated_stack = ((thread->attr.flags & PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK) != 0);
+
+  // If the thread is detached, destroy the pthread_internal_t,
+  // otherwise keep it in memory and signal any joiners.
+  pthread_mutex_lock(&gThreadListLock);
+  if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
+    _pthread_internal_remove_locked(thread);
+  } else {
+    // Make sure that the pthread_internal_t doesn't have stale pointers to a stack that
+    // will be unmapped after the exit call below.
+    if (!user_allocated_stack) {
+      thread->attr.stack_base = NULL;
+      thread->attr.stack_size = 0;
+      thread->tls = NULL;
+    }
+
+    // Indicate that the thread has exited for joining threads.
+    thread->attr.flags |= PTHREAD_ATTR_FLAG_ZOMBIE;
+    thread->return_value = retval;
+
+    // Signal the joining thread if present.
+    if (thread->attr.flags & PTHREAD_ATTR_FLAG_JOINED) {
+      pthread_cond_signal(&thread->join_cond);
+    }
+  }
+  pthread_mutex_unlock(&gThreadListLock);
+
+  if (user_allocated_stack) {
+    // Cleaning up this thread's stack is the creator's responsibility, not ours.
+    __exit(0);
+  } else {
+    // We need to munmap the stack we're running on before calling exit.
+    // That's not something we can do in C.
+
+    // We don't want to take a signal after we've unmapped the stack.
+    // That's one last thing we can handle in C.
+    sigset_t mask;
+    sigfillset(&mask);
+    sigprocmask(SIG_SETMASK, &mask, NULL);
+
+    _exit_with_stack_teardown(stack_base, stack_size, 0);
+  }
+
+  /* NOTREACHED, but we told the compiler this function is noreturn, and it doesn't believe us. */
+  abort();
+}
diff --git a/libc/bionic/pthread_internal.h b/libc/bionic/pthread_internal.h
index ce8b410..8cca83a 100644
--- a/libc/bionic/pthread_internal.h
+++ b/libc/bionic/pthread_internal.h
@@ -29,13 +29,8 @@
 #define _PTHREAD_INTERNAL_H_
 
 #include <pthread.h>
-#include <stdbool.h>
-#include <sys/cdefs.h>
 
-__BEGIN_DECLS
-
-typedef struct pthread_internal_t
-{
+struct pthread_internal_t {
     struct pthread_internal_t*  next;
     struct pthread_internal_t*  prev;
     pthread_attr_t              attr;
@@ -55,12 +50,12 @@
      */
 #define __BIONIC_DLERROR_BUFFER_SIZE 512
     char dlerror_buffer[__BIONIC_DLERROR_BUFFER_SIZE];
-} pthread_internal_t;
+};
 
-int _init_thread(pthread_internal_t* thread, bool add_to_thread_list);
-void __init_tls(pthread_internal_t* thread);
-void _pthread_internal_add(pthread_internal_t* thread);
-pthread_internal_t* __get_thread(void);
+__LIBC_HIDDEN__ int _init_thread(pthread_internal_t* thread, bool add_to_thread_list);
+__LIBC_HIDDEN__ void __init_tls(pthread_internal_t* thread);
+__LIBC_HIDDEN__ void _pthread_internal_add(pthread_internal_t* thread);
+__LIBC_HIDDEN__ pthread_internal_t* __get_thread(void);
 
 __LIBC_HIDDEN__ void pthread_key_clean_all(void);
 __LIBC_HIDDEN__ void _pthread_internal_remove_locked(pthread_internal_t* thread);
@@ -91,12 +86,13 @@
 __LIBC_HIDDEN__ extern pthread_internal_t* gThreadList;
 __LIBC_HIDDEN__ extern pthread_mutex_t gThreadListLock;
 
-/* needed by fork.c */
-extern void __timer_table_start_stop(int  stop);
-extern void __bionic_atfork_run_prepare();
-extern void __bionic_atfork_run_child();
-extern void __bionic_atfork_run_parent();
+__LIBC_HIDDEN__ int __timespec_to_absolute(timespec*, const timespec*, clockid_t);
 
-__END_DECLS
+/* needed by fork.c */
+__LIBC_HIDDEN__ extern void __timer_table_start_stop(int);
+__LIBC_HIDDEN__ extern void __bionic_atfork_run_prepare();
+__LIBC_HIDDEN__ extern void __bionic_atfork_run_child();
+__LIBC_HIDDEN__ extern void __bionic_atfork_run_parent();
+__LIBC_HIDDEN__ extern int __pthread_settid(pthread_t, pid_t);
 
 #endif /* _PTHREAD_INTERNAL_H_ */
diff --git a/libc/bionic/pthread_internals.cpp b/libc/bionic/pthread_internals.cpp
index 01ecd5f..4b1f6ef 100644
--- a/libc/bionic/pthread_internals.cpp
+++ b/libc/bionic/pthread_internals.cpp
@@ -28,11 +28,13 @@
 
 #include "pthread_internal.h"
 
+#include "private/bionic_futex.h"
+#include "private/bionic_pthread.h"
 #include "private/bionic_tls.h"
 #include "private/ScopedPthreadMutexLocker.h"
 
-__LIBC_HIDDEN__ pthread_internal_t* gThreadList = NULL;
-__LIBC_HIDDEN__ pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER;
+pthread_internal_t* gThreadList = NULL;
+pthread_mutex_t gThreadListLock = PTHREAD_MUTEX_INITIALIZER;
 
 void _pthread_internal_remove_locked(pthread_internal_t* thread) {
   if (thread->next != NULL) {
@@ -51,7 +53,7 @@
   }
 }
 
-__LIBC_ABI_PRIVATE__ void _pthread_internal_add(pthread_internal_t* thread) {
+void _pthread_internal_add(pthread_internal_t* thread) {
   ScopedPthreadMutexLocker locker(&gThreadListLock);
 
   // We insert at the head.
@@ -63,6 +65,42 @@
   gThreadList = thread;
 }
 
-__LIBC_ABI_PRIVATE__ pthread_internal_t* __get_thread(void) {
+pthread_internal_t* __get_thread(void) {
   return reinterpret_cast<pthread_internal_t*>(__get_tls()[TLS_SLOT_THREAD_ID]);
 }
+
+pid_t __pthread_gettid(pthread_t t) {
+  return reinterpret_cast<pthread_internal_t*>(t)->tid;
+}
+
+int __pthread_settid(pthread_t t, pid_t tid) {
+  if (t == 0) {
+    return EINVAL;
+  }
+  reinterpret_cast<pthread_internal_t*>(t)->tid = tid;
+  return 0;
+}
+
+// Initialize 'ts' with the difference between 'abstime' and the current time
+// according to 'clock'. Returns -1 if abstime already expired, or 0 otherwise.
+int __timespec_to_absolute(timespec* ts, const timespec* abstime, clockid_t clock) {
+  clock_gettime(clock, ts);
+  ts->tv_sec  = abstime->tv_sec - ts->tv_sec;
+  ts->tv_nsec = abstime->tv_nsec - ts->tv_nsec;
+  if (ts->tv_nsec < 0) {
+    ts->tv_sec--;
+    ts->tv_nsec += 1000000000;
+  }
+  if ((ts->tv_nsec < 0) || (ts->tv_sec < 0)) {
+    return -1;
+  }
+  return 0;
+}
+
+int __futex_wake_ex(volatile void* ftx, int pshared, int val) {
+  return __futex_syscall3(ftx, pshared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE, val);
+}
+
+int __futex_wait_ex(volatile void* ftx, int pshared, int val, const timespec* timeout) {
+  return __futex_syscall4(ftx, pshared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE, val, timeout);
+}
diff --git a/libc/bionic/pthread.c b/libc/bionic/pthread_mutex.cpp
similarity index 68%
rename from libc/bionic/pthread.c
rename to libc/bionic/pthread_mutex.cpp
index aa300e9..6b82151 100644
--- a/libc/bionic/pthread.c
+++ b/libc/bionic/pthread_mutex.cpp
@@ -45,119 +45,6 @@
 extern void pthread_debug_mutex_lock_check(pthread_mutex_t *mutex);
 extern void pthread_debug_mutex_unlock_check(pthread_mutex_t *mutex);
 
-extern void _exit_with_stack_teardown(void * stackBase, size_t stackSize, int status);
-extern void __exit(int status);
-
-int  __futex_wake_ex(volatile void *ftx, int pshared, int val)
-{
-    return __futex_syscall3(ftx, pshared ? FUTEX_WAKE : FUTEX_WAKE_PRIVATE, val);
-}
-
-int  __futex_wait_ex(volatile void *ftx, int pshared, int val, const struct timespec *timeout)
-{
-    return __futex_syscall4(ftx, pshared ? FUTEX_WAIT : FUTEX_WAIT_PRIVATE, val, timeout);
-}
-
-/* CAVEAT: our implementation of pthread_cleanup_push/pop doesn't support C++ exceptions
- *         and thread cancelation
- */
-
-void __pthread_cleanup_push( __pthread_cleanup_t*      c,
-                             __pthread_cleanup_func_t  routine,
-                             void*                     arg )
-{
-    pthread_internal_t*  thread = __get_thread();
-
-    c->__cleanup_routine  = routine;
-    c->__cleanup_arg      = arg;
-    c->__cleanup_prev     = thread->cleanup_stack;
-    thread->cleanup_stack = c;
-}
-
-void __pthread_cleanup_pop( __pthread_cleanup_t*  c, int  execute )
-{
-    pthread_internal_t*  thread = __get_thread();
-
-    thread->cleanup_stack = c->__cleanup_prev;
-    if (execute)
-        c->__cleanup_routine(c->__cleanup_arg);
-}
-
-void pthread_exit(void* retval) {
-    pthread_internal_t* thread = __get_thread();
-
-    // Call the cleanup handlers first.
-    while (thread->cleanup_stack) {
-        __pthread_cleanup_t* c = thread->cleanup_stack;
-        thread->cleanup_stack = c->__cleanup_prev;
-        c->__cleanup_routine(c->__cleanup_arg);
-    }
-
-    // Call the TLS destructors. It is important to do that before removing this
-    // thread from the global list. This will ensure that if someone else deletes
-    // a TLS key, the corresponding value will be set to NULL in this thread's TLS
-    // space (see pthread_key_delete).
-    pthread_key_clean_all();
-
-    if (thread->alternate_signal_stack != NULL) {
-      // Tell the kernel to stop using the alternate signal stack.
-      stack_t ss;
-      ss.ss_sp = NULL;
-      ss.ss_flags = SS_DISABLE;
-      sigaltstack(&ss, NULL);
-
-      // Free it.
-      munmap(thread->alternate_signal_stack, SIGSTKSZ);
-      thread->alternate_signal_stack = NULL;
-    }
-
-    // Keep track of what we need to know about the stack before we lose the pthread_internal_t.
-    void* stack_base = thread->attr.stack_base;
-    size_t stack_size = thread->attr.stack_size;
-    bool user_allocated_stack = ((thread->attr.flags & PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK) != 0);
-
-    // If the thread is detached, destroy the pthread_internal_t,
-    // otherwise keep it in memory and signal any joiners.
-    pthread_mutex_lock(&gThreadListLock);
-    if (thread->attr.flags & PTHREAD_ATTR_FLAG_DETACHED) {
-        _pthread_internal_remove_locked(thread);
-    } else {
-        // Make sure that the thread struct doesn't have stale pointers to a stack that
-        // will be unmapped after the exit call below.
-        if (!user_allocated_stack) {
-            thread->attr.stack_base = NULL;
-            thread->attr.stack_size = 0;
-            thread->tls = NULL;
-        }
-
-        // Indicate that the thread has exited for joining threads.
-        thread->attr.flags |= PTHREAD_ATTR_FLAG_ZOMBIE;
-        thread->return_value = retval;
-
-        // Signal the joining thread if present.
-        if (thread->attr.flags & PTHREAD_ATTR_FLAG_JOINED) {
-            pthread_cond_signal(&thread->join_cond);
-        }
-    }
-    pthread_mutex_unlock(&gThreadListLock);
-
-    if (user_allocated_stack) {
-        // Cleaning up this thread's stack is the creator's responsibility, not ours.
-        __exit(0);
-    } else {
-        // We need to munmap the stack we're running on before calling exit.
-        // That's not something we can do in C.
-
-        // We don't want to take a signal after we've unmapped the stack.
-        // That's one last thing we can handle in C.
-        sigset_t mask;
-        sigfillset(&mask);
-        sigprocmask(SIG_SETMASK, &mask, NULL);
-
-        _exit_with_stack_teardown(stack_base, stack_size, 0);
-    }
-}
-
 /* a mutex is implemented as a 32-bit integer holding the following fields
  *
  * bits:     name     description
@@ -387,8 +274,7 @@
     return EINVAL;
 }
 
-int pthread_mutexattr_getpshared(pthread_mutexattr_t *attr, int *pshared)
-{
+int pthread_mutexattr_getpshared(const pthread_mutexattr_t* attr, int* pshared) {
     if (!attr || !pshared)
         return EINVAL;
 
@@ -802,31 +688,10 @@
     return err;
 }
 
-/* initialize 'ts' with the difference between 'abstime' and the current time
- * according to 'clock'. Returns -1 if abstime already expired, or 0 otherwise.
- */
-static int
-__timespec_to_absolute(struct timespec*  ts, const struct timespec*  abstime, clockid_t  clock)
-{
-    clock_gettime(clock, ts);
-    ts->tv_sec  = abstime->tv_sec - ts->tv_sec;
-    ts->tv_nsec = abstime->tv_nsec - ts->tv_nsec;
-    if (ts->tv_nsec < 0) {
-        ts->tv_sec--;
-        ts->tv_nsec += 1000000000;
-    }
-    if ((ts->tv_nsec < 0) || (ts->tv_sec < 0))
-        return -1;
-
-    return 0;
-}
-
 /* initialize 'abstime' to the current time according to 'clock' plus 'msecs'
  * milliseconds.
  */
-static void
-__timespec_to_relative_msec(struct timespec*  abstime, unsigned  msecs, clockid_t  clock)
-{
+static void __timespec_to_relative_msec(timespec* abstime, unsigned msecs, clockid_t clock) {
     clock_gettime(clock, abstime);
     abstime->tv_sec  += msecs/1000;
     abstime->tv_nsec += (msecs%1000)*1000000;
@@ -840,8 +705,8 @@
 int pthread_mutex_lock_timeout_np_impl(pthread_mutex_t *mutex, unsigned msecs)
 {
     clockid_t        clock = CLOCK_MONOTONIC;
-    struct timespec  abstime;
-    struct timespec  ts;
+    timespec  abstime;
+    timespec  ts;
     int               mvalue, mtype, tid, shared;
 
     /* compute absolute expiration time */
@@ -900,7 +765,7 @@
     }
 
     for (;;) {
-        struct timespec ts;
+        timespec ts;
 
         /* if the value is 'unlocked', try to acquire it directly */
         /* NOTE: put state to 2 since we know there is contention */
@@ -977,299 +842,3 @@
     mutex->value = 0xdead10cc;
     return 0;
 }
-
-
-
-int pthread_condattr_init(pthread_condattr_t *attr)
-{
-    if (attr == NULL)
-        return EINVAL;
-
-    *attr = PTHREAD_PROCESS_PRIVATE;
-    return 0;
-}
-
-int pthread_condattr_getpshared(pthread_condattr_t *attr, int *pshared)
-{
-    if (attr == NULL || pshared == NULL)
-        return EINVAL;
-
-    *pshared = *attr;
-    return 0;
-}
-
-int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared)
-{
-    if (attr == NULL)
-        return EINVAL;
-
-    if (pshared != PTHREAD_PROCESS_SHARED &&
-        pshared != PTHREAD_PROCESS_PRIVATE)
-        return EINVAL;
-
-    *attr = pshared;
-    return 0;
-}
-
-int pthread_condattr_destroy(pthread_condattr_t *attr)
-{
-    if (attr == NULL)
-        return EINVAL;
-
-    *attr = 0xdeada11d;
-    return 0;
-}
-
-/* We use one bit in condition variable values as the 'shared' flag
- * The rest is a counter.
- */
-#define COND_SHARED_MASK        0x0001
-#define COND_COUNTER_INCREMENT  0x0002
-#define COND_COUNTER_MASK       (~COND_SHARED_MASK)
-
-#define COND_IS_SHARED(c)  (((c)->value & COND_SHARED_MASK) != 0)
-
-/* XXX *technically* there is a race condition that could allow
- * XXX a signal to be missed.  If thread A is preempted in _wait()
- * XXX after unlocking the mutex and before waiting, and if other
- * XXX threads call signal or broadcast UINT_MAX/2 times (exactly),
- * XXX before thread A is scheduled again and calls futex_wait(),
- * XXX then the signal will be lost.
- */
-
-int pthread_cond_init(pthread_cond_t *cond,
-                      const pthread_condattr_t *attr)
-{
-    if (cond == NULL)
-        return EINVAL;
-
-    cond->value = 0;
-
-    if (attr != NULL && *attr == PTHREAD_PROCESS_SHARED)
-        cond->value |= COND_SHARED_MASK;
-
-    return 0;
-}
-
-int pthread_cond_destroy(pthread_cond_t *cond)
-{
-    if (cond == NULL)
-        return EINVAL;
-
-    cond->value = 0xdeadc04d;
-    return 0;
-}
-
-/* This function is used by pthread_cond_broadcast and
- * pthread_cond_signal to atomically decrement the counter
- * then wake-up 'counter' threads.
- */
-static int
-__pthread_cond_pulse(pthread_cond_t *cond, int  counter)
-{
-    long flags;
-
-    if (__predict_false(cond == NULL))
-        return EINVAL;
-
-    flags = (cond->value & ~COND_COUNTER_MASK);
-    for (;;) {
-        long oldval = cond->value;
-        long newval = ((oldval - COND_COUNTER_INCREMENT) & COND_COUNTER_MASK)
-                      | flags;
-        if (__bionic_cmpxchg(oldval, newval, &cond->value) == 0)
-            break;
-    }
-
-    /*
-     * Ensure that all memory accesses previously made by this thread are
-     * visible to the woken thread(s).  On the other side, the "wait"
-     * code will issue any necessary barriers when locking the mutex.
-     *
-     * This may not strictly be necessary -- if the caller follows
-     * recommended practice and holds the mutex before signaling the cond
-     * var, the mutex ops will provide correct semantics.  If they don't
-     * hold the mutex, they're subject to race conditions anyway.
-     */
-    ANDROID_MEMBAR_FULL();
-
-    __futex_wake_ex(&cond->value, COND_IS_SHARED(cond), counter);
-    return 0;
-}
-
-int pthread_cond_broadcast(pthread_cond_t *cond)
-{
-    return __pthread_cond_pulse(cond, INT_MAX);
-}
-
-int pthread_cond_signal(pthread_cond_t *cond)
-{
-    return __pthread_cond_pulse(cond, 1);
-}
-
-int pthread_cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex)
-{
-    return pthread_cond_timedwait(cond, mutex, NULL);
-}
-
-int __pthread_cond_timedwait_relative(pthread_cond_t *cond,
-                                      pthread_mutex_t * mutex,
-                                      const struct timespec *reltime)
-{
-    int  status;
-    int  oldvalue = cond->value;
-
-    pthread_mutex_unlock(mutex);
-    status = __futex_wait_ex(&cond->value, COND_IS_SHARED(cond), oldvalue, reltime);
-    pthread_mutex_lock(mutex);
-
-    if (status == (-ETIMEDOUT)) return ETIMEDOUT;
-    return 0;
-}
-
-int __pthread_cond_timedwait(pthread_cond_t *cond,
-                             pthread_mutex_t * mutex,
-                             const struct timespec *abstime,
-                             clockid_t clock)
-{
-    struct timespec ts;
-    struct timespec * tsp;
-
-    if (abstime != NULL) {
-        if (__timespec_to_absolute(&ts, abstime, clock) < 0)
-            return ETIMEDOUT;
-        tsp = &ts;
-    } else {
-        tsp = NULL;
-    }
-
-    return __pthread_cond_timedwait_relative(cond, mutex, tsp);
-}
-
-int pthread_cond_timedwait(pthread_cond_t *cond,
-                           pthread_mutex_t * mutex,
-                           const struct timespec *abstime)
-{
-    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_REALTIME);
-}
-
-
-/* this one exists only for backward binary compatibility */
-int pthread_cond_timedwait_monotonic(pthread_cond_t *cond,
-                                     pthread_mutex_t * mutex,
-                                     const struct timespec *abstime)
-{
-    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
-}
-
-int pthread_cond_timedwait_monotonic_np(pthread_cond_t *cond,
-                                     pthread_mutex_t * mutex,
-                                     const struct timespec *abstime)
-{
-    return __pthread_cond_timedwait(cond, mutex, abstime, CLOCK_MONOTONIC);
-}
-
-int pthread_cond_timedwait_relative_np(pthread_cond_t *cond,
-                                      pthread_mutex_t * mutex,
-                                      const struct timespec *reltime)
-{
-    return __pthread_cond_timedwait_relative(cond, mutex, reltime);
-}
-
-int pthread_cond_timeout_np(pthread_cond_t *cond,
-                            pthread_mutex_t * mutex,
-                            unsigned ms)
-{
-    struct timespec ts;
-    timespec_from_ms(ts, ms);
-    return __pthread_cond_timedwait_relative(cond, mutex, &ts);
-}
-
-
-/* NOTE: this implementation doesn't support a init function that throws a C++ exception
- *       or calls fork()
- */
-int pthread_once( pthread_once_t*  once_control,  void (*init_routine)(void) )
-{
-    volatile pthread_once_t* ocptr = once_control;
-
-    /* PTHREAD_ONCE_INIT is 0, we use the following bit flags
-     *
-     *   bit 0 set  -> initialization is under way
-     *   bit 1 set  -> initialization is complete
-     */
-#define ONCE_INITIALIZING           (1 << 0)
-#define ONCE_COMPLETED              (1 << 1)
-
-    /* First check if the once is already initialized. This will be the common
-    * case and we want to make this as fast as possible. Note that this still
-    * requires a load_acquire operation here to ensure that all the
-    * stores performed by the initialization function are observable on
-    * this CPU after we exit.
-    */
-    if (__predict_true((*ocptr & ONCE_COMPLETED) != 0)) {
-        ANDROID_MEMBAR_FULL();
-        return 0;
-    }
-
-    for (;;) {
-        /* Try to atomically set the INITIALIZING flag.
-         * This requires a cmpxchg loop, and we may need
-         * to exit prematurely if we detect that
-         * COMPLETED is now set.
-         */
-        int32_t  oldval, newval;
-
-        do {
-            oldval = *ocptr;
-            if ((oldval & ONCE_COMPLETED) != 0)
-                break;
-
-            newval = oldval | ONCE_INITIALIZING;
-        } while (__bionic_cmpxchg(oldval, newval, ocptr) != 0);
-
-        if ((oldval & ONCE_COMPLETED) != 0) {
-            /* We detected that COMPLETED was set while in our loop */
-            ANDROID_MEMBAR_FULL();
-            return 0;
-        }
-
-        if ((oldval & ONCE_INITIALIZING) == 0) {
-            /* We got there first, we can jump out of the loop to
-             * handle the initialization */
-            break;
-        }
-
-        /* Another thread is running the initialization and hasn't completed
-         * yet, so wait for it, then try again. */
-        __futex_wait_ex(ocptr, 0, oldval, NULL);
-    }
-
-    /* call the initialization function. */
-    (*init_routine)();
-
-    /* Do a store_release indicating that initialization is complete */
-    ANDROID_MEMBAR_FULL();
-    *ocptr = ONCE_COMPLETED;
-
-    /* Wake up any waiters, if any */
-    __futex_wake_ex(ocptr, 0, INT_MAX);
-
-    return 0;
-}
-
-pid_t __pthread_gettid(pthread_t thid) {
-  pthread_internal_t* thread = (pthread_internal_t*) thid;
-  return thread->tid;
-}
-
-int __pthread_settid(pthread_t thid, pid_t tid) {
-  if (thid == 0) {
-      return EINVAL;
-  }
-
-  pthread_internal_t* thread = (pthread_internal_t*) thid;
-  thread->tid = tid;
-
-  return 0;
-}
diff --git a/libc/bionic/pthread_once.cpp b/libc/bionic/pthread_once.cpp
new file mode 100644
index 0000000..6d9d7d1
--- /dev/null
+++ b/libc/bionic/pthread_once.cpp
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *  * Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ *  * Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in
+ *    the documentation and/or other materials provided with the
+ *    distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <pthread.h>
+
+#include "private/bionic_atomic_inline.h"
+#include "private/bionic_futex.h"
+
+#define ONCE_INITIALIZING           (1 << 0)
+#define ONCE_COMPLETED              (1 << 1)
+
+/* NOTE: this implementation doesn't support an init function that throws a C++ exception
+ *       or calls fork()
+ */
+int pthread_once(pthread_once_t* once_control, void (*init_routine)(void)) {
+  volatile pthread_once_t* once_control_ptr = once_control;
+
+  // PTHREAD_ONCE_INIT is 0, we use the following bit flags
+  //   bit 0 set  -> initialization is under way
+  //   bit 1 set  -> initialization is complete
+
+  // First check if the once is already initialized. This will be the common
+  // case and we want to make this as fast as possible. Note that this still
+  // requires a load_acquire operation here to ensure that all the
+  // stores performed by the initialization function are observable on
+  // this CPU after we exit.
+  if (__predict_true((*once_control_ptr & ONCE_COMPLETED) != 0)) {
+    ANDROID_MEMBAR_FULL();
+    return 0;
+  }
+
+  while (true) {
+    // Try to atomically set the INITIALIZING flag.
+    // This requires a cmpxchg loop, and we may need
+    // to exit prematurely if we detect that
+    // COMPLETED is now set.
+    int32_t  old_value, new_value;
+
+    do {
+      old_value = *once_control_ptr;
+      if ((old_value & ONCE_COMPLETED) != 0) {
+        break;
+      }
+
+      new_value = old_value | ONCE_INITIALIZING;
+    } while (__bionic_cmpxchg(old_value, new_value, once_control_ptr) != 0);
+
+    if ((old_value & ONCE_COMPLETED) != 0) {
+      // We detected that COMPLETED was set while in our loop.
+      ANDROID_MEMBAR_FULL();
+      return 0;
+    }
+
+    if ((old_value & ONCE_INITIALIZING) == 0) {
+      // We got there first, we can jump out of the loop to handle the initialization.
+      break;
+    }
+
+    // Another thread is running the initialization and hasn't completed
+    // yet, so wait for it, then try again.
+    __futex_wait_ex(once_control_ptr, 0, old_value, NULL);
+  }
+
+  // Call the initialization function.
+  (*init_routine)();
+
+  // Do a store_release indicating that initialization is complete.
+  ANDROID_MEMBAR_FULL();
+  *once_control_ptr = ONCE_COMPLETED;
+
+  // Wake up any waiters, if any.
+  __futex_wake_ex(once_control_ptr, 0, INT_MAX);
+
+  return 0;
+}
diff --git a/libc/bionic/pthread-rwlocks.c b/libc/bionic/pthread_rwlock.cpp
similarity index 77%
rename from libc/bionic/pthread-rwlocks.c
rename to libc/bionic/pthread_rwlock.cpp
index 59e2248..0182ef3 100644
--- a/libc/bionic/pthread-rwlocks.c
+++ b/libc/bionic/pthread_rwlock.cpp
@@ -91,8 +91,7 @@
     }
 }
 
-int pthread_rwlockattr_getpshared(pthread_rwlockattr_t *attr, int *pshared)
-{
+int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t* attr, int* pshared) {
     if (!attr || !pshared)
         return EINVAL;
 
@@ -195,10 +194,62 @@
         pthread_cond_broadcast(&rwlock->cond);
 }
 
+static int __pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
+  int ret = 0;
 
-int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
-{
-    return pthread_rwlock_timedrdlock(rwlock, NULL);
+  if (rwlock == NULL) {
+    return EINVAL;
+  }
+
+  pthread_mutex_lock(&rwlock->lock);
+  int tid = __get_thread()->tid;
+  if (__predict_false(!read_precondition(rwlock, tid))) {
+    rwlock->pendingReaders += 1;
+    do {
+      ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
+    } while (ret == 0 && !read_precondition(rwlock, tid));
+    rwlock->pendingReaders -= 1;
+    if (ret != 0) {
+      goto EXIT;
+    }
+  }
+  ++rwlock->numLocks;
+EXIT:
+  pthread_mutex_unlock(&rwlock->lock);
+  return ret;
+}
+
+static int __pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
+  int ret = 0;
+
+  if (rwlock == NULL) {
+    return EINVAL;
+  }
+
+  pthread_mutex_lock(&rwlock->lock);
+  int tid = __get_thread()->tid;
+  if (__predict_false(!write_precondition(rwlock, tid))) {
+    // If we can't acquire the write lock yet, wait until the rwlock
+    // is unlocked and try again. Increment pendingWriters to get the
+    // cond broadcast when that happens.
+    rwlock->pendingWriters += 1;
+    do {
+      ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
+    } while (ret == 0 && !write_precondition(rwlock, tid));
+    rwlock->pendingWriters -= 1;
+    if (ret != 0) {
+      goto EXIT;
+    }
+  }
+  ++rwlock->numLocks;
+  rwlock->writerThreadId = tid;
+EXIT:
+  pthread_mutex_unlock(&rwlock->lock);
+  return ret;
+}
+
+int pthread_rwlock_rdlock(pthread_rwlock_t* rwlock) {
+  return __pthread_rwlock_timedrdlock(rwlock, NULL);
 }
 
 int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
@@ -212,40 +263,18 @@
     if (__predict_false(!read_precondition(rwlock, __get_thread()->tid)))
         ret = EBUSY;
     else
-        rwlock->numLocks ++;
+        ++rwlock->numLocks;
     pthread_mutex_unlock(&rwlock->lock);
 
     return ret;
 }
 
-int pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout)
-{
-    int ret = 0;
-
-    if (rwlock == NULL)
-        return EINVAL;
-
-    pthread_mutex_lock(&rwlock->lock);
-    int tid = __get_thread()->tid;
-    if (__predict_false(!read_precondition(rwlock, tid))) {
-        rwlock->pendingReaders += 1;
-        do {
-            ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
-        } while (ret == 0 && !read_precondition(rwlock, tid));
-        rwlock->pendingReaders -= 1;
-        if (ret != 0)
-            goto EXIT;
-    }
-    rwlock->numLocks ++;
-EXIT:
-    pthread_mutex_unlock(&rwlock->lock);
-    return ret;
+int pthread_rwlock_timedrdlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
+  return __pthread_rwlock_timedrdlock(rwlock, abs_timeout);
 }
 
-
-int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
-{
-    return pthread_rwlock_timedwrlock(rwlock, NULL);
+int pthread_rwlock_wrlock(pthread_rwlock_t* rwlock) {
+  return __pthread_rwlock_timedwrlock(rwlock, NULL);
 }
 
 int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
@@ -260,43 +289,17 @@
     if (__predict_false(!write_precondition(rwlock, tid))) {
         ret = EBUSY;
     } else {
-        rwlock->numLocks ++;
+        ++rwlock->numLocks;
         rwlock->writerThreadId = tid;
     }
     pthread_mutex_unlock(&rwlock->lock);
     return ret;
 }
 
-int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, const struct timespec *abs_timeout)
-{
-    int ret = 0;
-
-    if (rwlock == NULL)
-        return EINVAL;
-
-    pthread_mutex_lock(&rwlock->lock);
-    int tid = __get_thread()->tid;
-    if (__predict_false(!write_precondition(rwlock, tid))) {
-        /* If we can't read yet, wait until the rwlock is unlocked
-         * and try again. Increment pendingReaders to get the
-         * cond broadcast when that happens.
-         */
-        rwlock->pendingWriters += 1;
-        do {
-            ret = pthread_cond_timedwait(&rwlock->cond, &rwlock->lock, abs_timeout);
-        } while (ret == 0 && !write_precondition(rwlock, tid));
-        rwlock->pendingWriters -= 1;
-        if (ret != 0)
-            goto EXIT;
-    }
-    rwlock->numLocks ++;
-    rwlock->writerThreadId = tid;
-EXIT:
-    pthread_mutex_unlock(&rwlock->lock);
-    return ret;
+int pthread_rwlock_timedwrlock(pthread_rwlock_t* rwlock, const timespec* abs_timeout) {
+  return __pthread_rwlock_timedwrlock(rwlock, abs_timeout);
 }
 
-
 int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
 {
     int  ret = 0;
diff --git a/libc/bionic/pthread_setschedparam.cpp b/libc/bionic/pthread_setschedparam.cpp
index 55ec791..419cc6f 100644
--- a/libc/bionic/pthread_setschedparam.cpp
+++ b/libc/bionic/pthread_setschedparam.cpp
@@ -31,7 +31,7 @@
 #include "private/ErrnoRestorer.h"
 #include "pthread_accessor.h"
 
-int pthread_setschedparam(pthread_t t, int policy, struct sched_param const* param) {
+int pthread_setschedparam(pthread_t t, int policy, const sched_param* param) {
   ErrnoRestorer errno_restorer;
 
   pthread_accessor thread(t);
diff --git a/libc/bionic/ptrace.c b/libc/bionic/ptrace.cpp
similarity index 71%
rename from libc/bionic/ptrace.c
rename to libc/bionic/ptrace.cpp
index 0bb1acd..c0fd5de 100644
--- a/libc/bionic/ptrace.c
+++ b/libc/bionic/ptrace.cpp
@@ -25,33 +25,31 @@
  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
+
 #include <sys/types.h>
 #include <sys/ptrace.h>
 
-extern long __ptrace(int request, pid_t pid, void *addr, void *data);
+extern "C" long __ptrace(int request, pid_t pid, void* addr, void* data);
 
-long ptrace(int request, pid_t pid, void * addr, void * data)
-{
-    switch (request) {
-        case PTRACE_PEEKUSR:
-        case PTRACE_PEEKTEXT:
-        case PTRACE_PEEKDATA:
-        {
-            long word;
-            long ret;
-
-            ret = __ptrace(request, pid, addr, &word);
-            if (ret == 0) {
-                return word;
-            } else {
-                // __ptrace will set errno for us
-                return -1;
-            }
-        }
-
-        default:
-             return __ptrace(request, pid, addr, data);
+long ptrace(int request, pid_t pid, void* addr, void* data) {
+  switch (request) {
+    case PTRACE_PEEKUSR:
+    case PTRACE_PEEKTEXT:
+    case PTRACE_PEEKDATA:
+      {
+      long word;
+      long ret = __ptrace(request, pid, addr, &word);
+      if (ret == 0) {
+        return word;
+      } else {
+        // __ptrace already set errno for us.
+        return -1;
+      }
     }
+
+    default:
+      return __ptrace(request, pid, addr, data);
+  }
 }
 
 /*
@@ -63,6 +61,7 @@
 #define ATTRIBUTES __attribute__((noinline))
 #endif
 
-void ATTRIBUTES _thread_created_hook(pid_t thread_id)
-{
+extern "C" void _thread_created_hook(pid_t) ATTRIBUTES;
+
+void _thread_created_hook(pid_t) {
 }
diff --git a/libc/bionic/thread_atexit.c b/libc/bionic/thread_atexit.cpp
similarity index 82%
rename from libc/bionic/thread_atexit.c
rename to libc/bionic/thread_atexit.cpp
index dc4a5a0..cad65d3 100644
--- a/libc/bionic/thread_atexit.c
+++ b/libc/bionic/thread_atexit.cpp
@@ -29,17 +29,18 @@
 /* some simple glue used to make the BSD atexit code happy */
 
 #include <pthread.h>
-#include "pthread_internal.h"
 
-static pthread_mutex_t   gAtExitLock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t gAtExitLock = PTHREAD_MUTEX_INITIALIZER;
 
-void  _thread_atexit_lock( void )
-{
-  pthread_mutex_lock( &gAtExitLock );
+__BEGIN_DECLS
+__LIBC_HIDDEN__ void _thread_atexit_lock();
+__LIBC_HIDDEN__ void _thread_atexit_unlock();
+__END_DECLS
+
+void _thread_atexit_lock() {
+  pthread_mutex_lock(&gAtExitLock);
 }
 
-void _thread_atexit_unlock( void )
-{
-  pthread_mutex_unlock( &gAtExitLock );
+void _thread_atexit_unlock() {
+  pthread_mutex_unlock(&gAtExitLock);
 }
-
diff --git a/libc/bionic/pthread-timers.c b/libc/bionic/timer.cpp
similarity index 87%
rename from libc/bionic/pthread-timers.c
rename to libc/bionic/timer.cpp
index d81bfef..76619f3 100644
--- a/libc/bionic/pthread-timers.c
+++ b/libc/bionic/timer.cpp
@@ -33,11 +33,8 @@
 #include <stdio.h>
 #include <string.h>
 
-extern int __pthread_cond_timedwait(pthread_cond_t*, pthread_mutex_t*, const struct timespec*,
-                                    clockid_t);
-
-extern int __pthread_cond_timedwait_relative(pthread_cond_t*, pthread_mutex_t*,
-                                             const struct timespec*);
+extern int __pthread_cond_timedwait(pthread_cond_t*, pthread_mutex_t*, const timespec*, clockid_t);
+extern int __pthread_cond_timedwait_relative(pthread_cond_t*, pthread_mutex_t*, const timespec*);
 
 // Normal (i.e. non-SIGEV_THREAD) timers are created directly by the kernel
 // and are passed as is to/from the caller.
@@ -110,8 +107,8 @@
     pthread_cond_t            cond;      /* signal a state change to thread */
     int volatile              done;      /* set by timer_delete */
     int volatile              stopped;   /* set by _start_stop() */
-    struct timespec volatile  expires;   /* next expiration time, or 0 */
-    struct timespec volatile  period;    /* reload value, or 0 */
+    timespec volatile  expires;   /* next expiration time, or 0 */
+    timespec volatile  period;    /* reload value, or 0 */
     int volatile              overruns;  /* current number of overruns */
 };
 
@@ -240,7 +237,7 @@
 static thr_timer_table_t* __timer_table;
 
 static void __timer_table_init(void) {
-  __timer_table = calloc(1, sizeof(*__timer_table));
+  __timer_table = reinterpret_cast<thr_timer_table_t*>(calloc(1, sizeof(*__timer_table)));
   if (__timer_table != NULL) {
     thr_timer_table_init(__timer_table);
   }
@@ -258,7 +255,7 @@
  ** requirements: the timers of fork child processes must be
  ** disarmed but not deleted.
  **/
-__LIBC_HIDDEN__ void __timer_table_start_stop(int stop) {
+void __timer_table_start_stop(int stop) {
   // We access __timer_table directly so we don't create it if it doesn't yet exist.
   thr_timer_table_start_stop(__timer_table, stop);
 }
@@ -286,7 +283,7 @@
 }
 
 
-static __inline__ void timespec_add(struct timespec* a, const struct timespec* b) {
+static __inline__ void timespec_add(timespec* a, const timespec* b) {
   a->tv_sec  += b->tv_sec;
   a->tv_nsec += b->tv_nsec;
   if (a->tv_nsec >= 1000000000) {
@@ -295,7 +292,7 @@
   }
 }
 
-static __inline__ void timespec_sub(struct timespec* a, const struct timespec* b) {
+static __inline__ void timespec_sub(timespec* a, const timespec* b) {
   a->tv_sec  -= b->tv_sec;
   a->tv_nsec -= b->tv_nsec;
   if (a->tv_nsec < 0) {
@@ -304,15 +301,15 @@
   }
 }
 
-static __inline__ void timespec_zero(struct timespec* a) {
+static __inline__ void timespec_zero(timespec* a) {
   a->tv_sec = a->tv_nsec = 0;
 }
 
-static __inline__ int timespec_is_zero(const struct timespec* a) {
+static __inline__ int timespec_is_zero(const timespec* a) {
   return (a->tv_sec == 0 && a->tv_nsec == 0);
 }
 
-static __inline__ int timespec_cmp(const struct timespec* a, const struct timespec* b) {
+static __inline__ int timespec_cmp(const timespec* a, const timespec* b) {
   if (a->tv_sec  < b->tv_sec)  return -1;
   if (a->tv_sec  > b->tv_sec)  return +1;
   if (a->tv_nsec < b->tv_nsec) return -1;
@@ -320,7 +317,7 @@
   return 0;
 }
 
-static __inline__ int timespec_cmp0(const struct timespec* a) {
+static __inline__ int timespec_cmp0(const timespec* a) {
   if (a->tv_sec < 0) return -1;
   if (a->tv_sec > 0) return +1;
   if (a->tv_nsec < 0) return -1;
@@ -330,15 +327,15 @@
 
 /** POSIX TIMERS APIs */
 
-extern int __timer_create(clockid_t, struct sigevent*, timer_t*);
-extern int __timer_delete(timer_t);
-extern int __timer_gettime(timer_t, struct itimerspec*);
-extern int __timer_settime(timer_t, int, const struct itimerspec*, struct itimerspec*);
-extern int __timer_getoverrun(timer_t);
+extern "C" int __timer_create(clockid_t, sigevent*, timer_t*);
+extern "C" int __timer_delete(timer_t);
+extern "C" int __timer_gettime(timer_t, itimerspec*);
+extern "C" int __timer_settime(timer_t, int, const itimerspec*, itimerspec*);
+extern "C" int __timer_getoverrun(timer_t);
 
 static void* timer_thread_start(void*);
 
-int timer_create(clockid_t clock_id, struct sigevent* evp, timer_t* timer_id) {
+int timer_create(clockid_t clock_id, sigevent* evp, timer_t* timer_id) {
   // If not a SIGEV_THREAD timer, the kernel can handle it without our help.
   if (__predict_true(evp == NULL || evp->sigev_notify != SIGEV_THREAD)) {
     return __timer_create(clock_id, evp, timer_id);
@@ -351,7 +348,7 @@
   }
 
   // Check that the clock id is supported by the kernel.
-  struct timespec dummy;
+  timespec dummy;
   if (clock_gettime(clock_id, &dummy) < 0 && errno == EINVAL) {
     return -1;
   }
@@ -435,34 +432,26 @@
 
 /* return the relative time until the next expiration, or 0 if
  * the timer is disarmed */
-static void
-timer_gettime_internal( thr_timer_t*        timer,
-                        struct itimerspec*  spec)
-{
-    struct timespec  diff;
+static void timer_gettime_internal(thr_timer_t* timer, itimerspec* spec) {
+  timespec diff = const_cast<timespec&>(timer->expires);
+  if (!timespec_is_zero(&diff)) {
+    timespec now;
 
-    diff = timer->expires;
-    if (!timespec_is_zero(&diff))
-    {
-        struct timespec  now;
+    clock_gettime(timer->clock, &now);
+    timespec_sub(&diff, &now);
 
-        clock_gettime( timer->clock, &now );
-        timespec_sub(&diff, &now);
-
-        /* in case of overrun, return 0 */
-        if (timespec_cmp0(&diff) < 0) {
-            timespec_zero(&diff);
-        }
+    /* in case of overrun, return 0 */
+    if (timespec_cmp0(&diff) < 0) {
+      timespec_zero(&diff);
     }
+  }
 
-    spec->it_value    = diff;
-    spec->it_interval = timer->period;
+  spec->it_value = diff;
+  spec->it_interval = const_cast<timespec&>(timer->period);
 }
 
 
-int
-timer_gettime( timer_t  id, struct itimerspec*  ospec )
-{
+int timer_gettime(timer_t id, itimerspec* ospec) {
     if (ospec == NULL) {
         errno = EINVAL;
         return -1;
@@ -486,11 +475,7 @@
 
 
 int
-timer_settime( timer_t                   id,
-               int                       flags,
-               const struct itimerspec*  spec,
-               struct itimerspec*        ospec )
-{
+timer_settime(timer_t id, int flags, const itimerspec* spec, itimerspec* ospec) {
     if (spec == NULL) {
         errno = EINVAL;
         return -1;
@@ -500,7 +485,7 @@
         return __timer_settime( id, flags, spec, ospec );
     } else {
         thr_timer_t*        timer = thr_timer_from_id(id);
-        struct timespec     expires, now;
+        timespec     expires, now;
 
         if (timer == NULL) {
             errno = EINVAL;
@@ -526,8 +511,8 @@
                     expires = now;
             }
         }
-        timer->expires = expires;
-        timer->period  = spec->it_interval;
+        const_cast<timespec&>(timer->expires) = expires;
+        const_cast<timespec&>(timer->period) = spec->it_interval;
         thr_timer_unlock( timer );
 
         /* signal the change to the thread */
@@ -561,7 +546,7 @@
 
 
 static void* timer_thread_start(void* arg) {
-  thr_timer_t* timer = arg;
+  thr_timer_t* timer = reinterpret_cast<thr_timer_t*>(arg);
 
   thr_timer_lock(timer);
 
@@ -572,8 +557,8 @@
 
   // We loop until timer->done is set in timer_delete().
   while (!timer->done) {
-    struct timespec expires = timer->expires;
-    struct timespec period = timer->period;
+    timespec expires = const_cast<timespec&>(timer->expires);
+    timespec period = const_cast<timespec&>(timer->period);
 
     // If the timer is stopped or disarmed, wait indefinitely
     // for a state change from timer_settime/_delete/_start_stop.
@@ -584,13 +569,13 @@
 
     // Otherwise, we need to do a timed wait until either a
     // state change of the timer expiration time.
-    struct timespec now;
+    timespec now;
     clock_gettime(timer->clock, &now);
 
     if (timespec_cmp(&expires, &now) > 0) {
       // Cool, there was no overrun, so compute the
       // relative timeout as 'expires - now', then wait.
-      struct timespec diff = expires;
+      timespec diff = expires;
       timespec_sub(&diff, &now);
 
       int ret = __pthread_cond_timedwait_relative(&timer->cond, &timer->mutex, &diff);
@@ -627,7 +612,7 @@
     } else {
       timespec_zero(&expires);
     }
-    timer->expires = expires;
+    const_cast<timespec&>(timer->expires) = expires;
 
     // Now call the timer callback function. Release the
     // lock to allow the function to modify the timer setting